defmodule RDF.Query do
@moduledoc """
The RDF Graph query API.
"""
alias RDF.Graph
alias RDF.Query.{BGP, Builder}
@default_matcher RDF.Query.BGP.Stream
@doc """
Execute the given `query` against the given `graph`.
The `query` can be given directly as an `RDF.Query.BGP` struct created with one
of the builder functions in this module or as a basic graph pattern expression
accepted by `bgp/1`.
The result is a list of maps with the solutions for the variables in the graph
pattern query and is returned in an `:ok` tuple. In case of an error an
`:error` tuple is returned.
## Example
Let's assume we have an `example_graph` with these triples:
```turtle
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix ex: <http://example.com/> .
ex:Outlaw
foaf:name "<NAME>" ;
foaf:mbox <mailto:<EMAIL>> .
ex:Goodguy
foaf:name "<NAME>" ;
foaf:mbox <mailto:<EMAIL>> ;
foaf:friend ex:Outlaw .
```
iex> {:_, FOAF.name, :name?} |> RDF.Query.execute(example_graph())
{:ok, [%{name: ~L"<NAME>"}, %{name: ~L"Johnny Lee Outlaw"}]}
iex> [
...> {:_, FOAF.name, :name?},
...> {:_, FOAF.mbox, :mbox?},
...> ] |> RDF.Query.execute(example_graph())
{:ok, [
%{name: ~L"<NAME>", mbox: ~I<mailto:<EMAIL>>},
%{name: ~L"<NAME>", mbox: ~I<mailto:<EMAIL>>}
]}
iex> query = [
...> {:_, FOAF.name, :name?},
...> {:_, FOAF.mbox, :mbox?},
...> ] |> RDF.Query.bgp()
...> RDF.Query.execute(query, example_graph())
{:ok, [
%{name: ~L"<NAME>", mbox: ~I<mailto:<EMAIL>>},
%{name: ~L"<NAME> Outlaw", mbox: ~I<mailto:<EMAIL>>}
]}
iex> [
...> EX.Goodguy, FOAF.friend, FOAF.name, :name?
...> ] |> RDF.Query.path() |> RDF.Query.execute(example_graph())
{:ok, [%{name: ~L"Johnny Lee Outlaw"}]}
"""
def execute(query, graph, opts \\ [])
def execute(%BGP{} = query, %Graph{} = graph, opts) do
matcher = Keyword.get(opts, :matcher, @default_matcher)
{:ok, matcher.execute(query, graph, opts)}
end
def execute(query, graph, opts) when is_list(query) or is_tuple(query) do
with {:ok, bgp} <- Builder.bgp(query) do
execute(bgp, graph, opts)
end
end
@doc """
Execute the given `query` against the given `graph`.
As opposed to `execute/3`, this returns the results directly or fails with an
exception.
"""
def execute!(query, graph, opts \\ []) do
case execute(query, graph, opts) do
{:ok, results} -> results
{:error, error} -> raise error
end
end
@doc """
Returns a `Stream` for the execution of the given `query` against the given `graph`.
Just like with `execute/3`, the `query` can be given directly as an `RDF.Query.BGP`
struct created with one of the builder functions in this module or as a basic
graph pattern expression accepted by `bgp/1`.
The stream of solutions for the variable bindings will be returned in an `:ok`
tuple. In case of an error an `:error` tuple is returned.
## Example
Let's assume we have an `example_graph` with these triples:
```turtle
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix ex: <http://example.com/> .
ex:Outlaw
foaf:name "<NAME>" ;
foaf:mbox <mailto:<EMAIL>> .
ex:Goodguy
foaf:name "<NAME>" ;
foaf:mbox <mailto:<EMAIL>> ;
foaf:friend ex:Outlaw .
```
iex> {:ok, stream} = {:_, FOAF.name, :name?} |> RDF.Query.stream(example_graph())
...> Enum.to_list(stream)
[%{name: ~L"<NAME>"}, %{name: ~L"<NAME>"}]
iex> {:ok, stream} = [
...> {:_, FOAF.name, :name?},
...> {:_, FOAF.mbox, :mbox?},
...> ] |> RDF.Query.stream(example_graph())
...> Enum.take(stream, 1)
[
%{name: ~L"<NAME>", mbox: ~I<mailto:<EMAIL>>},
]
"""
def stream(query, graph, opts \\ [])
def stream(%BGP{} = query, %Graph{} = graph, opts) do
matcher = Keyword.get(opts, :matcher, @default_matcher)
{:ok, matcher.stream(query, graph, opts)}
end
def stream(query, graph, opts) when is_list(query) or is_tuple(query) do
with {:ok, bgp} <- Builder.bgp(query) do
stream(bgp, graph, opts)
end
end
@doc """
Returns a `Stream` for the execution of the given `query` against the given `graph`.
As opposed to `stream/3`, this returns the stream directly or fails with an
exception.
"""
def stream!(query, graph, opts \\ []) do
case stream(query, graph, opts) do
{:ok, results} -> results
{:error, error} -> raise error
end
end
@doc """
Creates an `RDF.Query.BGP` struct.
A basic graph pattern consists of a single triple pattern or a list of triple
patterns.
A triple pattern is a tuple which consists of RDF terms or variables for
the subject, predicate and object of an RDF triple.
Allowed RDF terms are `RDF.IRI`s, `RDF.BlankNode`s, `RDF.Literal`s and all
Elixir values which can be coerced to any of those, i.e.
`RDF.Vocabulary.Namespace` atoms or Elixir values which can be coerced to RDF
literals with `RDF.Literal.coerce/1` (the latter only in the object position).
In the predicate position, the `:a` atom can be used for the `rdf:type` property.
Variables are written as atoms ending with a question mark. Blank nodes, which
in graph query patterns act like variables that don't show up in the results,
can be written as atoms starting with an underscore.
Here's a basic graph pattern example:
```elixir
[
{:s?, :a, EX.Foo},
{:s?, :a, EX.Bar},
{:s?, RDFS.label, "foo"},
{:s?, :p?, :o?}
]
```
Multiple triple patterns sharing the same subject and/or predicate can be grouped:
- Multiple objects on the same subject-predicate pair can be written by simply
listing them one after another in the same triple pattern.
- Multiple predicate-object pairs on the same subject can be written by
grouping them with square brackets.
With these, the previous example can be shortened to:
```elixir
{
:s?,
[:a, EX.Foo, EX.Bar],
[RDFS.label, "foo"],
[:p?, :o?]
}
```
"""
defdelegate bgp(query), to: Builder, as: :bgp!
@doc """
Creates an `RDF.Query.BGP` struct for a path through a graph.
The elements of the path can consist of the same RDF terms and variable
expressions allowed in `bgp/1` expressions.
## Example
The `RDF.Query.BGP` struct built with this:
RDF.Query.path [EX.S, EX.p, RDFS.label, :name?]
is the same as the one built by this `bgp/1` call:
RDF.Query.bgp [
{EX.S, EX.p, :_o},
{:_o, RDFS.label, :name?},
]
"""
defdelegate path(query, opts \\ []), to: Builder, as: :path!
end
# source: lib/rdf/query.ex
defmodule FinanceTS.Adapters.Finnhub do
@moduledoc """
An adapter for Finnhub.io.
Homepage: https://finnhub.io/
API Docs: https://finnhub.io/docs/api
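## Example

A hypothetical usage sketch (symbol and returned values are illustrative;
assumes an API key is configured via `config :finance_ts, :finnhub, api_key: "..."`):

```elixir
{:ok, stream, symbol, currency, exchange} =
  FinanceTS.Adapters.Finnhub.get_stream("AAPL", :day)

Enum.take(stream, 1)
# => [[1546300800, 154.89, 158.85, 154.23, 157.92, 37039700.0]]
```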
"""
use Tesla
plug(Tesla.Middleware.BaseUrl, "https://finnhub.io/api/v1")
@behaviour FinanceTS.Adapter
@supported_resolutions [:minute, {:minute, 5}, {:minute, 15}, {:minute, 30}, :hour, :day, :week, :month]
def get_stream(symbol, resolution, opts \\ []) do
check_resolution(resolution)
case get_raw_ohlcv_csv(symbol, resolution, opts) do
{:ok, csv} ->
stream =
csv
|> String.replace_leading("t,o,h,l,c,v\n", "")
|> String.trim_trailing("\n")
|> String.split("\n")
|> Stream.map(fn line ->
[t, o, h, l, c, v] = String.split(line, ",")
[cast_int(t), cast_float(o), cast_float(h), cast_float(l), cast_float(c), cast_float(v)]
end)
{:ok, stream, symbol, "USD", "Unknown"}
{:error, error} ->
{:error, error}
end
end
# Private functions
defp cast_int(price_str) do
case Integer.parse(price_str) do
{price, _} -> price
:error -> nil
end
end
defp cast_float(price_str) do
case Float.parse(price_str) do
{price, _} -> price
:error -> nil
end
end
defp get_raw_ohlcv_csv(symbol, resolution, opts) do
from = opts[:from] || DateTime.to_unix(~U[1999-01-04 00:00:00Z])
to = opts[:to] || DateTime.to_unix(DateTime.utc_now())
resolution_param = convert_resolution_to_parameter(resolution)
url = "/stock/candle?symbol=#{symbol}&resolution=#{resolution_param}&from=#{from}&to=#{to}&format=csv&token=#{api_key()}"
case get(url) do
{:ok, %{body: "t,o,h,l,c,v\n"}} ->
{:error, "no data"}
{:ok, %{body: body}} ->
if String.contains?(body, "no_data") do
{:error, "no data"}
else
{:ok, body}
end
{:error, error} ->
{:error, error}
end
end
defp convert_resolution_to_parameter(:minute), do: "1"
defp convert_resolution_to_parameter({:minute, 5}), do: "5"
defp convert_resolution_to_parameter({:minute, 15}), do: "15"
defp convert_resolution_to_parameter({:minute, 30}), do: "30"
defp convert_resolution_to_parameter(:hour), do: "60"
defp convert_resolution_to_parameter(:day), do: "D"
defp convert_resolution_to_parameter(:week), do: "W"
defp convert_resolution_to_parameter(:month), do: "M"
defp check_resolution(r) when r in @supported_resolutions, do: nil
defp check_resolution(r), do: raise("Resolution #{inspect(r)} not supported. Use one of the following: #{inspect(@supported_resolutions)}.")
defp api_key do
Application.get_env(:finance_ts, :finnhub)[:api_key]
end
end
# source: lib/finance_ts/adapters/finnhub.ex
defmodule InteropProxy.Message do
@moduledoc """
Contains the Protobuf messages from exprotobuf.
"""
@external_resource "lib/messages/interop.proto"
use Protobuf,
from: Path.expand("../messages/interop.proto", __DIR__),
use_package_names: true
@doc ~S"""
Takes a map and turns it into a Protobuf struct recursively.
By default, exprotobuf doesn't handle nested messages, so this
function takes care of that for us.
## Examples
The nested values can be in both optional and repeated fields.
iex> alias InteropProxy.Message
iex> alias InteropProxy.Message.Interop.InteropMission
iex> map = %{waypoints: [%{lat: 12, lon: 23}]}
iex> Message.form_message map, InteropMission
%InteropProxy.Message.Interop.InteropMission{
air_drop_pos: nil,
current_mission: nil,
emergent_pos: nil,
fly_zones: [],
off_axis_pos: nil,
search_area: [],
time: nil,
waypoints: [
%InteropProxy.Message.Interop.AerialPosition{
alt_msl: nil,
lat: 12,
lon: 23
}
]
}
Keys can also be strings (useful when the map was converted from JSON).
iex> alias InteropProxy.Message
iex> alias InteropProxy.Message.Interop.InteropTelem
iex> map = %{:time => 12, "pos" => %{"lat" => 1, "lon" => 2}}
iex> Message.form_message map, InteropTelem
%InteropProxy.Message.Interop.InteropTelem{
pos: %InteropProxy.Message.Interop.AerialPosition{
alt_msl: nil,
lat: 1,
lon: 2
},
time: 12,
yaw: nil
}
"""
def form_message(map, module), do: do_form_message map, module, defs()
defp do_form_message(map, module, defs) do
fields = get_fields module, defs
# Taking the map and putting entries into a new struct.
Enum.reduce map, module.new, fn {key, value}, struct ->
if nested? value do
case get_nested fields, key do
# If it's a normal nested message, recursively call the
# function again to resolve more nested messages.
{mod, :optional} ->
struct
|> update(key, do_form_message(value, mod, defs))
# If it's a repeated message it's a list, so we'll do the
# above but for each element in the list.
{mod, :repeated} ->
struct
|> update(key, value |> Enum.map(&do_form_message(&1, mod, defs)))
end
else
# If we don't have anything nested, we're just entering a
# normal key-value pair
struct
|> update(key, value)
end
end
end
# Gets the list of fields for a message.
defp get_fields(module, defs) do
{_, fields} = defs
|> Enum.find(fn
{{:msg, ^module}, _} -> true
_ -> false
end)
fields
end
# Checking if a value is a nested message.
defp nested?(value) when is_map(value), do: true
defp nested?([head | _tail]) when is_map(head), do: true
defp nested?(_value), do: false
# Getting the module name and occurrence for a nested message.
defp get_nested(fields, key) do
%Protobuf.Field{type: {:msg, mod}, occurrence: occurrence} = fields
|> Enum.find(fn
%Protobuf.Field{name: ^key} when is_atom(key) ->
true
%Protobuf.Field{name: atom_key} when is_binary(key) ->
Atom.to_string(atom_key) === key
_ ->
false
end)
{mod, occurrence}
end
# Doing a normal key update.
defp update(struct, key, value) when is_atom(key) do
struct
|> Map.put(key, value)
end
# Doing a key update, but converting the string to an atom.
defp update(struct, key, value) when is_binary(key) do
struct
|> Map.put(key |> String.to_atom, value)
end
end
# source: services/interop-proxy/lib/interop_proxy/message.ex
defmodule Crux.Structs.Util do
@moduledoc """
Collection of util functions.
"""
@moduledoc since: "0.1.0"
alias Crux.Structs
alias Crux.Structs.Snowflake
@doc ~S"""
Converts a list of raw API data to structs keyed under the passed key.
## Examples
```elixir
iex> [
...> %{"username" => "space", "discriminator" => "0001", "id" => "218348062828003328", "avatar" => "46a356e237350bf8b8dfde15667dfc4"},
...> %{"username" => "Drahcirius", "discriminator" => "1336", "id" => "130175406673231873", "avatar" => "c896aebec82c90f590b08cfebcdc4e3b"}
...> ]
...> |> Crux.Structs.Util.raw_data_to_map(Crux.Structs.User)
%{
130175406673231873 => %Crux.Structs.User{
username: "Drahcirius",
discriminator: "1336",
id: 130175406673231873,
avatar: "c896aebec82c90f590b08cfebcdc4e3b",
bot: false,
system: false
},
218348062828003328 => %Crux.Structs.User{
username: "space",
discriminator: "0001",
id: 218348062828003328,
avatar: "46a356e237350bf8b8dfde15667dfc4",
bot: false,
system: false
}
}
iex> [
...> %{"username" => "space", "discriminator" => "0001", "id" => "218348062828003328", "avatar" => "46a356e237350bf8b8dfde15667dfc4"},
...> %{"username" => "Drahcirius", "discriminator" => "1336", "id" => "130175406673231873", "avatar" => "c896aebec82c90f590b08cfebcdc4e3b"}
...> ]
...> |> Crux.Structs.Util.raw_data_to_map(Crux.Structs.User, :username)
%{
"Drahcirius" => %Crux.Structs.User{
username: "Drahcirius",
discriminator: "1336",
id: 130175406673231873,
avatar: "c896aebec82c90f590b08cfebcdc4e3b",
bot: false,
system: false
},
"space" => %Crux.Structs.User{
username: "space",
discriminator: "0001",
id: 218348062828003328,
avatar: "46a356e237350bf8b8dfde15667dfc4",
bot: false,
system: false
}
}
```
"""
@doc since: "0.1.0"
@spec raw_data_to_map(data :: list, target :: module(), key :: atom()) :: map()
def raw_data_to_map(data, target, key \\ :id) do
data
|> Structs.create(target)
|> Map.new(fn struct -> {Map.fetch!(struct, key), struct} end)
end
@doc ~S"""
Returns a function converting a passed map to an id, using the specified key as key.
## Examples
```elixir
# Id is already a number
iex> Crux.Structs.Util.map_to_id(:foo).(%{foo: 123})
123
# Id is a string
iex> Crux.Structs.Util.map_to_id(:foo).(%{foo: "123"})
123
# No id exists
iex> Crux.Structs.Util.map_to_id(:foo).(%{"foo" => "123"})
nil
# Example using `Enum.map/2`
iex> [
...> %{"username" => "space", "discriminator" => "0001", "id" => "218348062828003328", "avatar" => "46a356e237350bf8b8dfde15667dfc4"},
...> %{"username" => "Drahcirius", "discriminator" => "1336", "id" => "130175406673231873", "avatar" => "c896aebec82c90f590b08cfebcdc4e3b"}
...> ]
...> |> Enum.map(Crux.Structs.Util.map_to_id("id"))
[218348062828003328, 130175406673231873]
```
"""
@doc since: "0.2.0"
@spec map_to_id(key :: term()) :: (map() -> Snowflake.t() | nil)
def map_to_id(key \\ :id) do
fn
%{^key => value} -> Snowflake.to_snowflake(value)
_ -> nil
end
end
@doc ~S"""
Atomifies all keys in a passed list or map to avoid the mess of mixed string and atom keys the gateway sends.
## Examples
```elixir
# A map
iex> %{"username" => "space", "discriminator" => "0001", "id" => "218348062828003328", "avatar" => "46a356e237350bf8b8dfde15667dfc4"}
...> |> Crux.Structs.Util.atomify()
%{username: "space", discriminator: "0001", id: "218348062828003328", avatar: "46a356e237350bf8b8dfde15667dfc4"}
# A list
iex> [
...> %{"username" => "space", "discriminator" => "0001", "id" => "218348062828003328", "avatar" => "46a356e237350bf8b8dfde15667dfc4"},
...> %{"username" => "Drahcirius", "discriminator" => "1336", "id" => "130175406673231873", "avatar" => "c896aebec82c90f590b08cfebcdc4e3b"}
...> ]
...> |> Crux.Structs.Util.atomify()
[
%{username: "space", discriminator: "0001", id: "218348062828003328", avatar: "46a356e237350bf8b8dfde15667dfc4"},
%{username: "Drahcirius", discriminator: "1336", id: "130175406673231873", avatar: "c896aebec82c90f590b08cfebcdc4e3b"}
]
# A nested map
iex> %{"foo" => "bar", "bar" => %{"baz" => "foo"}}
...> |> Crux.Structs.Util.atomify()
%{foo: "bar", bar: %{baz: "foo"}}
# A nested list
iex> [[%{"foo" => "bar"}], %{"bar" => "foo"}]
...> |> Crux.Structs.Util.atomify()
[[%{foo: "bar"}], %{bar: "foo"}]
# A struct
iex> %Crux.Structs.Overwrite{id: 448394877194076161, type: "role", allow: 0, deny: 0}
...> |> Crux.Structs.Util.atomify()
%{id: 448394877194076161, type: "role", allow: 0, deny: 0}
```
"""
@doc since: "0.1.0"
@spec atomify(input :: map() | list()) :: map() | list()
def atomify(input)
def atomify(%{__struct__: _struct} = struct), do: struct |> Map.from_struct() |> atomify()
def atomify(%{} = map), do: Map.new(map, &atomify_kv/1)
def atomify(list) when is_list(list), do: Enum.map(list, &atomify/1)
def atomify(other), do: other
defp atomify_kv({k, v}) when is_atom(k), do: {k, atomify(v)}
defp atomify_kv({k, v}), do: {String.to_atom(k), atomify(v)}
end
# source: lib/structs/util.ex
defmodule Bolt.Sips.Internals.PackStream.V2 do
alias Bolt.Sips.Types.{TimeWithTZOffset, DateTimeWithTZOffset, Duration, Point}
alias Bolt.Sips.Internals.PackStream.Encoder
defmacro __using__(_options) do
quote do
import unquote(__MODULE__)
@last_version Bolt.Sips.Internals.BoltVersionHelper.last()
# Local Time
@local_time_signature 0x74
# Time With TZ Offset
@time_with_tz_signature 0x54
# Date
@date_signature 0x44
# Local DateTime
@local_datetime_signature 0x64
# Datetime with TZ offset
@datetime_with_zone_offset_signature 0x46
# Datetime with TZ id
@datetime_with_zone_id_signature 0x66
# Duration
@duration_signature 0x45
# Point 2D
@point2d_signature 0x58
# Point 3D
@point3d_signature 0x59
defp do_call_encode(:local_time, local_time, bolt_version)
when bolt_version >= 2 and bolt_version <= @last_version do
Encoder.encode({@local_time_signature, [day_time(local_time)]}, bolt_version)
end
defp do_call_encode(
:time_with_tz,
%TimeWithTZOffset{time: time, timezone_offset: offset},
bolt_version
)
when bolt_version >= 2 and bolt_version <= @last_version do
Encoder.encode({@time_with_tz_signature, [day_time(time), offset]}, bolt_version)
end
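# Dates are encoded as the number of days since the Unix epoch,
# e.g. ~D[1970-01-03] encodes as 2.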
defp do_call_encode(:date, date, bolt_version)
when bolt_version >= 2 and bolt_version <= @last_version do
epoch = Date.diff(date, ~D[1970-01-01])
Encoder.encode({@date_signature, [epoch]}, bolt_version)
end
defp do_call_encode(:local_datetime, local_datetime, bolt_version)
when bolt_version >= 2 and bolt_version <= @last_version do
Encoder.encode(
{@local_datetime_signature, decompose_datetime(local_datetime)},
bolt_version
)
end
defp do_call_encode(:datetime_with_tz_id, datetime, bolt_version)
when bolt_version >= 2 and bolt_version <= @last_version do
data = decompose_datetime(DateTime.to_naive(datetime)) ++ [datetime.time_zone]
Encoder.encode({@datetime_with_zone_id_signature, data}, bolt_version)
end
defp do_call_encode(
:datetime_with_tz_offset,
%DateTimeWithTZOffset{naive_datetime: ndt, timezone_offset: tz_offset},
bolt_version
)
when bolt_version >= 2 and bolt_version <= @last_version do
data = decompose_datetime(ndt) ++ [tz_offset]
Encoder.encode({@datetime_with_zone_offset_signature, data}, bolt_version)
end
defp do_call_encode(:duration, %Duration{} = duration, bolt_version)
when bolt_version >= 2 and bolt_version <= @last_version do
Encoder.encode({@duration_signature, compact_duration(duration)}, bolt_version)
end
defp do_call_encode(:point, %Point{z: nil} = point, bolt_version)
when bolt_version >= 2 and bolt_version <= @last_version do
Encoder.encode({@point2d_signature, [point.srid, point.x, point.y]}, bolt_version)
end
defp do_call_encode(:point, %Point{} = point, bolt_version)
when bolt_version >= 2 and bolt_version <= @last_version do
Encoder.encode(
{@point3d_signature, [point.srid, point.x, point.y, point.z]},
bolt_version
)
end
end
end
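# The helpers used inside the quoted clauses above (`day_time/1`,
# `decompose_datetime/1`, `compact_duration/1`) are not part of this excerpt.
# The sketches below are assumed reconstructions of what
# `import unquote(__MODULE__)` provides.
def day_time(time) do
# Nanoseconds since midnight.
Time.diff(time, ~T[00:00:00.000], :nanosecond)
end
def decompose_datetime(%NaiveDateTime{} = datetime) do
# [seconds, nanoseconds] since the Unix epoch.
datetime_micros = NaiveDateTime.diff(datetime, ~N[1970-01-01 00:00:00.000], :microsecond)
[div(datetime_micros, 1_000_000), rem(datetime_micros, 1_000_000) * 1_000]
end
def compact_duration(%Duration{} = duration) do
# [months, days, seconds, nanoseconds].
[
12 * duration.years + duration.months,
7 * duration.weeks + duration.days,
3600 * duration.hours + 60 * duration.minutes + duration.seconds,
duration.nanoseconds
]
end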
end
# source: lib/bolt_sips/internals/pack_stream/v2.ex
defmodule Herald.Router do
@moduledoc """
Provides the routes DSL.
Routes are how Herald knows:
* which message schema to use to encode a
message before sending it;
* which function must be used to process
a message when it is received.
A router must be implemented as a module of your
application, as below:
```elixir
defmodule MyApp.Router do
use Herald.Router
route "my_queue1",
schema: MyApp.Message1,
processor: &MyApp.Message1.my_processor/1
route "my_queue2",
schema: MyApp.Message2,
processor: &MyApp.Message2.my_processor/1
end
```
Each application using Herald must have exactly one
router.
You need to tell Herald where your router is
using application configuration, as below:
```elixir
config :herald,
router: MyApp.Router
```
For more details, see `route/2`
"""
alias Herald.Errors.InvalidRoute
alias Herald.Errors.InvalidRouteProcessor
defmacro __using__(_opts) do
quote do
@routes %{}
@before_compile Herald.Router
import Herald.Router
end
end
@doc """
Defines a `config` for a given `queue`.
### Config fields
* `schema` - Represents a `struct` using
`Herald.Message` which will be used to
represent any message received in `queue`;
* `processor` - Represents a function
which will process messages received in
`queue`.
For more details, see the [module doc](#content).
"""
defmacro route(queue, _config = [schema: schema, processor: processor]) do
quote do
queue = unquote(queue)
schema = unquote(schema)
processor = unquote(processor)
if not is_function(processor) do
raise InvalidRouteProcessor,
message: "Invalid processor! Processor must be a function"
end
@routes Map.put(@routes, queue, {schema,processor})
end
end
defmacro route(queue, _config = [schema: schema]) do
quote do
queue = unquote(queue)
schema = unquote(schema)
processor = :empty
@routes Map.put(@routes, queue, {schema,processor})
end
end
defmacro route(_, _) do
raise InvalidRoute, message: """
Invalid route!
A correct route must include a queue name and a
schema to represent it, as below:
route "queue",
schema: MyApp.MessageSchema
Additionally, it can include a processor function
to indicate the processor of messages received in
that queue:
route "queue",
schema: MyApp.MessageSchema,
processor: &MyApp.MessageSchema.func/1
"""
end
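# Injects `routes/0` and `get_queue_route/1` into the using module. For the
# router in the moduledoc, `MyApp.Router.get_queue_route("my_queue1")` would
# return `{:ok, {MyApp.Message1, &MyApp.Message1.my_processor/1}}`.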
defmacro __before_compile__(_env) do
quote do
def routes(), do: @routes
def get_queue_route(queue) do
case Map.get(@routes, queue) do
nil ->
{:error, :queue_with_no_routes}
route ->
{:ok, route}
end
end
end
end
end
# source: lib/herald/router.ex
defmodule MeshxRpc.Common.Telemetry do
@moduledoc false
alias MeshxRpc.Common.Structs.Data
require Logger
@error_prefix :error_rpc
@error_prefix_remote :error_rpc_remote
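# Attaches `handle_event/4` to every RPC telemetry event under `telemetry_prefix`,
# e.g. `attach([:my_rpc, :pool1])` handles events such as [:my_rpc, :pool1, :recv].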
def attach([_n, i] = telemetry_prefix, id \\ nil) do
id = if is_nil(id), do: i, else: id
events = [
telemetry_prefix ++ [:init],
telemetry_prefix ++ [:hsk],
telemetry_prefix ++ [:idle],
telemetry_prefix ++ [:recv],
telemetry_prefix ++ [:send],
telemetry_prefix ++ [:call],
telemetry_prefix ++ [:cast]
]
:ok = :telemetry.attach_many(id, events, &handle_event/4, nil)
end
def execute(%Data{} = data) do
event_name = if is_nil(data.fun_req), do: data.telemetry_prefix ++ [data.state], else: data.telemetry_prefix ++ [data.fun_req]
metrics = data.metrics
time = Enum.map(metrics.time, fn {k, v} -> if v < 0, do: {k, -1}, else: {k, v} end)
metrics = %{metrics | time: time}
measurements = Map.from_struct(metrics)
result = if is_nil(data.telemetry_result), do: data.result, else: data.telemetry_result
metadata = %{
address: data.address,
fun_name: data.fun_name,
fun_req: data.fun_req,
hsk_ref: data.hsk_ref,
id: data.pool_id,
local: Map.from_struct(data.local),
remote: Map.from_struct(data.remote),
req_ref: data.req_ref,
result: result,
socket: data.socket,
state: data.state
}
:ok = :telemetry.execute(event_name, measurements, metadata)
end
defp handle_event(event_name, measurements, metadata, _config) do
{result, meta} = Map.pop!(metadata, :result)
{local, meta} = Map.pop!(meta, :local)
{remote, meta} = Map.pop!(meta, :remote)
{address, meta} = Map.pop!(meta, :address)
{fun_name, meta} = Map.pop!(meta, :fun_name)
{_fun_req, meta} = Map.pop!(meta, :fun_req)
{_id, meta} = Map.pop!(meta, :id)
event_name = if is_nil(fun_name), do: event_name, else: event_name ++ [fun_name]
meta = if is_nil(fun_name), do: Map.delete(meta, :state), else: meta
local = if is_nil(local.conn_ref), do: local, else: %{local | conn_ref: local.conn_ref |> Base.encode64(padding: false)}
remote = if is_nil(remote.conn_ref), do: remote, else: %{remote | conn_ref: remote.conn_ref |> Base.encode64(padding: false)}
meta = Enum.reject(meta, fn {_k, v} -> is_nil(v) end)
{time, measurements} = Map.pop!(measurements, :time)
{size, measurements} = Map.pop!(measurements, :size)
{blocks, _measurements} = Map.pop!(measurements, :blocks)
{idle, time} = Keyword.pop!(time, :idle)
idle = idle / 1_000
time = Enum.reject(time, fn {_k, v} -> v == 0 end) |> Enum.map(fn {k, time} -> {k, time / 1_000} end)
t_req = Enum.reduce(time, 0, fn {_k, v}, acc -> v + acc end)
t_req = if is_float(t_req), do: Float.round(t_req, 3), else: t_req
size = Enum.map(size, fn {k, s} -> {k, pretty_bytes(s)} end)
blocks = Map.to_list(blocks)
level =
case result do
{@error_prefix, _e} -> :error
{@error_prefix_remote, _e} -> :error
_ -> :debug
end
Logger.log(
level,
"""
#{inspect(event_name)} -> #{inspect(result)}
local: #{inspect(local)}
remote: #{inspect(remote)}
address: #{inspect(address)}
meta: #{inspect(meta)}
t_req: #{t_req} #{inspect(time)}
t_idle: #{idle}
size: #{inspect(size)}
blocks: #{inspect(blocks)}
"""
)
end
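# Formats a byte count using decimal units,
# e.g. pretty_bytes(512) => "512B" and pretty_bytes(1_500) => "1.5KB".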
defp pretty_bytes(val) do
{val_div, val_unit} =
cond do
val < 1_000 -> {1, "B"}
val < 1_000_000 -> {1_000, "KB"}
val < 1_000_000_000 -> {1_000_000, "MB"}
val < 1_000_000_000_000 -> {1_000_000_000, "GB"}
true -> {1_000_000_000_000, "TB"}
end
if val_unit == "B", do: "#{round(val / val_div)}#{val_unit}", else: "#{Float.round(val / val_div, 3)}#{val_unit}"
end
end
# source: lib/common/telemetry.ex
defmodule Binance do
alias Binance.Rest.HTTPClient
@type error ::
{:binance_error, %{code: integer(), message: String.t()}}
| {:http_error, any()}
| {:poison_decode_error, any()}
| {:config_missing, String.t()}
# Server
@doc """
Pings binance API
"""
@spec ping() :: {:ok, %{}} | {:error, error()}
def ping() do
HTTPClient.get_binance("/api/v3/ping")
end
@doc """
Get binance server time in unix epoch.
## Example
```
{:ok, 1515390701097}
```
"""
@spec get_server_time() :: {:ok, integer()} | {:error, error()}
def get_server_time() do
case HTTPClient.get_binance("/api/v3/time") do
{:ok, %{"serverTime" => time}} -> {:ok, time}
err -> err
end
end
@spec get_exchange_info() :: {:ok, %Binance.ExchangeInfo{}} | {:error, error()}
def get_exchange_info() do
case HTTPClient.get_binance("/api/v3/exchangeInfo") do
{:ok, data} -> {:ok, Binance.ExchangeInfo.new(data)}
err -> err
end
end
@doc """
Start a new user data stream. The stream will close after 60 minutes unless a keepalive is sent.
## Example response
```
{
"listenKey": "<KEY>"
}
```
Note: Binance Spot does not require us to sign this request body while this very same API on Binance Futures does
"""
@spec create_listen_key(map() | nil) :: {:ok, map()} | {:error, error()}
def create_listen_key(config \\ nil) do
case HTTPClient.post_binance("/api/v3/userDataStream", %{}, config, false) do
{:ok, %{"code" => code, "msg" => msg}} ->
{:error, {:binance_error, %{code: code, msg: msg}}}
data ->
data
end
end
@doc """
Keepalive a user data stream to prevent a time out. User data streams will close after 60 minutes. It's recommended to send a ping about every 30 minutes.
## Example response
```
{}
```
Note: Binance Spot does not require us to sign this request body while this very same API on Binance Futures does
"""
@spec keep_alive_listen_key(String.t(), map() | nil) :: {:ok, %{}} | {:error, error()}
def keep_alive_listen_key(listen_key, config \\ nil) do
arguments = %{
listenKey: listen_key
}
case HTTPClient.put_binance("/api/v3/userDataStream", arguments, config, false) do
{:ok, %{"code" => code, "msg" => msg}} ->
{:error, {:binance_error, %{code: code, msg: msg}}}
data ->
data
end
end
# Ticker
@doc """
Retrieves the current ticker information for the given trade pair.
Symbol can be a binance symbol in the form of `"ETHBTC"`.
## Example
```
{:ok,
%Binance.Ticker{ask_price: "0.07548800", bid_price: "0.07542100",
close_time: 1515391124878, count: 661676, first_id: 16797673,
high_price: "0.07948000", last_id: 17459348, last_price: "0.07542000",
low_price: "0.06330000", open_price: "0.06593800", open_time: 1515304724878,
prev_close_price: "0.06593800", price_change: "0.00948200",
price_change_percent: "14.380", volume: "507770.18500000",
weighted_avg_price: "0.06946930"}}
```
"""
@spec get_ticker(String.t()) :: {:ok, %Binance.Ticker{}} | {:error, error()}
def get_ticker(symbol) when is_binary(symbol) do
case HTTPClient.get_binance("/api/v3/ticker/24hr?symbol=#{symbol}") do
{:ok, data} -> {:ok, Binance.Ticker.new(data)}
err -> err
end
end
@doc """
Retrieves the bids & asks of the order book up to the depth for the given symbol
## Example
```
{:ok,
%Binance.OrderBook{
asks: [
["8400.00000000", "2.04078100", []],
["8405.35000000", "0.50354700", []],
["8406.00000000", "0.32769800", []],
["8406.33000000", "0.00239000", []],
["8406.51000000", "0.03241000", []]
],
bids: [
["8393.00000000", "0.20453200", []],
["8392.57000000", "0.02639000", []],
["8392.00000000", "1.40893300", []],
["8390.09000000", "0.07047100", []],
["8388.72000000", "0.04577400", []]
],
last_update_id: 113634395
}
}
```
"""
@spec get_depth(String.t(), integer()) :: {:ok, %Binance.OrderBook{}} | {:error, error()}
def get_depth(symbol, limit) do
case HTTPClient.get_binance("/api/v3/depth?symbol=#{symbol}&limit=#{limit}") do
{:ok, data} -> {:ok, Binance.OrderBook.new(data)}
err -> err
end
end
# Account
@doc """
Fetches the user account from Binance.
In the case of an error on Binance, for example with invalid parameters, `{:error, Binance.error()}` will be returned.
Please read https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-information-user_data to understand API
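## Example
A hypothetical call (assumes API credentials are configured):
```
{:ok, %Binance.Account{} = account} = Binance.get_account()
```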
"""
@spec get_account(map() | nil) :: {:ok, %Binance.Account{}} | {:error, error()}
def get_account(config \\ nil) do
case HTTPClient.get_binance("/api/v3/account", %{}, config) do
{:ok, data} -> {:ok, Binance.Account.new(data)}
error -> error
end
end
# Order
@doc """
Creates a new order on binance
Please read https://www.binance.com/restapipub.html#user-content-account-endpoints to understand all the parameters
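## Example
A hypothetical limit order (symbol and values are illustrative):
```
Binance.create_order(%{
  symbol: "BTCUSDT",
  side: "BUY",
  type: "LIMIT",
  quantity: 0.001,
  price: "8000.00",
  time_in_force: "GTC"
})
```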
"""
@spec create_order(map(), map() | nil) :: {:ok, map()} | {:error, error()}
def create_order(
%{symbol: symbol, side: side, type: type, quantity: quantity} = params,
config \\ nil
) do
arguments = %{
symbol: symbol,
side: side,
type: type,
quantity: quantity,
timestamp: params[:timestamp] || :os.system_time(:millisecond),
recvWindow: params[:recv_window] || 1500
}
arguments =
arguments
|> Map.merge(
unless(
is_nil(params[:new_client_order_id]),
do: %{newClientOrderId: params[:new_client_order_id]},
else: %{}
)
)
|> Map.merge(
unless(is_nil(params[:stop_price]), do: %{stopPrice: params[:stop_price]}, else: %{})
)
|> Map.merge(
unless(
is_nil(params[:iceberg_quantity]),
do: %{icebergQty: params[:iceberg_quantity]},
else: %{}
)
)
|> Map.merge(
unless(
is_nil(params[:time_in_force]),
do: %{timeInForce: params[:time_in_force]},
else: %{}
)
)
|> Map.merge(unless(is_nil(params[:price]), do: %{price: params[:price]}, else: %{}))
case HTTPClient.post_binance("/api/v3/order", arguments, config) do
{:ok, data} ->
{:ok, Binance.OrderResponse.new(data)}
error ->
error
end
end
# Open orders
@doc """
Get all open orders, alternatively open orders by symbol (params[:symbol])
Returns `{:ok, [%Binance.Order{}]}` or `{:error, reason}`.
Weight: 1 for a single symbol; 40 when the symbol parameter is omitted
## Example
```
{:ok,
[%Binance.Order{price: "0.1", origQty: "1.0", executedQty: "0.0", ...},
%Binance.Order{...},
%Binance.Order{...},
%Binance.Order{...},
%Binance.Order{...},
%Binance.Order{...},
...]}
```
"""
@spec get_open_orders(map(), map() | nil) :: {:ok, list(%Binance.Order{})} | {:error, error()}
def get_open_orders(params \\ %{}, config \\ nil) do
case HTTPClient.get_binance("/api/v3/openOrders", params, config) do
{:ok, data} -> {:ok, Enum.map(data, &Binance.Order.new(&1))}
err -> err
end
end
# Order
@doc """
Gets an order. Symbol, timestamp, and either orderId or origClientOrderId are mandatory.
Weight: 1
## Example
```
{:ok, %Binance.Order{price: "0.1", origQty: "1.0", executedQty: "0.0", ...}}
```
Info: https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#query-order-user_data
"""
@spec get_order(map(), map() | nil) :: {:ok, %Binance.Order{}} | {:error, error()}
def get_order(params, config \\ nil) do
arguments =
%{
symbol: params[:symbol],
timestamp: params[:timestamp] || :os.system_time(:millisecond)
}
|> Map.merge(
unless(is_nil(params[:order_id]), do: %{orderId: params[:order_id]}, else: %{})
)
|> Map.merge(
unless(
is_nil(params[:orig_client_order_id]),
do: %{origClientOrderId: params[:orig_client_order_id]},
else: %{}
)
)
|> Map.merge(
unless(is_nil(params[:recv_window]), do: %{recvWindow: params[:recv_window]}, else: %{})
)
case HTTPClient.get_binance("/api/v3/order", arguments, config) do
{:ok, data} -> {:ok, Binance.Order.new(data)}
err -> err
end
end
@doc """
Cancel an active order.
Symbol and either orderId or origClientOrderId must be sent.
Weight: 1
Info: https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#cancel-order-trade
"""
@spec cancel_order(map(), map() | nil) :: {:ok, %Binance.Order{}} | {:error, error()}
def cancel_order(params, config \\ nil) do
arguments =
%{
symbol: params[:symbol],
timestamp: params[:timestamp] || :os.system_time(:millisecond)
}
|> Map.merge(
unless(is_nil(params[:order_id]), do: %{orderId: params[:order_id]}, else: %{})
)
|> Map.merge(
unless(
is_nil(params[:orig_client_order_id]),
do: %{origClientOrderId: params[:orig_client_order_id]},
else: %{}
)
)
|> Map.merge(
unless(
is_nil(params[:new_client_order_id]),
do: %{newClientOrderId: params[:new_client_order_id]},
else: %{}
)
)
|> Map.merge(
unless(
is_nil(params[:recv_window]),
do: %{recvWindow: params[:recv_window]},
else: %{}
)
)
case HTTPClient.delete_binance("/api/v3/order", arguments, config) do
{:ok, data} -> {:ok, Binance.Order.new(data)}
err -> err
end
end
end
# source: lib/binance.ex
defmodule StaffNotesWeb.PrimerHelpers do
@moduledoc """
Helper functions for generating elements that work with [Primer](https://primer.github.io/).
"""
use Phoenix.HTML
import Phoenix.View, only: [render: 3, render_many: 4]
import PhoenixOcticons
alias Phoenix.HTML.Form
alias StaffNotes.Accounts.User
alias StaffNotes.Markdown
alias StaffNotesWeb.ErrorHelpers
alias StaffNotesWeb.Primer
@doc """
Displays the avatar for the `StaffNotes.Accounts.User`.
## Options
Valid options are:
* `:size` -- the value in pixels to use for both the width and height of the avatar image
"""
@spec avatar(User.t(), keyword) :: Phoenix.HTML.safe()
def avatar(user, options \\ [])
def avatar(user, []) do
content_tag(:img, "", class: "avatar", src: user.avatar_url)
end
def avatar(user, size: size) do
content_tag(
:img,
"",
class: "avatar",
src: "#{user.avatar_url}&s=#{size}",
width: size,
height: size
)
end
@doc """
Generates a link button with the given text and options.
## Options
* **required** `:to` -- the URL to link to
"""
def link_button(text, options \\ []) do
options = Keyword.merge(options, type: "button")
link(text, options)
end
@doc """
Displays the appropriate input control for the given field.
## Options
* `:using` -- override the built-in selection of input field based on data type. Can be any of the
`Phoenix.HTML.Form` input function names or the special value `:markdown` which displays a
specially-formatted `textarea`
See:
[Dynamic forms with Phoenix](http://blog.plataformatec.com.br/2016/09/dynamic-forms-with-phoenix/)
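## Example
A hypothetical template usage (form and field names are illustrative):
```
= input(f, :title)
= input(f, :body, using: :markdown)
```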
"""
@spec input(Phoenix.HTML.FormData.t(), atom, keyword) :: Phoenix.HTML.safe()
def input(form, field, options \\ []) do
type = options[:using] || Form.input_type(form, field)
wrapper_opts = [class: "form-group #{error_class(form, field)}"]
label_opts = []
input_opts = [class: "form-control #{options[:class]}"]
content_tag :dl, wrapper_opts do
label =
content_tag :dt do
label(form, field, humanize(field), label_opts)
end
input =
content_tag :dd do
input(type, form, field, input_opts)
end
error = error_tag(form, field) || ""
[label, input, error]
end
end
def static(form, field, options \\ []) do
wrapper_opts = [class: "form-group"]
label_opts = []
static_opts = []
value = options[:value] || Form.input_value(form, field)
content_tag :dl, wrapper_opts do
label =
content_tag :dt do
label(form, field, humanize(field), label_opts)
end
static =
content_tag :dd do
content_tag :div, static_opts do
value
end
end
[label, static]
end
end
@doc """
Renders a standard list of items.
Takes the same parameters and options as `Phoenix.View.render_many/4` except that the
`template_root` parameter is the root name of the template. This root name will have either
`_blankslate.html` appended if `collection` is empty or have `_item.html` appended if the
collection is non-empty.
"""
def render_list(collection, module, template_root, assigns \\ %{}) do
Primer.box do
Primer.box_body do
content_tag :ul do
render_items(collection, module, template_root, assigns)
end
end
end
end
@doc """
Renders the tabnav element.
## Examples
In Elixir code:
```
tabnav do
[
tabnav_item("Text", "https://example.com")
]
end
```
In Slime template:
```
= tabnav do
= tabnav_item("Text", "https://example.com")
```
"""
def tabnav(do: block) do
content_tag :div, class: "tabnav" do
content_tag(:nav, block, class: "tabnav-tabs", "aria-label": "Navigation bar")
end
end
@doc """
Generates a tabnav item to be rendered inside a tabnav element.
## Examples
Rendering a tabnav item with an icon, counter, aligned right, and selected:
```
tabnav_item(
"Settings",
"https://example.com",
counter: 5,
icon: :gear,
right: true,
selected: true
)
```
"""
def tabnav_item(text, path, options \\ []) do
selected = Keyword.get(options, :selected)
icon = Keyword.get(options, :icon)
right = Keyword.get(options, :right)
counter = Keyword.get(options, :counter)
contents = build_contents(icon, text, counter)
class = build_class(selected, right)
options = [href: path, class: class]
options = if selected, do: Keyword.put(options, :"aria-current", "page"), else: options
content_tag(:a, contents, options)
end
defp build_class(false, right), do: build_class(nil, right)
defp build_class(nil, nil), do: "tabnav-tab"
defp build_class(nil, _), do: "tabnav-tab float-right"
defp build_class(_, nil), do: "tabnav-tab selected"
defp build_class(_, _), do: "tabnav-tab float-right selected"
defp build_contents(nil, text, nil), do: [text]
defp build_contents(icon, text, nil), do: [octicon(icon), text]
defp build_contents(nil, text, counter) do
[text, content_tag(:span, counter, class: "Counter")]
end
defp build_contents(icon, text, counter) do
[
octicon(icon),
text,
content_tag(:span, counter, class: "Counter")
]
end
defp error_class(form, field) do
cond do
!form.source.action -> ""
form.errors[field] -> "errored"
true -> ""
end
end
defp error_tag(form, field) do
Enum.map(Keyword.get_values(form.errors, field), fn error ->
content_tag :dd, class: "error" do
ErrorHelpers.translate_error(error)
end
end)
end
defp input(:markdown, form, field, input_opts) do
content =
case Form.input_value(form, field) do
nil -> nil
%Markdown{} = markdown -> markdown.text
value -> value
end
opts =
input_opts
|> Keyword.merge(id: Form.input_id(form, field), name: Form.input_name(form, field))
|> Keyword.put(:class, (input_opts[:class] || "") <> " image-drop")
content_tag(:textarea, "#{content}\n", opts)
end
defp input(:permission_select, form, field, _input_opts) do
selected = Form.input_value(form, field)
select(form, field, [:owner, :read, :write], selected: selected)
end
defp input(type, form, field, input_opts) do
apply(Form, type, [form, field, input_opts])
end
defp render_items([], module, template, assigns) do
render(module, "#{template}_blankslate.html", assigns)
end
defp render_items(collection, module, template, assigns) do
render_many(collection, module, "#{template}_item.html", assigns)
end
end
# source: lib/staff_notes_web/helpers/primer_helpers.ex
defmodule AWS.EMR do
@moduledoc """
Amazon EMR is a web service that makes it easy to process large amounts of
data efficiently. Amazon EMR uses Hadoop processing combined with several
AWS products to do tasks such as web indexing, data mining, log file
analysis, machine learning, scientific simulation, and data warehousing.
"""
@doc """
Adds an instance fleet to a running cluster.
<note> The instance fleet configuration is available only in Amazon EMR
versions 4.8.0 and later, excluding 5.0.x.
</note>
"""
def add_instance_fleet(client, input, options \\ []) do
request(client, "AddInstanceFleet", input, options)
end
@doc """
Adds one or more instance groups to a running cluster.
"""
def add_instance_groups(client, input, options \\ []) do
request(client, "AddInstanceGroups", input, options)
end
@doc """
AddJobFlowSteps adds new steps to a running cluster. A maximum of 256 steps
are allowed in each job flow.
If your cluster is long-running (such as a Hive data warehouse) or complex,
you may require more than 256 steps to process your data. You can bypass
the 256-step limitation in various ways, including using SSH to connect to
the master node and submitting queries directly to the software running on
the master node, such as Hive and Hadoop. For more information on how to do
this, see [Add More than 256 Steps to a
Cluster](https://docs.aws.amazon.com/emr/latest/ManagementGuide/AddMoreThan256Steps.html)
in the *Amazon EMR Management Guide*.
A step specifies the location of a JAR file stored either on the master
node of the cluster or in Amazon S3. Each step is performed by the main
function of the main class of the JAR file. The main class can be specified
either in the manifest of the JAR or by using the MainFunction parameter of
the step.
Amazon EMR executes each step in the order listed. For a step to be
considered complete, the main function must exit with a zero exit code and
all Hadoop jobs started while the step was running must have completed and
run successfully.
You can only add steps to a cluster that is in one of the following states:
STARTING, BOOTSTRAPPING, RUNNING, or WAITING.
"""
def add_job_flow_steps(client, input, options \\ []) do
request(client, "AddJobFlowSteps", input, options)
end
@doc """
Adds tags to an Amazon EMR resource. Tags make it easier to associate
clusters in various ways, such as grouping clusters to track your Amazon
EMR resource allocation costs. For more information, see [Tag
Clusters](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-tags.html).
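## Example
A hypothetical call (input keys follow the EMR JSON API; values are
illustrative):
    AWS.EMR.add_tags(client, %{
      "ResourceId" => "j-XXXXXXXXXXXXX",
      "Tags" => [%{"Key" => "stack", "Value" => "Prod"}]
    })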
"""
def add_tags(client, input, options \\ []) do
request(client, "AddTags", input, options)
end
@doc """
Cancels a pending step or steps in a running cluster. Available only in
Amazon EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of
256 steps are allowed in each CancelSteps request. CancelSteps is
idempotent but asynchronous; it does not guarantee a step will be canceled,
even if the request is successfully submitted. You can only cancel steps
that are in a `PENDING` state.
"""
def cancel_steps(client, input, options \\ []) do
request(client, "CancelSteps", input, options)
end
@doc """
Creates a security configuration, which is stored in the service and can be
specified when a cluster is created.
"""
def create_security_configuration(client, input, options \\ []) do
request(client, "CreateSecurityConfiguration", input, options)
end
@doc """
Deletes a security configuration.
"""
def delete_security_configuration(client, input, options \\ []) do
request(client, "DeleteSecurityConfiguration", input, options)
end
@doc """
Provides cluster-level details including status, hardware and software
configuration, VPC settings, and so on.
"""
def describe_cluster(client, input, options \\ []) do
request(client, "DescribeCluster", input, options)
end
@doc """
This API is deprecated and will eventually be removed. We recommend you use
`ListClusters`, `DescribeCluster`, `ListSteps`, `ListInstanceGroups` and
`ListBootstrapActions` instead.
DescribeJobFlows returns a list of job flows that match all of the supplied
parameters. The parameters can include a list of job flow IDs, job flow
states, and restrictions on job flow creation date and time.
Regardless of supplied parameters, only job flows created within the last
two months are returned.
If no parameters are supplied, then job flows matching either of the
following criteria are returned:
* Job flows created and completed in the last two weeks
* Job flows created within the last two months that are in one of
the following states: `RUNNING`, `WAITING`, `SHUTTING_DOWN`, `STARTING`
Amazon EMR can return a maximum of 512 job flow descriptions.
"""
def describe_job_flows(client, input, options \\ []) do
request(client, "DescribeJobFlows", input, options)
end
@doc """
Provides the details of a security configuration by returning the
configuration JSON.
"""
def describe_security_configuration(client, input, options \\ []) do
request(client, "DescribeSecurityConfiguration", input, options)
end
@doc """
Provides more detail about the cluster step.
"""
def describe_step(client, input, options \\ []) do
request(client, "DescribeStep", input, options)
end
@doc """
Provides information about the bootstrap actions associated with a cluster.
"""
def list_bootstrap_actions(client, input, options \\ []) do
request(client, "ListBootstrapActions", input, options)
end
@doc """
Provides the status of all clusters visible to this AWS account. Allows you
to filter the list of clusters based on certain criteria; for example,
filtering by cluster creation date and time or by status. This call returns
a maximum of 50 clusters per call, but returns a marker to track the paging
of the cluster list across multiple ListClusters calls.
"""
def list_clusters(client, input, options \\ []) do
request(client, "ListClusters", input, options)
end
@doc """
Lists all available details about the instance fleets in a cluster.
<note> The instance fleet configuration is available only in Amazon EMR
versions 4.8.0 and later, excluding 5.0.x versions.
</note>
"""
def list_instance_fleets(client, input, options \\ []) do
request(client, "ListInstanceFleets", input, options)
end
@doc """
Provides all available details about the instance groups in a cluster.
"""
def list_instance_groups(client, input, options \\ []) do
request(client, "ListInstanceGroups", input, options)
end
@doc """
Provides information for all active EC2 instances and EC2 instances
terminated in the last 30 days, up to a maximum of 2,000. EC2 instances in
any of the following states are considered active: AWAITING_FULFILLMENT,
PROVISIONING, BOOTSTRAPPING, RUNNING.
"""
def list_instances(client, input, options \\ []) do
request(client, "ListInstances", input, options)
end
@doc """
Lists all the security configurations visible to this account, providing
their creation dates and times, and their names. This call returns a
maximum of 50 clusters per call, but returns a marker to track the paging
of the cluster list across multiple ListSecurityConfigurations calls.
"""
def list_security_configurations(client, input, options \\ []) do
request(client, "ListSecurityConfigurations", input, options)
end
@doc """
Provides a list of steps for the cluster in reverse order unless you
specify stepIds with the request.
"""
def list_steps(client, input, options \\ []) do
request(client, "ListSteps", input, options)
end
@doc """
Modifies the target On-Demand and target Spot capacities for the instance
fleet with the specified InstanceFleetID within the cluster specified using
ClusterID. The call either succeeds or fails atomically.
<note> The instance fleet configuration is available only in Amazon EMR
versions 4.8.0 and later, excluding 5.0.x versions.
</note>
"""
def modify_instance_fleet(client, input, options \\ []) do
request(client, "ModifyInstanceFleet", input, options)
end
@doc """
ModifyInstanceGroups modifies the number of nodes and configuration
settings of an instance group. The input parameters include the new target
instance count for the group and the instance group ID. The call will
either succeed or fail atomically.
"""
def modify_instance_groups(client, input, options \\ []) do
request(client, "ModifyInstanceGroups", input, options)
end
@doc """
Creates or updates an automatic scaling policy for a core instance group or
task instance group in an Amazon EMR cluster. The automatic scaling policy
defines how an instance group dynamically adds and terminates EC2 instances
in response to the value of a CloudWatch metric.
"""
def put_auto_scaling_policy(client, input, options \\ []) do
request(client, "PutAutoScalingPolicy", input, options)
end
@doc """
Removes an automatic scaling policy from a specified instance group within
an EMR cluster.
"""
def remove_auto_scaling_policy(client, input, options \\ []) do
request(client, "RemoveAutoScalingPolicy", input, options)
end
@doc """
Removes tags from an Amazon EMR resource. Tags make it easier to associate
clusters in various ways, such as grouping clusters to track your Amazon
EMR resource allocation costs. For more information, see [Tag
Clusters](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-tags.html).
The following example removes the stack tag with value Prod from a cluster:
"""
def remove_tags(client, input, options \\ []) do
request(client, "RemoveTags", input, options)
end
@doc """
RunJobFlow creates and starts running a new cluster (job flow). The cluster
runs the steps specified. After the steps complete, the cluster stops and
the HDFS partition is lost. To prevent loss of data, configure the last
step of the job flow to store results in Amazon S3. If the
`JobFlowInstancesConfig` `KeepJobFlowAliveWhenNoSteps` parameter is set to
`TRUE`, the cluster transitions to the WAITING state rather than shutting
down after the steps have completed.
For additional protection, you can set the `JobFlowInstancesConfig`
`TerminationProtected` parameter to `TRUE` to lock the cluster and prevent
it from being terminated by API call, user intervention, or in the event of
a job flow error.
A maximum of 256 steps are allowed in each job flow.
If your cluster is long-running (such as a Hive data warehouse) or complex,
you may require more than 256 steps to process your data. You can bypass
the 256-step limitation in various ways, including using the SSH shell to
connect to the master node and submitting queries directly to the software
running on the master node, such as Hive and Hadoop. For more information
on how to do this, see [Add More than 256 Steps to a
Cluster](https://docs.aws.amazon.com/emr/latest/ManagementGuide/AddMoreThan256Steps.html)
in the *Amazon EMR Management Guide*.
For long running clusters, we recommend that you periodically store your
results.
<note> The instance fleets configuration is available only in Amazon EMR
versions 4.8.0 and later, excluding 5.0.x versions. The RunJobFlow request
can contain InstanceFleets parameters or InstanceGroups parameters, but not
both.
</note>
"""
def run_job_flow(client, input, options \\ []) do
request(client, "RunJobFlow", input, options)
end
@doc """
SetTerminationProtection locks a cluster (job flow) so the EC2 instances in
the cluster cannot be terminated by user intervention, an API call, or in
the event of a job-flow error. The cluster still terminates upon successful
completion of the job flow. Calling `SetTerminationProtection` on a cluster
is similar to calling the Amazon EC2 `DisableAPITermination` API on all EC2
instances in a cluster.
`SetTerminationProtection` is used to prevent accidental termination of a
cluster and to ensure that in the event of an error, the instances persist
so that you can recover any data stored in their ephemeral instance
storage.
To terminate a cluster that has been locked by setting
`SetTerminationProtection` to `true`, you must first unlock the job flow by
a subsequent call to `SetTerminationProtection` in which you set the value
to `false`.
For more information, see [Managing Cluster
Termination](https://docs.aws.amazon.com/emr/latest/ManagementGuide/UsingEMR_TerminationProtection.html)
in the *Amazon EMR Management Guide*.
"""
def set_termination_protection(client, input, options \\ []) do
request(client, "SetTerminationProtection", input, options)
end
@doc """
Sets whether all AWS Identity and Access Management (IAM) users under your
account can access the specified clusters (job flows). This action works on
running clusters. You can also set the visibility of a cluster when you
launch it using the `VisibleToAllUsers` parameter of `RunJobFlow`. The
SetVisibleToAllUsers action can be called only by an IAM user who created
the cluster or the AWS account that owns the cluster.
"""
def set_visible_to_all_users(client, input, options \\ []) do
request(client, "SetVisibleToAllUsers", input, options)
end
@doc """
TerminateJobFlows shuts a list of clusters (job flows) down. When a job
flow is shut down, any step not yet completed is canceled and the EC2
instances on which the cluster is running are stopped. Any log files not
already saved are uploaded to Amazon S3 if a LogUri was specified when the
cluster was created.
The maximum number of clusters allowed is 10. The call to
`TerminateJobFlows` is asynchronous. Depending on the configuration of the
cluster, it may take up to 1-5 minutes for the cluster to completely
terminate and release allocated resources, such as Amazon EC2 instances.
"""
def terminate_job_flows(client, input, options \\ []) do
request(client, "TerminateJobFlows", input, options)
end
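# Signs the request with AWS Signature V4 and POSTs it as an
# `application/x-amz-json-1.1` payload to the regional EMR endpoint.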
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "elasticmapreduce"}
host = get_host("elasticmapreduce", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "ElasticMapReduce.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
# source: lib/aws/emr.ex
defmodule AWS.Comprehend do
@moduledoc """
Amazon Comprehend is an AWS service for gaining insight into the content of
documents.
Use these actions to determine the topics contained in your documents, the
topics they discuss, the predominant sentiment expressed in them, the
predominant language used, and more.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2017-11-27",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "comprehend",
global?: false,
protocol: "json",
service_id: "Comprehend",
signature_version: "v4",
signing_name: "comprehend",
target_prefix: "Comprehend_20171127"
}
end
@doc """
Determines the dominant language of the input text for a batch of documents.
For a list of languages that Amazon Comprehend can detect, see [Amazon Comprehend Supported
Languages](https://docs.aws.amazon.com/comprehend/latest/dg/how-languages.html).
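## Example
A hypothetical call (input keys follow the Comprehend JSON API; values are
illustrative):
    AWS.Comprehend.batch_detect_dominant_language(client, %{
      "TextList" => ["Machine learning is fascinating."]
    })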
"""
def batch_detect_dominant_language(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchDetectDominantLanguage", input, options)
end
@doc """
Inspects the text of a batch of documents for named entities and returns
information about them.
For more information about named entities, see `how-entities`
"""
def batch_detect_entities(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchDetectEntities", input, options)
end
@doc """
Detects the key noun phrases found in a batch of documents.
"""
def batch_detect_key_phrases(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchDetectKeyPhrases", input, options)
end
@doc """
Inspects a batch of documents and returns an inference of the prevailing
sentiment, `POSITIVE`, `NEUTRAL`, `MIXED`, or `NEGATIVE`, in each one.
"""
def batch_detect_sentiment(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchDetectSentiment", input, options)
end
@doc """
Inspects the text of a batch of documents for the syntax and part of speech of
the words in the document and returns information about them.
For more information, see `how-syntax`.
"""
def batch_detect_syntax(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchDetectSyntax", input, options)
end
@doc """
Creates a new document classification request to analyze a single document in
real-time, using a previously created and trained custom model and an endpoint.
"""
def classify_document(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ClassifyDocument", input, options)
end
@doc """
Creates a new document classifier that you can use to categorize documents.
To create a classifier, you provide a set of training documents labeled with
the categories that you want to use. After the classifier is trained, you can
use it to categorize a set of unlabeled documents into those categories. For
more information, see `how-document-classification`.
"""
def create_document_classifier(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDocumentClassifier", input, options)
end
@doc """
Creates a model-specific endpoint for synchronous inference for a previously
trained custom model.
"""
def create_endpoint(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateEndpoint", input, options)
end
@doc """
Creates an entity recognizer using submitted files.
After your `CreateEntityRecognizer` request is submitted, you can check job
status using the `DescribeEntityRecognizer` API.
"""
def create_entity_recognizer(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateEntityRecognizer", input, options)
end
@doc """
Deletes a previously created document classifier.
Only those classifiers that are in terminated states (IN_ERROR, TRAINED) will be
deleted.
If an active inference job is using the model, a `ResourceInUseException` will
be returned.
This is an asynchronous action that puts the classifier into a DELETING state,
and it is then removed by a background job. Once removed, the classifier
disappears from your account and is no longer available for use.
"""
def delete_document_classifier(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDocumentClassifier", input, options)
end
@doc """
Deletes a model-specific endpoint for a previously-trained custom model.
All endpoints must be deleted in order for the model to be deleted.
"""
def delete_endpoint(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteEndpoint", input, options)
end
@doc """
Deletes an entity recognizer.
Only those recognizers that are in terminated states (IN_ERROR, TRAINED) will be
deleted. If an active inference job is using the model, a
`ResourceInUseException` will be returned.
This is an asynchronous action that puts the recognizer into a DELETING state,
and it is then removed by a background job. Once removed, the recognizer
disappears from your account and is no longer available for use.
"""
def delete_entity_recognizer(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteEntityRecognizer", input, options)
end
@doc """
Gets the properties associated with a document classification job.
Use this operation to get the status of a classification job.
"""
def describe_document_classification_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDocumentClassificationJob", input, options)
end
@doc """
Gets the properties associated with a document classifier.
"""
def describe_document_classifier(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDocumentClassifier", input, options)
end
@doc """
Gets the properties associated with a dominant language detection job.
Use this operation to get the status of a detection job.
"""
def describe_dominant_language_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DescribeDominantLanguageDetectionJob",
input,
options
)
end
@doc """
Gets the properties associated with a specific endpoint.
Use this operation to get the status of an endpoint.
"""
def describe_endpoint(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEndpoint", input, options)
end
@doc """
Gets the properties associated with an entities detection job.
Use this operation to get the status of a detection job.
"""
def describe_entities_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEntitiesDetectionJob", input, options)
end
@doc """
Provides details about an entity recognizer including status, S3 buckets
containing training data, recognizer metadata, metrics, and so on.
"""
def describe_entity_recognizer(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEntityRecognizer", input, options)
end
@doc """
Gets the status and details of an events detection job.
"""
def describe_events_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEventsDetectionJob", input, options)
end
@doc """
Gets the properties associated with a key phrases detection job.
Use this operation to get the status of a detection job.
"""
def describe_key_phrases_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeKeyPhrasesDetectionJob", input, options)
end
@doc """
Gets the properties associated with a PII entities detection job.
For example, you can use this operation to get the job status.
"""
def describe_pii_entities_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribePiiEntitiesDetectionJob", input, options)
end
@doc """
Gets the properties associated with a sentiment detection job.
Use this operation to get the status of a detection job.
"""
def describe_sentiment_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeSentimentDetectionJob", input, options)
end
@doc """
Gets the properties associated with a topic detection job.
Use this operation to get the status of a detection job.
"""
def describe_topics_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTopicsDetectionJob", input, options)
end
@doc """
Determines the dominant language of the input text.
For a list of languages that Amazon Comprehend can detect, see [Amazon Comprehend Supported
Languages](https://docs.aws.amazon.com/comprehend/latest/dg/how-languages.html).
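
## Example

A sketch (hypothetical `client`; the `DetectDominantLanguage` input carries a
single `"Text"` key):

    {:ok, result, _response} =
      detect_dominant_language(client, %{"Text" => "Bonjour tout le monde"})
    result["Languages"]
    #=> e.g. [%{"LanguageCode" => "fr", "Score" => 0.99}]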
"""
def detect_dominant_language(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DetectDominantLanguage", input, options)
end
@doc """
Inspects text for named entities, and returns information about them.
For more information about named entities, see `how-entities`.
"""
def detect_entities(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DetectEntities", input, options)
end
@doc """
Detects the key noun phrases found in the text.
"""
def detect_key_phrases(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DetectKeyPhrases", input, options)
end
@doc """
Inspects the input text for entities that contain personally identifiable
information (PII) and returns information about them.
"""
def detect_pii_entities(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DetectPiiEntities", input, options)
end
@doc """
Inspects text and returns an inference of the prevailing sentiment (`POSITIVE`,
`NEUTRAL`, `MIXED`, or `NEGATIVE`).
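
## Example

A hedged sketch (`client` construction depends on the `AWS.Client` setup in
use; the input shape follows the Comprehend `DetectSentiment` API):

    {:ok, result, _response} =
      detect_sentiment(client, %{
        "Text" => "Elixir makes concurrency approachable.",
        "LanguageCode" => "en"
      })
    result["Sentiment"]
    #=> "POSITIVE"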
"""
def detect_sentiment(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DetectSentiment", input, options)
end
@doc """
Inspects text for syntax and the part of speech of words in the document.
For more information, see `how-syntax`.
"""
def detect_syntax(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DetectSyntax", input, options)
end
@doc """
Gets a list of the document classification jobs that you have submitted.
"""
def list_document_classification_jobs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListDocumentClassificationJobs", input, options)
end
@doc """
Gets a list of the document classifiers that you have created.
"""
def list_document_classifiers(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListDocumentClassifiers", input, options)
end
@doc """
Gets a list of the dominant language detection jobs that you have submitted.
"""
def list_dominant_language_detection_jobs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListDominantLanguageDetectionJobs", input, options)
end
@doc """
Gets a list of all existing endpoints that you've created.
"""
def list_endpoints(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListEndpoints", input, options)
end
@doc """
Gets a list of the entity detection jobs that you have submitted.
"""
def list_entities_detection_jobs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListEntitiesDetectionJobs", input, options)
end
@doc """
Gets a list of the properties of all entity recognizers that you created,
including recognizers currently in training.
Allows you to filter the list of recognizers based on criteria such as status
and submission time. This call returns up to 500 entity recognizers, with a
default page size of 100 recognizers.
The results are not returned in any particular order; retrieve the list and
sort it locally if needed.
"""
def list_entity_recognizers(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListEntityRecognizers", input, options)
end
@doc """
Gets a list of the events detection jobs that you have submitted.
"""
def list_events_detection_jobs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListEventsDetectionJobs", input, options)
end
@doc """
Get a list of key phrase detection jobs that you have submitted.
"""
def list_key_phrases_detection_jobs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListKeyPhrasesDetectionJobs", input, options)
end
@doc """
Gets a list of the PII entity detection jobs that you have submitted.
"""
def list_pii_entities_detection_jobs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListPiiEntitiesDetectionJobs", input, options)
end
@doc """
Gets a list of sentiment detection jobs that you have submitted.
"""
def list_sentiment_detection_jobs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListSentimentDetectionJobs", input, options)
end
@doc """
Lists all tags associated with a given Amazon Comprehend resource.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
Gets a list of the topic detection jobs that you have submitted.
"""
def list_topics_detection_jobs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTopicsDetectionJobs", input, options)
end
@doc """
Starts an asynchronous document classification job.
Use the `DescribeDocumentClassificationJob` operation to track the progress of the job.
"""
def start_document_classification_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartDocumentClassificationJob", input, options)
end
@doc """
Starts an asynchronous dominant language detection job for a collection of
documents.
Use the `DescribeDominantLanguageDetectionJob` operation to track the status of a job.
"""
def start_dominant_language_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartDominantLanguageDetectionJob", input, options)
end
@doc """
Starts an asynchronous entity detection job for a collection of documents.
Use the `DescribeEntitiesDetectionJob` operation to track the status of a job.
This API can be used for either standard entity detection or custom entity
recognition. To use it for custom entity recognition, pass the optional
`EntityRecognizerArn` to provide access to the recognizer being used to detect
the custom entities.
"""
def start_entities_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartEntitiesDetectionJob", input, options)
end
@doc """
Starts an asynchronous event detection job for a collection of documents.
"""
def start_events_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartEventsDetectionJob", input, options)
end
@doc """
Starts an asynchronous key phrase detection job for a collection of documents.
Use the `DescribeKeyPhrasesDetectionJob` operation to track the status of a job.
"""
def start_key_phrases_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartKeyPhrasesDetectionJob", input, options)
end
@doc """
Starts an asynchronous PII entity detection job for a collection of documents.
"""
def start_pii_entities_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartPiiEntitiesDetectionJob", input, options)
end
@doc """
Starts an asynchronous sentiment detection job for a collection of documents.
Use the `DescribeSentimentDetectionJob` operation to track the status of a job.
"""
def start_sentiment_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartSentimentDetectionJob", input, options)
end
@doc """
Starts an asynchronous topic detection job.
Use the `DescribeTopicDetectionJob` operation to track the status of a job.
"""
def start_topics_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartTopicsDetectionJob", input, options)
end
@doc """
Stops a dominant language detection job in progress.
If the job state is `IN_PROGRESS` the job is marked for termination and put into
the `STOP_REQUESTED` state. If the job completes before it can be stopped, it is
put into the `COMPLETED` state; otherwise the job is stopped and put into the
`STOPPED` state.
If the job is in the `COMPLETED` or `FAILED` state when you call the
`StopDominantLanguageDetectionJob` operation, the operation returns a 400
Internal Request Exception.
When a job is stopped, any documents already processed are written to the output
location.
"""
def stop_dominant_language_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopDominantLanguageDetectionJob", input, options)
end
@doc """
Stops an entities detection job in progress.
If the job state is `IN_PROGRESS` the job is marked for termination and put into
the `STOP_REQUESTED` state. If the job completes before it can be stopped, it is
put into the `COMPLETED` state; otherwise the job is stopped and put into the
`STOPPED` state.
If the job is in the `COMPLETED` or `FAILED` state when you call the
`StopEntitiesDetectionJob` operation, the operation returns a 400 Internal
Request Exception.
When a job is stopped, any documents already processed are written to the output
location.
"""
def stop_entities_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopEntitiesDetectionJob", input, options)
end
@doc """
Stops an events detection job in progress.
"""
def stop_events_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopEventsDetectionJob", input, options)
end
@doc """
Stops a key phrases detection job in progress.
If the job state is `IN_PROGRESS` the job is marked for termination and put into
the `STOP_REQUESTED` state. If the job completes before it can be stopped, it is
put into the `COMPLETED` state; otherwise the job is stopped and put into the
`STOPPED` state.
If the job is in the `COMPLETED` or `FAILED` state when you call the
`StopKeyPhrasesDetectionJob` operation, the operation returns a 400 Internal
Request Exception.
When a job is stopped, any documents already processed are written to the output
location.
"""
def stop_key_phrases_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopKeyPhrasesDetectionJob", input, options)
end
@doc """
Stops a PII entities detection job in progress.
"""
def stop_pii_entities_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopPiiEntitiesDetectionJob", input, options)
end
@doc """
Stops a sentiment detection job in progress.
If the job state is `IN_PROGRESS` the job is marked for termination and put into
the `STOP_REQUESTED` state. If the job completes before it can be stopped, it is
put into the `COMPLETED` state; otherwise the job is stopped and put into the
`STOPPED` state.
If the job is in the `COMPLETED` or `FAILED` state when you call the
`StopSentimentDetectionJob` operation, the operation returns a 400 Internal
Request Exception.
When a job is stopped, any documents already processed are written to the output
location.
"""
def stop_sentiment_detection_job(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopSentimentDetectionJob", input, options)
end
@doc """
Stops a document classifier training job while in progress.
If the training job state is `TRAINING`, the job is marked for termination and
put into the `STOP_REQUESTED` state. If the training job completes before it can
be stopped, it is put into the `TRAINED` state; otherwise the training job is stopped
and put into the `STOPPED` state and the service sends back an HTTP 200 response
with an empty HTTP body.
"""
def stop_training_document_classifier(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopTrainingDocumentClassifier", input, options)
end
@doc """
Stops an entity recognizer training job while in progress.
If the training job state is `TRAINING`, the job is marked for termination and
put into the `STOP_REQUESTED` state. If the training job completes before it can
be stopped, it is put into the `TRAINED` state; otherwise the training job is
stopped and put into the `STOPPED` state and the service sends back an HTTP 200
response with an empty HTTP body.
"""
def stop_training_entity_recognizer(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopTrainingEntityRecognizer", input, options)
end
@doc """
Associates a specific tag with an Amazon Comprehend resource.
A tag is a key-value pair that adds metadata to a resource used by Amazon
Comprehend. For example, a tag with "Sales" as the key might be added to a
resource to indicate its use by the sales department.
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
@doc """
Removes a specific tag associated with an Amazon Comprehend resource.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
@doc """
Updates information about the specified endpoint.
"""
def update_endpoint(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateEndpoint", input, options)
end
end
|
lib/aws/generated/comprehend.ex
| 0.900163
| 0.520374
|
comprehend.ex
|
starcoder
|
defmodule ManhattanV2 do
@moduledoc """
The waypoint starts 10 units east and 1 unit north.
Action N means to move the waypoint north by the given value.
Action S means to move the waypoint south by the given value.
Action E means to move the waypoint east by the given value.
Action W means to move the waypoint west by the given value.
Action L means to rotate the waypoint around the ship left (counter-clockwise) the given number of degrees.
Action R means to rotate the waypoint around the ship right (clockwise) the given number of degrees.
Action F means to move forward to the waypoint a number of times equal to the given value.
"""
defstruct pos: {0, 0}, waypoint: {1, 10}
def new(), do: %__MODULE__{}
@doc """
iex> ManhattanV2.run(ManhattanV2.sample())
286
"""
def run(cmds) do
run(new(), cmds)
|> distance()
end
@doc """
iex> import ManhattanV2
iex> run(new(), sample())
%ManhattanV2{pos: {-72, 214}, waypoint: {-10, 4}}
"""
def run(state, []), do: state
def run(state, [cmd | cmds]) do
run1(cmd, state)
|> run(cmds)
end
@doc """
iex> import ManhattanV2
iex> run1("F10", new())
%ManhattanV2{pos: {10, 100}, waypoint: {1, 10}}
iex> run1("N10", new())
%ManhattanV2{pos: {0, 0}, waypoint: {11, 10}}
"""
def run1(cmd, state = %{pos: {lat, lng}, waypoint: {dlat, dlng}}) do
[ltr, val] = Regex.run(~r/(.)(\d+)/, cmd, capture: :all_but_first)
val = String.to_integer(val)
case ltr do
"F" -> %{state | pos: {lat + dlat * val, lng + dlng * val}}
"N" -> %{state | waypoint: {dlat + val, dlng}}
"S" -> %{state | waypoint: {dlat - val, dlng}}
"E" -> %{state | waypoint: {dlat, dlng + val}}
"W" -> %{state | waypoint: {dlat, dlng - val}}
"L" -> %{state | waypoint: rotate(state.waypoint, val)}
"R" -> %{state | waypoint: rotate(state.waypoint, 360 - val)}
end
end
@doc """
iex> import ManhattanV2
iex> rotate({1,10}, 90)
{10,-1}
iex> rotate({1,10}, 270)
{-10,1}
"""
def rotate({dlat, dlng}, 90), do: {dlng, -dlat}
def rotate(wp, deg), do: rotate(wp, 90) |> rotate(deg - 90)
@doc """
iex> ManhattanV2.distance(%{pos: {-1, 4}})
5
"""
def distance(%{pos: {lat, lng}}) do
abs(lat) + abs(lng)
end
def sample() do
"""
F10
N3
F7
R90
F11
"""
|> String.split("\n", trim: true)
end
end
|
12-Manhattan/lib/manhattan_v2.ex
| 0.846101
| 0.841956
|
manhattan_v2.ex
|
starcoder
|
defmodule Gringotts.CreditCard do
@moduledoc """
Defines a `struct` for (credit) cards and some utilities.
"""
defstruct [:number, :month, :year, :first_name, :last_name, :verification_code, :brand]
@typedoc """
Represents a Credit Card.
| Field | Type | Description |
| ----- | ---- | ----------- |
| `number` | `string` | The card number. |
| `month` | `integer` | Month of expiry (a number in the `1..12`\
range). |
| `year` | `integer` | Year of expiry. |
| `first_name` | `string` | First name of the card holder (as on card). |
| `last_name` | `string` | Last name of the card holder (as on card). |
| `verification_code` | `string` | The [Card Verification Code][cvc], usually\
a 3-4 digit number on the back of the card. |
| `brand` | `string` | The brand name of the card network (in\
some cases also the card issuer) in\
UPPERCASE. Some popular card networks\
are [Visa][visa], [MasterCard][mc],\
[Maestro][mo], [Diner's Club][dc] etc. |
[cvc]: https://en.wikipedia.org/wiki/Card_security_code
[visa]: https://usa.visa.com
[mc]: https://www.mastercard.us/en-us.html
[mo]: http://www.maestrocard.com/gateway/index.html
[dc]: http://www.dinersclub.com/
"""
@type t :: %__MODULE__{number: String.t,
month: 1..12,
year: non_neg_integer,
first_name: String.t,
last_name: String.t,
verification_code: String.t,
brand: String.t}
@doc """
Returns the full name of the card holder.
Joins `first_name` and `last_name` with a space in between.
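For example:

    iex> Gringotts.CreditCard.full_name(%Gringotts.CreditCard{first_name: "Harry", last_name: "Potter"})
    "Harry Potter"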
"""
@spec full_name(t) :: String.t
def full_name(card) do
name = "#{card.first_name} #{card.last_name}"
String.trim(name)
end
end
|
lib/gringotts/credit_card.ex
| 0.86977
| 0.750004
|
credit_card.ex
|
starcoder
|
defmodule Eigr.FunctionsController.Controllers.V1.PersistentFunction do
@moduledoc """
Eigr.FunctionsController: PersistentFunction CRD.
## Kubernetes CRD Spec
Eigr PersistentFunction CRD
### Examples
```
apiVersion: functions.eigr.io/v1
kind: PersistentFunction
metadata:
name: shopping-cart
spec:
type: InMemory # this is the default if omitted
containers:
- image: my-docker-hub-username/shopping-cart:latest
```
Or with a real persistent store database:
```
apiVersion: functions.eigr.io/v1
kind: PersistentFunction
metadata:
name: shopping-cart
spec:
type: Postgres
deployment: Unmanaged
config:
service: postgresql.default.svc.cluster.local
credentialsFromSecret:
name: postgres-credentials
containers:
- image: my-docker-hub-username/shopping-cart:latest
```
"""
require Logger
use Bonny.Controller
# The CRD group. The original Cloudstate protocol registered resources under
# the cloudstate.io group; this controller uses functions.eigr.io instead.
@group "functions.eigr.io"
@version "v1"
@rule {"", ["services", "pods", "configmaps"], ["*"]}
@rule {"apps", ["statefulsets", "deployments"], ["*"]}
@scope :cluster
@names %{
plural: "persistentfunctions",
singular: "persistentfunction",
kind: "PersistentFunction",
shortNames: [
"pf",
"pfs",
"pfc",
"pfcs",
"pfunc",
"pfunction",
"pfuncs",
"pfunctions",
"persistentfunction",
"persistentfunctions"
]
}
# @additional_printer_columns [
# %{
# name: "test",
# type: "string",
# description: "test",
# JSONPath: ".spec.test"
# }
# ]
@doc """
Called periodically for each existing CustomResource to allow for reconciliation.
"""
@spec reconcile(map()) :: :ok | :error
@impl Bonny.Controller
def reconcile(payload) do
track_event(:reconcile, payload)
:ok
end
@doc """
Creates a kubernetes `statefulset`, `service` and `configmap` that runs a "Cloudstate" app.
"""
@spec add(map()) :: :ok | :error
@impl Bonny.Controller
def add(payload) do
track_event(:add, payload)
resources = parse(payload)
with {:ok, _} <- K8s.Client.create(resources.service) |> run(),
{:ok, _} <- K8s.Client.create(resources.configmap) |> run() do
resource_res = K8s.Client.create(resources.statefulset) |> run()
Logger.info("service result: #{inspect(resource_res)}")
case resource_res do
{:ok, _} -> :ok
{:error, error} -> {:error, error}
end
else
{:error, error} -> {:error, error}
end
end
@doc """
Updates `statefulset`, `service` and `configmap` resources.
"""
@spec modify(map()) :: :ok | :error
@impl Bonny.Controller
def modify(payload) do
resources = parse(payload)
with {:ok, _} <- K8s.Client.patch(resources.service) |> run(),
{:ok, _} <- K8s.Client.patch(resources.configmap) |> run(),
{:ok, _} <- K8s.Client.patch(resources.statefulset) |> run() do
:ok
else
{:error, error} -> {:error, error}
end
end
@doc """
Deletes `statefulset`, `service` and `configmap` resources.
"""
@spec delete(map()) :: :ok | :error
@impl Bonny.Controller
def delete(payload) do
track_event(:delete, payload)
resources = parse(payload)
with {:ok, _} <- K8s.Client.delete(resources.service) |> run(),
{:ok, _} <- K8s.Client.delete(resources.configmap) |> run(),
{:ok, _} <- K8s.Client.delete(resources.statefulset) |> run() do
:ok
else
{:error, error} -> {:error, error}
end
end
defp parse(%{
"kind" => "PersistentFunction",
"apiVersion" => "functions.eigr.io/v1",
"metadata" => %{"name" => name, "namespace" => ns},
"spec" => %{"containers" => containers}
}) do
statefulset = gen_statefulset(ns, name, containers)
service = gen_service(ns, name)
configmap = gen_configmap(ns, "proxy")
%{
configmap: configmap,
statefulset: statefulset,
service: service
}
end
defp gen_configmap(ns, name) do
%{
"apiVersion" => "v1",
"kind" => "ConfigMap",
"metadata" => %{
"namespace" => ns,
"name" => "proxy-cm"
},
"data" => %{
"PROXY_APP_NAME" => name,
"PROXY_CLUSTER_POLLING" => "3000",
"PROXY_CLUSTER_STRATEGY" => "kubernetes-dns",
"PROXY_HEADLESS_SERVICE" => "proxy-headless-svc",
"PROXY_HEARTBEAT_INTERVAL" => "240000",
"PROXY_HTTP_PORT" => "9001",
"PROXY_PORT" => "9000",
"PROXY_ROOT_TEMPLATE_PATH" => "/home/app",
"PROXY_UDS_ADDRESS" => "/var/run/eigr/functions.sock",
"PROXY_UDS_MODE" => "false",
"USER_FUNCTION_HOST" => "127.0.0.1",
"USER_FUNCTION_PORT" => "8080"
}
}
end
defp gen_service(ns, name) do
%{
"apiVersion" => "v1",
"kind" => "Service",
"metadata" => %{
"name" => "proxy-headless-svc",
"namespace" => ns,
"labels" => %{"svc-cluster-name" => "svc-proxy"}
},
"spec" => %{
"clusterIP" => "None",
"selector" => %{"cluster-name" => "proxy"},
"ports" => [
%{"port" => 4369, "name" => "epmd"},
%{"port" => 9000, "name" => "proxy"},
%{"port" => 9001, "name" => "http"}
]
}
}
end
defp gen_statefulset(ns \\ "default", name, replicas \\ 1, containers) do
container = List.first(containers)
image = container["image"]
%{
"apiVersion" => "apps/v1",
"kind" => "StatefulSet",
"metadata" => %{
"name" => name,
"namespace" => ns,
"labels" => %{"app" => name, "cluster-name" => "proxy"}
},
"spec" => %{
"selector" => %{
"matchLabels" => %{"app" => name, "cluster-name" => "proxy"}
},
"serviceName" => "proxy-headless-svc",
"replicas" => replicas,
"template" => %{
"metadata" => %{
"annotations" => %{
"prometheus.io/port" => "9001",
"prometheus.io/scrape" => "true"
},
"labels" => %{"app" => name, "cluster-name" => "proxy"}
},
"spec" => %{
"containers" => [
%{
"name" => "massa-proxy",
"image" => "docker.io/eigr/massa-proxy:0.1.31",
"env" => [
%{
# Static cluster secret (the NODE_COOKIE name is an assumption; the
# original entry duplicated the PROXY_POD_IP name used by the fieldRef
# entry below, and duplicate env names shadow each other).
"name" => "NODE_COOKIE",
"value" => "6eycE1E/S341t4Bcto262ffyFWklCWHQIKloJDJYR7Y="
},
%{
"name" => "PROXY_POD_IP",
"valueFrom" => %{"fieldRef" => %{"fieldPath" => "status.podIP"}}
}
],
"ports" => [
%{"containerPort" => 9000},
%{"containerPort" => 9001},
%{"containerPort" => 4369}
],
"livenessProbe" => %{
"failureThreshold" => 10,
"httpGet" => %{
"path" => "/health",
"port" => 9001,
"scheme" => "HTTP"
},
"initialDelaySeconds" => 300,
"periodSeconds" => 3600,
"successThreshold" => 1,
"timeoutSeconds" => 1200
},
"resources" => %{
"limits" => %{
"memory" => "1024Mi"
},
"requests" => %{
"memory" => "70Mi"
}
},
"envFrom" => [
%{
"configMapRef" => %{"name" => "proxy-cm"}
}
]
},
%{
"name" => "user-function",
"image" => image,
"ports" => [
%{"containerPort" => 8080}
]
}
]
}
}
}
}
end
defp run(%K8s.Operation{} = op),
do: K8s.Client.run(op, Bonny.Config.cluster_name())
defp track_event(type, resource),
do: Logger.info("#{type}: #{inspect(resource)}")
end
|
lib/eigr_functions_controller/controllers/v1/persistent_function.ex
| 0.762557
| 0.735095
|
persistent_function.ex
|
starcoder
|
defmodule Unicode.SentenceBreak do
@moduledoc """
Functions to introspect Unicode
sentence breaks for binaries
(Strings) and codepoints.
"""
@behaviour Unicode.Property.Behaviour
alias Unicode.Utils
@sentence_breaks Utils.sentence_breaks()
|> Utils.remove_annotations()
@doc """
Returns the map of Unicode
sentence breaks.
The sentence break name is the map
key and a list of codepoint
ranges as tuples as the value.
"""
def sentence_breaks do
@sentence_breaks
end
@doc """
Returns a list of known Unicode
sentence break names.
This function does not return the
names of any sentence break aliases.
"""
@known_sentence_breaks Map.keys(@sentence_breaks)
def known_sentence_breaks do
@known_sentence_breaks
end
@sentence_break_alias Utils.property_value_alias()
|> Map.get("sb")
|> Utils.invert_map
|> Utils.atomize_values()
|> Utils.downcase_keys_and_remove_whitespace()
|> Utils.add_canonical_alias()
@doc """
Returns a map of aliases for
Unicode sentence breaks.
An alias is an alternative name
for referring to a sentence break. Aliases
are resolved by the `fetch/1` and
`get/1` functions.
"""
@impl Unicode.Property.Behaviour
def aliases do
@sentence_break_alias
end
@doc """
Returns the Unicode ranges for
a given sentence break as a list of
ranges as 2-tuples.
Aliases are resolved by this function.
Returns either `{:ok, range_list}` or
`:error`.
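## Example

For instance, using the `:extend` break also shown in `count/1` below:

    iex> {:ok, ranges} = Unicode.SentenceBreak.fetch(:extend)
    iex> ranges |> hd() |> is_tuple()
    true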
"""
@impl Unicode.Property.Behaviour
def fetch(sentence_break) when is_atom(sentence_break) do
Map.fetch(sentence_breaks(), sentence_break)
end
def fetch(sentence_break) do
sentence_break = Utils.downcase_and_remove_whitespace(sentence_break)
sentence_break = Map.get(aliases(), sentence_break, sentence_break)
Map.fetch(sentence_breaks(), sentence_break)
end
@doc """
Returns the Unicode ranges for
a given sentence break as a list of
ranges as 2-tuples.
Aliases are resolved by this function.
Returns either `range_list` or
`nil`.
"""
@impl Unicode.Property.Behaviour
def get(sentence_break) do
case fetch(sentence_break) do
{:ok, sentence_break} -> sentence_break
_ -> nil
end
end
@doc """
Returns the count of the number of characters
for a given sentence break.
## Example
iex> Unicode.SentenceBreak.count(:extend)
2508
"""
@impl Unicode.Property.Behaviour
def count(sentence_break) do
with {:ok, sentence_break} <- fetch(sentence_break) do
Enum.reduce(sentence_break, 0, fn {from, to}, acc -> acc + to - from + 1 end)
end
end
@doc """
Returns the sentence break name(s) for the
given binary or codepoint.
In the case of a codepoint, a single
sentence break name is returned.
For a binary a list of distinct sentence break
names represented by the graphemes in
the binary is returned.
"""
def sentence_break(string) when is_binary(string) do
string
|> String.to_charlist()
|> Enum.map(&sentence_break/1)
|> Enum.uniq()
end
for {sentence_break, ranges} <- @sentence_breaks do
def sentence_break(codepoint) when unquote(Utils.ranges_to_guard_clause(ranges)) do
unquote(sentence_break)
end
end
def sentence_break(codepoint) when is_integer(codepoint) and codepoint in 0..0x10FFFF do
:other
end
end
|
lib/unicode/sentence_break.ex
| 0.903276
| 0.446434
|
sentence_break.ex
|
starcoder
|
defmodule SimplexNoise.Overview do
use OctaveNoise.Mixin
import Bitwise
# Skewing Factor 1D to 5D with dummy 0D value
@precomputed_dimensions 5
@skewing_factor ([1.0] ++
(1..@precomputed_dimensions |> Enum.map(&SimplexNoise.Skew.skewing_factor_to_simplical_grid/1)))
|> List.to_tuple
@unskewing_factor ([1.0] ++
(1..@precomputed_dimensions |> Enum.map(&SimplexNoise.Skew.skewing_factor_from_simplical_grid/1)))
|> List.to_tuple
@radius_squared 0.5
# Noise Function
# when is_list(point) would be safer
def noise(point) when is_tuple(point) do
point = point |> Tuple.to_list
noise point, point |> length |> default_hash_function
end
def noise(point), do: noise point, point |> length |> default_hash_function
def noise(point, hash_function) when is_tuple(point), do: noise point |> Tuple.to_list, hash_function
def noise(point, hash_function) do
dimensions = length(point)
point
|> vertex_list(dimensions)
|> Enum.map(&vertex_contribution(point, &1, dimensions, hash_function))
|> Enum.sum
#point
#|> gradient_index
#|> gradient(dimensions)
end
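# Example (a hedged sketch; the returned float depends on the per-vertex
# contributions under default_hash_function/1):
#
#     SimplexNoise.Overview.noise({0.5, 0.5})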
def vertex_list(point, dimensions) do
# origin of unit hypercube in simplical grid space
unit_hypercube_origin = point
|> unit_hypercube_origin(dimensions)
# position in unit hypercube in original grid space
position_in_unit_hypercube = point
|> position_in_unit_hypercube(unit_hypercube_origin, dimensions)
# simplex vertices in simplical grid space
position_in_unit_hypercube
|> component_rank
|> simplex_vertices(unit_hypercube_origin, dimensions)
end
def vertex_contribution(point, vertex, dimensions, hash_function) do
displacement_vector = point
|> displacement_vector( vertex |> unskew_to_original_grid(dimensions) )
contribution = displacement_vector
|> Enum.reduce(@radius_squared, fn displacement, acc -> acc - displacement*displacement end)
if 0.0 < contribution do
contribution = 8 * contribution * contribution * contribution * contribution
extrapolated_gradient = displacement_vector |> dot( hash_function.(vertex) )
contribution * extrapolated_gradient
else
0.0
end
end
def default_hash_function(dimensions) do
fn (point) ->
{gradient, _hash_value} = point
|> Enum.with_index
|> Enum.map_reduce(0x156E9,
fn {value, shift}, acc ->
hash_value = (trunc(value) * (2 * shift + 1)) &&& (0x1 <<< shift)
if 0x0 == hash_value do
{1, acc * 17 + 5}
else
{-1, acc * 17 + 5}
end
end
)
gradient
#|> List.replace_at(rem(hash_value, dimensions), 0)
end
end
def dot(point_a, point_b) do
point_a
|> Enum.zip(point_b)
|> Enum.reduce(0, fn {a, b}, acc -> acc + a * b end)
end
def unit_hypercube_origin(point, dimensions) do
point
|> skew_to_simplical_grid(dimensions)
|> Enum.map(&(&1 |> Float.floor |> trunc))
end
def displacement_vector(point_a, point_b) do
point_a
|> Enum.zip(point_b)
|> Enum.map(fn {a, b} -> a - b end)
end
def position_in_unit_hypercube(point, cell_origin, dimensions) do
cell_origin
|> unskew_to_original_grid(dimensions)
|> Enum.zip(point)
|> Enum.map(&(elem(&1, 1) - elem(&1, 0))) # original_x - origin_x
end
# [0, -1, 1, 0, 0] ->
# [1, 0, 4, 2, 3]
def component_rank(point) do
point
|> Enum.with_index() # add index
|> Enum.sort() # sort by value
|> Enum.with_index # add rank
|> Enum.sort_by(&elem(elem(&1,0),1)) # sort by original index
|> Enum.map(&elem(&1,1)) # discard all but rank in original order
end
# [1, 0, 4, 2, 3]=rank, [0, 0, 0, 0, 0]=origin ->
# [0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 1, 0, 1],
# [0, 0, 1, 1, 1], [1, 0, 1, 1, 1], [1, 1, 1, 1, 1]
def simplex_vertices(component_rank, simplex_origin, dimensions) do
vertex_blueprint = component_rank
|> Enum.zip(simplex_origin)
dimensions..0 # N+1 vertices in N dimensions
|> Enum.to_list
|> Enum.map(
fn vertex_index ->
vertex_blueprint
|> Enum.map(
fn {component_rank, component_base_value} ->
if component_rank < vertex_index do
component_base_value
else
component_base_value + 1
end
end
)
end
)
end
# use precomputed skewing factor for commonly used dimensions
# is_list(point) guard would be safer
def skew_to_simplical_grid(point, dimensions) when dimensions <= @precomputed_dimensions do
skewing_factor = @skewing_factor |> elem(dimensions)
point |> SimplexNoise.Skew.skew(skewing_factor)
end
# calculate skewing factor for not commonly used dimensions
# is_list(point) guard would be safer
def skew_to_simplical_grid(point, dimensions) do
skewing_factor = dimensions |> SimplexNoise.Skew.skewing_factor_to_simplical_grid
point |> SimplexNoise.Skew.skew(skewing_factor)
end
# use precomputed unskewing factor for commonly used dimensions
# uses Wikipedia style single additive skewing function
# is_list(point) guard would be safer
def unskew_to_original_grid(point, dimensions) when dimensions <= @precomputed_dimensions do
unskewing_factor = @unskewing_factor |> elem(dimensions)
point |> SimplexNoise.Skew.skew(unskewing_factor)
end
# calculate unskewing factor for not commonly used dimensions
# uses Wikipedia style single additive skewing function
# is_list(point) guard would be safer
def unskew_to_original_grid(point, dimensions) do
unskewing_factor = dimensions |> SimplexNoise.Skew.skewing_factor_from_simplical_grid
point |> SimplexNoise.Skew.skew(unskewing_factor)
end
end
|
lib/simplex_noise/overview.ex
| 0.696165
| 0.619529
|
overview.ex
|
starcoder
|
defmodule RigCloudEvents.CloudEvent do
@moduledoc """
CloudEvents is a vendor-neutral specification for defining the format of event data.
See: https://github.com/cloudevents
"""
@parser RigCloudEvents.Parser.PartialParser
@type t :: %__MODULE__{
json: String.t(),
parsed: @parser.t()
}
defstruct json: nil,
parsed: nil
# ---
@doc """
Initialize a new CloudEvent given a JSON string.
The given JSON string is decoded to an object and fields that are relevant for RIG
are checked for validity. However, note that this function does not implement the
full specification - a successful pass does not necessarily mean the given JSON
contains a valid CloudEvent according to the CloudEvents spec.
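
## Example

A sketch with a minimal CloudEvents 0.2 document containing only the fields
RIG checks:

    {:ok, event} =
      RigCloudEvents.CloudEvent.parse(~s({"specversion":"0.2","type":"greeting","id":"1"}))
    RigCloudEvents.CloudEvent.type!(event)
    #=> "greeting"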
"""
@spec parse(String.t()) :: {:ok, t} | {:error, any}
def parse(json) when is_binary(json) do
with parsed = @parser.parse(json),
event = %__MODULE__{json: json, parsed: parsed},
{:ok, _} <- specversion(event),
{:ok, _} <- type(event),
{:ok, _} <- id(event) do
{:ok, event}
else
error -> error
end
end
@doc """
Convenience function used in testing.
If this were called in production, it would of course be far more efficient to
access the given map directly. However, this module's raison d'être is the safe
handling of incoming JSON-encoded data, so it's safe to assume this function is
only ever called by tests.
"""
@spec parse(map) :: {:ok, t} | {:error, any}
def parse(map) when is_map(map) do
map |> Jason.encode!() |> parse
end
# ---
@doc """
Initialize a new CloudEvent or raise.
See `parse/1`.
"""
@spec parse!(String.t() | map) :: t
def parse!(input) do
case parse(input) do
{:ok, cloud_event} -> cloud_event
error -> raise "Failed to parse CloudEvent: #{inspect(error)}"
end
end
# ---
def specversion(%__MODULE__{parsed: parsed}) do
cond do
specversion_0_2?(parsed) -> {:ok, "0.2"}
specversion_0_1?(parsed) -> {:ok, "0.1"}
true -> {:error, :not_a_cloud_event}
end
end
# ---
def specversion!(event) do
{:ok, value} = specversion(event)
value
end
# ---
defp specversion_0_1?(parsed) do
case @parser.context_attribute(parsed, "cloudEventsVersion") do
{:ok, "0.1"} -> true
_ -> false
end
end
# ---
defp specversion_0_2?(parsed) do
case @parser.context_attribute(parsed, "specversion") do
{:ok, "0.2"} -> true
_ -> false
end
end
# ---
def type(%__MODULE__{parsed: parsed} = event) do
case specversion(event) do
{:ok, "0.2"} -> @parser.context_attribute(parsed, "type")
{:ok, "0.1"} -> @parser.context_attribute(parsed, "eventType")
end
end
# ---
def type!(event) do
{:ok, value} = type(event)
value
end
# ---
def id(%__MODULE__{parsed: parsed} = event) do
case specversion(event) do
{:ok, "0.2"} -> @parser.context_attribute(parsed, "id")
{:ok, "0.1"} -> @parser.context_attribute(parsed, "eventID")
end
end
# ---
def id!(event) do
{:ok, value} = id(event)
value
end
# ---
@spec find_value(t, json_pointer :: String.t()) :: {:ok, value :: any} | {:error, any}
def find_value(%__MODULE__{parsed: parsed}, json_pointer) do
@parser.find_value(parsed, json_pointer)
end
end
|
apps/rig_cloud_events/lib/rig_cloud_events/cloud_event.ex
| 0.84905
| 0.537284
|
cloud_event.ex
|
starcoder
|
defmodule PlugCacheControl.Helpers do
@moduledoc """
Contains helper functions for working with cache-control header on Plug
connections.
"""
alias Plug.Conn
alias PlugCacheControl.Header
@typep unit ::
:second
| :seconds
| :minute
| :minutes
| :hour
| :hours
| :day
| :days
| :week
| :weeks
| :year
| :years
@typep delta(t) :: {t, integer | {integer(), unit()}}
@typep flag(t) :: t | {t, boolean()}
@typep flag_directive ::
:must_revalidate
| :no_cache
| :no_store
| :no_transform
| :proxy_revalidate
| :private
| :public
@typep delta_directive :: :max_age | :s_maxage | :stale_while_revalidate | :stale_if_error
@type directive_opt :: flag(flag_directive) | delta(delta_directive) | {:no_cache, String.t()}
@doc """
Serializes the cache control directives and sets them on the connection,
overwriting the existing header value.
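
## Example

A sketch; directive forms follow `t:directive_opt/0`:

    conn
    |> put_cache_control([:public, max_age: {1, :hour}, stale_while_revalidate: 60])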
"""
@spec put_cache_control(Conn.t(), [directive_opt()]) :: Conn.t()
def put_cache_control(conn, directives) do
value =
directives
|> directives_to_map()
|> Header.new()
|> Header.to_string()
Conn.put_resp_header(conn, "cache-control", value)
end
@deprecated "Use `patch_cache_control/2` instead"
def merge_cache_control(conn, directives) do
patch_cache_control(conn, directives)
end
@doc """
Merges directives into the current value of the `cache-control` header.
"""
@spec patch_cache_control(Conn.t(), [directive_opt()]) :: Conn.t()
def patch_cache_control(conn, directives) do
new_value =
conn
|> Conn.get_resp_header("cache-control")
|> List.first("")
|> merge_cache_control_value(directives)
Conn.put_resp_header(conn, "cache-control", new_value)
end
defp merge_cache_control_value(value, directives) when is_binary(value) do
current = map_from_header(value)
updated = directives_to_map(directives)
current
|> Map.merge(updated)
|> Header.new()
|> Header.to_string()
end
defp directives_to_map(directives) do
mapper = fn
{key, _} = tuple when is_atom(key) ->
tuple
key when is_atom(key) ->
{key, true}
end
directives
|> Enum.map(mapper)
|> Enum.into(%{})
end
defp map_from_header(str) when is_binary(str) do
str
|> String.split(",", trim: true)
|> Enum.map(&String.trim/1)
|> Enum.map(&String.split(&1, "=", trim: true))
|> Enum.map(fn
[key] -> {directive_to_atom(key), true}
[key, value] -> {directive_to_atom(key), value}
end)
|> Enum.into(%{})
end
defp directive_to_atom(directive) when is_binary(directive) do
directive
|> kebab_to_snake_case()
|> String.to_existing_atom()
end
defp kebab_to_snake_case(str) do
String.replace(str, "-", "_")
end
end
|
lib/plug_cache_control/helpers.ex
| 0.811078
| 0.551091
|
helpers.ex
|
starcoder
|
defmodule Validation do
@moduledoc """
This module provides rails-like validations and strong parameters.
Set `use Validation` in your model or web.ex.
## Examples
```
defmodule Hoge.User do
use Hoge.Web, :model
use Validation
schema "users" do
field :name, :string
timestamps()
end
validates :name, validate_length: [min: 1, max: 20]
def changeset(struct, params \\ %{}) do
struct
|> permit(params, [:name])
end
end
```
"""
@doc false
defmacro __using__(_opts) do
quote do
import unquote(__MODULE__), only:
[validates: 2, validate: 2, fetch: 2, fetch: 3, permit: 3, permit: 4]
Module.register_attribute(__MODULE__, :validators, accumulate: true)
@before_compile unquote(__MODULE__)
end
end
@doc false
defmacro __before_compile__(env) do
validators_list = Module.get_attribute(env.module, :validators)
quote do
@doc false
def validators, do: unquote(validators_list)
end
end
@doc false
@spec call_validators(struct, map, atom | list | map) :: struct
defmacro call_validators(struct, field, validations) do
validations = if is_atom(validations), do: [validations], else: validations
Enum.reduce validations, struct, fn validation, struct ->
case validation do
{validator_name, args} ->
quote do
unquote(validator_name)(unquote(struct), unquote(field), unquote(args))
end
validator_name when is_atom(validator_name) ->
quote do
unquote(validator_name)(unquote(struct), unquote(field))
end
end
end
end
@doc """
Define required validations.
A function which calls validators will be created.
## Examples
```
validates :user_id, :foreign_key_constraint
validates :sender_id, [:foreign_key_constraint]
validates :body, validate_length: [min: 1, max: 80]
```
"""
@spec validates(atom, atom | list | map) :: tuple
defmacro validates(field, validations) do
function_name = String.to_atom("validates_#{Atom.to_string(field)}")
quote do
@doc false
@spec unquote(function_name)(struct) :: struct
def unquote(function_name)(struct) do
unquote(__MODULE__).call_validators(
struct, unquote(field), unquote(validations))
end
@validators {unquote(field), &__MODULE__.unquote(function_name)/1}
end
end
@doc """
Validate fields.
"""
@spec validate(struct, [atom]) :: tuple
defmacro validate(struct, fields) do
quote bind_quoted: [struct: struct, fields: fields] do
Enum.reduce __MODULE__.validators, struct,
fn({field, validator}, struct) ->
if field in fields do
validator.(struct)
else
struct
end
end
end
end
@doc """
Set required fields and validate fields.
## Examples
```
def fetch_registration_changeset(struct, params \\ %{}) do
struct
|> fetch(~w(body user_id)a, ~w(price)a)
end
```
"""
@spec fetch(struct, [atom], [atom]) :: tuple
defmacro fetch(struct, required_fields, optional_fields) do
quote bind_quoted: [struct: struct,
required_fields: required_fields, optional_fields: optional_fields] do
struct
|> Ecto.Changeset.validate_required(required_fields)
|> validate(required_fields ++ optional_fields)
end
end
@doc """
Set required fields and validate fields.
## Examples
```
def fetch_registration_changeset(struct, params \\ %{}) do
struct
|> fetch(~w(body user_id)a)
end
```
"""
@spec fetch(struct, [atom]) :: tuple
defmacro fetch(struct, required_fields) do
quote bind_quoted: [struct: struct, required_fields: required_fields] do
fetch(struct, required_fields, [])
end
end
@doc """
Cast fields, set required fields and validate fields.
## Examples
```
def registration_changeset(struct, params \\ %{}) do
struct
|> permit(params, ~w(body user_id)a, ~w(price)a)
end
```
"""
@spec permit(struct, map, [atom], [atom]) :: tuple
defmacro permit(struct, params, required_fields, optional_fields) do
quote bind_quoted: [struct: struct, params: params,
required_fields: required_fields, optional_fields: optional_fields] do
struct
|> Ecto.Changeset.cast(params, required_fields ++ optional_fields)
|> fetch(required_fields, optional_fields)
end
end
@doc """
Cast fields, set required fields and validate fields.
## Examples
```
def registration_changeset(struct, params \\ %{}) do
struct
|> permit(params, ~w(body user_id price)a)
end
```
"""
@spec permit(struct, map, [atom]) :: tuple
defmacro permit(struct, params, required_fields) do
quote bind_quoted: [struct: struct, params: params,
required_fields: required_fields] do
permit(struct, params, required_fields, [])
end
end
end
|
lib/validation.ex
| 0.891493
| 0.879974
|
validation.ex
|
starcoder
|
defmodule ExMachina.Ecto do
@moduledoc """
Module for building and inserting factories with Ecto
This module works much like the regular `ExMachina` module, but adds a few
nice things that make working with Ecto easier.
* It uses `ExMachina.EctoStrategy`, which adds `insert/1`, `insert/2`,
`insert_pair/2`, `insert_list/3`.
* Adds a `params_for` function that is useful for working with changesets or
sending params to API endpoints.
More in-depth examples are in the [README](README.html).
"""
defmacro __using__(opts) do
verify_ecto_dep()
if repo = Keyword.get(opts, :repo) do
quote do
use ExMachina
use ExMachina.EctoStrategy, repo: unquote(repo)
def params_for(factory_name, attrs \\ %{}) do
ExMachina.Ecto.params_for(__MODULE__, factory_name, attrs)
end
def params_with_assocs(factory_name, attrs \\ %{}) do
ExMachina.Ecto.params_with_assocs(__MODULE__, factory_name, attrs)
end
def fields_for(factory_name, attrs \\ %{}) do
raise "fields_for/2 has been renamed to params_for/2."
end
end
else
raise ArgumentError,
"""
expected :repo to be given as an option. Example:
use ExMachina.Ecto, repo: MyApp.Repo
"""
end
end
defp verify_ecto_dep do
unless Code.ensure_loaded?(Ecto) do
raise "You tried to use ExMachina.Ecto, but the Ecto module is not loaded. " <>
"Please add ecto to your dependencies."
end
end
@doc """
Builds a factory with the passed in factory_name and returns its fields
This is only for use with Ecto models.
Will return a map with the fields and virtual fields, but without the Ecto
metadata, associations, and the primary key.
If you want belongs_to associations to be inserted, use
`params_with_assocs/2`.
## Example
def user_factory do
%MyApp.User{name: "<NAME>", admin: false}
end
# Returns %{name: "<NAME>", admin: true}
params_for(:user, admin: true)
"""
def params_for(module, factory_name, attrs \\ %{}) do
module.build(factory_name, attrs)
|> drop_ecto_fields
end
@doc """
Same as `params_for/2`, but inserts all belongs_to associations and sets the
foreign keys.
## Example
def article_factory do
%MyApp.Article{title: "An Awesome Article", author: build(:author)}
end
# Inserts an author and returns %{title: "An Awesome Article", author_id: 12}
params_with_assocs(:article)
"""
def params_with_assocs(module, factory_name, attrs \\ %{}) do
module.build(factory_name, attrs)
|> insert_belongs_to_assocs(module)
|> drop_ecto_fields
end
defp insert_belongs_to_assocs(record = %{__struct__: struct, __meta__: %{__struct__: Ecto.Schema.Metadata}}, module) do
Enum.reduce(struct.__schema__(:associations), record, fn(association_name, record) ->
case struct.__schema__(:association, association_name) do
association = %{__struct__: Ecto.Association.BelongsTo} ->
insert_built_belongs_to_assoc(
module,
association.owner_key,
association_name,
record
)
_ -> record
end
end)
end
defp insert_built_belongs_to_assoc(module, foreign_key, association_name, record) do
case Map.get(record, association_name) do
built_relation = %{__meta__: %{state: :built}} ->
relation = built_relation |> module.insert
Map.put(record, foreign_key, relation.id)
_ -> Map.delete(record, foreign_key)
end
end
defp drop_ecto_fields(record = %{__struct__: struct, __meta__: %{__struct__: Ecto.Schema.Metadata}}) do
record
|> Map.from_struct
|> Map.delete(:__meta__)
|> Map.drop(struct.__schema__(:associations))
|> drop_autogenerated_ids(struct)
end
defp drop_ecto_fields(record) do
raise ArgumentError, "#{inspect record} is not an Ecto model. Use `build` instead."
end
defp drop_autogenerated_ids(map, struct) do
case struct.__schema__(:autogenerate_id) do
nil -> map
{name, _type} -> Map.delete(map, name)
end
end
end
|
lib/ex_machina/ecto.ex
| 0.783368
| 0.414395
|
ecto.ex
|
starcoder
|
defmodule Sanbase.Metric.Helper do
@moduledoc """
A helper module that uses the separate metric modules and builds maps and
mappings that combine the data from all modules into a single place.
This module is hiding the metric modules from the user-facing `Sanbase.Metric`
module and makes adding new modules transparent.
The order of modules in `@metric_modules` **does** matter.
`Module.register_attribute/3` with `accumulate: true` option puts new
attributes on top of the accumulated list. That means when we put them in a
map those that are first in @metric_modules might override later ones. One
example for this is part of social metrics which are both in
Sanbase.Clickhouse.MetricAdapter and Sanbase.SocialData.MetricAdapter and are
invoked with different args. The ones in `Sanbase.Clickhouse.MetricAdapter`
will override the ones in Sanbase.SocialData.MetricAdapter.
"""
@metric_modules [
Sanbase.Clickhouse.Github.MetricAdapter,
Sanbase.Clickhouse.MetricAdapter,
Sanbase.SocialData.MetricAdapter,
Sanbase.Price.MetricAdapter,
Sanbase.Twitter.MetricAdapter,
Sanbase.Clickhouse.TopHolders.MetricAdapter,
Sanbase.Clickhouse.Uniswap.MetricAdapter,
Sanbase.BlockchainAddress.MetricAdapter
]
Module.register_attribute(__MODULE__, :aggregations_acc, accumulate: true)
Module.register_attribute(__MODULE__, :aggregations_per_metric_acc, accumulate: true)
Module.register_attribute(__MODULE__, :free_metrics_acc, accumulate: true)
Module.register_attribute(__MODULE__, :restricted_metrics_acc, accumulate: true)
Module.register_attribute(__MODULE__, :access_map_acc, accumulate: true)
Module.register_attribute(__MODULE__, :min_plan_map_acc, accumulate: true)
Module.register_attribute(__MODULE__, :timeseries_metric_module_mapping_acc, accumulate: true)
Module.register_attribute(__MODULE__, :histogram_metric_module_mapping_acc, accumulate: true)
Module.register_attribute(__MODULE__, :table_metric_module_mapping_acc, accumulate: true)
Module.register_attribute(__MODULE__, :required_selectors_map_acc, accumulate: true)
Module.register_attribute(__MODULE__, :deprecated_metrics_acc, accumulate: true)
for module <- @metric_modules do
@required_selectors_map_acc module.required_selectors
@aggregations_acc module.available_aggregations()
@free_metrics_acc module.free_metrics()
@restricted_metrics_acc module.restricted_metrics()
@access_map_acc module.access_map()
@min_plan_map_acc module.min_plan_map()
aggregations_fn = fn metric ->
{:ok, %{available_aggregations: aggr}} = module.metadata(metric)
{metric, [nil] ++ aggr}
end
@aggregations_per_metric_acc Enum.into(module.available_metrics(), %{}, aggregations_fn)
@timeseries_metric_module_mapping_acc Enum.map(
module.available_timeseries_metrics(),
fn metric -> %{metric: metric, module: module} end
)
@histogram_metric_module_mapping_acc Enum.map(
module.available_histogram_metrics(),
fn metric -> %{metric: metric, module: module} end
)
@table_metric_module_mapping_acc Enum.map(
module.available_table_metrics(),
fn metric -> %{metric: metric, module: module} end
)
if function_exported?(module, :deprecated_metrics_map, 0),
do: @deprecated_metrics_acc(module.deprecated_metrics_map)
end
flat_unique = fn list -> list |> List.flatten() |> Enum.uniq() end
@aggregations @aggregations_acc |> then(flat_unique)
@free_metrics @free_metrics_acc |> then(flat_unique)
@restricted_metrics @restricted_metrics_acc |> then(flat_unique)
@timeseries_metric_module_mapping @timeseries_metric_module_mapping_acc |> then(flat_unique)
@table_metric_module_mapping @table_metric_module_mapping_acc |> then(flat_unique)
@histogram_metric_module_mapping @histogram_metric_module_mapping_acc |> then(flat_unique)
# Convert a list of maps to a single map with metric-module key-value pairs
metric_module_map = fn list -> Enum.into(list, %{}, &{&1.metric, &1.module}) end
@histogram_metric_to_module_map @histogram_metric_module_mapping |> then(metric_module_map)
@table_metric_to_module_map @table_metric_module_mapping |> then(metric_module_map)
@timeseries_metric_to_module_map @timeseries_metric_module_mapping
|> then(metric_module_map)
@metric_module_mapping (@histogram_metric_module_mapping ++
@timeseries_metric_module_mapping ++ @table_metric_module_mapping)
|> Enum.uniq()
@metric_to_module_map @metric_module_mapping |> Enum.into(%{}, &{&1.metric, &1.module})
# Convert a list of maps to one single map by merging all the elements
reduce_merge = fn list -> Enum.reduce(list, %{}, &Map.merge(&2, &1)) end
@aggregations_per_metric @aggregations_per_metric_acc |> then(reduce_merge)
@min_plan_map @min_plan_map_acc |> then(reduce_merge)
@access_map @access_map_acc |> then(reduce_merge)
@required_selectors_map @required_selectors_map_acc
|> then(reduce_merge)
resolve_restrictions = fn
restrictions when is_map(restrictions) ->
restrictions
restriction when restriction in [:restricted, :free] ->
%{"historical" => restriction, "realtime" => restriction}
end
@access_map Enum.into(@access_map, %{}, fn {metric, restrictions} ->
{metric, resolve_restrictions.(restrictions)}
end)
@metrics Enum.map(@metric_module_mapping, & &1.metric)
@timeseries_metrics Enum.map(@timeseries_metric_module_mapping, & &1.metric)
@histogram_metrics Enum.map(@histogram_metric_module_mapping, & &1.metric)
@metrics_mapset MapSet.new(@metrics)
@timeseries_metrics_mapset MapSet.new(@timeseries_metrics)
@histogram_metrics_mapset MapSet.new(@histogram_metrics)
@table_metrics Enum.map(@table_metric_module_mapping, & &1.metric)
@table_metrics_mapset MapSet.new(@table_metrics)
@deprecated_metrics_map Enum.reduce(@deprecated_metrics_acc, %{}, &Map.merge(&1, &2))
|> Enum.reject(&match?({_, nil}, &1))
|> Map.new()
def access_map(), do: @access_map
def aggregations_per_metric(), do: @aggregations_per_metric
def aggregations(), do: @aggregations
def free_metrics(), do: @free_metrics
def deprecated_metrics_map(), do: @deprecated_metrics_map
def histogram_metric_module_mapping(), do: @histogram_metric_module_mapping
def histogram_metric_to_module_map(), do: @histogram_metric_to_module_map
def histogram_metrics_mapset(), do: @histogram_metrics_mapset
def histogram_metrics(), do: @histogram_metrics
def metric_module_mapping(), do: @metric_module_mapping
def metric_modules(), do: @metric_modules
def metric_to_module_map(), do: @metric_to_module_map
def metrics_mapset(), do: @metrics_mapset
def metrics(), do: @metrics
def min_plan_map(), do: @min_plan_map
def restricted_metrics(), do: @restricted_metrics
def table_metrics(), do: @table_metrics
def table_metrics_mapset(), do: @table_metrics_mapset
def table_metric_module_mapping(), do: @table_metric_module_mapping
def table_metric_to_module_map(), do: @table_metric_to_module_map
def timeseries_metric_module_mapping(), do: @timeseries_metric_module_mapping
def timeseries_metric_to_module_map(), do: @timeseries_metric_to_module_map
def timeseries_metrics_mapset(), do: @timeseries_metrics_mapset
def timeseries_metrics(), do: @timeseries_metrics
def required_selectors_map(), do: @required_selectors_map
end
|
lib/sanbase/metric/helper.ex
| 0.794385
| 0.467818
|
helper.ex
|
starcoder
|
defmodule CarCache.Cache do
@moduledoc """
Data structure representing a CAR (Clock with Adaptive Replacement) cache.
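A minimal usage sketch (assuming the companion `CarCache.Clock` and
`CarCache.LRU` modules from this library; callers thread the returned
struct through subsequent calls):
car = CarCache.Cache.new(:my_cache, max_size: 10_000)
car = CarCache.Cache.put(car, :key, :value)
CarCache.Cache.get(car, :key)
#=> :value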
"""
alias CarCache.Clock
alias CarCache.LRU
defstruct c: 1_000,
name: nil,
p: 0,
data: nil,
t1: nil,
t2: nil,
b1: nil,
b2: nil
@type t :: %__MODULE__{
c: non_neg_integer(),
name: atom(),
p: non_neg_integer(),
data: :ets.tid(),
t1: Clock.t(),
t2: Clock.t(),
b1: LRU.t(),
b2: LRU.t()
}
@doc """
Create a new Cache data structure
"""
@spec new(atom()) :: t()
def new(name, opts \\ []) do
data_name = :"#{name}_data"
data = :ets.new(data_name, [:named_table, :set, :public, {:read_concurrency, true}])
%__MODULE__{
c: Keyword.get(opts, :max_size, 1_000),
name: name,
p: 0,
data: data,
t1: Clock.new(:"#{name}_t1", data),
t2: Clock.new(:"#{name}_t2", data),
b1: LRU.new(),
b2: LRU.new()
}
end
@doc """
Get an item from the cache
"""
@spec get(t() | atom(), any()) :: any()
def get(%__MODULE__{} = car, key) do
get(car.name, key)
end
def get(name, key) do
data_name = :"#{name}_data"
t1_name = :"#{name}_t1"
t2_name = :"#{name}_t2"
case :ets.lookup(data_name, key) do
[{^key, :deleted, ^t1_name, _}] ->
:telemetry.execute([:car_cache, :get], %{status: :deleted}, %{key: key, name: name, level: :t1})
nil
[{^key, :deleted, ^t2_name, _}] ->
:telemetry.execute([:car_cache, :get], %{status: :deleted}, %{key: key, name: name, level: :t2})
nil
[{^key, value, ^t1_name, 0}] ->
:telemetry.execute([:car_cache, :get], %{status: :hit}, %{key: key, name: name, level: :t1})
:ets.update_element(data_name, key, {4, 1})
value
[{^key, value, ^t1_name, 1}] ->
:telemetry.execute([:car_cache, :get], %{status: :hit}, %{key: key, name: name, level: :t1})
value
[{^key, value, ^t2_name, 0}] ->
:ets.update_element(data_name, key, {4, 1})
:telemetry.execute([:car_cache, :get], %{status: :hit}, %{key: key, name: name, level: :t2})
value
[{^key, value, ^t2_name, 1}] ->
:telemetry.execute([:car_cache, :get], %{status: :hit}, %{key: key, name: name, level: :t2})
value
_ ->
:telemetry.execute([:car_cache, :get], %{status: :miss}, %{key: key, name: name})
nil
end
end
@doc """
Insert an item in the cache
"""
@spec put(t(), any(), any()) :: t()
def put(car, key, value) do
start_time = System.monotonic_time()
case :ets.lookup(car.data, key) do
[{^key, ^value, _, _}] ->
put_telemetry(car, key, start_time)
car
[{^key, _value, _, _}] ->
:ets.update_element(car.data, key, {2, value})
put_telemetry(car, key, start_time)
car
[] ->
car =
if car.t1.size + car.t2.size == car.c do
# cache full, replace a page from cache
car = replace(car)
# cache directory replacement
cond do
!LRU.member?(car.b1, key) && !LRU.member?(car.b2, key) && car.t1.size + car.b1.size == car.c ->
# Discard the LRU page in B1.
b1 = LRU.drop(car.b1)
%__MODULE__{car | b1: b1}
car.t1.size + car.t2.size + car.b1.size + car.b2.size == 2 * car.c &&
!LRU.member?(car.b1, key) && !LRU.member?(car.b2, key) ->
# Discard the LRU page in B2.
b2 = LRU.drop(car.b2)
%__MODULE__{car | b2: b2}
true ->
car
end
else
car
end
cond do
# cache directory miss
!LRU.member?(car.b1, key) && !LRU.member?(car.b2, key) ->
# Insert x at the tail of T1. Set the page reference bit of x to 0.
t1 = Clock.insert(car.t1, key, value)
put_telemetry(car, key, start_time)
%__MODULE__{car | t1: t1}
# cache directory hit
LRU.member?(car.b1, key) ->
# Adapt:Increase the target size for the list T1 as:p=min{p+max{1,|B2|/|B1|},c}
p = min(car.p + max(1, car.b2.size / car.b1.size), car.c)
:telemetry.execute([:car_cache, :resize], %{p: p}, %{cache: car.name})
car = %__MODULE__{car | p: p}
# Move x at the tail of T2. Set the page reference bit of x to 0.
t2 = Clock.insert(car.t2, key, value)
put_telemetry(car, key, start_time)
%__MODULE__{car | t2: t2}
# cache directory hit
# x must be in B2
true ->
# Adapt:Decrease the target size for the list T1 as:p=max{p−max{1,|B1|/|B2|},0}
p = max(car.p - max(1, car.b1.size / car.b2.size), 0)
:telemetry.execute([:car_cache, :resize], %{p: p}, %{cache: car.name})
car = %__MODULE__{car | p: p}
# Move x at the tail of T2. Set the page reference bit of x to 0.
t2 = Clock.insert(car.t2, key, value)
put_telemetry(car, key, start_time)
%__MODULE__{car | t2: t2}
end
end
end
@spec replace(t()) :: t()
defp replace(car) do
if car.t1.size >= max(1, car.p) do
case Clock.pop(car.t1) do
{key, _value, 0, t1} ->
# Demote the head page in T1 and make it the MRU page in B1.
b1 = LRU.insert(car.b1, key)
:telemetry.execute([:car_cache, :eviction], %{}, %{key: key, cache: car.name})
%__MODULE__{car | b1: b1, t1: t1}
{key, value, 1, t1} ->
# Set the page reference bit of head page in T1 to 0, and make it the tail page in T2.
t2 = Clock.insert(car.t2, key, value)
:telemetry.execute([:car_cache, :promotion], %{}, %{key: key, cache: car.name})
replace(%__MODULE__{car | t1: t1, t2: t2})
end
else
case Clock.pop(car.t2) do
{key, _value, 0, t2} ->
# Demote the head page in T2 and make it the MRU page in B2.
b2 = LRU.insert(car.b2, key)
:telemetry.execute([:car_cache, :eviction], %{}, %{key: key, cache: car.name})
%__MODULE__{car | b2: b2, t2: t2}
{key, value, 1, t2} ->
# Set the page reference bit of head page in T2 to 0, and make it the tail page in T2.
t2 = Clock.insert(t2, key, value)
:telemetry.execute([:car_cache, :demotion], %{}, %{key: key, cache: car.name})
replace(%__MODULE__{car | t2: t2})
end
end
end
@doc """
Mark a key as deleted.
It will not be returned on a get and will be eventually evicted from the
cache as more keys are inserted.
"""
@spec delete(t(), any()) :: t()
def delete(car, key) do
case :ets.lookup(car.data, key) do
[] ->
car
[{^key, _value, _, _}] ->
:ets.update_element(car.data, key, {2, :deleted})
car
end
end
@spec put_telemetry(t(), any(), non_neg_integer()) :: :ok
defp put_telemetry(car, key, start_time) do
end_time = System.monotonic_time()
delta = end_time - start_time
:telemetry.execute([:car_cache, :put], %{duration: delta}, %{key: key, cache: car.name})
end
end
|
lib/car_cache/cache.ex
| 0.770767
| 0.471223
|
cache.ex
|
starcoder
|
defmodule Elsa.Util do
@moduledoc """
Provides functions for simplifying first-class interactions (consuming and
producing) such as connecting to a cluster and establishing a persistent
client process for interacting with a cluster.
"""
@default_max_chunk_size 900_000
@timestamp_size_in_bytes 10
@doc """
Wrap establishing a connection to a cluster for performing an operation.
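Example (sketch, assuming a Kafka broker reachable on localhost):
Elsa.Util.with_connection([localhost: 9092], :any, fn connection ->
Elsa.Util.get_api_version(connection, :produce)
end)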
"""
@spec with_connection(Elsa.endpoints(), atom(), fun()) :: term()
def with_connection(endpoints, type \\ :any, fun) when is_function(fun) do
endpoints
|> reformat_endpoints()
|> connect(type)
|> do_with_connection(fun)
end
@doc """
Retrieves the appropriate registry for the given value and validates it exists.
Executes the function with the registry name if it successfully locates one.
"""
@spec with_registry(atom() | String.t(), (atom() -> term())) :: term() | {:error, String.t()}
def with_registry(connection, function) when is_function(function, 1) do
registry = Elsa.Supervisor.registry(connection)
case Process.whereis(registry) do
nil -> {:error, "Elsa with connection #{connection} has not been started correctly"}
_pid -> function.(registry)
end
end
@doc """
Retrieves the pid of a brod client process if it exists and executes the
given function against the client.
"""
@spec with_client(atom(), (pid() -> term())) :: term() | {:error, String.t()}
def with_client(registry, function) when is_function(function, 1) do
case Elsa.Registry.whereis_name({registry, :brod_client}) do
:undefined -> {:error, "Unable to find brod_client in registry(#{registry})"}
pid -> function.(pid)
end
end
@doc """
Convert supplied cluster endpoints from common keyword list format to
brod-compatible tuple.
"""
@spec reformat_endpoints(keyword()) :: [{charlist(), integer()}]
def reformat_endpoints(endpoints) do
Enum.map(endpoints, fn {key, value} -> {to_charlist(key), value} end)
end
@doc """
Retrieve the api version of the desired operation supported by the
connected cluster.
"""
@spec get_api_version(pid(), atom()) :: non_neg_integer()
def get_api_version(connection, api) do
{:ok, api_versions} = :kpro.get_api_versions(connection)
{_, version} = Map.get(api_versions, api)
version
end
@doc """
Determines if client pid is alive
"""
@spec client?(pid() | atom()) :: boolean()
def client?(pid) when is_pid(pid) do
Process.alive?(pid)
end
def client?(client) when is_atom(client) do
case Process.whereis(client) do
pid when is_pid(pid) -> client?(pid)
nil -> false
end
end
@doc """
Create a named client connection process for managing interactions
with the connected cluster.
"""
@spec start_client(keyword(), atom(), keyword()) :: :ok | {:error, term()}
def start_client(endpoints, name, config \\ []) do
:brod.start_client(endpoints, name, config)
end
@doc """
Split messages into chunks whose cumulative byte size stays below the limit
supplied by the calling function, in bytes. If no chunk size is specified, the
default maximum is 900,000 bytes (roughly 1 megabyte). If no sizing function
is provided, an internal function based on `Kernel.byte_size/1` is used.
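For illustration (each item is charged an extra 10 bytes for the message
timestamp, so two 2-byte items weigh 24 bytes here):
iex> Elsa.Util.chunk_by_byte_size(["aa", "bb", "cc"], 25, &byte_size/1)
[["aa", "bb"], ["cc"]]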
"""
@spec chunk_by_byte_size(term(), integer(), fun()) :: [term()]
def chunk_by_byte_size(collection, chunk_byte_size \\ @default_max_chunk_size, byte_size_function \\ &get_byte_size/1) do
collection
|> Enum.chunk_while({0, []}, &chunk(&1, &2, chunk_byte_size, byte_size_function), &after_chunk/1)
end
@doc """
Return the number of partitions for a given topic. Bypasses the need for a persistent client
for lighter weight interactions from one-off calls.
"""
@spec partition_count(keyword | Elsa.connection() | pid, String.t()) :: integer()
def partition_count(endpoints, topic) when is_list(endpoints) do
{:ok, metadata} = :brod.get_metadata(reformat_endpoints(endpoints), [topic])
count_partitions(metadata)
end
def partition_count(connection, topic) when is_atom(connection) or is_pid(connection) do
{:ok, metadata} = :brod_client.get_metadata(connection, topic)
count_partitions(metadata)
end
# Handle brod < 3.16
defp count_partitions(%{topic_metadata: topic_metadatas}) do
[count | _] = for %{partition_metadata: metadata} <- topic_metadatas, do: Enum.count(metadata)
count
end
# Handle brod 3.16+
defp count_partitions(%{topics: topics}) do
[count | _] = for %{partitions: partitions} <- topics, do: Enum.count(partitions)
count
end
defp connect(endpoints, :controller), do: :kpro.connect_controller(endpoints, [])
defp connect(endpoints, _type), do: :kpro.connect_any(endpoints, [])
defp do_with_connection({:ok, connection}, fun) do
fun.(connection)
after
:kpro.close_connection(connection)
end
defp do_with_connection({:error, reason}, _fun) do
raise Elsa.ConnectError, message: format_reason(reason)
end
defp format_reason(reason) do
cond do
is_binary(reason) -> reason
Exception.exception?(reason) -> Exception.format(:error, reason)
true -> inspect(reason)
end
end
defp chunk(item, {current_size, current_batch}, chunk_byte_size, byte_size_function) do
item_size = byte_size_function.(item) + @timestamp_size_in_bytes
new_total = current_size + item_size
case new_total < chunk_byte_size do
true -> add_item_to_batch(new_total, item, current_batch)
false -> finish_batch(item_size, item, current_batch)
end
end
defp add_item_to_batch(total, item, batch) do
{:cont, {total, [item | batch]}}
end
defp finish_batch(total, item, batch) do
{:cont, Enum.reverse(batch), {total, [item]}}
end
defp after_chunk({_size, []}) do
{:cont, {0, []}}
end
defp after_chunk({_size, current_batch}) do
finish_batch(0, nil, current_batch)
end
defp get_byte_size(%{key: key, value: value} = msg) do
header_size =
Map.get(msg, :headers, [])
|> Enum.map(fn {key, value} -> byte_size(key) + byte_size(value) end)
|> Enum.sum()
byte_size(key) + byte_size(value) + header_size
end
end
|
lib/elsa/util.ex
| 0.819641
| 0.58673
|
util.ex
|
starcoder
|
defmodule Ngram.GameState do
@moduledoc """
Model the game state for an ngram word-guessing game.
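A rough flow (sketch; `p1`, `p2`, `p3` are `Ngram.Player` structs, and
exactly three players are required before the game can start):
state = Ngram.GameState.new("ABCD", p1)
{:ok, state} = Ngram.GameState.join_game(state, p2)
{:ok, state} = Ngram.GameState.join_game(state, p3)
{:ok, state} = Ngram.GameState.start(state)
current = Ngram.GameState.get_player(state, state.player_turn)
{:ok, state} = Ngram.GameState.guess_letter(state, current, "t")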
"""
alias Ngram.Player
alias __MODULE__
defstruct code: nil,
players: [],
player_turn: nil,
status: :not_started,
timer_ref: nil,
ngram: "",
guesses: Map.new(),
puzzle: [],
prize_mult: 1,
winnings: Map.new(),
winner_id: nil
@type game_code :: String.t()
@type t :: %GameState{
code: nil | String.t(),
status: :not_started | :playing | :done,
players: [Player.t()],
player_turn: nil | integer(),
timer_ref: nil | reference()
}
# 30 Minutes of inactivity ends the game
@inactivity_timeout 1000 * 60 * 30
@alphabet ~w(a b c d e f g h i j k l m n o p q r s t u v w x y z)
@vowels ~w(a e i o u)
@doc """
Return an initialized GameState struct. Requires one player to start.
"""
@spec new(game_code(), Player.t()) :: t()
def new(game_code, %Player{} = player) do
%GameState{code: game_code, players: [%Player{player | letter: "O"}], ngram: "a beautiful day in the neighborhood"}
|> reset_inactivity_timer()
|> update_puzzle()
|> random_prize_mult()
end
defp random_prize_mult({:error, _reason} = error), do: error
defp random_prize_mult({:ok, %GameState{} = state}), do: {:ok, random_prize_mult(state)}
defp random_prize_mult(%GameState{} = state), do: %{state | prize_mult: Enum.random(1..12) * 100}
defp calculate_guess_score(%GameState{} = state, letter) do
if letter in @vowels do
0
else
letter_count = state.ngram |> String.graphemes |> Enum.count(& &1 == letter)
letter_count * state.prize_mult
end
end
defp update_winnings({:error, _reason} = error, %Player{} = _player, _amount), do: error
defp update_winnings({:ok, %GameState{} = state}, %Player{} = player, amount) do
update_winnings(state, player, amount)
end
defp update_winnings(%GameState{} = state, %Player{} = player, amount) do
winnings = state.winnings |> Map.update(player.id, amount, &(&1 + amount))
new_winnings_amount = winnings |> Map.get(player.id)
if new_winnings_amount >= 0 do
{:ok, %{state | winnings: winnings}}
else
{:error, "Insufficient balance"}
end
end
@doc """
Guess letter
"""
def guess_letter(%GameState{} = state, %Player{} = player, letter) do
letter =
if letter in @vowels do
""
else
letter
end
guess(state, player, letter)
end
@doc """
Buy a vowel. Costs 250 from the player's winnings.
"""
def buy_vowel(%GameState{} = state, %Player{} = player, letter) do
letter =
if letter in @vowels do
letter
else
""
end
state
|> update_winnings(player, -250)
|> guess(player, letter)
end
@doc """
Apply a letter guess for the given player: record it, score it, and advance the turn.
"""
def guess({:error, _reason} = error, _player, _letter), do: error
def guess({:ok, %GameState{} = state}, %Player{} = player, letter), do: guess(state, player, letter)
def guess(%GameState{} = state, %Player{} = player, letter) do
letter = String.downcase(letter)
# Set prize_mult to 0 if someone guessed this letter
# already. This prevents double scoring of letters.
# Also set prize_mult to 0 if the letter is a vowel.
prize_mult =
cond do
Map.has_key?(state.guesses, letter) -> 0
letter in @vowels -> 0
true -> state.prize_mult
end
# Add this letter to the guesses if it's a letter of the alphabet
guess =
if letter in @alphabet do
%{letter => true}
else
%{}
end
guesses = Map.merge(state.guesses, guess)
guess_score = calculate_guess_score(state, letter)
state
|> Map.put(:guesses, guesses)
|> Map.put(:prize_mult, prize_mult)
|> update_puzzle()
|> verify_player_turn(player)
|> update_winnings(player, guess_score)
|> check_for_done()
|> next_player_turn()
|> random_prize_mult()
|> reset_inactivity_timer()
end
def update_puzzle(%GameState{} = state) do
hidden_letters = @alphabet -- Map.keys(state.guesses)
puzzle =
state.ngram
|> String.split
|> Enum.map(&(String.replace(&1, hidden_letters, " ") |> String.split("", trim: true)))
%{state | puzzle: puzzle }
end
@doc """
Allow another player to join the game. Exactly 2 players are required to play.
"""
@spec join_game(t(), Player.t()) :: {:ok, t()} | {:error, String.t()}
def join_game(%GameState{players: []} = _state, %Player{}) do
{:error, "Can only join a created game"}
end
def join_game(%GameState{players: [_p1, _p2, _p3]} = _state, %Player{} = _player) do
{:error, "Only 3 players allowed"}
end
def join_game(%GameState{players: [_p1]} = state, %Player{} = player) do
{:ok, %GameState{state | players: [player | state.players]}}
end
def join_game(%GameState{} = state, %Player{} = player) do
{:ok, %GameState{state | players: [player | state.players]} |> reset_inactivity_timer()}
end
@doc """
Return the player from the game state found by the ID.
"""
@spec get_player(t(), player_id :: String.t()) :: nil | Player.t()
def get_player(%GameState{players: players} = _state, player_id) do
Enum.find(players, &(&1.id == player_id))
end
@doc """
Return the player from the game state found by the ID in an `:ok`/`:error` tuple.
"""
@spec find_player(t(), player_id :: String.t()) :: {:ok, Player.t()} | {:error, String.t()}
def find_player(%GameState{} = state, player_id) do
case get_player(state, player_id) do
nil ->
{:error, "Player not found"}
%Player{} = player ->
{:ok, player}
end
end
@doc """
Return the opponent player from the perspective of the given player.
"""
@spec opponent(t(), Player.t()) :: nil | Player.t()
def opponent(%GameState{} = state, %Player{} = player) do
# Find the first player that doesn't have this ID
Enum.find(state.players, &(&1.id != player.id))
end
@doc """
Start the game.
"""
@spec start(t()) :: {:ok, t()} | {:error, String.t()}
def start(%GameState{status: :playing}), do: {:error, "Game in play"}
def start(%GameState{status: :done}), do: {:error, "Game is done"}
def start(%GameState{status: :not_started, players: [_p1, _p2, _p3]} = state) do
first_player = Enum.at(state.players, 0)
{:ok, %GameState{state | status: :playing, player_turn: first_player.id} |> reset_inactivity_timer()}
end
def start(%GameState{players: _players}), do: {:error, "Missing players"}
@doc """
Return a boolean value for if it is currently the given player's turn.
"""
@spec player_turn?(t(), Player.t()) :: boolean()
def player_turn?(%GameState{player_turn: player_id}, %Player{id: id}) when player_id == id,
do: true
def player_turn?(%GameState{}, %Player{}), do: false
@doc """
Restart the game resetting the state back.
"""
def restart(%GameState{players: [p1 | _]} = state) do
%GameState{state | status: :playing, player_turn: p1.id}
|> reset_inactivity_timer()
end
defp verify_player_turn(%GameState{} = state, %Player{} = player) do
if player_turn?(state, player) do
{:ok, state}
else
{:error, "Not your turn!"}
end
end
defp get_remaining_letters(%GameState{} = state) do
ngram_letters =
state.ngram
|> String.split("", trim: true)
|> Enum.uniq()
|> Enum.filter(&(&1 != " "))
guesses =
state.guesses
|> Map.keys()
ngram_letters -- guesses
end
defp check_for_done({:ok, %GameState{} = state}) do
state =
case get_remaining_letters(state) do
[] ->
state
|> Map.put(:status, :done)
|> Map.put(:winner_id, state.player_turn)
_ ->
state
end
{:ok, state}
end
defp check_for_done({:error, _reason} = error), do: error
defp next_player_turn({:error, _reason} = error), do: error
defp next_player_turn({:ok, %GameState{player_turn: player_turn} = state}) do
# Find the current player's index, increment it, then mod by the number of players
next_index = (1 + Enum.find_index(state.players, &(&1.id == player_turn)))
next_index = rem(next_index, length(state.players))
next_player = Enum.at(state.players, next_index)
{:ok, %GameState{state | player_turn: next_player.id}}
end
defp reset_inactivity_timer({:error, _reason} = error), do: error
defp reset_inactivity_timer({:ok, %GameState{} = state}) do
{:ok, reset_inactivity_timer(state)}
end
defp reset_inactivity_timer(%GameState{} = state) do
state
|> cancel_timer()
|> set_timer()
end
defp cancel_timer(%GameState{timer_ref: ref} = state) when is_reference(ref) do
Process.cancel_timer(ref)
%GameState{state | timer_ref: nil}
end
defp cancel_timer(%GameState{} = state), do: state
defp set_timer(%GameState{} = state) do
%GameState{
state
| timer_ref: Process.send_after(self(), :end_for_inactivity, @inactivity_timeout)
}
end
end
|
lib/ngram/game_state.ex
| 0.780286
| 0.453141
|
game_state.ex
|
starcoder
|
defmodule EctoCommons.URLValidator do
@moduledoc ~S"""
This validator is used to validate URLs.
## Options
There are some available `:checks` depending on the strictness of what you want to validate:
- `:parsable`: Checks to see if the URL is parsable by the `:http_uri.parse/1` Erlang function.
This can have issues with international URLs where it should be disabled (see tests). Defaults to enabled
- `:empty`: Checks to see if the parsed `%URI{}` struct is not empty (all fields set to nil). Defaults to enabled
- `:scheme`: Checks to see if the parsed `%URI{}` struct contains a `:scheme`. Defaults to enabled
- `:host`: Checks to see if the parsed `%URI{}` struct contains a `:host`. Defaults to enabled
- `:valid_host`: Does a `:inet.gethostbyname/1` call to check if the host exists. This will do a network call.
Defaults to disabled
- `:path`: Checks to see if the parsed `%URI{}` struct contains a `:path`. Defaults to disabled
- `:http_regexp`: Tries to match URL to a regexp known to catch many unwanted URLs (see code). It only accepts
HTTP(S) and FTP schemes though. Defaults to disabled
The approach is not yet very satisfactory IMHO; if you have suggestions, Pull Requests are welcome :)
## Example:
iex> types = %{url: :string}
iex> params = %{url: "https://www.example.com/"}
iex> Ecto.Changeset.cast({%{}, types}, params, Map.keys(types))
...> |> validate_url(:url)
#Ecto.Changeset<action: nil, changes: %{url: "https://www.example.com/"}, errors: [], data: %{}, valid?: true>
iex> types = %{url: :string}
iex> params = %{url: "https://www.example.com/"}
iex> Ecto.Changeset.cast({%{}, types}, params, Map.keys(types))
...> |> validate_url(:url, checks: [:empty, :path, :scheme, :host])
#Ecto.Changeset<action: nil, changes: %{url: "https://www.example.com/"}, errors: [], data: %{}, valid?: true>
iex> types = %{url: :string}
iex> params = %{url: "some@invalid_url"}
iex> Ecto.Changeset.cast({%{}, types}, params, Map.keys(types))
...> |> validate_url(:url)
#Ecto.Changeset<action: nil, changes: %{url: "some@invalid_url"}, errors: [url: {"is not a valid url", [validation: :url]}], data: %{}, valid?: false>
iex> types = %{url: :string}
iex> params = %{url: "Just some random text"}
iex> Ecto.Changeset.cast({%{}, types}, params, Map.keys(types))
...> |> validate_url(:url)
#Ecto.Changeset<action: nil, changes: %{url: "Just some random text"}, errors: [url: {"is not a valid url", [validation: :url]}], data: %{}, valid?: false>
"""
import Ecto.Changeset
# Taken from here https://mathiasbynens.be/demo/url-regex
# credo:disable-for-next-line Credo.Check.Readability.MaxLineLength
@http_regex ~r/^(?:(?:https?|ftp):\/\/)(?:\S+(?::\S*)?@)?(?:(?!10(?:\.\d{1,3}){3})(?!127(?:\.\d{1,3}){3})(?!169\.254(?:\.\d{1,3}){2})(?!192\.168(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z\x{00a1}-\x{ffff}0-9]+-?)*[a-z\x{00a1}-\x{ffff}0-9]+)(?:\.(?:[a-z\x{00a1}-\x{ffff}0-9]+-?)*[a-z\x{00a1}-\x{ffff}0-9]+)*(?:\.(?:[a-z\x{00a1}-\x{ffff}]{2,})))(?::\d{2,5})?(?:\/[^\s]*)?$/ius
def validate_url(changeset, field, opts \\ []) do
validate_change(changeset, field, {:url, opts}, fn _, value ->
checks = Keyword.get(opts, :checks, [:parsable, :empty, :scheme, :host])
parsed = URI.parse(value)
case do_validate_url(value, parsed, checks) do
:ok -> []
:error -> [{field, {message(opts, "is not a valid url"), [validation: :url]}}]
end
end)
end
defp do_validate_url(value, parsed, checks) when is_list(checks) do
check_all = Enum.map(checks, fn check -> do_validate_url(value, parsed, check) end)
if Enum.member?(check_all, :error), do: :error, else: :ok
end
defp do_validate_url(value, _parsed, :parsable) do
case :http_uri.parse(String.to_charlist(value)) do
{:ok, _uri} -> :ok
{:error, _msg} -> :error
end
end
defp do_validate_url(value, _parsed, :http_regexp) do
case String.match?(value, @http_regex) do
true -> :ok
false -> :error
end
end
# Caution: this check does a network call and can be slow.
defp do_validate_url(_value, %URI{host: host}, :valid_host) do
case :inet.gethostbyname(String.to_charlist(host)) do
{:ok, _value} -> :ok
{:error, :nxdomain} -> :error
end
end
defp do_validate_url(_value, parsed, :empty) do
values = parsed |> Map.from_struct() |> Enum.map(fn {_key, val} -> blank?(val) end)
if Enum.member?(values, false), do: :ok, else: :error
end
defp do_validate_url(_value, %URI{path: path} = _parsed, :path) do
if blank?(path), do: :error, else: :ok
end
defp do_validate_url(_value, %URI{scheme: scheme} = _parsed, :scheme) do
if blank?(scheme), do: :error, else: :ok
end
defp do_validate_url(_value, %URI{host: host} = _parsed, :host) do
if blank?(host), do: :error, else: :ok
end
defp message(opts, key \\ :message, default) do
Keyword.get(opts, key, default)
end
@compile {:inline, blank?: 1}
def blank?(""), do: true
def blank?([]), do: true
def blank?(nil), do: true
def blank?({}), do: true
def blank?(%{} = map) when map_size(map) == 0, do: true
def blank?(_), do: false
end
|
lib/validators/url.ex
| 0.83825
| 0.579103
|
url.ex
|
starcoder
|
defmodule CommandedMessaging do
@moduledoc ~S"""
# Commanded Messaging
**Common macros for messaging in a Commanded application**
## Commands
The `Commanded.Command` macro creates an Ecto `embedded_schema` so you can take advantage of the well known `Ecto.Changeset` API.
defmodule BasicCreateAccount do
use Commanded.Command,
username: :string,
email: :string,
age: :integer
end
iex> BasicCreateAccount.new()
#Ecto.Changeset<action: nil, changes: %{}, errors: [], data: #BasicCreateAccount<>, valid?: true>
### Validation
defmodule CreateAccount do
use Commanded.Command,
username: :string,
email: :string,
age: :integer
def handle_validate(command) do
command
|> validate_required([:username, :email, :age])
|> validate_format(:email, ~r/@/)
|> validate_number(:age, greater_than: 12)
end
end
iex> CreateAccount.new()
#Ecto.Changeset<action: nil, changes: %{}, errors: [username: {"can't be blank", [validation: :required]}, email: {"can't be blank", [validation: :required]}, age: {"can't be blank", [validation: :required]}], data: #CreateAccount<>, valid?: false>
iex> CreateAccount.new(username: "chris", email: "<EMAIL>", age: 5)
#Ecto.Changeset<action: nil, changes: %{age: 5, email: "<EMAIL>", username: "chris"}, errors: [age: {"must be greater than %{number}", [validation: :number, kind: :greater_than, number: 12]}], data: #CreateAccount<>, valid?: false>
To create the actual command struct, use `Ecto.Changeset.apply_changes/1`
iex> command = CreateAccount.new(username: "chris", email: "<EMAIL>", age: 5)
iex> Ecto.Changeset.apply_changes(command)
%CreateAccount{age: 5, email: "<EMAIL>", username: "chris"}
> Note that `apply_changes` will not validate values.
## Events
Most events mirror the commands that produce them. So we make it easy to reduce the boilerplate in creating them with the `Commanded.Event` macro.
defmodule BasicAccountCreated do
use Commanded.Event,
from: CreateAccount
end
iex> command = CreateAccount.new(username: "chris", email: "<EMAIL>", age: 5)
iex> cmd = Ecto.Changeset.apply_changes(command)
iex> BasicAccountCreated.new(cmd)
%BasicAccountCreated{
age: 5,
email: "<EMAIL>",
username: "chris",
version: 1
}
### Extra Keys
There are times when we need keys defined on an event that aren't part of the originating command. We can add these very easily.
defmodule AccountCreatedWithExtraKeys do
use Commanded.Event,
from: CreateAccount,
with: [:date]
end
iex> command = CreateAccount.new(username: "chris", email: "<EMAIL>", age: 5)
iex> cmd = Ecto.Changeset.apply_changes(command)
iex> AccountCreatedWithExtraKeys.new(cmd, date: ~D[2019-07-25])
%AccountCreatedWithExtraKeys{
age: 5,
date: ~D[2019-07-25],
email: "<EMAIL>",
username: "chris",
version: 1
}
### Excluding Keys
And you may also want to drop some keys from your command.
defmodule AccountCreatedWithDroppedKeys do
use Commanded.Event,
from: CreateAccount,
with: [:date],
drop: [:email]
end
iex> command = CreateAccount.new(username: "chris", email: "<EMAIL>", age: 5)
iex> cmd = Ecto.Changeset.apply_changes(command)
iex> AccountCreatedWithDroppedKeys.new(cmd)
%AccountCreatedWithDroppedKeys{
age: 5,
date: nil,
username: "chris",
version: 1
}
### Versioning
You may have noticed that we provide a default version of `1`.
You can change the version of an event at anytime.
After doing so, you should define an upcast instance that knows how to transform older events into the latest version.
# This is for demonstration purposes only. You don't need to create a new event to version an existing one.
defmodule AccountCreatedVersioned do
use Commanded.Event,
version: 2,
from: CreateAccount,
with: [:date, :sex],
drop: [:email]
end
defimpl Commanded.Event.Upcaster, for: AccountCreatedWithDroppedKeys do
def upcast(%{version: 1} = event, _metadata) do
AccountCreatedVersioned.new(event, sex: "maybe", version: 2)
end
def upcast(event, _metadata), do: event
end
iex> command = CreateAccount.new(username: "chris", email: "<EMAIL>", age: 5)
iex> cmd = Ecto.Changeset.apply_changes(command)
iex> event = AccountCreatedWithDroppedKeys.new(cmd)
iex> Commanded.Event.Upcaster.upcast(event, %{})
%AccountCreatedVersioned{age: 5, date: nil, sex: "maybe", username: "chris", version: 2}
> Note that you won't normally call `upcast` manually. `Commanded` will take care of that for you.
## Command Dispatch Validation
The `Commanded.CommandDispatchValidation` macro will inject the `validate_and_dispatch` function into your `Commanded.Application`.
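A sketch of how this is typically wired into an application module (the
module and app names here are hypothetical):
defmodule MyApp.App do
use Commanded.Application, otp_app: :my_app
use Commanded.CommandDispatchValidation
end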
"""
end
|
lib/commanded_messaging.ex
| 0.638159
| 0.403978
|
commanded_messaging.ex
|
starcoder
|
defmodule Mix.Tasks.FillExchange do
@moduledoc """
Module defining a mix task to fill an Exchange with random orders.
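Example invocation (assuming the exchange's HTTP API is listening on
localhost:4000, as this task expects):
mix fill_exchange --mode --count 50 --ticker AUXLND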
"""
use Mix.Task
@shortdoc "Populate exchange"
def run(args) do
Application.ensure_all_started(:hackney)
switches = [help: :boolean, mode: :boolean, count: :integer, ticker: :atom]
aliases = [h: :help, m: :mode, c: :count, t: :ticker]
default = [help: false, mode: false, count: 100, ticker: :AUXLND]
opts =
OptionParser.parse(args, switches: switches, aliases: aliases)
|> Tuple.to_list()
|> Enum.flat_map(fn x -> x end)
opts = Keyword.merge(default, opts)
help = Keyword.get(opts, :help)
mode = Keyword.get(opts, :mode)
count = Keyword.get(opts, :count)
ticker = Keyword.get(opts, :ticker)
ticker =
if is_binary(ticker) do
String.to_atom(ticker)
else
ticker
end
if help do
show_help()
else
place_orders(ticker, mode, count)
end
end
def place_orders(_ticker, true, 0), do: :ok
def place_orders(ticker, true, count) do
order = Exchange.Utils.random_order(ticker)
order = %{
order
| exp_time: order.exp_time + 10_000_000_000
}
url = "http://localhost:4000/ticker/#{ticker}/traders/#{order.trader_id}/orders"
IO.puts("Trader #{inspect(order.trader_id)} placed order #{inspect(order.order_id)}")
HTTPoison.post(
url,
order |> Jason.encode!(),
[{"Content-Type", "application/json"}]
)
place_orders(ticker, true, count - 1)
end
def place_orders(ticker, false, _count) do
order = Exchange.Utils.random_order(ticker)
order = %{
order
| exp_time: order.exp_time + 10_000_000_000
}
url = "http://localhost:4000/ticker/#{ticker}/traders/#{order.trader_id}/orders"
IO.puts("Trader #{inspect(order.trader_id)} placed order #{inspect(order.order_id)}")
HTTPoison.post(
url,
order |> Jason.encode!(),
[{"Content-Type", "application/json"}]
)
place_orders(ticker, false, 1)
end
def show_help do
IO.puts("""
Fill Exchange task:
This task serves to populate an exchange. It creates HTTP requests with random orders; one can use it to insert orders continuously or to insert a fixed number of orders.
--help or -h: show help.
--mode or -m: runs the task in fixed number mode, default false.
--count or -c: sets the number of orders to be inserted, default 100.
--ticker or -t: sets the name of the ticker, default AUXLND.
""")
end
end
|
lib/tasks/fill_exchange.ex
| 0.60964
| 0.404213
|
fill_exchange.ex
|
starcoder
|
defmodule Html5ever do
@moduledoc """
This is an HTML parser written in Rust.
The project provides a NIF - Native Implemented Function.
It works on top of [a parser of the same name](https://github.com/servo/html5ever)
from the Servo project.
By default this lib will try to use a precompiled NIF
from the GitHub releases page. This way you don't need
to have the Rust toolchain installed.
If no precompiled file is found and the Mix env is
production, an error is raised.
You can force the compilation to occur by setting the
value of the `HTML5EVER_BUILD` environment variable to
"true" or "1". Alternatively you can also set the application
env `:build_from_source` to `true` in order to force the build:
config :html5ever, Html5ever, build_from_source: true
This project is possible thanks to [Rustler](https://hexdocs.pm/rustler).
"""
@doc """
Parses an HTML document from a string.
This returns a list of tuples representing the HTML tree.
## Example
iex> Html5ever.parse("<!doctype html><html><body><h1>Hello world</h1></body></html>")
{:ok,
[
{:doctype, "html", "", ""},
{"html", [], [{"head", [], []}, {"body", [], [{"h1", [], ["Hello world"]}]}]}
]}
"""
def parse(html) do
parse_dirty(html)
end
@doc """
Parses an HTML document from a string and returns a map.
The map contains the document structure.
## Example
iex> Html5ever.flat_parse("<!doctype html><html><body><h1>Hello world</h1></body></html>")
{:ok,
%{
nodes: %{
0 => %{id: 0, parent: nil, type: :document},
1 => %{id: 1, parent: 0, type: :doctype},
2 => %{
attrs: [],
children: [3, 4],
id: 2,
name: "html",
parent: 0,
type: :element
},
3 => %{
attrs: [],
children: [],
id: 3,
name: "head",
parent: 2,
type: :element
},
4 => %{
attrs: [],
children: [5],
id: 4,
name: "body",
parent: 2,
type: :element
},
5 => %{
attrs: [],
children: [6],
id: 5,
name: "h1",
parent: 4,
type: :element
},
6 => %{contents: "Hello world", id: 6, parent: 5, type: :text}
},
root: 0
}}
"""
def flat_parse(html) do
flat_parse_dirty(html)
end
defp parse_dirty(html) do
case Html5ever.Native.parse_sync(html) do
{:html5ever_nif_result, :ok, result} ->
{:ok, result}
{:html5ever_nif_result, :error, err} ->
{:error, err}
end
end
defp flat_parse_dirty(html) do
case Html5ever.Native.flat_parse_sync(html) do
{:html5ever_nif_result, :ok, result} ->
{:ok, result}
{:html5ever_nif_result, :error, err} ->
{:error, err}
end
end
end
|
lib/html5ever.ex
| 0.746139
| 0.510252
|
html5ever.ex
|
starcoder
|
defmodule Nx.Heatmap do
@moduledoc """
Provides a heatmap that is printed using ANSI colors
in the terminal.
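Heatmaps are typically created via `Nx.to_heatmap/2` rather than by building
the struct directly:
Nx.iota({3, 3}) |> Nx.to_heatmap()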
"""
@doc false
defstruct [:tensor, opts: []]
@behaviour Access
@impl true
def fetch(%Nx.Heatmap{tensor: tensor} = hm, value) do
case Access.fetch(tensor, value) do
{:ok, %Nx.Tensor{shape: {}} = tensor} -> {:ok, tensor}
{:ok, tensor} -> {:ok, put_in(hm.tensor, tensor)}
:error -> :error
end
end
@impl true
def get_and_update(hm, key, fun) do
{get, tensor} = Access.get_and_update(hm.tensor, key, fun)
{get, put_in(hm.tensor, tensor)}
end
@impl true
def pop(hm, key) do
{pop, tensor} = Access.pop(hm.tensor, key)
{pop, put_in(hm.tensor, tensor)}
end
defimpl Inspect do
import Inspect.Algebra
@mono265 Enum.to_list(232..255)
def inspect(%{tensor: tensor, opts: heatmap_opts}, opts) do
%{shape: shape, names: names, type: type} = tensor
open = color("[", :list, opts)
sep = color(",", :list, opts)
close = color("]", :list, opts)
data = data(tensor, heatmap_opts, opts, {open, sep, close})
type = color(Nx.Type.to_string(type), :atom, opts)
shape = Nx.Shape.to_algebra(shape, names, open, close)
color("#Nx.Heatmap<", :map, opts)
|> concat(nest(concat([line(), type, shape, line(), data]), 2))
|> concat(color("\n>", :map, opts))
end
defp data(tensor, heatmap_opts, opts, doc) do
whitespace = Keyword.get(heatmap_opts, :ansi_whitespace, "\u3000")
{entry_fun, line_fun} =
if Keyword.get_lazy(heatmap_opts, :ansi_enabled, &IO.ANSI.enabled?/0) do
scale = length(@mono265) - 1
entry_fun = fn range ->
index = range |> Kernel.*(scale) |> round()
color = Enum.fetch!(@mono265, index)
[IO.ANSI.color_background(color), whitespace]
end
{entry_fun, &IO.iodata_to_binary([&1 | IO.ANSI.reset()])}
else
{&Integer.to_string(&1 |> Kernel.*(9) |> round()), &IO.iodata_to_binary/1}
end
render(tensor, opts, doc, entry_fun, line_fun)
end
defp render(%{shape: {size}} = tensor, _opts, _doc, entry_fun, line_fun) do
data = Nx.to_flat_list(tensor)
{data, [], min, max} = take_min_max(data, size)
base = max - min
data
|> Enum.map(fn elem -> entry_fun.((elem - min) / base) end)
|> line_fun.()
end
defp render(%{shape: shape} = tensor, opts, doc, entry_fun, line_fun) do
{dims, [rows, cols]} = shape |> Tuple.to_list() |> Enum.split(-2)
limit = opts.limit
list_opts = if limit == :infinity, do: [], else: [limit: rows * cols * limit + 1]
data = Nx.to_flat_list(tensor, list_opts)
{data, _rest, _limit} = chunk(dims, data, limit, {rows, cols, entry_fun, line_fun}, doc)
data
end
defp take_min_max([head | tail], count),
do: take_min_max(tail, count - 1, head, head, [head])
defp take_min_max(rest, 0, min, max, acc),
do: {Enum.reverse(acc), rest, min, max}
defp take_min_max([head | tail], count, min, max, acc),
do: take_min_max(tail, count - 1, min(min, head), max(max, head), [head | acc])
defp chunk([], acc, limit, {rows, cols, entry_fun, line_fun}, _docs) do
{acc, rest, min, max} = take_min_max(acc, rows * cols)
base = max - min
{[], doc} =
Enum.reduce(1..rows, {acc, empty()}, fn _, {acc, doc} ->
{line, acc} =
Enum.map_reduce(1..cols, acc, fn _, [elem | acc] ->
{entry_fun.((elem - min) / base), acc}
end)
doc = concat(doc, concat(line(), line_fun.(line)))
{acc, doc}
end)
if limit == :infinity, do: {doc, rest, limit}, else: {doc, rest, limit - 1}
end
defp chunk([dim | dims], data, limit, rcw, {open, sep, close} = docs) do
{acc, rest, limit} =
chunk_each(dim, data, [], limit, fn chunk, limit ->
chunk(dims, chunk, limit, rcw, docs)
end)
doc =
if(dims == [], do: open, else: concat(open, line()))
|> concat(concat(Enum.intersperse(acc, concat(sep, line()))))
|> nest(2)
|> concat(line())
|> concat(close)
{doc, rest, limit}
end
defp chunk_each(0, data, acc, limit, _fun) do
{Enum.reverse(acc), data, limit}
end
defp chunk_each(_dim, data, acc, 0, _fun) do
{Enum.reverse(["..." | acc]), data, 0}
end
defp chunk_each(dim, data, acc, limit, fun) do
{doc, rest, limit} = fun.(data, limit)
chunk_each(dim - 1, rest, [doc | acc], limit, fun)
end
end
end
|
lib/nx/heatmap.ex
| 0.78374
| 0.613352
|
heatmap.ex
|
starcoder
|
defmodule PartitionSupervisor do
@moduledoc """
A supervisor that starts multiple partitions of the same child.
Certain processes may become bottlenecks in large systems.
If those processes can have their state trivially partitioned,
in a way there is no dependency between them, then they can use
the `PartitionSupervisor` to create multiple isolated and
independent partitions.
Once the `PartitionSupervisor` starts, you can dispatch to its
children using `{:via, PartitionSupervisor, {name, key}}`, where
`name` is the name of the `PartitionSupervisor` and key is used
for routing.
## Example
The `DynamicSupervisor` is a single process responsible for starting
other processes. In some applications, the `DynamicSupervisor` may
become a bottleneck. To address this, you can start multiple instances
of the `DynamicSupervisor` and then pick a "random" instance to start
the child on.
Instead of:
children = [
{DynamicSupervisor, name: MyApp.DynamicSupervisor}
]
Supervisor.start_link(children, strategy: :one_for_one)
and:
DynamicSupervisor.start_child(MyApp.DynamicSupervisor, {Agent, fn -> %{} end})
You can do this:
children = [
{PartitionSupervisor,
child_spec: DynamicSupervisor,
name: MyApp.DynamicSupervisors}
]
Supervisor.start_link(children, strategy: :one_for_one)
and then:
DynamicSupervisor.start_child(
{:via, PartitionSupervisor, {MyApp.DynamicSupervisors, self()}},
{Agent, fn -> %{} end}
)
In the code above, we start a partition supervisor that will by default
start a dynamic supervisor for each core in your machine. Then, instead
of calling the `DynamicSupervisor` by name, you call it through the
partition supervisor using the `{:via, PartitionSupervisor, {name, key}}`
format. We picked `self()` as the routing key, which means each process
will be assigned one of the existing dynamic supervisors. See `start_link/1`
to see all options supported by the `PartitionSupervisor`.
## Implementation notes
The `PartitionSupervisor` requires a name as an atom to be given on start,
as it uses an ETS table to keep all of the partitions. Under the hood,
the `PartitionSupervisor` generates a child spec for each partition and
then acts as a regular supervisor. The id of each child spec is the
partition number.
For routing, two strategies are used. If `key` is an integer, it is routed
using `rem(abs(key), partitions)`. Otherwise it uses `:erlang.phash2(key, partitions)`.
The particular routing may change in the future, and therefore cannot
be relied on. If you want to retrieve a particular PID for a certain key,
you can use `GenServer.whereis({:via, PartitionSupervisor, {name, key}})`.
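For example, with 4 partitions an integer key is routed like this:
iex> rem(abs(11), 4)
3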
"""
@behaviour Supervisor
@type name :: atom()
@doc false
def child_spec(opts) when is_list(opts) do
%{
id: Keyword.get(opts, :name, PartitionSupervisor),
start: {PartitionSupervisor, :start_link, [opts]},
type: :supervisor
}
end
@doc """
Starts a partition supervisor with the given options.
This function is typically not invoked directly, instead it is invoked
when using a `PartitionSupervisor` as a child of another supervisor:
children = [
{PartitionSupervisor, child_spec: SomeChild, name: MyPartitionSupervisor}
]
If the supervisor is successfully spawned, this function returns
`{:ok, pid}`, where `pid` is the PID of the supervisor. If the given name
for the partition supervisor is already assigned to a process,
the function returns `{:error, {:already_started, pid}}`, where `pid`
is the PID of that process.
Note that a supervisor started with this function is linked to the parent
process and exits not only on crashes but also if the parent process exits
with `:normal` reason.
## Options
* `:name` - an atom representing the name of the partition supervisor.
* `:partitions` - a positive integer with the number of partitions.
Defaults to `System.schedulers_online()` (typically the number of cores).
* `:strategy` - the restart strategy option, defaults to `:one_for_one`.
You can learn more about strategies in the `Supervisor` module docs.
* `:max_restarts` - the maximum number of restarts allowed in
a time frame. Defaults to `3`.
* `:max_seconds` - the time frame in which `:max_restarts` applies.
Defaults to `5`.
* `:with_arguments` - a two-argument anonymous function that allows
the partition to be given to the child starting function. See the
`:with_arguments` section below.
## `:with_arguments`
Sometimes you want each partition to know its assigned partition number.
This can be done with the `:with_arguments` option. This function receives
the list of arguments of the child specification with the partition and
it must return a new list of arguments.
For example, most processes are started by calling `start_link(opts)`,
where `opts` is a keyword list. You could attach the partition to the
keyword list like this:
with_arguments: fn [opts], partition ->
[Keyword.put(opts, :partition, partition)]
end
"""
def start_link(opts) do
name = opts[:name]
unless name && is_atom(name) do
raise ArgumentError,
"the :name option must be given to PartitionSupervisor as an atom, got: #{inspect(name)}"
end
{child_spec, opts} = Keyword.pop(opts, :child_spec)
unless child_spec do
raise ArgumentError, "the :child_spec option must be given to PartitionSupervisor"
end
{partitions, opts} = Keyword.pop(opts, :partitions, System.schedulers_online())
unless is_integer(partitions) and partitions >= 1 do
raise ArgumentError,
"the :partitions option must be a positive integer, got: #{inspect(partitions)}"
end
{with_arguments, opts} = Keyword.pop(opts, :with_arguments, fn args, _partition -> args end)
unless is_function(with_arguments, 2) do
raise ArgumentError,
"the :with_arguments option must be a function that receives two arguments, " <>
"the current call arguments and the partition, got: #{inspect(with_arguments)}"
end
%{start: {mod, fun, args}} = map = Supervisor.child_spec(child_spec, [])
modules = map[:modules] || [mod]
children =
for partition <- 0..(partitions - 1) do
args = with_arguments.(args, partition)
unless is_list(args) do
raise "the call to the function in :with_arguments must return a list, got: #{inspect(args)}"
end
start = {__MODULE__, :start_child, [mod, fun, args, name, partition]}
Map.merge(map, %{id: partition, start: start, modules: modules})
end
{init_opts, start_opts} = Keyword.split(opts, [:strategy, :max_seconds, :max_restarts])
Supervisor.start_link(__MODULE__, {name, partitions, children, init_opts}, start_opts)
end
@doc false
def start_child(mod, fun, args, name, partition) do
case apply(mod, fun, args) do
{:ok, pid} ->
:ets.insert(name, {partition, pid})
{:ok, pid}
{:ok, pid, info} ->
:ets.insert(name, {partition, pid})
{:ok, pid, info}
other ->
other
end
end
@impl true
def init({name, partitions, children, init_opts}) do
:ets.new(name, [:set, :named_table, :protected, read_concurrency: true])
:ets.insert(name, {:partitions, partitions})
Supervisor.init(children, Keyword.put_new(init_opts, :strategy, :one_for_one))
end
@doc """
Returns the number of partitions for the partition supervisor.
"""
@doc since: "1.14.0"
@spec partitions(name()) :: pos_integer()
def partitions(supervisor) when is_atom(supervisor) do
:ets.lookup_element(supervisor, :partitions, 2)
end
@doc """
Returns a list with information about all children.
This function returns a list of tuples containing:
* `id` - the partition number
* `child` - the PID of the corresponding child process or the
atom `:restarting` if the process is about to be restarted
* `type` - `:worker` or `:supervisor` as defined in the child
specification
* `modules` - as defined in the child specification
"""
@doc since: "1.14.0"
@spec which_children(name()) :: [
# Inlining [module()] | :dynamic here because :supervisor.modules() is not exported
{:undefined, pid | :restarting, :worker | :supervisor, [module()] | :dynamic}
]
def which_children(supervisor) when is_atom(supervisor) do
Supervisor.which_children(supervisor)
end
@doc """
Returns a map containing count values for the supervisor.
The map contains the following keys:
* `:specs` - the number of partitions (children processes)
* `:active` - the count of all actively running child processes managed by
this supervisor
* `:supervisors` - the count of all supervisors whether or not the child
process is still alive
* `:workers` - the count of all workers, whether or not the child process
is still alive
"""
@doc since: "1.14.0"
@spec count_children(name()) :: %{
specs: non_neg_integer,
active: non_neg_integer,
supervisors: non_neg_integer,
workers: non_neg_integer
}
def count_children(supervisor) when is_atom(supervisor) do
Supervisor.count_children(supervisor)
end
@doc """
Synchronously stops the given partition supervisor with the given `reason`.
It returns `:ok` if the supervisor terminates with the given
reason. If it terminates with another reason, the call exits.
This function keeps OTP semantics regarding error reporting.
If the reason is any other than `:normal`, `:shutdown` or
`{:shutdown, _}`, an error report is logged.
"""
@doc since: "1.14.0"
@spec stop(name(), reason :: term, timeout) :: :ok
def stop(supervisor, reason \\ :normal, timeout \\ :infinity) when is_atom(supervisor) do
Supervisor.stop(supervisor, reason, timeout)
end
## Via callbacks
@doc false
def whereis_name({name, key}) when is_atom(name) do
partitions = partitions(name)
partition =
if is_integer(key), do: rem(abs(key), partitions), else: :erlang.phash2(key, partitions)
:ets.lookup_element(name, partition, 2)
end
@doc false
def send(name_key, msg) do
Kernel.send(whereis_name(name_key), msg)
end
@doc false
def register_name(_, _) do
raise "{:via, PartitionSupervisor, _} cannot be given on registration"
end
@doc false
def unregister_name(_, _) do
raise "{:via, PartitionSupervisor, _} cannot be given on unregistration"
end
end
|
lib/elixir/lib/partition_supervisor.ex
| 0.928943
| 0.719692
|
partition_supervisor.ex
|
starcoder
|
defmodule TtrCore.Players.Player do
@moduledoc false
alias TtrCore.Cards.TicketCard
alias TtrCore.Players
defstruct [
id: 1,
name: "anonymous",
pieces: 45,
tickets: [],
tickets_buffer: [],
trains: [],
trains_selected: 0,
routes: [],
]
@type count :: integer()
@type t :: %__MODULE__{
id: Players.user_id(),
name: String.t,
pieces: count(),
tickets: [TicketCard.t],
tickets_buffer: [TicketCard.t],
trains: [TrainCard.t],
trains_selected: count(),
routes: [Route.t]
}
@spec add_route(t, Route.t) :: t
def add_route(%{routes: existing, pieces: pieces} = player, {_, _, cost, _} = new) do
%{player | routes: [new|existing], pieces: pieces - cost}
end
@spec add_trains(t, [TrainCard.t]) :: t
def add_trains(%{trains: existing} = player, new) do
%{player | trains: new ++ existing}
end
@spec add_trains_on_turn(t, [TrainCard.t]) :: t
def add_trains_on_turn(%{trains: existing, trains_selected: selected_count} = player, new) do
%{player | trains: new ++ existing, trains_selected: Enum.count(new) + selected_count}
end
@spec remove_trains(t, [TrainCard.t]) :: {t, [TrainCard.t]}
def remove_trains(%{trains: existing} = player, trains) do
remaining = existing -- trains
updated_player = %{player | trains: remaining}
{updated_player, remaining}
end
@spec add_tickets(t, [TicketCard.t]) :: t
def add_tickets(%{tickets: existing} = player, new) do
%{player | tickets: new ++ existing}
end
@spec add_tickets_to_buffer(t, [TicketCard.t]) :: t
def add_tickets_to_buffer(player, new) do
%{player | tickets_buffer: new}
end
@spec remove_tickets_from_buffer(t, [TicketCard.t]) :: {t, [TicketCard.t]}
def remove_tickets_from_buffer(player, selected) do
remaining = player.tickets_buffer -- selected
updated_player = %{player | tickets_buffer: []}
{updated_player, remaining}
end
@spec reset_selections(t) :: t
def reset_selections(player) do
%{player | trains_selected: 0}
end
@spec out_of_stock?(t) :: boolean()
def out_of_stock?(%{pieces: pieces}) do
pieces <= 2
end
end
|
lib/ttr_core/players/player.ex
| 0.744656
| 0.425187
|
player.ex
|
starcoder
|
defmodule Helpers do
use Bitwise
@doc """
Function that takes two lists of bytes of equal size
Returns a list of the bitwise XOR of the two arguments
### Examples
iex> Helpers.my_xor([1], [1])
[0]
iex> Helpers.my_xor([0], [1])
[1]
"""
@spec my_xor([byte], [byte]) :: [byte]
def my_xor(a, b) when is_list(a) do
Enum.zip(a, b)
|> Enum.map(fn {x, y} -> Bitwise.bxor(x, y) end)
end
@spec my_xor(binary, [byte]) :: [byte]
def my_xor(char, ciphertext) when is_binary(char) do
:binary.copy(char, length(ciphertext))
|> :binary.bin_to_list()
|> my_xor(ciphertext)
end
@doc """
Function that calculates a score that a given plaintext is in English.
Iterates through the plaintext and adds up the result of where the character lies on the frequency map
### Examples
iex> Helpers.score("Dogs are better than cats") > Helpers.score("alfmlk20")
true
"""
@spec score(binary) :: integer
def score(plaintext) do
plaintext
|> :binary.bin_to_list()
|> Enum.reduce(0, fn x, score -> score + Map.get(frequency_map(), String.upcase(<<x>>), 0) end)
end
@doc """
Counts the number of set bits in a given integer, treated as a single byte
### Examples
iex> Helpers.count_bits(0)
0
iex> Helpers.count_bits(42) ## 00101010
3
"""
@spec count_bits(integer) :: integer
def count_bits(byte) do
bits = for <<(bit::1 <- <<byte>>)>>, do: bit
Enum.sum(bits)
end
@doc """
A parallel map
Taken from <NAME>' Programming Elixir
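Results are returned in the input order:
### Examples
iex> Helpers.pmap([1, 2, 3], &(&1 * 2))
[2, 4, 6]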
"""
def pmap(collection, fun) do
me = self()
collection
|> Enum.map(fn elem ->
spawn_link(fn -> send(me, {self(), fun.(elem)}) end)
end)
|> Enum.map(fn pid ->
receive do
{^pid, result} -> result
end
end)
end
def transpose([[] | _]), do: []
@doc """
Transposes a list of lists
### Examples
iex> Helpers.transpose([[1,2,3], [4,5,6]])
[[1,4],[2,5],[3,6]]
"""
@spec transpose([[any]]) :: [[any]]
def transpose(lists) do
[Enum.map(lists, &hd/1) | transpose(Enum.map(lists, &tl/1))]
end
defp frequency_map do
# Taken from http://www.math.cornell.edu/~mec/2003-2004/cryptography/subs/frequencies.html
# Added spaces with a weighted value
%{
"E" => 21912,
"T" => 16587,
"A" => 14810,
"O" => 14003,
"I" => 13318,
"N" => 12666,
"S" => 11450,
"R" => 10977,
"H" => 10795,
"D" => 7874,
"L" => 7253,
"U" => 5246,
"C" => 4943,
"M" => 4761,
"F" => 4200,
"Y" => 3853,
"W" => 3819,
"G" => 3693,
"P" => 3316,
"B" => 2715,
"V" => 2019,
"K" => 1257,
"X" => 315,
"Q" => 205,
"J" => 188,
"Z" => 128,
" " => 16000
}
end
end
|
lib/helpers.ex
| 0.786336
| 0.558688
|
helpers.ex
|
starcoder
|
defmodule PoxTool do
defmodule Options do
defstruct [
palette: 256,
depth: { [0, 1, 2, 3, 4, 5, 6, 7], 128 }
]
@type palette_size :: 4 | 16 | 256
@type depth_mode :: 0..7
@type depth_bits :: 32 | 128
@type t :: %__MODULE__{
palette: nil | palette_size,
depth: { [depth_mode], depth_bits }
}
end
@type packed_pixel :: { depth :: binary, colour :: binary }
@type packed_row :: [packed_pixel]
@type packed_face :: [packed_row]
@type face :: :left | :right | :bottom | :top | :front | :back
@spec all_faces(value :: term) :: [{ face, value :: term }]
def all_faces(value), do: [left: value, right: value, bottom: value, top: value, front: value, back: value]
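# For example: all_faces(0)
# #=> [left: 0, right: 0, bottom: 0, top: 0, front: 0, back: 0]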
@spec pack(PoxTool.Poxel.t, keyword(PoxTool.Options.t)) :: [{ face, packed_face }]
def pack(poxel = %PoxTool.Poxel{}, opts \\ []), do: Map.from_struct(poxel) |> Enum.map(fn { name, face } -> { name, prepare_face(face, opts[:palette] || %{}) } end) |> pack_face([{ :size, PoxTool.Poxel.size(poxel) }|opts])
@tab " "
def save(packed_poxel, { width, height, depth }, path, palettes \\ []) do
File.mkdir(path)
poxel_name = path |> Path.basename |> Path.rootname
palettes = case palettes do
nil -> []
palettes when is_list(palettes) ->
Enum.map(palettes, fn { name, { palette, size } } ->
palette_name = [poxel_name, ?-, to_string(name), "-palette"]
palette_dir = [to_string(name), "-palette.png"]
save_palette(palette, path, palette_dir, size)
{ name, { palette_name, palette_dir } }
end)
{ palette, size } ->
palette_name = [poxel_name, "-palette"]
palette_dir = ["palette.png"]
save_palette(palette, path, palette_dir, size)
all_faces({ palette_name, palette_dir })
end
blob = [
~s[(poxel "], poxel_name, ~s["\n],
@tab, ~s[(size: ], to_string(width), ?\s, to_string(height), ?\s, to_string(depth), ~s[)\n],
Enum.reduce(packed_poxel, [], fn { name, face }, acc ->
blob = [save_face(face, path, name, poxel_name), @tab, ")\n"]
blob = case palettes[name] do
nil -> blob
{ palette_name, palette_dir } -> [[@tab, @tab, ~s[(palette: (texture "], palette_name, ~s[" :nearest (dir: "], palette_dir, ~s[") (stream: "palette")))\n]]|blob]
end
[[@tab, ?(, to_string(name), ":\n"|blob]|acc]
end),
?)
]
File.write!(Path.join(path, poxel_name <> ".poxel"), blob)
end
defp save_palette(palette, path, palette_dir, size) do
count = map_size(palette)
fill = (size - count) * 8 * 4
file = File.open!(Path.join(path, palette_dir), [:write])
png = :png.create(%{ size: { size, 1 }, mode: { :rgba, 8 }, file: file })
:png.append(png, { :row, (palette |> Enum.sort(fn { _, a }, { _, b } -> a < b end) |> Enum.map(fn { { r, g, b, a }, _ } -> <<round(r * 255) :: size(8), round(g * 255) :: size(8), round(b * 255) :: size(8), round(a * 255) :: size(8)>> end) |> Enum.into(<<>>)) <> <<0 :: size(fill)>> })
:ok = :png.close(png)
:ok = File.close(file)
end
defp save_face(face, path, name, poxel_name) do
size = PoxTool.Poxel.face_size(face)
{ depth_bits, colour_bits } = packed_bit_size(face)
depth_format = pixel_format(depth_bits)
    colour_format = pixel_format(colour_bits)
file = File.open!(Path.join(path, "#{name}-depth.png"), [:write])
png = :png.create(%{ size: size, mode: depth_format, file: file })
Enum.each(face, fn row ->
:png.append(png, { :row, Enum.map(row, &elem(&1, 0)) |> Enum.into(<<>>) })
end)
:ok = :png.close(png)
:ok = File.close(file)
file = File.open!(Path.join(path, "#{name}-colour.png"), [:write])
png = :png.create(%{ size: size, mode: colour_format, file: file })
Enum.each(face, fn row ->
:png.append(png, { :row, Enum.map(row, &elem(&1, 1)) |> Enum.into(<<>>) })
end)
:ok = :png.close(png)
:ok = File.close(file)
[
[@tab, @tab, ~s[(depth: (texture "], poxel_name, ?-, to_string(name), ~s[-depth" :nearest (dir: "], to_string(name), ~s[-depth.png") (stream: "depth32")))\n]],
[@tab, @tab, ~s[(colour: (texture "], poxel_name, ?-, to_string(name), ~s[-colour" :nearest (dir: "], to_string(name), ~s[-colour.png") (stream: "colour], to_string(colour_bits), ~s[")))\n]]
]
end
defp pixel_format(8), do: { :grayscale, 8 }
defp pixel_format(32), do: { :rgba, 8 }
defp pixel_format(64), do: { :rgba, 16 }
defp pixel_format(128), do: { :rgba, 32 }
defp packed_bit_size([]), do: { 0, 0 }
  defp packed_bit_size([[{ depth, colour }|_]|_]), do: { bit_size(depth), bit_size(colour) }
defp packed_bit_size([_|t]), do: packed_bit_size(t)
defp pack_face(faces, opts, packed \\ [])
  defp pack_face([{ name, { _palette, _max_blocks, _max_depth, rows } }|faces], opts, packed) do
options = opts[name] || %PoxTool.Options{}
size = case { name, opts[:size] } do
{ face, { x, _, _ } } when face in [:left, :right] -> x
{ face, { _, y, _ } } when face in [:bottom, :top] -> y
{ face, { _, _, z } } when face in [:front, :back] -> z
end
rows = Enum.map(rows, fn row ->
Enum.map(row, fn segments ->
{ depth, colour } = pack_pixel(options.palette, segments, options.depth, size)
{ _, size } = options.depth
{ pad(depth, size), pack_palette(colour, size) |> pad(size) }
end)
end)
pack_face(faces, opts, [{ name, rows }|packed])
end
defp pack_face([], _, packed), do: packed
defp pack_depth_header(transparent, mode \\ 0), do: <<if(transparent, do: 1, else: 0) :: size(1), mode :: size(3), 0 :: size(4)>>
defp pack_depth32_single(depth), do: <<depth :: little-size(24)>>
defp pack_depth_accum(pixel, size, palette, acc \\ { <<>>, <<>> }, n \\ 0)
defp pack_depth_accum([{ { depth, length }, index, _ }|segments], size, palette, { dacc, pacc }, n) when (depth - n) <= 0xf and length <= 0xf do
pack_depth_accum(segments, size, palette, { <<dacc :: bitstring, (depth - n) :: size(4), length :: size(4)>>, <<pacc :: bitstring, pack_palette_index(index, palette) :: bitstring>> }, depth + length)
end
defp pack_depth_accum([{ { depth, nil }, index, material }], size, palette, acc, n), do: pack_depth_accum([{ { depth, size - depth }, index, material }], size, palette, acc, n)
defp pack_depth_accum([], _, _, acc, _), do: acc
#TODO: handle when length > 0xf (breaks it up over multiple chunks)
defp pack_depth_blocks(pixel, size, palette, acc \\ { <<>>, <<>> }, n \\ 0)
defp pack_depth_blocks([{ { _, 0 }, _, _ }|segments], size, palette, acc, n) do
pack_depth_blocks(segments, size, palette, acc, n)
end
defp pack_depth_blocks([{ { depth, nil }, index, material }], size, palette, acc, n) when depth <= n do
pack_depth_blocks([{ { depth, size - n }, index, material }], size, palette, acc, n)
end
defp pack_depth_blocks([{ { depth, length }, index, material }|segments], size, palette, { dacc, pacc }, n) when depth <= n do
pack_depth_blocks([{ { depth + 1, length - 1 }, index, material }|segments], size, palette, { <<dacc :: bitstring, 1 :: size(1)>>, <<pacc :: bitstring, pack_palette_index(index, palette) :: bitstring>> }, n + 1)
end
defp pack_depth_blocks(segments = [_|_], size, palette, { dacc, pacc }, n) when n < size do
pack_depth_blocks(segments, size, palette, { <<dacc :: bitstring, 0 :: size(1)>>, <<pacc :: bitstring, pack_palette_index(0, palette) :: bitstring>> }, n + 1)
end
  defp pack_depth_blocks([], _size, _palette, acc, _), do: acc
defp pack_palette_index(index, size) when index < size do
bits = size |> Itsy.Bit.mask_lower_power_of_2 |> Itsy.Bit.count
<<index :: size(bits)>>
end
defp pack_palette(palette, size) when bit_size(palette) < size, do: palette
defp pack_palette(palette, size) do
<<sequence :: bitstring-size(size), excess :: bitstring>> = palette
true = packed_palette_repeats?(sequence, excess) # TODO: throw custom exception
sequence
end
defp packed_palette_repeats?(sequence, palette) do
size = bit_size(sequence)
case palette do
<<^sequence :: bitstring-size(size), excess :: bitstring>> -> packed_palette_repeats?(sequence, excess)
palette when bit_size(palette) < bit_size(sequence) ->
size = bit_size(palette)
case sequence do
<<^palette :: bitstring-size(size), _ :: bitstring>> -> true
_ -> false
end
_ -> false
end
end
defp pad(bits, size, fill \\ 0) do
size = size - bit_size(bits)
<<bits :: bitstring, fill :: size(size)>>
end
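  # Example: pad(<<1::size(4)>>, 8) returns <<16>>, i.e. <<1::size(4), 0::size(4)>>.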
defp pack_pixel(_, [], { _, 32 }, _) do
{ <<pack_depth_header(true) :: bitstring, pack_depth32_single(0) :: bitstring>>, <<>> }
end
  defp pack_pixel(palette, [{ { depth, nil }, index, _ }], { [mode|_], 32 }, _) when mode in [0, 3] and depth <= 0xffffff do
{ <<pack_depth_header(false, mode) :: bitstring, pack_depth32_single(depth) :: bitstring>>, pack_palette_index(index, palette) }
end
defp pack_pixel(palette, pixel, { [mode|modes], bits }, size) when mode in [1, 4] do
{ depth, colour } = pack_depth_accum(pixel, size, palette)
if bit_size(depth) <= (bits - 8) do
{ <<pack_depth_header(false, mode) :: bitstring, depth :: bitstring>>, colour }
else
pack_pixel(palette, pixel, { modes, bits }, size)
end
end
defp pack_pixel(palette, pixel, { [mode|modes], bits }, size) when mode in [2, 5] do
{ depth, colour } = pack_depth_blocks(pixel, size, palette)
if bit_size(depth) <= (bits - 8) do
{ <<pack_depth_header(false, mode) :: bitstring, depth :: bitstring>>, colour }
else
pack_pixel(palette, pixel, { modes, bits }, size)
end
end
defp pack_pixel(palette, pixels, { [_|modes], bits }, size), do: pack_pixel(palette, pixels, { modes, bits }, size)
  @spec create(PoxTool.Poxel.t, String.t, keyword) :: :ok | { :error, String.t }
def create(poxel, path, opts) do
depth_bits = opts[:depth_bits] || 32 # 128
depth_mode = opts[:depth_mode]
faces = Map.from_struct(poxel) |> Enum.map(fn { name, face } -> { name, prepare_face(face) } end)
max_sp = max_shared_palettes(faces)
palette_limit = exceeds_palette_limit?(faces)
size = PoxTool.Poxel.size(poxel)
cond do
opts[:palette] == true and palette_limit -> { :error, "Exceeds maximum palette limit of 256" }
opts[:shared_palette] == true and max_sp > 256 -> { :error, "Exceeds maximum palette limit of 256" }
not is_nil(opts[:size]) and opts[:size] != size -> { :error, "Does not match size" }
exceeds_depth_limit?(faces, depth_bits, depth_mode) -> { :error, "Exceeds depth limit" }
true ->
option = %PoxTool.Options{
depth: { depth_mode || [0, 1, 2, 3, 4, 5, 6, 7], depth_bits }
}
{ palettes, options } = if opts[:shared_palette] do
palette = PoxTool.Poxel.palette(poxel)
size = palette_size(palette)
{ { palette, size }, [{ :palette, palette }|PoxTool.all_faces(%{ option | palette: size })] }
else
          Enum.reduce(Map.from_struct(poxel), { [], [] }, fn { name, face }, { acc_p, acc_o } ->
palette = PoxTool.Poxel.face_palette(face)
size = palette_size(palette)
{ [{ name, { palette, size } }|acc_p], [{ name, %{ option | palette: size } }|acc_o] }
end)
end
packed = PoxTool.pack(poxel, options)
PoxTool.save(packed, size, path, palettes)
:ok
end
end
def prepare_face(face, palette \\ %{}) do
{ palette, max_blocks, max_depth, row, rows, _ } = PoxTool.Poxel.face_map(face, { palette, 0, 0, [], [], 0 }, fn
acc = { _, _, _, _, _, 0 } -> acc
{ palette, n, d, row, rows, c } -> { palette, n, d, [], [Enum.reverse(row)|rows], c }
end, fn { palette, indexes }, { _, n, d, row, rows, c } ->
d = case indexes do
[{ { s, nil }, _, _ }|_] -> max(d, s)
[{ { s, l }, _, _ }|_] -> max(d, s + l)
_ -> d
end
indexes = Enum.reverse(indexes)
{ palette, max(MapSet.new(indexes) |> MapSet.size, n), d, [indexes|row], rows, c + 1 }
end, fn { palette, _, _, _, _, _ } -> { palette, [] } end, fn { range, colour, material }, { palette, indexes } ->
palette = Map.put_new(palette, colour, map_size(palette))
{ palette, [{ range, palette[colour], material }|indexes] }
end)
{ palette, max_blocks, max_depth, Enum.reverse([Enum.reverse(row)|rows]) }
end
defp palette_size(palette) do
case map_size(palette) do
v when v <= 4 -> 4
v when v <= 16 -> 16
v when v <= 256 -> 256
end
end
defp max_segments(faces, max \\ 0)
defp max_segments([{ _, { _, segments, _, _ } }|faces], max), do: max_segments(faces, max(max, segments))
defp max_segments([], max), do: max
defp max_palettes(faces, max \\ 0)
  defp max_palettes([{ _, { palette, _, _, _ } }|faces], max), do: max_palettes(faces, max(max, map_size(palette)))
defp max_palettes([], max), do: max
defp max_shared_palettes(faces, merged \\ %{})
defp max_shared_palettes([{ _, { palette, _, _, _ } }|faces], merged), do: max_shared_palettes(faces, Map.merge(merged, palette))
defp max_shared_palettes([], merged), do: map_size(merged)
defp exceeds_palette_limit?([{ _, { palette, _, _, _ } }|_]) when map_size(palette) > 256, do: true
defp exceeds_palette_limit?([_|faces]), do: exceeds_palette_limit?(faces)
defp exceeds_palette_limit?([]), do: false
defp exceeds_depth_limit?([{ _, { _, segments, depth, _ } }|faces], 32, 0) when (segments <= 1) and (depth <= 0xffffff), do: exceeds_depth_limit?(faces, 32, 0)
defp exceeds_depth_limit?([{ _, { _, segments, depth, _ } }|faces], 32, 1) when (segments <= 3) and (depth <= 90), do: exceeds_depth_limit?(faces, 32, 1)
defp exceeds_depth_limit?([{ _, { _, segments, depth, _ } }|faces], 32, 2) when (segments <= 24) and (depth <= 24), do: exceeds_depth_limit?(faces, 32, 2)
# defp exceeds_depth_limit?([{ _, { _, segments, depth, _ } }|faces], 128, 0) when (segments <= 1) and (depth <= 0xffffff), do: exceeds_depth_limit?(faces, 128, 0)
# defp exceeds_depth_limit?([{ _, { _, segments, depth, _ } }|faces], 128, 1) when (segments <= 3) and (depth <= 90), do: exceeds_depth_limit?(faces, 128, 1)
# defp exceeds_depth_limit?([{ _, { _, segments, depth, _ } }|faces], 128, 2) when (segments <= 24) and (depth <= 24), do: exceeds_depth_limit?(faces, 128, 2)
defp exceeds_depth_limit?([], _, _), do: false
defp exceeds_depth_limit?(faces, bits, mode) when mode in [3, 4, 5], do: exceeds_depth_limit?(faces, bits, mode - 3)
defp exceeds_depth_limit?(_, _, mode) when mode in [6, 7], do: false
defp exceeds_depth_limit?(faces, bits, nil), do: exceeds_depth_limit?(faces, bits, Enum.to_list(0..7))
defp exceeds_depth_limit?(faces, bits, modes) when is_list(modes), do: Enum.all?(modes, &exceeds_depth_limit?(faces, bits, &1))
defp exceeds_depth_limit?(_, _, _), do: true
end
|
lib/pox_tool.ex
| 0.751739
| 0.541651
|
pox_tool.ex
|
starcoder
|
defmodule NimbleOptions do
@options_schema [
*: [
type: :keyword_list,
keys: [
type: [
type: {:custom, __MODULE__, :validate_type, []},
default: :any,
doc: "The type of the option item."
],
required: [
type: :boolean,
default: false,
doc: "Defines if the option item is required."
],
default: [
type: :any,
doc: "The default value for option item if not specified."
],
keys: [
type: :keyword_list,
doc: """
Available for types `:keyword_list` and `:non_empty_keyword_list`,
it defines which set of keys are accepted for the option item. The value of the
`:keys` option is a schema itself. For example: `keys: [foo: [type: :atom]]`.
Use `:*` as the key to allow multiple arbitrary keys and specify their schema:
`keys: [*: [type: :integer]]`.
""",
keys: &__MODULE__.options_schema/0
],
deprecated: [
type: :string,
doc: """
Defines a message to indicate that the option item is deprecated. \
The message will be displayed as a warning when passing the item.
"""
],
rename_to: [
type: :atom,
doc: """
          Renames an option item, allowing one to use a normalized name \
          internally, e.g. renaming a deprecated item to the currently accepted name.
"""
],
doc: [
type: {:or, [:string, {:in, [false]}]},
doc: "The documentation for the option item."
],
subsection: [
type: :string,
doc: "The title of separate subsection of the options' documentation"
]
]
]
]
@moduledoc """
Provides a standard API to handle keyword-list-based options.
`NimbleOptions` allows developers to create schemas using a
pre-defined set of options and types. The main benefits are:
* A single unified way to define simple static options
* Config validation against schemas
* Automatic doc generation
## Schema options
These are the options supported in a *schema*. They are what
defines the validation for the items in the given schema.
#{NimbleOptions.Docs.generate(@options_schema, nest_level: 0)}
## Types
* `:any` - Any type.
* `:keyword_list` - A keyword list.
* `:non_empty_keyword_list` - A non-empty keyword list.
* `:atom` - An atom.
* `:string` - A string.
* `:boolean` - A boolean.
* `:integer` - An integer.
* `:non_neg_integer` - A non-negative integer.
* `:pos_integer` - A positive integer.
* `:float` - A float.
* `:timeout` - A non-negative integer or the atom `:infinity`.
* `:pid` - A PID (process identifier).
* `:mfa` - A named function in the format `{module, function, arity}` where
`arity` is a list of arguments. For example, `{MyModule, :my_fun, [arg1, arg2]}`.
* `:mod_arg` - A module along with arguments, e.g. `{MyModule, [arg1, arg2]}`.
Usually used for process initialization using `start_link` and friends.
* `{:fun, arity}` - Any function with the specified arity.
* `{:in, choices}` - A value that is a member of one of the `choices`. `choices`
should be a list of terms or a `Range`. The value is an element in said
list of terms, that is, `value in choices` is `true`. This was previously
called `:one_of` and the `:in` name is available since version 0.3.3.
* `{:custom, mod, fun, args}` - A custom type. The related value must be validated
by `mod.fun(values, ...args)`. The function should return `{:ok, value}` or
`{:error, message}`.
* `{:or, subtypes}` - A value that matches one of the given `subtypes`. The value is
matched against the subtypes in the order specified in the list of `subtypes`. If
one of the subtypes matches and **updates** (casts) the given value, the updated
value is used. For example: `{:or, [:string, :boolean, {:fun, 2}]}`. If one of the
subtypes is a keyword list, you won't be able to pass `:keys` directly. For this reason,
keyword lists (`:keyword_list` and `:non_empty_keyword_list`) are special cased and can
be used as subtypes with `{:keyword_list, keys}` or `{:non_empty_keyword_list, keys}`.
For example, a type such as `{:or, [:boolean, keyword_list: [enabled: [type: :boolean]]]}`
would match either a boolean or a keyword list with the `:enabled` boolean option in it.
* `{:list, subtype}` - A list where all elements match `subtype`. `subtype` can be any
of the accepted types listed here. Empty lists are allowed. The resulting validated list
contains the validated (and possibly updated) elements, each as returned after validation
through `subtype`. For example, if `subtype` is a custom validator function that returns
an updated value, then that updated value is used in the resulting list. Validation
fails at the *first* element that is invalid according to `subtype`.
## Example
iex> schema = [
...> producer: [
...> type: :non_empty_keyword_list,
...> required: true,
...> keys: [
...> module: [required: true, type: :mod_arg],
...> concurrency: [
...> type: :pos_integer,
...> ]
...> ]
...> ]
...> ]
...>
...> config = [
...> producer: [
...> concurrency: 1,
...> ]
...> ]
...>
...> {:error, %NimbleOptions.ValidationError{} = error} = NimbleOptions.validate(config, schema)
...> Exception.message(error)
"required option :module not found, received options: [:concurrency] (in options [:producer])"
## Nested option items
`NimbleOptions` allows option items to be nested so you can recursively validate
any item down the options tree.
### Example
iex> schema = [
...> producer: [
...> required: true,
...> type: :non_empty_keyword_list,
...> keys: [
...> rate_limiting: [
...> type: :non_empty_keyword_list,
...> keys: [
...> interval: [required: true, type: :pos_integer]
...> ]
...> ]
...> ]
...> ]
...> ]
...>
...> config = [
...> producer: [
...> rate_limiting: [
...> interval: :oops!
...> ]
...> ]
...> ]
...>
...> {:error, %NimbleOptions.ValidationError{} = error} = NimbleOptions.validate(config, schema)
...> Exception.message(error)
"expected :interval to be a positive integer, got: :oops! (in options [:producer, :rate_limiting])"
## Validating Schemas
Each time `validate/2` is called, the given schema itself will be validated before validating
the options.
In most applications the schema will never change but validating options will be done
repeatedly.
To avoid the extra cost of validating the schema, it is possible to validate the schema once,
and then use that valid schema directly. This is done by using the `new!/1` function first, and
then passing the returned schema to `validate/2`.
### Example
iex> raw_schema = [
...> hostname: [
...> required: true,
...> type: :string
...> ]
...> ]
...>
...> schema = NimbleOptions.new!(raw_schema)
...> NimbleOptions.validate([hostname: "elixir-lang.org"], schema)
{:ok, hostname: "elixir-lang.org"}
Calling `new!/1` from a function that receives options will still validate the schema each time
that function is called. Declaring the schema as a module attribute is supported:
@options_schema NimbleOptions.new!([...])
This schema will be validated at compile time. Calling `docs/1` on that schema is also
supported.
"""
alias NimbleOptions.ValidationError
defstruct schema: []
@basic_types [
:any,
:keyword_list,
:non_empty_keyword_list,
:atom,
:integer,
:non_neg_integer,
:pos_integer,
:float,
:mfa,
:mod_arg,
:string,
:boolean,
:timeout,
:pid
]
@typedoc """
A schema. See the module documentation for more information.
"""
@type schema() :: keyword()
@typedoc """
The `NimbleOptions` struct embedding a validated schema. See the
Validating Schemas section in the module documentation.
"""
@type t() :: %NimbleOptions{schema: schema()}
@doc """
Validate the given `options` with the given `schema`.
See the module documentation for what a `schema` is.
If the validation is successful, this function returns `{:ok, validated_options}`
where `validated_options` is a keyword list. If the validation fails, this
function returns `{:error, validation_error}` where `validation_error` is a
`NimbleOptions.ValidationError` struct explaining what's wrong with the options.
You can use `raise/1` with that struct or `Exception.message/1` to turn it into a string.
"""
@spec validate(keyword(), schema() | t()) ::
{:ok, validated_options :: keyword()} | {:error, ValidationError.t()}
def validate(options, %NimbleOptions{schema: schema}) do
validate_options_with_schema(options, schema)
end
def validate(options, schema) when is_list(options) and is_list(schema) do
validate(options, new!(schema))
end
@doc """
Validates the given `options` with the given `schema` and raises if they're not valid.
This function behaves exactly like `validate/2`, but returns the options directly
if they're valid or raises a `NimbleOptions.ValidationError` exception otherwise.
"""
@spec validate!(keyword(), schema() | t()) :: validated_options :: keyword()
def validate!(options, schema) do
case validate(options, schema) do
{:ok, options} -> options
{:error, %ValidationError{} = error} -> raise error
end
end
@doc """
Validates the given `schema` and returns a wrapped schema to be used with `validate/2`.
If the given schema is not valid, raises a `NimbleOptions.ValidationError`.
"""
@spec new!(schema()) :: t()
def new!(schema) when is_list(schema) do
case validate_options_with_schema(schema, options_schema()) do
{:ok, validated_schema} ->
%NimbleOptions{schema: validated_schema}
{:error, %ValidationError{} = error} ->
raise ArgumentError,
"invalid schema given to NimbleOptions.validate/2. " <>
"Reason: #{Exception.message(error)}"
end
end
@doc ~S"""
Returns documentation for the given schema.
You can use this to inject documentation in your docstrings. For example,
say you have your schema in a module attribute:
@options_schema [...]
With this, you can use `docs/1` to inject documentation:
@doc "Supported options:\n#{NimbleOptions.docs(@options_schema)}"
## Options
* `:nest_level` - an integer deciding the "nest level" of the generated
docs. This is useful when, for example, you use `docs/2` inside the `:doc`
option of another schema. For example, if you have the following nested schema:
nested_schema = [
allowed_messages: [type: :pos_integer, doc: "Allowed messages."],
interval: [type: :pos_integer, doc: "Interval."]
]
then you can document it inside another schema with its nesting level increased:
schema = [
producer: [
type: {:or, [:string, keyword_list: nested_schema]},
doc:
"Either a string or a keyword list with the following keys:\n\n" <>
NimbleOptions.docs(nested_schema, nest_level: 1)
],
other_key: [type: :string]
]
"""
@spec docs(schema(), keyword() | t()) :: String.t()
def docs(schema, options \\ [])
def docs(schema, options) when is_list(schema) and is_list(options) do
NimbleOptions.Docs.generate(schema, options)
end
def docs(%NimbleOptions{schema: schema}, options) when is_list(options) do
NimbleOptions.Docs.generate(schema, options)
end
@doc false
def options_schema() do
@options_schema
end
defp validate_options_with_schema(opts, schema) do
validate_options_with_schema_and_path(opts, schema, _path = [])
end
defp validate_options_with_schema_and_path(opts, fun, path) when is_function(fun) do
validate_options_with_schema_and_path(opts, fun.(), path)
end
defp validate_options_with_schema_and_path(opts, schema, path) do
schema = expand_star_to_option_keys(schema, opts)
with :ok <- validate_unknown_options(opts, schema),
{:ok, options} <- validate_options(schema, opts) do
{:ok, options}
else
{:error, %ValidationError{} = error} ->
{:error, %ValidationError{error | keys_path: path ++ error.keys_path}}
end
end
defp validate_unknown_options(opts, schema) do
valid_opts = Keyword.keys(schema)
case Keyword.keys(opts) -- valid_opts do
[] ->
:ok
keys ->
error_tuple(
keys,
nil,
"unknown options #{inspect(keys)}, valid options are: #{inspect(valid_opts)}"
)
end
end
defp validate_options(schema, opts) do
case Enum.reduce_while(schema, opts, &reduce_options/2) do
{:error, %ValidationError{}} = result -> result
result -> {:ok, result}
end
end
defp reduce_options({key, schema_opts}, opts) do
case validate_option(opts, key, schema_opts) do
{:error, %ValidationError{}} = result ->
{:halt, result}
{:ok, value} ->
if renamed_key = schema_opts[:rename_to] do
opts =
opts
|> Keyword.update(renamed_key, value, fn _ -> value end)
|> Keyword.delete(key)
{:cont, opts}
else
{:cont, Keyword.update(opts, key, value, fn _ -> value end)}
end
:no_value ->
if Keyword.has_key?(schema_opts, :default) do
opts_with_default = Keyword.put(opts, key, schema_opts[:default])
reduce_options({key, schema_opts}, opts_with_default)
else
{:cont, opts}
end
end
end
defp validate_option(opts, key, schema) do
with {:ok, value} <- validate_value(opts, key, schema),
{:ok, value} <- validate_type(schema[:type], key, value) do
if nested_schema = schema[:keys] do
validate_options_with_schema_and_path(value, nested_schema, _path = [key])
else
{:ok, value}
end
end
end
defp validate_value(opts, key, schema) do
cond do
Keyword.has_key?(opts, key) ->
if message = Keyword.get(schema, :deprecated) do
IO.warn("#{inspect(key)} is deprecated. " <> message)
end
{:ok, opts[key]}
Keyword.get(schema, :required, false) ->
error_tuple(
key,
nil,
"required option #{inspect(key)} not found, received options: " <>
inspect(Keyword.keys(opts))
)
true ->
:no_value
end
end
defp validate_type(:integer, key, value) when not is_integer(value) do
error_tuple(key, value, "expected #{inspect(key)} to be an integer, got: #{inspect(value)}")
end
defp validate_type(:non_neg_integer, key, value) when not is_integer(value) or value < 0 do
error_tuple(
key,
value,
"expected #{inspect(key)} to be a non negative integer, got: #{inspect(value)}"
)
end
defp validate_type(:pos_integer, key, value) when not is_integer(value) or value < 1 do
error_tuple(
key,
value,
"expected #{inspect(key)} to be a positive integer, got: #{inspect(value)}"
)
end
defp validate_type(:float, key, value) when not is_float(value) do
error_tuple(key, value, "expected #{inspect(key)} to be a float, got: #{inspect(value)}")
end
defp validate_type(:atom, key, value) when not is_atom(value) do
error_tuple(key, value, "expected #{inspect(key)} to be an atom, got: #{inspect(value)}")
end
defp validate_type(:timeout, key, value)
when not (value == :infinity or (is_integer(value) and value >= 0)) do
error_tuple(
key,
value,
"expected #{inspect(key)} to be non-negative integer or :infinity, got: #{inspect(value)}"
)
end
defp validate_type(:string, key, value) when not is_binary(value) do
error_tuple(key, value, "expected #{inspect(key)} to be a string, got: #{inspect(value)}")
end
defp validate_type(:boolean, key, value) when not is_boolean(value) do
error_tuple(key, value, "expected #{inspect(key)} to be a boolean, got: #{inspect(value)}")
end
defp validate_type(:keyword_list, key, value) do
if keyword_list?(value) do
{:ok, value}
else
error_tuple(
key,
value,
"expected #{inspect(key)} to be a keyword list, got: #{inspect(value)}"
)
end
end
defp validate_type(:non_empty_keyword_list, key, value) do
if keyword_list?(value) and value != [] do
{:ok, value}
else
error_tuple(
key,
value,
"expected #{inspect(key)} to be a non-empty keyword list, got: #{inspect(value)}"
)
end
end
defp validate_type(:pid, _key, value) when is_pid(value) do
{:ok, value}
end
defp validate_type(:pid, key, value) do
error_tuple(key, value, "expected #{inspect(key)} to be a pid, got: #{inspect(value)}")
end
defp validate_type(:mfa, _key, {mod, fun, args} = value)
when is_atom(mod) and is_atom(fun) and is_list(args) do
{:ok, value}
end
defp validate_type(:mfa, key, value) when not is_nil(value) do
error_tuple(
key,
value,
"expected #{inspect(key)} to be a tuple {Mod, Fun, Args}, got: #{inspect(value)}"
)
end
defp validate_type(:mod_arg, _key, {mod, _arg} = value) when is_atom(mod) do
{:ok, value}
end
defp validate_type(:mod_arg, key, value) do
error_tuple(
key,
value,
"expected #{inspect(key)} to be a tuple {Mod, Arg}, got: #{inspect(value)}"
)
end
defp validate_type({:fun, arity}, key, value) do
expected = "expected #{inspect(key)} to be a function of arity #{arity}, "
if is_function(value) do
case :erlang.fun_info(value, :arity) do
{:arity, ^arity} ->
{:ok, value}
{:arity, fun_arity} ->
error_tuple(key, value, expected <> "got: function of arity #{inspect(fun_arity)}")
end
else
error_tuple(key, value, expected <> "got: #{inspect(value)}")
end
end
defp validate_type({:custom, mod, fun, args}, key, value) do
case apply(mod, fun, [value | args]) do
{:ok, value} ->
{:ok, value}
{:error, message} when is_binary(message) ->
error_tuple(key, value, message)
other ->
raise "custom validation function #{inspect(mod)}.#{fun}/#{length(args) + 1} " <>
"must return {:ok, value} or {:error, message}, got: #{inspect(other)}"
end
end
# TODO: remove on v0.5.
defp validate_type({:one_of, choices}, key, value) do
validate_type({:in, choices}, key, value)
end
defp validate_type({:in, choices}, key, value) do
if value in choices do
{:ok, value}
else
error_tuple(
key,
value,
"expected #{inspect(key)} to be in #{inspect(choices)}, got: #{inspect(value)}"
)
end
end
defp validate_type({:or, subtypes}, key, value) do
result =
Enum.reduce_while(subtypes, _errors = [], fn subtype, errors_acc ->
{subtype, nested_schema} =
case subtype do
{keyword_list, keys} when keyword_list in [:keyword_list, :non_empty_keyword_list] ->
{keyword_list, keys}
other ->
{other, _nested_schema = nil}
end
case validate_type(subtype, key, value) do
{:ok, value} when not is_nil(nested_schema) ->
case validate_options_with_schema_and_path(value, nested_schema, _path = [key]) do
{:ok, value} -> {:halt, {:ok, value}}
{:error, %ValidationError{} = error} -> {:cont, [error | errors_acc]}
end
{:ok, value} ->
{:halt, {:ok, value}}
{:error, %ValidationError{} = reason} ->
{:cont, [reason | errors_acc]}
end
end)
case result do
{:ok, value} ->
{:ok, value}
errors when is_list(errors) ->
message =
"expected #{inspect(key)} to match at least one given type, but didn't match " <>
"any. Here are the reasons why it didn't match each of the allowed types:\n\n" <>
Enum.map_join(errors, "\n", &(" * " <> Exception.message(&1)))
error_tuple(key, value, message)
end
end
defp validate_type({:list, subtype}, key, value) when is_list(value) do
updated_elements =
for {elem, index} <- Stream.with_index(value) do
case validate_type(subtype, "list element", elem) do
{:ok, updated_elem} ->
updated_elem
{:error, %ValidationError{} = error} ->
throw({:error, index, error})
end
end
{:ok, updated_elements}
catch
{:error, index, %ValidationError{} = error} ->
message =
"list element at position #{index} in #{inspect(key)} failed validation: #{error.message}"
error_tuple(key, value, message)
end
defp validate_type({:list, _subtype}, key, value) do
error_tuple(key, value, "expected #{inspect(key)} to be a list, got: #{inspect(value)}")
end
defp validate_type(nil, key, value) do
validate_type(:any, key, value)
end
defp validate_type(_type, _key, value) do
{:ok, value}
end
defp keyword_list?(value) do
is_list(value) and Enum.all?(value, &match?({key, _value} when is_atom(key), &1))
end
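  # Examples: keyword_list?([a: 1]) #=> true
  #           keyword_list?([{1, 2}]) #=> false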
defp expand_star_to_option_keys(keys, opts) do
case keys[:*] do
nil ->
keys
schema_opts ->
Enum.map(opts, fn {k, _} -> {k, schema_opts} end)
end
end
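  # For example, with keys [*: [type: :integer]] and opts [a: 1, b: 2], this
  # expands to [a: [type: :integer], b: [type: :integer]].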
defp available_types() do
types =
Enum.map(@basic_types, &inspect/1) ++
[
"{:fun, arity}",
"{:in, choices}",
"{:or, subtypes}",
"{:custom, mod, fun, args}",
"{:list, subtype}"
]
Enum.join(types, ", ")
end
@doc false
def validate_type(value) when value in @basic_types do
{:ok, value}
end
def validate_type({:fun, arity} = value) when is_integer(arity) and arity >= 0 do
{:ok, value}
end
# TODO: remove on v0.5.
def validate_type({:one_of, choices}) do
IO.warn("the {:one_of, choices} type is deprecated. Use {:in, choices} instead.")
validate_type({:in, choices})
end
# "choices" here can be any enumerable so there's no easy and fast way to validate it.
def validate_type({:in, _choices} = value) do
{:ok, value}
end
def validate_type({:custom, mod, fun, args} = value)
when is_atom(mod) and is_atom(fun) and is_list(args) do
{:ok, value}
end
def validate_type({:or, subtypes} = value) when is_list(subtypes) do
Enum.reduce_while(subtypes, {:ok, value}, fn
{keyword_list_type, _keys}, acc
when keyword_list_type in [:keyword_list, :non_empty_keyword_list] ->
{:cont, acc}
subtype, acc ->
case validate_type(subtype) do
{:ok, _value} -> {:cont, acc}
{:error, reason} -> {:halt, {:error, "invalid type in :or for reason: #{reason}"}}
end
end)
end
def validate_type({:list, subtype}) do
case validate_type(subtype) do
{:ok, validated_subtype} -> {:ok, {:list, validated_subtype}}
{:error, reason} -> {:error, "invalid subtype for :list type: #{reason}"}
end
end
def validate_type(value) do
{:error, "invalid option type #{inspect(value)}.\n\nAvailable types: #{available_types()}"}
end
defp error_tuple(key, value, message) do
{:error, %ValidationError{key: key, message: message, value: value}}
end
end
|
lib/nimble_options.ex
| 0.905701
| 0.639792
|
nimble_options.ex
|
starcoder
|
defmodule AdventOfCode.DayFourSolution do
defp load_data() do
[draw | rest] = AdventOfCode.load_data(4, "data.txt")
boards =
rest
|> Enum.map(fn row -> String.split(row) end)
|> Enum.chunk_every(5)
|> Enum.map(&load_board/1)
draw_list = draw |> String.split(",") |> Enum.map(&String.to_integer/1)
{draw_list, boards}
end
# Initialize an empty board to work with
defp create_empty_board() do
%{
0 => %{0 => -1, 1 => -1, 2 => -1, 3 => -1, 4 => -1},
1 => %{0 => -1, 1 => -1, 2 => -1, 3 => -1, 4 => -1},
2 => %{0 => -1, 1 => -1, 2 => -1, 3 => -1, 4 => -1},
3 => %{0 => -1, 1 => -1, 2 => -1, 3 => -1, 4 => -1},
4 => %{0 => -1, 1 => -1, 2 => -1, 3 => -1, 4 => -1}
}
end
  # Set the element of a board at [row][col]
  defp set_element(board, row, col, value), do: put_in(board[row][col], value)
  # Entry point for find_element_index/4
defp find_element_index(board, target), do: find_element_index(board, 0, 0, target)
# Search for an element and return the given index within the board
defp find_element_index(_, 5, 0, _), do: nil
defp find_element_index(board, row, 4, target) do
if board[row][4] == target,
do: {row, 4},
else: find_element_index(board, row + 1, 0, target)
end
defp find_element_index(board, row, col, target) do
if board[row][col] == target,
do: {row, col},
else: find_element_index(board, row, col + 1, target)
end
# Multi-dimensional array at [row][col]
defp mda_at(mda, row, col), do: mda |> Enum.at(row) |> Enum.at(col) |> String.to_integer()
  # Entry point for load_board/4
defp load_board(data), do: load_board(data, 0, 0, %{})
# If the row is already five and col is already one, the board has been filled
defp load_board(_, 5, 0, board), do: board
# If we start loading at the first row/col, we want to create a new board
defp load_board(data, 0, 0, _) do
el = data |> mda_at(0, 0)
updated_board = create_empty_board() |> set_element(0, 0, el)
load_board(data, 0, 1, updated_board)
end
# If we have come to the last col, move one row down and start at zero
defp load_board(data, row, 4, board) do
el = data |> mda_at(row, 4)
updated_board = board |> set_element(row, 4, el)
load_board(data, row + 1, 0, updated_board)
end
# If we haven't reached any of the edges/corners, just update the board at [row][col]
defp load_board(data, row, col, board) do
el = data |> mda_at(row, col)
updated_board = board |> set_element(row, col, el)
load_board(data, row, col + 1, updated_board)
end
  # Mark a drawn number on the board (if present) by setting it to -1
  defp fill_draw(draw, board) do
    case find_element_index(board, draw) do
      nil -> board
      {row, col} -> set_element(board, row, col, -1)
    end
  end
defp is_row_bingo?(board, row), do: board[row] |> Map.values() |> Enum.all?(&(&1 == -1))
  # Entry point for is_col_bingo?/4
defp is_col_bingo?(board, col), do: is_col_bingo?(board, 0, col, [])
defp is_col_bingo?(_, 5, _, outcome), do: outcome |> Enum.all?()
defp is_col_bingo?(board, row, col, outcome),
do: is_col_bingo?(board, row + 1, col, [board[row][col] == -1 | outcome])
  # A board has bingo when any full row or any full column is completely marked (-1)
defp has_bingo?(board) do
indices = 0..4 |> Enum.to_list()
has_row_bingo? = indices |> Enum.any?(&is_row_bingo?(board, &1))
has_col_bingo? = indices |> Enum.any?(&is_col_bingo?(board, &1))
has_row_bingo? || has_col_bingo?
end
defp get_unmarked(board), do: get_unmarked(board, 0, 0, 0)
  defp get_unmarked(_board, 5, 0, unmarked), do: unmarked
defp get_unmarked(board, row, 4, unmarked) do
x = if board[row][4] == -1, do: 0, else: board[row][4]
get_unmarked(board, row + 1, 0, unmarked + x)
end
defp get_unmarked(board, row, col, unmarked) do
x = if board[row][col] == -1, do: 0, else: board[row][col]
get_unmarked(board, row, col + 1, unmarked + x)
end
  defp solve_one({[draw | rest], boards}) do
    # Take the next draw and update the boards accordingly
    updated_boards = boards |> Enum.map(&fill_draw(draw, &1))
    # Find the index of the first board with bingo, if any
    bingo_index = updated_boards |> Enum.map(&has_bingo?/1) |> Enum.find_index(&(&1 == true))
    if bingo_index == nil do
      solve_one({rest, updated_boards})
    else
      bingo_board = updated_boards |> Enum.at(bingo_index)
      unmarked = bingo_board |> get_unmarked()
      unmarked * draw
    end
  end
def part_one(), do: load_data() |> solve_one()
defp solve_two({[draw | rest], unsolved_boards}) do
updated_boards = unsolved_boards |> Enum.map(&fill_draw(draw, &1))
updated_unsolved_boards = updated_boards |> Enum.filter(&(!has_bingo?(&1)))
if updated_unsolved_boards |> Enum.empty?() do
last_board = updated_boards |> Enum.at(0)
unmarked = last_board |> get_unmarked()
unmarked * draw
else
solve_two({rest, updated_unsolved_boards})
end
end
def part_two(), do: load_data() |> solve_two()
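  # Usage sketch (assumes the puzzle input exists where
  # AdventOfCode.load_data(4, "data.txt") expects it):
  #
  #   AdventOfCode.DayFourSolution.part_one()
  #   AdventOfCode.DayFourSolution.part_two()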
end
|
lib/advent_of_code/day-4/solution.ex
| 0.563978
| 0.505676
|
solution.ex
|
starcoder
|
defmodule Mix.Tasks.Hedwig.Gen.Robot do
use Mix.Task
import Mix.Generator
@shortdoc "Generate a new robot"
@moduledoc """
Generates a new robot.
The robot will be placed in the `lib` directory.
## Examples
mix hedwig.gen.robot
mix hedwig.gen.robot --name alfred --robot Custom.Module
## Command line options
* `--name` - the name your robot will respond to
* `--aka` - an alias your robot will respond to
* `--robot` - the robot to generate (defaults to `YourApp.Robot`)
"""
@switches [aka: :string, name: :string, robot: :string]
@doc false
def run(argv) do
if Mix.Project.umbrella? do
Mix.raise "cannot run task hedwig.gen.robot from umbrella application"
end
config = Mix.Project.config
{opts, _argv, _} = OptionParser.parse(argv, switches: @switches)
app = config[:app]
deps = config[:deps]
Mix.shell.info [:clear, :home, """
Welcome to the Hedwig Robot Generator!
Let's get started.
"""]
aka = opts[:aka] || "/"
    name = opts[:name] || prompt_for_name()
robot = opts[:robot] || default_robot(app)
adapter = get_adapter_module(deps)
    underscored = Macro.underscore(robot)
file = Path.join("lib", underscored) <> ".ex"
robot = Module.concat([robot])
opts = [adapter: adapter, aka: aka, app: app, name: name, robot: robot]
create_directory Path.dirname(file)
create_file file, robot_template(opts)
case File.read "config/config.exs" do
{:ok, contents} ->
Mix.shell.info [:green, "* updating ", :reset, "config/config.exs"]
File.write! "config/config.exs",
String.replace(contents, "use Mix.Config", config_template(opts))
{:error, _} ->
create_file "config/config.exs", config_template(opts)
end
Mix.shell.info """
Don't forget to add your new robot to your supervision tree
(typically in lib/#{app}.ex):
worker(#{inspect robot}, [])
"""
end
defp default_robot(app) do
case Application.get_env(app, :app_namespace, app) do
      ^app -> app |> to_string |> Macro.camelize
mod -> mod |> inspect
end |> Module.concat(Robot)
end
defp available_adapters(deps) do
deps
|> all_modules
    |> Kernel.++(hedwig_modules())
|> Enum.uniq
|> Enum.filter(&implements_adapter?/1)
|> Enum.with_index
|> Enum.reduce(%{}, fn {adapter, index}, acc ->
Map.put(acc, index + 1, adapter)
end)
end
defp all_modules(deps) do
Enum.reduce(deps, [], &load_and_get_modules/2)
end
defp load_and_get_modules({app, _}, acc) do
load_and_get_modules(app, acc)
end
defp load_and_get_modules({app, _, _}, acc) do
load_and_get_modules(app, acc)
end
defp load_and_get_modules(app, acc) do
Application.load(app)
case :application.get_key(app, :modules) do
{:ok, modules} ->
modules ++ acc
_ ->
acc
end
end
defp hedwig_modules do
Application.load(:hedwig)
{:ok, modules} = :application.get_key(:hedwig, :modules)
modules
end
defp implements_adapter?(module) do
case get_in(module.module_info(), [:attributes, :behaviour]) do
nil -> false
mods -> Hedwig.Adapter in mods
end
end
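  # For example, an adapter module that declares `@behaviour Hedwig.Adapter`
  # (such as the console adapter shipped with Hedwig) makes this return true.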
defp get_adapter_module(deps) do
adapters = available_adapters(deps)
{selection, _} = adapters |> prompt_for_adapter |> Integer.parse
adapters[selection]
end
defp prompt_for_name do
Mix.shell.prompt("What would you like to name your bot?:")
    |> String.trim()
end
defp prompt_for_adapter(adapters) do
adapters = Enum.map(adapters, &format_adapter/1)
Mix.shell.info ["Available adapters\n\n", adapters]
Mix.shell.prompt("Please select an adapter:")
end
defp format_adapter({index, mod}) do
[inspect(index), ". ", :bright, :blue,
inspect(mod), :normal, :default_color, "\n"]
end
embed_template :robot, """
defmodule <%= inspect @robot %> do
use Hedwig.Robot, otp_app: <%= inspect @app %>
end
"""
embed_template :config, """
use Mix.Config
config <%= inspect @app %>, <%= inspect @robot %>,
adapter: <%= inspect @adapter %>,
name: <%= inspect @name %>,
aka: <%= inspect @aka %>,
responders: [
{Hedwig.Responders.Help, []},
{Hedwig.Responders.GreatSuccess, []},
{Hedwig.Responders.ShipIt, []}
]
"""
end
|
lib/mix/tasks/hedwig.gen.robot.ex
| 0.756088
| 0.425963
|
hedwig.gen.robot.ex
|
starcoder
|
elixir_doc = """
Top level module providing convenience access to needed functions as well
as the very high level `Benchee.run` API.
Intended Elixir interface.
"""
erlang_doc = """
High-Level interface for more convenient usage from Erlang. Same as `Benchee`.
"""
for {module, moduledoc} <- [{Benchee, elixir_doc}, {:benchee, erlang_doc}] do
defmodule module do
@moduledoc moduledoc
alias Benchee.Formatter
@doc """
Run benchmark jobs defined by a map and optionally provide configuration
options.
Runs the given benchmarks and prints the results on the console.
* jobs - a map from descriptive benchmark job name to a function to be
executed and benchmarked
* configuration - configuration options to alter what Benchee does, see
`Benchee.Configuration.init/1` for documentation of the available options.
## Examples
      Benchee.run(%{"My Benchmark" => fn -> 1 + 1 end,
      "My other benchmark" => fn -> "1" <> "1" end}, time: 3)
# Prints a summary of the benchmark to the console
"""
def run(jobs, config \\ [])
def run(jobs, config) when is_list(config) do
do_run(jobs, config)
end
def run(config, jobs) when is_map(jobs) do
# pre 0.6.0 way of passing in the config first and as a map
do_run(jobs, config)
end
defp do_run(jobs, config) do
config
|> Benchee.init()
|> Benchee.system()
|> add_benchmarking_jobs(jobs)
|> Benchee.collect()
|> Benchee.statistics()
|> Benchee.load()
|> Formatter.output()
end
defp add_benchmarking_jobs(suite, jobs) do
Enum.reduce(jobs, suite, fn {key, function}, suite_acc ->
Benchee.benchmark(suite_acc, key, function)
end)
end
defdelegate init(), to: Benchee.Configuration
defdelegate init(config), to: Benchee.Configuration
defdelegate system(suite), to: Benchee.System
defdelegate benchmark(suite, name, function), to: Benchee.Benchmark
defdelegate benchmark(suite, name, function, printer), to: Benchee.Benchmark
defdelegate collect(suite), to: Benchee.Benchmark
defdelegate collect(suite, printer), to: Benchee.Benchmark
defdelegate measure(suite), to: Benchee.Benchmark, as: :collect
defdelegate statistics(suite), to: Benchee.Statistics
defdelegate load(suite), to: Benchee.ScenarioLoader
end
end
|
lib/benchee.ex
| 0.81457
| 0.420362
|
benchee.ex
|
starcoder
|
defmodule Mlx90640 do
@moduledoc """
`elixir_mlx90640` provides a high level abstraction to interface with the
MLX90640 Far Infrared Thermal Sensor Array on Linux platforms.
"""
use Bitwise
use GenServer
defmodule State do
@moduledoc false
defstruct port: nil, receiver: nil
end
defmodule Frame do
@moduledoc false
defstruct data: []
end
@type frame_rate :: 1 | 2 | 4 | 8 | 16 | 32 | 64
@doc """
Starts and links the `Mlx90640` GenServer.
`receiver` is a process that will receive messages on each frame captured by
the sensor.
`frame_rate` is the (approximate) number of frames per second that the sensor
will capture. Valid values are 1, 2, 4, 8, 16, 32, and 64. The default is 2.
Higher values might require a faster I2C baud rate to be configured in Linux.
The `receiver` process will receive, for each frame captured by the sensor, a
message like `%Mlx90640.Frame{ data: data }`, where `data` is a list of rows,
and each row is a list of pixel temperature measurements, expressed as
floating point numbers indicating the temperature in degrees Celsius.
Under normal conditions, there should be 24 rows of 32 pixels each, but in
case of corrupted data frames there might be less.
"""
@spec start_link(pid, [ frame_rate: frame_rate ], [ term ]) :: GenServer.on_start()
def start_link(receiver, mlx_opts \\ [], opts \\ []) do
frame_rate = Keyword.get(mlx_opts, :frame_rate, 2)
if Enum.member?([1, 2, 4, 8, 16, 32, 64], frame_rate) do
arg = %{ receiver: receiver, frame_rate: frame_rate }
GenServer.start_link(__MODULE__, arg, opts)
else
{ :error, "frame rate #{frame_rate} not supported" }
end
end
@doc """
Gracefully stops the `Mlx90640` GenServer.
"""
@spec stop(GenServer.server()) :: :ok
def stop(pid) do
GenServer.cast(pid, :stop)
end
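  # A minimal usage sketch (assumes the native `mlx90640` port program has
  # been compiled into the application's priv directory):
  #
  #   {:ok, pid} = Mlx90640.start_link(self(), frame_rate: 4)
  #   receive do
  #     %Mlx90640.Frame{data: rows} -> IO.inspect(length(rows))
  #   end
  #   Mlx90640.stop(pid)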
# GenServer callbacks
def init(%{ receiver: receiver, frame_rate: frame_rate }) do
executable_dir = Application.get_env(:elixir_mlx90640, :executable_dir, :code.priv_dir(:elixir_mlx90640))
    port = Port.open({:spawn_executable, to_charlist(executable_dir) ++ '/mlx90640'}, [
{:args, ["#{frame_rate}"]},
{:packet, 2},
:use_stdio,
:binary,
:exit_status
])
{:ok, %State{ port: port, receiver: receiver }}
end
def handle_info({port, {:data, data}}, state = %State{ port: port, receiver: receiver }) do
send(receiver, %Frame{ data: decode(data) })
{ :noreply, state }
end
def handle_info({port, {:exit_status, exit_status}}, state = %State{ port: port }) do
{ :stop, exit_status, state }
end
def handle_cast(:stop, state) do
{ :stop, :normal, state }
end
# Private helper functions
defp decode(data, decoded \\ []) do
case data do
<<>> -> decoded |> Enum.reverse |> Enum.chunk_every(32)
<< a, b, rest :: binary >> -> decode(rest, [decode_bytes(a, b) | decoded])
_ -> nil
end
end
defp decode_bytes(a, b) do
sign = if bsr(band(b, 0b10000000), 7) == 1, do: -1, else: 1
fractional = band(b, 0b01111111)
(a * sign) + (sign * fractional / 100.0)
end
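  # Worked example: decode_bytes(25, 0b10000101) yields -25.05
  # (sign bit of the second byte set, integer part 25, fraction 5/100).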
end
|
lib/elixir_mlx90640.ex
| 0.866754
| 0.745097
|
elixir_mlx90640.ex
|
starcoder
|
defmodule Akd.Init.Distillery do
@moduledoc """
A native Hook module that comes shipped with Akd.
This module uses `Akd.Hook`.
Provides a set of operations that run distillery's `release.init` task with
a given template (optional). These commands are ran on the `build_at`
destination of a deployment.
Ensures to cleanup and empty the rel/ directory.
Doesn't have any Rollback operations.
# Options:
* `run_ensure`: `boolean`. Specifies whether to a run a command or not.
* `ignore_failure`: `boolean`. Specifies whether to continue if this hook fails.
* `cmd_envs`: `list` of `tuples`. Specifies the environments to provide while
initializing the distillery release.
# Defaults:
* `run_ensure`: `true`
* `ignore_failure`: `false`
"""
use Akd.Hook
@default_opts [run_ensure: true, ignore_failure: false]
@doc """
Callback implementation for `get_hooks/2`.
This function returns a list of operations that can be used to init a release
using distillery on the `build_at` destination of a deployment.
## Examples
iex> deployment = %Akd.Deployment{mix_env: "prod",
...> build_at: Akd.Destination.local("."),
...> publish_to: Akd.Destination.local("."),
...> name: "name",
...> vsn: "0.1.1"}
iex> Akd.Init.Distillery.get_hooks(deployment, [])
[%Akd.Hook{ensure: [%Akd.Operation{cmd: "rm -rf ./rel", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "rm -rf _build/prod", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], ignore_failure: false,
main: [%Akd.Operation{cmd: "mix deps.get \\n mix compile",
cmd_envs: [{"MIX_ENV", "prod"}],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}},
%Akd.Operation{cmd: "mix release.init --name name ",
cmd_envs: [{"MIX_ENV", "prod"}],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], rollback: [], run_ensure: true}]
"""
@spec get_hooks(Akd.Deployment.t, Keyword.t) :: list(Akd.Hook.t)
def get_hooks(deployment, opts \\ []) do
opts = uniq_merge(opts, @default_opts)
destination = Akd.DestinationResolver.resolve(:build, deployment)
    template_cmd =
      opts
      |> Keyword.get(:template)
      |> template_cmd()
name_cmd = name_cmd(deployment.name)
[init_hook(destination, deployment.mix_env, [name_cmd, template_cmd], opts)]
end
# This function takes a destination, a mix_env, switches and options
# and returns an Akd.Hook.t struct using form_hook DSL.
defp init_hook(destination, mix_env, switches, opts) do
cmd_envs = Keyword.get(opts, :cmd_envs, [])
cmd_envs = [{"MIX_ENV", mix_env} | cmd_envs]
form_hook opts do
main setup(), destination, cmd_envs: cmd_envs
main rel_init(switches), destination, cmd_envs: cmd_envs
ensure "rm -rf ./rel", destination
ensure "rm -rf _build/prod", destination
end
end
# This function accumulates all the switches of release.init command
# and forms a new command.
# This currently supports only template
defp rel_init(switches) when is_list(switches) do
Enum.reduce(switches, "mix release.init",
fn(cmd, acc) -> acc <> " " <> cmd end)
end
# These commands are to be ran before calling release init
defp setup(), do: "mix deps.get \n mix compile"
# This function returns sub-command associated with template switch
defp template_cmd(nil), do: ""
defp template_cmd(path), do: "--template #{path}"
# This function returns sub-command associated with name switch
defp name_cmd(nil), do: ""
defp name_cmd(name), do: "--name #{name}"
# This function takes two keyword lists and merges them keeping the keys
# unique. If there are multiple values for a key, it takes the value from
# the first value of keyword1 corresponding to that key.
defp uniq_merge(keyword1, keyword2) do
keyword2
|> Keyword.merge(keyword1)
|> Keyword.new()
end
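  # For example, uniq_merge([run_ensure: false], @default_opts) returns
  # [run_ensure: false, ignore_failure: false].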
end
|
lib/akd/base/init/distillery.ex
| 0.845161
| 0.54825
|
distillery.ex
|
starcoder
|
defmodule Algoliax do
@moduledoc """
Algoliax is a wrapper for the Algolia API
### Configuration
Algoliax needs only the `:api_key` and `:application_id` configs. These can either be set in config files or via the environment variables `"ALGOLIA_API_KEY"` and `"ALGOLIA_APPLICATION_ID"`.
config :algoliax,
api_key: "",
application_id: ""
### Usage
- `:index_name`, specifies the index the objects will be added to. **Required**
- `:object_id`, specifies the attribute used as the Algolia objectID. Default `:id`.
Any valid Algolia settings, using snake case or camel case. Ex: Algolia `attributeForFaceting` can be configured with `:attribute_for_faceting`
On first call to Algolia, we check that the settings on Algolia are up to date.
### Attributes
Objects send to Algolia are built using the attributes defined in the module using `attribute/1`, `attributes/1` or `attribute/2`
### Example
defmodule People do
use Algoliax,
index_name: :people,
object_id: :reference,
attribute_for_faceting: ["age"],
custom_ranking: ["desc(update_at)"]
defstruct reference: nil, last_name: nil, first_name: nil, age: nil
attributes([:first_name, :last_name, :age])
attribute(:updated_at, ~U[2019-07-18 08:45:56.639380Z] |> DateTime.to_unix())
attribute :full_name do
Map.get(model, :first_name, "") <> " " <> Map.get(model, :last_name, "")
end
attribute :nickname do
Map.get(model, :first_name, "") |> String.downcase()
end
end
"""
alias Algoliax.Resources.{Index, Object, Search}
alias Algoliax.{Config, Utils}
@doc """
Search for index values
## Example
iex> People.search("John")
{:ok,
%{
"exhaustiveNbHits" => true,
"hits" => [
%{
"_highlightResult" => %{
"full_name" => %{
"fullyHighlighted" => false,
"matchLevel" => "full",
"matchedWords" => ["john"],
"value" => "Pierre <em>Jon</em>es"
}
},
"age" => 69,
"first_name" => "Pierre",
"full_name" => "<NAME>",
"indexed_at" => 1570908223,
"last_name" => "Jones",
"objectID" => "b563deb6-2a06-4428-8e5a-ca1ecc08f4e2"
},
%{
"_highlightResult" => %{
"full_name" => %{
"fullyHighlighted" => false,
"matchLevel" => "full",
"matchedWords" => ["john"],
"value" => "Glennie <em>Jon</em>es"
}
},
"age" => 27,
"first_name" => "Glennie",
"full_name" => "<NAME>",
"indexed_at" => 1570908223,
"last_name" => "Jones",
"objectID" => "58e8ff8d-2794-41e1-a4ef-6f8db8d432b6"
},
...
],
"hitsPerPage" => 20,
"nbHits" => 16,
"nbPages" => 1,
"page" => 0,
"params" => "query=john",
"processingTimeMS" => 1,
"query" => "john"
}}
"""
@callback search(query :: binary(), params :: map()) ::
{:ok, map()} | {:not_indexable, model :: map()}
@doc """
Search for facet values
## Example
iex> People.search_facet("age")
{:ok,
%{
"exhaustiveFacetsCount" => true,
"facetHits" => [
%{"count" => 22, "highlighted" => "46", "value" => "46"},
%{"count" => 21, "highlighted" => "38", "value" => "38"},
%{"count" => 19, "highlighted" => "54", "value" => "54"},
%{"count" => 19, "highlighted" => "99", "value" => "99"},
%{"count" => 18, "highlighted" => "36", "value" => "36"},
%{"count" => 18, "highlighted" => "45", "value" => "45"},
%{"count" => 18, "highlighted" => "52", "value" => "52"},
%{"count" => 18, "highlighted" => "56", "value" => "56"},
%{"count" => 18, "highlighted" => "59", "value" => "59"},
%{"count" => 18, "highlighted" => "86", "value" => "86"}
],
"processingTimeMS" => 1
}}
"""
@callback search_facet(facet_name :: binary(), facet_query :: binary(), params :: map()) ::
{:ok, map()} | {:not_indexable, model :: map()}
@doc """
Add/update object. The object is added/updated to algolia with the object_id configured.
## Example
people = %People{reference: 10, last_name: "Doe", first_name: "John", age: 20},
People.save_object(people)
"""
@callback save_object(object :: map() | struct()) ::
{:ok, map()} | {:not_indexable, model :: map()}
@doc """
Save multiple objects at once
## Options
* `:force_delete` - if `true` will trigger a "deleteObject" on object that must not be indexed. Default `false`
## Example
peoples = [
%People{reference: 10, last_name: "Doe", first_name: "John", age: 20},
%People{reference: 89, last_name: "Einstein", first_name: "Albert", age: 65}
]
People.save_objects(peoples)
People.save_objects(peoples, force_delete: true)
"""
@callback save_objects(models :: list(map()) | list(struct()), opts :: Keyword.t()) ::
{:ok, map()} | {:error, map()}
@doc """
Fetch an object from Algolia. By passing the model, the object is retrieved using the configured object_id
## Example
people = %People{reference: 10, last_name: "Doe", first_name: "John", age: 20},
People.get_object(people)
"""
@callback get_object(model :: map() | struct()) :: {:ok, map()} | {:error, map()}
@doc """
Delete an object from Algolia. By passing the model, the object is identified using the configured object_id
## Example
people = %People{reference: 10, last_name: "Doe", first_name: "John", age: 20},
People.delete_object(people)
"""
@callback delete_object(model :: map() | struct()) :: {:ok, map()} | {:error, map()}
@doc """
Reindex [Ecto](https://hexdocs.pm/ecto/Ecto.html) specific
"""
@callback reindex(query :: Ecto.Query.t(), opts :: Keyword.t()) ::
{:ok, map()} | {:error, map()}
@callback reindex(opts :: Keyword.t()) ::
{:ok, map()} | {:error, map()}
@doc """
Reindex atomicly [Ecto](https://hexdocs.pm/ecto/Ecto.html) specific
"""
@callback reindex_atomic() :: {:ok, map()} | {:error, map()}
@doc """
Check if the current object must be indexed or not. By default it's always true. To change this behaviour, override this function in your model
## Example
defmodule People do
use Algoliax,
index_name: :people,
attribute_for_faceting: ["age"],
custom_ranking: ["desc(update_at)"],
object_id: :reference
#....
def to_be_indexed?(model) do
model.age > 50
end
end
"""
@callback to_be_indexed?(model :: map()) :: true | false
@doc """
Get index settings from Algolia
"""
@callback get_settings() :: {:ok, map()} | {:error, map()}
@doc """
Configure index
"""
@callback configure_index() :: {:ok, map()} | {:error, map()}
@doc """
Delete index
"""
@callback delete_index() :: {:ok, map()} | {:error, map()}
defmacro __using__(settings) do
quote do
@behaviour Algoliax
import unquote(__MODULE__)
Module.register_attribute(__MODULE__, :index_attributes, accumulate: true)
settings = unquote(settings)
@settings settings
@before_compile unquote(__MODULE__)
@impl Algoliax
def search(query, params \\ %{}) do
Search.search(__MODULE__, @settings, query, params)
end
@impl Algoliax
def search_facet(facet_name, facet_query \\ nil, params \\ %{}) do
Search.search_facet(__MODULE__, @settings, facet_name, facet_query, params)
end
@impl Algoliax
def get_settings do
Index.get_settings(__MODULE__, @settings)
end
@impl Algoliax
def configure_index do
Index.configure_index(__MODULE__, @settings)
end
@impl Algoliax
def delete_index do
Index.delete_index(__MODULE__, @settings)
end
@impl Algoliax
def to_be_indexed?(model) do
true
end
defoverridable(to_be_indexed?: 1)
end
end
defmacro __before_compile__(_env) do
quote do
@impl Algoliax
def save_objects(models, opts \\ []) do
Object.save_objects(
__MODULE__,
@settings,
models,
@index_attributes,
opts
)
end
@impl Algoliax
def save_object(model) do
Object.save_object(__MODULE__, @settings, model, @index_attributes)
end
@impl Algoliax
def delete_object(model) do
Object.delete_object(__MODULE__, @settings, model, @index_attributes)
end
@impl Algoliax
def get_object(model) do
Object.get_object(__MODULE__, @settings, model, @index_attributes)
end
@impl Algoliax
def reindex(opts) when is_list(opts) do
Object.reindex(__MODULE__, @settings, @index_attributes, nil, opts)
end
@impl Algoliax
def reindex(query \\ nil, opts \\ []) do
Object.reindex(__MODULE__, @settings, @index_attributes, query, opts)
end
@impl Algoliax
def reindex_atomic do
Object.reindex_atomic(__MODULE__, @settings, @index_attributes)
end
end
end
@doc """
Define an attribute to be indexed with a computed value, with or without model access
## Example without model access
The model is not available.
attribute :utc_now, DateTime.utc_now()
## Example with model access
The model is available inside the block.
attribute :upcased_name do
model.name |> String.upcase()
end
"""
defmacro attribute(attribute_name, do: block) do
method_attribute_name = Utils.prefix_attribute(attribute_name)
quote do
@index_attributes unquote(method_attribute_name)
def unquote(method_attribute_name)(model) do
var!(model) = model
unquote(block)
end
end
end
defmacro attribute(attribute_name, value) do
method_attribute_name = Utils.prefix_attribute(attribute_name)
quote do
@index_attributes unquote(method_attribute_name)
def unquote(method_attribute_name)(model) do
unquote(value)
end
end
end
@doc """
Define an attribute to be added to the indexed object with a value taken from the model (map/struct)
## Example
attribute :id
"""
defmacro attribute(attribute_name) do
Algoliax.build_attribute(attribute_name)
end
@doc """
Define multiple attributes to be added to the indexed object with a value taken from the model (map/struct)
## Example
attributes :id, :inserted_at
"""
defmacro attributes(attribute_names) do
Enum.map(attribute_names, fn attribute_name ->
Algoliax.build_attribute(attribute_name)
end)
end
@doc """
Generate a secured api key with filter
## Examples
Algoliax.generate_secured_api_key("reference:10")
Algoliax.generate_secured_api_key("reference:10 OR nickname:john")
"""
@spec generate_secured_api_key(filters :: binary()) :: binary()
def generate_secured_api_key(filters) do
query_string = "filters=#{URI.encode_www_form("#{filters}")}"
hmac =
:crypto.hmac(
:sha256,
Config.api_key(),
query_string
)
|> Base.encode16(case: :lower)
Base.encode64(hmac <> query_string)
end
@doc false
def build_attribute(attribute_name) do
method_attribute_name = Utils.prefix_attribute(attribute_name)
quote do
@index_attributes unquote(method_attribute_name)
def unquote(method_attribute_name)(model) do
Map.get(model, unquote(attribute_name))
end
end
end
end
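# A hypothetical usage sketch (not part of the library): a People index module
# combining the `use Algoliax` settings with the `attribute` macros documented
# above. The struct fields, index name and age threshold are assumptions.
defmodule AlgoliaxExample.People do
  use Algoliax,
    index_name: :people,
    attribute_for_faceting: ["age"],
    object_id: :reference

  defstruct [:reference, :last_name, :first_name, :age]

  # plain attributes copied from the model
  attribute(:reference)
  attribute(:age)

  # computed attribute with model access
  attribute :full_name do
    model.first_name <> " " <> model.last_name
  end

  # only index adults (overrides the default of always indexing)
  def to_be_indexed?(model), do: model.age >= 18
end

# With Algolia credentials configured, the generated functions can be used as:
#   AlgoliaxExample.People.save_object(%AlgoliaxExample.People{reference: 1, first_name: "Jane", last_name: "Doe", age: 30})
#   AlgoliaxExample.People.search("jane")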
|
lib/algoliax.ex
| 0.873134
| 0.432003
|
algoliax.ex
|
starcoder
|
defmodule Sanbase.SocialData.TrendingWords do
@moduledoc ~s"""
Module for fetching the list of trending words
This list does NOT calculate the most popular words on crypto social
media overall - those would often be the same, redundant words
such as ‘Bitcoin’, ‘Ethereum’, ‘crypto’ etc.
Instead, our list aims to discover the biggest developing or emerging
stories within the crypto community. That is why each day you’ll see
a new batch of fresh topics, currently gaining steam on crypto social media.
This signals an abnormally high interest in a previously uninspiring
topic, making the list practical for discovering new and developing
talking points in the crypto community.
The results are sourced from more than 1000 crypto-specific social media
channels, including hundreds of Telegram groups, subreddits, Discord groups,
Bitcointalk forums, etc.
"""
use Ecto.Schema
import Sanbase.DateTimeUtils, only: [str_to_sec: 1]
alias Sanbase.ClickhouseRepo
@type word :: String.t()
@type slug :: String.t()
@type interval :: String.t()
@typedoc """
Defines the position in the list of trending words for a given datetime.
If it has an integer value, the word was in the list of trending words at
that datetime; if it is nil, the word was not in that list.
"""
@type position :: non_neg_integer() | nil
@type trending_word :: %{
word: word,
score: float()
}
@type trending_slug :: %{
slug: slug,
score: float()
}
@type word_stat :: %{
datetime: DateTime.t(),
position: position
}
# When calculating the currently trending words, fetch the data for the last
# N hours to ensure that there is some data and that we're not in the middle
# of computing the latest batch
@hours_back_ensure_has_data 3
require Sanbase.Utils.Config, as: Config
schema Config.get(:trending_words_table) do
field(:dt, :utc_datetime)
field(:word, :string)
field(:volume, :float)
field(:volume_normalized, :float)
field(:unique_users, :integer)
field(:score, :float)
field(:source, :string)
# ticker_slug
field(:project, :string)
field(:computed_at, :string)
end
@spec get_trending_words(DateTime.t(), DateTime.t(), interval, non_neg_integer) ::
{:ok, list(trending_word)} | {:error, String.t()}
def get_trending_words(from, to, interval, size) do
{query, args} = get_trending_words_query(from, to, interval, size)
ClickhouseRepo.query_reduce(query, args, %{}, fn
[dt, word, _project, score], acc ->
datetime = DateTime.from_unix!(dt)
elem = %{word: word, score: score}
Map.update(acc, datetime, [elem], fn words -> [elem | words] end)
end)
end
def get_trending_projects(from, to, interval, size) do
{query, args} = get_trending_words_query(from, to, interval, size)
ClickhouseRepo.query_reduce(query, args, %{}, fn
[_dt, _word, nil, _score], acc ->
acc
[dt, _word, project, score], acc ->
datetime = DateTime.from_unix!(dt)
[_ticker, slug] = String.split(project, "_")
elem = %{slug: slug, score: score}
Map.update(acc, datetime, [elem], fn slugs -> [elem | slugs] end)
end)
end
@doc ~s"""
Get a list of the currently trending words
"""
@spec get_currently_trending_words(non_neg_integer()) ::
{:ok, list(trending_word)} | {:error, String.t()}
def get_currently_trending_words(size \\ 10)
def get_currently_trending_words(size) do
now = Timex.now()
case get_trending_words(
Timex.shift(now, hours: -@hours_back_ensure_has_data),
now,
"1h",
size
) do
{:ok, []} ->
{:ok, []}
{:ok, stats} ->
{_, words} =
stats
|> Enum.max_by(fn {dt, _} -> DateTime.to_unix(dt) end)
{:ok, words}
{:error, error} ->
{:error, error}
end
end
@doc ~s"""
Get a list of the currently trending projects
"""
@spec get_currently_trending_projects(non_neg_integer()) ::
{:ok, list(trending_slug)} | {:error, String.t()}
def get_currently_trending_projects(size \\ 10)
def get_currently_trending_projects(size) do
now = Timex.now()
case get_trending_projects(
Timex.shift(now, hours: -@hours_back_ensure_has_data),
now,
"1h",
size
) do
{:ok, []} ->
{:ok, []}
{:ok, stats} ->
{_, projects} =
stats
|> Enum.max_by(fn {dt, _} -> DateTime.to_unix(dt) end)
{:ok, projects}
{:error, error} ->
{:error, error}
end
end
@spec get_word_trending_history(word, DateTime.t(), DateTime.t(), interval, non_neg_integer) ::
{:ok, list(word_stat)} | {:error, String.t()}
def get_word_trending_history(word, from, to, interval, size) do
{query, args} = word_trending_history_query(word, from, to, interval, size)
ClickhouseRepo.query_transform(query, args, fn [dt, position] ->
position = if position > 0, do: position
%{
datetime: DateTime.from_unix!(dt),
position: position
}
end)
|> case do
{:ok, result} -> {:ok, Enum.reject(result, &is_nil(&1.position))}
{:error, error} -> {:error, error}
end
end
@spec get_project_trending_history(slug, DateTime.t(), DateTime.t(), interval, non_neg_integer) ::
{:ok, list(word_stat)} | {:error, String.t()}
def get_project_trending_history(slug, from, to, interval, size) do
{query, args} = project_trending_history_query(slug, from, to, interval, size)
ClickhouseRepo.query_transform(query, args, fn [dt, position] ->
position = if position > 0, do: position
%{
datetime: DateTime.from_unix!(dt),
position: position
}
end)
|> case do
{:ok, result} -> {:ok, Enum.reject(result, &is_nil(&1.position))}
{:error, error} -> {:error, error}
end
end
defp get_trending_words_query(from, to, interval, size) do
query = """
SELECT
t,
word,
project,
total_score AS score
FROM(
SELECT
toUnixTimestamp(intDiv(toUInt32(toDateTime(dt)), ?1) * ?1) AS t,
word,
any(project) AS project,
SUM(score) / 4 AS total_score
FROM #{Config.get(:trending_words_table)}
PREWHERE
dt >= toDateTime(?2) AND
dt < toDateTime(?3) AND
source NOT IN ('twitter', 'bitcointalk') AND
dt = t
GROUP BY t, word
ORDER BY total_score DESC
LIMIT ?4 BY t
)
ORDER BY t, score
"""
args = [str_to_sec(interval), from, to, size]
{query, args}
end
defp word_trending_history_query(word, from, to, interval, size) do
{query, args} = get_trending_words_query(from, to, interval, size)
args_len = length(args)
next_pos = args_len + 1
query =
[
"""
SELECT
t,
toUInt32(indexOf(groupArray(?#{args_len})(word), ?#{next_pos}))
FROM(
""",
query,
"""
)
GROUP BY t
ORDER BY t
"""
]
|> to_string()
args = args ++ [word]
{query, args}
end
defp project_trending_history_query(slug, from, to, interval, size) do
{query, args} = get_trending_words_query(from, to, interval, size)
args_len = length(args)
next_pos = args_len + 1
query =
[
"""
SELECT
t,
toUInt32(indexOf(groupArray(?#{args_len})(project), ?#{next_pos}))
FROM(
""",
query,
"""
)
GROUP BY t
ORDER BY t
"""
]
|> to_string()
ticker = Sanbase.Model.Project.ticker_by_slug(slug)
args = args ++ [ticker <> "_" <> slug]
{query, args}
end
end
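# A hypothetical consumer sketch (not part of this module): fetch the words
# trending right now and print them, highest score first. Assumes a working
# ClickhouseRepo connection and the trending words table configured above.
defmodule Sanbase.SocialData.TrendingWordsExample do
  alias Sanbase.SocialData.TrendingWords

  def print_top_words(size \\ 10) do
    case TrendingWords.get_currently_trending_words(size) do
      {:ok, words} ->
        words
        |> Enum.sort_by(& &1.score, :desc)
        |> Enum.each(fn %{word: word, score: score} -> IO.puts("#{word}: #{score}") end)

      {:error, error} ->
        IO.warn("could not fetch trending words: #{inspect(error)}")
    end
  end
end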
|
lib/sanbase/social_data/trending_words.ex
| 0.869133
| 0.68296
|
trending_words.ex
|
starcoder
|
defmodule OrdMap do
defstruct tuples: nil
@moduledoc """
- A set of functions and a macro for working with **ordered maps**.
- An **ordered map** is a *struct* with a *list of key-value tuples*
where *key* and *value* can be any value.
- It can be serialized to JSON with [Poison](https://github.com/devinus/poison) - you need to add [OrdMap Poison encoder](https://github.com/MartinKavik/ord_map_encoder_poison) to your project dependencies.
## Usage
```
iex> o%{"foo" => "bar"}
%OrdMap{tuples: [{"foo", "bar"}]}
iex> my_ord_map = OrdMap.new([{"foo", 1}, {"bar", 2}])
iex> OrdMap.get(my_ord_map, "bar")
2
iex> my_ord_map = o%{"foo" => o(%{nested: "something"}), "bar" => "two"}
iex> my_ord_map["foo"][:nested]
"something"
iex> my_ord_map = o%{"foo" => 1, "bar" => 2}
iex> Enum.map my_ord_map, fn {key, value} -> {key, value + 1} end
[{"foo", 2}, {"bar", 3}]
```
"""
@behaviour Access
@type key :: any
@type value :: any
@type tuples :: [{key, value}]
@type t :: %OrdMap{tuples: tuples}
@type ord_map :: t
defmacro __using__(_opts) do
quote do
import OrdMap, only: [o: 1]
end
end
@doc """
Macro transforms `Map` to `t:ord_map/0` during compilation.
## Examples
iex> o%{a: 1, b: 2}
%OrdMap{tuples: [a: 1, b: 2]}
iex> o(%{"a" => "x"})
%OrdMap{tuples: [{"a", "x"}]}
"""
defmacro o({type, _meta, args}) when type == :%{} do
quote do
%OrdMap{tuples: unquote(args)}
end
end
@doc """
Deletes the entry in the `t:ord_map/0` having a specific `key`.
If the `key` does not exist, returns the `t:ord_map/0` unchanged.
## Examples
iex> OrdMap.delete(o(%{"a" => 1, b: 2}), "a")
o%{b: 2}
iex> OrdMap.delete(o(%{b: 2}), :a)
o%{b: 2}
"""
@spec delete(t | tuples, key) :: t
def delete(%OrdMap{tuples: tuples}, key), do: delete(tuples, key)
def delete([], _key), do: [] |> OrdMap.new()
def delete([h | _] = tuples, key) when is_tuple(h) and tuple_size(h) == 2 do
Enum.filter(tuples, fn {k, _} -> k != key end) |> OrdMap.new()
end
@doc """
Fetches the value for a specific `key` in the given `t:ord_map/0`.
If the `key` does not exist, returns `:error`.
## Examples
iex> ord_map = o%{"a" => 1}
iex> OrdMap.fetch(ord_map, "a")
{:ok, 1}
iex> ord_map = o%{}
iex> OrdMap.fetch(ord_map, "key")
:error
"""
@spec fetch(t | tuples, key) :: {:ok, value} | :error
def fetch(%OrdMap{tuples: tuples}, key), do: fetch(tuples, key)
def fetch([], _key), do: :error
def fetch([h | _] = tuples, key) when is_tuple(h) and tuple_size(h) == 2 do
case List.keyfind(tuples, key, 0) do
{_, value} -> {:ok, value}
nil -> :error
end
end
@doc """
Creates an `t:ord_map/0` from a `t:tuples/0`.
(Delegates to function `OrdMap.new/1`)
## Examples
iex> [{:b, 1}, {:a, 2}] |> OrdMap.from_tuples
o%{b: 1, a: 2}
"""
@spec from_tuples(tuples) :: t
defdelegate from_tuples(tuples), to: __MODULE__, as: :new
@doc """
Gets the value for a specific `key` in `t:ord_map/0`.
If `key` is present in `t:ord_map/0` with value `value`, then `value` is
returned. Otherwise, `default` is returned (which is `nil` unless
specified otherwise).
## Examples
iex> OrdMap.get(o(%{}), :a)
nil
iex> OrdMap.get(o(%{a: 1}), :a)
1
iex> OrdMap.get(o(%{a: 1}), :b)
nil
iex> OrdMap.get(o(%{"a" => 1}), :b, 3)
3
iex> OrdMap.get([{:a, 2}], :a)
2
"""
@spec get(t | tuples, key, default :: value) :: value
def get(term, key, default \\ nil) do
case fetch(term, key) do
{:ok, value} -> value
:error -> default
end
end
@doc """
Gets the value from `key` and updates it, all in one pass.
The `fun` argument receives the value of `key` (or `nil` if `key`
is not present) and must return a two-element tuple: the "get" value
(the retrieved value, which can be operated on before being returned)
and the new value to be stored under `key`. The `fun` may also
return `:pop`, implying the current value shall be removed from the
`t:ord_map/0` and returned.
The returned value is a tuple with the "get" value returned by
`fun` and a new `t:ord_map/0` with the updated value under `key`.
## Examples
iex> OrdMap.get_and_update(o(%{a: 1}), :a, fn current_value ->
...> {current_value, "new value!"}
...> end)
{1, o%{a: "new value!"}}
iex> OrdMap.get_and_update(o(%{a: 1}), :b, fn current_value ->
...> {current_value, "new value!"}
...> end)
{nil, o%{a: 1, b: "new value!"}}
iex> OrdMap.get_and_update(o(%{a: 1}), :a, fn _ -> :pop end)
{1, o%{}}
iex> OrdMap.get_and_update(o(%{a: 1}), :b, fn _ -> :pop end)
{nil, o%{a: 1}}
"""
@spec get_and_update(t | tuples, key, (value -> {get, value} | :pop)) :: {get, t} when get: term
def get_and_update(%OrdMap{tuples: tuples}, key, fun), do: get_and_update(tuples, key, fun)
def get_and_update([], key, fun), do: _get_and_update([], key, fun)
def get_and_update([h | _] = tuples, key, fun)
when is_tuple(h) and tuple_size(h) == 2,
do: _get_and_update(tuples, key, fun)
defp _get_and_update(tuples, key, fun) when is_list(tuples) and is_function(fun) do
case tuples |> OrdMap.get(key) |> fun.() do
{get_value, update_value} ->
new_data = put(tuples, key, update_value)
{get_value, new_data}
:pop ->
pop(tuples, key)
end
end
@doc """
Merges two `t:ord_map/0`s into one.
All keys in `ord_map2` will be added to `ord_map1`, overriding any existing one
(i.e., the keys in `ord_map2` "have precedence" over the ones in `ord_map1`).
## Examples
iex> OrdMap.merge(o(%{a: 1, b: 2}), o%{a: 3, d: 4})
o%{a: 3, b: 2, d: 4}
"""
@spec merge(t, t) :: t
def merge(ord_map1, ord_map2) do
List.foldl(ord_map2.tuples, ord_map1.tuples, &List.keystore(&2, elem(&1, 0), 0, &1))
|> OrdMap.new()
end
@doc """
Creates an empty `t:ord_map/0`.
(See `new/1` for creating `t:ord_map/0` from other types)
## Examples
iex> OrdMap.new()
o%{}
"""
@spec new :: t
def new(), do: %OrdMap{tuples: []}
@doc """
Creates an `t:ord_map/0` from a `Map`,
from a `t:tuples/0` or from other `t:ord_map/0`.
(See `new/0` creating an empty `t:ord_map/0`)
## Examples
iex> OrdMap.new(%{a: 2, b: 1})
o%{a: 2, b: 1}
iex> OrdMap.new([a: 3, b: 4])
o%{a: 3, b: 4}
iex> OrdMap.new(%OrdMap{tuples: [{"a", 5}, {"b", 6}]})
o%{"a" => 5, "b" => 6}
iex> OrdMap.new([])
o%{}
"""
@spec new(t | map | tuples) :: t
def new(%OrdMap{} = ord_map), do: ord_map
def new(%{} = map), do: Map.to_list(map) |> OrdMap.new()
def new([]), do: %OrdMap{tuples: []}
def new([h | _] = tuples) when is_tuple(h) and tuple_size(h) == 2, do: %OrdMap{tuples: tuples}
@doc """
Returns all keys from `t:ord_map/0`.
## Examples
iex> OrdMap.keys(o%{a: 1, b: 2})
[:a, :b]
iex> OrdMap.keys([{"a", 2}, {"b", 3}])
["a", "b"]
iex> OrdMap.keys([])
[]
"""
@spec keys(t | tuples) :: [value]
def keys(%OrdMap{tuples: tuples}), do: keys(tuples)
def keys([]), do: []
def keys([h | _] = tuples) when is_tuple(h) and tuple_size(h) == 2 do
Enum.map(tuples, &elem(&1, 0))
end
@doc """
Returns and removes the value associated with `key` in the `t:ord_map/0`.
## Examples
iex> OrdMap.pop(o(%{"a" => 1}), "a")
{1, o%{}}
iex> OrdMap.pop(o(%{a: 1}), :b)
{nil, o%{a: 1}}
iex> OrdMap.pop(o(%{a: 1}), :b, 3)
{3, o%{a: 1}}
"""
@spec pop(t | tuples, key, default :: value) :: {value, t}
def pop(term, key, default \\ nil)
def pop(%OrdMap{tuples: tuples}, key, default), do: pop(tuples, key, default)
def pop([], _key, default), do: {default, [] |> OrdMap.new()}
def pop([h | _] = tuples, key, default) when is_tuple(h) and tuple_size(h) == 2 do
case fetch(tuples, key) do
{:ok, value} ->
{value, delete(tuples, key) |> OrdMap.new()}
:error ->
{default, tuples |> OrdMap.new()}
end
end
@doc """
Puts the given `value` under `key`.
If a previous value is already stored, the value is overridden.
## Examples
iex> OrdMap.put(o(%{a: 1}), :b, 2)
o%{a: 1, b: 2}
iex> OrdMap.put(o(%{"a" => 1, b: 2}), "a", 3)
o%{"a" => 3, b: 2}
"""
@spec put(t | tuples, key, value) :: t
def put(%OrdMap{tuples: tuples}, key, value), do: put(tuples, key, value)
def put([], key, value), do: [{key, value}] |> OrdMap.new()
def put([h | _] = tuples, key, value) when is_tuple(h) and tuple_size(h) == 2 do
List.keystore(tuples, key, 0, {key, value}) |> OrdMap.new()
end
@doc """
Alters the value stored under `key` to `value`, but only
if the entry `key` already exists in `t:ord_map/0`.
## Examples
iex> OrdMap.replace(o(%{a: 1}), :b, 2)
o%{a: 1}
iex> OrdMap.replace(o(%{a: 1, b: 2}), :a, 3)
o%{a: 3, b: 2}
iex> OrdMap.replace([{"c", 5},{"d", 6}], "c", 7)
o%{"c" => 7, "d" => 6}
iex> OrdMap.replace([], "c", 7)
o%{}
"""
@spec replace(t | tuples, key, value) :: t
def replace(%OrdMap{tuples: tuples}, key, value), do: replace(tuples, key, value)
def replace([], _key, _value), do: [] |> OrdMap.new()
def replace([h | _] = tuples, key, value) when is_tuple(h) and tuple_size(h) == 2 do
List.keyreplace(tuples, key, 0, {key, value}) |> OrdMap.new()
end
@doc """
Returns all values from `t:ord_map/0`.
## Examples
iex> OrdMap.values(o%{a: 1, b: 2})
[1, 2]
iex> OrdMap.values([a: 2, b: 3])
[2, 3]
iex> OrdMap.values([])
[]
"""
@spec values(t | tuples) :: [value]
def values(%OrdMap{tuples: tuples}) do
values(tuples)
end
def values([]), do: []
def values([h | _] = tuples) when is_tuple(h) and tuple_size(h) == 2 do
Enum.map(tuples, &elem(&1, 1))
end
end
defimpl Enumerable, for: OrdMap do
alias Enumerable.List, as: EList
def count(%OrdMap{tuples: tuples}), do: EList.count(tuples)
def member?(%OrdMap{tuples: tuples}, element), do: EList.member?(tuples, element)
def reduce(%OrdMap{tuples: tuples}, acc, fun), do: EList.reduce(tuples, acc, fun)
def slice(%OrdMap{tuples: tuples}), do: EList.slice(tuples)
end
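# A short usage sketch (illustrative, not part of the library): because OrdMap
# implements the Access behaviour and Enumerable, it composes with Kernel's
# nested-access macros and the Enum module while preserving insertion order.
defmodule OrdMapExample do
  import OrdMap, only: [o: 1]

  def demo do
    settings = o(%{"server" => o(%{"port" => 4000}), "debug" => false})

    # nested read and update through the Access behaviour
    4000 = get_in(settings, ["server", "port"])
    settings = put_in(settings, ["server", "port"], 4001)

    # enumeration preserves insertion order
    ["server", "debug"] = Enum.map(settings, fn {key, _value} -> key end)

    settings
  end
end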
|
lib/ord_map.ex
| 0.917728
| 0.814975
|
ord_map.ex
|
starcoder
|
defmodule Elixium.Mnemonic do
alias Elixium.Utilities
@leading_zeros_for_mnemonic 8
@leading_zeros_of_mnemonic 11
@regex_chunk_from_entropy Regex.compile!(".{1,#{@leading_zeros_of_mnemonic}}")
@regex_chunk_to_entropy Regex.compile!(".{1,#{@leading_zeros_for_mnemonic}}")
@moduledoc """
Functionality for generating mnemonics
"""
@words to_string(:code.priv_dir(:elixium_core)) <> "/words.txt"
|> File.stream!()
|> Stream.map(&String.trim/1)
|> Enum.to_list()
@doc """
Returns the checksum length, in bits, for the given entropy binary
"""
@spec checksum_length(binary) :: non_neg_integer()
def checksum_length(entropy_bytes) do
entropy_bytes
|> bit_size()
|> div(32)
end
@doc """
Normalizes the input if needed: a hex-encoded string is decoded to raw bytes, a raw binary is passed through unchanged
"""
@spec maybe_normalize(binary) :: binary
def maybe_normalize(binary) do
binary
|> String.valid?()
|> normalize(binary)
end
@doc """
Generates a mnemonic seed from a private address (entropy)
"""
@spec from_entropy(binary) :: String.t()
def from_entropy(binary) do
binary
|> maybe_normalize()
|> append_checksum()
|> mnemonic()
end
@doc """
Recovers the private seed from a mnemonic seed
"""
@spec to_entropy(String.t()) :: binary
def to_entropy(mnemonic) do
mnemonic
|> indicies()
|> bytes()
|> entropy()
end
defp append_checksum(bytes) do
bytes
|> checksum()
|> append(bytes)
end
defp checksum(entropy) do
entropy
|> Utilities.sha256()
|> to_binary_string()
|> take_first(entropy)
end
defp to_binary_string(bytes) do
bytes
|> :binary.bin_to_list()
|> Enum.map(&binary_for_mnemonic/1)
|> Enum.join()
end
defp binary_for_mnemonic(byte), do: to_binary(byte, @leading_zeros_for_mnemonic)
defp to_binary(byte, leading_zeros) do
byte
|> Integer.to_string(2)
|> String.pad_leading(leading_zeros, "0")
end
defp take_first(binary_string, bytes) do
bytes
|> checksum_range()
|> slice(binary_string)
end
defp checksum_range(bytes) do
bytes
|> checksum_length()
|> range()
end
defp range(length), do: Range.new(0, length - 1)
defp slice(range, binary_string), do: String.slice(binary_string, range)
defp append(checksum, bytes), do: to_binary_string(bytes) <> checksum
defp mnemonic(entropy) do
@regex_chunk_from_entropy
|> Regex.scan(entropy)
|> List.flatten()
|> Enum.map(&word/1)
|> Enum.join(" ")
end
defp word(binary) do
binary
|> String.to_integer(2)
|> pick_word()
end
defp pick_word(index), do: Enum.at(@words, index)
defp indicies(mnemonic) do
mnemonic
|> String.split()
|> Enum.map(&word_binary_index/1)
|> Enum.join()
end
defp word_binary_index(word) do
@words
|> Enum.find_index(&(&1 == word))
|> binary_of_index()
end
defp binary_of_index(index), do: to_binary(index, @leading_zeros_of_mnemonic)
defp bytes(bits) do
bits
|> String.length()
|> div(33)
|> Kernel.*(32)
|> range()
|> slice(bits)
end
defp entropy(entropy_bits) do
@regex_chunk_to_entropy
|> Regex.scan(entropy_bits)
|> List.flatten()
|> Enum.map(&String.to_integer(&1, 2))
|> :binary.list_to_bin()
end
defp normalize(true, string), do: Base.decode16!(string, case: :mixed)
defp normalize(false, binary), do: binary
end
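# A hypothetical round-trip sketch (not part of the module): encode freshly
# generated entropy as a mnemonic phrase and decode it back. The entropy is
# passed hex-encoded, which maybe_normalize/1 decodes to raw bytes.
defmodule MnemonicExample do
  alias Elixium.Mnemonic

  def round_trip do
    # 16 bytes (128 bits) of entropy yields a 12-word phrase
    entropy = :crypto.strong_rand_bytes(16)
    phrase = Mnemonic.from_entropy(Base.encode16(entropy))

    # decoding the phrase recovers the original entropy
    ^entropy = Mnemonic.to_entropy(phrase)
    phrase
  end
end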
|
lib/encoding/mnemonic.ex
| 0.774413
| 0.477615
|
mnemonic.ex
|
starcoder
|
defmodule Jumper do
@moduledoc """
Jump consistent hashing for Elixir, without the need of C compilers.
This module exposes nothing beyond a simple `slot/2`, used to slot a key into a
range of buckets `[0, buckets)`. This offers a fairly good consistency rate with
changing bucket counts, such that:
iex> Jumper.slot(5000, 10)
4
iex> Jumper.slot(5000, 11)
4
iex> Jumper.slot(5000, 12)
4
This can be used for routing inside systems where destinations change often and
can come and go (so there's a variable number of destinations). This is the main
advantage; static destination counts can simply `hash(key) % N` as a router.
This implementation is based on the algorithm described in the original paper
found [here](https://arxiv.org/ftp/arxiv/papers/1406/1406.2294.pdf).
"""
use Bitwise
# 2^31, the jump constant from the paper
@jump 1 <<< 31
# magic multiplier used for jumping
@mage 0x27BB2EE687B0B0FD
# a 64 bit mask for wrapping
@mask 0xFFFFFFFFFFFFFFFF
@doc """
Slots a key into a range of buckets of the form `[0, buckets)`.
The key and bucket count must both be integers; to slot a non-numeric value,
it's possible to use `:erlang.phash2/1` to generate a quick hash value.
## Examples
iex> Jumper.slot(5000, 10)
4
iex> Jumper.slot(5000, 11)
4
iex> Jumper.slot(5000, 12)
4
"""
@spec slot(key :: integer, buckets :: integer) :: integer
def slot(key, buckets) when is_integer(key) and is_integer(buckets),
do: jump_consistent_hash(key, buckets, -1, 0)
# Recursive jump consistent hash algorithm, using guards to determine when the
# algorithm has completed. Nothing special to see here beyond the algorithm as
# defined in the paper released by Google (see module documentation for links).
defp jump_consistent_hash(key, buckets, _bucket, j) when j < buckets do
new_key = key * @mage + 1 &&& @mask
new_jump = trunc((j + 1) * (@jump / ((new_key >>> 33) + 1)))
jump_consistent_hash(new_key, buckets, j, new_jump)
end
# Exit clause for jump hashing, which just emits the bucket
defp jump_consistent_hash(_key, _buckets, bucket, _j), do: bucket
end
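# A small routing sketch built on slot/2 (illustrative only): map an arbitrary
# term to one of N destinations. Non-numeric keys are first reduced to an
# integer with :erlang.phash2/1, as suggested in the docs above.
defmodule JumperExample do
  @nodes [:node_a, :node_b, :node_c]

  def route(key) do
    index = Jumper.slot(:erlang.phash2(key), length(@nodes))
    Enum.at(@nodes, index)
  end
end

# JumperExample.route("user:42") always maps the same key to the same node,
# and most keys keep their node when a destination is added to the list.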
|
deps/jumper/lib/jumper.ex
| 0.915969
| 0.614336
|
jumper.ex
|
starcoder
|
defmodule AlphaVantage.Cryptocurrencies do
@moduledoc """
A set of functions for fetching cryptocurrency data from [Alpha Vantage](https://www.alphavantage.co/documentation/#digital-currency).
"""
alias AlphaVantage.Gateway
@doc """
Returns the realtime exchange rate for any pair of digital currency (e.g., Bitcoin) and physical currency (e.g., USD).
Data returned for physical currency (Forex) pairs also include realtime bid and ask prices.
Please reference https://www.alphavantage.co/documentation/#crypto-exchange for more detail.
## Parameters
**Required**
- `:from_currency`
The currency you would like to get the exchange rate for. It can either be a physical currency or digital/crypto currency.
For example: `"USD"`, `"EUR"`, or `"BTC"`
- `:to_currency`
The destination currency for the exchange rate. It can either be a physical currency or digital/crypto currency.
For example: `"USD"`, `"EUR"`, or `"BTC"`
_Optional_ (accepted as a keyword list)
- `:datatype`
- `"map"` returns a map (default);
- `"json"` returns JSON format;
*Please note that `"csv"` is not yet supported by Alpha Vantage for this function.*
"""
@spec exchange_rate(String.t(), String.t(), Keyword.t()) :: Gateway.response()
def exchange_rate(from_currency, to_currency, opts \\ []) do
params = [
function: "CURRENCY_EXCHANGE_RATE",
from_currency: from_currency,
to_currency: to_currency
]
AlphaVantage.query(Keyword.merge(params, opts))
end
@doc """
Returns the real-time price of AVC with the United States Dollar (USD) as the base currency.
The AVC price is streamed from the major decentralized exchange (DEX) Uniswap.
Please reference https://www.alphavantage.co/documentation/#avc-price for more detail.
## Parameters
_Optional_ (accepted as a keyword list)
- `:datatype`
- `"map"` returns a map (default);
- `"json"` returns JSON format;
- `"csv"` returns a CSV (comma separated value) file string.
"""
@spec alpha_vantage_coin(Keyword.t()) :: Gateway.response()
def alpha_vantage_coin(opts \\ []) do
params = [function: "AVC_PRICE"]
AlphaVantage.query(Keyword.merge(params, opts))
end
@doc """
Returns intraday time series (timestamp, open, high, low, close, volume) of the cryptocurrency specified, updated realtime.
Please reference https://www.alphavantage.co/documentation/#crypto-intraday for more detail.
## Parameters
**Required**
- `:symbol`
The digital/crypto currency of your choice.
It can be any of the currencies in the [digital currency list](https://www.alphavantage.co/digital_currency_list/).
For example: `"BTC"`
- `:market`
The exchange market of your choice.
It can be any of the markets in the [market list](https://www.alphavantage.co/physical_currency_list/).
For example: `"USD"`
- `:interval`
Time interval between two consecutive data points in the time series.
The following values are supported and accepted as strings: `"1min"`, `"5min"`, `"15min"`, `"30min"`, `"60min"`
_Optional_ (accepted as a keyword list)
- `:datatype`
- `"map"` returns a map (default);
- `"json"` returns JSON format;
- `"csv"` returns a CSV (comma separated value) file string.
- `:outputsize`
- `"compact"` returns only the latest 100 data points in the intraday time series (default);
- `"full"` returns the full-length intraday time series.
The `"compact"` option is recommended if you would like to reduce the data size of each API call.
"""
@spec intraday(String.t(), String.t(), String.t(), Keyword.t()) :: Gateway.response()
def intraday(symbol, market, interval, opts \\ []) do
params = [function: "CRYPTO_INTRADAY", symbol: symbol, market: market, interval: interval]
AlphaVantage.query(Keyword.merge(params, opts))
end
@doc """
Returns the daily historical time series for a digital currency (e.g., BTC) traded on a specific market (e.g., CNY/Chinese Yuan), refreshed daily at midnight (UTC).
Prices and volumes are quoted in both the market-specific currency and USD.
Please reference https://www.alphavantage.co/documentation/#currency-daily for more detail.
## Parameters
**Required**
- `:symbol`
The digital/crypto currency of your choice.
It can be any of the currencies in the [digital currency list](https://www.alphavantage.co/digital_currency_list/).
For example: `"BTC"`
- `:market`
The exchange market of your choice.
It can be any of the markets in the [market list](https://www.alphavantage.co/physical_currency_list/).
For example: `"CNY"`
_Optional_ (accepted as a keyword list)
- `:datatype`
- `"map"` returns a map (default);
- `"json"` returns JSON format;
- `"csv"` returns a CSV (comma separated value) file string.
"""
@spec daily(String.t(), String.t(), Keyword.t()) :: Gateway.response()
def daily(symbol, market, opts \\ []) do
params = [function: "DIGITAL_CURRENCY_DAILY", symbol: symbol, market: market]
AlphaVantage.query(Keyword.merge(params, opts))
end
@doc """
Returns the weekly historical time series for a digital currency (e.g., BTC) traded on a specific market (e.g., CNY/Chinese Yuan), refreshed daily at midnight (UTC).
Prices and volumes are quoted in both the market-specific currency and USD.
Please reference https://www.alphavantage.co/documentation/#currency-weekly for more detail.
## Parameters
**Required**
- `:symbol`
The digital/crypto currency of your choice.
It can be any of the currencies in the [digital currency list](https://www.alphavantage.co/digital_currency_list/).
For example: `"BTC"`
- `:market`
The exchange market of your choice.
It can be any of the markets in the [market list](https://www.alphavantage.co/physical_currency_list/).
For example: `"CNY"`
_Optional_ (accepted as a keyword list)
- `:datatype`
- `"map"` returns a map (default);
- `"json"` returns JSON format;
- `"csv"` returns a CSV (comma separated value) file string.
"""
@spec weekly(String.t(), String.t(), Keyword.t()) :: Gateway.response()
def weekly(symbol, market, opts \\ []) do
params = [function: "DIGITAL_CURRENCY_WEEKLY", symbol: symbol, market: market]
AlphaVantage.query(Keyword.merge(params, opts))
end
@doc """
Returns the monthly historical time series for a digital currency (e.g., BTC) traded on a specific market (e.g., CNY/Chinese Yuan), refreshed daily at midnight (UTC).
Prices and volumes are quoted in both the market-specific currency and USD.
Please reference https://www.alphavantage.co/documentation/#currency-monthly for more detail.
## Parameters
**Required**
- `:symbol`
The digital/crypto currency of your choice.
It can be any of the currencies in the [digital currency list](https://www.alphavantage.co/digital_currency_list/).
For example: `"BTC"`
- `:market`
The exchange market of your choice.
It can be any of the markets in the [market list](https://www.alphavantage.co/physical_currency_list/).
For example: `"CNY"`
_Optional_ (accepted as a keyword list)
- `:datatype`
- `"map"` returns a map (default);
- `"json"` returns JSON format;
- `"csv"` returns a CSV (comma separated value) file string.
"""
@spec monthly(String.t(), String.t(), Keyword.t()) :: Gateway.response()
def monthly(symbol, market, opts \\ []) do
params = [function: "DIGITAL_CURRENCY_MONTHLY", symbol: symbol, market: market]
AlphaVantage.query(Keyword.merge(params, opts))
end
end
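# A hypothetical call sketch (not part of the library): a realtime BTC/USD
# rate plus a daily series. Assumes a valid Alpha Vantage API key is
# configured for AlphaVantage.query/1.
defmodule CryptocurrenciesExample do
  alias AlphaVantage.Cryptocurrencies

  def btc_snapshot do
    with {:ok, rate} <- Cryptocurrencies.exchange_rate("BTC", "USD"),
         {:ok, daily} <- Cryptocurrencies.daily("BTC", "USD") do
      {:ok, %{rate: rate, daily: daily}}
    end
  end
end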
|
lib/alpha_vantage/cryptocurrencies.ex
| 0.89809
| 0.771241
|
cryptocurrencies.ex
|
starcoder
|
defmodule RealtimeClient do
@moduledoc """
Client library to work with [Realtime](https://github.com/supabase/realtime).
It's mostly a wrapper around [Phoenix Client](https://github.com/mobileoverlord/phoenix_client).
## Getting started
First you have to create a client Socket:
options = [
url: "ws://realtime-server:4000/socket/websocket",
]
{:ok, socket} = RealtimeClient.socket(options)
Once you have a connected socket, you can subscribe to topics:
{:ok, channel} = RealtimeClient.subscribe(socket, "realtime:*")
You can also subscribe to a specific channel (row level changes):
{:ok, channel} = RealtimeClient.subscribe(socket, "realtime:public:users:id=eq.42")
Consuming events is done with `handle_info` callbacks:
alias PhoenixClient.Message
# handle `INSERT` events
def handle_info(%Message{event: "INSERT", payload: %{"record" => record}} = msg, state) do
# do something with record
{:noreply, state}
end
# handle `DELETE` events
def handle_info(%Message{event: "DELETE", payload: %{"record" => record}} = msg, state) do
IO.inspect(record, label: "DELETE")
{:noreply, state}
end
# match all cases not handled above
def handle_info(%Message{} = msg, state) do
{:noreply, state}
end
## Configuration
Socket endpoint and parameters can also be configured:
config :realtime_client,
endpoint: "ws://realtime-server:4000/socket/websocket",
apikey: "<KEY>"
Creating the socket can then be done with:
{:ok, socket} = RealtimeClient.socket()
"""
alias PhoenixClient.{Socket, Channel}
@doc false
def init(opts) do
opts = init_opts(opts)
Socket.init(opts)
end
@doc false
def start_link(opts) do
name = Keyword.get(opts, :name, Realtime.Socket)
opts = init_opts(opts)
Socket.start_link(opts, name: name)
end
@doc false
def child_spec(opts) do
socket_opts = init_opts(opts)
Socket.child_spec({socket_opts, name: Realtime.Socket})
end
def subscribe(topic) do
case Channel.join(Realtime.Socket, topic) do
{:ok, _, channel} -> {:ok, channel}
error -> error
end
end
@doc """
Subscribes to a topic through given socket.
In cases where the socket is not connected (yet), the function is
retried (see `subscribe/4`).
* `socket` - The name or pid of the client socket
* `topic` - The topic to subscribe to
"""
def subscribe(socket, topic) do
subscribe(socket, topic, 3)
end
def subscribe(socket, topic, retries, error \\ nil)
def subscribe(_socket, _topic, 0, error) do
error
end
def subscribe(socket, topic, retries, _error) do
case Channel.join(socket, topic) do
{:ok, _, channel} ->
{:ok, channel}
error ->
Process.sleep(100)
subscribe(socket, topic, retries - 1, error)
end
end
@doc """
Creates a new client socket.
* `opts` - The optional list of options. See below.
## Options
* `url` - the url of the websocket to connect to
* `params` - the params to send to the websocket, e.g. to pass an api key
"""
def socket(opts \\ []) do
init_opts(opts)
|> Socket.start_link()
end
defp init_opts(opts) do
url =
Keyword.get_lazy(opts, :url, fn -> Application.fetch_env!(:realtime_client, :endpoint) end)
params =
case Keyword.get(opts, :params, %{}) |> Map.get(:apikey) do
nil ->
apikey = Application.fetch_env!(:realtime_client, :apikey)
%{apikey: apikey}
_ ->
opts[:params]
end
[url: url, params: params]
end
end
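# A hypothetical listener sketch tying the pieces above together: a GenServer
# that opens a socket, subscribes to row-level changes and logs INSERT events.
# The endpoint URL, api key and topic are placeholders.
defmodule MyApp.UserListener do
  use GenServer
  alias PhoenixClient.Message

  def start_link(opts), do: GenServer.start_link(__MODULE__, opts, name: __MODULE__)

  @impl true
  def init(_opts) do
    {:ok, socket} =
      RealtimeClient.socket(
        url: "ws://realtime-server:4000/socket/websocket",
        params: %{apikey: "your-api-key"}
      )

    {:ok, _channel} = RealtimeClient.subscribe(socket, "realtime:public:users")
    {:ok, %{socket: socket}}
  end

  @impl true
  def handle_info(%Message{event: "INSERT", payload: %{"record" => record}}, state) do
    IO.inspect(record, label: "INSERT")
    {:noreply, state}
  end

  def handle_info(_message, state), do: {:noreply, state}
end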
|
lib/realtime_client.ex
| 0.78345
| 0.458712
|
realtime_client.ex
|
starcoder
|
defmodule SpiritFingers.SimHash do
@moduledoc """
SimHash Module which delegates to Rust NIFs which will
perform the hashing, similarity and distance calculations.
"""
use Rustler, otp_app: :spirit_fingers, crate: "simhash"
@typedoc "unsigned 64 bit integer represenation of simhash"
@type t :: pos_integer()
@typedoc """
Similarity between two `SimHash.t`, represented as a value
between 0.0 and 1.0.
* `0.0` means no similarity,
* `1.0` means identical.
"""
@type similarity :: float()
@typedoc """
64 bit floating point representation of the
[Hamming Distance](https://en.wikipedia.org/wiki/Hamming_distance)
between two `SimHash.t`.
"""
@type distance :: float()
@doc """
Calculate the `SimHash.t` of a binary, split by whitespace.
## Examples
iex> SpiritFingers.SimHash.simhash("The cat sat on the mat")
{:ok, 2595200813813010837}
iex> SpiritFingers.SimHash.simhash("The cat sat under the mat")
{:ok, 2595269945604666783}
iex> SpiritFingers.SimHash.simhash("Why the lucky stiff")
{:ok, 1155526875459215761}
"""
@spec simhash(binary()) :: {:ok, t()}
def simhash(_bin), do: :erlang.nif_error(:nif_not_loaded)
@doc """
Bitwise Hamming distance of two `SimHash.t` hashes.
## Examples
iex> SpiritFingers.SimHash.hamming_distance(0, 0)
{:ok, 0.0}
iex> SpiritFingers.SimHash.hamming_distance(0b1111111, 0b0000000)
{:ok, 7.0}
iex> SpiritFingers.SimHash.hamming_distance(0b0100101, 0b1100110)
{:ok, 3.0}
"""
@spec hamming_distance(t(), t()) :: {:ok, distance()}
def hamming_distance(_hash0, _hash1), do: :erlang.nif_error(:nif_not_loaded)
@doc """
Calculate the similarity (`t:similarity/0`) of two hashes.
`0.0` means no similarity, `1.0` means identical.
## Examples
iex> SpiritFingers.SimHash.hash_similarity(0, 0)
{:ok, 1.0}
iex> SpiritFingers.SimHash.hash_similarity(0xFFFFFFFFFFFFFFFF, 0)
{:ok, 0.0}
iex> SpiritFingers.SimHash.hash_similarity(0xFFFFFFFF, 0)
{:ok, 0.5}
"""
@spec hash_similarity(t(), t()) :: {:ok, similarity()}
def hash_similarity(_hash0, _hash1), do: :erlang.nif_error(:nif_not_loaded)
@doc """
Calculate the simhash similarity (`t:similarity/0`) of two strings, split by whitespace.
## Examples
iex> SpiritFingers.SimHash.similarity("Stop hammertime", "Stop hammertime")
{:ok, 1.0}
iex> SpiritFingers.SimHash.similarity("Hocus pocus", "Hocus pocus pilatus pas")
{:ok, 0.9375}
iex> SpiritFingers.SimHash.similarity("Peanut butter", "Strawberry cocktail")
{:ok, 0.59375}
"""
@spec similarity(binary(), binary()) :: {:ok, similarity()}
def similarity(_text0, _text1), do: :erlang.nif_error(:nif_not_loaded)
end
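# A near-duplicate check sketch (illustrative): treat two texts as duplicates
# when their simhash similarity clears a threshold. The 0.9 threshold is an
# assumption, not a library recommendation.
defmodule SimHashExample do
  alias SpiritFingers.SimHash

  @threshold 0.9

  def near_duplicates?(text_a, text_b) do
    {:ok, similarity} = SimHash.similarity(text_a, text_b)
    similarity >= @threshold
  end
end

# SimHashExample.near_duplicates?("Hocus pocus", "Hocus pocus pilatus pas")
# #=> true (0.9375 >= 0.9)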
|
lib/simhash.ex
| 0.907985
| 0.565269
|
simhash.ex
|
starcoder
|
use Croma
defmodule RaftKV.SplitMergePolicy do
defmodule MergeThresholdRatio do
use Croma.SubtypeOfFloat, min: 0.0, max: 1.0, default: 0.5
end
@moduledoc """
An Elixir struct to specify when to split/merge shards in a keyspace.
`:raft_kv` uses the following 3 stats to judge whether splitting/merging should be done:
1. number of keys in a shard
2. aggregated data size (in an arbitrary unit) in a shard
3. current load (computing resource required, in an arbitrary unit) of a shard
Fields:
- `:max_shards`
Maximum number of shards.
If number of shards is the same as this value, no further split occurs.
- `:min_shards`
Minimum number of shards.
If number of shards is the same as this value, no further merge occurs.
- `:max_keys_per_shard`
Threshold number of keys for shard split.
Shards that contains more keys than this value become candidates for split.
If `nil`, shards are not split due to number of keys.
Defaults to `nil`.
- `:max_size_per_shard`
Threshold size for shard split.
Shards whose aggregated size exceeds this value become candidates for split.
If `nil`, shards are not split due to size.
Defaults to `nil`.
- `:max_load_per_shard`
Threshold load for shard split.
Shards that have been experiencing load above this value become candidates for split.
If `nil`, shards are not split due to load.
Defaults to `nil`.
- `:load_per_query_to_missing_key`
`RaftKV.query/4` returns `{:error, :key_not_found}` if target `key` does not exist.
This value is used as the "load" for such queries.
Defaults to `0`.
- `:merge_threshold_ratio`
For each of the 3 types of split thresholds above (`:max_keys_per_shard`, `:max_size_per_shard` and `:max_load_per_shard`),
merge thresholds are calculated by multiplying this ratio.
Consecutive 2 shards become candidates for merge if they together
(1) contain less keys than the threshold,
(2) contain smaller size than the threshold, and
(3) experience less load than the threshold.
Defaults to `#{MergeThresholdRatio.default()}`.
"""
use Croma.Struct, fields: [
max_shards: Croma.PosInteger,
min_shards: {Croma.PosInteger, [default: 1]},
max_keys_per_shard: Croma.TypeGen.nilable(Croma.PosInteger),
max_size_per_shard: Croma.TypeGen.nilable(Croma.PosInteger),
max_load_per_shard: Croma.TypeGen.nilable(Croma.PosInteger),
load_per_query_to_missing_key: {Croma.NonNegInteger, [default: 0]},
merge_threshold_ratio: MergeThresholdRatio,
]
def valid?(p) do
super(p) and check?(p)
end
defun check?(%__MODULE__{min_shards: min, max_shards: max}) :: boolean do
min <= max
end
end
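# An illustrative policy (the field values are assumptions): keep between 2 and
# 64 shards, split a shard once it holds more than 10_000 keys, and consider
# merging neighbours when together they fall below half of that (the default
# merge_threshold_ratio of 0.5).
defmodule SplitMergePolicyExample do
  def policy do
    %RaftKV.SplitMergePolicy{
      min_shards: 2,
      max_shards: 64,
      max_keys_per_shard: 10_000
    }
  end
end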
|
lib/raft_kv/split_merge_policy.ex
| 0.871844
| 0.777553
|
split_merge_policy.ex
|
starcoder
|
defmodule SiteGenerator.Helpers do
def format_timestamp(time) do
# Sites occasionally set cookies with insane expiration times (> 9999-12-31 23:59:59),
# which DateTime balks at. In those cases, we forcibly set the time to 9999-12-31 23:59:59.
time
|> case do
timestamp when timestamp > 253402300799 -> 253402300799
timestamp -> timestamp
end
|> round
|> DateTime.from_unix!
|> DateTime.to_naive
|> NaiveDateTime.to_string
end
def format_datetime({{year, month, day}, {hour, minute, second, _microsecond}}) do
format_datetime({{year, month, day}, {hour, minute, second}})
end
def format_datetime({{year, month, day}, {hour, minute, second}}) do
year =
case year do
year when year > 9999 -> 9999
year -> year
end
{:ok, date} = NaiveDateTime.new(year, month, day, hour, minute, second)
NaiveDateTime.to_string(date)
end
def get_base_domain(url) do
with host = URI.parse(url).host, base_domain = PublicSuffix.registrable_domain(host)
do
case base_domain do
nil -> host
_ -> base_domain
end
end
end
def truncate(string, maximum) do
if String.length(string) > maximum do
"#{String.slice(string, 0, maximum)}..."
else
string
end
end
def headers_to_check do
%{
"Strict-Transport-Security" =>
~s{<a href="https://https.cio.gov/hsts/">HTTP Strict Transport Security</a> (HSTS) skyddar besökare genom att se till att deras webbläsare alltid ansluter över HTTPS.},
"Content-Security-Policy" =>
~s{<a href="https://scotthelme.co.uk/content-security-policy-an-introduction/">Content Security Policy</a> (CSP) är ett kraftfullt verktyg för att skydda en webbplats mot till exempel XSS-attacker och informationsläckage. },
"X-Frame-Options" =>
~s{With X-Frame-Options the server can tell the browser whether the page may be displayed in a <code><frame></code>, <code><iframe></code> or <code><object></code>. In other words: it is possible to state that the page must not be embedded in another site. This protects against so-called <a href="https://en.wikipedia.org/wiki/Clickjacking">clickjacking</a>.},
"X-Xss-Protection" =>
~s{X-XSS-Protection configures the <a href="https://en.wikipedia.org/wiki/Cross-site_scripting">XSS</a> filter in some browsers. The <a href="https://scotthelme.co.uk/hardening-your-http-response-headers/#x-xss-protection">recommended value</a> is <code>X-XSS-Protection: 1; mode=block</code>.},
"X-Content-Type-Options" =>
~s{X-Content-Type-Options <a href="https://scotthelme.co.uk/hardening-your-http-response-headers/#x-content-type-options">protects against a certain class of attacks</a> and its only valid value is <code>X-Content-Type-Options: nosniff</code>.}
}
end
def check_referrer_policy(referrer) do
cond do
referrer in ["never", "no-referrer"] ->
%{"status" => "success",
"icon" => "icon-umbrella2 success",
"text" => "Referrers läcks ej"}
referrer in ["origin", "origin-when-cross-origin", "origin-when-crossorigin"] ->
%{"status" => "warning",
"icon" => "icon-raindrops2 warning",
"text" => "Referrers läcks delvis"}
referrer in ["no-referrer-when-down-grade", "default", "unsafe-url", "always", "", nil] ->
%{"status" => "alert",
"icon" => "icon-raindrops2 alert",
"text" => "Referrers läcks"}
true ->
%{"status" => "other",
"icon" => "",
"text" => "Referrers läcks (antagligen)"}
end
end
end
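# Quick illustrations of the helpers above (the input values are arbitrary):
defmodule SiteGenerator.HelpersExample do
  import SiteGenerator.Helpers

  def demo do
    # a cookie expiry far past year 9999 is clamped to DateTime's maximum
    "9999-12-31 23:59:59" = format_timestamp(999_999_999_999_999)

    # long strings are cut off with an ellipsis
    "eli..." = truncate("elixir", 3)

    :ok
  end
end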
|
lib/site_generator/helpers.ex
| 0.572125
| 0.426083
|
helpers.ex
|
starcoder
|
defmodule BitFieldSet do
@moduledoc false
use Bitwise
@type piece_index :: non_neg_integer
@type size :: non_neg_integer
@type errors :: :out_of_bounds | :bit_field_size_too_small
@opaque t :: %__MODULE__{size: size, pieces: non_neg_integer}
defstruct size: 0, pieces: 0
@doc """
Create a new bit field set given a `size` (an integer denoting the
bit size of the bit field) *and* some optional initialization
`data` (a binary whose size must not exceed the size of
the bit field).
iex> BitFieldSet.new!(16) |> BitFieldSet.to_binary()
<<0, 0>>
The target size should be specified when a bit field is initialized
with data.
iex> BitFieldSet.new!(<<128, 1>>, 16)
#BitFieldSet<[0, 15]>
"""
@spec new(binary, size) :: {:ok, t} | {:error, errors}
def new(data \\ <<>>, size)
def new(<<>>, size), do: {:ok, %__MODULE__{size: size, pieces: 0}}
def new(_, size) when size <= 0, do: {:error, :bit_field_size_too_small}
def new(data, size) when is_binary(data) and bit_size(data) - size < 8 do
actual_size = bitfield_size(size)
<<pieces::big-size(actual_size)>> = data
with bitfield = %__MODULE__{size: size, pieces: pieces},
{:ok, bitfield} <- validate_trailing_bits(bitfield),
{:ok, bitfield} <- drop_tailing_bits(bitfield) do
{:ok, bitfield}
end
end
def new(data, size) when is_bitstring(data) and bit_size(data) - size < 8 do
data_size = bit_size(data)
<<pieces::integer-size(data_size)>> = data
bitfield = %__MODULE__{size: size, pieces: pieces}
{:ok, bitfield}
end
def new(_content, _size), do: {:error, :out_of_bounds}
# Trailing bits should never be set. They can occur if the bit
# field size is not divisible by eight. If they are set we should
# return an error.
defp validate_trailing_bits(%__MODULE__{size: size} = bitfield)
when rem(size, 8) == 0 do
{:ok, bitfield}
end
defp validate_trailing_bits(%__MODULE__{size: size, pieces: pieces} = bitfield) do
tailing_bits = bitfield_size(bitfield) - size
tailing_bit_mask = (1 <<< tailing_bits) - 1
if band(pieces, tailing_bit_mask) == 0 do
{:ok, bitfield}
else
{:error, :out_of_bounds}
end
end
# We don't use the trailing bits for the internal representation
defp drop_tailing_bits(%__MODULE__{size: size} = bitfield)
when rem(size, 8) == 0 do
{:ok, bitfield}
end
defp drop_tailing_bits(%__MODULE__{size: size, pieces: pieces} = bitfield) do
tailing_bits = bitfield_size(bitfield) - size
{:ok, %{bitfield | pieces: pieces >>> tailing_bits}}
end
@doc """
Like `new/2` but will throw an error on initialization failure
"""
@spec new!(binary, size) :: t
def new!(content \\ <<>>, size) do
{:ok, set} = new(content, size)
set
end
@doc """
Takes two bit field sets of the same size, and return `true` if both
sets contain exactly the same pieces; and `false` otherwise.
iex> a = BitFieldSet.new!(<<0b10100110>>, 8)
iex> b = BitFieldSet.new!(<<0b10100110>>, 8)
iex> BitFieldSet.equal?(a, b)
true
iex> c = BitFieldSet.new!(<<0b11011011>>, 8)
iex> BitFieldSet.equal?(a, c)
false
"""
@spec equal?(t, t) :: boolean
def equal?(
%__MODULE__{size: size, pieces: pieces},
%__MODULE__{size: size, pieces: pieces}
),
do: true
def equal?(_, _), do: false
@doc """
Takes a bit field set and a piece number and return `true` if the
given piece number is present in the set; `false` otherwise.
iex> set = BitFieldSet.new!(<<0b10000001>>, 8)
iex> BitFieldSet.member?(set, 7)
true
iex> BitFieldSet.member?(set, 2)
false
"""
@spec member?(t, piece_index) :: boolean
def member?(%__MODULE__{pieces: pieces} = bitfield, piece_index) do
piece = get_piece_index(bitfield, piece_index)
band(pieces, piece) != 0
end
@doc """
Take a bit field set and an piece index and add it to the bit
field. The updated piece set will get returned:
iex> a = BitFieldSet.new!(<<0b10101000>>, 8)
iex> BitFieldSet.put(a, 6)
#BitFieldSet<[0, 2, 4, 6]>
"""
@spec put(t, piece_index) :: t
def put(%__MODULE__{size: size, pieces: pieces} = bitfield, piece_index)
when piece_index < size do
piece = get_piece_index(bitfield, piece_index)
%{bitfield | pieces: bor(pieces, piece)}
end
@doc """
Take a bit field set and an index. The given index will get removed
from the bit field set and the updated bit field set will get
returned:
iex> set = BitFieldSet.new!(<<0b10101000>>, 8)
iex> BitFieldSet.delete(set, 2)
#BitFieldSet<[0, 4]>
"""
@spec delete(t, piece_index) :: t
def delete(%__MODULE__{pieces: pieces} = bitfield, piece_index) do
piece = get_piece_index(bitfield, piece_index)
%{bitfield | pieces: band(pieces, bnot(piece))}
end
@doc """
Set all the bits to on in the bit field set.
iex> set = BitFieldSet.new!(<<0b10100110>>, 8)
iex> BitFieldSet.fill(set)
#BitFieldSet<[0, 1, 2, 3, 4, 5, 6, 7]>
"""
@spec fill(t) :: t
def fill(%__MODULE__{size: size} = bitfield) do
%{bitfield | pieces: (1 <<< size) - 1}
end
@doc """
Take a bit field set and return `true` if the set contains all the
pieces, and `false` otherwise.
iex> BitFieldSet.new!(<<0b10011010>>, 8) |> BitFieldSet.full?()
false
iex> BitFieldSet.new!(<<0b11111111>>, 8) |> BitFieldSet.full?()
true
"""
@spec full?(t) :: boolean
def full?(%__MODULE__{pieces: pieces, size: size}) do
pieces == (1 <<< size) - 1
end
@doc """
Take a bit field set and return `true` if the set contains no
pieces, and `false` otherwise.
iex> BitFieldSet.new!(<<0b11111111>>, 8) |> BitFieldSet.empty?()
false
iex> BitFieldSet.new!(<<0b00000000>>, 8) |> BitFieldSet.empty?()
true
"""
@spec empty?(t) :: boolean
def empty?(%__MODULE__{pieces: 0}), do: true
def empty?(%__MODULE__{}), do: false
@doc """
Takes two bit field sets of the same size and return a set
containing the pieces that belong to both sets.
iex> a = BitFieldSet.new!(<<0b00101010>>, 8)
iex> b = BitFieldSet.new!(<<0b10110011>>, 8)
iex> BitFieldSet.intersection(a, b)
#BitFieldSet<[2, 6]>
"""
@spec intersection(t, t) :: t
def intersection(
%__MODULE__{size: size, pieces: a} = bitfield,
%__MODULE__{size: size, pieces: b}
) do
%{bitfield | pieces: band(b, a)}
end
@doc """
Takes two bit field sets, a and b, who both of the same size, and
returns a set containing the pieces in *a* without the pieces in
*b*.
iex> a = BitFieldSet.new!(<<170>>, 8)
iex> b = BitFieldSet.new!(<<85>>, 8)
iex> BitFieldSet.difference(a, b)
#BitFieldSet<[0, 2, 4, 6]>
iex> BitFieldSet.difference(b, a)
#BitFieldSet<[1, 3, 5, 7]>
"""
@spec difference(t, t) :: t
def difference(
%__MODULE__{size: size, pieces: a} = bitfield,
%__MODULE__{size: size, pieces: b}
) do
%{bitfield | pieces: band(a, bnot(b))}
end
@doc """
Takes two bit field sets of the same size and returns a set
containing all members of both sets.
iex> a = BitFieldSet.new!(<<0b00101010>>, 8)
iex> b = BitFieldSet.new!(<<0b10000000>>, 8)
iex> BitFieldSet.union(a, b)
#BitFieldSet<[0, 2, 4, 6]>
"""
@spec union(t, t) :: t
def union(
%__MODULE__{size: size, pieces: a} = bitfield,
%__MODULE__{size: size, pieces: b}
) do
%{bitfield | pieces: bor(a, b)}
end
@doc """
Takes two bit field sets, a and b, who has the same size, and return
`true` if all the members of set a are also members of set b;
`false` otherwise.
iex> a = BitFieldSet.new!(<<0b00000110>>, 8)
iex> b = BitFieldSet.new!(<<0b00101110>>, 8)
iex> BitFieldSet.subset?(a, b)
true
iex> BitFieldSet.subset?(b, a)
false
"""
@spec subset?(t, t) :: boolean
def subset?(
%__MODULE__{size: size, pieces: a},
%__MODULE__{size: size, pieces: b}
) do
band(b, a) == a
end
@doc """
Takes two bit field sets and return `true` if the two bit fields
does not share any members, otherwise `false` will get returned.
iex> a = BitFieldSet.new!(<<0b00101110>>, 8)
iex> b = BitFieldSet.new!(<<0b11010001>>, 8)
iex> c = BitFieldSet.new!(<<0b11101000>>, 8)
iex> BitFieldSet.disjoint?(a, b)
true
iex> BitFieldSet.disjoint?(a, c)
false
"""
@spec disjoint?(t, t) :: boolean
def disjoint?(
%__MODULE__{pieces: a, size: size},
%__MODULE__{pieces: b, size: size}
) do
band(b, a) == 0
end
@doc """
Take a bit field set and return the number of its available pieces.
iex> BitFieldSet.new!(<<0b10101010>>, 8) |> BitFieldSet.size()
4
"""
@spec size(t) :: non_neg_integer
def size(%__MODULE__{pieces: pieces}) do
count_enabled_bits(pieces, 0)
end
@doc """
Takes a bit field set and returns a binary representation of the set.
iex> a = BitFieldSet.new!(<<0b10011010, 0b10000000>>, 16)
iex> BitFieldSet.to_binary(a)
<<154, 128>>
"""
@spec to_binary(t) :: binary
def to_binary(%__MODULE__{pieces: pieces, size: size}) do
byte_size = bitfield_size(size)
tailing_bits = byte_size - size
bitfield = pieces <<< tailing_bits
<<bitfield::big-size(byte_size)>>
end
@doc """
Take a bit field set and returns the available pieces as a list.
iex> BitFieldSet.new!(<<0b10011010>>, 8) |> BitFieldSet.to_list()
[0, 3, 4, 6]
"""
@spec to_list(t) :: [piece_index]
def to_list(%__MODULE__{pieces: 0, size: _}), do: []
def to_list(%__MODULE__{pieces: pieces, size: size}) do
# `:math.log2/1` does not support numbers bigger than 1024 bits;
# so we need to split the number up if the number is bigger
chunk(<<pieces::integer-size(size)>>, 1024)
|> List.flatten()
end
defp chunk(data, step_size, offset \\ 0) do
case data do
<<>> ->
[]
<<head::integer-size(step_size), remaining::bitstring>> ->
offset = offset + step_size
# note; body recursive because we will hit other limitations
# before blowing the call stack
[do_chunk_to_list(head, offset, []) | chunk(remaining, step_size, offset)]
<<remainder::bitstring>> ->
remainder_size = bit_size(remainder)
offset = offset + remainder_size
<<remainder::integer-size(remainder_size)>> = remainder
[do_chunk_to_list(remainder, offset, [])]
end
end
defp do_chunk_to_list(0, _, acc), do: acc
defp do_chunk_to_list(n, offset, acc) do
next = band(n, n - 1)
position_of_least_significant = :erlang.trunc(:math.log2(band(n, -n)) + 1)
do_chunk_to_list(next, offset, [offset - position_of_least_significant | acc])
end
# helpers ============================================================
defp get_piece_index(%__MODULE__{size: size}, piece_index) do
1 <<< (size - (piece_index + 1))
end
# calculate the size of the bit field in bytes (divisible by 8)
defp bitfield_size(%__MODULE__{size: size}), do: bitfield_size(size)
defp bitfield_size(size) when is_integer(size) do
tail = if rem(size, 8) != 0, do: 1, else: 0
(div(size, 8) + tail) * 8
end
# We use <NAME>'s Algorithm to count the 'on' bits in the
# bit field. It works by subtracting one from the current integer
# value and taking the bitwise AND of that and the current value.
# This effectively removes the least significant set bit, so
# it is just a matter of counting the times we can do that before
# reaching zero
defp count_enabled_bits(0, acc), do: acc
defp count_enabled_bits(n, acc) do
count_enabled_bits(band(n, n - 1), acc + 1)
end
# protocols ==========================================================
defimpl Enumerable do
def reduce(source, acc, fun) do
Enumerable.List.reduce(BitFieldSet.to_list(source), acc, fun)
end
def member?(source, value) do
{:ok, BitFieldSet.member?(source, value)}
end
def count(source) do
{:ok, BitFieldSet.size(source)}
end
end
defimpl Collectable do
def into(original) do
{original,
fn
acc, {:cont, value} ->
BitFieldSet.put(acc, value)
acc, :done ->
acc
_, :halt ->
:ok
end}
end
end
defimpl Inspect do
import Inspect.Algebra
def inspect(source, opts) do
opts = %Inspect.Opts{opts | charlists: :as_lists}
concat(["#BitFieldSet<", Inspect.List.inspect(BitFieldSet.to_list(source), opts), ">"])
end
end
end
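# A small sketch of the protocol integrations above (illustrative): Collectable
# lets a set be built with Enum.into/2 and Enumerable lets it be consumed as a
# plain collection of piece indices.
defmodule BitFieldSetExample do
  def demo do
    have = Enum.into([0, 3, 4, 6], BitFieldSet.new!(8))
    full = BitFieldSet.fill(BitFieldSet.new!(8))

    # pieces we already have versus pieces we still need:
    # {[0, 3, 4, 6], [1, 2, 5, 7]}
    {BitFieldSet.to_list(have), BitFieldSet.to_list(BitFieldSet.difference(full, have))}
  end
end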
|
lib/bit_field_set.ex
| 0.880277
| 0.635505
|
bit_field_set.ex
|
starcoder
|
defmodule Membrane.HTTPAdaptiveStream.SinkBin do
@moduledoc """
Bin responsible for receiving audio and video streams, performing payloading and CMAF muxing
to eventually store them using provided storage configuration.
## Input streams
Parsed H264 or AAC video or audio streams are expected to be connected via the `:input` pad.
The type of stream has to be specified via the pad's `:encoding` option.
## Output
Specify one of `Membrane.HTTPAdaptiveStream.Storages` as `:storage` to configure the sink.
"""
use Membrane.Bin
alias Membrane.{ParentSpec, Time, MP4}
alias Membrane.HTTPAdaptiveStream.{Sink, Storage}
@payloaders %{H264: MP4.Payloader.H264, AAC: MP4.Payloader.AAC}
def_options muxer_segment_duration: [
spec: pos_integer,
default: 2 |> Time.seconds()
],
manifest_name: [
spec: String.t(),
default: "index",
description: "Name of the main manifest file"
],
manifest_module: [
spec: module,
description: """
Implementation of the `Membrane.HTTPAdaptiveStream.Manifest`
behaviour.
"""
],
storage: [
spec: Storage.config_t(),
description: """
Storage configuration. May be one of `Membrane.HTTPAdaptiveStream.Storages.*`.
See `Membrane.HTTPAdaptiveStream.Storage` behaviour.
"""
],
target_window_duration: [
spec: pos_integer | :infinity,
default: Time.seconds(40),
description: """
Manifest duration is kept above that time, while the oldest segments
are removed whenever possible.
"""
],
persist?: [
spec: boolean,
default: false,
description: """
If true, stale segments are removed from the manifest only. Once
playback finishes, they are put back into the manifest.
"""
],
target_segment_duration: [
spec: pos_integer,
default: 0,
description: """
Expected length of each segment. Setting it is not necessary, but
may help players achieve better UX.
"""
],
hls_mode: [
spec: :muxed_av | :separate_av,
default: :separate_av,
description: """
Option defining how the incoming tracks will be handled and how CMAF will be muxed.
- In `:muxed_av` audio will be added to each video rendition, creating CMAF segments that contain both audio and video.
- In `:separate_av` audio and video tracks will be separate and synchronization will need to be sorted out by the player.
"""
]
def_input_pad :input,
demand_unit: :buffers,
caps: [Membrane.H264, Membrane.AAC],
availability: :on_request,
options: [
encoding: [
spec: :H264 | :AAC,
description: """
Encoding type determining which payloader will be used for the given stream.
"""
],
track_name: [
spec: String.t() | nil,
default: nil,
description: """
              Name that will be used to name the media playlist for the given track, as well as its header and segment files.
              It must not contain any URI reserved characters.
"""
]
]
@impl true
def handle_init(opts) do
children =
[
sink: %Sink{
manifest_name: opts.manifest_name,
manifest_module: opts.manifest_module,
storage: opts.storage,
target_window_duration: opts.target_window_duration,
persist?: opts.persist?,
target_segment_duration: opts.target_segment_duration
}
] ++
if(opts.hls_mode == :muxed_av, do: [audio_tee: Membrane.Tee.Parallel], else: [])
state = %{muxer_segment_duration: opts.muxer_segment_duration, mode: opts.hls_mode}
{{:ok, spec: %ParentSpec{children: children}}, state}
end
@impl true
def handle_pad_added(Pad.ref(:input, ref) = pad, context, state) do
muxer = %MP4.Muxer.CMAF{segment_duration: state.muxer_segment_duration}
encoding = context.options[:encoding]
payloader = Map.fetch!(@payloaders, encoding)
track_name = context.options[:track_name]
spec =
cond do
state.mode == :separate_av ->
%ParentSpec{
links: [
link_bin_input(pad)
|> to({:payloader, ref}, payloader)
|> to({:cmaf_muxer, ref}, muxer)
|> via_in(pad, options: [track_name: track_name])
|> to(:sink)
]
}
state.mode == :muxed_av and encoding == :H264 ->
%ParentSpec{
children: %{
{:payloader, ref} => payloader,
{:cmaf_muxer, ref} => muxer
},
links: [
link_bin_input(pad)
|> to({:payloader, ref})
|> to({:cmaf_muxer, ref}),
link(:audio_tee)
|> to({:cmaf_muxer, ref}),
link({:cmaf_muxer, ref})
|> via_in(pad, options: [track_name: track_name])
|> to(:sink)
]
}
state.mode == :muxed_av and encoding == :AAC ->
if count_audio_tracks(context) > 1,
do: raise("In :muxed_av mode, only one audio input is accepted")
%ParentSpec{
children: %{{:payloader, ref} => payloader},
links: [
link_bin_input(pad)
|> to({:payloader, ref})
|> to(:audio_tee)
]
}
end
{{:ok, spec: spec}, state}
end
@impl true
def handle_pad_removed(Pad.ref(:input, ref), _ctx, state) do
children =
[
{:payloader, ref}
] ++ if(state.mode != :muxed_av, do: [{:cmaf_muxer, ref}], else: [])
{{:ok, remove_child: children}, state}
end
@impl true
def handle_element_end_of_stream({:sink, _}, _ctx, state) do
{{:ok, notify: :end_of_stream}, state}
end
@impl true
def handle_element_end_of_stream(_element, _ctx, state) do
{:ok, state}
end
defp count_audio_tracks(context),
do:
Enum.count(context.pads, fn {_pad, metadata} ->
metadata.options.encoding == :AAC
end)
end
|
lib/membrane_http_adaptive_stream/sink_bin.ex
| 0.881047
| 0.561575
|
sink_bin.ex
|
starcoder
|
defmodule RegimenRunner do
@moduledoc """
Runs a regimen
"""
defmodule Item do
@moduledoc false
@type t :: %__MODULE__{time_offset: integer,
sequence: Farmbot.CeleryScript.Ast.t}
defstruct [:time_offset, :sequence]
def parse(%{"time_offset" => offset, "sequence" => sequence}) do
%__MODULE__{time_offset: offset,
sequence: Farmbot.CeleryScript.Ast.parse(sequence)}
end
end
use GenServer
use Amnesia
use Farmbot.Sync.Database
require Logger
def start_link(regimen, time) do
GenServer.start_link(__MODULE__, [regimen, time], name: :"regimen-#{regimen.id}")
end
@lint false
def init([regimen, time]) do
# parse and sort the regimen items
items = regimen.regimen_items
|> Enum.map(&Item.parse(&1))
|> Enum.sort(&(&1.time_offset <= &2.time_offset))
first_item = List.first(items)
if first_item do
epoch = build_epoch(time)
first_dt = Timex.shift(epoch, milliseconds: first_item.time_offset)
timestr = "#{first_dt.month}/#{first_dt.day}/#{first_dt.year} " <>
"at: #{first_dt.hour}:#{first_dt.minute}"
      Logger.info "your first item will execute on #{timestr}"
millisecond_offset = Timex.diff(first_dt, Timex.now(), :milliseconds)
Process.send_after(self(), :execute, millisecond_offset)
{:ok, %{epoch: epoch, regimen: %{regimen | regimen_items: items}, next_execution: first_dt}}
else
Logger.warn ">> no items on regimen: #{regimen.name}"
{:ok, %{}}
end
end
def handle_call(:get_state, _from, state), do: {:reply, state, state}
@lint false
def handle_info(:execute, state) do
{item, regimen} = pop_item(state.regimen)
if item do
Elixir.Sequence.Supervisor.add_child(item.sequence, Timex.now())
next_item = List.first(regimen.regimen_items)
if next_item do
next_dt = Timex.shift(state.epoch, milliseconds: next_item.time_offset)
timestr = "#{next_dt.month}/#{next_dt.day}/#{next_dt.year} at: #{next_dt.hour}:#{next_dt.minute}"
Logger.info "your next item will execute on #{timestr}"
millisecond_offset = Timex.diff(next_dt, Timex.now(), :milliseconds)
Process.send_after(self(), :execute, millisecond_offset)
        {:noreply, %{state | regimen: regimen, next_execution: next_dt}}
else
Logger.info ">> #{regimen.name} is complete!"
spawn fn() ->
Elixir.Regimen.Supervisor.remove_child(regimen)
end
{:noreply, :finished}
end
else
Logger.info ">> #{regimen.name} is complete!"
spawn fn() ->
Elixir.Regimen.Supervisor.remove_child(regimen)
end
{:noreply, :finished}
end
end
  @spec pop_item(Regimen.t) :: {Item.t | nil, Regimen.t}
  # when there is at least one item, pop the top one
  defp pop_item(%Regimen{regimen_items: [do_this_one | items]} = r) do
    {do_this_one, %Regimen{r | regimen_items: items}}
  end
  # nothing left to pop
  defp pop_item(%Regimen{regimen_items: []} = r), do: {nil, r}
@doc """
Gets the state of a regimen by its id.
"""
def get_state(id), do: GenServer.call(:"regimen-#{id}", :get_state)
# returns midnight of today
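  # e.g. given 2017-05-01 13:45:30 this yields 2017-05-01 00:00:00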
@spec build_epoch(DateTime.t) :: DateTime.t
def build_epoch(n) do
Timex.shift(n, hours: -n.hour, seconds: -n.second, minutes: -n.minute)
end
end
|
lib/regimen/regimen_runner.ex
| 0.762866
| 0.432243
|
regimen_runner.ex
|
starcoder
|
defmodule UnicodeData do
@moduledoc """
Provides access to Unicode properties needed for more complex text processing.
## Script detection
Proper text layout requires knowing which script is in use for a run of text.
Unicode provides the `Script` property to identify the script associated with a
codepoint. The script short name is also provided, which can be passed to font
engines or cross-referenced with [ISO 15924](https://en.wikipedia.org/wiki/ISO_15924).
Once the script is identified, it's possible to determine if the script is a right-to-left
script, as well as what additional support might be required for proper layout.
## Shaping support
The `Joining_Type` and `Joining_Group` properties provide support for shaping engines
doing layout of cursive scripts.
## Layout support
  Bidirectional algorithms such as the one in [UAX #9](http://www.unicode.org/reports/tr9/) require access
to several Unicode properties in order to properly layout paragraphs where the direction of the text
is not uniform -- for example, when embedding an English word into a Hebrew paragraph.
The `Bidi_Class`, `Bidi_Mirroring_Glyph`, `Bidi_Mirrored`, `Bidi_Paired_Bracket`, and `Bidi_Paired_Bracket_Type`
properties are specifically provided to allow for implementation
of the Unicode bidirectional algorithm described in [UAX #9](http://www.unicode.org/reports/tr9/).
For layout of vertical text, the `Vertical_Orientation` and `East_Asian_Width` properties are exposed
to help layout engines decide whether or not to rotate characters that are normally laid out horizontally.
  These can (and should) be tailored based on context, but they provide sane defaults in the absence of any
such context (such as when rendering a plain text document).
## Text segmentation
Textual analysis often requires splitting on line, word, or sentence boundaries. While the most
sophisticated algorithms require contextual knowledge, Unicode provides properties and default
algorithms for this purpose.
### Line Breaking
The Unicode line-breaking algorithm described in [UAX #14](http://www.unicode.org/reports/tr14/)
makes use of the `Line_Break` property and has notes about tailoring the algorithm for various
contexts.
This module exposes the property, a conformant implementation of the line-breaking algorithm, and the
ability to tailor the algorithm according to the standard. As part of the test suite it tailors the algorithm
by customizing rules 13 and 25 per the example given in the algorithm description.
For most purposes, breaking plain text into paragraphs can be accomplished by line breaking only at required
break points.
iex> UnicodeData.apply_required_linebreaks("Paragraph 1.\\nParagraph 2.")
["Paragraph 1.", "Paragraph 2."]
Laying out paragraphs of text usually involves identifying potential breakpoints and choosing the
  ones that best satisfy layout constraints. To aid in this analysis, the `identify_linebreak_positions` function
returns a list of indices that indicate potential break positions.
iex> UnicodeData.identify_linebreak_positions("Where can this line be broken?\\nLet me know.")
[{"Where can this line be broken?", [6, 10, 15, 20, 23]}, {"Let me know.", [4, 7]}]
### Tailoring the line-breaking algorithm
This implementation allows for several ways to tailor the algorithm for more complex needs.
The first is to modify the classification of code points by supplying a `linebreak_classifier` function to
any of the line-breaking functions. If you don't supply one, the aptly-named `default_linebreak_classes/2`
will be applied to conform with the default implementation.
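  As a rough sketch (not a doctest -- treating hyphens as unbreakable glue here is
  purely illustrative), such a classifier can be supplied like this:
      no_break_on_hyphen = fn codepoint, original_class ->
        case original_class do
          "HY" -> "GL"
          _ -> UnicodeData.default_linebreak_classes(codepoint, original_class)
        end
      end
      UnicodeData.linebreak_locations("well-known", no_break_on_hyphen)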
The second is to replace, discard, or supplement one or more of the tailorable rules. An example will be
provided in future versions; for now, we recommend looking at the test suite to see such tailoring in action.
### Word and sentence breaking
Breaking on word and sentence boundaries is described in [UAX #29](http://www.unicode.org/reports/tr29/)
and makes use of the `Word_Break` and `Sentence_Break` properties, respectively.
"""
alias UnicodeData.Script
alias UnicodeData.Bidi
alias UnicodeData.Segment
alias UnicodeData.Vertical
@typedoc """
A codepoint in either binary or numeric form.
"""
@type cp :: integer | String.codepoint()
@typedoc """
Function that can override or resolve the linebreak classification of a codepoint.
"""
@type linebreak_classifier :: (String.codepoint(), String.t() -> String.t())
@typedoc """
  Function that determines whether a break is allowed between two linebreak classes.
"""
@type uax14_tailored_rule ::
(String.t(), String.t(), String.t() | nil -> {atom | nil, String.t() | nil})
@typedoc """
Set of tailored line-breaking rules.
"""
@type uax14_ruleset :: [uax14_tailored_rule]
@doc """
Lookup the script property associated with a codepoint.
This will return the script property value. In addition to the explicitly
defined scripts, there are three special values.
* Characters with script value `Inherited` inherit the script of the preceding character.
* Characters with script value `Common` are used in multiple scripts.
* Characters of `Unknown` script are unassigned, private use, noncharacter or
surrogate code points.
This is sourced from [Scripts.txt](http://www.unicode.org/Public/UNIDATA/Scripts.txt)
## Examples
iex> UnicodeData.script_from_codepoint("a")
"Latin"
iex> UnicodeData.script_from_codepoint("9")
"Common"
iex> UnicodeData.script_from_codepoint("\u0643")
"Arabic"
"""
@spec script_from_codepoint(integer | String.codepoint()) :: String.t()
def script_from_codepoint(codepoint) when is_integer(codepoint) do
Script.script_from_codepoint(codepoint)
end
def script_from_codepoint(codepoint) do
<<intval::utf8>> = codepoint
script_from_codepoint(intval)
end
@doc """
Get the short name associated with a script. This is the tag
used to identify scripts in OpenType fonts and generally matches
  the script code defined in ISO 15924.
  See [Annex #24](http://www.unicode.org/reports/tr24/) for more about
  the relationship between Unicode and ISO 15924.
Data from [OpenType script tags](http://www.microsoft.com/typography/otspec/scripttags.htm)
and [PropertyValueAliases.txt](http://www.unicode.org/Public/UNIDATA/PropertyValueAliases.txt)
## Examples
iex> UnicodeData.script_to_tag("Latin")
"latn"
iex> UnicodeData.script_to_tag("Unknown")
"zzzz"
iex> UnicodeData.script_to_tag("Adlam")
"adlm"
"""
@spec script_to_tag(String.t()) :: String.t()
def script_to_tag(script) do
Script.script_to_tag(script)
end
@doc """
Determine if the script is written right-to-left.
This data is derived from ISO 15924.
There's a handy sortable table on
[the Wikipedia page for ISO 15924](https://en.wikipedia.org/wiki/ISO_15924).
## Examples
iex> UnicodeData.right_to_left?("Latin")
false
iex> UnicodeData.right_to_left?("Arabic")
true
You can also pass the script short name.
iex> UnicodeData.right_to_left?("adlm")
true
"""
@spec right_to_left?(String.t()) :: boolean
def right_to_left?(script) do
as_tag = Script.right_to_left?(script)
if as_tag do
true
else
script
|> script_to_tag
|> Script.right_to_left?()
end
end
@doc """
Determine if a script uses the `Joining Type` property
to select contextual forms.
Typically this is used to select a shaping engine, which will then call
`joining_type/1` and `joining_group/1` to do cursive shaping.
## Examples
iex> UnicodeData.uses_joining_type?("Latin")
false
iex> UnicodeData.uses_joining_type?("Arabic")
true
iex> UnicodeData.uses_joining_type?("Nko")
true
You can also pass the script short name.
iex> UnicodeData.uses_joining_type?("syrc")
true
"""
@spec uses_joining_type?(String.t()) :: boolean
def uses_joining_type?(script) do
as_tag = Script.uses_joining_type?(script)
if as_tag do
true
else
script
|> script_to_tag
|> Script.uses_joining_type?()
end
end
@doc """
Determine the joining type for cursive scripts.
Cursive scripts have the following join types:
* `R` Right_Joining (top-joining for vertical)
* `L` Left_Joining (bottom-joining for vertical)
* `D` Dual_Joining -- joins to characters on both sides.
* `C` Join_Causing -- forces a join to occur.
* `U` Non_Joining -- does not join to characters on either side.
* `T` Transparent -- characters on either side join to each other.
Transparent characters are treated as if they do not exist during joining -- typically these are
marks that render above or below the preceding base glyph.
Characters from other scripts return `U` as they do not participate in cursive shaping.
This is sourced from [ArabicShaping.txt](http://www.unicode.org/Public/UNIDATA/ArabicShaping.txt)
## Examples
iex> UnicodeData.joining_type("\u0643")
"D"
iex> UnicodeData.joining_type("\u062F")
"R"
iex> UnicodeData.joining_type("\u0710")
"R"
"""
@spec joining_type(cp) :: String.t()
def joining_type(codepoint) when is_integer(codepoint) do
Script.jointype_from_codepoint(codepoint)
end
def joining_type(codepoint) do
<<intval::utf8>> = codepoint
joining_type(intval)
end
@doc """
Determine the joining group for cursive scripts.
Characters from other scripts return `No_Joining_Group` as they do not
participate in cursive shaping.
The `ALAPH` and `DALATH RISH` joining groups are of particular interest
to shaping engines dealing with Syriac.
[Chapter 9.3 of the Unicode Standard](http://www.unicode.org/versions/Unicode10.0.0/ch09.pdf)
discusses Syriac shaping in detail.
This is sourced from [ArabicShaping.txt](http://www.unicode.org/Public/UNIDATA/ArabicShaping.txt)
## Examples
iex> UnicodeData.joining_group("\u0643")
"KAF"
iex> UnicodeData.joining_group("\u062F")
"DAL"
iex> UnicodeData.joining_group("\u0710")
"ALAPH"
"""
@spec joining_group(cp) :: String.t()
def joining_group(codepoint) when is_integer(codepoint) do
Script.joingroup_from_codepoint(codepoint)
end
def joining_group(codepoint) do
<<intval::utf8>> = codepoint
joining_group(intval)
end
@doc """
Determine the bidirectional character type of a character.
This is used to initialize the Unicode bidirectional algorithm, published in [UAX #9](http://www.unicode.org/reports/tr9/).
There are several blocks of unassigned code points which are reserved to specific script blocks and therefore return
a specific bidirectional character type. For example, unassigned code point `\uFE75`, in the Arabic block, has type "AL".
If not specifically assigned or reserved, the default value is "L" (Left-to-Right).
This is sourced from [DerivedBidiClass.txt](http://www.unicode.org/Public/UNIDATA/extracted/DerivedBidiClass.txt)
## Examples
iex> UnicodeData.bidi_class("A")
"L"
iex> UnicodeData.bidi_class("\u062F")
"AL"
iex> UnicodeData.bidi_class("\u{10B40}")
"R"
iex> UnicodeData.bidi_class("\uFE75")
"AL"
"""
@spec bidi_class(cp) :: String.t()
def bidi_class(codepoint) when is_integer(codepoint) do
Bidi.bidi_class(codepoint)
end
def bidi_class(codepoint) do
<<intval::utf8>> = codepoint
bidi_class(intval)
end
@doc """
The `Bidi_Mirrored` property indicates whether or not there is another Unicode character
that typically has a glyph that is the mirror image of the original character's glyph.
Character-based mirroring is used by the Unicode bidirectional algorithm. A layout engine
  may want to consider other methods of mirroring.
Some characters like \u221B (CUBE ROOT) claim to be mirrored but do not actually have a
corresponding mirror character - in those cases this function returns false.
This is sourced from [BidiMirroring.txt](http://www.unicode.org/Public/UNIDATA/BidiMirroring.txt)
## Examples
iex> UnicodeData.bidi_mirrored?("A")
false
iex> UnicodeData.bidi_mirrored?("[")
true
iex> UnicodeData.bidi_mirrored?("\u221B")
false
"""
@spec bidi_mirrored?(cp) :: boolean
def bidi_mirrored?(codepoint) when is_integer(codepoint) do
Bidi.mirrored?(codepoint)
end
def bidi_mirrored?(codepoint) do
<<intval::utf8>> = codepoint
bidi_mirrored?(intval)
end
@doc """
The `Bidi_Mirroring_Glyph` property returns the character suitable for character-based
mirroring, if one exists. Otherwise, it returns `nil`.
Character-based mirroring is used by the Unicode bidirectional algorithm. A layout engine
  may want to consider other methods of mirroring.
This is sourced from [BidiMirroring.txt](http://www.unicode.org/Public/UNIDATA/BidiMirroring.txt)
## Examples
iex> UnicodeData.bidi_mirror_codepoint("[")
"]"
iex> UnicodeData.bidi_mirror_codepoint("A")
nil
"""
@spec bidi_mirror_codepoint(cp) :: String.codepoint() | nil
def bidi_mirror_codepoint(codepoint) when is_integer(codepoint) do
m = Bidi.mirror_glyph(codepoint)
if m != nil, do: <<m::utf8>>, else: nil
end
def bidi_mirror_codepoint(<<codepoint::utf8>>) do
bidi_mirror_codepoint(codepoint)
end
@doc """
The Unicode `Bidi_Paired_Bracket_Type` property classifies characters into opening and closing
paired brackets for the purposes of the Unicode bidirectional algorithm.
It returns one of the following values:
* `o` Open - The character is classified as an opening bracket.
* `c` Close - The character is classified as a closing bracket.
* `n` None - the character is not a paired bracket character.
This is sourced from [BidiBrackets.txt](http://www.unicode.org/Public/UNIDATA/BidiBrackets.txt)
## Examples
iex> UnicodeData.bidi_paired_bracket_type("[")
"o"
iex> UnicodeData.bidi_paired_bracket_type("}")
"c"
iex> UnicodeData.bidi_paired_bracket_type("A")
"n"
"""
@spec bidi_paired_bracket_type(cp) :: String.t()
def bidi_paired_bracket_type(codepoint) when is_integer(codepoint) do
Bidi.paired_bracket_type(codepoint)
end
def bidi_paired_bracket_type(<<codepoint::utf8>>) do
Bidi.paired_bracket_type(codepoint)
end
@doc """
The `Bidi_Paired_Bracket` property is used to establish pairs of opening and closing
brackets for the purposes of the Unicode bidirectional algorithm.
If a character is an opening or closing bracket, this will return the other character in
the pair. Otherwise, it returns `nil`.
This is sourced from [BidiBrackets.txt](http://www.unicode.org/Public/UNIDATA/BidiBrackets.txt)
## Examples
iex> UnicodeData.bidi_paired_bracket("[")
"]"
iex> UnicodeData.bidi_paired_bracket("]")
"["
iex> UnicodeData.bidi_paired_bracket("A")
nil
"""
@spec bidi_paired_bracket(cp) :: String.codepoint() | nil
def bidi_paired_bracket(codepoint) when is_integer(codepoint) do
val = Bidi.paired_bracket(codepoint)
if val != nil, do: <<val::utf8>>, else: nil
end
def bidi_paired_bracket(<<codepoint::utf8>>) do
bidi_paired_bracket(codepoint)
end
@doc """
The Line_Break property is used by the Unicode line breaking algorithm to identify locations where
a break opportunity exists.
These are intended to be interpreted in the scope of [UAX #14](http://www.unicode.org/reports/tr14/).
You may wish to override these values in some contexts - in such cases consider providing a classifier
to `line_breaking/2`.
For a list of possible return values, best practices and implementation notes, you should refer to
[UAX #14](http://www.unicode.org/reports/tr14/).
This is sourced from [LineBreak.txt](http://www.unicode.org/Public/UNIDATA/LineBreak.txt)
## Examples
iex> UnicodeData.line_breaking("\u00B4")
"BB"
iex> UnicodeData.line_breaking("]")
"CP"
iex> UnicodeData.line_breaking("\u061F")
"EX"
iex> UnicodeData.line_breaking(":")
"IS"
"""
@spec line_breaking(cp) :: String.t()
def line_breaking(codepoint) when is_integer(codepoint) do
Segment.line_break(codepoint)
end
def line_breaking(<<codepoint::utf8>>) do
Segment.line_break(codepoint)
end
@doc """
Tailors the `Line_Break` property by applying the `tailoring` function.
This allows you to override the value that would normally be returned and is a simple way to tailor the
behaviour of the line breaking algorithm.
"""
@spec line_breaking(cp, linebreak_classifier) :: String.t()
def line_breaking(<<codepoint::utf8>>, tailoring) do
orig = Segment.line_break(codepoint)
tailoring.(codepoint, orig)
end
@doc """
Indicate all linebreak opportunities in a string of text according to UAX 14.
  Break opportunities are classified as either required or allowed.
"""
@spec linebreak_locations(String.t(), linebreak_classifier | nil, uax14_ruleset | nil) :: [
{:required | :allowed, integer}
]
def linebreak_locations(text, tailoring \\ nil, rules \\ nil) do
tailored_classes = if tailoring == nil, do: &default_linebreak_classes/2, else: tailoring
tailored_rules = if rules == nil, do: Segment.uax14_default_rules(), else: rules
    text
    |> String.codepoints()
    |> Stream.map(fn x -> line_breaking(x, tailored_classes) end)
    |> Stream.chunk_every(2, 1, :discard)
    |> Enum.map_reduce(nil, fn x, acc -> Segment.uax14_break_between(x, acc, tailored_rules) end)
    |> elem(0)
    |> Stream.with_index(1)
    |> Stream.filter(fn {k, _} -> k != :prohibited end)
    |> Enum.to_list()
end
@doc """
This is the default tailoring of linebreak classes according to UAX #14.
It resolves AI, CB, CJ, SA, SG, and XX into other line breaking classes in the
absence of any other criteria.
If you are supplying your own tailoring function, you may want unhandled cases to
fall back to this implementation.
"""
@spec default_linebreak_classes(cp, String.t()) :: String.t()
def default_linebreak_classes(codepoint, original_class) do
case original_class do
"AI" -> "AL"
"SG" -> "AL"
"XX" -> "AL"
"SA" -> if Regex.match?(~r/\p{Mn}|\p{Mc}/u, <<codepoint::utf8>>), do: "CM", else: "AL"
"CJ" -> "NS"
_ -> original_class
end
end
@doc """
Converts a run of text into a set of lines by implementing UAX#14, breaking only at required positions,
and indicating allowed break positions.
"""
@spec identify_linebreak_positions(String.t(), linebreak_classifier | nil, uax14_ruleset | nil) ::
[{String.t(), [integer]}]
def identify_linebreak_positions(text, tailoring \\ nil, rules \\ nil) do
text
|> linebreak_locations(tailoring, rules)
|> Enum.chunk_while(
{0, []},
fn {break, index}, {offset, allowed} ->
if break == :required do
{
:cont,
            {String.slice(text, offset, index - offset - 1), Enum.reverse(allowed)},
{index, []}
}
else
{:cont, {offset, [index - offset | allowed]}}
end
end,
fn {offset, allowed} ->
{:cont, {String.slice(text, offset, String.length(text)), Enum.reverse(allowed)}, {0, []}}
end
)
end
@doc """
  Break a run of text into lines, breaking only where required and ignoring other break opportunities.
  This is a conforming implementation of [UAX #14](http://www.unicode.org/reports/tr14/).
  It supports tailoring the assignment of linebreak classes as well as tailoring the rules themselves.
"""
@spec apply_required_linebreaks(String.t(), linebreak_classifier | nil, uax14_ruleset | nil) ::
[String.t()]
def apply_required_linebreaks(text, linebreak_classification \\ nil, rules \\ nil) do
text
|> linebreak_locations(linebreak_classification, rules)
|> Stream.filter(fn {k, _} -> k == :required end)
|> Stream.chunk_while(
0,
fn {_, index}, offset ->
{
:cont,
          String.slice(text, offset, index - offset - 1),
index
}
end,
fn offset -> {:cont, String.slice(text, offset, String.length(text)), 0} end
)
|> Enum.to_list()
end
@doc """
The `Word_Break` property can be used to help determine word boundaries.
[UAX #29](http://www.unicode.org/reports/tr29/) provides a simple algorithm that uses
this property to handle most unambiguous situations. To get better results, you should
tailor the algorithm for the locale and context. In particular, hyphens and apostrophes
commonly require a better understanding of the context to correctly determine if they
indicate a word boundary.
For a list of possible return values, best practices and implementation notes, you should refer to
section 4 of [UAX #29](http://www.unicode.org/reports/tr29/).
This is sourced from [WordBreakProperty.txt](http://www.unicode.org/Public/UNIDATA/auxiliary/WordBreakProperty.txt)
## Examples
iex> UnicodeData.word_breaking("B")
"ALetter"
iex> UnicodeData.word_breaking("\u30A1")
"Katakana"
iex> UnicodeData.word_breaking("\u00B4")
"Other"
"""
@spec word_breaking(cp) :: String.t()
def word_breaking(codepoint) when is_integer(codepoint) do
Segment.word_break(codepoint)
end
def word_breaking(<<codepoint::utf8>>) do
word_breaking(codepoint)
end
@doc """
The `Sentence_Break` property can be used to help determine sentence boundaries.
[UAX #29](http://www.unicode.org/reports/tr29/) provides a simple algorithm that uses this property to
handle most unambiguous situations. If the locale is known, information in the [CLDR](http://cldr.unicode.org/index)
can be used to improve the quality of boundary analysis.
This is sourced from [SentenceBreakProperty.txt](http://www.unicode.org/Public/UNIDATA/auxiliary/SentenceBreakProperty.txt)
For a list of possible return values, best practices and implementation notes, you should refer to
section 5 of [UAX #29](http://www.unicode.org/reports/tr29/).
## Examples
iex> UnicodeData.sentence_breaking(" ")
"Sp"
iex> UnicodeData.sentence_breaking("?")
"STerm"
iex> UnicodeData.sentence_breaking("]")
"Close"
"""
@spec sentence_breaking(cp) :: String.t()
def sentence_breaking(codepoint) when is_integer(codepoint) do
Segment.sentence_break(codepoint)
end
def sentence_breaking(<<codepoint::utf8>>) do
sentence_breaking(codepoint)
end
@doc """
The `Vertical_Orientation` property indicates the default character orientation when laying out vertical text.
This is intended to be a reasonable or legible default to use when laying out plain text in vertical columns.
A text layout program may need to consider the script, style, or context rather than relying exclusively on
the value of this property.
For more details, including a table of representative glyphs for the `Tu` and `Tr` values, see
[UAX #50](http://www.unicode.org/reports/tr50/).
It returns one of the following values:
* `U` Upright - The character is typically displayed upright (not rotated).
* `R` Rotated - The character is typically displayed sideways (rotated 90 degrees).
* `Tu` Typographically upright - Uses a different (unspecified) glyph but falls back to upright display.
* `Tr` Typographically rotated - Uses a different (unspecified) glyph but falls back to rotated display.
This is sourced from [VerticalOrientation.txt](http://www.unicode.org/Public/UNIDATA/VerticalOrientation.txt)
## Examples
iex> UnicodeData.vertical_orientation("$")
"R"
iex> UnicodeData.vertical_orientation("\u00A9")
"U"
iex> UnicodeData.vertical_orientation("\u300A")
"Tr"
iex> UnicodeData.vertical_orientation("\u3083")
"Tu"
"""
@spec vertical_orientation(cp) :: String.t()
def vertical_orientation(codepoint) when is_integer(codepoint) do
Vertical.orientation(codepoint)
end
def vertical_orientation(<<codepoint::utf8>>) do
vertical_orientation(codepoint)
end
@doc """
The `East_Asian_Width` property is useful when interoperating with legacy East Asian encodings or fixed pitch fonts.
This is an informative property that a layout engine may wish to use when tailoring line breaking or laying out vertical
text runs. Refer to [UAX #11](http://www.unicode.org/reports/tr11/) for a discussion and guidelines around its usage.
This is sourced from [EastAsianWidth.txt](http://www.unicode.org/Public/UNIDATA/EastAsianWidth.txt)
## Examples
iex> UnicodeData.east_asian_width("$")
"Na"
iex> UnicodeData.east_asian_width("\u00AE")
"A"
iex> UnicodeData.east_asian_width("\u2B50")
"W"
"""
@spec east_asian_width(cp) :: String.t()
def east_asian_width(codepoint) when is_integer(codepoint) do
Vertical.east_asian_width(codepoint)
end
def east_asian_width(<<codepoint::utf8>>) do
east_asian_width(codepoint)
end
end
|
lib/unicodedata.ex
| 0.93189
| 0.752035
|
unicodedata.ex
|
starcoder
|
defmodule ArkEcosystem.Crypto.Helpers.MapKeyTransformer do
def underscore(map) when is_map(map) do
transform_map(map, &Macro.underscore/1)
end
def camelCase(map) when is_map(map) do
transform_map(map, &camelizer/1)
end
defp transform_map(map, transformer) when is_map(map) do
Map.keys(map)
|> transform_keys(map, transformer)
end
  defp transform_keys([key | keys], map, transformer) do
    value = Map.get(map, key)
    transformed_key = transform_key(key, transformer)
    map =
      cond do
        is_map(value) ->
          transformed_map = transform_map(value, transformer)
          write_key(map, key, transformed_key, transformed_map)
        is_list(value) ->
          transformed_list =
            Enum.map(value, fn item -> transform_list_value(item, transformer) end)
          write_key(map, key, transformed_key, transformed_list)
        true ->
          write_key(map, key, transformed_key, value)
      end
    transform_keys(keys, map, transformer)
  end
  defp transform_keys([], map, _transformer), do: map
defp transform_list_value(value, transformer) do
cond do
is_map(value) ->
transform_map(value, transformer)
is_list(value) ->
Enum.map(value, fn item -> transform_list_value(item, transformer) end)
true ->
value
end
end
defp transform_key(key, transformer) do
key |> Atom.to_string() |> transformer.() |> String.to_atom()
end
defp write_key(map, old_key, new_key, value) do
Map.delete(map, old_key) |> Map.put(new_key, value)
end
# Macro.camelize turns public_key into PublicKey, so
# an additional step is necessary to downcase the first character.
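  # e.g. "public_key" -> "PublicKey" -> "publicKey"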
defp camelizer(key) do
camelized = key |> Macro.camelize()
String.downcase(String.first(camelized)) <>
String.slice(camelized, 1, String.length(camelized))
end
end
|
lib/arkecosystem/crypto/helpers/map_key_transformer.ex
| 0.730963
| 0.492615
|
map_key_transformer.ex
|
starcoder
|
defmodule Day18 do
def part_one(file_reader \\ InputFile, rounds \\ 100, dim \\ 100) do
file_reader.contents_of(18, :stream)
|> Enum.map(&String.trim/1)
|> Enum.map(&String.split(&1, "", trim: true))
|> Enum.with_index()
|> Enum.flat_map(fn {row, y} ->
row
|> Enum.with_index()
|> Enum.map(fn
{".", x} -> {{x, y}, 0}
{"#", x} -> {{x, y}, 1}
end)
end)
|> Enum.into(%{})
|> play(rounds, dim)
|> Map.values()
|> Enum.reduce(&Kernel.+/2)
end
def part_two(file_reader \\ InputFile, rounds \\ 100, dim \\ 100) do
file_reader.contents_of(18, :stream)
|> Enum.map(&String.trim/1)
|> Enum.map(&String.split(&1, "", trim: true))
|> Enum.with_index()
|> Enum.flat_map(fn {row, y} ->
row
|> Enum.with_index()
|> Enum.map(fn
{".", x} -> {{x, y}, 0}
{"#", x} -> {{x, y}, 1}
end)
end)
|> Enum.into(%{})
|> pin_corners(dim)
|> play_v2(rounds, dim)
|> Map.values()
|> Enum.reduce(&Kernel.+/2)
end
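  # One step of Conway's Game of Life: a lit cell stays lit with 2 lit
  # neighbours; any cell with exactly 3 lit neighbours is lit next round;
  # everything else goes dark.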
def play(grid, 0, _dim), do: grid
def play(grid, n, dim) do
(for x <- 0..dim-1, y <- 0..dim-1, do: {x, y})
|> Enum.map(fn point ->
{Map.get(grid, point), count_neighbors(grid, point, dim)}
|> case do
{1, 2} -> {point, 1}
{_, 3} -> {point, 1}
_ -> {point, 0}
end
end)
|> Enum.into(%{})
|> play(n - 1, dim)
end
def play_v2(grid, 0, _dim), do: grid
def play_v2(grid, n, dim) do
(for x <- 0..dim-1, y <- 0..dim-1, do: {x, y})
|> Enum.map(fn point ->
{Map.get(grid, point), count_neighbors(grid, point, dim)}
|> case do
{1, 2} -> {point, 1}
{_, 3} -> {point, 1}
_ -> {point, 0}
end
end)
|> Enum.into(%{})
|> pin_corners(dim)
|> play_v2(n - 1, dim)
end
def count_neighbors(grid, {x, y}, dim) do
(for dx <- -1..1, dy <- -1..1, do: {dx, dy})
|> Enum.reject(fn {dx, dy} -> dx == 0 && dy == 0 end)
|> Enum.map(fn {dx, dy} -> {x + dx, y + dy} end)
|> Enum.reject(fn {x, y} -> x < 0 || y < 0 || x >= dim || y >= dim end)
|> Enum.map(fn pt -> Map.get(grid, pt) end)
|> Enum.reduce(&Kernel.+/2)
end
def pin_corners(map, dim) do
map
|> Map.put({0, 0}, 1)
|> Map.put({dim - 1, 0}, 1)
|> Map.put({0, dim - 1}, 1)
|> Map.put({dim - 1, dim - 1}, 1)
end
end
|
year_2015/lib/day_18.ex
| 0.547222
| 0.591104
|
day_18.ex
|
starcoder
|
defmodule EWallet.Web.SearchParser do
@moduledoc """
This module allows parsing of arbitrary attributes into a search query.
It takes in a request's attributes, parses only the attributes needed for searching,
then builds those attributes into a search query on top of the given `Ecto.Queryable`.
"""
import Ecto.Query
@doc """
Parses search attributes and appends the resulting queries into the given queryable.
To search for one term in all fields, use:
%{"search_term" => "term"}
For multiple search, use the following format:
%{"search_terms" => %{ "field_name_1" => "term", "field_name_2" => "term2" }}
Where "field_name" is in the list of available search fields.
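  ## Example
  A sketch, assuming an Ecto schema `User` with `:username` and `:email` fields:
      SearchParser.to_query(User, %{"search_term" => "alice"}, [:username, :email])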
"""
def to_query(queryable, %{"search_terms" => terms}, fields) when terms != nil do
{_i, query} = Enum.reduce(terms, {0, queryable}, fn({field, value}, {index, query}) ->
fields
|> is_allowed_field?(field)
|> build_search_query(index, query, value)
end)
query
end
def to_query(queryable, %{"search_term" => term}, fields) when term != nil do
{_i, query} = Enum.reduce(fields, {0, queryable}, fn(field, {index, query}) ->
build_search_query(field, index, query, term)
end)
query
end
def to_query(queryable, _, _), do: queryable
defp is_allowed_field?(fields, field) do
atom_field = String.to_existing_atom(field)
cond do
Enum.member?(fields, {atom_field, :uuid}) -> {atom_field, :uuid}
Enum.member?(fields, atom_field) -> atom_field
true -> nil
end
rescue
_ in ArgumentError -> nil
end
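  # The first field that produces a condition narrows the query with `where`;
  # every subsequent field widens it with `or_where`, so a term may match in
  # any of the allowed fields.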
defp build_search_query(_field, index, query, nil), do: {index, query}
defp build_search_query(nil, index, query, _value), do: {index, query}
defp build_search_query(field, index, query, value) do
case index do
0 -> {index + 1, build_and_search_query(query, field, value)}
_ -> {index, build_or_search_query(query, field, value)}
end
end
defp build_or_search_query(query, {field, :uuid}, term) do
from q in query, or_where: ilike(fragment("?::text", field(q, ^field)), ^"%#{term}%")
end
defp build_or_search_query(query, field, term) do
from q in query, or_where: ilike(field(q, ^field), ^"%#{term}%")
end
defp build_and_search_query(query, {field, :uuid}, term) do
from q in query, where: ilike(fragment("?::text", field(q, ^field)), ^"%#{term}%")
end
defp build_and_search_query(query, field, term) do
from q in query, where: ilike(field(q, ^field), ^"%#{term}%")
end
end
|
apps/ewallet/lib/ewallet/web/search_parser.ex
| 0.767167
| 0.598606
|
search_parser.ex
|
starcoder
|
defmodule CsvGenerator do
@moduledoc File.read!("README.md")
defmacro __using__(_options) do
quote do
Module.register_attribute(__MODULE__, :columns, accumulate: true, persist: false)
Module.register_attribute(__MODULE__, :delimiter, accumulate: false, persist: false)
Module.register_attribute(__MODULE__, :line_ending, accumulate: false, persist: false)
Module.register_attribute(__MODULE__, :decimal_point, accumulate: false, persist: false)
import unquote(__MODULE__)
@before_compile unquote(__MODULE__)
end
end
defmacro __before_compile__(env) do
compile(
Module.get_attribute(env.module, :columns) |> Enum.reverse(),
Module.get_attribute(env.module, :delimiter, ","),
Module.get_attribute(env.module, :line_ending, "\n"),
Module.get_attribute(env.module, :decimal_point, ".")
)
end
@doc """
Defines a column in the CSV.
`column name, type, options`
The column name will be used to select the value from the given input.
The following types are currently supported:
Type | Elixir type | Default format
:----------- | :---------------------- | :------------------
`:string` | `String` | n/a
`:integer` | `Integer` | n/a
`:float` | `Float` | n/a
`:date` | `Date` | `"%Y-%m-%d"`
`:time` | `DateTime` or `Integer` | `"%H:%M"`
`:datetime` | `DateTime` | `"%Y-%m-%d %H:%M:%S"`
For `:date`, `:time`, and `:datetime`, any of the Date(Time) types that
are compatible with `Calendar.Strftime.strftime/2` are allowed.
`:time` also allows an `Integer` value that represents the time within a day.
## Options
* `:header` - Use this instead of the name for column header.
* `:format` - Supply a different format string, see https://hexdocs.pm/calendar/readme.html.
* `:digits` - Supply the number of digits for a `Float`.
* `:with` - Specifies a function to be called on the value before processing.
column :value, :integer, with: &calc/1 or
column :value, :integer, with: fn(x) -> x * 2 end
* `:source` - Use another field as the source for this column, this allows you to use the same column multiple times.
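  ## Example
  A minimal sketch combining some of the options above:
      defmodule PeopleCSV do
        use CsvGenerator
        column :name, :string, header: "Full name"
        column :joined, :date, format: "%d-%m-%Y"
        column :balance, :float, digits: 2
      end
      PeopleCSV.render([%{name: "Ada", joined: ~D[2020-01-31], balance: 12.5}])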
"""
defmacro column(name, type \\ :string, opts \\ []) do
# This makes it possible to pass an anonymous function to :with
parms =
case Keyword.get(opts, :with) do
nil -> opts
_ -> Keyword.update!(opts, :with, &Macro.escape/1)
end
quote bind_quoted: [name: name, type: type, opts: parms] do
@columns {name, type, opts}
end
end
@doc """
Specify the character to use as column delimiter, default: ","
## Example
delimiter ";"
"""
defmacro delimiter(char) do
quote bind_quoted: [char: char] do
@delimiter char
end
end
@doc """
Specify the line ending to use, default: "\\n".
## Example
line_ending "\r\n"
"""
defmacro line_ending(char) do
quote bind_quoted: [char: char] do
@line_ending char
end
end
@doc """
Specify the decimal point, default: "."
## Example
decimal_point ","
"""
defmacro decimal_point(char) do
quote bind_quoted: [char: char] do
@decimal_point char
end
end
@doc false
def compile(columns, delimiter, line_ending, decimal_point) do
headers = gen_header(columns, delimiter)
columns_ast = gen_columns(columns, decimal_point)
columns_fn =
Enum.map(columns, fn {name, _type, opts} ->
value = Keyword.get(opts, :source, name)
quote do
render(unquote(name), Map.get(row, unquote(value)))
end
end)
quote do
unquote(columns_ast)
@doc """
Called to render the CSV output.
## Example
iex> MyCSV.render(list)
"..."
"""
def render(list) when is_list(list) do
[
unquote(headers)
| Enum.map(list, fn row ->
unquote(columns_fn)
|> Enum.join(unquote(delimiter))
end)
]
|> Enum.join(unquote(line_ending))
end
end
end
defp gen_header(columns, delimiter) do
Enum.map(columns, fn {name, _type, opts} ->
Keyword.get(opts, :header, name)
|> quote_string()
end)
|> Enum.join(delimiter)
end
defp gen_columns(columns, decimal_point) do
for {name, type, opts} <- columns do
{fname, func} =
case Keyword.get(opts, :with) do
nil ->
{:render,
quote do
end}
with_function ->
{:post_render,
quote do
def render(unquote(name), value) do
post_render(unquote(name), unquote(with_function).(value))
end
end}
end
case type do
:string ->
quote do
unquote(func)
def unquote(fname)(unquote(name), value) do
quote_string(value)
end
end
:integer ->
quote do
@doc false
unquote(func)
@doc false
def unquote(fname)(unquote(name), nil), do: 0
def unquote(fname)(unquote(name), value) when is_integer(value) do
value
end
def unquote(fname)(unquote(name), value) when is_binary(value) do
value
end
def unquote(fname)(unquote(name), value) do
raise "Invalid value for #{unquote(name)}: #{inspect(value)}"
end
end
:float ->
convert =
case {Keyword.get(opts, :digits), decimal_point} do
{nil, "."} ->
quote do
v
end
{nil, char} ->
quote do
v
|> to_string
|> String.replace(".", unquote(char))
end
{digits, "."} ->
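                # Add half a unit two decimal places beyond `digits` so that
                # Float.round/2 rounds boundary values up despite binary float
                # representation error (e.g. digits: 2 adds 0.0005).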
divisor = 5 / :math.pow(10, digits + 2)
quote do
Float.round(v + unquote(divisor), unquote(digits))
end
{digits, char} ->
divisor = 5 / :math.pow(10, digits + 2)
quote do
Float.round(v + unquote(divisor), unquote(digits))
|> to_string
|> String.replace(".", unquote(char))
end
end
quote do
unquote(func)
def unquote(fname)(unquote(name), value) do
v =
cond do
is_nil(value) ->
0.0
is_float(value) ->
value
is_binary(value) ->
case Float.parse(value) do
:error ->
                    raise "Cannot parse float value \"#{inspect(value)}\""
{f, _} ->
f
end
true ->
raise "Invalid float value \"#{inspect(value)}\""
end
unquote(convert)
end
end
:date ->
quote do
unquote(func)
def unquote(fname)(unquote(name), nil), do: ""
def unquote(fname)(unquote(name), value) do
Calendar.Strftime.strftime!(value, unquote(Keyword.get(opts, :format, "%Y-%m-%d")))
end
end
:time ->
quote do
unquote(func)
def unquote(fname)(unquote(name), nil), do: ""
def unquote(fname)(unquote(name), value) when is_integer(value) do
unquote(fname)(unquote(name), DateTime.from_unix!(value))
end
def unquote(fname)(unquote(name), value) do
Calendar.Strftime.strftime!(
value,
unquote(Keyword.get(opts, :format, "%H:%M"))
)
end
end
:datetime ->
quote do
unquote(func)
def unquote(fname)(unquote(name), nil), do: ""
def unquote(fname)(unquote(name), value) do
Calendar.Strftime.strftime!(
value,
unquote(Keyword.get(opts, :format, "%Y-%m-%d %H:%M:%S"))
)
end
end
end
end
end
@doc false
def quote_string(s) do
["\"", s, "\""] |> Enum.join()
end
end
|
lib/csv_generator.ex
| 0.832815
| 0.435421
|
csv_generator.ex
|
starcoder
|
defmodule Xgit.Object do
@moduledoc ~S"""
Describes a single object stored (or about to be stored) in a git repository.
This struct is constructed, modified, and shared as a working description of
how to find and describe an object before it gets written to a repository.
"""
use Xgit.ObjectType
alias Xgit.ContentSource
alias Xgit.FileMode
alias Xgit.FilePath
alias Xgit.ObjectId
alias Xgit.PersonIdent
alias Xgit.Util.ParseCharlist
alias Xgit.Util.ParseDecimal
import Xgit.Util.ForceCoverage
import Xgit.Util.ParseHeader, only: [next_header: 1]
@typedoc ~S"""
This struct describes a single object stored or about to be stored in a git
repository.
## Struct Members
* `:type`: the object's type (`:blob`, `:tree`, `:commit`, or `:tag`)
* `:content`: how to obtain the content (see `Xgit.ContentSource`)
* `:size`: size (in bytes) of the object or `:unknown`
* `:id`: object ID (40 chars hex) of the object or `:unknown`
"""
@type t :: %__MODULE__{
type: ObjectType.t(),
content: ContentSource.t(),
size: non_neg_integer() | :unknown,
id: ObjectId.t() | :unknown
}
@enforce_keys [:type, :content]
defstruct [:type, :content, size: :unknown, id: :unknown]
@doc ~S"""
Return `true` if the struct describes a valid object.
_IMPORTANT:_ This validation _only_ verifies that the struct itself is valid.
It does not inspect the content of the object. That check can be performed by
`check/2`.
"""
@spec valid?(object :: any) :: boolean
def valid?(object)
def valid?(%__MODULE__{type: type, content: content, size: size, id: id})
when is_object_type(type) and is_integer(size) and size >= 0,
do: ObjectId.valid?(id) && content != nil && ContentSource.impl_for(content) != nil
def valid?(_), do: cover(false)
@typedoc ~S"""
Error codes which can be returned by `check/2`.
"""
@type check_reason ::
:invalid_type
| :no_tree_header
| :invalid_tree
| :invalid_parent
| :no_author
| :no_committer
| :no_object_header
| :invalid_object
          | :no_type_header
          | :no_tag_header
| :invalid_tagger
| :bad_date
| :bad_email
| :missing_email
| :missing_space_before_date
| :bad_time_zone
| :invalid_file_mode
| :truncated_in_name
| :duplicate_entry_names
| :incorrectly_sorted
| :truncated_in_object_id
| :null_sha1
| :invalid_mode
@doc ~S"""
Verify that a proposed object is valid.
This function performs a detailed check on the _content_ of the object.
For a simpler verification that the `Object` struct is _itself_
valid, see `valid?/1`.
Verifications made by this function only check that the fields of an object are
formatted correctly. The object ID checksum of the object is not verified, and
connectivity links between objects are also not verified. It's assumed that
the caller can provide both of these validations on its own.
## Options
By default, this function will only enforce Posix file name restrictions.
* `:macosx?`: `true` to also enforce Mac OS X path name restrictions
* `:windows?`: `true` to also enforce Windows path name restrictions
## Return Value
`:ok` if the object is successfully validated.
`{:error, :invalid_type}` if the object's type is unknown.
`{:error, :no_tree_header}` if the object is a commit but does not contain
a valid tree header.
`{:error, :invalid_tree}` if the object is a commit but the tree object ID
is invalid.
`{:error, :invalid_parent}` if the object is a commit but one of the `parent`
headers is invalid.
`{:error, :no_author}` if the object is a commit but there is no `author` header.
`{:error, :no_committer}` if the object is a commit but there is no `committer` header.
`{:error, :no_object_header}` if the object is a tag but there is no `object` header.
`{:error, :invalid_object}` if the object is a tag but the object ID is invalid.
  `{:error, :no_type_header}` if the object is a tag but there is no `type` header.
  `{:error, :no_tag_header}` if the object is a tag but there is no `tag` header.
`{:error, :invalid_tagger}` if the object is a tag but one of the `tagger` headers
is invalid.
`{:error, :bad_date}` if the object is a tag or a commit but has a malformed date entry.
`{:error, :bad_email}` if the object is a tag or a commit but has a malformed e-mail address.
`{:error, :missing_email}` if the object is a tag or a commit but has a missing e-mail address
where one is expected.
`{:error, :missing_space_before_date}` if the object is a tag or a commit but
has no space preceding the place where a date is expected.
`{:error, :bad_time_zone}` if the object is a tag or a commit but has a malformed
time zone entry.
`{:error, :invalid_file_mode}` if the object is a tree but one of the file modes is invalid.
`{:error, :truncated_in_name}` if the object is a tree but one of the file names is incomplete.
`{:error, :duplicate_entry_names}` if the object is a tree and contains duplicate
entry names.
`{:error, :incorrectly_sorted}` if the object is a tree and the entries are not
in alphabetical order.
`{:error, :truncated_in_object_id}` if the object is a tree and one of the object IDs
is invalid.
`{:error, :null_sha1}` if the object is a tree and one of the object IDs is all zeros.
`{:error, :invalid_mode}` if the object is a tree and one of the file modes is incomplete.
See also error responses from `Xgit.FilePath.check_path/2` and
`Xgit.FilePath.check_path_segment/2`.
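  ## Example
  A sketch (`object` is assumed to be a populated `Xgit.Object` struct;
  `store_object/1` and `reject_object/2` are hypothetical callers):
      case Xgit.Object.check(object, macosx?: true) do
        :ok -> store_object(object)
        {:error, reason} -> reject_object(object, reason)
      end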
"""
@spec check(object :: t(), windows?: boolean, macosx?: boolean) ::
:ok
| {:error, reason :: check_reason}
| {:error, reason :: FilePath.check_path_reason()}
| {:error, reason :: FilePath.check_path_segment_reason()}
def check(object, opts \\ [])
def check(%__MODULE__{type: :blob}, _opts), do: cover(:ok)
def check(%__MODULE__{type: :commit} = object, _opts), do: check_commit(object)
def check(%__MODULE__{type: :tag} = object, _opts), do: check_tag(object)
def check(%__MODULE__{type: :tree} = object, opts), do: check_tree(object, opts)
def check(%__MODULE__{type: _type}, _opts), do: cover({:error, :invalid_type})
# -- commit specifics --
defp check_commit(%__MODULE__{content: data}) when is_list(data) do
with {:tree, {'tree', tree_id, data}} <- {:tree, next_header(data)},
{:tree_id, {_tree_id_str, []}} <- {:tree_id, ObjectId.from_hex_charlist(tree_id)},
{:parents, data} when is_list(data) <- {:parents, check_commit_parents(data)},
{:author, {'author', author, data}} <- {:author, next_header(data)},
{:author_id, :ok} <- {:author_id, check_person_ident(author)},
{:committer, {'committer', committer, _data}} <- {:committer, next_header(data)},
{:committer_id, :ok} <- {:committer_id, check_person_ident(committer)} do
cover :ok
else
{:tree, _} -> cover {:error, :no_tree_header}
{:tree_id, _} -> cover {:error, :invalid_tree}
{:parents, _} -> cover {:error, :invalid_parent}
{:author, _} -> cover {:error, :no_author}
{:author_id, why} when is_atom(why) -> cover {:error, why}
{:committer, _} -> cover {:error, :no_committer}
{:committer_id, why} when is_atom(why) -> cover {:error, why}
end
end
defp check_commit_parents(data) do
with {'parent', parent_id, next_data} <- next_header(data),
{:parent_id, {_parent_id, []}} <- {:parent_id, ObjectId.from_hex_charlist(parent_id)} do
check_commit_parents(next_data)
else
{:parent_id, _} -> cover nil
_ -> cover data
end
end
# -- tag specifics --
defp check_tag(%__MODULE__{content: data}) when is_list(data) do
with {:object, {'object', object_id, data}} <- {:object, next_header(data)},
{:object_id, {object_id, []}} when is_binary(object_id) <-
{:object_id, ObjectId.from_hex_charlist(object_id)},
{:type, {'type', _type, data}} <- {:type, next_header(data)},
{:tag, {'tag', _tag, data}} <- {:tag, next_header(data)},
{:tagger, data} when is_list(data) <- {:tagger, maybe_match_tagger(data)} do
cover :ok
else
{:object, _} -> cover {:error, :no_object_header}
{:object_id, _} -> cover {:error, :invalid_object}
{:type, _} -> cover {:error, :no_type_header}
{:tag, _} -> cover {:error, :no_tag_header}
{:tagger, _} -> cover {:error, :invalid_tagger}
end
end
defp maybe_match_tagger(data) do
with {'tagger', tagger, next} when next != data <- next_header(data),
{:valid_person_ident, %PersonIdent{}} <-
{:valid_person_ident, PersonIdent.from_byte_list(tagger)} do
cover next
else
{:valid_person_ident, _} -> cover nil
_ -> cover data
end
end
# -- tree specifics --
defp check_tree(%__MODULE__{content: data}, opts) when is_list(data) and is_list(opts) do
maybe_normalized_paths =
if Keyword.get(opts, :windows?) || Keyword.get(opts, :macosx?) do
MapSet.new()
else
cover nil
end
check_next_tree_entry(data, maybe_normalized_paths, [], FileMode.regular_file(), opts)
end
defp check_next_tree_entry([], _maybe_normalized_paths, _previous_name, _previous_mode, _opts),
do: cover(:ok)
defp check_next_tree_entry(data, maybe_normalized_paths, previous_name, previous_mode, opts) do
# Scan one entry then recurse to scan remaining entries.
with {:file_mode, {:ok, file_mode, data}} <- {:file_mode, check_file_mode(data, 0)},
{:file_mode, true} <- {:file_mode, FileMode.valid?(file_mode)},
{:path_split, {path_segment, [0 | data]}} <- {:path_split, path_and_object_id(data)},
{:path_valid, :ok} <- {:path_valid, FilePath.check_path_segment(path_segment, opts)},
{:duplicate, false} <-
{:duplicate, maybe_mapset_member?(maybe_normalized_paths, path_segment, opts)},
{:duplicate, false} <- {:duplicate, duplicate_name?(path_segment, data)},
{:sorted, true} <-
{:sorted, correctly_sorted?(previous_name, previous_mode, path_segment, file_mode)},
{raw_object_id, data} <- Enum.split(data, 20),
{:object_id_length, 20} <- {:object_id_length, Enum.count(raw_object_id)},
{:object_id_null, false} <- {:object_id_null, Enum.all?(raw_object_id, &(&1 == 0))} do
check_next_tree_entry(
data,
maybe_put_path(maybe_normalized_paths, path_segment, opts),
path_segment,
file_mode,
opts
)
else
{:file_mode, {:error, reason}} -> cover {:error, reason}
{:file_mode, _} -> cover {:error, :invalid_file_mode}
{:path_split, _} -> cover {:error, :truncated_in_name}
{:path_valid, {:error, reason}} -> cover {:error, reason}
{:duplicate, _} -> cover {:error, :duplicate_entry_names}
{:sorted, _} -> cover {:error, :incorrectly_sorted}
{:object_id_length, _} -> cover {:error, :truncated_in_object_id}
{:object_id_null, _} -> cover {:error, :null_sha1}
end
end
defp check_file_mode([], _mode), do: cover({:error, :invalid_mode})
defp check_file_mode([?\s | data], mode), do: cover({:ok, mode, data})
defp check_file_mode([?0 | _data], 0), do: cover({:error, :invalid_mode})
defp check_file_mode([c | data], mode) when c >= ?0 and c <= ?7,
do: check_file_mode(data, mode * 8 + (c - ?0))
defp check_file_mode([_c | _data], _mode), do: cover({:error, :invalid_mode})
defp path_and_object_id(data), do: Enum.split_while(data, &(&1 != 0))
defp maybe_mapset_member?(nil, _path_segment, _opts), do: cover(false)
defp maybe_mapset_member?(mapset, path_segment, opts),
do: MapSet.member?(mapset, normalize(path_segment, Keyword.get(opts, :macosx?, false)))
defp duplicate_name?(this_name, data) do
data = Enum.drop(data, 20)
{mode_str, data} = Enum.split_while(data, &(&1 != ?\s))
mode = parse_octal(mode_str)
data = Enum.drop(data, 1)
{next_name, data} = Enum.split_while(data, &(&1 != 0))
data = Enum.drop(data, 1)
compare = FilePath.compare_same_name(this_name, next_name, mode)
cond do
Enum.empty?(mode_str) or Enum.empty?(next_name) -> cover false
compare == :lt -> cover false
compare == :eq -> cover true
compare == :gt -> duplicate_name?(this_name, data)
end
end
defp parse_octal(data) do
case Integer.parse(to_string(data), 8) do
{n, _} when is_integer(n) -> cover n
:error -> cover 0
end
end
defp correctly_sorted?([], _previous_mode, _this_name, _this_mode), do: cover(true)
defp correctly_sorted?(previous_name, previous_mode, this_name, this_mode),
do: FilePath.compare(previous_name, previous_mode, this_name, this_mode) != :gt
defp maybe_put_path(nil, _path_segment, _opts), do: cover(nil)
defp maybe_put_path(mapset, path_segment, opts),
do: MapSet.put(mapset, normalize(path_segment, Keyword.get(opts, :macosx?, false)))
# -- generic matching utilities --
defp check_person_ident(data) do
with {:missing_email, [?< | email_start]} <-
{:missing_email, Enum.drop_while(data, &(&1 != ?<))},
{:bad_email, [?> | after_email]} <-
{:bad_email, Enum.drop_while(email_start, &(&1 != ?>))},
{:missing_space_before_date, [?\s | date]} <- {:missing_space_before_date, after_email},
{:bad_date, {_date, [?\s | tz]}} <-
{:bad_date, ParseDecimal.from_decimal_charlist(date)},
{:bad_timezone, {_tz, []}} <- {:bad_timezone, ParseDecimal.from_decimal_charlist(tz)} do
cover :ok
else
{:missing_email, _} -> cover :missing_email
{:bad_email, _} -> cover :bad_email
{:missing_space_before_date, _} -> cover :missing_space_before_date
{:bad_date, _} -> cover :bad_date
{:bad_timezone, _} -> cover :bad_time_zone
end
end
defp normalize(name, true = _mac?) when is_list(name) do
name
|> ParseCharlist.decode_ambiguous_charlist()
|> String.downcase()
|> :unicode.characters_to_nfc_binary()
end
defp normalize(name, _) when is_list(name), do: Enum.map(name, &to_lower/1)
defp to_lower(b) when b >= ?A and b <= ?Z, do: cover(b + 32)
defp to_lower(b), do: cover(b)
end
|
lib/xgit/object.ex
| 0.892764
| 0.646655
|
object.ex
|
starcoder
|
defmodule ExUnitGWT do
@moduledoc """
Helpers that add Given-When-Then (GWT) syntax to ExUnit
## Examples
```
defmodule ExUnitGWTExample do
use ExUnit.Case
import ExUnitGWT
feature "Serve coffee: in order to earn money customers should be able to buy coffee at all times" do
scenario "Buy last coffee" do
given? "there are 1 coffees left in the machine" do
coffees_left = 1
coffee_price = 1
end
and? "I have deposited 1 dollar" do
dollars_deposited = 1
end
when? "I press the coffee button" do
coffees_received = coffee_price * dollars_deposited
coffees_left = coffees_left - coffees_received
end
then? "I should be served a coffee" do
assert coffees_received == 1
end
and? "There should be no coffees left in the machine" do
assert coffees_left == 0
end
end
end
end
```
"""
@doc """
  Used to describe a feature with some terse yet descriptive text of what is desired.
  The canonical framing: in order to realize a named business value, as an explicit
  system actor, I want to gain some beneficial outcome which furthers the goal.
## Examples
```
feature "Shopping cart" do
...
end
```
"""
defmacro feature(description, do: block) do
quote do
ExUnit.Case.__describe__(__MODULE__, __ENV__.line, unquote(description))
try do
unquote(block)
after
@ex_unit_describe nil
Module.delete_attribute(__MODULE__, :describetag)
end
end
end
@doc """
Used to describe some determinable business situation
## Examples
```
scenario "User adds item to cart" do
...
end
```
"""
defmacro scenario(description, var \\ quote(do: _), contents) do
contents =
case contents do
[do: block] ->
quote do
unquote(block)
:ok
end
_ ->
quote do
try(unquote(contents))
:ok
end
end
var = Macro.escape(var)
contents = Macro.escape(contents, unquote: true)
quote bind_quoted: [var: var, contents: contents, description: description] do
name = ExUnit.Case.register_test(__ENV__, :scenario, description, [])
def unquote(name)(unquote(var)), do: unquote(contents)
end
end
@doc """
Describes a scenario precondition
## Examples
```
given? "I'm a logged-in User" do
...
end
```
"""
defmacro given?(_description, clause) do
block = Keyword.get(clause, :do, nil)
quote do
unquote(block)
end
end
@doc """
Describes an action by the actor
## Examples
```
when? "I go to the Item page and click Add" do
...
end
```
"""
defmacro when?(_description, clause) do
block = Keyword.get(clause, :do, nil)
quote do
unquote(block)
end
end
@doc """
Describes a testable outcome
## Examples
```
then? "The quantity of items in my cart should go up" do
...
end
```
"""
defmacro then?(_description, clause) do
block = Keyword.get(clause, :do, nil)
quote do
unquote(block)
end
end
@doc """
Used to describe additional preconditions, actions or expected results
## Examples
```
and? "My subtotal should increment" do
...
end
```
"""
defmacro and?(_description, clause) do
block = Keyword.get(clause, :do, nil)
quote do
unquote(block)
end
end
@doc """
Used to describe additional negating preconditions, actions or expected results
## Examples
```
but? "I should not be able to check out before adding payment info" do
...
end
```
"""
defmacro but?(_description, clause) do
block = Keyword.get(clause, :do, nil)
quote do
unquote(block)
end
end
end
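Because `given?`/`when?`/`then?`/`and?`/`but?` simply splice their blocks into the enclosing `scenario` body, all steps share one function scope and bindings flow from step to step. A minimal sketch (assuming ExUnit has been started):

```elixir
defmodule ExUnitGWT.SpliceExample do
  use ExUnit.Case
  import ExUnitGWT

  scenario "bindings flow between steps" do
    given? "a starting value" do
      x = 1
    end

    when? "it is doubled" do
      x = x * 2
    end

    then? "the result is visible" do
      assert x == 2
    end
  end
end
```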
|
lib/ex_unit_gwt.ex
|
defmodule Workflow.Domain.Account do
defstruct [
account_number: nil,
balance: 0,
state: nil,
]
alias Workflow.Domain.Account
defmodule Commands do
defmodule OpenAccount, do: defstruct [:account_number, :initial_balance]
defmodule DepositMoney, do: defstruct [:account_number, :transfer_uuid, :amount]
defmodule WithdrawMoney, do: defstruct [:account_number, :transfer_uuid, :amount]
defmodule CloseAccount, do: defstruct [:account_number]
end
defmodule Events do
defmodule AccountOpened, do: defstruct [:account_number, :initial_balance]
defmodule MoneyDeposited, do: defstruct [:account_number, :transfer_uuid, :amount, :balance]
defmodule MoneyWithdrawn, do: defstruct [:account_number, :transfer_uuid, :amount, :balance]
defmodule AccountOverdrawn, do: defstruct [:account_number, :balance]
defmodule AccountClosed, do: defstruct [:account_number]
end
alias Commands.{OpenAccount,DepositMoney,WithdrawMoney,CloseAccount}
alias Events.{AccountOpened,MoneyDeposited,MoneyWithdrawn,AccountOverdrawn,AccountClosed}
def handle(%Account{state: nil},
%OpenAccount{account_number: account_number, initial_balance: initial_balance})
when is_number(initial_balance) and initial_balance > 0 do
%AccountOpened{account_number: account_number, initial_balance: initial_balance}
end
def handle(%Account{state: :active, balance: balance},
%DepositMoney{account_number: account_number, transfer_uuid: transfer_uuid, amount: amount})
when is_number(amount) and amount > 0 do
balance = balance + amount
%MoneyDeposited{account_number: account_number, transfer_uuid: transfer_uuid, amount: amount, balance: balance}
end
def handle(%Account{state: :active, balance: balance},
%WithdrawMoney{account_number: account_number, transfer_uuid: transfer_uuid, amount: amount})
when is_number(amount) and amount > 0 do
case balance - amount do
balance when balance < 0 ->
[
%MoneyWithdrawn{account_number: account_number, transfer_uuid: transfer_uuid, amount: amount, balance: balance},
%AccountOverdrawn{account_number: account_number, balance: balance},
]
balance ->
%MoneyWithdrawn{account_number: account_number, transfer_uuid: transfer_uuid, amount: amount, balance: balance}
end
end
def handle(%Account{state: :active},
%CloseAccount{account_number: account_number}), do: %AccountClosed{account_number: account_number}
  # state mutators
def apply(%Account{} = state, %AccountOpened{account_number: account_number, initial_balance: initial_balance}) do
%Account{state |
account_number: account_number,
balance: initial_balance,
state: :active,
}
end
def apply(%Account{} = state, %MoneyDeposited{balance: balance}), do: %Account{state | balance: balance}
def apply(%Account{} = state, %MoneyWithdrawn{balance: balance}), do: %Account{state | balance: balance}
def apply(%Account{} = state, %AccountOverdrawn{}), do: state
def apply(%Account{} = state, %AccountClosed{}) do
%Account{state |
state: :closed,
}
end
end
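A minimal sketch of driving the aggregate: decide with `handle/2`, then fold the emitted event(s) back into state with `apply/2`. The command values are made up:

```elixir
alias Workflow.Domain.Account
alias Workflow.Domain.Account.Commands.{OpenAccount, DepositMoney}

commands = [
  %OpenAccount{account_number: "ACC-1", initial_balance: 100},
  %DepositMoney{account_number: "ACC-1", transfer_uuid: "t-1", amount: 50}
]

final_state =
  Enum.reduce(commands, %Account{}, fn command, state ->
    state
    |> Account.handle(command)
    # handle/2 may return one event or a list, so normalize to a list.
    |> List.wrap()
    |> Enum.reduce(state, &Account.apply(&2, &1))
  end)

# final_state => %Account{account_number: "ACC-1", balance: 150, state: :active}
```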
|
test/domain/account/account.ex
|
defmodule Cog.Relay.Tracker do
require Logger
@moduledoc """
Represents the internal state of `Cog.Relay.Relays` and functions to
operate on it.
Tracks all the relays that have checked in with the bot, recording
which bundles they each serve.
Maintains a set of disabled relays. Relays that appear in the disabled
set will be filtered out when the list of relays for a bundle is requested.
Note: Relays must be explicitly disabled, otherwise they are assumed to be
available.
"""
@type relay_id :: String.t
@type bundle_name :: String.t
@type version :: String.t # e.g. "1.0.0"
@type version_spec :: {bundle_name, version}
@type t :: %__MODULE__{map: %{version_spec => MapSet.t}, #MapSets are of relay IDs
disabled: MapSet.t}
defstruct [map: %{}, disabled: MapSet.new]
@doc """
Create a new, empty Tracker
"""
@spec new() :: t
def new(),
do: %__MODULE__{}
@doc """
Enables a relay if it exists in the disabled set by removing it from the
disabled set. When the list of relays for a bundle is requested, disabled
bundles are filtered out.
Note: If a relay is assigned no bundles it is unknown to the tracker. When
enabling or disabling make sure to load bundles first or this will just be
a noop.
"""
@spec enable_relay(t, String.t) :: t
def enable_relay(tracker, relay_id) do
disabled = MapSet.delete(tracker.disabled, relay_id)
%{tracker | disabled: disabled}
end
@doc """
Disables a relay if it exists in the tracker by adding it to the disabled
set. When the list of relays for a bundle is requested, disabled bundles
are filtered out.
Note: If a relay is assigned no bundles it is unknown to the tracker. When
enabling or disabling make sure to load bundles first or this will just be
a noop.
"""
@spec disable_relay(t, String.t) :: t
def disable_relay(tracker, relay_id) do
if in_tracker?(tracker, relay_id) do
disabled = MapSet.put(tracker.disabled, relay_id)
%{tracker | disabled: disabled}
else
tracker
end
end
@doc """
Removes all record of `relay` from the tracker. If `relay` is the
last one serving a given bundle version, that version is removed
from the tracker as well.
"""
@spec remove_relay(t, String.t) :: t
def remove_relay(tracker, relay) do
updated = Enum.reduce(tracker.map, %{}, fn({version_spec, relays}, acc) ->
remaining = MapSet.delete(relays, relay)
if Enum.empty?(remaining) do
acc
else
Map.put(acc, version_spec, remaining)
end
end)
disabled = MapSet.delete(tracker.disabled, relay)
%{tracker | map: updated, disabled: disabled}
end
@doc """
Records `relay` as serving each of `bundle_versions`. If `relay` has
previously been recorded as serving other bundles, those bundles are
retained; this is an incremental, cumulative operation.
"""
@spec add_bundle_versions_for_relay(t, String.t, [version_spec]) :: t
def add_bundle_versions_for_relay(tracker, relay, version_specs) do
map = Enum.reduce(version_specs, tracker.map, fn(spec, acc) ->
Map.update(acc, spec, MapSet.new([relay]), &MapSet.put(&1, relay))
end)
%{tracker | map: map}
end
@doc """
Like `add_bundle_versions_for_relay/3` but overwrites any existing bundle
information for `relay`. From this point, `relay` is known to only
serve `bundle_versions`, and no others.
"""
@spec set_bundle_versions_for_relay(t, String.t, [version_spec]) :: t
def set_bundle_versions_for_relay(tracker, relay, version_specs) do
tracker
|> remove_relay(relay)
|> add_bundle_versions_for_relay(relay, version_specs)
end
@doc """
Removes the given bundle version from the tracker.
"""
@spec drop_bundle(t, bundle_name, version) :: t
def drop_bundle(tracker, bundle_name, version) do
map = Map.delete(tracker.map, {bundle_name, version})
%{tracker | map: map}
end
@doc """
Return a tuple with the list of relays serving the bundle or an error and
a reason.
"""
@spec relays(t, bundle_name, version) :: {:ok, [String.t]} | {:error, atom()}
def relays(tracker, bundle_name, bundle_version) when is_binary(bundle_name) do
relays = Map.get(tracker.map, {bundle_name, bundle_version}, MapSet.new)
enabled_relays = MapSet.difference(relays, tracker.disabled)
case {MapSet.to_list(relays), MapSet.to_list(enabled_relays)} do
{[], []} -> {:error, :no_relays}
{_, []} -> {:error, :no_enabled_relays}
{_, relays} -> {:ok, relays}
end
end
@doc """
Return true/false indicating whether or not the specified bundle version is available
on the selected relay.
"""
@spec is_bundle_available?(t, relay_id, bundle_name, version) :: boolean()
def is_bundle_available?(tracker, relay, bundle_name, bundle_version) do
case relays(tracker, bundle_name, bundle_version) do
{:ok, available_relays} ->
Enum.member?(available_relays, relay)
_ ->
false
end
end
  defp in_tracker?(tracker, relay_id) do
    # Start the reduce with an empty set so an empty tracker yields false
    # (matching the documented "noop" behavior) instead of raising.
    tracker.map
    |> Map.values()
    |> Enum.reduce(MapSet.new(), &MapSet.union/2)
    |> MapSet.member?(relay_id)
  end
end
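A short sketch of the tracker lifecycle: check two relays in for a bundle version, disable one, then ask which relays can serve that version:

```elixir
alias Cog.Relay.Tracker

tracker =
  Tracker.new()
  |> Tracker.add_bundle_versions_for_relay("relay-1", [{"echo", "1.0.0"}])
  |> Tracker.add_bundle_versions_for_relay("relay-2", [{"echo", "1.0.0"}])
  |> Tracker.disable_relay("relay-2")

{:ok, ["relay-1"]} = Tracker.relays(tracker, "echo", "1.0.0")
{:error, :no_relays} = Tracker.relays(tracker, "missing", "1.0.0")
```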
|
lib/cog/relay/tracker.ex
|
defmodule Playwright.Channel.Catalog do
@moduledoc """
Provides storage and management of ChannelOwner instances.
`Catalog` implements `GenServer` to maintain state, while domain logic is
expected to be handled within caller modules such as `Playwright.Channel`.
"""
use GenServer
import Playwright.Helpers.ErrorHandling
alias Playwright.Channel.Error
defstruct [:awaiting, :storage]
# module init
# ---------------------------------------------------------------------------
@doc """
Starts a `Playwright.Channel.Catalog` linked to the current process with the
given "root" resource.
## Return Values
If the `Catalog` is successfully created and initialized, the function
returns `{:ok, pid}`, where `pid` is the PID of the running `Catalog` server.
## Arguments
| key/name | type | | description |
| -------- | ------ | ------- | ----------- |
| `root` | param | `map()` | The root resource for items in the `Catalog`. Provides the `Session` for its descendants |
"""
@spec start_link(map()) :: {:ok, pid()}
def start_link(root) do
GenServer.start_link(__MODULE__, root)
end
# @impl init
# ---------------------------------------------------------------------------
@impl GenServer
def init(root) do
{:ok,
%__MODULE__{
awaiting: %{},
storage: %{"Root" => root}
}}
end
# module API
# ---------------------------------------------------------------------------
@doc """
Retrieves a resource from the `Catalog` by its `param: guid`.
If the resource is already present in the `Catalog` that resource is returned
directly. The desired resource might not yet be in the `Catalog`, in which
case the request will be considered as "awaiting". An awaiting request will
later receive a response, when the `Catalog` entry is made, or will time out.
## Returns
- `resource`
- `{:error, error}`
## Arguments
| key/name | type | | description |
| ---------- | ------ | ---------- | ----------- |
| `catalog` | param | `pid()` | PID for the Catalog server |
| `guid` | param | `binary()` | GUID to look up |
| `:timeout` | option | `float()` | Maximum time to wait, in milliseconds. Defaults to `30_000` (30 seconds). |
"""
@spec get(pid(), binary(), map()) :: struct() | {:error, Error.t()}
def get(catalog, guid, options \\ %{}) do
with_timeout(options, fn timeout ->
GenServer.call(catalog, {:get, {:guid, guid}}, timeout)
end)
end
@doc """
Returns a `List` of resources matching the provided "filter".
## Returns
- [`resource`]
- []
## Arguments
| key/name | type | | description |
| --------- | ------ | ------- | ----------- |
| `catalog` | param | `pid()` | PID for the Catalog server |
| `filter` | param | `map()` | Attributes for filtering |
"""
@spec list(pid(), map()) :: [struct()]
def list(catalog, filter) do
GenServer.call(catalog, {:list, filter})
end
@doc """
Adds a resource to the `Catalog`, keyed on `:guid`.
## Returns
- `resource` (the same as provided)
## Arguments
| key/name | type | | description |
| ---------- | ------ | ---------- | ----------- |
| `catalog` | param | `pid()` | PID for the Catalog server |
| `resource` | param | `struct()` | The resource to store |
"""
@spec put(pid(), struct()) :: struct()
def put(catalog, %{guid: guid} = resource) do
GenServer.call(catalog, {:put, {:guid, guid}, resource})
end
@doc """
  Removes a resource from the `Catalog`, along with its descendants.
## Returns
- `:ok`
## Arguments
| key/name | type | | description |
| ---------- | ------ | ---------- | ----------- |
| `catalog` | param | `pid()` | PID for the Catalog server |
| `guid` | param | `binary()` | GUID for the "parent" |
"""
@spec rm_r(pid(), binary()) :: :ok
def rm_r(catalog, guid) do
children = list(catalog, %{parent: get(catalog, guid)})
children |> Enum.each(fn child -> rm_r(catalog, child.guid) end)
rm(catalog, guid)
end
# @impl callbacks
# ---------------------------------------------------------------------------
@impl GenServer
def handle_call({:get, {:guid, guid}}, from, %{awaiting: awaiting, storage: storage} = state) do
item = storage[guid]
if item do
{:reply, item, state}
else
{:noreply, %{state | awaiting: Map.put(awaiting, guid, from)}}
end
end
@impl GenServer
def handle_call({:list, filter}, _, %{storage: storage} = state) do
case filter(Map.values(storage), filter, []) do
[] ->
{:reply, [], state}
result ->
{:reply, result, state}
end
end
@impl GenServer
def handle_call({:put, {:guid, guid}, item}, _, %{awaiting: awaiting, storage: storage} = state) do
{caller, awaiting} = Map.pop(awaiting, guid)
storage = Map.put(storage, guid, item)
if caller do
GenServer.reply(caller, item)
end
{:reply, item, %{state | awaiting: awaiting, storage: storage}}
end
@impl GenServer
def handle_call({:rm, guid}, _, %{storage: storage} = state) do
updated = Map.delete(storage, guid)
{:reply, :ok, %{state | storage: updated}}
end
# private
# ---------------------------------------------------------------------------
defp filter([], _attrs, result) do
result
end
defp filter([head | tail], attrs, result) when head.type == "" do
filter(tail, attrs, result)
end
defp filter([head | tail], %{parent: parent, type: type} = attrs, result)
when head.parent.guid == parent.guid and head.type == type do
filter(tail, attrs, result ++ [head])
end
defp filter([head | tail], %{parent: parent, type: type} = attrs, result)
when head.parent.guid != parent.guid or head.type != type do
filter(tail, attrs, result)
end
defp filter([head | tail], %{parent: parent} = attrs, result)
when head.parent.guid == parent.guid do
filter(tail, attrs, result ++ [head])
end
defp filter([head | tail], %{type: type} = attrs, result)
when head.type == type do
filter(tail, attrs, result ++ [head])
end
defp filter([head | tail], %{guid: guid} = attrs, result)
when head.guid == guid do
filter(tail, attrs, result ++ [head])
end
defp filter([_head | tail], attrs, result) do
filter(tail, attrs, result)
end
defp rm(catalog, guid) do
GenServer.call(catalog, {:rm, guid})
end
end
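A sketch of basic `Catalog` usage. The resources here are plain maps for illustration (Playwright stores ChannelOwner structs), and `get/3` is assumed to fall back to its default 30-second timeout:

```elixir
alias Playwright.Channel.Catalog

{:ok, catalog} = Catalog.start_link(%{guid: "Root", type: "Root"})

page = %{guid: "page@1", type: "Page", parent: %{guid: "Root"}}
Catalog.put(catalog, page)

Catalog.get(catalog, "page@1")          # => page
Catalog.list(catalog, %{type: "Page"})  # => [page]
```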
|
lib/playwright/channel/catalog.ex
|
defmodule RlStudy.MDP.Environment do
alias RlStudy.MDP.State
alias RlStudy.MDP.Action
require Logger
@type grid_t :: [[integer]]
@type t :: %RlStudy.MDP.Environment{
grid: grid_t(),
agent_state: RlStudy.MDP.State.t(),
move_probe: float,
default_reward: float
}
defstruct [:grid, :agent_state, :move_probe, :default_reward]
@doc """
# Examples
iex> grid = [[0, 0, 0, 1], [0, 9, 0, -1], [0, 0, 0, 0]]
iex> RlStudy.MDP.Environment.new(grid)
%RlStudy.MDP.Environment{
agent_state: %RlStudy.MDP.State{column: -1, row: -1},
default_reward: -0.04,
grid: [[0, 0, 0, 1], [0, 9, 0, -1], [0, 0, 0, 0]],
move_probe: 0.8
}
"""
@spec new(grid_t()) :: RlStudy.MDP.Environment.t()
def new(grid) do
new(grid, 0.8)
end
@doc """
# Examples
iex> grid = [[0, 0, 0, 1], [0, 9, 0, -1], [0, 0, 0, 0]]
iex> RlStudy.MDP.Environment.new(grid, 0.3)
%RlStudy.MDP.Environment{
agent_state: %RlStudy.MDP.State{column: -1, row: -1},
default_reward: -0.04,
grid: [[0, 0, 0, 1], [0, 9, 0, -1], [0, 0, 0, 0]],
move_probe: 0.3
}
"""
@spec new(grid_t(), float) :: RlStudy.MDP.Environment.t()
def new(grid, move_probe) do
%RlStudy.MDP.Environment{
grid: grid,
agent_state: State.new(),
default_reward: -0.04,
move_probe: move_probe
}
end
@doc """
# Examples
iex> grid = [[0, 0, 0, 1], [0, 9, 0, -1], [0, 0, 0, 0]]
iex> env = RlStudy.MDP.Environment.new(grid)
iex> RlStudy.MDP.Environment.row_length(env)
3
"""
@spec row_length(t()) :: non_neg_integer()
def row_length(environment) do
length(environment.grid)
end
@doc """
# Examples
iex> grid = [[0, 0, 0, 1], [0, 9, 0, -1], [0, 0, 0, 0]]
iex> env = RlStudy.MDP.Environment.new(grid)
iex> RlStudy.MDP.Environment.column_length(env)
4
"""
@spec column_length(t()) :: non_neg_integer()
def column_length(environment) do
environment.grid
|> Enum.at(0)
|> length()
end
@doc """
# Examples
iex> RlStudy.MDP.Environment.actions()
[:up, :down, :left, :right]
"""
@spec actions :: [RlStudy.MDP.Action.t()]
def actions() do
[Action.up(), Action.down(), Action.left(), Action.right()]
end
@doc """
# Examples
iex> grid = [[0, 0, 0, 1], [0, 9, 0, -1], [0, 0, 0, 0]]
iex> env = RlStudy.MDP.Environment.new(grid)
iex> RlStudy.MDP.Environment.states(env)
[
%RlStudy.MDP.State{column: 0, row: 0},
%RlStudy.MDP.State{column: 1, row: 0},
%RlStudy.MDP.State{column: 2, row: 0},
%RlStudy.MDP.State{column: 3, row: 0},
%RlStudy.MDP.State{column: 0, row: 1},
%RlStudy.MDP.State{column: 2, row: 1},
%RlStudy.MDP.State{column: 3, row: 1},
%RlStudy.MDP.State{column: 0, row: 2},
%RlStudy.MDP.State{column: 1, row: 2},
%RlStudy.MDP.State{column: 2, row: 2},
%RlStudy.MDP.State{column: 3, row: 2}
]
"""
@spec states(RlStudy.MDP.Environment.t()) :: [RlStudy.MDP.State.t()]
def states(environment) do
environment.grid
|> Enum.with_index()
|> Enum.map(fn {row, index_row} ->
row
|> Enum.with_index()
|> Enum.map(fn {cell, index_column} ->
if(cell != 9) do
State.new(index_row, index_column)
end
end)
end)
|> List.flatten()
|> Enum.filter(fn elm -> elm != nil end)
end
@doc """
# Examples
iex> grid = [[0, 0, 0, 1], [0, 9, 0, -1], [0, 0, 0, 0]]
iex> env = RlStudy.MDP.Environment.new(grid)
iex> state = RlStudy.MDP.State.new(2,0)
iex> RlStudy.MDP.Environment.transit_func(env, state, :up)
%{%RlStudy.MDP.State{column: 0, row: 1} => 0.8, %RlStudy.MDP.State{column: 0, row: 2} => 0.09999999999999998, %RlStudy.MDP.State{column: 1, row: 2} => 0.09999999999999998}
iex> RlStudy.MDP.Environment.transit_func(env, state, :right)
%{%RlStudy.MDP.State{column: 0, row: 1} => 0.09999999999999998, %RlStudy.MDP.State{column: 0, row: 2} => 0.09999999999999998, %RlStudy.MDP.State{column: 1, row: 2} => 0.8}
iex> RlStudy.MDP.Environment.transit_func(env, state, :down)
%{%RlStudy.MDP.State{column: 0, row: 1} => 0, %RlStudy.MDP.State{column: 0, row: 2} => 0.9, %RlStudy.MDP.State{column: 1, row: 2} => 0.09999999999999998}
iex> RlStudy.MDP.Environment.transit_func(env, state, :left)
%{%RlStudy.MDP.State{column: 0, row: 1} => 0.09999999999999998, %RlStudy.MDP.State{column: 0, row: 2} => 0.9, %RlStudy.MDP.State{column: 1, row: 2} => 0}
"""
@spec transit_func(
RlStudy.MDP.Environment.t(),
RlStudy.MDP.State.t(),
RlStudy.MDP.Action.t()
        ) :: %{optional(RlStudy.MDP.State.t()) => float}
def transit_func(environment, state, action) do
Logger.debug("Transit. state: #{inspect(state)}, action: #{inspect(action)}")
transition_probes = %{}
    # Note: the value of this `if` is unused, so it does not short-circuit the
    # function; states with no legal action are caught by the raise in move/3.
    if !can_action_at(environment, state) do
      transition_probes
    end
    opposite_direction = Action.opsite_action(action)
    Logger.debug("opposite_direction: #{inspect(opposite_direction)}")
actions()
|> Enum.reduce(transition_probes, fn a, acc ->
Logger.debug("Update probes. action: #{inspect(a)}, transit_probes: #{inspect(acc)}")
next_state = move(environment, state, a)
probe =
cond do
a == action -> environment.move_probe
          a != opposite_direction -> (1 - environment.move_probe) / 2
true -> 0
end
Logger.debug("next_state: #{inspect(next_state)}, probe: #{probe}")
# https://elixir-lang.org/getting-started/pattern-matching.html#the-pin-operator
Map.update(acc, next_state, probe, fn value ->
value + probe
end)
end)
end
@doc """
# Examples
iex> grid = [[0, 0, 0, 1], [0, 9, 0, -1], [0, 0, 0, 0]]
iex> env = RlStudy.MDP.Environment.new(grid)
iex> state = RlStudy.MDP.State.new(0,0)
iex> RlStudy.MDP.Environment.can_action_at(env, state)
true
iex> state2 = RlStudy.MDP.State.new(1,1)
iex> RlStudy.MDP.Environment.can_action_at(env, state2)
false
"""
@spec can_action_at(RlStudy.MDP.Environment.t(), RlStudy.MDP.State.t()) :: boolean
def can_action_at(environment, state) do
environment.grid
|> Enum.at(state.row)
|> Enum.at(state.column)
|> Kernel.==(0)
end
defp move(environment, state, action) do
Logger.debug(
"environment: #{inspect(environment)}, state: #{inspect(state)}, action: #{inspect(action)}"
)
if !can_action_at(environment, state) do
raise "Can't move from here!"
end
next_state = State.clone(state)
# Move
next_state =
cond do
action == Action.up() -> %{next_state | row: next_state.row - 1}
action == Action.down() -> %{next_state | row: next_state.row + 1}
action == Action.left() -> %{next_state | column: next_state.column - 1}
action == Action.right() -> %{next_state | column: next_state.column + 1}
end
next_state =
cond do
# Check if next_state is not out of the grid
!(0 <= next_state.row && next_state.row < row_length(environment)) ->
state
!(0 <= next_state.column && next_state.column < column_length(environment)) ->
state
# Check whether the agent bumped a block cell.
environment.grid |> Enum.at(next_state.row) |> Enum.at(next_state.column) |> Kernel.==(9) ->
state
true ->
next_state
end
next_state
end
@doc """
# Examples
iex> grid = [[0, 0, 0, 1], [0, 9, 0, -1], [0, 0, 0, 0]]
iex> env = RlStudy.MDP.Environment.new(grid)
iex> state = RlStudy.MDP.State.new(0,0)
iex> RlStudy.MDP.Environment.reward_func(env, state)
%{reward: -0.04, done: false}
iex> state_goal = RlStudy.MDP.State.new(0,3)
iex> RlStudy.MDP.Environment.reward_func(env, state_goal)
%{reward: 1, done: true}
iex> state_damage = RlStudy.MDP.State.new(1,3)
iex> RlStudy.MDP.Environment.reward_func(env, state_damage)
%{reward: -1, done: true}
"""
@spec reward_func(
RlStudy.MDP.Environment.t(),
RlStudy.MDP.State.t()
) :: %{done: boolean, reward: float}
def reward_func(environment, state) do
case environment.grid |> Enum.at(state.row) |> Enum.at(state.column) do
1 -> %{reward: 1, done: true}
-1 -> %{reward: -1, done: true}
_ -> %{reward: environment.default_reward, done: false}
end
end
@doc """
# Examples
iex> grid = [[0, 0, 0, 1], [0, 9, 0, -1], [0, 0, 0, 0]]
iex> env = RlStudy.MDP.Environment.new(grid)
iex> RlStudy.MDP.Environment.reset(env)
%RlStudy.MDP.Environment{
agent_state: %RlStudy.MDP.State{column: 0, row: 2},
default_reward: -0.04,
grid: [[0, 0, 0, 1], [0, 9, 0, -1], [0, 0, 0, 0]],
move_probe: 0.8
}
"""
@spec reset(RlStudy.MDP.Environment.t()) :: RlStudy.MDP.Environment.t()
def reset(environment) do
%{environment | agent_state: State.new(row_length(environment) - 1, 0)}
end
@doc """
TODO
# Examples
iex>
iex> :rand.seed(:exrop, {103, 104, 105})
iex> grid = [[0, 0, 0, 1], [0, 9, 0, -1], [0, 0, 0, 0]]
iex> env = RlStudy.MDP.Environment.new(grid)
iex> env = %{env | agent_state: RlStudy.MDP.State.new(2,0)}
iex> RlStudy.MDP.Environment.step(env, :up)
%{
done: false,
environment: %RlStudy.MDP.Environment{agent_state: %RlStudy.MDP.State{column: 0, row: 1}, default_reward: -0.04, grid: [[0, 0, 0, 1], [0, 9, 0, -1], [0, 0, 0, 0]], move_probe: 0.8},
next_state: %RlStudy.MDP.State{column: 0, row: 1},
reward: -0.04
}
iex> env = %{env | agent_state: RlStudy.MDP.State.new(0,2)}
iex> RlStudy.MDP.Environment.step(env, :right)
%{
done: true,
environment: %RlStudy.MDP.Environment{agent_state: %RlStudy.MDP.State{column: 3, row: 0}, default_reward: -0.04, grid: [[0, 0, 0, 1], [0, 9, 0, -1], [0, 0, 0, 0]], move_probe: 0.8},
next_state: %RlStudy.MDP.State{column: 3, row: 0},
reward: 1
}
"""
@spec step(RlStudy.MDP.Environment.t(), RlStudy.MDP.Action.t()) :: %{
done: boolean,
environment: RlStudy.MDP.Environment.t(),
next_state: RlStudy.MDP.State.t(),
reward: float
}
def step(environment, action) do
%{next_state: next_state, reward: reward, done: done} =
transit(environment, environment.agent_state, action)
%{
environment: %{environment | agent_state: next_state},
next_state: next_state,
reward: reward,
done: done
}
end
@doc """
# Examples
iex> :rand.seed(:exrop, {101, 102, 103})
iex> grid = [[0, 0, 0, 1], [0, 9, 0, -1], [0, 0, 0, 0]]
iex> env = RlStudy.MDP.Environment.new(grid)
iex> init_state = RlStudy.MDP.State.new(2,0)
iex> RlStudy.MDP.Environment.transit(env, init_state, :up)
%{next_state: %RlStudy.MDP.State{column: 0, row: 1}, reward: -0.04, done: false}
iex> goal_state = RlStudy.MDP.State.new(0,3)
iex> RlStudy.MDP.Environment.transit(env, goal_state, :up)
** (RuntimeError) Can't move from here!
"""
@spec transit(RlStudy.MDP.Environment.t(), RlStudy.MDP.State.t(), RlStudy.MDP.Action.t()) ::
%{done: boolean, next_state: RlStudy.MDP.State.t(), reward: float}
def transit(environment, state, action) do
transit_probes = transit_func(environment, state, action)
if Kernel.map_size(transit_probes) == 0 do
Logger.debug("No transit_probes.")
%{environment: environment, next_state: nil, reward: nil, done: true}
else
Logger.debug("transit_probes: #{inspect(transit_probes)}")
next_state = prob_choice(transit_probes, :rand.uniform())
%{reward: reward, done: done} = reward_func(environment, next_state)
transit_to = %{next_state: next_state, reward: reward, done: done}
Logger.debug("Transit to #{inspect(transit_to)}")
transit_to
end
end
defp prob_choice(probes, _) when Kernel.map_size(probes) == 1 do
probe = Enum.at(Map.keys(probes), 0)
Logger.debug("choice last one. probe: #{inspect(probe)}")
probe
end
defp prob_choice(probes, ran) when Kernel.map_size(probes) > 1 do
"""
Algorithm https://rosettacode.org/wiki/Probabilistic_choice#Elixir
Run :rand.seed(:exrop, {101, 102, 103}) for test https://github.com/elixir-lang/elixir/blob/v1.10/lib/elixir/lib/enum.ex#L1985-L1991
"""
Logger.debug("probes: #{inspect(probes)}, ran: #{ran}")
state_key = Enum.at(Map.keys(probes), 0)
{:ok, prob} = Map.fetch(probes, state_key)
if ran < prob do
Logger.debug("choiced prob: #{inspect(state_key)}")
state_key
else
prob_choice(Map.delete(probes, state_key), ran - prob)
end
end
end
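A small rollout sketch on top of the API above: reset the environment, then take random actions until the episode terminates (capped at 100 steps):

```elixir
grid = [[0, 0, 0, 1], [0, 9, 0, -1], [0, 0, 0, 0]]
env = grid |> RlStudy.MDP.Environment.new() |> RlStudy.MDP.Environment.reset()

final_env =
  Enum.reduce_while(1..100, env, fn _step, env ->
    action = Enum.random(RlStudy.MDP.Environment.actions())

    %{environment: env, done: done} = RlStudy.MDP.Environment.step(env, action)

    if done, do: {:halt, env}, else: {:cont, env}
  end)
```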
|
lib/mdp/environment.ex
|
defmodule Hex.Utils do
def safe_deserialize_erlang("") do
nil
end
def safe_deserialize_erlang(binary) do
case safe_binary_to_term(binary) do
{:ok, term} ->
term
:error ->
Mix.raise("Received malformed erlang from Hex API")
end
rescue
ArgumentError ->
Mix.raise("Received malformed erlang from Hex API")
end
def safe_serialize_erlang(term) do
binarify(term)
|> :erlang.term_to_binary()
end
def safe_binary_to_term!(binary, opts \\ []) do
case safe_binary_to_term(binary, opts) do
{:ok, term} ->
term
:error ->
raise ArgumentError, "unsafe terms"
end
end
def safe_binary_to_term(binary, opts \\ [])
def safe_binary_to_term(binary, opts) when is_binary(binary) do
term = :erlang.binary_to_term(binary, opts)
safe_terms(term)
{:ok, term}
catch
:throw, :safe_terms ->
:error
end
defp safe_terms(list) when is_list(list) do
safe_list(list)
end
defp safe_terms(tuple) when is_tuple(tuple) do
safe_tuple(tuple, tuple_size(tuple))
end
defp safe_terms(map) when is_map(map) do
fun = fn key, value, acc ->
safe_terms(key)
safe_terms(value)
acc
end
:maps.fold(fun, map, map)
end
defp safe_terms(other)
when is_atom(other) or is_number(other) or is_bitstring(other) or is_pid(other) or
is_reference(other) do
other
end
defp safe_terms(_other) do
throw(:safe_terms)
end
defp safe_list([]), do: :ok
defp safe_list([h | t]) when is_list(t) do
safe_terms(h)
safe_list(t)
end
defp safe_list([h | t]) do
safe_terms(h)
safe_terms(t)
end
defp safe_tuple(_tuple, 0), do: :ok
defp safe_tuple(tuple, n) do
safe_terms(:erlang.element(n, tuple))
safe_tuple(tuple, n - 1)
end
def truncate(string, options \\ []) do
length = options[:length] || 50
omission = options[:omission] || "..."
cond do
not String.valid?(string) ->
string
String.length(string) < length ->
string
true ->
String.slice(string, 0, length) <> omission
end
end
def binarify(term, opts \\ [])
def binarify(binary, _opts) when is_binary(binary) do
binary
end
def binarify(number, _opts) when is_number(number) do
number
end
def binarify(atom, _opts) when is_nil(atom) or is_boolean(atom) do
atom
end
def binarify(atom, _opts) when is_atom(atom) do
Atom.to_string(atom)
end
def binarify(list, opts) when is_list(list) do
for(elem <- list, do: binarify(elem, opts))
end
def binarify(tuple, opts) when is_tuple(tuple) do
for(elem <- Tuple.to_list(tuple), do: binarify(elem, opts))
|> List.to_tuple()
end
def binarify(map, opts) when is_map(map) do
if Keyword.get(opts, :maps, true) do
for(elem <- map, into: %{}, do: binarify(elem, opts))
else
for(elem <- map, do: binarify(elem, opts))
end
end
def print_error_result({:error, reason}) do
Hex.Shell.info(inspect(reason))
end
def print_error_result({:ok, {status, nil, _headers}}) do
print_http_code(status)
end
def print_error_result({:ok, {status, "", _headers}}) do
print_http_code(status)
end
def print_error_result({:ok, {_status, body, _headers}}) when is_binary(body) do
Hex.Shell.info(body)
end
def print_error_result({:ok, {status, body, _headers}}) when is_map(body) do
message = body["message"]
errors = body["errors"]
if message do
Hex.Shell.info(message)
end
if errors do
pretty_errors(errors)
end
unless message || errors do
print_http_code(status)
Hex.Shell.info(body)
end
end
defp pretty_errors(errors, depth \\ 0) do
Enum.each(errors, fn
{key, map} when is_map(map) ->
Hex.Shell.info(indent(depth) <> key <> ":")
pretty_errors(map, depth + 1)
{key, value} ->
Hex.Shell.info(indent(depth) <> key <> ": " <> value)
end)
end
defp print_http_code(code), do: Hex.Shell.info(pretty_http_code(code))
defp pretty_http_code(401), do: "Authentication failed (401)"
defp pretty_http_code(403), do: "Forbidden (403)"
defp pretty_http_code(404), do: "Entity not found (404)"
defp pretty_http_code(422), do: "Validation failed (422)"
defp pretty_http_code(code), do: "HTTP status code: #{code}"
defp indent(0), do: " "
defp indent(depth), do: " " <> indent(depth - 1)
def hexdocs_url(package) do
"https://hexdocs.pm/#{package}"
end
def hexdocs_url(package, version) do
"https://hexdocs.pm/#{package}/#{version}"
end
def hexdocs_module_url(package, module) do
"https://hexdocs.pm/#{package}/#{module}.html"
end
def hexdocs_module_url(package, version, module) do
"https://hexdocs.pm/#{package}/#{version}/#{module}.html"
end
def package_retirement_reason(:RETIRED_OTHER), do: "other"
def package_retirement_reason(:RETIRED_INVALID), do: "invalid"
def package_retirement_reason(:RETIRED_SECURITY), do: "security"
def package_retirement_reason(:RETIRED_DEPRECATED), do: "deprecated"
def package_retirement_reason(:RETIRED_RENAMED), do: "renamed"
def package_retirement_reason(other), do: other
def package_retirement_message(%{reason: reason_code, message: message}) do
"(#{package_retirement_reason(reason_code)}) #{message}"
end
def package_retirement_message(%{reason: reason_code}) do
"(#{package_retirement_reason(reason_code)})"
end
# From https://github.com/fishcakez/dialyze/blob/6698ae582c77940ee10b4babe4adeff22f1b7779/lib/mix/tasks/dialyze.ex#L168
def otp_version do
major = :erlang.system_info(:otp_release) |> List.to_string()
vsn_file = Path.join([:code.root_dir(), "releases", major, "OTP_VERSION"])
try do
{:ok, contents} = File.read(vsn_file)
String.split(contents, "\n", trim: true)
else
[full] -> full
_ -> major
catch
:error, _ -> major
end
end
def lock(tuple) when elem(tuple, 0) == :hex do
if tuple_size(tuple) > 7 and Hex.Server.should_warn_lock_version?() do
Hex.Shell.warn(
"The mix.lock file was generated with a newer version of Hex. Update " <>
"your client by running `mix local.hex` to avoid losing data."
)
end
destructure [:hex, name, version, checksum, managers, deps, repo], Tuple.to_list(tuple)
%{
name: to_string(name),
version: version,
checksum: checksum,
managers: managers,
deps: lock_deps(deps),
repo: repo || "hexpm"
}
end
def lock(_) do
nil
end
defp lock_deps(nil) do
nil
end
defp lock_deps(deps) do
Enum.map(deps, fn {app, req, opts} ->
opts =
opts
|> Keyword.put_new(:repo, "hexpm")
|> Keyword.update!(:hex, &to_string/1)
{app, req, opts}
end)
end
end
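A sketch of the safe (de)serialization round trip above. `binarify/2` converts atoms to strings on the way out, and terms containing funs are rejected on the way back in:

```elixir
binary = Hex.Utils.safe_serialize_erlang(%{name: :hex, version: "2.0.0"})

{:ok, %{"name" => "hex", "version" => "2.0.0"}} =
  Hex.Utils.safe_binary_to_term(binary)

# Funs are outside the safe term set, so decoding returns :error:
:error = Hex.Utils.safe_binary_to_term(:erlang.term_to_binary(fn -> :boom end))
```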
|
lib/hex/utils.ex
|
defmodule Ockam.Examples.Messaging.ReliableDeduplication do
@moduledoc """
Example of combining reliable delivery with index-ordering deduplication.
  Combining the two yields low message loss and high message uniqueness,
  as long as the pipes and channels are available and error-free.
"""
alias Ockam.Examples.Messaging.Filter
alias Ockam.Examples.Messaging.Shuffle
alias Ockam.Examples.Ping
alias Ockam.Examples.Pong
alias Ockam.Messaging.Delivery.ResendPipe
alias Ockam.Messaging.Ordering.Strict.IndexPipe
alias Ockam.Messaging.PipeChannel
alias Ockam.Workers.PubSubSubscriber
alias Ockam.Transport.TCP.RecoverableClient
  ## Local example
## Create filter and shuffle forwarders
## Run reliable delivery channel over filter and shuffle
## Wrap reliable channel into index ordering channel to deduplicate messages
## Send ping-pong through this combined channel
def local() do
## Intermediate
{:ok, filter} = Filter.create(address: "filter")
{:ok, shuffle} = Shuffle.create(address: "shuffle")
Ockam.Node.register_address("me")
## Pong
{:ok, resend_spawner} =
PipeChannel.Spawner.create(
responder_options: [pipe_mod: ResendPipe, sender_options: [confirm_timeout: 200]]
)
{:ok, ord_spawner} = PipeChannel.Spawner.create(responder_options: [pipe_mod: IndexPipe])
{:ok, "pong"} = Pong.create(address: "pong", delay: 500)
## Create resend channel through filter and shuffle
{:ok, "ping"} = Ping.create(address: "ping", delay: 500)
{:ok, resend_channel} =
PipeChannel.Initiator.create_and_wait(
pipe_mod: ResendPipe,
init_route: [filter, shuffle, resend_spawner],
sender_options: [confirm_timeout: 200]
)
{:ok, _ord_channel} =
PipeChannel.Initiator.create_and_wait(
pipe_mod: IndexPipe,
init_route: [resend_channel, ord_spawner]
)
end
def run_local() do
{:ok, channel} = local()
start_ping_pong(channel)
end
def hub_responder() do
Ockam.Transport.TCP.start()
Ockam.Node.register_address("me")
{:ok, "pong"} = Pong.create(address: "pong", delay: 500)
{:ok, client} = RecoverableClient.create(destination: {"localhost", 4000})
{:ok, _subscription} =
PubSubSubscriber.create(
pub_sub_route: [client, "pub_sub_service"],
name: "responder",
topic: "responder"
)
{:ok, "resend_receiver"} = ResendPipe.receiver().create(address: "resend_receiver")
{:ok, "resend_sender"} =
ResendPipe.sender().create(
address: "resend_sender",
confirm_timeout: 200,
receiver_route: [client, "pub_sub_t_initiator", "resend_receiver"]
)
{:ok, "resend_channel"} =
PipeChannel.Simple.create(
address: "resend_channel",
inner_address: "resend_channel_inner",
sender: "resend_sender",
channel_route: ["resend_channel_inner"]
)
{:ok, "ord_spawner"} =
PipeChannel.Spawner.create(
address: "ord_spawner",
responder_options: [pipe_mod: IndexPipe]
)
end
def hub_initiator() do
{:ok, "ping"} = Ping.create(address: "ping", delay: 500)
{:ok, client} = RecoverableClient.create(destination: {"localhost", 4000})
{:ok, _subscription} =
PubSubSubscriber.create(
pub_sub_route: [client, "pub_sub_service"],
name: "initiator",
topic: "initiator"
)
{:ok, "resend_receiver"} = ResendPipe.receiver().create(address: "resend_receiver")
{:ok, "resend_sender"} =
ResendPipe.sender().create(
address: "resend_sender",
confirm_timeout: 200,
receiver_route: [client, "pub_sub_t_responder", "resend_receiver"]
)
{:ok, "resend_channel"} =
PipeChannel.Simple.create(
address: "resend_channel",
inner_address: "resend_channel_inner",
sender: "resend_sender",
channel_route: ["resend_channel_inner"]
)
{:ok, _ord_channel} =
PipeChannel.Initiator.create_and_wait(
pipe_mod: IndexPipe,
init_route: ["resend_channel", "ord_spawner"]
)
end
def run_hub_initiator() do
{:ok, channel} = hub_initiator()
start_ping_pong(channel)
end
def send_messages(route, n_messages \\ 20) do
Enum.each(1..n_messages, fn n ->
Ockam.Router.route(%{
onward_route: route ++ ["me"],
return_route: ["me"],
payload: "Msg #{n}"
})
end)
end
def start_ping_pong(channel) do
## Start ping-pong
Ockam.Router.route(%{
onward_route: [channel, "pong"],
return_route: ["ping"],
payload: "0"
})
end
end
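To try the local topology in IEx (assuming the Ockam examples are compiled and an Ockam node is running in the current BEAM instance):

```elixir
alias Ockam.Examples.Messaging.ReliableDeduplication

# Builds the filter/shuffle forwarders, wraps a resend channel in an
# index-ordering channel, and starts the ping-pong exchange through it.
ReliableDeduplication.run_local()
```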
|
implementations/elixir/ockam/ockam/lib/ockam/examples/messaging/reliable_deduplication.ex
|
defmodule OMG.EthereumEventListener do
@moduledoc """
GenServer running the listener.
Periodically fetches events made on dynamically changing block range
from the root chain contract and feeds them to a callback.
It is **not** responsible for figuring out which ranges of Ethereum blocks are eligible to scan and when, see
`OMG.RootChainCoordinator` for that.
The `OMG.RootChainCoordinator` provides the `SyncGuide` that indicates what's eligible to scan, taking into account:
- finality margin
- mutual ordering and dependencies of various types of Ethereum events to be respected.
It **is** responsible for processing all events from all blocks and processing them only once.
It accomplishes that by keeping a persisted value in `OMG.DB` and its state that reflects till which Ethereum height
the events were processed (`synced_height`).
This `synced_height` is updated after every batch of Ethereum events get successfully consumed by
`callbacks.process_events_callback`, as called in `sync_height/2`, together with all the `OMG.DB` updates this
callback returns, atomically.
The key in `OMG.DB` used to persist `synced_height` is defined by the value of `synced_height_update_key`.
What specific Ethereum events it fetches, and what it does with them is up to predefined `callbacks`.
See `OMG.EthereumEventListener.Core` for the implementation of the business logic for the listener.
"""
use GenServer
use Spandex.Decorators
use OMG.Utils.LoggerExt
alias OMG.EthereumEventListener.Core
alias OMG.RootChainCoordinator
@type config() :: %{
block_finality_margin: non_neg_integer,
synced_height_update_key: atom,
service_name: atom,
# maps a pair denoting eth height range to a list of ethereum events
get_events_callback: (non_neg_integer, non_neg_integer -> {:ok, [map]}),
# maps a list of ethereum events to a list of `db_updates` to send to `OMG.DB`
process_events_callback: ([any] -> {:ok, [tuple]})
}
### Client
@spec start_link(config()) :: GenServer.on_start()
def start_link(config) do
%{service_name: name} = config
GenServer.start_link(__MODULE__, config, name: name)
end
@doc """
Returns child_specs for the given `EthereumEventListener` setup, to be included e.g. in Supervisor's children.
See `handle_continue/2` for the required keyword arguments.
"""
@spec prepare_child(keyword()) :: %{id: atom(), start: tuple()}
def prepare_child(opts \\ []) do
name = Keyword.fetch!(opts, :service_name)
%{id: name, start: {OMG.EthereumEventListener, :start_link, [Map.new(opts)]}, shutdown: :brutal_kill, type: :worker}
end
### Server
@doc """
Initializes the GenServer state, most work done in `handle_continue/2`.
"""
def init(init) do
{:ok, init, {:continue, :setup}}
end
@doc """
Reads the status of listening (till which Ethereum height were the events processed) from the `OMG.DB` and initializes
the logic `OMG.EthereumEventListener.Core` with it. Does an initial `OMG.RootChainCoordinator.check_in` with the
Ethereum height it last stopped on. Next, it continues to monitor and fetch the events as usual.
"""
def handle_continue(
:setup,
%{
contract_deployment_height: contract_deployment_height,
synced_height_update_key: update_key,
service_name: service_name,
get_events_callback: get_events_callback,
process_events_callback: process_events_callback,
metrics_collection_interval: metrics_collection_interval,
ethereum_events_check_interval_ms: ethereum_events_check_interval_ms
}
) do
_ = Logger.info("Starting #{inspect(__MODULE__)} for #{service_name}.")
{:ok, last_event_block_height} = OMG.DB.get_single_value(update_key)
# we don't need to ever look at earlier than contract deployment
last_event_block_height = max(last_event_block_height, contract_deployment_height)
{initial_state, height_to_check_in} =
Core.init(update_key, service_name, last_event_block_height, ethereum_events_check_interval_ms)
callbacks = %{
get_ethereum_events_callback: get_events_callback,
process_events_callback: process_events_callback
}
{:ok, _} = schedule_get_events(ethereum_events_check_interval_ms)
:ok = RootChainCoordinator.check_in(height_to_check_in, service_name)
{:ok, _} = :timer.send_interval(metrics_collection_interval, self(), :send_metrics)
_ = Logger.info("Started #{inspect(__MODULE__)} for #{service_name}, synced_height: #{inspect(height_to_check_in)}")
{:noreply, {initial_state, callbacks}}
end
def handle_info(:send_metrics, {state, callbacks}) do
:ok = :telemetry.execute([:process, __MODULE__], %{}, state)
{:noreply, {state, callbacks}}
end
@doc """
Main worker function, called on a cadence as initialized in `handle_continue/2`.
Does the following:
- asks `OMG.RootChainCoordinator` about how to sync, with respect to other services listening to Ethereum
- (`sync_height/2`) figures out what is the suitable range of Ethereum blocks to download events for
- (`sync_height/2`) if necessary fetches those events to the in-memory cache in `OMG.EthereumEventListener.Core`
- (`sync_height/2`) executes the related event-consuming callback with events as arguments
  - (`sync_height/2`) does `OMG.DB` updates that persist the processed Ethereum height as well as whatever the
callbacks returned to persist
- (`sync_height/2`) `OMG.RootChainCoordinator.check_in` to tell the rest what Ethereum height was processed.
"""
@decorate trace(service: :ethereum_event_listener, type: :backend)
def handle_info(:sync, {state, callbacks}) do
:ok = :telemetry.execute([:trace, __MODULE__], %{}, state)
case RootChainCoordinator.get_sync_info() do
:nosync ->
:ok = RootChainCoordinator.check_in(state.synced_height, state.service_name)
{:ok, _} = schedule_get_events(state.ethereum_events_check_interval_ms)
{:noreply, {state, callbacks}}
sync_info ->
new_state = sync_height(state, callbacks, sync_info)
{:ok, _} = schedule_get_events(state.ethereum_events_check_interval_ms)
{:noreply, {new_state, callbacks}}
end
end
# see `handle_info/2`, clause for `:sync`
@decorate span(service: :ethereum_event_listener, type: :backend, name: "sync_height/3")
defp sync_height(state, callbacks, sync_guide) do
{events, new_state} =
state
|> Core.calc_events_range_set_height(sync_guide)
|> get_events(callbacks.get_ethereum_events_callback)
db_update = [{:put, new_state.synced_height_update_key, new_state.synced_height}]
:ok = :telemetry.execute([:process, __MODULE__], %{events: events}, new_state)
{:ok, db_updates_from_callback} = callbacks.process_events_callback.(events)
:ok = publish_events(events)
:ok = OMG.DB.multi_update(db_update ++ db_updates_from_callback)
:ok = RootChainCoordinator.check_in(new_state.synced_height, new_state.service_name)
new_state
end
defp get_events({{from, to}, state}, get_events_callback) do
{:ok, new_events} = get_events_callback.(from, to)
{new_events, state}
end
defp get_events({:dont_fetch_events, state}, _callback) do
{[], state}
end
defp schedule_get_events(ethereum_events_check_interval_ms) do
:timer.send_after(ethereum_events_check_interval_ms, self(), :sync)
end
defp publish_events([%{event_signature: event_signature} | _] = data) do
[event_signature, _] = String.split(event_signature, "(")
{:root_chain, event_signature}
|> OMG.Bus.Event.new(:data, data)
|> OMG.Bus.direct_local_broadcast()
end
defp publish_events([]), do: :ok
end
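A sketch of wiring a listener into a supervision tree via `prepare_child/1`. All keys and callbacks below are placeholders, not OMG's real configuration, and a running `OMG.DB` is assumed:

```elixir
children = [
  OMG.EthereumEventListener.prepare_child(
    service_name: :depositor,
    synced_height_update_key: :last_depositor_eth_height,
    contract_deployment_height: 0,
    ethereum_events_check_interval_ms: 500,
    metrics_collection_interval: 60_000,
    # No-op callbacks, just to satisfy the config() contract:
    get_events_callback: fn _from_height, _to_height -> {:ok, []} end,
    process_events_callback: fn _events -> {:ok, []} end
  )
]

Supervisor.start_link(children, strategy: :one_for_one)
```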
|
apps/omg/lib/omg/ethereum_event_listener.ex
|
defmodule AWS.ElastiCache do
@moduledoc """
Amazon ElastiCache
Amazon ElastiCache is a web service that makes it easier to set up, operate, and
scale a distributed cache in the cloud.
With ElastiCache, customers get all of the benefits of a high-performance,
in-memory cache with less of the administrative burden involved in launching and
managing a distributed cache. The service makes setup, scaling, and cluster
failure handling much simpler than in a self-managed cache deployment.
In addition, through integration with Amazon CloudWatch, customers get enhanced
visibility into the key performance statistics associated with their cache and
can receive alarms if a part of their cache runs hot.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2015-02-02",
content_type: "application/x-www-form-urlencoded",
credential_scope: nil,
endpoint_prefix: "elasticache",
global?: false,
protocol: "query",
service_id: "ElastiCache",
signature_version: "v4",
signing_name: "elasticache",
target_prefix: nil
}
end
@doc """
Adds up to 50 cost allocation tags to the named resource.
A cost allocation tag is a key-value pair where the key and value are
case-sensitive. You can use cost allocation tags to categorize and track your
AWS costs.
When you apply tags to your ElastiCache resources, AWS generates a cost
allocation report as a comma-separated value (CSV) file with your usage and
costs aggregated by your tags. You can apply tags that represent business
categories (such as cost centers, application names, or owners) to organize your
costs across multiple services. For more information, see [Using Cost Allocation Tags in Amazon
ElastiCache](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Tagging.html)
in the *ElastiCache User Guide*.
"""
def add_tags_to_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AddTagsToResource", input, options)
end
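A sketch of invoking one of these operations with the aws-elixir client; the credentials, ARN, and input keys below are illustrative assumptions:

```elixir
client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")

AWS.ElastiCache.add_tags_to_resource(client, %{
  "ResourceName" => "arn:aws:elasticache:us-east-1:123456789012:cluster:my-cluster",
  "Tags" => [%{"Key" => "env", "Value" => "staging"}]
})
```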
@doc """
Allows network ingress to a cache security group.
Applications using ElastiCache must be running on Amazon EC2, and Amazon EC2
security groups are used as the authorization mechanism.
You cannot authorize ingress from an Amazon EC2 security group in one region to
an ElastiCache cluster in another region.
"""
def authorize_cache_security_group_ingress(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AuthorizeCacheSecurityGroupIngress", input, options)
end
@doc """
Apply the service update.
For more information on service updates and applying them, see [Applying Service Updates](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/applying-updates.html).
"""
def batch_apply_update_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchApplyUpdateAction", input, options)
end
@doc """
Stop the service update.
For more information on service updates and stopping them, see [Stopping Service Updates](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/stopping-self-service-updates.html).
"""
def batch_stop_update_action(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchStopUpdateAction", input, options)
end
@doc """
Complete the migration of data.
"""
def complete_migration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CompleteMigration", input, options)
end
@doc """
Makes a copy of an existing snapshot.
This operation is valid for Redis only.
Users or groups that have permissions to use the `CopySnapshot` operation can
create their own Amazon S3 buckets and copy snapshots to it. To control access
to your snapshots, use an IAM policy to control who has the ability to use the
`CopySnapshot` operation. For more information about using IAM to control the
use of ElastiCache operations, see [Exporting Snapshots](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html)
and [Authentication & Access Control](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/IAM.html).
You could receive the following error messages.
## Error Messages
* **Error Message:** The S3 bucket %s is outside of the region.
**Solution:** Create an Amazon S3 bucket in the same region as your snapshot.
For more information, see [Step 1: Create an Amazon S3 Bucket](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-create-s3-bucket)
in the ElastiCache User Guide.
* **Error Message:** The S3 bucket %s does not exist.
**Solution:** Create an Amazon S3 bucket in the same region as your snapshot.
For more information, see [Step 1: Create an Amazon S3 Bucket](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-create-s3-bucket)
in the ElastiCache User Guide.
* **Error Message:** The S3 bucket %s is not owned by the
authenticated user.
**Solution:** Create an Amazon S3 bucket in the same region as your snapshot.
For more information, see [Step 1: Create an Amazon S3 Bucket](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-create-s3-bucket)
in the ElastiCache User Guide.
* **Error Message:** The authenticated user does not have sufficient
permissions to perform the desired activity.
**Solution:** Contact your system administrator to get the needed permissions.
* **Error Message:** The S3 bucket %s already contains an object
with key %s.
**Solution:** Give the `TargetSnapshotName` a new and unique value. If exporting
a snapshot, you could alternatively create a new Amazon S3 bucket and use this
same value for `TargetSnapshotName`.
  * **Error Message:** ElastiCache has not been granted READ
permissions %s on the S3 Bucket.
**Solution:** Add List and Read permissions on the bucket. For more information,
see [Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-grant-access)
in the ElastiCache User Guide.
  * **Error Message:** ElastiCache has not been granted WRITE
permissions %s on the S3 Bucket.
**Solution:** Add Upload/Delete permissions on the bucket. For more information,
see [Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-grant-access)
in the ElastiCache User Guide.
  * **Error Message:** ElastiCache has not been granted READ_ACP
permissions %s on the S3 Bucket.
**Solution:** Add View Permissions on the bucket. For more information, see
[Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-grant-access)
in the ElastiCache User Guide.
"""
def copy_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CopySnapshot", input, options)
end
@doc """
Creates a cluster.
All nodes in the cluster run the same protocol-compliant cache engine software,
either Memcached or Redis.
This operation is not supported for Redis (cluster mode enabled) clusters.
"""
def create_cache_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateCacheCluster", input, options)
end
@doc """
Creates a new Amazon ElastiCache cache parameter group.
An ElastiCache cache parameter group is a collection of parameters and their
values that are applied to all of the nodes in any cluster or replication group
using the CacheParameterGroup.
A newly created CacheParameterGroup is an exact duplicate of the default
parameter group for the CacheParameterGroupFamily. To customize the newly
created CacheParameterGroup you can change the values of specific parameters.
For more information, see:
  * [ModifyCacheParameterGroup](https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyCacheParameterGroup.html) in the ElastiCache API Reference.
  * [Parameters and Parameter Groups](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ParameterGroups.html) in the ElastiCache User Guide.
"""
def create_cache_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateCacheParameterGroup", input, options)
end
@doc """
Creates a new cache security group.
Use a cache security group to control access to one or more clusters.
Cache security groups are only used when you are creating a cluster outside of
an Amazon Virtual Private Cloud (Amazon VPC). If you are creating a cluster
inside of a VPC, use a cache subnet group instead. For more information, see
[CreateCacheSubnetGroup](https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_CreateCacheSubnetGroup.html).
"""
def create_cache_security_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateCacheSecurityGroup", input, options)
end
@doc """
Creates a new cache subnet group.
Use this parameter only when you are creating a cluster in an Amazon Virtual
Private Cloud (Amazon VPC).
"""
def create_cache_subnet_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateCacheSubnetGroup", input, options)
end
@doc """
Global Datastore for Redis offers fully managed, fast, reliable and secure
cross-region replication.
Using Global Datastore for Redis, you can create cross-region read replica
clusters for ElastiCache for Redis to enable low-latency reads and disaster
recovery across regions. For more information, see [Replication Across Regions Using Global
Datastore](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Redis-Global-Datastore.html).
* The **GlobalReplicationGroupIdSuffix** is the name of the Global
Datastore.
* The **PrimaryReplicationGroupId** represents the name of the
primary cluster that accepts writes and will replicate updates to the secondary
cluster.
"""
def create_global_replication_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateGlobalReplicationGroup", input, options)
end
@doc """
Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled)
replication group.
This API can be used to create a standalone regional replication group or a
secondary replication group associated with a Global Datastore.
A Redis (cluster mode disabled) replication group is a collection of clusters,
where one of the clusters is a read/write primary and the others are read-only
replicas. Writes to the primary are asynchronously propagated to the replicas.
  A Redis (cluster mode enabled) cluster is composed of 1 to 90 shards
  (API/CLI: node groups). Each shard has a primary node and up to 5 read-only
  replica nodes. The configuration can range from 90 shards and 0 replicas to 15
  shards and 5 replicas, which is the maximum number of replicas allowed.
The node or shard limit can be increased to a maximum of 500 per cluster if the
Redis engine version is 5.0.6 or higher. For example, you can choose to
configure a 500 node cluster that ranges between 83 shards (one primary and 5
replicas per shard) and 500 shards (single primary and no replicas). Make sure
there are enough available IP addresses to accommodate the increase. Common
pitfalls include the subnets in the subnet group have too small a CIDR range or
the subnets are shared and heavily used by other clusters. For more information,
see [Creating a Subnet Group](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.Creating.html).
For versions below 5.0.6, the limit is 250 per cluster.
To request a limit increase, see [AWS Service Limits](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html)
and choose the limit type **Nodes per cluster per instance type**.
When a Redis (cluster mode disabled) replication group has been successfully
created, you can add one or more read replicas to it, up to a total of 5 read
replicas. If you need to increase or decrease the number of node groups
(console: shards), you can avail yourself of ElastiCache for Redis' scaling. For
more information, see [Scaling ElastiCache for Redis Clusters](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Scaling.html)
in the *ElastiCache User Guide*.
This operation is valid for Redis only.
"""
def create_replication_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateReplicationGroup", input, options)
end
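# A hedged input sketch for `create_replication_group/3`. The field names
# below follow the CreateReplicationGroup API; the values are illustrative:
#
#     input = %{
#       "ReplicationGroupId" => "my-redis-group",
#       "ReplicationGroupDescription" => "Two-replica example group",
#       "Engine" => "redis",
#       "CacheNodeType" => "cache.t3.micro",
#       "NumNodeGroups" => 1,
#       "ReplicasPerNodeGroup" => 2
#     }
#     create_replication_group(client, input)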
@doc """
Creates a copy of an entire cluster or replication group at a specific moment in
time.
This operation is valid for Redis only.
"""
def create_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateSnapshot", input, options)
end
@doc """
For Redis engine version 6.x onwards: Creates a Redis user.
For more information, see [Using Role Based Access Control (RBAC)](http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.RBAC.html).
"""
def create_user(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateUser", input, options)
end
@doc """
For Redis engine version 6.x onwards: Creates a Redis user group.
For more information, see [Using Role Based Access Control (RBAC)](http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.RBAC.html)
"""
def create_user_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateUserGroup", input, options)
end
@doc """
Decreases the number of node groups in a Global Datastore.
"""
def decrease_node_groups_in_global_replication_group(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DecreaseNodeGroupsInGlobalReplicationGroup",
input,
options
)
end
@doc """
Dynamically decreases the number of replicas in a Redis (cluster mode disabled)
replication group or the number of replica nodes in one or more node groups
(shards) of a Redis (cluster mode enabled) replication group.
This operation is performed with no cluster down time.
"""
def decrease_replica_count(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DecreaseReplicaCount", input, options)
end
@doc """
Deletes a previously provisioned cluster.
`DeleteCacheCluster` deletes all associated cache nodes, node endpoints and the
cluster itself. When you receive a successful response from this operation,
Amazon ElastiCache immediately begins deleting the cluster; you cannot cancel or
revert this operation.
This operation is not valid for:
* Redis (cluster mode enabled) clusters
* Redis (cluster mode disabled) clusters
* A cluster that is the last read replica of a replication group
* A cluster that is the primary node of a replication group
* A node group (shard) that has Multi-AZ mode enabled
* A cluster from a Redis (cluster mode enabled) replication group
* A cluster that is not in the `available` state
"""
def delete_cache_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteCacheCluster", input, options)
end
@doc """
Deletes the specified cache parameter group.
You cannot delete a cache parameter group if it is associated with any cache
clusters. You cannot delete the default cache parameter groups in your account.
"""
def delete_cache_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteCacheParameterGroup", input, options)
end
@doc """
Deletes a cache security group.
You cannot delete a cache security group if it is associated with any clusters.
"""
def delete_cache_security_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteCacheSecurityGroup", input, options)
end
@doc """
Deletes a cache subnet group.
You cannot delete a default cache subnet group or one that is associated with
any clusters.
"""
def delete_cache_subnet_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteCacheSubnetGroup", input, options)
end
@doc """
Deleting a Global Datastore is a two-step process:
* First, you must `DisassociateGlobalReplicationGroup` to remove the
secondary clusters in the Global Datastore.
* Once the Global Datastore contains only the primary cluster, you
can use the `DeleteGlobalReplicationGroup` API to delete the Global Datastore
while retaining the primary cluster by setting `RetainPrimaryCluster=true`.
When you receive a successful response from this operation, Amazon ElastiCache
immediately begins deleting the selected resources; you cannot cancel or revert
this operation.
"""
def delete_global_replication_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteGlobalReplicationGroup", input, options)
end
@doc """
Deletes an existing replication group.
By default, this operation deletes the entire replication group, including the
primary/primaries and all of the read replicas. If the replication group has
only one primary, you can optionally delete only the read replicas, while
retaining the primary by setting `RetainPrimaryCluster=true`.
When you receive a successful response from this operation, Amazon ElastiCache
immediately begins deleting the selected resources; you cannot cancel or revert
this operation.
This operation is valid for Redis only.
"""
def delete_replication_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteReplicationGroup", input, options)
end
@doc """
Deletes an existing snapshot.
When you receive a successful response from this operation, ElastiCache
immediately begins deleting the snapshot; you cannot cancel or revert this
operation.
This operation is valid for Redis only.
"""
def delete_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteSnapshot", input, options)
end
@doc """
For Redis engine version 6.x onwards: Deletes a user.
The user will be removed from all user groups and in turn removed from all
replication groups. For more information, see [Using Role Based Access Control (RBAC)](http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.RBAC.html).
"""
def delete_user(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteUser", input, options)
end
@doc """
For Redis engine version 6.x onwards: Deletes a user group.
The user group must first be disassociated from the replication group before it
can be deleted. For more information, see [Using Role Based Access Control (RBAC)](http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.RBAC.html).
"""
def delete_user_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteUserGroup", input, options)
end
@doc """
Returns information about all provisioned clusters if no cluster identifier is
specified, or about a specific cache cluster if a cluster identifier is
supplied.
By default, abbreviated information about the clusters is returned. You can use
the optional *ShowCacheNodeInfo* flag to retrieve detailed information about the
cache nodes associated with the clusters. These details include the DNS address
and port for the cache node endpoint.
If the cluster is in the *creating* state, only cluster-level information is
displayed until all of the nodes are successfully provisioned.
If the cluster is in the *deleting* state, only cluster-level information is
displayed.
If cache nodes are currently being added to the cluster, node endpoint
information and creation time for the additional nodes are not displayed until
they are completely provisioned. When the cluster state is *available*, the
cluster is ready for use.
If cache nodes are currently being removed from the cluster, no endpoint
information for the removed nodes is displayed.
"""
def describe_cache_clusters(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeCacheClusters", input, options)
end
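# Hedged example for `describe_cache_clusters/3`: the ShowCacheNodeInfo flag
# described above is passed as a request field (cluster identifier illustrative):
#
#     describe_cache_clusters(client, %{
#       "CacheClusterId" => "my-cluster",
#       "ShowCacheNodeInfo" => true
#     })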
@doc """
Returns a list of the available cache engines and their versions.
"""
def describe_cache_engine_versions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeCacheEngineVersions", input, options)
end
@doc """
Returns a list of cache parameter group descriptions.
If a cache parameter group name is specified, the list contains only the
descriptions for that group.
"""
def describe_cache_parameter_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeCacheParameterGroups", input, options)
end
@doc """
Returns the detailed parameter list for a particular cache parameter group.
"""
def describe_cache_parameters(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeCacheParameters", input, options)
end
@doc """
Returns a list of cache security group descriptions.
If a cache security group name is specified, the list contains only the
description of that group. This is applicable only when you have ElastiCache in
a Classic setup.
"""
def describe_cache_security_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeCacheSecurityGroups", input, options)
end
@doc """
Returns a list of cache subnet group descriptions.
If a subnet group name is specified, the list contains only the description of
that group. This is applicable only when you have ElastiCache in VPC setup. All
ElastiCache clusters now launch in VPC by default.
"""
def describe_cache_subnet_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeCacheSubnetGroups", input, options)
end
@doc """
Returns the default engine and system parameter information for the specified
cache engine.
"""
def describe_engine_default_parameters(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEngineDefaultParameters", input, options)
end
@doc """
Returns events related to clusters, cache security groups, and cache parameter
groups.
You can obtain events specific to a particular cluster, cache security group, or
cache parameter group by providing the name as a parameter.
By default, only the events occurring within the last hour are returned;
however, you can retrieve up to 14 days' worth of events if necessary.
"""
def describe_events(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEvents", input, options)
end
@doc """
Returns information about a particular global replication group.
If no identifier is specified, returns information about all Global Datastores.
"""
def describe_global_replication_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeGlobalReplicationGroups", input, options)
end
@doc """
Returns information about a particular replication group.
If no identifier is specified, `DescribeReplicationGroups` returns information
about all replication groups.
This operation is valid for Redis only.
"""
def describe_replication_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeReplicationGroups", input, options)
end
@doc """
Returns information about reserved cache nodes for this account, or about a
specified reserved cache node.
"""
def describe_reserved_cache_nodes(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeReservedCacheNodes", input, options)
end
@doc """
Lists available reserved cache node offerings.
"""
def describe_reserved_cache_nodes_offerings(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DescribeReservedCacheNodesOfferings",
input,
options
)
end
@doc """
Returns details of the service updates.
"""
def describe_service_updates(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeServiceUpdates", input, options)
end
@doc """
Returns information about cluster or replication group snapshots.
By default, `DescribeSnapshots` lists all of your snapshots; it can optionally
describe a single snapshot, or just the snapshots associated with a particular
cache cluster.
This operation is valid for Redis only.
"""
def describe_snapshots(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeSnapshots", input, options)
end
@doc """
Returns details of the update actions.
"""
def describe_update_actions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeUpdateActions", input, options)
end
@doc """
Returns a list of user groups.
"""
def describe_user_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeUserGroups", input, options)
end
@doc """
Returns a list of users.
"""
def describe_users(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeUsers", input, options)
end
@doc """
Removes a secondary cluster from the Global Datastore using the Global Datastore
name.
The secondary cluster will no longer receive updates from the primary cluster,
but will remain as a standalone cluster in that AWS region.
"""
def disassociate_global_replication_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisassociateGlobalReplicationGroup", input, options)
end
@doc """
Used to fail over the primary region to a selected secondary region.
The selected secondary region will become primary, and all other clusters will
become secondary.
"""
def failover_global_replication_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "FailoverGlobalReplicationGroup", input, options)
end
@doc """
Increases the number of node groups in the Global Datastore.
"""
def increase_node_groups_in_global_replication_group(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"IncreaseNodeGroupsInGlobalReplicationGroup",
input,
options
)
end
@doc """
Dynamically increases the number of replicas in a Redis (cluster mode disabled)
replication group or the number of replica nodes in one or more node groups
(shards) of a Redis (cluster mode enabled) replication group.
This operation is performed with no cluster down time.
"""
def increase_replica_count(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "IncreaseReplicaCount", input, options)
end
@doc """
Lists all available node types that you can scale your Redis cluster's or
replication group's current node type up to.
When you use the `ModifyCacheCluster` or `ModifyReplicationGroup` operations to
scale your cluster or replication group, the value of the `CacheNodeType`
parameter must be one of the node types returned by this operation.
"""
def list_allowed_node_type_modifications(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListAllowedNodeTypeModifications", input, options)
end
@doc """
Lists all cost allocation tags currently on the named resource.
A `cost allocation tag` is a key-value pair where the key is case-sensitive and
the value is optional. You can use cost allocation tags to categorize and track
your AWS costs.
If the cluster is not in the *available* state, `ListTagsForResource` returns an
error.
You can have a maximum of 50 cost allocation tags on an ElastiCache resource.
For more information, see [Monitoring Costs with Tags](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Tagging.html).
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
Modifies the settings for a cluster.
You can use this operation to change one or more cluster configuration
parameters by specifying the parameters and the new values.
"""
def modify_cache_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyCacheCluster", input, options)
end
@doc """
Modifies the parameters of a cache parameter group.
You can modify up to 20 parameters in a single request by submitting a list of
parameter name and value pairs.
"""
def modify_cache_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyCacheParameterGroup", input, options)
end
@doc """
Modifies an existing cache subnet group.
"""
def modify_cache_subnet_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyCacheSubnetGroup", input, options)
end
@doc """
Modifies the settings for a Global Datastore.
"""
def modify_global_replication_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyGlobalReplicationGroup", input, options)
end
@doc """
Modifies the settings for a replication group.
* [Scaling for Amazon ElastiCache for Redis (cluster mode enabled)](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/scaling-redis-cluster-mode-enabled.html)
in the ElastiCache User Guide
* [ModifyReplicationGroupShardConfiguration](https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyReplicationGroupShardConfiguration.html)
in the ElastiCache API Reference
This operation is valid for Redis only.
"""
def modify_replication_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyReplicationGroup", input, options)
end
@doc """
Modifies a replication group's shards (node groups) by allowing you to add
shards, remove shards, or rebalance the keyspaces among existing shards.
"""
def modify_replication_group_shard_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"ModifyReplicationGroupShardConfiguration",
input,
options
)
end
@doc """
Changes user password(s) and/or access string.
"""
def modify_user(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyUser", input, options)
end
@doc """
Changes the list of users that belong to the user group.
"""
def modify_user_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyUserGroup", input, options)
end
@doc """
Allows you to purchase a reserved cache node offering.
Reserved nodes are not eligible for cancellation and are non-refundable. For
more information, see [Managing Costs with Reserved Nodes](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/reserved-nodes.html)
for Redis or [Managing Costs with Reserved Nodes](https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/reserved-nodes.html)
for Memcached.
"""
def purchase_reserved_cache_nodes_offering(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PurchaseReservedCacheNodesOffering", input, options)
end
@doc """
Redistributes slots to ensure uniform distribution across existing shards in the
cluster.
"""
def rebalance_slots_in_global_replication_group(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"RebalanceSlotsInGlobalReplicationGroup",
input,
options
)
end
@doc """
Reboots some, or all, of the cache nodes within a provisioned cluster.
This operation applies any modified cache parameter groups to the cluster. The
reboot operation takes place as soon as possible, and results in a momentary
outage to the cluster. During the reboot, the cluster status is set to
REBOOTING.
The reboot causes the contents of the cache (for each cache node being rebooted)
to be lost.
When the reboot is complete, a cluster event is created.
Rebooting a cluster is currently supported on Memcached and Redis (cluster mode
disabled) clusters. Rebooting is not supported on Redis (cluster mode enabled)
clusters.
If you make changes to parameters that require a Redis (cluster mode enabled)
cluster reboot for the changes to be applied, see [Rebooting a Cluster](http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.Rebooting.html)
for an alternate process.
"""
def reboot_cache_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RebootCacheCluster", input, options)
end
@doc """
Removes the tags identified by the `TagKeys` list from the named resource.
"""
def remove_tags_from_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RemoveTagsFromResource", input, options)
end
@doc """
Modifies the parameters of a cache parameter group to the engine or system
default value.
You can reset specific parameters by submitting a list of parameter names. To
reset the entire cache parameter group, specify the `ResetAllParameters` and
`CacheParameterGroupName` parameters.
"""
def reset_cache_parameter_group(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ResetCacheParameterGroup", input, options)
end
@doc """
Revokes ingress from a cache security group.
Use this operation to disallow access from an Amazon EC2 security group that had
been previously authorized.
"""
def revoke_cache_security_group_ingress(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RevokeCacheSecurityGroupIngress", input, options)
end
@doc """
Starts the migration of data.
"""
def start_migration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartMigration", input, options)
end
@doc """
Represents the input of a `TestFailover` operation, which tests automatic
failover on a specified node group (called shard in the console) in a
replication group (called cluster in the console).
## Note the following
* A customer can use this operation to test automatic failover on up
to 5 shards (called node groups in the ElastiCache API and AWS CLI) in any
rolling 24-hour period.
* If calling this operation on shards in different clusters (called
replication groups in the API and CLI), the calls can be made concurrently.
* If calling this operation multiple times on different shards in
the same Redis (cluster mode enabled) replication group, the first node
replacement must complete before a subsequent call can be made.
* To determine whether the node replacement is complete you can
check Events using the Amazon ElastiCache console, the AWS CLI, or the
ElastiCache API. Look for the following automatic failover related events,
listed here in order of occurrence:
1. Replication group message: `Test Failover API called
for node group <node-group-id>`
2. Cache cluster message: `Failover from primary node
<primary-node-id> to replica node <node-id> completed`
3. Replication group message: `Failover from primary
node <primary-node-id> to replica node <node-id> completed`
4. Cache cluster message: `Recovering cache nodes
<node-id>`
5. Cache cluster message: `Finished recovery for cache
nodes <node-id>`
For more information see:
* [Viewing ElastiCache Events](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ECEvents.Viewing.html)
in the *ElastiCache User Guide*
* [DescribeEvents](https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_DescribeEvents.html)
in the ElastiCache API Reference
Also see [Testing Multi-AZ](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html#auto-failover-test)
in the *ElastiCache User Guide*.
"""
def test_failover(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TestFailover", input, options)
end
end
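# A minimal end-to-end sketch for this generated client. It assumes the
# enclosing module is AWS.ElastiCache (per the source path below) and that
# AWS.Client.create/3 from aws-elixir is available; the credentials and
# region are placeholders.
client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")

case AWS.ElastiCache.describe_cache_clusters(client, %{}) do
  {:ok, result, _http_response} -> IO.inspect(result, label: "clusters")
  {:error, reason} -> IO.inspect(reason, label: "request failed")
end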
# source: lib/aws/generated/elasticache.ex
defmodule PuppeteerPdf.Generate do
@moduledoc """
Generate a PDF file from multiple available sources.
"""
@doc """
Generate PDF file given an HTML string input
## Options
- `header_template` - HTML template for the print header.
- `footer_template` - HTML template for the print footer.
- `display_header_footer` - Display header and footer.
- `format` - Page format. Possible values: Letter, Legal, Tabloid, Ledger, A0, A1, A2, A3, A4, A5, A6
- `margin_left` - Integer value (px)
- `margin_right` - Integer value (px)
- `margin_top` - Integer value (px)
- `margin_bottom` - Integer value (px)
- `scale` - Scale of the webpage rendering. (default: 1). Accepts values between 0.1 and 2.
- `width` - Paper width, accepts values labeled with units.
- `height` - Paper height, accepts values labeled with units.
- `debug` - Output Puppeteer PDF options
- `landscape` - Paper orientation.
- `print_background` - Print background graphics.
- `timeout` - Integer value (ms), configures the timeout of the PDF creation (defaults to 5000)
"""
@spec from_string(String.t(), String.t(), list()) :: {:ok, String.t()} | {:error, atom()}
def from_string(html_code, pdf_output_path, options \\ []) do
# Write the HTML to a randomly named temporary file
{:ok, path} = Briefly.create(extname: ".html")
case File.open(path, [:write, :utf8]) do
{:ok, file} ->
IO.write(file, html_code)
File.close(file)
Path.absname(path)
|> from_file(pdf_output_path, options)
{:error, error} ->
{:error, error}
end
end
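# Hedged usage sketch for `from_string/3` (output path illustrative):
#
#     html = "<html><body><h1>Invoice</h1></body></html>"
#     PuppeteerPdf.Generate.from_string(html, "/tmp/invoice.pdf",
#       format: :a4,
#       print_background: true,
#       timeout: 10_000
#     )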
@doc """
Generate PDF file with a URL given as input.
## Options
- `header_template` - HTML template for the print header.
- `footer_template` - HTML template for the print footer.
- `display_header_footer` - Display header and footer.
- `format` - Page format. Possible values: Letter, Legal, Tabloid, Ledger, A0, A1, A2, A3, A4, A5, A6
- `margin_left` - Integer value (px)
- `margin_right` - Integer value (px)
- `margin_top` - Integer value (px)
- `margin_bottom` - Integer value (px)
- `scale` - Scale of the webpage rendering. (default: 1). Accepts values between 0.1 and 2.
- `ignore_https_errors` - Ignore HTTPS errors during page navigation.
- `width` - Paper width, accepts values labeled with units.
- `height` - Paper height, accepts values labeled with units.
- `debug` - Output Puppeteer PDF options
- `landscape` - Paper orientation.
- `print_background` - Print background graphics.
- `timeout` - Integer value (ms), configures the timeout of the PDF creation (defaults to 5000)
"""
@spec from_url(String.t(), String.t(), list()) :: {:ok, String.t()} | {:error, atom()}
def from_url(url, pdf_output_path, options \\ []) do
exec_path =
case Application.get_env(:puppeteer_pdf, :exec_path) do
nil -> "puppeteer-pdf"
value -> value
end
params =
Enum.reduce(options, [url, "--path", pdf_output_path], fn {key, value}, result ->
value =
case key do
:header_template ->
["--headerTemplate=#{value}"]
:footer_template ->
["--footerTemplate=#{value}"]
:display_header_footer ->
["--displayHeaderFooter"]
:format ->
if(
Enum.member?(
[
"letter",
"legal",
"tabloid",
"ledger",
"a0",
"a1",
"a2",
"a3",
"a4",
"a5",
"a6"
],
to_string(value) |> String.downcase()
)
) do
["--format", to_string(value)]
else
{:error, :invalid_format}
end
:ignore_https_errors ->
["--ignoreHTTPSErrors", "true"]
:margin_left ->
must_be_integer("--marginLeft", value)
:margin_right ->
must_be_integer("--marginRight", value)
:margin_top ->
must_be_integer("--marginTop", value)
:margin_bottom ->
must_be_integer("--marginBottom", value)
:scale ->
with {value, ""} <- Float.parse(to_string(value)),
true <- value >= 0.1 && value <= 2.0 do
["--scale", to_string(value)]
else
_ -> {:error, :invalid_scale}
end
:width ->
must_be_integer("--width", value)
:height ->
must_be_integer("--height", value)
:debug ->
["--debug"]
:landscape ->
["--landscape"]
:print_background ->
["--printBackground"]
:timeout ->
# timeout is not an argument for puppeteer-pdf
:ignore
end
case result do
{:error, message} ->
{:error, message}
_ ->
case value do
{:error, message} ->
{:error, message}
:ignore ->
result
_ ->
result ++ value
end
end
end)
case params do
{:error, message} ->
{:error, message}
_ ->
# Invalid values can cause the external command to hang the process, so run
# it in a task that is awaited with a timeout to guarantee an exit.
task =
Task.async(fn ->
case System.cmd(exec_path, params) do
# puppeteer-pdf exits with status 0 on success; any other
# status indicates a failed render.
{cmd_response, 0} ->
{:ok, cmd_response}
{error_message, _exit_status} ->
{:error, error_message}
end
end)
Task.await(task, options[:timeout] || 5000)
end
end
@doc """
Generate PDF file with an HTML file path given as input.
## Options
- `header_template` - HTML template for the print header.
- `footer_template` - HTML template for the print footer.
- `display_header_footer` - Display header and footer.
- `format` - Page format. Possible values: Letter, Legal, Tabloid, Ledger, A0, A1, A2, A3, A4, A5, A6
- `margin_left` - Integer value (px)
- `margin_right` - Integer value (px)
- `margin_top` - Integer value (px)
- `margin_bottom` - Integer value (px)
- `scale` - Scale of the webpage rendering. (default: 1). Accepts values between 0.1 and 2.
- `width` - Paper width, accepts values labeled with units.
- `height` - Paper height, accepts values labeled with units.
- `debug` - Output Puppeteer PDF options
- `landscape` - Paper orientation.
- `print_background` - Print background graphics.
- `timeout` - Integer value (ms), configures the timeout of the PDF creation (defaults to 5000)
"""
@spec from_file(String.t(), String.t(), list()) :: {:ok, String.t()} | {:error, atom()}
def from_file(html_file_path, pdf_output_path, options \\ []) do
case File.exists?(html_file_path) do
true ->
exec_path =
case Application.get_env(:puppeteer_pdf, :exec_path) do
nil -> "puppeteer-pdf"
value -> value
end
params =
Enum.reduce(options, [html_file_path, "--path", pdf_output_path], fn {key, value},
result ->
value =
case key do
:header_template ->
["--headerTemplate=#{value}"]
:footer_template ->
["--footerTemplate=#{value}"]
:display_header_footer ->
["--displayHeaderFooter"]
:format ->
if(
Enum.member?(
[
"letter",
"legal",
"tabloid",
"ledger",
"a0",
"a1",
"a2",
"a3",
"a4",
"a5",
"a6"
],
to_string(value) |> String.downcase()
)
) do
["--format", to_string(value)]
else
{:error, :invalid_format}
end
:margin_left ->
must_be_integer("--marginLeft", value)
:margin_right ->
must_be_integer("--marginRight", value)
:margin_top ->
must_be_integer("--marginTop", value)
:margin_bottom ->
must_be_integer("--marginBottom", value)
:scale ->
with {value, ""} <- Float.parse(to_string(value)),
true <- value >= 0.1 && value <= 2.0 do
["--scale", to_string(value)]
else
_ -> {:error, :invalid_scale}
end
:width ->
must_be_integer("--width", value)
:height ->
must_be_integer("--height", value)
:debug ->
["--debug"]
:landscape ->
["--landscape"]
:print_background ->
["--printBackground"]
:timeout ->
# timeout is not an argument for puppeteer-pdf
:ignore
end
case result do
{:error, message} ->
{:error, message}
_ ->
case value do
{:error, message} ->
{:error, message}
:ignore ->
result
_ ->
result ++ value
end
end
end)
case params do
{:error, message} ->
{:error, message}
_ ->
# Invalid values can cause the external command to hang the process, so run
# it in a task that is awaited with a timeout to guarantee an exit.
task =
Task.async(fn ->
case System.cmd(exec_path, params) do
# puppeteer-pdf exits with status 0 on success; any other
# status indicates a failed render.
{cmd_response, 0} ->
{:ok, cmd_response}
{error_message, _exit_status} ->
{:error, error_message}
end
end)
Task.await(task, options[:timeout] || 5000)
end
false ->
{:error, :input_file_not_found}
end
end
@spec must_be_integer(String.t(), Integer.t()) ::
list() | {:error, :margin_value_must_be_integer}
defp must_be_integer(field, value) when is_integer(value) do
[field, to_string(value)]
end
@spec must_be_integer(any(), any()) :: {:error, :margin_value_must_be_integer}
defp must_be_integer(_, _) do
{:error, :margin_value_must_be_integer}
end
end
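# A runnable sketch, assuming the puppeteer-pdf binary is on the PATH (or set
# via the :puppeteer_pdf, :exec_path application env) and /tmp/page.html exists:
case PuppeteerPdf.Generate.from_file("/tmp/page.html", "/tmp/page.pdf",
       margin_left: 20,
       landscape: true
     ) do
  {:ok, output} -> IO.puts(output)
  {:error, reason} -> IO.inspect(reason, label: "pdf generation failed")
end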
# source: lib/generate.ex
defmodule Soap.Response.Parser do
@moduledoc """
Provides functions for parsing an XML response body.
"""
import SweetXml, only: [xpath: 2, sigil_x: 2]
@soap_version_namespaces %{
"1.1" => :"http://schemas.xmlsoap.org/soap/envelope/",
"1.2" => :"http://www.w3.org/2003/05/soap-envelope"
}
@doc """
Parses an XML response body into a map.
Returns the full parsed response structure as a map; if the body contains no
elements, an empty map is returned.
"""
@spec parse(String.t(), atom()) :: map()
def parse(xml_response, :fault) do
fault_tag = get_fault_tag(xml_response)
xml_response
|> xpath(~x"//#{fault_tag}/*"l)
|> parse_elements()
end
def parse(xml_response, _response_type) do
body_tag = get_body_tag(xml_response)
xml_response
|> xpath(~x"//#{body_tag}/*"l)
|> parse_elements()
end
@spec parse_record(tuple()) :: map() | String.t()
defp parse_record({:xmlElement, tag_name, _, _, _, _, _, _, elements, _, _, _}) do
%{tag_name => parse_elements(elements)}
end
defp parse_record({:xmlText, _, _, _, value, _}), do: transform_record_value(value)
defp transform_record_value(nil), do: nil
defp transform_record_value(value) when is_list(value), do: value |> to_string() |> String.trim()
defp transform_record_value(value) when is_binary(value), do: value |> String.trim()
@spec parse_elements(list() | tuple()) :: map()
defp parse_elements([]), do: %{}
defp parse_elements(elements) when is_tuple(elements), do: parse_record(elements)
defp parse_elements(elements) when is_list(elements) do
elements
|> Enum.map(&parse_record/1)
|> parse_element_values()
end
@spec parse_element_values(list()) :: any()
defp parse_element_values(elements) do
cond do
Enum.all?(elements, &is_map/1) && unique_tags?(elements) ->
Enum.reduce(elements, &Map.merge/2)
Enum.all?(elements, &is_map/1) ->
elements |> Enum.map(&Map.to_list/1) |> List.flatten()
true ->
extract_value_from_list(elements)
end
end
@spec extract_value_from_list(list()) :: any()
defp extract_value_from_list([element]), do: element
defp extract_value_from_list(elements), do: elements
defp unique_tags?(elements) do
keys =
elements
|> Enum.map(&Map.keys/1)
|> List.flatten()
Enum.uniq(keys) == keys
end
defp get_envelope_namespace(xml_response) do
env_namespace = @soap_version_namespaces[soap_version()]
xml_response
|> xpath(~x"//namespace::*"l)
|> Enum.find(fn {_, _, _, _, namespace_url} -> namespace_url == env_namespace end)
|> elem(3)
end
defp get_fault_tag(xml_response) do
xml_response
|> get_envelope_namespace()
|> List.to_string()
|> apply_namespace_to_tag("Fault")
end
defp get_body_tag(xml_response) do
xml_response
|> get_envelope_namespace()
|> List.to_string()
|> apply_namespace_to_tag("Body")
end
defp apply_namespace_to_tag(env_namespace, tag), do: env_namespace <> ":" <> tag
defp soap_version, do: Application.fetch_env!(:soap, :globals)[:version]
end
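# Hedged usage sketch: parsing a SOAP 1.1 response body. Assumes the
# application env `config :soap, :globals, version: "1.1"` is set, since
# soap_version/0 reads it; the envelope below is illustrative.
xml =
  "<soap:Envelope xmlns:soap=\"http://schemas.xmlsoap.org/soap/envelope/\">" <>
    "<soap:Body><GetPriceResponse><Price>1.90</Price></GetPriceResponse></soap:Body>" <>
    "</soap:Envelope>"

Soap.Response.Parser.parse(xml, :success)
# expected shape: %{GetPriceResponse: %{Price: "1.90"}}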
# source: lib/soap/response/parser.ex
defmodule Toby.Data.Provider do
@moduledoc """
Provides statistics about the running Erlang VM for display in components.
Since these lookups can be expensive, access this data via `Toby.Data.Server`
instead of calling this module directly. The server module provides a
throttled interface to this data to avoid overwhelming the system.
"""
alias Toby.Data.{Applications, Node, Samples}
def provide({node, :node}, _) do
{:ok,
%{
current: node,
cookie: Node.cookie(node),
connected_nodes: Node.connected_nodes(),
visible_nodes: Node.visible_nodes()
}}
end
def provide({node, :lookup, proc_or_port_or_name}, _) do
{:ok, Node.lookup(node, proc_or_port_or_name)}
end
def provide({node, :processes}, _) do
{:ok, %{processes: Node.processes_extended(node)}}
end
def provide({node, :ports}, _) do
{:ok, %{ports: Node.ports_extended(node)}}
end
def provide({node, :applications}, _) do
with {:ok, apps} <- Applications.applications(node) do
{:ok,
%{
applications: Enum.sort_by(apps, &to_string/1)
}}
end
end
def provide({node, :application, app}, _) do
Applications.application(node, app)
end
def provide({node, :system}, _) do
{:ok,
%{
cpu: system_cpu(node),
limits: system_limits(node),
memory: system_memory(node),
statistics: system_statistics(node),
system: system_data(node)
}}
end
def provide({node, :load}, samples) do
{:ok,
%{
utilization: Samples.historical_scheduler_utilization(samples),
scheduler_count: system_cpu(node).schedulers,
memory: Samples.historical_memory(samples),
io: Samples.historical_io(samples)
}}
end
def provide({node, :memory}, samples) do
allocators = Node.allocators(node)
{:ok,
%{
allocators: allocators,
allocator_names: ["Total" | Map.keys(allocators) -- ["Total"]],
allocation_history: Samples.historical_allocation(samples)
}}
end
def provide({node, :tables}, _) do
{:ok,
%{
tables: node |> Node.ets_tables() |> Enum.sort_by(& &1[:name])
}}
end
def provide({_node, :help}, _) do
{:ok, %{version: Toby.version()}}
end
def provide(_other_key, _) do
{:error, :invalid_key}
end
def system_data(node) do
%{
otp_release: Node.system_info(node, :otp_release),
erts_version: Node.system_info(node, :version),
compiled_for: Node.system_info(node, :system_architecture),
emulator_wordsize: Node.system_info(node, {:wordsize, :internal}),
process_wordsize: Node.system_info(node, {:wordsize, :external}),
smp_support?: Node.system_info(node, :smp_support),
thread_support?: Node.system_info(node, :threads),
async_thread_pool_size: Node.system_info(node, :thread_pool_size)
}
end
def system_cpu(node) do
%{
logical_cpus: Node.system_info(node, :logical_processors),
online_logical_cpus: Node.system_info(node, :logical_processors_online),
available_logical_cpus: Node.system_info(node, :logical_processors_available),
schedulers: Node.system_info(node, :schedulers),
online_schedulers: Node.system_info(node, :schedulers_online),
available_schedulers: Node.system_info(node, :schedulers_online)
}
end
def system_limits(node) do
%{
atoms:
limit(Node.system_info(node, :atom_count), Node.system_info(node, :atom_limit)),
procs:
limit(
Node.system_info(node, :process_count),
Node.system_info(node, :process_limit)
),
ports:
limit(Node.system_info(node, :port_count), Node.system_info(node, :port_limit)),
ets: limit(Node.system_info(node, :ets_count), Node.system_info(node, :ets_limit)),
dist_buffer_busy: Node.system_info(node, :dist_buf_busy_limit)
}
end
def system_statistics(node) do
{{:input, io_input}, {:output, io_output}} = Node.statistics(node, :io)
%{
uptime_ms: uptime_ms(node),
run_queue: Node.statistics(node, :total_run_queue_lengths),
io_input_bytes: io_input,
io_output_bytes: io_output
}
end
def system_memory(node) do
Enum.into(Node.memory(node), %{})
end
defp limit(count, limit) do
%{count: count, limit: limit, percent_used: percent(count, limit)}
end
defp percent(_, 0), do: 0
defp percent(x, y), do: :erlang.trunc(Float.round(x / y, 2) * 100)
defp uptime_ms(node) do
{total_ms, _since_last_call_ms} = Node.statistics(node, :wall_clock)
total_ms
end
end
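# Hedged sketch: providers are keyed by {node, topic} tuples. Calling the
# module directly (normally access goes through Toby.Data.Server, as noted
# in the moduledoc):
{:ok, %{version: _}} = Toby.Data.Provider.provide({node(), :help}, nil)
{:error, :invalid_key} = Toby.Data.Provider.provide(:bogus, nil)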
# source: lib/toby/data/provider.ex
import Kernel, except: [apply: 2]
defmodule Ecto.Query.Builder.Join do
@moduledoc false
alias Ecto.Query.Builder
alias Ecto.Query.{JoinExpr, QueryExpr}
@doc """
Escapes a join expression (not including the `on` expression).
It returns a tuple containing the binds, the on expression (if available)
and the association expression.
## Examples
iex> escape(quote(do: x in "foo"), [], __ENV__)
{:x, {"foo", nil}, nil, []}
iex> escape(quote(do: "foo"), [], __ENV__)
{:_, {"foo", nil}, nil, []}
iex> escape(quote(do: x in Sample), [], __ENV__)
{:x, {nil, Sample}, nil, []}
iex> escape(quote(do: x in __MODULE__), [], __ENV__)
{:x, {nil, __MODULE__}, nil, []}
iex> escape(quote(do: x in {"foo", :sample}), [], __ENV__)
{:x, {"foo", :sample}, nil, []}
iex> escape(quote(do: x in {"foo", Sample}), [], __ENV__)
{:x, {"foo", Sample}, nil, []}
iex> escape(quote(do: x in {"foo", __MODULE__}), [], __ENV__)
{:x, {"foo", __MODULE__}, nil, []}
iex> escape(quote(do: c in assoc(p, :comments)), [p: 0], __ENV__)
{:c, nil, {0, :comments}, []}
iex> escape(quote(do: x in fragment("foo")), [], __ENV__)
{:x, {:{}, [], [:fragment, [], [raw: "foo"]]}, nil, []}
"""
@spec escape(Macro.t, Keyword.t, Macro.Env.t) :: {atom, Macro.t | nil, Macro.t | nil, list}
def escape({:in, _, [{var, _, context}, expr]}, vars, env)
when is_atom(var) and is_atom(context) do
{_, expr, assoc, params} = escape(expr, vars, env)
{var, expr, assoc, params}
end
def escape({:subquery, _, [expr]}, _vars, _env) do
{:_, quote(do: Ecto.Query.subquery(unquote(expr))), nil, []}
end
def escape({:subquery, _, [expr, opts]}, _vars, _env) do
{:_, quote(do: Ecto.Query.subquery(unquote(expr), unquote(opts))), nil, []}
end
def escape({:fragment, _, [_ | _]} = expr, vars, env) do
{expr, {params, :acc}} = Builder.escape(expr, :any, {[], :acc}, vars, env)
{:_, expr, nil, params}
end
def escape({string, schema} = join, _vars, env) when is_binary(string) do
case Macro.expand(schema, env) do
schema when is_atom(schema) ->
{:_, {string, schema}, nil, []}
_ ->
Builder.error! "malformed join `#{Macro.to_string(join)}` in query expression"
end
end
def escape({:assoc, _, [{var, _, context}, field]}, vars, _env)
when is_atom(var) and is_atom(context) do
ensure_field!(field)
var = Builder.find_var!(var, vars)
field = Builder.quoted_field!(field)
{:_, nil, {var, field}, []}
end
def escape({:^, _, [expr]}, _vars, _env) do
{:_, quote(do: Ecto.Query.Builder.Join.join!(unquote(expr))), nil, []}
end
def escape(string, _vars, _env) when is_binary(string) do
{:_, {string, nil}, nil, []}
end
def escape(schema, _vars, _env) when is_atom(schema) do
{:_, {nil, schema}, nil, []}
end
def escape(join, vars, env) do
case Macro.expand(join, env) do
^join ->
Builder.error! "malformed join `#{Macro.to_string(join)}` in query expression"
join ->
escape(join, vars, env)
end
end
@doc """
Called at runtime to check dynamic joins.
"""
def join!(expr) when is_atom(expr),
do: {nil, expr}
def join!(expr) when is_binary(expr),
do: {expr, nil}
def join!({source, module}) when is_binary(source) and is_atom(module),
do: {source, module}
def join!(expr),
do: Ecto.Queryable.to_query(expr)
@doc """
Builds a quoted expression.
The quoted expression should evaluate to a query at runtime.
If possible, it does all calculations at compile time to avoid
runtime work.
"""
@spec build(Macro.t, atom, [Macro.t], Macro.t, Macro.t, Macro.t, atom, nil | {:ok, String.t | nil}, nil | String.t | [String.t], Macro.Env.t) ::
{Macro.t, Keyword.t, non_neg_integer | nil}
def build(query, qual, binding, expr, count_bind, on, as, prefix, maybe_hints, env) do
{:ok, prefix} = prefix || {:ok, nil}
hints = List.wrap(maybe_hints)
unless Enum.all?(hints, &is_binary/1) do
Builder.error!(
"`hints` must be a compile time string or list of strings, " <>
"got: `#{Macro.to_string(maybe_hints)}`"
)
end
unless is_atom(as) do
Builder.error! "`as` must be a compile time atom, got: `#{Macro.to_string(as)}`"
end
unless is_binary(prefix) or is_nil(prefix) do
Builder.error! "`prefix` must be a compile time string, got: `#{Macro.to_string(prefix)}`"
end
{query, binding} = Builder.escape_binding(query, binding, env)
{join_bind, join_source, join_assoc, join_params} = escape(expr, binding, env)
join_params = Builder.escape_params(join_params)
join_qual = validate_qual(qual)
validate_bind(join_bind, binding)
{count_bind, query} =
if is_nil(count_bind) do
query =
quote do
query = Ecto.Queryable.to_query(unquote(query))
join_count = Builder.count_binds(query)
query
end
{quote(do: join_count), query}
else
{count_bind, query}
end
binding = binding ++ [{join_bind, count_bind}]
next_bind =
if is_integer(count_bind) do
count_bind + 1
else
quote(do: unquote(count_bind) + 1)
end
join = [
as: as,
assoc: join_assoc,
file: env.file,
line: env.line,
params: join_params,
prefix: prefix,
qual: join_qual,
source: join_source,
hints: hints
]
query = build_on(on || true, join, as, query, binding, count_bind, env)
{query, binding, next_bind}
end
def build_on({:^, _, [var]}, join, as, query, _binding, count_bind, env) do
quote do
query = unquote(query)
Ecto.Query.Builder.Join.join!(
query,
%JoinExpr{unquote_splicing(join), on: %QueryExpr{}},
unquote(var),
unquote(as),
unquote(count_bind),
unquote(env.file),
unquote(env.line)
)
end
end
def build_on(on, join, as, query, binding, count_bind, env) do
case Ecto.Query.Builder.Filter.escape(:on, on, count_bind, binding, env) do
{on_expr, {on_params, []}} ->
on_params = Builder.escape_params(on_params)
join =
quote do
%JoinExpr{
unquote_splicing(join),
on: %QueryExpr{
expr: unquote(on_expr),
params: unquote(on_params),
line: unquote(env.line),
file: unquote(env.file)
}
}
end
Builder.apply_query(query, __MODULE__, [join, as, count_bind], env)
_pattern ->
raise ArgumentError, "invalid expression for join `:on`, subqueries aren't supported"
end
end
@doc """
Applies the join expression to the query.
"""
def apply(%Ecto.Query{joins: joins} = query, expr, nil, _count_bind) do
%{query | joins: joins ++ [expr]}
end
def apply(%Ecto.Query{joins: joins, aliases: aliases} = query, expr, as, count_bind) do
aliases =
case aliases do
%{} -> runtime_aliases(aliases, as, count_bind)
_ -> compile_aliases(aliases, as, count_bind)
end
%{query | joins: joins ++ [expr], aliases: aliases}
end
def apply(query, expr, as, count_bind) do
apply(Ecto.Queryable.to_query(query), expr, as, count_bind)
end
@doc """
Called at runtime to build aliases.
"""
def runtime_aliases(aliases, nil, _), do: aliases
def runtime_aliases(aliases, name, join_count) when is_atom(name) and is_integer(join_count) do
if Map.has_key?(aliases, name) do
Builder.error! "alias `#{inspect name}` already exists"
else
Map.put(aliases, name, join_count)
end
end
defp compile_aliases({:%{}, meta, aliases}, name, join_count)
when is_atom(name) and is_integer(join_count) do
{:%{}, meta, aliases |> Map.new |> runtime_aliases(name, join_count) |> Map.to_list}
end
defp compile_aliases(aliases, name, join_count) do
quote do
Ecto.Query.Builder.Join.runtime_aliases(unquote(aliases), unquote(name), unquote(join_count))
end
end
@doc """
Called at runtime to build a join.
"""
def join!(query, join, expr, as, count_bind, file, line) do
# join without expanded :on is built and applied to the query,
# so that expansion of dynamic :on accounts for the new binding
{on_expr, on_params, on_file, on_line} =
Ecto.Query.Builder.Filter.filter!(:on, apply(query, join, as, count_bind), expr, count_bind, file, line)
join = %{join | on: %QueryExpr{expr: on_expr, params: on_params, line: on_line, file: on_file}}
apply(query, join, as, count_bind)
end
defp validate_qual(qual) when is_atom(qual) do
qual!(qual)
end
defp validate_qual(qual) do
quote(do: Ecto.Query.Builder.Join.qual!(unquote(qual)))
end
defp validate_bind(bind, all) do
if bind != :_ and bind in all do
Builder.error! "variable `#{bind}` is already defined in query"
end
end
@qualifiers [:inner, :inner_lateral, :left, :left_lateral, :right, :full, :cross]
@doc """
Called at runtime to check dynamic qualifier.
"""
def qual!(qual) when qual in @qualifiers, do: qual
def qual!(qual) do
raise ArgumentError,
"invalid join qualifier `#{inspect qual}`, accepted qualifiers are: " <>
Enum.map_join(@qualifiers, ", ", &"`#{inspect &1}`")
end
defp ensure_field!({var, _, _}) when var != :^ do
Builder.error! "you passed the variable `#{var}` to `assoc/2`. Did you mean to pass the atom `:#{var}`?"
end
defp ensure_field!(_), do: true
end
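# Hedged sketch of the runtime normalizers defined above. MyApp.Post is a
# hypothetical schema module; join!/1 accepts any atom as a schema:
{nil, MyApp.Post} = Ecto.Query.Builder.Join.join!(MyApp.Post)
{"posts", nil} = Ecto.Query.Builder.Join.join!("posts")
{"posts", MyApp.Post} = Ecto.Query.Builder.Join.join!({"posts", MyApp.Post})
:left = Ecto.Query.Builder.Join.qual!(:left)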
# source: lib/ecto/query/builder/join.ex
defmodule Filterable.Params do
@moduledoc ~S"""
Allows fetching a `Map` of filterable params.
Performs casting/trimming/normalization of filter params.
"""
alias Filterable.Utils
@spec filter_value(map | Keyword.t(), Keyword.t()) :: {:ok | :error, any}
def filter_value(params, opts \\ []) do
with params <- fetch_params(params, Keyword.get(opts, :top_param)),
value <- fetch_value(params, Keyword.get(opts, :param)),
value <- Utils.to_atoms_map(value),
value <- normalize_map(value),
value <- trim_value(value, Keyword.get(opts, :trim)),
value <- nilify_value(value, Keyword.get(opts, :allow_blank)),
{:ok, value} <-
cast_value(value, Keyword.get(opts, :cast), Keyword.get(opts, :cast_errors)),
value <- default_value(value, Keyword.get(opts, :default)),
do: {:ok, value}
end
defp fetch_params(params, key) do
if key do
fetch_value(params, key)
else
params
end
end
defp fetch_value(nil, _) do
nil
end
defp fetch_value(params, key) when is_list(key) do
if Keyword.keyword?(key) do
Enum.into(key, %{}, fn {k, v} ->
{k, fetch_value(fetch_value(params, k), v)}
end)
else
Enum.into(key, %{}, &{&1, fetch_value(params, &1)})
end
end
defp fetch_value(params, key) when is_map(params) do
Map.get(params, Utils.ensure_string(key)) || Map.get(params, Utils.ensure_atom(key))
end
defp fetch_value(params, key) when is_list(params) do
if Keyword.keyword?(params) do
Keyword.get(params, key)
end
end
defp fetch_value(_, _) do
nil
end
defp normalize_map(map) when map_size(map) == 1 do
map |> Map.values() |> List.first()
end
defp normalize_map(map) do
map
end
defp trim_value(value, true) do
trim_value(value)
end
defp trim_value(value, _) do
value
end
defp trim_value(%{__struct__: _} = value) do
value
end
defp trim_value(value) when is_bitstring(value) do
String.trim(value)
end
defp trim_value(value) when is_list(value) do
Enum.map(value, &trim_value(&1))
end
defp trim_value(value) when is_map(value) do
Enum.into(value, %{}, fn {k, v} -> {k, trim_value(v)} end)
end
defp trim_value(value) do
value
end
defp nilify_value(value, allow_blank) when allow_blank in [nil, false] do
nilify_value(value)
end
defp nilify_value(value, _) do
value
end
defp nilify_value(%{__struct__: _} = value) do
value
end
defp nilify_value(value) when is_bitstring(value) do
Utils.presence(value)
end
defp nilify_value(value) when is_list(value) do
value |> Enum.filter(&nilify_value(&1)) |> Utils.presence()
end
defp nilify_value(value) when is_map(value) do
value |> Enum.into(%{}, fn {k, v} -> {k, nilify_value(v)} end) |> Utils.presence()
end
defp nilify_value(value) do
value
end
defp default_value(%{__struct__: _} = value, _) do
value
end
defp default_value(value, default) when is_map(value) and is_list(default) do
Enum.into(value, %{}, fn {k, v} ->
{k, default_value(v, Keyword.get(default, k))}
end)
end
defp default_value(value, default) when not is_map(value) and is_list(default) do
if Keyword.keyword?(default) do
value
else
(is_nil(value) && default) || value
end
end
defp default_value(nil, default) do
default
end
defp default_value(value, _) do
value
end
defp cast_value(value, nil, _) do
{:ok, value}
end
defp cast_value(%{__struct__: _} = value, cast, errors) do
cast(value, cast, errors)
end
defp cast_value(value, cast, errors) when is_map(value) do
Utils.reduce_with(value, %{}, fn {k, v}, acc ->
case cast(v, cast, errors) do
error = {:error, _} -> error
{:ok, val} -> Map.put(acc, k, val)
end
end)
end
defp cast_value(value, cast, errors) when is_list(value) do
Utils.reduce_with(value, [], fn val, acc ->
case cast(val, cast, errors) do
error = {:error, _} -> error
{:ok, nil} -> acc
{:ok, val} -> acc ++ [val]
end
end)
end
defp cast_value(value, cast, errors) do
cast(value, cast, errors)
end
defp cast(value, cast, errors) when is_list(cast) do
Utils.reduce_with(cast, value, fn c, val ->
case cast(val, c, errors) do
error = {:error, _} -> error
{:ok, val} -> val
end
end)
end
defp cast(value, cast, true) do
case cast(value, cast) do
:error -> {:error, cast_error_message(value: value, cast: cast)}
error = {:error, _} -> error
value -> {:ok, value}
end
end
defp cast(value, cast, _) do
case cast(value, cast) do
:error -> {:ok, nil}
{:error, _} -> {:ok, nil}
value -> {:ok, value}
end
end
defp cast(nil, _) do
nil
end
defp cast(value, :atom) do
cast(value, :atom_unchecked)
end
defp cast(value, {:atom, checked_values}) do
Filterable.Cast.atom(value, checked_values)
end
defp cast(value, cast) when is_atom(cast) do
apply(Filterable.Cast, cast, [value])
end
defp cast(value, cast) when is_function(cast) do
cast.(value)
end
defp cast(value, _) do
value
end
defp cast_error_message(value: value, cast: {cast, params}) when is_atom(cast) do
"Unable to cast #{inspect(value)} to #{to_string(cast)} with options: #{inspect(params)}"
end
defp cast_error_message(value: value, cast: cast) when is_function(cast) do
"Unable to cast #{inspect(value)} using #{inspect(cast)}"
end
defp cast_error_message(value: value, cast: cast) when is_atom(cast) do
"Unable to cast #{inspect(value)} to #{to_string(cast)}"
end
end
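# Hedged usage sketch: fetch, trim, and cast a single param. Assumes
# Filterable.Cast.integer/1 exists, since the :cast option dispatches to
# that module by function name:
{:ok, 2} =
  Filterable.Params.filter_value(%{"page" => " 2 "},
    param: :page,
    trim: true,
    cast: :integer
  )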
# source: lib/filterable/params.ex
defmodule Bodyguard.Plug.Authorize do
@behaviour Plug
@moduledoc """
Perform authorization in a Plug pipeline.
## Options
* `policy` *required* - the policy (or context) module
* `action` *required* - the action to authorize, either an atom or a 1-arity
function that accepts a conn and returns the action
* `user` - a 1-arity function which accepts the connection and returns a
user. If omitted, defaults `user` to `nil`
* `params` - params to pass to the authorization callbacks
* `fallback` - a fallback controller or plug to handle authorization
failure. If specified, the plug is called and then the pipeline is
`halt`ed. If not specified, then `Bodyguard.NotAuthorizedError` raises
directly to the router.
## Examples
# Raise on failure
plug Bodyguard.Plug.Authorize, policy: MyApp.Blog, action: :update_posts,
user: &get_current_user/1
# Fallback on failure
plug Bodyguard.Plug.Authorize, policy: MyApp.Blog, action: :update_posts,
user: &get_current_user/1, fallback: MyApp.FallbackController
"""
def init(opts \\ []) do
policy = Keyword.get(opts, :policy)
action = Keyword.get(opts, :action)
user_fun = Keyword.get(opts, :user)
params = Keyword.get(opts, :params, [])
fallback = Keyword.get(opts, :fallback)
if is_nil(policy), do: raise(ArgumentError, "#{inspect(__MODULE__)} :policy option required")
if action == nil or not (is_atom(action) or is_function(action, 1)),
do:
raise(
ArgumentError,
"#{inspect(__MODULE__)} :action option required - must be an atom or 1-arity function that accepts conn and returns the action"
)
unless is_nil(user_fun) or is_function(user_fun, 1),
do:
raise(
ArgumentError,
"#{inspect(__MODULE__)} :user option must be a 1-arity function that accepts conn and returns a user"
)
unless is_nil(fallback) or is_atom(fallback),
do: raise(ArgumentError, "#{inspect(__MODULE__)} :fallback option must be a plug module")
%{
policy: policy,
action: action,
user_fun: user_fun,
params: params,
fallback: fallback
}
end
def call(conn, %{fallback: nil} = opts) do
Bodyguard.permit!(
opts.policy,
get_action(conn, opts.action),
get_user(conn, opts.user_fun),
opts.params
)
conn
end
def call(conn, opts) do
case Bodyguard.permit(
opts.policy,
get_action(conn, opts.action),
get_user(conn, opts.user_fun),
opts.params
) do
:ok ->
conn
error ->
conn
|> opts.fallback.call(error)
|> Plug.Conn.halt()
end
end
defp get_user(conn, user_fun) when is_function(user_fun, 1) do
user_fun.(conn)
end
defp get_user(_conn, nil), do: nil
defp get_action(conn, action_fun) when is_function(action_fun, 1) do
action_fun.(conn)
end
defp get_action(_conn, action), do: action
end
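# Hedged sketch of a fallback module compatible with the :fallback option
# above: it is invoked as fallback.call(conn, {:error, reason}) and the
# pipeline is halted afterwards.
defmodule MyApp.FallbackController do
  import Plug.Conn

  def call(conn, {:error, _reason}) do
    send_resp(conn, 403, "Forbidden")
  end
end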
# source: lib/bodyguard/plug/authorize.ex
defmodule Spacesaving do
@type counter :: {%{}, integer}
@type countable :: atom
| String.t
@doc """
Initialize the state
## Examples
iex> Spacesaving.init(2)
{%{}, 2}
"""
@spec init(integer) :: counter
def init(n) do
{%{}, n}
end
@doc """
Add the item, which should be an atom or string to the state
## Examples
iex> Spacesaving.init(2) |> Spacesaving.push(:foo) |> Spacesaving.push(:bar)
{%{foo: 1, bar: 1}, 2}
iex> Spacesaving.init(2) |> Spacesaving.push(:foo) |> Spacesaving.push(:foo) |> Spacesaving.push(:bar) |> Spacesaving.push(:baz)
{%{foo: 2, baz: 2}, 2}
"""
@spec push(counter, countable) :: counter
def push({counts, max}, item) do
counts = if Map.has_key?(counts, item) or Enum.count(counts) < max do
Map.update(counts, item, 1, fn c -> c + 1 end)
else
{key, count} = Enum.min_by(counts, fn {_, c} -> c end)
counts
|> Map.delete(key)
|> Map.put(item, count + 1)
end
{counts, max}
end
@doc """
Get the top k counts as a descending sorted key list
## Examples
iex> {%{foo: 3, baz: 2}, 2} |> Spacesaving.top(2)
[foo: 3, baz: 2]
iex> {%{foo: 3, baz: 2}, 2} |> Spacesaving.top(1)
[foo: 3]
"""
@spec top(counter, integer) :: [{countable, integer}]
def top({counts, _}, k), do: top_counts(counts, k)
@spec top_counts(%{}, integer) :: [{countable, integer}]
defp top_counts(counts, k) do
counts
|> Enum.into([])
|> Enum.sort_by(fn {_, c} -> -c end)
|> Enum.take(k)
end
@doc """
Merge two states together, adding the counts for keys in both
counters. The new state's size will be the minimum of
the two states' sizes.
## Examples
iex> Spacesaving.merge({%{foo: 3, baz: 2}, 2}, {%{foo: 3, baz: 2}, 2})
{%{foo: 6, baz: 4}, 2}
iex> Spacesaving.merge({%{foo: 3, bar: 1}, 2}, {%{foo: 3, baz: 2}, 2})
{%{foo: 6, baz: 2}, 2}
"""
@spec merge(counter, counter) :: counter
def merge({left, left_max}, {right, right_max}) do
new_max = min(left_max, right_max)
merged = Map.merge(left, right, fn _k, v0, v1 -> v0 + v1 end)
|> top_counts(new_max)
|> Enum.into(%{})
{merged, new_max}
end
end
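# Runnable sketch: count a stream of events with a capacity of 2 and read the
# top entries. Space-saving counts are approximate; an inserted key inherits
# the evicted minimum's count.
top =
  [:a, :b, :a, :c, :a, :b]
  |> Enum.reduce(Spacesaving.init(2), &Spacesaving.push(&2, &1))
  |> Spacesaving.top(2)
# => [a: 3, b: 3]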
# source: lib/spacesaving.ex
defmodule PgMoney.Extension do
@moduledoc """
Implements how to encode and decode PostgreSQL's [`money` data type](https://www.postgresql.org/docs/9.5/datatype-money.html).
"""
@behaviour Postgrex.Extension
import PgMoney
@impl true
@spec init(keyword) :: PgMoney.config()
def init(opts) do
precision = Keyword.get(opts, :precision, 2)
telemetry = Keyword.get(opts, :telemetry_prefix, [:pg_money])
%{
precision: precision,
telemetry: telemetry
}
end
@impl true
@spec format(PgMoney.config()) :: :binary
def format(_state), do: :binary
@impl true
@spec matching(PgMoney.config()) :: [receive: String.t(), send: String.t()]
def matching(_state),
do: [
receive: "cash_recv",
send: "cash_send"
]
@impl true
@spec decode(PgMoney.config()) :: Macro.t()
def decode(%{precision: p, telemetry: t}) do
quote location: :keep do
<<unquote(PgMoney.storage_size())::int32,
data::binary-size(unquote(PgMoney.storage_size()))>> ->
<<digits::int64>> = data
unquote(__MODULE__).to_dec(digits, unquote(p), unquote(t))
end
end
@impl true
@spec encode(PgMoney.config()) :: Macro.t()
def encode(%{precision: p, telemetry: t}) do
quote location: :keep do
%Decimal{} = decimal ->
<<unquote(PgMoney.storage_size())::int32,
unquote(__MODULE__).to_int(decimal, unquote(p), unquote(t))::int64>>
n when is_float(n) ->
<<unquote(PgMoney.storage_size())::int32,
unquote(__MODULE__).to_int(Decimal.from_float(n), unquote(p), unquote(t))::int64>>
n when is_integer(n) ->
<<unquote(PgMoney.storage_size())::int32,
unquote(__MODULE__).to_int(Decimal.new(n), unquote(p), unquote(t))::int64>>
other ->
raise ArgumentError, "cannot encode #{inspect(other)} as money."
end
end
@doc """
Returns a `t:Decimal.t/0` that corresponds to the given `money` integer at the given precision.
"""
@spec to_dec(integer, PgMoney.precision(), PgMoney.telemetry()) :: Decimal.t()
def to_dec(integer, precision \\ 2, telemetry \\ false)
def to_dec(integer, precision, telemetry) do
started_at = current_time()
try do
case {PgMoney.is_money(integer), PgMoney.is_precision(precision)} do
{true, true} ->
coef = abs(integer)
%Decimal{
sign:
if coef == integer do
1
else
-1
end,
coef: coef,
exp: -precision
}
{false, false} ->
raise ArgumentError,
"invalid money (#{inspect(integer)}) and precision (#{inspect(precision)})"
{false, _} ->
raise ArgumentError,
"cannot represent #{inspect(integer)} as `money`, not a valid int64."
{_, false} ->
raise ArgumentError,
"invalid precision #{inspect(precision)}, must be a positive integer"
end
after
duration = time_diff(started_at, current_time())
emit_start(telemetry, :to_dec, started_at)
emit_stop(telemetry, :to_dec, duration)
end
end
@doc """
Returns the `money` integer that corresponds to the given decimal at the given precision.
"""
@spec to_int(Decimal.t(), PgMoney.precision(), PgMoney.telemetry()) :: integer
def to_int(decimal, precision \\ 2, telemetry \\ false)
def to_int(%Decimal{sign: sign, coef: coef, exp: e} = d, p, t) do
started_at = current_time()
try do
cond do
not is_precision(p) ->
raise ArgumentError, "invalid precision #{inspect(p)}, must be a positive integer."
coef in [:inf, :qNaN, :sNaN] ->
raise ArgumentError, message: "cannot represent #{inspect(d)} as `money`."
true ->
{dst, int} =
case e + p do
n when n == 0 ->
{d, sign * coef}
n when 0 < n ->
f = Enum.reduce(1..n, 1, fn _, acc -> 10 * acc end)
{d, sign * coef * f}
n when n < 0 ->
dst = Decimal.round(d, p)
{dst, dst.sign * dst.coef}
end
try do
check_validity(int)
after
emit_event(t, :to_int, d, dst, p)
end
end
after
ended_at = current_time()
duration = time_diff(started_at, ended_at)
emit_start(t, :to_int, started_at)
emit_stop(t, :to_int, duration)
end
end
defp check_validity(int) when is_money(int) do
int
end
defp check_validity(other) do
raise ArgumentError, "invalid money value #{inspect(other)}"
end
defp emit_start(false, _op, _started_at), do: :ok
defp emit_start(prefix, op, started_at) do
name = prefix ++ [:start]
:telemetry.execute(
name,
%{time: started_at},
%{operation: op}
)
end
defp emit_stop(false, _op, _duration), do: :ok
defp emit_stop(prefix, op, duration) do
name = prefix ++ [:stop]
:telemetry.execute(
name,
%{duration: duration},
%{operation: op}
)
end
defp emit_event(false, _op, _src, _dst, _p), do: :ok
defp emit_event(prefix, :to_int, %Decimal{} = src, %Decimal{} = dst, p) when is_list(prefix) do
event =
if Decimal.eq?(src, dst) do
:lossless
else
:lossy
end
name = prefix ++ [event]
:telemetry.execute(
name,
%{
dst: dst,
diff: Decimal.sub(src, dst)
},
%{
src: src,
precision: p
}
)
end
defp current_time, do: :erlang.monotonic_time(:nanosecond)
defp time_diff(start, stop), do: stop - start
end
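# Wiring sketch — registering this extension with Postgrex (the types module
# name MyApp.PostgrexTypes is an assumption):
#
#   Postgrex.Types.define(MyApp.PostgrexTypes, [PgMoney.Extension], [])
#
#   {:ok, conn} =
#     Postgrex.start_link(database: "my_db", types: MyApp.PostgrexTypes)
#
#   Postgrex.query!(conn, "SELECT 19.99::money", [])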
|
lib/pg_money/extension.ex
| 0.892869
| 0.489564
|
extension.ex
|
starcoder
|
defmodule Hornet do
@moduledoc """
Hornet is a simple library for stress testing.
It executes the given function at the given rate (calls per second), dynamically changing the number of processes to maintain the rate.
"""
alias Hornet.Scheduler
@doc """
Starts an instance of Hornet.
## Parameters
It accepts a keyword list.
Required parameters:
- rate - the required rate in operations per second. For example, `100` (ops/second)
- func - the anonymous function that has to be executed. For example, `fn -> 1 + 1 end`
- id - atom that will be used for Hornet's process names.
Optional parameters:
- start_period - every process executes the given function periodically. This is a starting value for this period. The default value is 100 ms.
- adjust_step - if the given rate cannot be maintained (for example, if the function takes too long to execute), Hornet will start increasing the number of processes and the execution period for each process. The period increases in increments of adjust_step. The default value is 50ms.
- adjust_period - the number of processes is adjusted periodically by adjust_period value. The default value is 5_000 ms.
- error_rate - the allowed difference between the expected rate and the actual rate: |current_rate - expected_rate| < error_rate. The default value is 0.1.
- process_number_limit - if the given function's execution time is long and the required rate is high, Hornet would otherwise keep spawning processes indefinitely. This value limits the number of processes. The default value is nil.
- rate_period - the period of measuring the current rate. The default value is 1_000 ms.
## Examples
iex> params = [rate: 1, func: fn -> IO.inspect("hello") end, id: :hello]
iex> Hornet.start(params)
{:ok, #PID<0.201.0>}
"hello"
"hello"
"hello"
"hello"
"hello"
...
"""
@spec start(Keyword.t()) :: GenServer.on_start()
def start(params) do
Scheduler.start_link(params)
end
@doc """
Stops an instance of Hornet. It accepts the pid returned by `start/1` or the provided id.
## Examples
iex> Hornet.stop(:hello)
:ok
"""
@spec stop(atom() | pid) :: :ok
def stop(name) do
Scheduler.stop(name)
end
end
|
lib/hornet.ex
| 0.908889
| 0.727951
|
hornet.ex
|
starcoder
|
defmodule Rig.Subscription do
@moduledoc false
@typedoc """
A subscription for a specific event type.
The constraints are expected in [disjunctive normal
form](https://en.wikipedia.org/wiki/Disjunctive_normal_form) and defined using a
list of maps: the list entries are OR'ed, while the key-value pairs within a
single map are AND'ed. For example:
```elixir
%{
event_type: "com.github.pull.create",
constraints: [
%{ "head_repo" => "octocat/Hello-World" },
%{ "base_repo" => "octocat/Hello-World" }
]
}
```
"""
defmodule ValidationError do
defexception [:error, :params]
def message(%__MODULE__{error: error, params: params}),
do: "invalid subscription: #{error} when parsing #{inspect(params)}"
end
@type constraints :: [%{required(String.t()) => String.t()}]
@type t :: %__MODULE__{
event_type: String.t(),
constraints: constraints
}
@derive Jason.Encoder
@enforce_keys [:event_type]
defstruct event_type: nil,
constraints: []
defimpl String.Chars do
alias Rig.Subscription
def to_string(%Subscription{} = sub) do
"Subscription for #{sub.event_type} (#{inspect(sub.constraints)})"
end
end
# ---
@spec new(any) :: {:ok, t} | {:error, %ValidationError{}}
def new(%{} = params) do
params = %{
event_type: event_type(params),
constraints: constraints(params)
}
subscription = struct!(__MODULE__, params)
validate(subscription)
{:ok, subscription}
catch
{:error, reason} when byte_size(reason) > 0 ->
{:error, %ValidationError{error: reason, params: params}}
end
def new(params), do: {:error, %ValidationError{error: "not a map", params: params}}
# ---
@spec new!(any) :: t
def new!(params) do
case new(params) do
{:ok, sub} -> sub
{:error, err} -> raise err
end
end
# ---
defp event_type(%{event_type: event_type}), do: event_type
defp event_type(%{"event_type" => event_type}), do: event_type
defp event_type(%{"eventType" => event_type}), do: event_type
defp event_type(_), do: throw({:error, "event-type not found"})
defp constraints(%{constraints: constraints}), do: constraints
defp constraints(%{"constraints" => constraints}), do: constraints
defp constraints(%{one_of: constraints}), do: constraints
defp constraints(%{"one_of" => constraints}), do: constraints
defp constraints(%{"oneOf" => constraints}), do: constraints
defp constraints(_), do: []
# ---
defp validate(%__MODULE__{event_type: event_type, constraints: constraints}) do
validate_event_type(event_type)
validate_constraints(constraints)
end
# ---
defp validate_event_type(type) when byte_size(type) > 0, do: :ok
defp validate_event_type(_), do: throw({:error, "event-type empty"})
# ---
defp validate_constraints(constraints) when is_list(constraints) do
Enum.each(constraints, &validate_constraint/1)
end
defp validate_constraints(_),
do: throw({:error, "constraints expected to be a list of disjunctive clauses"})
# ---
defp validate_constraint(conjunction) when not is_map(conjunction),
do: throw({:error, "a disjunctive clause expected to be a conjunction represented by a map"})
defp validate_constraint(conjunction) do
if not Enum.all?(conjunction, fn {k, _} -> is_nonempty_string(k) end) do
throw({:error, "conjunctive clauses expected to be a map with nonempty strings as keys"})
end
end
# ---
defp is_nonempty_string(s) when byte_size(s) > 0, do: true
defp is_nonempty_string(_), do: false
end
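# Construction sketch — string and camelCase keys are both accepted by the
# extractors above:
#
#   {:ok, sub} =
#     Rig.Subscription.new(%{
#       "eventType" => "com.github.pull.create",
#       "oneOf" => [%{"head_repo" => "octocat/Hello-World"}]
#     })
#
#   Rig.Subscription.new(%{})
#   #=> {:error, %Rig.Subscription.ValidationError{error: "event-type not found", ...}}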
|
apps/rig/lib/rig/subscription.ex
| 0.845751
| 0.883538
|
subscription.ex
|
starcoder
|
defmodule NaryTree do
@moduledoc """
NaryTree implements a data structure for an n-ary tree in which each node has zero or more children.
A node in a tree can have arbitrary number of children and depth. Trees are unbalanced and children unordered.
"""
defstruct root: nil, nodes: %{}
alias NaryTree.Node
@type t :: %__MODULE__{root: String.t | :empty, nodes: %{optional(String.t) => Node.t}}
@doc ~S"""
Create a new, empty tree.
## Example
iex> NaryTree.new()
%NaryTree{nodes: %{}, root: :empty}
"""
@spec new() :: __MODULE__.t()
def new(), do: %__MODULE__{root: :empty}
@doc ~S"""
Create a new tree with a root node.
## Example
iex> %NaryTree{root: key, nodes: nodes} = NaryTree.new(NaryTree.Node.new "Root node")
iex> nodes[key].name
"Root node"
"""
@spec new(Node.t()) :: __MODULE__.t()
def new(%Node{} = node) do
root = %Node{ node | parent: :empty, level: 0 }
%__MODULE__{root: root.id, nodes: %{root.id => root}}
end
@doc ~S"""
Add a child node to a tree root node. Returns an updated tree with added child.
RootNode
\
ChildNode
## Example
iex> tree = NaryTree.new(NaryTree.Node.new "Root node") |>
...> NaryTree.add_child(NaryTree.Node.new("Child"))
iex> %NaryTree{root: key, nodes: nodes} = tree
iex> [child_id | _] = nodes[key].children
iex> nodes[child_id].name
"Child"
"""
@spec add_child(__MODULE__.t(), Node.t()) :: __MODULE__.t()
def add_child(%__MODULE__{} = tree, %Node{} = child) do
add_child(tree, child, tree.root)
end
@doc ~S"""
Add a child node to the specified tree node. Returns an updated tree with added child.
RootNode
\
BranchNode
\
New node
## Example
iex> branch = NaryTree.Node.new("Branch Node")
iex> tree = NaryTree.new(NaryTree.Node.new("Root Node")) |>
...> NaryTree.add_child(branch) |>
...> NaryTree.add_child(NaryTree.Node.new("New node"), branch.id)
iex> Enum.count tree.nodes
3
"""
def add_child(_, %Node{id: child_id}, parent_id) when parent_id == child_id do
raise "Cannot add child to its own node"
end
def add_child(%__MODULE__{nodes: nodes} = tree, %Node{id: child_id} = child, parent_id) do
parent = tree.nodes[parent_id]
updated_nodes = nodes
|> Map.put_new(child_id, %Node{child | parent: parent.id, level: parent.level + 1})
|> Map.put(parent.id, %Node{parent | children: List.delete(parent.children, child_id) ++ [child_id] })
%__MODULE__{tree | nodes: updated_nodes}
end
@doc ~S"""
Check whether a node is a root node.
## Example
iex> node = NaryTree.Node.new "Root node"
iex> NaryTree.is_root? node
true
"""
@spec is_root?(Node.t()) :: boolean()
def is_root?(%Node{} = node), do: node.parent == :empty || node.parent == nil
@doc ~S"""
Check whether a node is a leaf node.
## Example
iex> tree = NaryTree.new(NaryTree.Node.new("Root node")) |>
...> NaryTree.add_child(NaryTree.Node.new("Leaf node"))
iex> [node_id] = tree.nodes[tree.root].children
iex> leaf_node = tree.nodes[node_id]
iex> NaryTree.is_leaf? leaf_node
true
"""
@spec is_leaf?(Node.t()) :: boolean()
def is_leaf?(%Node{} = node), do: node.children == []
@doc ~S"""
Check whether a node has non-empty content.
## Example
iex> node = NaryTree.Node.new "Node", content: %{c: "Content"}
iex> NaryTree.has_content? node
true
"""
@spec has_content?(Node.t()) :: boolean()
def has_content?(%Node{} = node), do: !(node.content == nil || node.content == :empty)
@doc ~S"""
Enumerates the tree nodes and applies the function to each node's content.
Returns the updated tree, with new content for every node.
## Example
iex> tree = NaryTree.new(NaryTree.Node.new("Root node")) |>
...> NaryTree.add_child(NaryTree.Node.new("Leaf node 1")) |>
...> NaryTree.add_child(NaryTree.Node.new("Leaf node 2"))
iex> Enum.map tree.nodes, fn({_,node}) -> node.content end
[:empty, :empty, :empty]
iex> NaryTree.update_content(tree, fn(_) -> %{x: 4} end) |>
...> Map.get(:nodes) |> Enum.map(fn({_,node}) -> node.content end)
[%{x: 4}, %{x: 4}, %{x: 4}]
"""
@spec update_content(__MODULE__.t(), function()) :: __MODULE__.t()
def update_content(%__MODULE__{nodes: nodes} = tree, func) do
%__MODULE__{tree | nodes: do_update_content(nodes, func)}
end
defp do_update_content(nodes, func) do
Enum.reduce(nodes, nodes, fn({id, node}, acc) ->
Map.put(acc, id, Map.update!(node, :content, func))
end)
end
@doc ~S"""
Enumerates the tree nodes and applies the function to each leaf node's content.
Similar to `update_content/2`, but applies only to leaf nodes.
"""
@spec each_leaf(__MODULE__.t(), function()) :: __MODULE__.t()
def each_leaf(%__MODULE__{nodes: nodes} = tree, func) do
%__MODULE__{tree | nodes: do_each_leaf(nodes, func)}
end
defp do_each_leaf(nodes, func) do
Enum.reduce(nodes, nodes, fn({_,node}, acc) ->
if is_leaf?(node) do
Map.put(acc, node.id, Map.update!(node, :content, func))
else
acc
end
end)
end
@doc ~S"""
Check whether the argument is of NaryTree type.
## Example
iex> NaryTree.is_nary_tree? NaryTree.new(NaryTree.Node.new "Node")
true
"""
@spec is_nary_tree?(__MODULE__.t()) :: boolean()
def is_nary_tree?(%__MODULE__{}), do: true
def is_nary_tree?(_), do: false
@doc ~S"""
Move children nodes from one node to another node.
"""
@spec move_nodes(__MODULE__.t(), [Node.t()], Node.t()) :: __MODULE__.t()
def move_nodes(tree, [], _), do: tree
def move_nodes(tree, nodes, %Node{} = new_parent) do
move_nodes(tree, Enum.map(nodes, &(&1.id)), new_parent.id)
end
@spec move_nodes(__MODULE__.t(), [String.t()], String.t()) :: __MODULE__.t()
def move_nodes(tree, child_ids, new_parent_id) do
new_parent_node = tree.nodes[new_parent_id]
pid = tree.nodes[hd child_ids].parent
updated_nodes = Enum.reduce(child_ids, tree.nodes, fn(cid, acc) ->
Map.put acc, cid, %Node{ acc[cid] | parent: new_parent_id, level: new_parent_node.level+1}
end)
|> Map.put(pid, %Node{ tree.nodes[pid] | children: tree.nodes[pid].children -- child_ids })
|> Map.put(new_parent_id, %Node { new_parent_node | children: new_parent_node.children ++ child_ids })
%__MODULE__{ tree | nodes: updated_nodes }
end
@doc ~S"""
Get the node with the specified id from the tree.
## Example
iex> node = NaryTree.Node.new("Node")
iex> n = NaryTree.new(node) |>
...> NaryTree.get(node.id)
iex> n.name
"Node"
"""
def get(%__MODULE__{nodes: nodes}, id), do: Map.get nodes, id
@doc ~S"""
Put a node into the tree at the specified id.
Put will replace the name and content attributes of the node at id with
the attributes of the new node.
The children and parent of the old node will remain the same so that
the hierarchy structure remains the same.
## Example
iex> tree = NaryTree.new NaryTree.Node.new("Root")
iex> tree.nodes[tree.root].name
"Root"
iex> tree = NaryTree.put(tree, tree.root, NaryTree.Node.new("Node"))
iex> tree.nodes[tree.root].name
"Node"
"""
def put(%__MODULE__{nodes: nodes} = tree, id, node_to_replace) do
updated_node = %Node{nodes[id] | content: node_to_replace.content, name: node_to_replace.name}
%__MODULE__{ tree | nodes: Map.put(nodes, id, updated_node) }
end
@doc ~S"""
Delete a node in a tree.
If the deleted node has children, the children will be moved up in hierarchy
to become the children of the deleted node's parent.
Deleting root node results in `:error`
## Example
iex> branch = NaryTree.Node.new("Branch Node")
iex> leaf = NaryTree.Node.new("Leaf")
iex> tree = NaryTree.new(NaryTree.Node.new("Root Node")) |>
...> NaryTree.add_child(branch) |>
...> NaryTree.add_child(leaf, branch.id) |>
...> NaryTree.delete(branch.id)
iex> tree.nodes[branch.id]
nil
iex> tree.nodes[tree.root].children # leaf becomes root's child
[leaf.id]
"""
@spec delete(NaryTree.t(), any()) :: NaryTree.t() | :error
def delete(%__MODULE__{} = tree, %Node{id: id}), do: delete(tree, id)
def delete(%__MODULE__{root: root}, id) when id == root, do: :error
def delete(%__MODULE__{nodes: nodes} = tree, id) do
if Enum.member? tree, id do
node = nodes[id]
tree
|> unlink_from_parent(node)
|> move_nodes(node.children, node.parent)
|> delete_node(id)
else
:error
end
end
defp unlink_from_parent(tree, %Node{parent: parent}) when parent == :empty or parent == nil, do: tree
defp unlink_from_parent(tree, node) do
parent = tree.nodes[node.parent]
updated_parent = %Node{ parent | children: (parent.children -- [node.id]) }
%__MODULE__{ tree | nodes: Map.put(tree.nodes, node.parent, updated_parent) }
end
defp delete_node(tree, id) do
%__MODULE__{ tree | nodes: Map.delete(tree.nodes, id) }
end
@doc ~S"""
Detach a branch of a tree. Returns the detached branch, complete with all its
descendants, as a new tree struct.
## Example
iex> branch = NaryTree.Node.new("Branch Node")
iex> leaf = NaryTree.Node.new("Leaf")
iex> tree = NaryTree.new(NaryTree.Node.new("Root Node")) |>
...> NaryTree.add_child(branch) |>
...> NaryTree.add_child(leaf, branch.id)
iex> detached = NaryTree.detach(tree, branch.id)
iex> Enum.count detached.nodes
2
iex> detached.root
branch.id
"""
def detach(%__MODULE__{} = tree, node_id) when is_binary(node_id) do
if Enum.member? tree, node_id do
root = get(tree, node_id)
new_tree = new root
Enum.reduce root.children, new_tree, fn(child_id, acc) ->
add_all_descendents(acc, root.id, child_id, tree)
end
else
:error
end
end
defp add_all_descendents(tree, parent_id, node_id, old_tree) do
node = get old_tree, node_id
case node.children do
[] ->
add_child(tree, node, parent_id)
_ ->
new_tree = add_child(tree, node, parent_id)
Enum.reduce node.children, new_tree, fn(child_id, acc) ->
add_all_descendents(acc, node.id, child_id, old_tree)
end
end
end
@doc ~S"""
Merges a tree into another tree at the specified node point.
Returns the resulting combined tree or `:error` if the specified
node point doesn't exist.
## Example
iex> branch = NaryTree.Node.new("Branch Node")
iex> tree1 = NaryTree.new(NaryTree.Node.new("Root Node")) |>
...> NaryTree.add_child(branch)
iex> tree2 = NaryTree.new(NaryTree.Node.new("Subtree")) |>
...> NaryTree.add_child(NaryTree.Node.new("Leaf"))
iex> combined = NaryTree.merge(tree1, tree2, branch.id)
iex> Enum.count combined.nodes
4
"""
def merge(%__MODULE__{} = tree,
%__MODULE__{} = branch,
node_id)
when is_binary(node_id) do
if Enum.member? tree, node_id do
node = get(tree, node_id)
updated_node = node |> Map.put(:children, node.children ++ [branch.root])
tree_nodes = Map.put tree.nodes, node_id, updated_node
branch_nodes = branch.nodes
|> Enum.reduce(branch.nodes, fn({id, n}, acc) ->
Map.put acc, id, %Node{ n | level: n.level + node.level + 1 }
end)
|> Map.put(branch.root, %Node{ root(branch) | parent: node.id , level: node.level + 1 })
%__MODULE__{ tree | nodes: Map.merge(tree_nodes, branch_nodes) }
else
:error
end
end
# Familial Relationships
@doc ~S"""
Returns the root node of a tree.
## Example
iex> tree = NaryTree.new(NaryTree.Node.new("Root Node"))
iex> %NaryTree.Node{name: name} = NaryTree.root(tree)
iex> name
"Root Node"
"""
def root(%__MODULE__{} = tree) do
get tree, tree.root
end
@doc ~S"""
Returns the children nodes of a tree.
"""
def children(%Node{} = node, %__MODULE__{} = tree) do
Enum.map node.children, &(get tree, &1)
end
@doc ~S"""
Returns the parent node of a node, or `nil` if there is none.
## Example
iex> branch = NaryTree.Node.new("Branch Node")
iex> tree = NaryTree.new(NaryTree.Node.new("Root Node")) |>
...> NaryTree.add_child(branch)
iex> %NaryTree.Node{name: name} = NaryTree.get(tree, branch.id) |> NaryTree.parent(tree)
iex> name
"Root Node"
"""
def parent(%Node{} = node, %__MODULE__{} = tree) do
get tree, node.parent
end
@doc ~S"""
Returns the sibling nodes of a node.
"""
def siblings(%Node{} = node, %__MODULE__{} = tree) do
parent(node, tree)
|> children(tree)
|> List.delete(node)
end
@doc ~S"""
Prints a tree in hierarchical fashion.
The second parameter is an optional function that accepts a node as a parameter.
`print_tree` will output the return value of the function for each node in the tree.
## Example
`iex> NaryTree.print_tree tree, fn(x) -> "#{x.name} : #{x.content}" end`
or
`iex> NaryTree.print_tree tree, &("#{&1.name}: #{&1.id}")`
"""
def print_tree(%__MODULE__{} = tree, func \\ fn(x) -> "#{x.name}" end) do
do_print_tree(%Node{} = tree.nodes[tree.root], tree.nodes, func)
end
defp do_print_tree(node, _, _) when is_nil(node), do: raise "Expecting %NaryTree.Node(), found nil."
defp do_print_tree(%Node{children: children} = node, _nodes, func) when children == [] do
IO.puts indent(node.level) <> "- " <> func.(node)
end
defp do_print_tree(%Node{children: children} = node, nodes, func) do
IO.puts indent(node.level) <> "* " <> func.(node)
Enum.each children, fn(child_id) -> do_print_tree(nodes[child_id], nodes, func) end
end
defp indent(n, c \\ " ") do
String.duplicate(c, n*2)
end
@behaviour Access
@spec fetch(__MODULE__.t(), String.t()) :: {:ok, Node.t()} | :error
def fetch(%__MODULE__{nodes: nodes}, id) do
if Map.has_key?(nodes, id), do: {:ok, nodes[id]}, else: :error
end
@doc ~S"""
Invoked in order to access a node in a tree and update it at the same time
## Example
iex> tree = NaryTree.new NaryTree.Node.new "Root"
iex> {old_node, new_tree} = NaryTree.get_and_update tree, tree.root, &({&1, %NaryTree.Node{&1 | content: :not_empty}})
iex> old_node.content
:empty
iex> NaryTree.root(new_tree).content
:not_empty
"""
def get_and_update(%__MODULE__{} = tree, id, fun) when is_function(fun, 1) do
current = get(tree, id)
case fun.(current) do
{get, update} ->
{get, put(tree, id, update)}
:pop ->
{current, delete(tree, id)}
other ->
raise "the given function must return a two-element tuple or :pop, got: #{inspect(other)}"
end
end
@doc ~S"""
Invoked to “pop” the specified node from the tree.
When `id` exists in the tree, it returns a `{node, new_tree}` tuple where `node` is the node that was stored under `id` and `new_tree` is the tree without that node.
When `id` is not present in the tree, it returns `{nil, tree}`.
"""
def pop(tree, id) do
case delete(tree, id) do
%__MODULE__{} = new_tree ->
{get(tree, id), new_tree}
:error -> {nil, tree}
end
end
@doc """
Collects nodes of a tree by using depth-first traversal. Returns a list of `NaryTree.Node` structs
"""
def to_list(%__MODULE__{nodes: nodes} = tree) do
traverse(%Node{} = tree.nodes[tree.root], nodes, [])
|> :lists.reverse()
end
defp traverse(node, _, _) when is_nil(node), do: raise "Expecting %NaryTree.Node(), found nil."
defp traverse(%Node{children: children} = node, _nodes, acc) when children == [] do
[node | acc]
end
defp traverse(%Node{children: children} = node, nodes, acc) do
Enum.reduce children, [node | acc], fn(child_id, accumulator) ->
traverse nodes[child_id], nodes, accumulator
end
end
@doc """
Converts a tree into a hierarchical map with children nodes embedded in an array.
Takes tree as argument, and an optional function. The function takes a node parameter
and should return a map of attributes.
The default function returns
`%{id: node.id, name: node.name, content: node.content, level: node.level, parent: node.parent}`
## Example
iex> tree = NaryTree.new(NaryTree.Node.new("Root")) |>
...> NaryTree.add_child(NaryTree.Node.new("Leaf 1")) |>
...> NaryTree.add_child(NaryTree.Node.new("Leaf 2")) |>
...> NaryTree.to_map( &(%{key: &1.name}) )
%{children: [%{key: "Leaf 1"}, %{key: "Leaf 2"}], key: "Root"}
"""
def to_map(%__MODULE__{nodes: nodes} = tree, func \\ &attr/1) do
node_to_map(%Node{} = nodes[tree.root], tree, func)
end
defp node_to_map(%Node{children: children} = node, _tree, func) when children == [] do
func.(node)
end
defp node_to_map(%Node{} = node, tree, func) do
func.(node)
|> Map.put(:children, Enum.reduce(node.children, [], fn(child_id, accumulator) ->
[node_to_map(__MODULE__.get(tree, child_id), tree, func) | accumulator]
end) |> :lists.reverse()
)
end
defp attr(node) do
%{id: node.id, name: node.name, content: node.content, level: node.level, parent: node.parent}
end
@doc """
Converts a map into a tree
## Example:
iex> tree = NaryTree.from_map %{name: "Root", children: [%{name: "Left"}, %{name: "Right"}]}
iex> Enum.count tree
3
"""
def from_map(%{name: name, content: content} = map), do: tree_from_map map, new(Node.new(name, content))
def from_map(%{name: name} = map), do: tree_from_map map, new(Node.new(name))
defp tree_from_map(%{children: children}, tree) do
Enum.reduce children, tree, fn(child, tree) -> tree_from_map(child, tree.root, tree) end
end
defp tree_from_map(%{}, tree), do: tree
defp tree_from_map(%{children: children} = map, id, acc) do
node = if Map.has_key?(map, :content), do: Node.new(map.name, map.content), else: Node.new(map.name)
t = add_child(acc, node, id)
Enum.reduce children, t, fn(child, tree) -> tree_from_map(child, node.id, tree) end
end
defp tree_from_map(%{} = map, id, acc) do
node = if Map.has_key?(map, :content), do: Node.new(map.name, map.content), else: Node.new(map.name)
add_child(acc, node, id)
end
@doc """
Converts a list of nodes back into a nodes map `%{node1id => %NaryTree.Node{}, node2id => ...}`
"""
def list_to_nodes(list) when is_list(list) do
Enum.reduce list, %{}, fn(node, acc) ->
Map.put_new(acc, node.id, node)
end
end
defimpl Enumerable do
def count(%NaryTree{nodes: nodes}), do: {:ok, Kernel.map_size(nodes)}
@doc """
## TODO
## Examples
iex> r = NaryTree.new NaryTree.Node.new("Root", 3)
...> n = NaryTree.Node.new("Branch", 100)
...> r = NaryTree.add_child r, n
...> Enum.member? r, n.id
true
"""
def member?(%NaryTree{nodes: nodes}, id) do
case Map.has_key? nodes, id do
true -> {:ok, true}
false -> {:ok, false}
end
end
@doc """
## TODO
## Examples
Given an existing tree `tt` whose node content is a map with a `:w` key:
iex> Enum.reduce tt, tt, fn(n, acc) ->
...> p = NaryTree.parent(n, acc)
...> pz = if p, do: p.content.w, else: 0
...> NaryTree.put acc, n.id, %NaryTree.Node{n | content: %{n.content | w: n.content.w + pz}}
...> end
"""
def reduce(%NaryTree{} = tree, acc, f) do
tree
|> NaryTree.to_list()
|> reduce_tree(acc, f)
end
defp reduce_tree(_, {:halt, acc}, _f), do: {:halted, acc}
defp reduce_tree(nodes, {:suspend, acc}, f), do: {:suspended, acc, &reduce_tree(nodes, &1, f)}
defp reduce_tree([], {:cont, acc}, _f), do: {:done, acc}
defp reduce_tree([h | t], {:cont, acc}, f), do: reduce_tree(t, f.(h, acc), f)
def slice(_tree) do
# Returning {:error, __MODULE__} makes Enum fall back to the reduce-based
# default; the module must be this Enumerable implementation, not NaryTree.
{:error, __MODULE__}
end
end
end
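# End-to-end sketch using the public API above:
#
#   root = NaryTree.Node.new("Root")
#   branch = NaryTree.Node.new("Branch")
#
#   tree =
#     NaryTree.new(root)
#     |> NaryTree.add_child(branch)
#     |> NaryTree.add_child(NaryTree.Node.new("Leaf"), branch.id)
#
#   NaryTree.print_tree(tree)
#   # * Root
#   #   * Branch
#   #     - Leaf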
|
lib/nary_tree.ex
| 0.913194
| 0.705573
|
nary_tree.ex
|
starcoder
|
defmodule GrovePi.Poller do
@moduledoc """
A behaviour module for implementing polling on a pin.
The GrovePi.Poller behaviour abstracts polling on a pin. Developers are
responsible for implementing `c:read_value/2` and a module using the
`GrovePi.Trigger` behaviour.
## Example
This example shows implementation of `GrovePi.Potentiometer`. The module
should `use GrovePi.Poller`, specifiying the `:default_trigger` and
`:read_type`. It should have a `c:read_value/2` callback which reads the
desired sensor.
defmodule GrovePi.Potentiometer do
alias GrovePi.Analog
use GrovePi.Poller, default_trigger: GrovePi.Potentiometer.DefaultTrigger,
read_type: Analog.adc_level
def read_value(prefix, pin) do
Analog.read(prefix, pin)
end
end
The requirements for creating the `:default_trigger` are described in
`GrovePi.Trigger`.
"""
@callback read_value(atom, GrovePi.pin) :: any
defmacro __using__([default_trigger: default_trigger, read_type: read_type]) do
quote location: :keep do
use GenServer
@behaviour GrovePi.Poller
@poll_interval 100
alias GrovePi.Registry.Pin
alias GrovePi.Registry.Subscriber
defmodule State do
@moduledoc false
defstruct [:pin,
:trigger_state,
:poll_interval,
:prefix,
:trigger,
:poll_reference,
]
def poll(%State{poll_interval: 0} = state, _pid) do
state
end
def poll(%State{poll_interval: poll_interval} = state, pid) do
reference = Process.send_after(pid, :poll_button, poll_interval)
%{state | poll_reference: reference}
end
def cancel_polling(%State{poll_reference: reference} = state) do
Process.cancel_timer(reference)
%{state | poll_reference: nil}
end
def change_interval(state, interval) do
%{state | poll_interval: interval}
end
end
@doc """
Starts a process linked to the current process.
This is often used to start the process as part of a supervision tree.
## Options
* `:poll_interval` - The time in ms between polls for state. If set to 0,
polling will be turned off. Default: `100`
* `:trigger` - This is used to pass in a trigger to use for triggering
events. See specific poller for defaults
* `:trigger_opts` - This is used to pass options to a trigger's `init/1`. The default is `[]`
"""
@spec start_link(GrovePi.pin) :: Supervisor.on_start
def start_link(pin, opts \\ []) do
poll_interval = Keyword.get(opts, :poll_interval, @poll_interval)
trigger = Keyword.get(opts, :trigger, unquote(default_trigger))
trigger_opts = Keyword.get(opts, :trigger_opts, [])
prefix = Keyword.get(opts, :prefix, Default)
opts = Keyword.put(opts, :name, Pin.name(prefix, pin))
GenServer.start_link(__MODULE__,
[pin,
poll_interval,
prefix,
trigger,
trigger_opts],
opts
)
end
def init([pin, poll_interval, prefix, trigger, trigger_opts]) do
{:ok, trigger_state} = trigger.init(trigger_opts)
state = %State{
pin: pin,
poll_interval: poll_interval,
prefix: prefix,
trigger: trigger,
trigger_state: trigger_state,
}
state_with_poll_reference = schedule_poll(state)
{:ok, state_with_poll_reference}
end
@doc """
Stops polling immediately
"""
@spec stop_polling(GrovePi.pin, atom) :: :ok
def stop_polling(pin, prefix \\ Default) do
GenServer.cast(Pin.name(prefix, pin), {:change_polling, 0})
end
@doc """
Stops the current scheduled polling event and starts a new one with
the new interval.
"""
@spec change_polling(GrovePi.pin, integer, atom) :: :ok
def change_polling(pin, interval, prefix \\ Default) do
GenServer.cast(Pin.name(prefix, pin), {:change_polling, interval})
end
@doc """
Read the value from the specified pin.
"""
@spec read(GrovePi.pin, atom) :: unquote(read_type)
def read(pin, prefix \\ Default) do
GenServer.call(Pin.name(prefix, pin), :read)
end
@doc """
Subscribes the current process to an event.
"""
@spec subscribe(GrovePi.pin, GrovePi.Trigger.event, atom)
:: {:ok, pid} | {:error, {:already_registered, pid}}
def subscribe(pin, event, prefix \\ Default) do
Subscriber.subscribe(prefix, {pin, event})
end
def handle_cast({:change_polling, interval}, state) do
new_state = state
|> State.cancel_polling
|> State.change_interval(interval)
|> State.poll(self())
{:noreply, new_state}
end
def handle_call(:read, _from, state) do
{value, new_state} = update_value(state)
{:reply, value, new_state}
end
def handle_info(:poll_button, state) do
# Keep the state returned by schedule_poll/1 so the new poll_reference
# is not lost (cancel_polling/1 would otherwise cancel a stale timer).
{_, new_state} = update_value(state)
{:noreply, schedule_poll(new_state)}
end
@spec update_value(%State{}) :: {any, %State{}}
defp update_value(state) do
with value <- read_value(state.prefix, state.pin),
trigger = {_, trigger_state} <-
state.trigger.update(value, state.trigger_state),
:ok <- notify(trigger, state.prefix, state.pin),
do: {value, %{state | trigger_state: trigger_state}}
end
defp notify({:ok, _}, _, _) do
:ok
end
defp notify({event, trigger_state}, prefix, pin) do
Subscriber.notify_change(prefix, {pin, event, trigger_state})
end
defp schedule_poll(state) do
State.poll(state, self())
end
end
end
end
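# Consumer sketch — assumes a poller module built with this behaviour (e.g.
# the GrovePi.Potentiometer from the moduledoc) and that its default trigger
# emits a :changed event (an assumption; check the trigger you use):
#
#   {:ok, _pid} = GrovePi.Potentiometer.start_link(0, poll_interval: 500)
#   GrovePi.Potentiometer.subscribe(0, :changed)
#   # The subscribing process then receives a message for each trigger event.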
|
lib/grovepi/poller.ex
| 0.906288
| 0.724529
|
poller.ex
|
starcoder
|
defmodule Plug.BasicAuth do
@moduledoc """
Functionality for providing Basic HTTP authentication.
It is recommended to only use this module in production
if SSL is enabled and enforced. See `Plug.SSL` for more
information.
## High-level usage
If you have a single username and password, you can use
the `basic_auth/2` plug:
import Plug.BasicAuth
plug :basic_auth, username: "hello", password: "<PASSWORD>"
Or if you would rather put those in a config file:
# lib/your_app.ex
import Plug.BasicAuth
plug :basic_auth, Application.compile_env(:my_app, :basic_auth)
# config/config.exs
config :my_app, :basic_auth, username: "hello", password: "<PASSWORD>"
When the user first accesses the page, the request will be denied
with status 401 and the request is halted. The browser will then
prompt the user for a username and password. If they match, the
request succeeds.
Both approaches shown above rely on static configuration. In the next section
we will explore using lower level API for a more dynamic solution where the
credentials might be stored in a database, environment variables etc.
## Low-level usage
If you want to provide your own authentication logic on top of Basic HTTP
auth, you can use the low-level functions. As an example, we define `:auth`
plug that extracts username and password from the request headers, compares
them against the database, and either assigns a `:current_user` on success
or responds with an error on failure.
plug :auth
defp auth(conn, _opts) do
with {user, pass} <- Plug.BasicAuth.parse_basic_auth(conn),
%User{} = user <- MyApp.Accounts.find_by_username_and_password(user, pass) do
assign(conn, :current_user, user)
else
_ -> conn |> Plug.BasicAuth.request_basic_auth() |> halt()
end
end
Keep in mind that:
* The supplied `user` and `pass` may be empty strings;
* If you are comparing the username and password with existing strings,
do not use `==/2`. Use `Plug.Crypto.secure_compare/2` instead.
"""
import Plug.Conn
@doc """
Higher level usage of Basic HTTP auth.
See the module docs for examples.
## Options
* `:username` - the expected username
* `:password` - the expected password
* `:realm` - the authentication realm. The value is not fully
sanitized, so do not accept user input as the realm and use
strings with only alphanumeric characters and space
"""
def basic_auth(conn, options \\ []) do
username = Keyword.fetch!(options, :username)
password = Keyword.fetch!(options, :password)
with {request_username, request_password} <- parse_basic_auth(conn),
valid_username? = Plug.Crypto.secure_compare(username, request_username),
valid_password? = Plug.Crypto.secure_compare(password, request_password),
true <- valid_username? and valid_password? do
conn
else
_ -> conn |> request_basic_auth(options) |> halt()
end
end
@doc """
Parses the request username and password from Basic HTTP auth.
It returns either `{user, pass}` or `:error`. Note the username
and password may be empty strings. When comparing the username
and password with the expected values, be sure to use
`Plug.Crypto.secure_compare/2`.
See the module docs for examples.
"""
def parse_basic_auth(conn) do
with ["Basic " <> encoded_user_and_pass] <- get_req_header(conn, "authorization"),
{:ok, decoded_user_and_pass} <- Base.decode64(encoded_user_and_pass),
[user, pass] <- :binary.split(decoded_user_and_pass, ":") do
{user, pass}
else
_ -> :error
end
end
@doc """
Encodes a basic authentication header.
This can be used during tests:
put_req_header(conn, "authorization", encode_basic_auth("hello", "world"))
"""
def encode_basic_auth(user, pass) when is_binary(user) and is_binary(pass) do
"Basic " <> Base.encode64("#{user}:#{pass}")
end
@doc """
Requests basic authentication from the client.
It sets the response to status 401 with "Unauthorized" as body.
The response is not sent though (nor the connection is halted),
allowing developers to further customize it.
## Options
* `:realm` - the authentication realm. The value is not fully
sanitized, so do not accept user input as the realm and use
strings with only alphanumeric characters and space
"""
def request_basic_auth(conn, options \\ []) when is_list(options) do
realm = Keyword.get(options, :realm, "Application")
escaped_realm = String.replace(realm, "\"", "")
conn
|> put_resp_header("www-authenticate", "Basic realm=\"#{escaped_realm}\"")
|> resp(401, "Unauthorized")
end
end
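# Test sketch — exercising the parser with Plug.Test:
#
#   conn =
#     Plug.Test.conn(:get, "/")
#     |> Plug.Conn.put_req_header(
#       "authorization",
#       Plug.BasicAuth.encode_basic_auth("hello", "world")
#     )
#
#   Plug.BasicAuth.parse_basic_auth(conn)
#   #=> {"hello", "world"}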
|
lib/plug/basic_auth.ex
| 0.809991
| 0.445168
|
basic_auth.ex
|
starcoder
|
defmodule Verk.Manager do
@moduledoc """
A process that manages the state of each started queue
"""
use GenServer
require Logger
alias Verk.WorkersManager
@table :verk_manager
@ets_options [:ordered_set, :named_table, :public, read_concurrency: true]
@doc false
def start_link(queues), do: GenServer.start_link(__MODULE__, queues, name: __MODULE__)
@doc false
def init(queues) do
ets = :ets.new(@table, @ets_options)
for {queue, size} <- queues do
Verk.QueueList.add(queue)
:ets.insert_new(@table, {queue, size, :running})
end
{:ok, ets}
end
@doc """
Returns the current status of each queue, e.g.:
[{:default, 25, :paused}, {:low_priority, 10, :running}]
"""
@spec status :: [{atom, pos_integer, atom}]
def status, do: :ets.tab2list(@table)
@doc """
Returns the current status of the given queue: `:running` or `:paused`.
"""
@spec status(atom) :: :running | :paused
def status(queue) do
[{^queue, _, queue_status}] = :ets.lookup(@table, queue)
queue_status
end
@spec pause(atom) :: boolean
def pause(queue) do
if :ets.update_element(@table, queue, {3, :paused}) do
WorkersManager.pause(queue)
true
else
false
end
end
@spec resume(atom) :: boolean
def resume(queue) do
if :ets.update_element(@table, queue, {3, :running}) do
WorkersManager.resume(queue)
true
else
false
end
end
@doc """
Adds the `queue`, running with `size` workers.
It always returns the result of starting the queue's child process.
"""
@spec add(atom, pos_integer) :: Supervisor.on_start_child()
def add(queue, size) do
unless :ets.insert_new(@table, {queue, size, :running}) do
Logger.error("Queue #{queue} is already running")
end
Verk.QueueList.add(queue)
Verk.Manager.Supervisor.start_child(queue, size)
end
@doc """
Removes the `queue`.
Returns `:ok` if successful and `{:error, :not_found}` otherwise.
"""
@spec remove(atom) :: :ok | {:error, :not_found}
def remove(queue) do
:ets.delete(@table, queue)
Verk.Manager.Supervisor.stop_child(queue)
end
end
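# Usage sketch:
#
#   Verk.Manager.status()          #=> [{:default, 25, :running}, ...]
#   Verk.Manager.pause(:default)   #=> true
#   Verk.Manager.status(:default)  #=> :paused
#   Verk.Manager.resume(:default)  #=> true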
|
lib/verk/manager.ex
| 0.738952
| 0.452899
|
manager.ex
|
starcoder
|
defmodule GenWorker.State do
@moduledoc """
Worker configuration state and its validation.
"""
alias GenWorker.{Error, State}
@default_run_each [days: 1]
@default_run_at [{:microsecond, {1, 0}}]
@type run_at_options() :: [
date: Timex.Types.date(),
year: Timex.Types.year(),
month: Timex.Types.month(),
day: Timex.Types.day(),
hour: Timex.Types.hour(),
minute: Timex.Types.minute(),
second: Timex.Types.second(),
microsecond: Timex.Types.microsecond()
]
@typep run_each_options :: [
microseconds: integer(),
milliseconds: integer(),
seconds: integer(),
minutes: integer(),
hours: integer(),
days: integer(),
weeks: integer(),
months: integer(),
years: integer()
]
@type t :: %__MODULE__{
run_at: run_at_options(),
run_each: run_each_options(),
caller: atom(),
timezone: Timex.TimezoneInfo.t(),
last_called_at: DateTime.t(),
worker_args: term()
}
@type options :: [
run_at: run_at_options() | %{(binary() | atom()) => run_at_options()},
run_each: run_each_options()
]
defstruct [:run_at, :caller, :run_each, :last_called_at, :timezone, :worker_args]
@doc """
Init state structure and validate
"""
@spec init!(options) :: State.t() | no_return() | Exception.t()
def init!(options) do
%State{
caller: options[:caller],
run_at: validate_run_at!(options[:run_at]),
run_each: validate_run_each!(options[:run_each]),
timezone: validate_timezone!(options[:timezone]),
worker_args: options[:worker_args]
}
end
defp validate_run_at!(run_at) when is_nil(run_at),
do: %{"default" => @default_run_at}
defp validate_run_at!(run_at) when is_list(run_at),
do: %{"default" => run_at_validator!(run_at)}
defp validate_run_at!(run_at) when is_map(run_at) do
run_at |> Map.values() |> Enum.each(&run_at_validator!/1)
run_at
end
defp run_at_validator!(run_at) do
case Timex.set(Timex.now(), run_at) do
%DateTime{} ->
Keyword.put_new(run_at, :microsecond, {1, 0})
{:error, {:bad_option, bad_option}} ->
raise Error, "Error invalid `#{bad_option}` run_at option."
{:error, reason} ->
raise Error, "Error invalid run_at option: #{inspect(reason)}"
end
end
defp validate_run_each!(nil) do
@default_run_each
end
defp validate_run_each!(run_each) do
case Timex.shift(Timex.now(), run_each) do
%DateTime{} ->
run_each
{:error, {:invalid_shift, bad_option}} ->
raise Error, "Error invalid `#{inspect(bad_option)}` run_each option."
{:error, reason} ->
raise Error, "Error invalid run_each option: #{inspect(reason)}"
end
end
defp validate_timezone!(nil) do
Application.get_env(:gen_worker, :timezone, :utc)
end
defp validate_timezone!(timezone) do
if not Timex.is_valid_timezone?(timezone) do
raise Error, "Error invalid `#{timezone}` timezone."
end
timezone
end
end
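# Configuration sketch — options as validated by init!/1 above (the caller
# module and worker_args are assumptions):
#
#   GenWorker.State.init!(
#     caller: MyApp.Worker,
#     run_at: [hour: 13, minute: 59],
#     run_each: [days: 1],
#     timezone: "America/Chicago",
#     worker_args: []
#   )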
|
lib/gen_worker/state.ex
| 0.759582
| 0.484136
|
state.ex
|
starcoder
|
defmodule Mix.Tasks.Compile.Boundary do
# credo:disable-for-this-file Credo.Check.Readability.Specs
use Boundary, deps: [Boundary]
use Mix.Task.Compiler
@moduledoc """
Verifies cross-module function calls according to defined boundaries.
This compiler reports all cross-boundary function calls which are not permitted, according to
the current definition of boundaries. For details on defining boundaries, see the docs for the
`Boundary` module.
## Usage
Once you have configured the boundaries, you need to include the compiler in `mix.exs`:
```
defmodule MySystem.MixProject do
# ...
def project do
[
compilers: Mix.compilers() ++ [:boundary],
# ...
]
end
# ...
end
```
When developing a library, it's advised to use this compiler only in `:dev` and `:test`
environments:
```
defmodule Boundary.MixProject do
# ...
def project do
[
compilers: Mix.compilers() ++ extra_compilers(Mix.env()),
# ...
]
end
# ...
defp extra_compilers(:prod), do: []
defp extra_compilers(_env), do: [:boundary]
end
```
## Warnings
Every invalid cross-boundary call is reported as a compiler warning. Consider the following example:
```
defmodule MySystem.User do
def auth() do
MySystemWeb.Endpoint.url()
end
end
```
Assuming that calls from `MySystem` to `MySystemWeb` are not allowed, you'll get the following warning:
```
$ mix compile
warning: forbidden call to MySystemWeb.Endpoint.url/0
(calls from MySystem to MySystemWeb are not allowed)
lib/my_system/user.ex:3
```
Since the compiler emits warnings, `mix compile` will still succeed, and you can normally start
your system, even if some boundary rules are violated. The compiler doesn't force you to immediately
fix these violations, which is a deliberate decision made to avoid disrupting the development flow.
At the same time, it's worth enforcing boundaries on the CI. This can easily be done by providing
the `--warnings-as-errors` option to `mix compile`.
"""
@recursive true
@impl Mix.Task.Compiler
def run(argv) do
errors = Boundary.MixCompiler.check()
print_diagnostic_errors(errors)
{status(errors, argv), errors}
end
defp status([], _), do: :ok
defp status([_ | _], argv), do: if(warnings_as_errors?(argv), do: :error, else: :ok)
defp warnings_as_errors?(argv) do
{parsed, _argv, _errors} = OptionParser.parse(argv, strict: [warnings_as_errors: :boolean])
Keyword.get(parsed, :warnings_as_errors, false)
end
defp print_diagnostic_errors(errors) do
if errors != [], do: IO.puts("")
Enum.each(errors, &print_diagnostic_error/1)
end
defp print_diagnostic_error(error) do
Mix.shell().info([severity(error.severity), error.message, location(error)])
end
defp location(error) do
if error.file != nil and error.file != "" do
pos = if error.position != nil, do: ":#{error.position}", else: ""
"\n #{error.file}#{pos}\n"
else
"\n"
end
end
defp severity(severity), do: [:bright, color(severity), "#{severity}: ", :reset]
defp color(:error), do: :red
defp color(:warning), do: :yellow
end
|
lib/mix/tasks/compile/boundary.ex
| 0.857843
| 0.741931
|
boundary.ex
|
starcoder
|
defmodule Flickrex.Auth do
@moduledoc """
Operations on Flickr Auth.
## Authentication
Certain Flickr methods require authorization from a user account. You must
present an authorization URL to the user, and obtain a verification code that
can be exchanged for access tokens. You can store and re-use the access tokens
without having to repeat the authorization step.
### Manual Verification
{:ok, %{body: request}} = Flickrex.Auth.request_token() |> Flickrex.request()
{:ok, auth_url} =
request.oauth_token
|> Flickrex.Auth.authorize_url()
|> Flickrex.request()
# Open the URL in your browser, authorize the app, and get the verify token
oauth_verifier = "..."
{:ok, %{body: access}} =
request.oauth_token
|> Flickrex.Auth.access_token(request.oauth_token_secret, oauth_verifier)
|> Flickrex.request()
# You can now call methods that require authorization
config = [oauth_token: access.oauth_token, oauth_token_secret: access.oauth_token_secret]
{:ok, resp} = Flickrex.Flickr.Test.login() |> Flickrex.request(config)
### Callback Verification
Specify a callback URL when generating the request token:
opts = [oauth_callback: "https://example.com/check"]
{:ok, %{body: request}} =
opts
|> Flickrex.Auth.request_token()
|> Flickrex.request()
{:ok, auth_url} =
request.oauth_token
|> Flickrex.Auth.authorize_url()
|> Flickrex.request()
Present the `auth_url` to the user and ask them to complete the authorization
process. Save the `request.oauth_token` and the `request.oauth_token_secret`.
After following the `auth_url` and authorizing your app, the user will be
re-directed to:
```sh
https://example.com/check?oauth_token=FOO&oauth_verifier=BAZ
```
The `oauth_token` in the URL query corresponds to the `request.oauth_token`
from the previous step; you will need it to look up the corresponding
`oauth_token_secret`.
{:ok, %{body: access}} =
oauth_token
|> Flickrex.Auth.access_token(oauth_token_secret, oauth_verifier)
|> Flickrex.request()
Finally, save `access.oauth_token` and `access.oauth_token_secret` for this
user, which you can re-use.
## Re-authenticating
Look up the access token and secret you have saved for the user, and use them
to configure a request:
config = [oauth_token: "...", oauth_token_secret: "..."]
{:ok, resp} = Flickrex.Flickr.Test.login() |> Flickrex.request(config)
See [User Authentication](https://www.flickr.com/services/api/auth.oauth.html)
on Flickr for more information.
"""
alias Flickrex.Operation
@doc """
Requests a temporary token to authenticate the user to your application.
## Options
* `oauth_callback` - For web apps, the URL to redirect the user to after
completing the authorization sequence. The URL will include query params
`oauth_token` and `oauth_verifier`. If this option is not set, then
authentication will default to out-of-band verification.
"""
@spec request_token(Keyword.t(String.t())) :: Operation.Auth.RequestToken.t()
defdelegate request_token(opts \\ []), to: Operation.Auth.RequestToken, as: :new
@doc """
Generates a Flickr authorization URL.
Takes an `oauth_token` from `request_token/1`.
## Options
* `perms` - Ask for "read", "write", or "delete" privileges. Overrides the
setting defined in your application's authentication flow.
"""
@spec authorize_url(String.t(), Keyword.t(String.t())) :: Operation.Auth.AuthorizeUrl.t()
defdelegate authorize_url(oauth_token, opts \\ []), to: Operation.Auth.AuthorizeUrl, as: :new
@doc """
Requests an access token from Flickr.
Takes an `oauth_token` and `oauth_token_secret` from `request_token/1`, and an
`oauth_verifier` from an authorizing Flickr account.
"""
@spec access_token(String.t(), String.t(), String.t()) :: Operation.Auth.AccessToken.t()
defdelegate access_token(oauth_token, oauth_token_secret, oauth_verifier),
to: Operation.Auth.AccessToken,
as: :new
end
|
lib/flickrex/auth.ex
| 0.819893
| 0.41834
|
auth.ex
|
starcoder
|
defmodule Mongo.InsertOneResult do
@moduledoc """
The successful result struct of `Mongo.insert_one/4`. Its fields are:
* `:inserted_id` - The id of the inserted document
"""
@type t :: %__MODULE__{
inserted_id: nil | BSON.ObjectId.t
}
defstruct [acknowledged: true, inserted_id: nil]
end
defmodule Mongo.InsertManyResult do
@moduledoc """
The successful result struct of `Mongo.insert_many/4`. Its fields are:
* `:inserted_ids` - The ids of the inserted documents indexed by their order
"""
@type t :: %__MODULE__{
inserted_ids: %{non_neg_integer => BSON.ObjectId.t}
}
defstruct [acknowledged: true, inserted_ids: nil]
end
defmodule Mongo.DeleteResult do
@moduledoc """
The successful result struct of `Mongo.delete_one/4` and `Mongo.delete_many/4`.
Its fields are:
* `:deleted_count` - Number of deleted documents
* `:acknowledged` - Write-concern
"""
@type t :: %__MODULE__{
acknowledged: boolean,
deleted_count: non_neg_integer
}
defstruct [acknowledged: true, deleted_count: 0]
end
defmodule Mongo.UpdateResult do
@moduledoc """
The successful result struct of `Mongo.update_one/5`, `Mongo.update_many/5`
and `Mongo.replace_one/5`. Its fields are:
* `:matched_count` - Number of matched documents
* `:modified_count` - Number of modified documents
* `:upserted_ids` - If the operation was an upsert, the upserted ids
"""
@type t :: %__MODULE__{
acknowledged: boolean,
matched_count: non_neg_integer,
modified_count: non_neg_integer,
upserted_ids: list(BSON.ObjectId.t)
}
defstruct [acknowledged: true, matched_count: 0, modified_count: 0, upserted_ids: []]
end
defmodule Mongo.BulkWriteResult do
@moduledoc """
The successful result struct of `Mongo.BulkWrite.write`. Its fields are:
* `:acknowledged` - Write-concern
* `:matched_count` - Number of matched documents
* `:modified_count` - Number of modified documents
* `:inserted_count` - Number of inserted documents
* `:deleted_count` - Number of deleted documents
* `:upserted_count` - Number of upserted documents
* `:upserted_ids` - If the operation was an upsert, the upserted ids
* `:inserted_ids` - If the operation was an insert, the inserted ids
* `:errors` - If the operation results in an error, the error is collected
"""
@type t :: %__MODULE__{
acknowledged: boolean,
matched_count: non_neg_integer,
modified_count: non_neg_integer,
inserted_count: non_neg_integer,
deleted_count: non_neg_integer,
upserted_count: non_neg_integer,
upserted_ids: list(BSON.ObjectId.t),
inserted_ids: list(BSON.ObjectId.t),
errors: list(map)
}
alias Mongo.BulkWriteResult
defstruct [acknowledged: true,
matched_count: 0,
modified_count: 0,
inserted_count: 0,
deleted_count: 0,
upserted_count: 0,
inserted_ids: [],
upserted_ids: [],
errors: []
]
def insert_result(count, ids, errors) do
ids = Enum.reduce(errors, ids, fn error, ids -> filter_ids(ids, error) end)
%BulkWriteResult{inserted_count: count, inserted_ids: ids, errors: errors}
end
# 11000 is MongoDB's duplicate-key error code; keep only the ids of the
# documents inserted before the failing index.
defp filter_ids(ids, %{"code" => 11000, "index" => index}) do
Enum.take(ids, index)
end
defp filter_ids(ids, _other) do
ids
end
def update_result(matched_count, modified_count, upserted_count, ids, errors) do
%BulkWriteResult{matched_count: matched_count, modified_count: modified_count, upserted_count: upserted_count, upserted_ids: ids, errors: errors}
end
def delete_result(count, errors) do
%BulkWriteResult{deleted_count: count, errors: errors}
end
def error(error) do
%BulkWriteResult{errors: [error]}
end
def empty() do
%BulkWriteResult{}
end
def add(%BulkWriteResult{} = src, %BulkWriteResult{} = dest) do
%BulkWriteResult{acknowledged: src.acknowledged,
matched_count: src.matched_count + dest.matched_count,
modified_count: src.modified_count + dest.modified_count,
inserted_count: src.inserted_count + dest.inserted_count,
deleted_count: src.deleted_count + dest.deleted_count,
upserted_count: src.upserted_count + dest.upserted_count,
inserted_ids: src.inserted_ids ++ dest.inserted_ids,
upserted_ids: src.upserted_ids ++ dest.upserted_ids,
errors: src.errors ++ dest.errors
}
end
def reduce(results, acc) do
Enum.reduce(results, acc, fn x, acc -> BulkWriteResult.add(acc, x) end)
end
def reduce(results) do
reduce(results, %BulkWriteResult{})
end
end
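# Combining partial results (sketch; id_1/id_2 are placeholder ObjectIds):
#
#   alias Mongo.BulkWriteResult
#
#   BulkWriteResult.reduce([
#     BulkWriteResult.insert_result(2, [id_1, id_2], []),
#     BulkWriteResult.delete_result(1, [])
#   ])
#   #=> %BulkWriteResult{inserted_count: 2, deleted_count: 1, ...}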
|
lib/mongo/results.ex
| 0.842102
| 0.59843
|
results.ex
|
starcoder
|
defmodule Crux.Structs.Member do
@moduledoc """
Represents a Discord [Guild Member Object](https://discordapp.com/developers/docs/resources/guild#guild-member-object-guild-member-structure).
Differences compared to the Discord API object:
- `:user` is just the user id
"""
@behaviour Crux.Structs
alias Crux.Structs.{Member, Util}
require Util
Util.modulesince("0.1.0")
defstruct(
user: nil,
nick: nil,
roles: nil,
joined_at: nil,
deaf: nil,
mute: nil,
guild_id: nil
)
Util.typesince("0.1.0")
@type t :: %__MODULE__{
user: Crux.Rest.snowflake(),
nick: String.t() | nil,
roles: MapSet.t(Crux.Rest.snowflake()),
joined_at: String.t(),
deaf: boolean() | nil,
mute: boolean() | nil,
guild_id: Crux.Rest.snowflake() | nil
}
@doc """
Creates a `Crux.Structs.Member` struct from raw data.
> Automatically invoked by `Crux.Structs.create/2`.
"""
@spec create(data :: map()) :: t()
Util.since("0.1.0")
def create(data) do
member =
data
|> Util.atomify()
|> Map.update!(:user, Util.map_to_id())
|> Map.update!(:roles, &MapSet.new(&1, fn role_id -> Util.id_to_int(role_id) end))
|> Map.update(:guild_id, nil, &Util.id_to_int/1)
struct(__MODULE__, member)
end
@doc ~S"""
Converts a `Crux.Structs.Member` into its discord mention format.
## Examples
```elixir
# Without nickname
iex> %Crux.Structs.Member{user: 218348062828003328, nick: nil}
...> |> Crux.Structs.Member.to_mention()
"<@218348062828003328>"
# With nickname
iex> %Crux.Structs.Member{user: 218348062828003328, nick: "weltraum"}
...> |> Crux.Structs.Member.to_mention()
"<@!218348062828003328>"
```
"""
@spec to_mention(user :: Crux.Structs.Member.t()) :: String.t()
Util.since("0.1.1")
def to_mention(%__MODULE__{user: id, nick: nil}), do: "<@#{id}>"
def to_mention(%__MODULE__{user: id}), do: "<@!#{id}>"
defimpl String.Chars, for: Crux.Structs.Member do
@spec to_string(Member.t()) :: String.t()
def to_string(%Member{} = data), do: Member.to_mention(data)
end
end
|
lib/structs/member.ex
| 0.802788
| 0.466724
|
member.ex
|
starcoder
|
defmodule Broker.BrokerServer do
use GenServer
# GenServer state is
# {types, triggers, stats}
# Types is [string]
# Triggers is [{type, source, destination}]
# Stats is %{{type, source} => {enqueued, delivered, dropped}}
def start_link(_opts \\ []) do
state = {[], [], Map.new()}
GenServer.start_link(__MODULE__, state, name: BrokerServer)
end
def init(args) do
{[], [], %{}} = args
{:ok, args}
end
def handle_cast({:add_type, type}, {types, triggers, stats}) do
{:noreply, {Enum.uniq(types ++ [type]), triggers, stats}}
end
# Async handling of an emit of a new event.
# Depending on triggers configured, it will be routed to the correct place.
def handle_cast(
{:emit, event = %{"type" => type, "source" => source}},
{types, triggers, stats}) do
GenServer.cast(BrokerServer, {:add_type, type})
GenServer.cast(Ledger, {:record, event})
# Non linked spawn
IO.puts("BrokerServer:emit(Cast) #{inspect event}")
spawn fn -> Invoker.invoke(triggers, event) end
stats = stats_add_enqueue(stats, type, source)
{:noreply, {types, triggers, stats}}
end
def handle_cast(
{:set_trigger, trigger},
{types, triggers, stats}) do
IO.puts("BrokerServer:set_trigger #{inspect trigger}}")
{:noreply, {types, triggers ++ [trigger], stats}}
end
def handle_cast({:add_dropped, type, source},
{types, triggers, stats}) do
stats = stats_add_dropped(stats, type, source)
{:noreply, {types, triggers, stats}}
end
def handle_cast({:add_delivered, type, source},
{types, triggers, stats}) do
stats = stats_add_delivered(stats, type, source)
{:noreply, {types, triggers, stats}}
end
def handle_call({:list_types}, _from, {types, triggers, stats}) do
{:reply, types, {types, triggers, stats}}
end
def handle_call({:get_stats}, _from, {types, triggers, stats}) do
{:reply, stats, {types, triggers, stats}}
end
def handle_call({:get_triggers}, _from, {types, triggers, stats}) do
{:reply, triggers, {types, triggers, stats}}
end
# Private functions for delivery and helping with accounting.
defp get_stats(stats, type, source) when is_map(stats) do
case Map.get(stats, {type, source}) do
{enqueued, delivered, dropped} -> {enqueued, delivered, dropped}
_ -> {0, 0, 0}
end
end
defp stats_add_enqueue(stats, type, source) when is_map(stats) do
{enqueued, delivered, dropped} = get_stats(stats, type, source)
Map.put(stats, {type, source}, {enqueued + 1, delivered, dropped})
end
defp stats_add_delivered(stats, type, source) when is_map(stats) do
{enqueued, delivered, dropped} = get_stats(stats, type, source)
Map.put(stats, {type, source}, {enqueued, delivered + 1, dropped})
end
defp stats_add_dropped(stats, type, source) when is_map(stats) do
{enqueued, delivered, dropped} = get_stats(stats, type, source)
Map.put(stats, {type, source}, {enqueued, delivered, dropped + 1})
end
end
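# Usage sketch (assumes a Ledger process is registered under that name and an
# Invoker module exists, as referenced above; the event shape is illustrative):
#
#   {:ok, _pid} = Broker.BrokerServer.start_link()
#   GenServer.cast(BrokerServer, {:set_trigger, {"signup", "web", "mailer"}})
#   GenServer.cast(BrokerServer, {:emit, %{"type" => "signup", "source" => "web"}})
#   GenServer.call(BrokerServer, {:list_types})
#   #=> ["signup"]
#   GenServer.call(BrokerServer, {:get_stats})
#   #=> %{{"signup", "web"} => {1, 0, 0}}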
|
broker/lib/broker/brokerserver.ex
| 0.55435
| 0.442817
|
brokerserver.ex
|
starcoder
|
defmodule RDF.NS do
@moduledoc """
`RDF.Namespace`s for fundamental RDF vocabularies.
Namely:
- `RDF.NS.RDF`
- `RDF.NS.RDFS`
- `RDF.NS.OWL`
- `RDF.NS.SKOS`
- `RDF.NS.XSD`
"""
use RDF.Vocabulary.Namespace
# This is needed to ensure that the Turtle compiler is compiled and ready to be used to parse vocabularies.
# Without this we randomly get "unable to detect serialization format" errors depending on the parallel compilation order.
require RDF.Turtle
@vocabdoc """
The RDF vocabulary.
See <https://www.w3.org/TR/rdf11-concepts/>
"""
defvocab RDF,
base_iri: "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
file: "rdf.ttl",
alias: [
Nil: "nil",
LangString: "langString"
]
@vocabdoc """
The RDFS vocabulary.
See <https://www.w3.org/TR/rdf-schema/>
"""
defvocab RDFS,
base_iri: "http://www.w3.org/2000/01/rdf-schema#",
file: "rdfs.ttl"
@vocabdoc """
The OWL vocabulary.
See <https://www.w3.org/TR/owl-overview/>
"""
defvocab OWL,
base_iri: "http://www.w3.org/2002/07/owl#",
file: "owl.ttl"
@vocabdoc """
The SKOS vocabulary.
See <http://www.w3.org/TR/skos-reference/>
"""
defvocab SKOS,
base_iri: "http://www.w3.org/2004/02/skos/core#",
file: "skos.ttl"
@vocabdoc """
The XML Schema datatypes vocabulary.
See <https://www.w3.org/TR/xmlschema11-2/>
"""
defvocab XSD,
base_iri: "http://www.w3.org/2001/XMLSchema#",
terms: ~w[
string
normalizedString
token
language
Name
NCName
ID
IDREF
IDREFS
ENTITY
ENTITIES
NMTOKEN
NMTOKENS
boolean
float
double
decimal
integer
long
int
short
byte
nonPositiveInteger
negativeInteger
nonNegativeInteger
positiveInteger
unsignedLong
unsignedInt
unsignedShort
unsignedByte
duration
dayTimeDuration
yearMonthDuration
dateTime
time
date
gYearMonth
gYear
gMonthDay
gDay
gMonth
base64Binary
hexBinary
anyURI
QName
NOTATION
]
end
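# Usage sketch: lowercased vocabulary terms resolve to IRIs (shown with
# RDF.ex's ~I sigil notation):
#
#   RDF.NS.RDFS.subClassOf()
#   #=> ~I<http://www.w3.org/2000/01/rdf-schema#subClassOf>
#   RDF.iri(RDF.NS.XSD.integer)
#   #=> ~I<http://www.w3.org/2001/XMLSchema#integer>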
|
lib/rdf/ns.ex
| 0.83152
| 0.486697
|
ns.ex
|
starcoder
|
defmodule Hologram.Compiler.Transformer do
alias Hologram.Compiler.{Context, Reflection}
alias Hologram.Compiler.{
AccessOperatorTransformer,
AdditionOperatorTransformer,
AliasDirectiveTransformer,
AnonymousFunctionCallTransformer,
AnonymousFunctionTypeTransformer,
BinaryTypeTransformer,
BlockTransformer,
CaseExpressionTransformer,
ConsOperatorTransformer,
DivisionOperatorTransformer,
DotOperatorTransformer,
EqualToOperatorTransformer,
ForExpressionTransformer,
FunctionDefinitionTransformer,
FunctionCallTransformer,
IfExpressionTransformer,
ImportDirectiveTransformer,
LessThanOperatorTransformer,
ListConcatenationOperatorTransformer,
ListSubtractionOperatorTransformer,
ListTypeTransformer,
MacroDefinitionTransformer,
MapTypeTransformer,
MatchOperatorTransformer,
MembershipOperatorTransformer,
ModuleAttributeDefinitionTransformer,
ModuleDefinitionTransformer,
ModuleTypeTransformer,
MultiplicationOperatorTransformer,
NotEqualToOperatorTransformer,
QuoteTransformer,
PipeOperatorTransformer,
RelaxedBooleanAndOperatorTransformer,
RelaxedBooleanNotOperatorTransformer,
RelaxedBooleanOrOperatorTransformer,
RequireDirectiveTransformer,
StrictBooleanAndOperatorTransformer,
StructTypeTransformer,
SubtractionOperatorTransformer,
TypeOperatorTransformer,
TupleTypeTransformer,
UnaryNegativeOperatorTransformer,
UnaryPositiveOperatorTransformer,
UnquoteTransformer,
UseDirectiveTransformer
}
alias Hologram.Compiler.IR.{
AtomType,
BooleanType,
FloatType,
IntegerType,
ModuleAttributeOperator,
ModulePseudoVariable,
NilType,
ProtocolDefinition,
StringType,
Typespec,
Variable
}
# OPERATORS
def transform({{:., _, [Access, :get]}, _, _} = ast, %Context{} = context) do
AccessOperatorTransformer.transform(ast, context)
end
# must be defined before binary addition operator
def transform({:+, _, [_]} = ast, %Context{} = context) do
UnaryPositiveOperatorTransformer.transform(ast, context)
end
def transform({:+, _, _} = ast, %Context{} = context) do
AdditionOperatorTransformer.transform(ast, context)
end
def transform([{:|, _, _}] = ast, %Context{} = context) do
ConsOperatorTransformer.transform(ast, context)
end
def transform({:/, _, _} = ast, %Context{} = context) do
DivisionOperatorTransformer.transform(ast, context)
end
def transform({{:., _, _}, [no_parens: true, line: _], _} = ast, %Context{} = context) do
DotOperatorTransformer.transform(ast, context)
end
def transform({:==, _, _} = ast, %Context{} = context) do
EqualToOperatorTransformer.transform(ast, context)
end
def transform({:<, _, _} = ast, %Context{} = context) do
LessThanOperatorTransformer.transform(ast, context)
end
def transform({:++, _, _} = ast, %Context{} = context) do
ListConcatenationOperatorTransformer.transform(ast, context)
end
def transform({:--, _, _} = ast, %Context{} = context) do
ListSubtractionOperatorTransformer.transform(ast, context)
end
def transform({:=, _, _} = ast, %Context{} = context) do
MatchOperatorTransformer.transform(ast, context)
end
def transform({:in, _, _} = ast, %Context{} = context) do
MembershipOperatorTransformer.transform(ast, context)
end
# must be defined before module attribute operator
def transform({:@, _, [{:spec, _, [{:"::", _, _}]}]}, _) do
%Typespec{}
end
def transform({:@, _, [{name, _, ast}]}, _) when not is_list(ast) do
%ModuleAttributeOperator{name: name}
end
def transform({:*, _, _} = ast, %Context{} = context) do
MultiplicationOperatorTransformer.transform(ast, context)
end
def transform({:!=, _, _} = ast, %Context{} = context) do
NotEqualToOperatorTransformer.transform(ast, context)
end
def transform({:|>, _, _} = ast, %Context{} = context) do
PipeOperatorTransformer.transform(ast, context)
end
def transform({:&&, _, _} = ast, %Context{} = context) do
RelaxedBooleanAndOperatorTransformer.transform(ast, context)
end
def transform({:__block__, _, [{:!, _, _}]} = ast, %Context{} = context) do
RelaxedBooleanNotOperatorTransformer.transform(ast, context)
end
def transform({:!, _, _} = ast, %Context{} = context) do
RelaxedBooleanNotOperatorTransformer.transform(ast, context)
end
def transform({:||, _, _} = ast, %Context{} = context) do
RelaxedBooleanOrOperatorTransformer.transform(ast, context)
end
def transform({:and, _, _} = ast, %Context{} = context) do
StrictBooleanAndOperatorTransformer.transform(ast, context)
end
# must be defined before binary subtraction operator
def transform({:-, _, [_]} = ast, %Context{} = context) do
UnaryNegativeOperatorTransformer.transform(ast, context)
end
def transform({:-, _, _} = ast, %Context{} = context) do
SubtractionOperatorTransformer.transform(ast, context)
end
def transform({:"::", _, _} = ast, %Context{} = context) do
TypeOperatorTransformer.transform(ast, context)
end
# TYPES
def transform({:fn, _, _} = ast, %Context{} = context) do
AnonymousFunctionTypeTransformer.transform(ast, context)
end
def transform(ast, %Context{} = context) when is_atom(ast) and ast not in [nil, false, true] do
if Reflection.module?(ast) do
ModuleTypeTransformer.transform(ast, context)
else
%AtomType{value: ast}
end
end
def transform({:<<>>, _, _} = ast, %Context{} = context) do
BinaryTypeTransformer.transform(ast, context)
end
def transform(ast, _) when is_boolean(ast) do
%BooleanType{value: ast}
end
def transform(ast, _) when is_float(ast) do
%FloatType{value: ast}
end
def transform(ast, _) when is_integer(ast) do
%IntegerType{value: ast}
end
def transform(ast, %Context{} = context) when is_list(ast) do
ListTypeTransformer.transform(ast, context)
end
def transform({:%{}, _, _} = ast, %Context{} = context) do
MapTypeTransformer.transform(ast, context)
end
def transform({:__aliases__, _, _} = ast, %Context{} = context) do
ModuleTypeTransformer.transform(ast, context)
end
def transform(nil, _) do
%NilType{}
end
def transform(ast, _) when is_binary(ast) do
%StringType{value: ast}
end
def transform({:%, _, _} = ast, %Context{} = context) do
StructTypeTransformer.transform(ast, context)
end
def transform({:{}, _, _} = ast, %Context{} = context) do
TupleTypeTransformer.transform(ast, context)
end
def transform({_, _} = ast, %Context{} = context) do
TupleTypeTransformer.transform(ast, context)
end
# DEFINITIONS
def transform({:def, _, _} = ast, %Context{} = context) do
FunctionDefinitionTransformer.transform(ast, context)
end
def transform({:defp, _, _} = ast, %Context{} = context) do
FunctionDefinitionTransformer.transform(ast, context)
end
def transform({:defmacro, _, _} = ast, %Context{} = context) do
MacroDefinitionTransformer.transform(ast, context)
end
def transform({:defmodule, _, _} = ast, _) do
ModuleDefinitionTransformer.transform(ast)
end
# DEFER: implement
def transform({:defprotocol, _, _}, _) do
%ProtocolDefinition{}
end
def transform({:@, _, [{_, _, exprs}]} = ast, %Context{} = context) when is_list(exprs) do
ModuleAttributeDefinitionTransformer.transform(ast, context)
end
# DIRECTIVES
def transform({:alias, _, _} = ast, _) do
AliasDirectiveTransformer.transform(ast)
end
def transform({:import, _, _} = ast, _) do
ImportDirectiveTransformer.transform(ast)
end
def transform({:require, _, _} = ast, _) do
RequireDirectiveTransformer.transform(ast)
end
def transform({:use, _, _} = ast, _) do
UseDirectiveTransformer.transform(ast)
end
# CONTROL FLOW
# must be defined before module function call case
def transform({{:., _, [{_, _, nil}]}, [line: _], _} = ast, %Context{} = context) do
AnonymousFunctionCallTransformer.transform(ast, context)
end
def transform({:case, _, _} = ast, %Context{} = context) do
CaseExpressionTransformer.transform(ast, context)
end
def transform({:for, _, _} = ast, %Context{} = context) do
ForExpressionTransformer.transform(ast, context)
end
def transform({{:., _, _}, _, _} = ast, %Context{} = context) do
FunctionCallTransformer.transform(ast, context)
end
def transform({:if, _, _} = ast, %Context{} = context) do
IfExpressionTransformer.transform(ast, context)
end
# OTHER
def transform({:__block__, _, _} = ast, %Context{} = context) do
BlockTransformer.transform(ast, context)
end
def transform({:quote, _, _} = ast, %Context{} = context) do
QuoteTransformer.transform(ast, context)
end
def transform({:unquote, _, _} = ast, %Context{} = context) do
UnquoteTransformer.transform(ast, context)
end
# must be defined before variable case
def transform({:__MODULE__, _, _}, _) do
%ModulePseudoVariable{}
end
def transform({name, _, nil}, _) when is_atom(name) do
%Variable{name: name}
end
def transform({name, _, module}, _) when is_atom(name) and is_atom(module) do
%Variable{name: name}
end
# must be defined after variable case
def transform({function, _, _} = ast, %Context{} = context) when is_atom(function) do
FunctionCallTransformer.transform(ast, context)
end
end
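# Transformation sketch: quoted Elixir AST in, IR structs out. Assumes a
# default %Hologram.Compiler.Context{} is constructible; the exact IR node
# returned for the sum is indicative only:
#
#   alias Hologram.Compiler.{Context, Transformer}
#   Transformer.transform(quote(do: 1 + 2), %Context{})
#   #=> an addition-operator IR node wrapping two %IntegerType{} nodes
#   Transformer.transform(quote(do: some_var), %Context{})
#   #=> %Hologram.Compiler.IR.Variable{name: :some_var}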
|
lib/hologram/compiler/transformer.ex
| 0.695648
| 0.410018
|
transformer.ex
|
starcoder
|
if Code.ensure_loaded?(Absinthe.Relay) do
defmodule Cqrs.Absinthe.Relay do
@moduledoc """
Macros for `Absinthe.Relay`
## Optional Application Configuration
config :cqrs_tools, :absinthe_relay, repo: Example.Repo
"""
require Logger
alias Cqrs.Guards
alias Cqrs.Absinthe.{Relay, Query}
defmacro __using__(_) do
quote do
import Cqrs.Absinthe.Relay,
only: [
derive_connection: 3,
connection_with_total_count: 1,
connection_with_total_count: 2,
connections_with_total_count: 1
]
end
end
@doc """
Creates an `Absinthe.Relay.Connection` query from a [Query](`Cqrs.Query`).
## Options
* `:repo` - The `Ecto.Repo` to use for the connection. Defaults to the configured repo in `:cqrs_tools, :absinthe_relay`.
* `:repo_fun` - The function of the `:repo` to run. Defaults to `:all`
* `:as` - The name to use for the query. Defaults to the query_module name snake_cased.
* `:only` - Use only the filters listed
* `:except` - Create filters for all except those listed
* `:arg_types` - A list of filter names to absinthe types. See example.
* `:before_resolve` - [Absinthe Middleware](`Absinthe.Middleware`) to run before the resolver.
* `:after_resolve` - [Absinthe Middleware](`Absinthe.Middleware`) to run after the resolver.
* `:parent_mappings` - A keyword list of query filters to functions that receive the field's parent object as an argument.
* `:filter_transforms` - A keyword list of query filters to functions that receive the filter's current value as an argument.
## Example
defmodule ExampleApi.Types.UserTypes do
@moduledoc false
use Cqrs.Absinthe.Relay
use Absinthe.Schema.Notation
use Absinthe.Relay.Schema.Notation, :modern
alias Example.Queries.ListUsers
enum :user_status do
value :active
value :suspended
end
object :user do
field :id, :id
field :name, :string
field :email, :string
field :status, :user_status
derive_connection GetUserFriends, :user,
as: :friends,
repo: Example.Repo,
parent_mappings: [user_id: fn %{id: id} -> id end]
end
connection(node_type: :user)
object :user_queries do
derive_connection ListUsers, :user,
as: :users,
repo: Example.Repo,
arg_types: [status: :user_status]
end
end
"""
defmacro derive_connection(query_module, return_type, opts) do
opts =
opts
|> Keyword.merge(source: query_module, macro: :derive_connection)
|> Macro.escape()
return_type = Macro.escape(return_type)
field =
quote location: :keep do
Guards.ensure_is_query!(unquote(query_module))
Query.create_connection_query(
unquote(query_module),
unquote(return_type),
Keyword.put_new(unquote(opts), :tag?, true)
)
end
Module.eval_quoted(__CALLER__, field)
end
@doc false
def define_connection_with_total_count(node_type, opts \\ []) do
fields = Keyword.get(opts, :do)
quote do
connection node_type: unquote(node_type) do
field :total_count, :integer, resolve: &Relay.resolve_total_count/3
unquote(fields)
edge do
end
end
end
end
@doc false
def resolve_total_count(%{connection_query: query, repo: repo}, _args, _res) do
total_count = repo.aggregate(query, :count, :id)
{:ok, total_count}
end
@doc false
def resolve_total_count(_connection, _args, _res) do
Logger.warn("Requested total_count on a connection that was not created by cqrs_tools.")
{:ok, nil}
end
@doc """
Creates a connection type for each node_type. The connection will contain a `total_count` field.
"""
defmacro connection_with_total_count(node_type, opts \\ []) when is_atom(node_type) do
define_connection_with_total_count(node_type, opts)
end
@doc """
Creates a connection type for each node_type. Each connection will contain a `total_count` field.
"""
defmacro connections_with_total_count(node_types) when is_list(node_types) do
Enum.map(node_types, &define_connection_with_total_count/1)
end
end
end
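# Usage sketch (inside an Absinthe schema/types module; the node types are
# illustrative):
#
#   use Cqrs.Absinthe.Relay
#
#   # a single connection type carrying a total_count field:
#   connection_with_total_count :user
#
#   # or several node types at once:
#   connections_with_total_count [:user, :post]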
|
lib/cqrs/absinthe/relay.ex
| 0.823931
| 0.428891
|
relay.ex
|
starcoder
|
defmodule URL do
@moduledoc """
Functions for parsing URLs
This module provides functions for parsing URLs. It is modelled on
Elixir's `URI` module but will also parse scheme-specific URIs such
as [geo](https://tools.ietf.org/rfc/rfc5870), [data](https://tools.ietf.org/html/rfc2397),
[tel](https://tools.ietf.org/html/rfc3966), [mailto](https://tools.ietf.org/html/rfc2047),
and [uuid](https://tools.ietf.org/html/draft-kindel-uuid-uri-00).
"""
@type uri_type :: nil | URL.Data.t() | URL.Geo.t() | URL.Tel.t() | URL.UUID.t() | URL.Mailto.t()
defstruct scheme: nil,
path: nil,
query: nil,
fragment: nil,
authority: nil,
userinfo: nil,
host: nil,
port: nil,
parsed_path: nil
@type t() :: %__MODULE__{
authority: nil | binary(),
fragment: nil | binary(),
host: nil | binary(),
path: nil | binary(),
port: nil | :inet.port_number(),
query: nil | binary(),
scheme: nil | binary(),
userinfo: nil | binary(),
parsed_path: uri_type()
}
@supported_schemes %{
"tel" => URL.Tel,
"data" => URL.Data,
"geo" => URL.Geo,
"mailto" => URL.Mailto,
"uuid" => URL.UUID,
"urn" => URL.UUID
}
import URL.ParseHelpers.Core, only: [structify: 2]
import NimbleParsec
import URL.ParseHelpers.{Core, Mailto, Params, Unwrap}
@doc """
Parses a URL and returns a %URL{} struct that
has the same shape as Elixir's %URI{} with the
addition of the `parsed_path` key.
## Example
iex> URL.parse("geo:48.198634,-16.371648,3.4;crs=wgs84;u=40.0")
%URL{
authority: nil,
fragment: nil,
host: nil,
parsed_path: %URL.Geo{
alt: 3.4,
lat: 48.198634,
lng: -16.371648,
params: %{"crs" => "wgs84", "u" => 40.0}
},
path: "48.198634,-16.371648,3.4;crs=wgs84;u=40.0",
port: nil,
query: nil,
scheme: "geo",
userinfo: nil
}
"""
@spec parse(url :: binary()) :: __MODULE__.t()
def parse(url) when is_binary(url) do
url
|> parse_scheme
|> merge_uri
end
@doc """
Parses a URL query string and percent-decodes it.
## Returns
* Either a map of query params or
* an `{:error, {URL.Parser.ParseError, reason}}` tuple
## Examples
iex> URL.parse_query_string "url=http%3a%2f%2ffonzi.com%2f&name=Fonzi&mood=happy&coat=leather"
%{
"coat" => "leather",
"mood" => "happy",
"name" => "Fonzi",
"url" => "http://fonzi.com/"
}
iex> mailto = "mailto:user@%E7%B4%8D%E8%B1%86.example.org?subject=Test&body=NATTO"
iex> URL.parse(mailto) |> URL.parse_query_string
%{"body" => "NATTO", "subject" => "Test"}
"""
@spec parse_query_string(String.t() | map()) :: map() | {:error, {module(), binary()}}
def parse_query_string(query) when is_binary(query) do
with {:ok, [params]} <- unwrap(parse_query(query)) do
params
end
end
def parse_query_string({:error, {_, _}} = error) do
error
end
def parse_query_string(%{query: query}) do
parse_query_string(query)
end
@doc false
def parse_query(nil) do
{:ok, [%{}], "", %{}, {0, 0}, 0}
end
defparsec :parse_query,
optional(hfields())
defdelegate to_string(url), to: URI
defp parse_scheme(url) when is_binary(url) do
url
|> URI.parse
|> parse_scheme
end
for {scheme, module} <- @supported_schemes do
defp parse_scheme(%URI{scheme: unquote(scheme)} = uri) do
{uri, unquote(module).parse(uri)}
end
end
defp parse_scheme(%URI{} = uri) do
{uri, nil}
end
defp merge_uri({uri, parsed_path}) do
uri
|> Map.to_list
|> Enum.map(&__MODULE__.trim/1)
|> structify(__MODULE__)
|> add_parsed_path(parsed_path)
end
defp add_parsed_path(url, parsed_path) do
Map.put(url, :parsed_path, parsed_path)
end
@doc false
def trim({key, item}) when is_binary(item) do
{key, String.trim(item)}
end
def trim(other) do
other
end
end
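# Usage sketch: scheme-specific URLs gain a parsed_path struct, while other
# schemes parse like URI (the %URL.Tel{} shape below is indicative only):
#
#   URL.parse("tel:+1-816-555-1212").parsed_path
#   #=> %URL.Tel{tel: "+18165551212", params: %{}}
#   URL.parse("https://elixir-lang.org").parsed_path
#   #=> nil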
|
lib/url.ex
| 0.84792
| 0.659367
|
url.ex
|
starcoder
|
defmodule Jerboa.Client do
@moduledoc """
STUN client process
Use `start/1` function to spawn new client process:
iex> Jerboa.Client.start server: {server_ip, server_port}
{:ok, #PID<...>}
(see `start/1` for configuration options)
## Basic usage
### Requesting server reflexive address
`bind/1` issues a Binding request to a server and returns the reflexive IP address and port.
If the returned message is not a valid STUN message or doesn't include an XOR-MAPPED-ADDRESS
attribute, the client simply crashes.
iex> Jerboa.Client.bind client_pid
{:ok, {{192, 168, 1, 20}, 32780}}
`persist/1` sends a Binding indication to a server, which is not meant to return
any response, but is an attempt to refresh NAT bindings in routers on the path to a server.
Note that this is only an attempt: there is no guarantee that some router on the path
won't rebind the client's inside address and port.
### Creating allocations
Allocation is a logical communication path between one client and multiple peers.
In practice a socket is created on the server, which peers can send data to,
and the server will forward this data to the client. Client can send data to
the server which will forward it to one or more peers.
Refer to [TURN RFC](https://trac.tools.ietf.org/html/rfc5766#section-2)
for a more detailed description.
`allocate/1` is used to request an allocation on the server. On success it returns
an `:ok` tuple, which contains allocated IP address and port number. Jerboa won't
try to request an allocation if it knows that the client already has one.
Note that allocations have an expiration time (the RFC recommends 10 minutes). To refresh
an existing allocation use `refresh/1`.
### Installing permissions
Once the allocation is created, you may install permissions for peers in order to exchange
data with them over relay. Permissions are created using `create_permission/2`:
create_permission client, {192, 168, 22, 111}
create_permission client, [{192, 168, 22, 111}, {212, 168, 33, 222}]
### Sending and receiving data
After permission is installed, you may send and receive data from peer. To send
data you must simply call `send/3`, providing peer's address and data to be sent (a binary):
send client, {{192, 168, 22, 111}, 1234}, "Hello, world!"
Receiving data is handled using subscriptions mechanism. Once you subscribe to the data
(using `subscribe/3`) sent by some peer, it will be delivered to subscribing process
as a message in format:
{:peer_data, client :: pid, peer :: address, data :: binary}
Subscriptions imply that receiving data is asynchronous by default. There is a convenience
`recv/2` function, which will block the calling process until it receives data from the given
peer address. `recv` accepts an optional timeout in milliseconds (or the atom `:infinity`), which
defaults to 5000.
Note that installed permissions do not affect subscriptions - if you subscribe to data from
a peer for which you haven't installed permissions, the data will never appear in the
subscribing process' mailbox.
### Channels
If you're exchanging a lot of data with one of the peers, you might want to use
channels mechanism. Data sent through the channel carries a smaller message
header, so throughput of user data increases. Note that unlike permissions,
channels must be bound to both IP address and specific port number. To learn
more see `open_channel/2`.
## Logging
Client logs progress messages with `:debug` level, so Elixir's Logger needs to
be configured first to see them. It is recommended to allow Jerboa logging metadata,
i.e. `:jerboa_client` and `:jerboa_server`:
config :logger,
level: :debug,
metadata: [:jerboa_client, :jerboa_server]
"""
@type t :: pid
@type port_no :: :inet.port_number
@type ip :: :inet.ip4_address
@type address :: {ip, port_no}
@type start_opts :: [start_opt]
@type start_opt :: {:server, address}
| {:username, String.t}
| {:secret, String.t}
@type allocate_opts :: [allocate_opt]
@type allocate_opt :: {:even_port, boolean}
| {:reserve, boolean}
| {:reservation_token, <<_::64>>}
@type error :: :bad_response
| :no_allocation
| Jerboa.Format.Body.Attribute.ErrorCode.name
alias Jerboa.Client
@doc ~S"""
Starts STUN client process
iex> opts = [server: {{192, 168, 1, 20}, 3478}, username: "user", secret: "abcd"]
iex> Jerboa.Client.start(opts)
{:ok, #PID<...>}
### Options
* `:server` - required - a tuple with server's address and port
* `:username` - required - username used for authentication
* `:secret` - required - secret used for authentication
"""
@spec start(options :: Keyword.t) :: Supervisor.on_start_child
def start(opts) do
Supervisor.start_child(Client.Supervisor, [opts])
end
@doc """
Sends Binding request to a server
Returns reflexive address and port on successful response. Returns
error tuple if response from the server is invalid.
"""
@spec bind(t) :: {:ok, address} | {:error, :bad_response}
def bind(client) do
request(client, :bind).()
end
@doc """
Sends Binding indication to a server
"""
@spec persist(t) :: :ok
def persist(client) do
GenServer.cast(client, :persist)
end
@doc """
Creates allocation on the server or returns relayed transport
address if client already has an allocation
## Options
* `:even_port` - optional - if set to `true`, EVEN-PORT attribute
will be included in the request, which prompts the server to
allocate even port number
* `:reserve` - optional - if set to `true`, prompts the server to allocate
an even port, reserve next highest port number, and return a reservation
token which can be later used to create an allocation on reserved port.
If this option is present, `:even_port` is ignored.
* `:reservation_token` - optional - token returned by previous allocation
request with `reserve: true`. Passing the token should result in reserved
port being assigned to the allocation, or an error if the token is invalid
or the reservation has timed out. If this option is present, `:reserve`
is ignored.
"""
@spec allocate(t) :: {:ok, address} | {:error, error}
@spec allocate(t, allocate_opts) :: {:ok, address} | {:error, error}
def allocate(client, opts \\ []) do
call = request(client, {:allocate, opts})
case call.() do
{:error, :stale_nonce} -> call.()
{:error, :unauthorized} -> call.()
result -> result
end
end
@doc """
Tries to refresh the allocation on the server
"""
@spec refresh(t) :: :ok | {:error, error}
def refresh(client) do
maybe_retry(client, :refresh)
end
@doc """
Creates permissions on the allocation for the given peer
addresses
If permission is already installed for the given address,
the permission will be refreshed.
## Examples
create_permission client, {192, 168, 22, 111}
create_permission client, [{192, 168, 22, 111}, {212, 168, 33, 222}]
"""
@spec create_permission(t, peers :: ip | [ip, ...]) :: :ok | {:error, error}
def create_permission(_client, []), do: :ok
def create_permission(client, peers) when is_list(peers) do
maybe_retry(client, {:create_permission, peers})
end
def create_permission(client, peer), do: create_permission(client, [peer])
@doc """
Sends data to a given peer
Note that there are no guarantees that the data sent reaches
the peer. TURN servers don't acknowledge Send indications.
Returns `{:error, :no_permission}` if there is no permission installed
for the given peer.
"""
@spec send(t, peer :: address, data :: binary)
:: :ok | {:error, :no_permission}
def send(client, peer, data) do
request(client, {:send, peer, data}).()
end
@doc """
Subscribes PID to data received from the given peer
Message format is
{:peer_data, client_pid :: pid, peer :: address, data :: binary}
"""
@spec subscribe(t, sub :: pid, peer_addr :: ip) :: :ok
def subscribe(client, pid, peer_addr) do
request(client, {:subscribe, pid, peer_addr}).()
end
@doc """
Subscribes calling process to data received from the given peer
Message format is
{:peer_data, client_pid :: pid, peer :: address, data :: binary}
"""
@spec subscribe(t, peer_addr :: ip) :: :ok
def subscribe(client, peer_addr) do
subscribe(client, self(), peer_addr)
end
@doc """
Cancels subscription of given PID
"""
@spec unsubscribe(t, sub :: pid, peer_addr :: ip) :: :ok
def unsubscribe(client, pid, peer_addr) do
request(client, {:unsubscribe, pid, peer_addr}).()
end
@doc """
Cancels subscription of calling process
"""
@spec unsubscribe(t, peer_addr :: ip) :: :ok
def unsubscribe(client, peer_addr) do
unsubscribe(client, self(), peer_addr)
end
@doc """
Blocks the calling process until it receives the data from the given
peer
Calling process needs to be subscribed to this peer's data
before calling this function, otherwise it will always time out.
Accepts timeout in milliseconds as optional argument (default is 5000),
may be also atom `:infinity`.
This function simply uses the subscriptions mechanism.
It has no knowledge of the permissions installed for the given
peer, so if there is no permission the function will most likely
time out.
"""
@spec recv(t, peer_addr :: Client.ip)
:: {:ok, peer :: Client.address, data :: binary} | {:error, :timeout}
@spec recv(t, peer_addr :: Client.ip, timeout :: non_neg_integer | :infinity)
:: {:ok, peer :: Client.address, data :: binary} | {:error, :timeout}
def recv(client, peer_addr, timeout \\ 5_000) do
receive do
{:peer_data, ^client, {^peer_addr, _} = peer, data} ->
{:ok, peer, data}
after
timeout ->
{:error, :timeout}
end
end
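# Receive-flow sketch (assumes an allocation exists and a permission has been
# created for the peer; the address is illustrative):
#
#   peer_ip = {192, 168, 22, 111}
#   :ok = Jerboa.Client.subscribe(client, peer_ip)
#   {:ok, peer, data} = Jerboa.Client.recv(client, peer_ip)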
@doc """
Opens or refreshes a channel between client and one of the peers
Once the channel is opened, all communication with the peer will be
done via channel. It results in more efficient communication, because
channels require smaller message headers than STUN messages.
To exchange data via channel, you can use the same `send`, `subscribe`, and
friends API as in the regular TURN communication.
Opening a channel automatically installs a permission for the given peer.
However, note that permissions are valid for 5 minutes after installation,
whereas channels are valid for 10 minutes. It is required to refresh
permissions more often than channels.
"""
@spec open_channel(t, peer :: Client.address)
:: :ok
| {:error, error | :peer_locked | :capacity_reached | :retries_limit_reached}
def open_channel(client, peer) do
request(client, {:open_channel, peer}).()
end
@doc """
Stops the client
"""
@spec stop(t) :: :ok | {:error, error}
when error: :not_found | :simple_one_for_one
def stop(client) do
Supervisor.terminate_child(Client.Supervisor, client)
end
@doc false
@spec format_address(address) :: String.t
def format_address({ip, port}) do
"#{:inet.ntoa(ip)}:#{port}"
end
@spec request(t, term) :: (() -> {:error, error} | term)
defp request(client, req), do: fn -> GenServer.call(client, req) end
@spec maybe_retry(t, request) :: {:error, error} | term when
request: :allocate | :refresh | {:create_permission, [Client.ip, ...]}
defp maybe_retry(client, req) do
call = request(client, req)
case call.() do
{:error, :stale_nonce} ->
call.()
result ->
result
end
end
end
|
lib/jerboa/client.ex
| 0.862569
| 0.497803
|
client.ex
|
starcoder
|
defmodule Exceptional.Normalize do
@moduledoc ~S"""
Normalize values to a consistent exception struct or plain value.
In some ways this can be seen as the opposite of `tagged_tuple`/`ok`.
"""
defmacro __using__(_) do
quote do
import unquote(__MODULE__)
end
end
@doc ~S"""
Normalizes values into exceptions or plain values (no `{:error, _}` tuples).
Some error types may not be detected; you may pass a custom converter.
See more below.
Normal values will simply pass through:
iex> normalize(42)
42
Struct exceptions will also pass straight through:
iex> normalize(%Enum.OutOfBoundsError{message: "out of bounds error"})
%Enum.OutOfBoundsError{message: "out of bounds error"}
This covers the most common tuple error cases (see examples below), but is by
no means exhaustive.
iex> normalize(:error)
%ErlangError{original: nil}
iex> normalize({:error})
%ErlangError{original: nil}
iex> normalize({:error, "boom"})
%ErlangError{original: "boom"}
iex> normalize({:error, {1, 2, 3}})
%ErlangError{original: {1, 2, 3}}
iex> normalize({:error, "boom with stacktrace", ["trace"]})
%ErlangError{original: "boom with stacktrace"}
Some error tuples cannot be detected.
Those cases will be returned as plain values.
iex> normalize({:good, "tuple", ["value"]})
{:good, "tuple", ["value"]}
You may optionally pass a converting function as a second argument.
This allows you to construct a variant of `normalize` that accounts for
some custom error message(s).
iex> {:oh_no, {"something bad happened", %{bad: :thing}}}
...> |> normalize(fn
...> {:oh_no, {message, _}} -> %File.Error{reason: message}
...> {:bang, message} -> %File.CopyError{reason: message}
...> otherwise -> otherwise
...> end)
%File.Error{reason: "something bad happened"}
iex> {:oh_yes, {1, 2, 3}}
...> |> normalize(fn
...> {:oh_no, {message, _}} -> %File.Error{reason: message}
...> {:bang, message} -> %File.CopyError{reason: message}
...> otherwise -> otherwise
...> end)
{:oh_yes, {1, 2, 3}}
"""
@spec normalize(any, fun) :: any
def normalize(error_or_value, conversion_fun \\ fn x -> x end) do
case error_or_value do
:error -> %ErlangError{}
{:error} -> %ErlangError{}
{:error, detail} -> Exception.normalize(:error, detail)
plain = {error_type, status, stacktrace} ->
err = Exception.normalize(error_type, status, stacktrace)
if Exception.exception?(err), do: err, else: plain
{:ok, value} -> value
value -> conversion_fun.(value)
end
end
end
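# Pipeline sketch: normalize collapses the various error shapes to a single
# branch point (some_result and handle_error/1 are hypothetical):
#
#   case normalize(some_result) do
#     %{__exception__: true} = error -> handle_error(error)
#     value -> value
#   end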
|
lib/exceptional/normalize.ex
| 0.916081
| 0.528351
|
normalize.ex
|
starcoder
|
defmodule AWS.EMR do
@moduledoc """
Amazon EMR is a web service that makes it easier to process large amounts of
data efficiently.
Amazon EMR uses Hadoop processing combined with several AWS services to do tasks
such as web indexing, data mining, log file analysis, machine learning,
scientific simulation, and data warehouse management.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "Amazon EMR",
api_version: "2009-03-31",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "elasticmapreduce",
global?: false,
protocol: "json",
service_id: "EMR",
signature_version: "v4",
signing_name: "elasticmapreduce",
target_prefix: "ElasticMapReduce"
}
end
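# Usage sketch (hypothetical credentials and region; the input maps follow the
# EMR JSON API request shapes):
#
#   client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
#   AWS.EMR.list_clusters(client, %{"ClusterStates" => ["RUNNING", "WAITING"]})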
@doc """
Adds an instance fleet to a running cluster.
The instance fleet configuration is available only in Amazon EMR versions 4.8.0
and later, excluding 5.0.x.
"""
def add_instance_fleet(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AddInstanceFleet", input, options)
end
@doc """
Adds one or more instance groups to a running cluster.
"""
def add_instance_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AddInstanceGroups", input, options)
end
@doc """
AddJobFlowSteps adds new steps to a running cluster.
A maximum of 256 steps are allowed in each job flow.
If your cluster is long-running (such as a Hive data warehouse) or complex, you
may require more than 256 steps to process your data. You can bypass the
256-step limitation in various ways, including using SSH to connect to the
master node and submitting queries directly to the software running on the
master node, such as Hive and Hadoop. For more information on how to do this,
see [Add More than 256 Steps to a Cluster](https://docs.aws.amazon.com/emr/latest/ManagementGuide/AddMoreThan256Steps.html)
in the *Amazon EMR Management Guide*.
A step specifies the location of a JAR file stored either on the master node of
the cluster or in Amazon S3. Each step is performed by the main function of the
main class of the JAR file. The main class can be specified either in the
manifest of the JAR or by using the MainFunction parameter of the step.
Amazon EMR executes each step in the order listed. For a step to be considered
complete, the main function must exit with a zero exit code and all Hadoop jobs
started while the step was running must have completed and run successfully.
You can only add steps to a cluster that is in one of the following states:
STARTING, BOOTSTRAPPING, RUNNING, or WAITING.
"""
def add_job_flow_steps(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AddJobFlowSteps", input, options)
end
@doc """
Adds tags to an Amazon EMR resource.
Tags make it easier to associate clusters in various ways, such as grouping
clusters to track your Amazon EMR resource allocation costs. For more
information, see [Tag Clusters](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-tags.html).
"""
def add_tags(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AddTags", input, options)
end
@doc """
Cancels a pending step or steps in a running cluster.
Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0.
A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is
idempotent but asynchronous; it does not guarantee that a step will be canceled,
even if the request is successfully submitted. You can only cancel steps that
are in a `PENDING` state.
"""
def cancel_steps(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CancelSteps", input, options)
end
@doc """
Creates a security configuration, which is stored in the service and can be
specified when a cluster is created.
"""
def create_security_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateSecurityConfiguration", input, options)
end
@doc """
Creates a new Amazon EMR Studio.
"""
def create_studio(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateStudio", input, options)
end
@doc """
Maps a user or group to the Amazon EMR Studio specified by `StudioId`, and
applies a session policy to refine Studio permissions for that user or group.
"""
def create_studio_session_mapping(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateStudioSessionMapping", input, options)
end
@doc """
Deletes a security configuration.
"""
def delete_security_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteSecurityConfiguration", input, options)
end
@doc """
Removes an Amazon EMR Studio from the Studio metadata store.
"""
def delete_studio(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteStudio", input, options)
end
@doc """
Removes a user or group from an Amazon EMR Studio.
"""
def delete_studio_session_mapping(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteStudioSessionMapping", input, options)
end
@doc """
Provides cluster-level details including status, hardware and software
configuration, VPC settings, and so on.
"""
def describe_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeCluster", input, options)
end
@doc """
This API is no longer supported and will eventually be removed.
We recommend you use `ListClusters`, `DescribeCluster`, `ListSteps`,
`ListInstanceGroups` and `ListBootstrapActions` instead.
DescribeJobFlows returns a list of job flows that match all of the supplied
parameters. The parameters can include a list of job flow IDs, job flow states,
and restrictions on job flow creation date and time.
Regardless of supplied parameters, only job flows created within the last two
months are returned.
If no parameters are supplied, then job flows matching either of the following
criteria are returned:
* Job flows created and completed in the last two weeks
* Job flows created within the last two months that are in one of
the following states: `RUNNING`, `WAITING`, `SHUTTING_DOWN`, `STARTING`
Amazon EMR can return a maximum of 512 job flow descriptions.
"""
def describe_job_flows(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeJobFlows", input, options)
end
@doc """
Provides details of a notebook execution.
"""
def describe_notebook_execution(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeNotebookExecution", input, options)
end
@doc """
Provides the details of a security configuration by returning the configuration
JSON.
"""
def describe_security_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeSecurityConfiguration", input, options)
end
@doc """
Provides more detail about the cluster step.
"""
def describe_step(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeStep", input, options)
end
@doc """
Returns details for the specified Amazon EMR Studio including ID, Name, VPC,
Studio access URL, and so on.
"""
def describe_studio(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeStudio", input, options)
end
@doc """
Returns the Amazon EMR block public access configuration for your AWS account in
the current Region.
For more information see [Configure Block Public Access for Amazon EMR](https://docs.aws.amazon.com/emr/latest/ManagementGuide/configure-block-public-access.html)
in the *Amazon EMR Management Guide*.
"""
def get_block_public_access_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetBlockPublicAccessConfiguration", input, options)
end
@doc """
Fetches the attached managed scaling policy for an Amazon EMR cluster.
"""
def get_managed_scaling_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetManagedScalingPolicy", input, options)
end
@doc """
Fetches mapping details for the specified Amazon EMR Studio and identity (user
or group).
"""
def get_studio_session_mapping(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetStudioSessionMapping", input, options)
end
@doc """
Provides information about the bootstrap actions associated with a cluster.
"""
def list_bootstrap_actions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListBootstrapActions", input, options)
end
@doc """
Provides the status of all clusters visible to this AWS account.
Allows you to filter the list of clusters based on certain criteria; for
example, filtering by cluster creation date and time or by status. This call
returns a maximum of 50 clusters per call, but returns a marker to track the
paging of the cluster list across multiple ListClusters calls.
"""
def list_clusters(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListClusters", input, options)
end
@doc """
Lists all available details about the instance fleets in a cluster.
The instance fleet configuration is available only in Amazon EMR versions 4.8.0
and later, excluding 5.0.x versions.
"""
def list_instance_fleets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListInstanceFleets", input, options)
end
@doc """
Provides all available details about the instance groups in a cluster.
"""
def list_instance_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListInstanceGroups", input, options)
end
@doc """
Provides information for all active EC2 instances and EC2 instances terminated
in the last 30 days, up to a maximum of 2,000.
EC2 instances in any of the following states are considered active:
AWAITING_FULFILLMENT, PROVISIONING, BOOTSTRAPPING, RUNNING.
"""
def list_instances(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListInstances", input, options)
end
@doc """
Provides summaries of all notebook executions.
You can filter the list based on multiple criteria such as status, time range,
and editor id. Returns a maximum of 50 notebook executions and a marker to track
the paging of a longer notebook execution list across multiple
`ListNotebookExecution` calls.
"""
def list_notebook_executions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListNotebookExecutions", input, options)
end
@doc """
Lists all the security configurations visible to this account, providing their
creation dates and times, and their names.
This call returns a maximum of 50 clusters per call, but returns a marker to
track the paging of the cluster list across multiple ListSecurityConfigurations
calls.
"""
def list_security_configurations(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListSecurityConfigurations", input, options)
end
@doc """
Provides a list of steps for the cluster in reverse order unless you specify
`stepIds` with the request or filter by `StepStates`.
You can specify a maximum of 10 `stepIDs`.
"""
def list_steps(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListSteps", input, options)
end
@doc """
Returns a list of all user or group session mappings for the Amazon EMR Studio
specified by `StudioId`.
"""
def list_studio_session_mappings(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListStudioSessionMappings", input, options)
end
@doc """
Returns a list of all Amazon EMR Studios associated with the AWS account.
The list includes details such as ID, Studio Access URL, and creation time for
each Studio.
"""
def list_studios(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListStudios", input, options)
end
@doc """
Modifies the number of steps that can be executed concurrently for the cluster
specified using ClusterID.
"""
def modify_cluster(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyCluster", input, options)
end
@doc """
Modifies the target On-Demand and target Spot capacities for the instance fleet
with the specified InstanceFleetID within the cluster specified using ClusterID.
The call either succeeds or fails atomically.
The instance fleet configuration is available only in Amazon EMR versions 4.8.0
and later, excluding 5.0.x versions.
"""
def modify_instance_fleet(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyInstanceFleet", input, options)
end
@doc """
ModifyInstanceGroups modifies the number of nodes and configuration settings of
an instance group.
The input parameters include the new target instance count for the group and the
instance group ID. The call will either succeed or fail atomically.
"""
def modify_instance_groups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyInstanceGroups", input, options)
end
@doc """
Creates or updates an automatic scaling policy for a core instance group or task
instance group in an Amazon EMR cluster.
The automatic scaling policy defines how an instance group dynamically adds and
terminates EC2 instances in response to the value of a CloudWatch metric.
"""
def put_auto_scaling_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutAutoScalingPolicy", input, options)
end
@doc """
Creates or updates an Amazon EMR block public access configuration for your AWS
account in the current Region.
For more information see [Configure Block Public Access for Amazon EMR](https://docs.aws.amazon.com/emr/latest/ManagementGuide/configure-block-public-access.html)
in the *Amazon EMR Management Guide*.
"""
def put_block_public_access_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutBlockPublicAccessConfiguration", input, options)
end
@doc """
Creates or updates a managed scaling policy for an Amazon EMR cluster.
The managed scaling policy defines the limits for resources, such as EC2
instances that can be added or terminated from a cluster. The policy only
applies to the core and task nodes. The master node cannot be scaled after
initial configuration.
"""
def put_managed_scaling_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutManagedScalingPolicy", input, options)
end
@doc """
Removes an automatic scaling policy from a specified instance group within an
EMR cluster.
"""
def remove_auto_scaling_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RemoveAutoScalingPolicy", input, options)
end
@doc """
Removes a managed scaling policy from a specified EMR cluster.
"""
def remove_managed_scaling_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RemoveManagedScalingPolicy", input, options)
end
@doc """
Removes tags from an Amazon EMR resource.
Tags make it easier to associate clusters in various ways, such as grouping
clusters to track your Amazon EMR resource allocation costs. For more
information, see [Tag Clusters](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-tags.html).
The following example removes the stack tag with value Prod from a cluster:
"""
def remove_tags(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RemoveTags", input, options)
end
@doc """
RunJobFlow creates and starts running a new cluster (job flow).
The cluster runs the steps specified. After the steps complete, the cluster
stops and the HDFS partition is lost. To prevent loss of data, configure the
last step of the job flow to store results in Amazon S3. If the
`JobFlowInstancesConfig` `KeepJobFlowAliveWhenNoSteps` parameter is set to
`TRUE`, the cluster transitions to the WAITING state rather than shutting down
after the steps have completed.
For additional protection, you can set the `JobFlowInstancesConfig`
`TerminationProtected` parameter to `TRUE` to lock the cluster and prevent it
from being terminated by API call, user intervention, or in the event of a job
flow error.
A maximum of 256 steps are allowed in each job flow.
If your cluster is long-running (such as a Hive data warehouse) or complex, you
may require more than 256 steps to process your data. You can bypass the
256-step limitation in various ways, including using the SSH shell to connect to
the master node and submitting queries directly to the software running on the
master node, such as Hive and Hadoop. For more information on how to do this,
see [Add More than 256 Steps to a Cluster](https://docs.aws.amazon.com/emr/latest/ManagementGuide/AddMoreThan256Steps.html)
in the *Amazon EMR Management Guide*.
For long running clusters, we recommend that you periodically store your
results.
The instance fleets configuration is available only in Amazon EMR versions 4.8.0
and later, excluding 5.0.x versions. The RunJobFlow request can contain
InstanceFleets parameters or InstanceGroups parameters, but not both.
"""
def run_job_flow(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RunJobFlow", input, options)
end
@doc """
SetTerminationProtection locks a cluster (job flow) so the EC2 instances in the
cluster cannot be terminated by user intervention, an API call, or in the event
of a job-flow error.
The cluster still terminates upon successful completion of the job flow. Calling
`SetTerminationProtection` on a cluster is similar to calling the Amazon EC2
`DisableAPITermination` API on all EC2 instances in a cluster.
`SetTerminationProtection` is used to prevent accidental termination of a
cluster and to ensure that in the event of an error, the instances persist so
that you can recover any data stored in their ephemeral instance storage.
To terminate a cluster that has been locked by setting
`SetTerminationProtection` to `true`, you must first unlock the job flow by a
subsequent call to `SetTerminationProtection` in which you set the value to
`false`.
For more information, see[Managing Cluster Termination](https://docs.aws.amazon.com/emr/latest/ManagementGuide/UsingEMR_TerminationProtection.html)
in the *Amazon EMR Management Guide*.
"""
def set_termination_protection(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SetTerminationProtection", input, options)
end
@doc """
Sets the `Cluster$VisibleToAllUsers` value, which determines whether the cluster
is visible to all IAM users of the AWS account associated with the cluster.
Only the IAM user who created the cluster or the AWS account root user can call
this action. The default value, `true`, indicates that all IAM users in the AWS
account can perform cluster actions if they have the proper IAM policy
permissions. If set to `false`, only the IAM user that created the cluster can
perform actions. This action works on running clusters. You can override the
default `true` setting when you create a cluster by using the
`VisibleToAllUsers` parameter with `RunJobFlow`.
"""
def set_visible_to_all_users(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SetVisibleToAllUsers", input, options)
end
@doc """
Starts a notebook execution.
"""
def start_notebook_execution(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartNotebookExecution", input, options)
end
@doc """
Stops a notebook execution.
"""
def stop_notebook_execution(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopNotebookExecution", input, options)
end
@doc """
TerminateJobFlows shuts a list of clusters (job flows) down.
When a job flow is shut down, any step not yet completed is canceled and the EC2
instances on which the cluster is running are stopped. Any log files not already
saved are uploaded to Amazon S3 if a LogUri was specified when the cluster was
created.
The maximum number of clusters allowed is 10. The call to `TerminateJobFlows` is
asynchronous. Depending on the configuration of the cluster, it may take up to
1-5 minutes for the cluster to completely terminate and release allocated
resources, such as Amazon EC2 instances.
"""
def terminate_job_flows(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TerminateJobFlows", input, options)
end
@doc """
Updates an Amazon EMR Studio configuration, including attributes such as name,
description, and subnets.
"""
def update_studio(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateStudio", input, options)
end
@doc """
Updates the session policy attached to the user or group for the specified
Amazon EMR Studio.
"""
def update_studio_session_mapping(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateStudioSessionMapping", input, options)
end
end
|
lib/aws/generated/emr.ex
| 0.876397
| 0.523664
|
emr.ex
|
starcoder
|
defmodule Sanbase.Cryptocompare.Jobs do
alias Sanbase.Model.Project
# Execute the function until the moved rows are 0, or up to a fixed number of
# iterations (default 200). The iteration cap avoids an infinite loop: if some
# task finishes one job every second, we would otherwise keep getting 1 row
# back and never finish.
@queue Sanbase.Cryptocompare.HistoricalScheduler.queue() |> to_string()
def move_finished_jobs(opts \\ []) do
iterations = Keyword.get(opts, :iterations, 200)
limit = Keyword.get(opts, :limit, 10_000)
count =
1..iterations
|> Enum.reduce_while(0, fn _, rows_count_acc ->
case do_move_completed_jobs(@queue, limit) do
{:ok, 0} -> {:halt, rows_count_acc}
{:ok, rows_count} -> {:cont, rows_count + rows_count_acc}
end
end)
{:ok, count}
end
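# Usage sketch: sweep at most 50 iterations of up to 1_000 rows each
# (typically run from a scheduled job):
#
#   {:ok, moved} = Sanbase.Cryptocompare.Jobs.move_finished_jobs(iterations: 50, limit: 1_000)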
def remove_oban_jobs_unsupported_assets() do
{:ok, oban_jobs_base_assets} = get_oban_jobs_base_assets(@queue)
supported_base_assets =
Project.SourceSlugMapping.get_source_slug_mappings("cryptocompare")
|> Enum.map(&elem(&1, 0))
unsupported_base_assets = oban_jobs_base_assets -- supported_base_assets
Enum.map(unsupported_base_assets, fn base_asset ->
{:ok, _} = delete_not_completed_base_asset_jobs(@queue, base_asset)
end)
end
def get_oban_jobs_base_assets(queue) do
query = """
SELECT distinct(args->>'base_asset') FROM oban_jobs
WHERE queue = $1 AND completed_at IS NULL
"""
{:ok, %{rows: rows}} = Ecto.Adapters.SQL.query(Sanbase.Repo, query, [queue], timeout: 150_000)
{:ok, List.flatten(rows)}
end
# Private functions
defp do_move_completed_jobs(queue, limit) do
# Instead of deleting the records directly from the oban_jobs table, define
# a CTE that selects the needed jobs first so we can put a `limit` on how
# many can be done at once.
# In a second CTE delete those records from the oban_jobs and return them,
# so they can be used to be inserted into the `finished_oban_jobs` table.
# Return the number of affected rows so the caller can stop once it reaches 0.
query = "SELECT moveFinishedObanJobs($1, $2);"
# The affected rows count is returned as a result of the function and should not
# be taken from the `num_rows` field as it is always 1.
{:ok, %{rows: [[affected_rows_count]]}} = Sanbase.Repo.query(query, [queue, limit])
{:ok, affected_rows_count}
end
defp delete_not_completed_base_asset_jobs(queue, base_asset) do
query = """
DELETE FROM oban_jobs
WHERE queue = $1 AND args->>'base_asset' = $2 AND completed_at IS NULL;
"""
{:ok, %{num_rows: num_rows}} =
Ecto.Adapters.SQL.query(Sanbase.Repo, query, [queue, base_asset], timeout: 150_000)
{:ok, %{num_rows: num_rows, base_asset: base_asset}}
end
end
|
lib/sanbase/cryptocompare/jobs.ex
| 0.685423
| 0.426919
|
jobs.ex
|
starcoder
|
defmodule BSV.TxBuilder do
@moduledoc """
A flexible and powerful transaction building module and API.
The TxBuilder accepts inputs and outputs that are modules implementing the
`BSV.Contract` behaviour. This abstraction makes for a succinct and elegant
approach to building transactions. The `BSV.Contract` behaviour is flexible
and can be used to define any kind of locking and unlocking script, not
limited to a handful of standard transactions.
## Examples
Because each input and output is prepared with all the information it needs,
calling `to_tx/1` is all that is needed to build and sign the transaction.
iex> utxo = UTXO.from_params!(%{
...> "txid" => "5e3014372338f079f005eedc85359e4d96b8440e7dbeb8c35c4182e0c19a1a12",
...> "vout" => 0,
...> "satoshis" => 11000,
...> "script" => "76a914538fd179c8be0f289c730e33b5f6a3541be9668f88ac"
...> })
iex>
iex> builder = %TxBuilder{
...> inputs: [
...> P2PKH.unlock(utxo, %{keypair: @keypair})
...> ],
...> outputs: [
...> P2PKH.lock(10000, %{address: @address}),
...> OpReturn.lock(0, %{data: ["hello", "world"]})
...> ]
...> }
iex>
iex> tx = TxBuilder.to_tx(builder)
iex> Tx.to_binary(tx, encoding: :hex)
"0100000001121a9ac1e082415cc3b8be7d0e44b8964d9e3585dcee05f079f038233714305e000000006a47304402200f674ba40b14b8f85b751ad854244a4199008c5b491b076df2eb6c3efd0be4bf022004b48ef0e656ee1873d07cb3b06858970de702f63935df2fbe8816f1a5f15e1e412103f81f8c8b90f5ec06ee4245eab166e8af903fc73a6dd73636687ef027870abe39ffffffff0210270000000000001976a914538fd179c8be0f289c730e33b5f6a3541be9668f88ac00000000000000000e006a0568656c6c6f05776f726c6400000000"
"""
alias BSV.{Address, Contract, Script, Tx, TxIn, TxOut, UTXO, VarInt}
alias BSV.Contract.P2PKH
import BSV.Util, only: [reverse_bin: 1]
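# Default fee rates in satoshis per byte. The `:mine` rates are applied when
# calculating the required transaction fee, while the `:relay` rates are used
# to derive the dust threshold for change outputs.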
@default_rates %{
mine: %{ data: 0.5, standard: 0.5 },
relay: %{ data: 0.25, standard: 0.25 }
}
@default_opts %{
rates: @default_rates,
sort: false
}
defstruct inputs: [],
outputs: [],
change_script: nil,
lock_time: 0,
options: @default_opts
@typedoc "TxBuilder struct"
@type t() :: %__MODULE__{
inputs: list(Contract.t()),
outputs: list(Contract.t()),
change_script: Script.t() | nil,
lock_time: non_neg_integer(),
options: map()
}
@typedoc """
Fee quote
A fee quote is a data structure representing miner fees. It can be a single
number representing satoshis per byte, a map with `:data` and `:standard`
rates, or a map with separate `:mine` and `:relay` rate maps.
"""
@type fee_quote() :: %{
mine: %{
data: number(),
standard: number()
},
relay: %{
data: number(),
standard: number()
},
} | %{
data: number(),
standard: number()
} | number()
@doc """
Adds the given unlocking script contract to the builder.
"""
@spec add_input(t(), Contract.t()) :: t()
def add_input(%__MODULE__{} = builder, %Contract{mfa: {_, :unlocking_script, _}} = input),
do: update_in(builder.inputs, & &1 ++ [input])
@doc """
Adds the given locking script contract to the builder.
"""
@spec add_output(t(), Contract.t()) :: t()
def add_output(%__MODULE__{} = builder, %Contract{mfa: {_, :locking_script, _}} = output),
do: update_in(builder.outputs, & &1 ++ [output])
@doc """
Calculates the required fee for the builder's transaction, optionally using
the given `t:fee_quote/0`.
When different `:data` and `:standard` rates are given, data outputs
(identified by locking scripts beginning with `OP_FALSE OP_RETURN`) are
calculated using the appropriate rate.
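As a rough worked example (derived from the implementation, not a doctest):
an empty builder under the default rates of 0.5 satoshis per byte costs
`ceil(0.5 * 8) + ceil(0.5 * 1) + ceil(0.5 * 1) = 6` satoshis, covering the
version/locktime bytes plus the two VarInt input and output counts.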
"""
@spec calc_required_fee(t(), fee_quote()) :: non_neg_integer()
def calc_required_fee(builder, rates \\ @default_rates)
def calc_required_fee(%__MODULE__{} = builder, rates) when is_number(rates),
do: calc_required_fee(builder, %{data: rates, standard: rates})
def calc_required_fee(%__MODULE__{} = builder, %{mine: rates}),
do: calc_required_fee(builder, rates)
def calc_required_fee(%__MODULE__{inputs: inputs, outputs: outputs}, %{data: _, standard: _} = rates) do
[
{:standard, 4 + 4}, # version & locktime
{:standard, length(inputs) |> VarInt.encode() |> byte_size()},
{:standard, length(outputs) |> VarInt.encode() |> byte_size()}
]
|> Kernel.++(Enum.map(inputs, & calc_script_fee(Contract.to_txin(&1))))
|> Kernel.++(Enum.map(outputs, & calc_script_fee(Contract.to_txout(&1))))
|> Enum.reduce(0, fn {type, bytes}, fee -> fee + ceil(rates[type] * bytes) end)
end
@doc """
Sets the change script on the builder as a P2PKH locking script to the given
address.
"""
@spec change_to(t(), Address.t() | Address.address_str()) :: t()
def change_to(%__MODULE__{} = builder, %Address{} = address) do
script = P2PKH.lock(0, %{address: address})
|> Contract.to_script()
Map.put(builder, :change_script, script)
end
def change_to(%__MODULE__{} = builder, address) when is_binary(address),
do: change_to(builder, Address.from_string!(address))
@doc """
Returns the sum of all inputs defined in the builder.
"""
@spec input_sum(t()) :: integer()
def input_sum(%__MODULE__{inputs: inputs}) do
inputs
|> Enum.map(& &1.subject.txout.satoshis)
|> Enum.sum()
end
@doc """
Returns the sum of all outputs defined in the builder.
"""
@spec output_sum(t()) :: integer()
def output_sum(%__MODULE__{outputs: outputs}) do
outputs
|> Enum.map(& &1.subject)
|> Enum.sum()
end
@doc """
Sorts the TxBuilder inputs and outputs according to [BIP-69](https://github.com/bitcoin/bips/blob/master/bip-0069.mediawiki).
BIP-69 defines deterministic lexicographical indexing of transaction inputs
and outputs.
"""
@spec sort(t()) :: t()
def sort(%__MODULE__{} = builder) do
builder
|> Map.update!(:inputs, fn inputs ->
Enum.sort(inputs, fn %{subject: %UTXO{outpoint: a}}, %{subject: %UTXO{outpoint: b}} ->
{reverse_bin(a.hash), a.vout} < {reverse_bin(b.hash), b.vout}
end)
end)
|> Map.update!(:outputs, fn outputs ->
Enum.sort(outputs, fn a, b ->
script_a = Contract.to_script(a)
script_b = Contract.to_script(b)
{a.subject, Script.to_binary(script_a)} < {b.subject, Script.to_binary(script_b)}
end)
end)
end
@doc """
Builds and returns the signed transaction.
"""
@spec to_tx(t()) :: Tx.t()
def to_tx(%__MODULE__{inputs: inputs, outputs: outputs} = builder) do
builder = if builder.options.sort == true, do: sort(builder), else: builder
tx = struct(Tx, lock_time: builder.lock_time)
# First pass on populating inputs will zero out signatures
tx = Enum.reduce(inputs, tx, fn contract, tx ->
Tx.add_input(tx, Contract.to_txin(contract))
end)
# Create outputs
tx = Enum.reduce(outputs, tx, fn contract, tx ->
Tx.add_output(tx, Contract.to_txout(contract))
end)
# Append change if required
tx = case get_change_txout(builder) do
%TxOut{} = txout ->
Tx.add_output(tx, txout)
_ ->
tx
end
# Second pass on populating inputs with actual sigs
Enum.reduce(Enum.with_index(inputs), tx, fn {contract, vin}, tx ->
txin = contract
|> Contract.put_ctx({tx, vin})
|> Contract.to_txin()
update_in(tx.inputs, & List.replace_at(&1, vin, txin))
end)
end
# Returns change txout if script present and amount exceeds dust threshold
defp get_change_txout(%{change_script: %Script{} = script} = builder) do
change = input_sum(builder) - output_sum(builder)
fee = calc_required_fee(builder, builder.options.rates)
txout = %TxOut{script: script}
extra_fee = ceil(TxOut.get_size(txout) * builder.options.rates.mine.standard)
change = change - (fee + extra_fee)
if change >= dust_threshold(txout, builder.options.rates) do
Map.put(txout, :satoshis, change)
end
end
defp get_change_txout(_builder), do: nil
# Calculates the size of the given TxIn or TxOut
defp calc_script_fee(%TxIn{} = txin) do
{:standard, TxIn.get_size(txin)}
end
defp calc_script_fee(%TxOut{script: script} = txout) do
case script.chunks do
[:OP_FALSE, :OP_RETURN | _chunks] ->
{:data, TxOut.get_size(txout)}
_ ->
{:standard, TxOut.get_size(txout)}
end
end
# Returns the dust threshold of the given txout
# See: https://github.com/bitcoin-sv/bitcoin-sv/blob/master/src/primitives/transaction.h#L188-L208
defp dust_threshold(%TxOut{} = txout, %{relay: rates}),
do: 3 * floor((TxOut.get_size(txout) + 148) * rates.standard)
end
| lib/bsv/tx_builder.ex | 0.922036 | 0.561215 | tx_builder.ex | starcoder |
defmodule ExDiceRoller.Parser do
@moduledoc """
Functionality for parsing `t:ExDiceRoller.Tokenizer.tokens/0`.
iex> {:ok, tokens} = ExDiceRoller.Tokenizer.tokenize("2d3+9-(ydz)d(31+x)/(3d8+2)")
{:ok,
[
{:int, 1, '2'},
{:roll, 1, 'd'},
{:int, 1, '3'},
{:basic_operator, 1, '+'},
{:int, 1, '9'},
{:basic_operator, 1, '-'},
{:"(", 1, '('},
{:var, 1, 'y'},
{:roll, 1, 'd'},
{:var, 1, 'z'},
{:")", 1, ')'},
{:roll, 1, 'd'},
{:"(", 1, '('},
{:int, 1, '31'},
{:basic_operator, 1, '+'},
{:var, 1, 'x'},
{:")", 1, ')'},
{:complex_operator, 1, '/'},
{:"(", 1, '('},
{:int, 1, '3'},
{:roll, 1, 'd'},
{:int, 1, '8'},
{:basic_operator, 1, '+'},
{:int, 1, '2'},
{:")", 1, ')'}
]}
iex> ExDiceRoller.Parser.parse(tokens)
{:ok,
{{:operator, '-'},
{{:operator, '+'}, {:roll, 2, 3}, 9},
{{:operator, '/'},
{:roll, {:roll, {:var, 'y'}, {:var, 'z'}},
{{:operator, '+'}, 31, {:var, 'x'}}},
{{:operator, '+'}, {:roll, 3, 8}, 2}}}}
"""
alias ExDiceRoller.Tokenizer
@type expression ::
number
| {{:operator, charlist}, expression, expression}
| {:roll, expression, expression}
| {:var, charlist}
| {:sep, expression, expression}
@doc """
Parses a series of tokens provided by `tokenize/1` into an expression
structure. This expression structure is what's used by the
dice rolling functions to calculate rolls. The BNF grammar definition
file is located at `src/dice_parser.yrl`.
iex> {:ok, tokens} = ExDiceRoller.tokenize("2d8 + (1+2)")
{:ok,
[
{:int, 1, '2'},
{:roll, 1, 'd'},
{:int, 1, '8'},
{:basic_operator, 1, '+'},
{:"(", 1, '('},
{:int, 1, '1'},
{:basic_operator, 1, '+'},
{:int, 1, '2'},
{:")", 1, ')'}
]}
iex> {:ok, _} = ExDiceRoller.parse(tokens)
{:ok,
{{:operator, '+'}, {:roll, 2, 8},
{{:operator, '+'}, 1, 2}}}
"""
@spec parse(Tokenizer.tokens()) :: {:ok, expression} | {:error, {:token_parsing_failed, term}}
def parse(tokens) do
case :dice_parser.parse(tokens) do
{:ok, _} = resp -> resp
{:error, {_, :dice_parser, reason}} -> {:error, {:token_parsing_failed, reason}}
end
end
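# A hedged sketch of the error branch (the exact reason term depends on the
# yecc-generated :dice_parser module):
#
#   {:error, {:token_parsing_failed, _reason}} =
#     ExDiceRoller.Parser.parse([{:basic_operator, 1, '+'}])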
end
| lib/parser.ex | 0.781414 | 0.442155 | parser.ex | starcoder |
defmodule Resx.Resource.Content do
@moduledoc """
The content of a resource.
%Resx.Resource.Content{
type: ["text/html]",
data: "<p>Hello</p>"
}
"""
alias Resx.Resource.Content
@enforce_keys [:type, :data]
defstruct [:type, :data]
@type acc :: Enumerable.acc
@type result :: Enumerable.result
@type reducer(element) :: (acc, (element, acc -> acc) -> result)
@type mime :: String.t
@type type :: [mime, ...]
@type t :: %Content{
type: type,
data: any
}
@doc """
Retrieve the content data.
By default content streams will be concatenated into a single binary if
all parts are binaries, otherwise it will return a list of the unmodified
parts. This behaviour can be overridden by setting the `:content_combiner`
to a function of type `(Content.Stream.t -> any)`. Valid function formats
are any callback variant, see `Callback` for more information.
config :resx,
content_combiner: fn
%{ type: ["application/x.erlang.etf"|_], data: [data] } -> data
content -> Content.Stream.combine(content, <<>>)
end
To still use the default combiner in your custom combiner, you can pass the
content to `Resx.Resource.Content.Stream.combine(content, <<>>)`.
iex> Resx.Resource.Content.data(%Resx.Resource.Content{ type: [], data: "foo" })
"foo"
iex> Resx.Resource.Content.data(%Resx.Resource.Content.Stream{ type: [], data: ["foo", "bar"] })
"foobar"
iex> Resx.Resource.Content.data(%Resx.Resource.Content.Stream{ type: [], data: ["foo", :bar] })
["foo", :bar]
"""
@spec data(t | Content.Stream.t) :: any
def data(content = %Content.Stream{}) do
Application.get_env(:resx, :content_combiner, &Content.Stream.combine(&1, <<>>))
|> Callback.call([content])
end
def data(content), do: content.data
@doc """
Make some content explicit.
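A sketch of the expected behaviour, following `data/1`'s default combining of
binary stream parts:
iex> Resx.Resource.Content.new(%Resx.Resource.Content.Stream{ type: [], data: ["foo", "bar"] })
%Resx.Resource.Content{ type: [], data: "foobar" }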
"""
@spec new(t | Content.Stream.t) :: t
def new(content), do: %Content{ type: content.type, data: data(content) }
@doc """
Get the reducer for this content.
Returns an enumerable function that will reduce the content into the type
requested.
The default reducers for the different types are:
* `:binary` - returns a reducer that assumes its content is already in binary
form.
Reducers can be overridden by setting the `:content_reducer` to a function
of type `(t | Content.Stream.t, :binary | atom -> reducer)`. Valid function
formats are any callback variant, see `Callback` for more information.
config :resx,
content_reducer: fn
content = %{ type: ["application/x.erlang.etf"|_] }, :binary -> &Enumerable.reduce([:erlang.term_to_binary(Resx.Resource.Content.data(content))], &1, &2)
content, :binary -> &Enumerable.reduce(Resx.Resource.Content.Stream.new(content), &1, &2)
end
The reducer can be passed into an Enum or Stream function.
iex> reduce = Resx.Resource.Content.reducer(%Resx.Resource.Content.Stream{ type: [], data: ["1", "2", "3"] })
...> reduce.({ :cont, "" }, &({ :cont, &2 <> &1 }))
{ :done, "123" }
iex> reduce = Resx.Resource.Content.reducer(%Resx.Resource.Content.Stream{ type: [], data: ["1", "2", "3"] })
...> Enum.into(reduce, "")
"123"
iex> reduce = Resx.Resource.Content.reducer(%Resx.Resource.Content.Stream{ type: [], data: ["1", "2", "3"] })
...> Stream.take(reduce, 2) |> Enum.into("")
"12"
"""
@spec reducer(t | Content.Stream.t, :binary) :: reducer(binary)
@spec reducer(t | Content.Stream.t, atom) :: reducer(term)
def reducer(content, type \\ :binary)
def reducer(content, type) do
Application.get_env(:resx, :content_reducer, fn
content, :binary -> &Enumerable.reduce(Content.Stream.new(content), &1, &2)
end) |> Callback.call([content, type])
end
end
| lib/resx/resource/content.ex | 0.894438 | 0.435181 | content.ex | starcoder |
defmodule AWS.ACMPCA do
@moduledoc """
This is the *ACM Private CA API Reference*. It provides
descriptions, syntax, and usage examples for each of the actions and data
types involved in creating and managing private certificate authorities
(CA) for your organization.
The documentation for each action shows the Query API request parameters
and the XML response. Alternatively, you can use one of the AWS SDKs to
access an API that's tailored to the programming language or platform that
you're using. For more information, see [AWS
SDKs](https://aws.amazon.com/tools/#SDKs).
<note> Each ACM Private CA API action has a quota that determines the
number of times the action can be called per second. For more information,
see [API Rate Quotas in ACM Private
CA](https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaLimits.html#PcaLimits-api)
in the ACM Private CA user guide.
</note>
"""
@doc """
Creates a root or subordinate private certificate authority (CA). You must
specify the CA configuration, the certificate revocation list (CRL)
configuration, the CA type, and an optional idempotency token to avoid
accidental creation of multiple CAs. The CA configuration specifies the
name of the algorithm and key size to be used to create the CA private key,
the type of signing algorithm that the CA uses, and X.500 subject
information. The CRL configuration specifies the CRL expiration period in
days (the validity period of the CRL), the Amazon S3 bucket that will
contain the CRL, and a CNAME alias for the S3 bucket that is included in
certificates issued by the CA. If successful, this action returns the
Amazon Resource Name (ARN) of the CA.
ACM Private CA assets that are stored in Amazon S3 can be protected with
encryption. For more information, see [Encrypting Your
CRLs](https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaCreateCa.html#crl-encryption).
<note> Both PCA and the IAM principal must have permission to write to the
S3 bucket that you specify. If the IAM principal making the call does not
have permission to write to the bucket, then an exception is thrown. For
more information, see [Configure Access to ACM Private
CA](https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaAuthAccess.html).
</note>
"""
def create_certificate_authority(client, input, options \\ []) do
request(client, "CreateCertificateAuthority", input, options)
end
@doc """
Creates an audit report that lists every time that your CA private key is
used. The report is saved in the Amazon S3 bucket that you specify on
input. The
[IssueCertificate](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_IssueCertificate.html)
and
[RevokeCertificate](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_RevokeCertificate.html)
actions use the private key.
<note> Both PCA and the IAM principal must have permission to write to the
S3 bucket that you specify. If the IAM principal making the call does not
have permission to write to the bucket, then an exception is thrown. For
more information, see [Configure Access to ACM Private
CA](https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaAuthAccess.html).
</note> ACM Private CA assets that are stored in Amazon S3 can be
protected with encryption. For more information, see [Encrypting Your Audit
Reports](https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaAuditReport.html#audit-report-encryption).
"""
def create_certificate_authority_audit_report(client, input, options \\ []) do
request(client, "CreateCertificateAuthorityAuditReport", input, options)
end
@doc """
Grants one or more permissions on a private CA to the AWS Certificate
Manager (ACM) service principal (`acm.amazonaws.com`). These permissions
allow ACM to issue and renew ACM certificates that reside in the same AWS
account as the CA.
You can list current permissions with the
[ListPermissions](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListPermissions.html)
action and revoke them with the
[DeletePermission](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DeletePermission.html)
action.
<p class="title"> **About Permissions**
<ul> <li> If the private CA and the certificates it issues reside in the
same account, you can use `CreatePermission` to grant permissions for ACM
to carry out automatic certificate renewals.
</li> <li> For automatic certificate renewal to succeed, the ACM service
principal needs permissions to create, retrieve, and list certificates.
</li> <li> If the private CA and the ACM certificates reside in different
accounts, then permissions cannot be used to enable automatic renewals.
Instead, the ACM certificate owner must set up a resource-based policy to
enable cross-account issuance and renewals. For more information, see
[Using a Resource Based Policy with ACM Private
CA](acm-pca/latest/userguide/pca-rbp.html).
</li> </ul>
"""
def create_permission(client, input, options \\ []) do
request(client, "CreatePermission", input, options)
end
@doc """
Deletes a private certificate authority (CA). You must provide the Amazon
Resource Name (ARN) of the private CA that you want to delete. You can find
the ARN by calling the
[ListCertificateAuthorities](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListCertificateAuthorities.html)
action.
<note> Deleting a CA will invalidate other CAs and certificates below it in
your CA hierarchy.
</note> Before you can delete a CA that you have created and activated, you
must disable it. To do this, call the
[UpdateCertificateAuthority](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_UpdateCertificateAuthority.html)
action and set the **CertificateAuthorityStatus** parameter to `DISABLED`.
Additionally, you can delete a CA if you are waiting for it to be created
(that is, the status of the CA is `CREATING`). You can also delete it if
the CA has been created but you haven't yet imported the signed certificate
into ACM Private CA (that is, the status of the CA is
`PENDING_CERTIFICATE`).
When you successfully call
[DeleteCertificateAuthority](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DeleteCertificateAuthority.html),
the CA's status changes to `DELETED`. However, the CA won't be permanently
deleted until the restoration period has passed. By default, if you do not
set the `PermanentDeletionTimeInDays` parameter, the CA remains restorable
for 30 days. You can set the parameter from 7 to 30 days. The
[DescribeCertificateAuthority](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DescribeCertificateAuthority.html)
action returns the time remaining in the restoration window of a private CA
in the `DELETED` state. To restore an eligible CA, call the
[RestoreCertificateAuthority](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_RestoreCertificateAuthority.html)
action.
"""
def delete_certificate_authority(client, input, options \\ []) do
request(client, "DeleteCertificateAuthority", input, options)
end
@doc """
Revokes permissions on a private CA granted to the AWS Certificate Manager
(ACM) service principal (acm.amazonaws.com).
These permissions allow ACM to issue and renew ACM certificates that reside
in the same AWS account as the CA. If you revoke these permissions, ACM
will no longer renew the affected certificates automatically.
Permissions can be granted with the
[CreatePermission](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreatePermission.html)
action and listed with the
[ListPermissions](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListPermissions.html)
action.
<p class="title"> **About Permissions**
<ul> <li> If the private CA and the certificates it issues reside in the
same account, you can use `CreatePermission` to grant permissions for ACM
to carry out automatic certificate renewals.
</li> <li> For automatic certificate renewal to succeed, the ACM service
principal needs permissions to create, retrieve, and list certificates.
</li> <li> If the private CA and the ACM certificates reside in different
accounts, then permissions cannot be used to enable automatic renewals.
Instead, the ACM certificate owner must set up a resource-based policy to
enable cross-account issuance and renewals. For more information, see
[Using a Resource Based Policy with ACM Private
CA](acm-pca/latest/userguide/pca-rbp.html).
</li> </ul>
"""
def delete_permission(client, input, options \\ []) do
request(client, "DeletePermission", input, options)
end
@doc """
Deletes the resource-based policy attached to a private CA. Deletion will
remove any access that the policy has granted. If there is no policy
attached to the private CA, this action will return successful.
If you delete a policy that was applied through AWS Resource Access Manager
(RAM), the CA will be removed from all shares in which it was included.
The AWS Certificate Manager Service Linked Role that the policy supports is
not affected when you delete the policy.
The current policy can be shown with
[GetPolicy](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_GetPolicy.html)
and updated with
[PutPolicy](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_PutPolicy.html).
<p class="title"> **About Policies**
<ul> <li> A policy grants access on a private CA to an AWS customer
account, to AWS Organizations, or to an AWS Organizations unit. Policies
are under the control of a CA administrator. For more information, see
[Using a Resource Based Policy with ACM Private
CA](acm-pca/latest/userguide/pca-rbp.html).
</li> <li> A policy permits a user of AWS Certificate Manager (ACM) to
issue ACM certificates signed by a CA in another account.
</li> <li> For ACM to manage automatic renewal of these certificates, the
ACM user must configure a Service Linked Role (SLR). The SLR allows the ACM
service to assume the identity of the user, subject to confirmation against
the ACM Private CA policy. For more information, see [Using a Service
Linked Role with
ACM](https://docs.aws.amazon.com/acm/latest/userguide/acm-slr.html).
</li> <li> Updates made in AWS Resource Manager (RAM) are reflected in
policies. For more information, see [Using AWS Resource Access Manager
(RAM) with ACM Private CA](acm-pca/latest/userguide/pca-ram.html).
</li> </ul>
"""
def delete_policy(client, input, options \\ []) do
request(client, "DeletePolicy", input, options)
end
@doc """
Lists information about your private certificate authority (CA) or one that
has been shared with you. You specify the private CA on input by its ARN
(Amazon Resource Name). The output contains the status of your CA. This can
be any of the following:
<ul> <li> `CREATING` - ACM Private CA is creating your private certificate
authority.
</li> <li> `PENDING_CERTIFICATE` - The certificate is pending. You must use
your ACM Private CA-hosted or on-premises root or subordinate CA to sign
your private CA CSR and then import it into PCA.
</li> <li> `ACTIVE` - Your private CA is active.
</li> <li> `DISABLED` - Your private CA has been disabled.
</li> <li> `EXPIRED` - Your private CA certificate has expired.
</li> <li> `FAILED` - Your private CA has failed. Your CA can fail because
of problems such as a network outage or backend AWS failure or other errors. A
failed CA can never return to the pending state. You must create a new CA.
</li> <li> `DELETED` - Your private CA is within the restoration period,
after which it is permanently deleted. The length of time remaining in the
CA's restoration period is also included in this action's output.
</li> </ul>
"""
def describe_certificate_authority(client, input, options \\ []) do
request(client, "DescribeCertificateAuthority", input, options)
end
@doc """
Lists information about a specific audit report created by calling the
[CreateCertificateAuthorityAuditReport](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthorityAuditReport.html)
action. Audit information is created every time the certificate authority
(CA) private key is used. The private key is used when you call the
[IssueCertificate](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_IssueCertificate.html)
action or the
[RevokeCertificate](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_RevokeCertificate.html)
action.
"""
def describe_certificate_authority_audit_report(client, input, options \\ []) do
request(client, "DescribeCertificateAuthorityAuditReport", input, options)
end
@doc """
Retrieves a certificate from your private CA or one that has been shared
with you. The ARN of the certificate is returned when you call the
[IssueCertificate](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_IssueCertificate.html)
action. You must specify both the ARN of your private CA and the ARN of the
issued certificate when calling the **GetCertificate** action. You can
retrieve the certificate if it is in the **ISSUED** state. You can call the
[CreateCertificateAuthorityAuditReport](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthorityAuditReport.html)
action to create a report that contains information about all of the
certificates issued and revoked by your private CA.
"""
def get_certificate(client, input, options \\ []) do
request(client, "GetCertificate", input, options)
end
@doc """
Retrieves the certificate and certificate chain for your private
certificate authority (CA) or one that has been shared with you. Both the
certificate and the chain are base64 PEM-encoded. The chain does not
include the CA certificate. Each certificate in the chain signs the one
before it.
"""
def get_certificate_authority_certificate(client, input, options \\ []) do
request(client, "GetCertificateAuthorityCertificate", input, options)
end
@doc """
Retrieves the certificate signing request (CSR) for your private
certificate authority (CA). The CSR is created when you call the
[CreateCertificateAuthority](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html)
action. Sign the CSR with your ACM Private CA-hosted or on-premises root or
subordinate CA. Then import the signed certificate back into ACM Private CA
by calling the
[ImportCertificateAuthorityCertificate](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ImportCertificateAuthorityCertificate.html)
action. The CSR is returned as a base64 PEM-encoded string.
"""
def get_certificate_authority_csr(client, input, options \\ []) do
request(client, "GetCertificateAuthorityCsr", input, options)
end
@doc """
Retrieves the resource-based policy attached to a private CA. If either the
private CA resource or the policy cannot be found, this action returns a
`ResourceNotFoundException`.
The policy can be attached or updated with
[PutPolicy](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_PutPolicy.html)
and removed with
[DeletePolicy](acm-pca/latest/APIReference/API_DeletePolicy.html).
<p class="title"> **About Policies**
<ul> <li> A policy grants access on a private CA to an AWS customer
account, to AWS Organizations, or to an AWS Organizations unit. Policies
are under the control of a CA administrator. For more information, see
[Using a Resource Based Policy with ACM Private
CA](acm-pca/latest/userguide/pca-rbp.html).
</li> <li> A policy permits a user of AWS Certificate Manager (ACM) to
issue ACM certificates signed by a CA in another account.
</li> <li> For ACM to manage automatic renewal of these certificates, the
ACM user must configure a Service Linked Role (SLR). The SLR allows the ACM
service to assume the identity of the user, subject to confirmation against
the ACM Private CA policy. For more information, see [Using a Service
Linked Role with
ACM](https://docs.aws.amazon.com/acm/latest/userguide/acm-slr.html).
</li> <li> Updates made in AWS Resource Manager (RAM) are reflected in
policies. For more information, see [Using AWS Resource Access Manager
(RAM) with ACM Private CA](acm-pca/latest/userguide/pca-ram.html).
</li> </ul>
"""
def get_policy(client, input, options \\ []) do
request(client, "GetPolicy", input, options)
end
@doc """
Imports a signed private CA certificate into ACM Private CA. This action is
used when you are using a chain of trust whose root is located outside ACM
Private CA. Before you can call this action, the following preparations
must be in place:
<ol> <li> In ACM Private CA, call the
[CreateCertificateAuthority](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html)
action to create the private CA that you plan to back with the
imported certificate.
</li> <li> Call the
[GetCertificateAuthorityCsr](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_GetCertificateAuthorityCsr.html)
action to generate a certificate signing request (CSR).
</li> <li> Sign the CSR using a root or intermediate CA hosted by either an
on-premises PKI hierarchy or by a commercial CA.
</li> <li> Create a certificate chain and copy the signed certificate and
the certificate chain to your working directory.
</li> </ol> The following requirements apply when you import a CA
certificate.
<ul> <li> You cannot import a non-self-signed certificate for use as a root
CA.
</li> <li> You cannot import a self-signed certificate for use as a
subordinate CA.
</li> <li> Your certificate chain must not include the private CA
certificate that you are importing.
</li> <li> Your ACM Private CA-hosted or on-premises CA certificate must be
the last certificate in your chain. The subordinate certificate, if any,
that your root CA signed must be next to last. The subordinate certificate
signed by the preceding subordinate CA must come next, and so on until your
chain is built.
</li> <li> The chain must be PEM-encoded.
</li> <li> The maximum allowed size of a certificate is 32 KB.
</li> <li> The maximum allowed size of a certificate chain is 2 MB.
</li> </ul> *Enforcement of Critical Constraints*
ACM Private CA allows the following extensions to be marked critical in the
imported CA certificate or chain.
<ul> <li> Basic constraints (*must* be marked critical)
</li> <li> Subject alternative names
</li> <li> Key usage
</li> <li> Extended key usage
</li> <li> Authority key identifier
</li> <li> Subject key identifier
</li> <li> Issuer alternative name
</li> <li> Subject directory attributes
</li> <li> Subject information access
</li> <li> Certificate policies
</li> <li> Policy mappings
</li> <li> Inhibit anyPolicy
</li> </ul> ACM Private CA rejects the following extensions when they are
marked critical in an imported CA certificate or chain.
<ul> <li> Name constraints
</li> <li> Policy constraints
</li> <li> CRL distribution points
</li> <li> Authority information access
</li> <li> Freshest CRL
</li> <li> Any other extension
</li> </ul>
"""
def import_certificate_authority_certificate(client, input, options \\ []) do
request(client, "ImportCertificateAuthorityCertificate", input, options)
end
@doc """
Uses your private certificate authority (CA), or one that has been shared
with you, to issue a client certificate. This action returns the Amazon
Resource Name (ARN) of the certificate. You can retrieve the certificate by
calling the
[GetCertificate](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_GetCertificate.html)
action and specifying the ARN.
<note> You cannot use the ACM **ListCertificateAuthorities** action to
retrieve the ARNs of the certificates that you issue by using ACM Private
CA.
</note>
"""
def issue_certificate(client, input, options \\ []) do
request(client, "IssueCertificate", input, options)
end
@doc """
Lists the private certificate authorities that you created by using the
[CreateCertificateAuthority](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html)
action.
"""
def list_certificate_authorities(client, input, options \\ []) do
request(client, "ListCertificateAuthorities", input, options)
end
@doc """
Lists all permissions on a private CA, if any, granted to the AWS
Certificate Manager (ACM) service principal (acm.amazonaws.com).
These permissions allow ACM to issue and renew ACM certificates that reside
in the same AWS account as the CA.
Permissions can be granted with the
[CreatePermission](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreatePermission.html)
action and revoked with the
[DeletePermission](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DeletePermission.html)
action.
<p class="title"> **About Permissions**
<ul> <li> If the private CA and the certificates it issues reside in the
same account, you can use `CreatePermission` to grant permissions for ACM
to carry out automatic certificate renewals.
</li> <li> For automatic certificate renewal to succeed, the ACM service
principal needs permissions to create, retrieve, and list certificates.
</li> <li> If the private CA and the ACM certificates reside in different
accounts, then permissions cannot be used to enable automatic renewals.
Instead, the ACM certificate owner must set up a resource-based policy to
enable cross-account issuance and renewals. For more information, see
[Using a Resource Based Policy with ACM Private
CA](acm-pca/latest/userguide/pca-rbp.html).
</li> </ul>
"""
def list_permissions(client, input, options \\ []) do
request(client, "ListPermissions", input, options)
end
@doc """
Lists the tags, if any, that are associated with your private CA or one
that has been shared with you. Tags are labels that you can use to identify
and organize your CAs. Each tag consists of a key and an optional value.
Call the
[TagCertificateAuthority](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_TagCertificateAuthority.html)
action to add one or more tags to your CA. Call the
[UntagCertificateAuthority](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_UntagCertificateAuthority.html)
action to remove tags.
"""
def list_tags(client, input, options \\ []) do
request(client, "ListTags", input, options)
end
@doc """
Attaches a resource-based policy to a private CA.
A policy can also be applied by
[sharing](https://docs.aws.amazon.com/acm-pca/latest/userguide/pca-ram.html)
a private CA through AWS Resource Access Manager (RAM).
The policy can be displayed with
[GetPolicy](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_GetPolicy.html)
and removed with
[DeletePolicy](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DeletePolicy.html).
<p class="title"> **About Policies**
<ul> <li> A policy grants access on a private CA to an AWS customer
account, to AWS Organizations, or to an AWS Organizations unit. Policies
are under the control of a CA administrator. For more information, see
[Using a Resource Based Policy with ACM Private
CA](acm-pca/latest/userguide/pca-rbp.html).
</li> <li> A policy permits a user of AWS Certificate Manager (ACM) to
issue ACM certificates signed by a CA in another account.
</li> <li> For ACM to manage automatic renewal of these certificates, the
ACM user must configure a Service Linked Role (SLR). The SLR allows the ACM
service to assume the identity of the user, subject to confirmation against
the ACM Private CA policy. For more information, see [Using a Service
Linked Role with
ACM](https://docs.aws.amazon.com/acm/latest/userguide/acm-slr.html).
</li> <li> Updates made in AWS Resource Manager (RAM) are reflected in
policies. For more information, see [Using AWS Resource Access Manager
(RAM) with ACM Private CA](acm-pca/latest/userguide/pca-ram.html).
</li> </ul>
"""
def put_policy(client, input, options \\ []) do
request(client, "PutPolicy", input, options)
end
@doc """
Restores a certificate authority (CA) that is in the `DELETED` state. You
can restore a CA during the period that you defined in the
**PermanentDeletionTimeInDays** parameter of the
[DeleteCertificateAuthority](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DeleteCertificateAuthority.html)
action. Currently, you can specify 7 to 30 days. If you did not specify a
**PermanentDeletionTimeInDays** value, by default you can restore the CA at
any time in a 30 day period. You can check the time remaining in the
restoration period of a private CA in the `DELETED` state by calling the
[DescribeCertificateAuthority](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DescribeCertificateAuthority.html)
or
[ListCertificateAuthorities](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListCertificateAuthorities.html)
actions. The status of a restored CA is set to its pre-deletion status when
the **RestoreCertificateAuthority** action returns. To change its status to
`ACTIVE`, call the
[UpdateCertificateAuthority](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_UpdateCertificateAuthority.html)
action. If the private CA was in the `PENDING_CERTIFICATE` state at
deletion, you must use the
[ImportCertificateAuthorityCertificate](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ImportCertificateAuthorityCertificate.html)
action to import a certificate authority into the private CA before it can
be activated. You cannot restore a CA after the restoration period has
ended.
"""
def restore_certificate_authority(client, input, options \\ []) do
request(client, "RestoreCertificateAuthority", input, options)
end
@doc """
Revokes a certificate that was issued inside ACM Private CA. If you enable
a certificate revocation list (CRL) when you create or update your private
CA, information about the revoked certificates will be included in the CRL.
ACM Private CA writes the CRL to an S3 bucket that you specify. A CRL is
typically updated approximately 30 minutes after a certificate is revoked.
If for any reason the CRL update fails, ACM Private CA makes further
attempts every 15 minutes. With Amazon CloudWatch, you can create
alarms for the metrics `CRLGenerated` and `MisconfiguredCRLBucket`. For
more information, see [Supported CloudWatch
Metrics](https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaCloudWatch.html).
<note> Both PCA and the IAM principal must have permission to write to the
S3 bucket that you specify. If the IAM principal making the call does not
have permission to write to the bucket, then an exception is thrown. For
more information, see [Configure Access to ACM Private
CA](https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaAuthAccess.html).
</note> ACM Private CA also writes revocation information to the audit
report. For more information, see
[CreateCertificateAuthorityAuditReport](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthorityAuditReport.html).
<note> You cannot revoke a root CA self-signed certificate.
</note>
"""
def revoke_certificate(client, input, options \\ []) do
request(client, "RevokeCertificate", input, options)
end
@doc """
Adds one or more tags to your private CA. Tags are labels that you can use
to identify and organize your AWS resources. Each tag consists of a key and
an optional value. You specify the private CA on input by its Amazon
Resource Name (ARN). You specify the tag by using a key-value pair. You can
apply a tag to just one private CA if you want to identify a specific
characteristic of that CA, or you can apply the same tag to multiple
private CAs if you want to filter for a common relationship among those
CAs. To remove one or more tags, use the
[UntagCertificateAuthority](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_UntagCertificateAuthority.html)
action. Call the
[ListTags](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListTags.html)
action to see what tags are associated with your CA.
"""
def tag_certificate_authority(client, input, options \\ []) do
request(client, "TagCertificateAuthority", input, options)
end
@doc """
Removes one or more tags from your private CA. A tag consists of a key-value
pair. If you do not specify the value portion of the tag when calling this
action, the tag will be removed regardless of value. If you specify a
value, the tag is removed only if it is associated with the specified
value. To add tags to a private CA, use the
[TagCertificateAuthority](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_TagCertificateAuthority.html).
Call the
[ListTags](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListTags.html)
action to see what tags are associated with your CA.
"""
def untag_certificate_authority(client, input, options \\ []) do
request(client, "UntagCertificateAuthority", input, options)
end
@doc """
Updates the status or configuration of a private certificate authority
(CA). Your private CA must be in the `ACTIVE` or `DISABLED` state before
you can update it. You can disable a private CA that is in the `ACTIVE`
state or make a CA that is in the `DISABLED` state active again.
<note> Both PCA and the IAM principal must have permission to write to the
S3 bucket that you specify. If the IAM principal making the call does not
have permission to write to the bucket, then an exception is thrown. For
more information, see [Configure Access to ACM Private
CA](https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaAuthAccess.html).
</note>
"""
def update_certificate_authority(client, input, options \\ []) do
request(client, "UpdateCertificateAuthority", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "acm-pca"}
host = build_host("acm-pca", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "ACMPrivateCA.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
| lib/aws/generated/acmpca.ex | 0.826046 | 0.717723 | acmpca.ex | starcoder |
defmodule FireAct do
@moduledoc """
Inspired by Plug, a helper module for defining action handlers with
optional params validations via Ecto.Changeset.
Perfect for extracting logic outside the controller endpoints.
Example usage:
```
defmodule RegisterUser do
use FireAct.Handler
use FireAct.ChangesetParams, %{
age: :integer,
email: :string
}
def handle(action, permitted_params) do
MyApp.User.create_changeset(permitted_params)
|> MyApp.Repo.insert()
|> case do
{:ok, user} ->
action |> assign(:user, user)
{:error, error} ->
action |> assign(:error, error) |> fail()
end
end
def validate_params(_action, changeset) do
changeset
|> validate_email()
|> validate_required([:age, :email])
end
defp validate_email(changeset) do
if "<EMAIL>" == get_field(changeset, :email) do
changeset
else
changeset
|> add_error(:email, "only <EMAIL> is OK")
end
end
end
{:ok, %{assigns: %{user: user}}} = FireAct.run(RegisterUser, %{
age: 1,
email: "<EMAIL>"
})
```
"""
alias FireAct.Action
@plug_init_mode Application.get_env(:fire_act, :plug_init_mode, :runtime)
def run(handlers), do: Action.new(%{}, %{}) |> do_run(List.wrap(handlers), [])
def run(%Action{} = action, handlers) do
do_run(action, List.wrap(handlers), [])
end
def run(handlers, params), do: Action.new(params, %{}) |> do_run(List.wrap(handlers), [])
def run(handlers, params, assigns),
do: Action.new(params, assigns) |> do_run(List.wrap(handlers), [])
def plug_init_mode do
@plug_init_mode
end
defp do_run(%Action{} = action, [], _), do: {:ok, action}
defp do_run(%Action{} = action, [handler | handlers], executed_handlers) do
handler.call(action, [])
|> case do
{code, %Action{} = action} when code in ~w(ok error)a -> action
action -> action
end
|> case do
%Action{failed: true} = action ->
rollback_handlers(action, executed_handlers)
%Action{failed: false} = action ->
do_run(action, handlers, [handler | executed_handlers])
end
end
defp rollback_handlers(action, []), do: {:error, action}
defp rollback_handlers(action, [handler | executed_handlers]) do
case handler.rollback(action) do
%FireAct.Action{} = action ->
rollback_handlers(action, executed_handlers)
_ ->
rollback_handlers(action, executed_handlers)
end
end
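# A hedged sketch of chaining handlers: handlers run left to right, and when
# one fails the already executed handlers are rolled back in reverse order.
# Both module names here are hypothetical:
#
#   FireAct.run([ReserveStock, ChargeCard], %{order_id: 1})
#   # => {:error, action} if ChargeCard fails, after ReserveStock.rollback/1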
end
| lib/fire_act.ex | 0.705481 | 0.518912 | fire_act.ex | starcoder |