| code | path | quality_prob | learning_prob | filename | kind |
|---|---|---|---|---|---|
defmodule Deckhub.Hearthstone.Card do
@moduledoc """
Represents an individual Hearthstone card.
* `armor` -- Armor value of the card, specific to [Hero cards][hero-cards]
* `artist` -- Name of the artist or artists that designed the card's art
* `attack` -- Amount of damage the card causes when used to attack
* `card_class` -- Class that can use the card or `NEUTRAL` if it can be used by any class
* `card_id` -- Guaranteed unique identifier for the card
* `collectible` -- `true` if the card can be collected
* `cost` -- Mana cost to cast the spell, equip the weapon, or summon the minion
* `dbf_id` -- Numeric id of the card
* `durability` -- Starting durability of a weapon
* `elite` -- Whether the card is flagged as elite in the source data (this appears to be set on Legendary-quality cards)
* `flavor` -- Flavor text when viewing the detail of the card
* `health` -- Starting health of the minion
* `image` -- URL of the card image
* `name` -- Name of the card
* `race` -- Type of minion, if any, such as `BEAST`, `MURLOC`, or `PIRATE`
* `rarity` -- Quality of the card: `FREE`, `COMMON`, `RARE`, `EPIC`, or `LEGENDARY`
* `set` -- The set the card was released in
* `slug_name` -- Slug version of the card name for lookup when the `card_id` isn't known
* `small_image` -- URL of the small sized card image
* `text` -- Text on the front of the card
* `type` -- Type of the card: `HERO`, `MINION`, `SPELL`, or `WEAPON`
[hero-cards]: https://hearthstone.gamepedia.com/Hero_card
"""
use Ecto.Schema
import Ecto.Changeset
alias Deckhub.Hearthstone.Card
@type t :: %Card{}
schema "cards" do
field(:armor, :integer)
field(:artist, :string)
field(:attack, :integer)
field(:card_class, :string)
field(:card_id, :string)
field(:collectible, :boolean)
field(:cost, :integer)
field(:dbf_id, :integer)
field(:durability, :integer)
field(:elite, :boolean)
field(:flavor, :string)
field(:health, :integer)
field(:image, :string)
field(:name, :string)
field(:race, :string)
field(:rarity, :string)
field(:set, :string)
field(:slug_name, :string)
field(:small_image, :string)
field(:text, :string)
field(:type, :string)
timestamps()
end
@doc false
def changeset(card, attrs) do
card
|> cast(attrs, [
:armor,
:artist,
:attack,
:card_class,
:card_id,
:collectible,
:cost,
:dbf_id,
:durability,
:elite,
:flavor,
:health,
:image,
:name,
:race,
:rarity,
:set,
:slug_name,
:small_image,
:text,
:type
])
|> validate_required([
:card_class,
:card_id,
:collectible,
:dbf_id,
:image,
:name,
:rarity,
:set,
:slug_name,
:small_image,
:type
])
end
@doc """
Determines whether the given card is a minion.
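
## Example

    iex> alias Deckhub.Hearthstone.Card
    iex> Card.minion?(%Card{type: "MINION"})
    true
    iex> Card.minion?(%Card{type: "SPELL"})
    false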
"""
@spec minion?(t()) :: boolean()
def minion?(%Card{type: "MINION"}), do: true
def minion?(%Card{}), do: false
@doc """
Returns the color associated with the given card rarity.
"""
@spec rarity_color(t()) :: String.t()
def rarity_color(%Card{rarity: "FREE"}), do: "#9d9d9d"
def rarity_color(%Card{rarity: "COMMON"}), do: "#9d9d9d"
def rarity_color(%Card{rarity: "RARE"}), do: "#0070dd"
def rarity_color(%Card{rarity: "EPIC"}), do: "#a335ee"
def rarity_color(%Card{rarity: "LEGENDARY"}), do: "#ff8000"
@doc """
Determines whether the given card is a spell.
"""
@spec spell?(t()) :: boolean()
def spell?(%Card{type: "SPELL"}), do: true
def spell?(%Card{}), do: false
@doc """
Determines whether the given card is a weapon.
"""
@spec weapon?(t()) :: boolean()
def weapon?(%Card{type: "WEAPON"}), do: true
def weapon?(%Card{}), do: false
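
# Implements `Phoenix.Param` so a card can be interpolated directly into
# routes. For a card with `card_id: "EX1_001"` and `name: "Lightwarden"`
# (hypothetical values), the param is "EX1_001-" joined with whatever slug
# `Deckhub.Text.to_slug/1` produces for the name.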
defimpl Phoenix.Param, for: Deckhub.Hearthstone.Card do
def to_param(%Deckhub.Hearthstone.Card{card_id: card_id, name: name}) do
"#{card_id}-#{Deckhub.Text.to_slug(name)}"
end
end
end
| lib/deckhub/hearthstone/card.ex | 0.850949 | 0.625924 | card.ex | starcoder |
defmodule Pastelli do
require Logger
@moduledoc """
Adapter interface to the Elli webserver.
## Options
* `:ip` - the ip to bind the server to.
Must be a tuple in the format `{x, y, z, w}`.
* `:port` - the port to run the server.
Defaults to 4000 (http)
* `:acceptors` - the number of acceptors for the listener.
Defaults to 20.
* `:max_connections` - max number of connections supported.
Defaults to `:infinity`.
* `:ref` - the reference name to be used.
Defaults to `plug.HTTP` (http) and `plug.HTTPS` (https).
This is the value that needs to be given on shutdown.
"""
@doc """
Run elli under http.
## Example
# Starts a new interface
Pastelli.http MyPlug, [], port: 80
# shut it down
Pastelli.shutdown MyPlug.HTTP
"""
@spec http(module(), Keyword.t, Keyword.t) ::
{:ok, pid} | {:error, :eaddrinuse} | {:error, term}
def http(plug, options, elli_options) do
run(:http, plug, options, elli_options)
end
def https(_plug, _options, _elli_options) do
raise ArgumentError, message: "NotImplemented"
end
def shutdown(ref) do
Pastelli.Supervisor.shutdown(ref)
end
@doc """
Returns a child spec for Elli to be supervised in your application.
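
## Example

    # Hypothetical supervision tree entry; the arguments mirror `http/3`.
    children = [
      Pastelli.child_spec(:http, MyPlug, [], port: 4000)
    ]
    Supervisor.start_link(children, strategy: :one_for_one)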
"""
import Supervisor.Spec
def child_spec(scheme, plug, options, elli_options) do
{id, elli_options} = Keyword.pop elli_options, :supervisor_id, :elli
args = build_elli_options(scheme, plug, options, elli_options)
worker(:elli, [args], id: id)
end
defp run(scheme, plug, options, elli_options) do
Pastelli.Supervisor.start_link(
ref_for(plug),
build_elli_options(scheme, plug, options, elli_options)
)
end
def build_elli_options(_scheme, plug, options, elli_options) do
default_elli_options()
|> Keyword.put(:callback_args, {plug, options})
|> Keyword.merge(elli_options)
end
defp default_elli_options do
[
port: 4000,
callback: Pastelli.Handler
]
end
defp ref_for(plug) do
Module.concat plug, "HTTP"
end
end
| lib/pastelli.ex | 0.815416 | 0.493714 | pastelli.ex | starcoder |
defmodule Day13.Redo do
def part1(file_name \\ "test.txt") do
file_name
|> parse()
|> grab_first_fold()
|> fold()
|> MapSet.size()
end
def part2(file_name \\ "test.txt") do
file_name
|> parse()
|> fold()
|> print_format()
|> IO.puts()
end
def print_format(coords) do
max_x = coords |> Enum.max_by(fn {x, _y} -> x end) |> elem(0)
max_y = coords |> Enum.max_by(fn {_x, y} -> y end) |> elem(1)
0..max_y
|> Enum.map_join("\n", fn y ->
0..max_x
|> Enum.map_join("", fn x ->
if MapSet.member?(coords, {x, y}), do: "#", else: " "
end)
end)
end
def fold(%{coords: coords, folds: []}) do
coords
end
def fold(%{coords: coords, folds: [{"y", fold_y} | rest]}) do
new_coords = fold_to_top(coords, fold_y)
fold(%{coords: new_coords, folds: rest})
end
def fold(%{coords: coords, folds: [{"x", fold_x} | rest]}) do
new_coords = fold_to_left(coords, fold_x)
fold(%{coords: new_coords, folds: rest})
end
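
# Folding reflects points across the fold line: a point at distance
# d = y - fold_y beyond the line lands at fold_y - d, i.e. y - 2 * (y - fold_y)
# (and likewise for x).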
def fold_to_top(coords, fold_y) do
Enum.reduce(coords, MapSet.new(), fn
{x, y}, acc when y > fold_y -> MapSet.put(acc, {x, y - (2 * (y - fold_y))})
coord, acc -> MapSet.put(acc, coord)
end)
end
def fold_to_left(coords, fold_x) do
Enum.reduce(coords, MapSet.new(), fn
{x, y}, acc when x > fold_x -> MapSet.put(acc, {x - (2 * (x - fold_x)), y})
coord, acc -> MapSet.put(acc, coord)
end)
end
def grab_first_fold(%{folds: [first | _rest]} = state) do
%{state | folds: [first]}
end
def parse(file_name) do
"priv/" <> file_name
|> File.read!()
|> String.split("\n\n")
|> then(fn [coords_line, folds_line] ->
coords = to_coords(coords_line)
folds = to_folds(folds_line)
%{coords: coords, folds: folds}
end)
end
def to_folds(line) do
line
|> String.split("\n", trim: true)
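# Each fold line reads like "fold along y=7": capture the axis, skip the
# "=", and convert the value to an integer.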
|> Enum.map(fn <<"fold along ", axis::binary-size(1), _::binary-size(1), value::binary>> ->
{axis, String.to_integer(value)}
end)
end
def to_coords(line) do
line
|> String.split("\n", trim: true)
|> Enum.map(fn token ->
token
|> String.split(",")
|> Enum.map(&String.to_integer/1)
|> List.to_tuple()
end)
|> MapSet.new()
end
end
| jpcarver+elixir/day13/lib/da13.redo.ex | 0.578329 | 0.453443 | da13.redo.ex | starcoder |
defmodule Artemis.Helpers.Schedule do
@moduledoc """
Helper functions for Schedules
Implemented using Cocktail: https://github.com/peek-travel/cocktail
Documentation: https://hexdocs.pm/cocktail/
"""
@doc """
Encode Cocktail.Schedule struct as iCal string
Takes an existing Cocktail.Schedule struct or a list of recurrence rules.
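
## Example

    # The rule map shape is inferred from the private helpers below
    # (hypothetical values):
    rules = [%{"frequency" => "weekly", "days" => ["1", "3"], "time" => "9:30"}]
    Artemis.Helpers.Schedule.encode(rules)
    # => an iCal string produced by Cocktail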
"""
def encode(%Cocktail.Schedule{} = value) do
Cocktail.Schedule.to_i_calendar(value)
end
def encode(value) when is_bitstring(value), do: value
def encode(recurrence_rules) when is_list(recurrence_rules) do
Timex.now()
|> Cocktail.schedule()
|> add_recurrence_rules(recurrence_rules)
|> encode()
end
defp add_recurrence_rules(schedule, rules) do
Enum.reduce(rules, schedule, fn rule, acc ->
add_recurrence_rule(acc, rule)
end)
end
defp add_recurrence_rule(schedule, rule) do
frequency = get_recurrence_rule_frequency(rule)
options = get_recurrence_rule_options(rule)
Cocktail.Schedule.add_recurrence_rule(schedule, frequency, options)
end
defp get_recurrence_rule_frequency(rule) do
rule
|> Map.get("frequency", "daily")
|> Artemis.Helpers.to_atom()
end
defp get_recurrence_rule_options(rule) do
days =
rule
|> Map.get("days", [])
|> Enum.map(&Artemis.Helpers.to_integer/1)
{fallback_hour, fallback_minute} = parse_hours_and_minutes_from_time(rule)
[
days: days,
hours: Map.get(rule, "hours", [fallback_hour]),
minutes: Map.get(rule, "minutes", [fallback_minute]),
seconds: Map.get(rule, "seconds", [0])
]
end
defp parse_hours_and_minutes_from_time(rule) do
time =
rule
|> Map.get("time")
|> String.pad_leading(5, "0")
|> Timex.parse!("{h24}:{m}")
{time.hour, time.minute}
rescue
_ -> {0, 0}
end
@doc """
Decode iCal string as Cocktail.Schedule struct
"""
def decode(value) when is_bitstring(value) do
{:ok, result} = Cocktail.Schedule.from_i_calendar(value)
result
end
def decode(value), do: value
@doc """
Return recurrence rules
"""
def recurrence_rules(schedule) do
schedule
|> decode()
|> Kernel.||(%{})
|> Map.get(:recurrence_rules, [])
end
@doc """
Return days of the week from recurrence rule
"""
def days(schedule, options \\ []) do
schedule
|> get_schedule_recurrence_rule_validations(options)
|> Artemis.Helpers.deep_get([:day, :days], [])
end
@doc """
Return hours from recurrence rule
"""
def hours(schedule, options \\ []) do
schedule
|> get_schedule_recurrence_rule_validations(options)
|> Artemis.Helpers.deep_get([:hour_of_day, :hours], [])
end
@doc """
Return hour from recurrence rule
Note: assumes only one value for a given schedule recurrence rule
"""
def hour(schedule, options \\ []) do
schedule
|> hours(options)
|> List.first()
end
@doc """
Return minutes from recurrence rule
"""
def minutes(schedule, options \\ []) do
schedule
|> get_schedule_recurrence_rule_validations(options)
|> Artemis.Helpers.deep_get([:minute_of_hour, :minutes], [])
end
@doc """
Return minute from recurrence rule
Note: assumes only one value for a given schedule recurrence rule
"""
def minute(schedule, options \\ []) do
schedule
|> minutes(options)
|> List.first()
end
@doc """
Return seconds from recurrence rule
"""
def seconds(schedule, options \\ []) do
schedule
|> get_schedule_recurrence_rule_validations(options)
|> Artemis.Helpers.deep_get([:second_of_minute, :seconds], [])
end
@doc """
Return second from recurrence rule
Note: assumes only one value for a given schedule recurrence rule
"""
def second(schedule, options \\ []) do
schedule
|> seconds(options)
|> List.first()
end
@doc """
Humanize schedule value
"""
def humanize(%Cocktail.Schedule{} = value) do
value
|> Cocktail.Schedule.to_string()
|> String.replace("on ", "on ")
|> String.replace("on and", "on")
|> String.replace(" on the 0th minute of the hour", "")
|> String.replace(" on the 0th second of the minute", "")
end
def humanize(value) when is_bitstring(value) do
value
|> decode()
|> humanize()
end
def humanize(nil), do: nil
@doc """
Returns the current scheduled date
"""
def current(schedule, start_time \\ Timex.now())
def current(%Cocktail.Schedule{} = schedule, start_time) do
schedule
|> Map.put(:start_time, start_time)
|> Cocktail.Schedule.occurrences()
|> Enum.take(1)
|> hd()
end
def current(schedule, start_time) when is_bitstring(schedule) do
current(decode(schedule), start_time)
end
@doc """
Returns occurrences of the scheduled date
"""
def occurrences(schedule, start_time \\ Timex.now(), count \\ 10)
def occurrences(nil, _start_time, _count), do: []
def occurrences(schedule, start_time, count) do
schedule
|> decode()
|> Map.put(:start_time, start_time)
|> Cocktail.Schedule.occurrences()
|> Enum.take(count)
end
@doc """
Return the start date for a given schedule. Attempts to parse a `DTSTART`
clause in an iCal encoded entry. Returns `nil` if not found.
"""
def start_date(schedule, options \\ []), do: parse_date_from_ical(schedule, "DTSTART", options)
@doc """
Return the end date for a given schedule. Attempts to parse a `DTEND`
clause in an iCal encoded entry. Returns `nil` if not found.
"""
def end_date(schedule, options \\ []), do: parse_date_from_ical(schedule, "DTEND", options)
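
# Scans the iCal-encoded schedule for the line containing `key` (for example
# "DTSTART"), extracts a "YYYYMMDDThhmmss" timestamp with the regex, parses it
# with Timex, and returns nil (via the rescue) when anything is missing.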
defp parse_date_from_ical(schedule, key, options) do
format = Keyword.get(options, :format, "{YYYY}{0M}{0D}T{h24}{m}{s}")
regex = Keyword.get(options, :regex, ~r/[0-9]{8}T[0-9]{6}/)
start_section =
schedule
|> encode()
|> String.split("\n")
|> Enum.find(&(&1 =~ key))
|> Kernel.||("")
timestamp =
regex
|> Regex.run(start_section)
|> Kernel.||([])
|> List.first()
|> Kernel.||("")
timestamp
|> Timex.parse!(format)
|> Timex.Timezone.convert("Etc/UTC")
rescue
_error in Timex.Parse.ParseError -> nil
end
# Helpers
defp get_schedule_recurrence_rule_validations(schedule, options) do
index = Keyword.get(options, :index, 0)
decoded_schedule = decode(schedule)
if decoded_schedule do
decoded_schedule
|> Map.get(:recurrence_rules, [])
|> Enum.at(index)
|> Kernel.||(%{})
|> Map.get(:validations, %{})
end
end
end
| apps/artemis/lib/artemis/helpers/schedule.ex | 0.881436 | 0.543469 | schedule.ex | starcoder |
defmodule AWS.WAFV2 do
@moduledoc """
This is the latest version of the **AWS WAF** API, released in November, 2019.
The names of the entities that you use to access this API, like endpoints and
namespaces, all have the versioning information added, like "V2" or "v2", to
distinguish from the prior version. We recommend migrating your resources to
this version, because it has a number of significant improvements.
If you used AWS WAF prior to this release, you can't use this AWS WAFV2 API to
access any AWS WAF resources that you created before. You can access your old
rules, web ACLs, and other AWS WAF resources only through the AWS WAF Classic
APIs. The AWS WAF Classic APIs have retained the prior names, endpoints, and
namespaces.
For information, including how to migrate your AWS WAF resources to this
version, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
AWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS
requests that are forwarded to Amazon CloudFront, an Amazon API Gateway REST
API, an Application Load Balancer, or an AWS AppSync GraphQL API. AWS WAF also
lets you control access to your content. Based on conditions that you specify,
such as the IP addresses that requests originate from or the values of query
strings, the API Gateway REST API, CloudFront distribution, the Application Load
Balancer, or the AWS AppSync GraphQL API responds to requests either with the
requested content or with an HTTP 403 status code (Forbidden). You also can
configure CloudFront to return a custom error page when a request is blocked.
This API guide is for developers who need detailed information about AWS WAF API
actions, data types, and errors. For detailed information about AWS WAF features
and an overview of how to use AWS WAF, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/).
You can make calls using the endpoints listed in [AWS Service Endpoints for AWS WAF](https://docs.aws.amazon.com/general/latest/gr/rande.html#waf_region).
* For regional applications, you can use any of the endpoints in the
list. A regional application can be an Application Load Balancer (ALB), an API
Gateway REST API, or an AppSync GraphQL API.
* For AWS CloudFront applications, you must use the API endpoint
listed for US East (N. Virginia): us-east-1.
Alternatively, you can use one of the AWS SDKs to access an API that's tailored
to the programming language or platform that you're using. For more information,
see [AWS SDKs](http://aws.amazon.com/tools/#SDKs).
We currently provide two versions of the AWS WAF API: this API and the prior
versions, the classic AWS WAF APIs. This new API provides the same functionality
as the older versions, with the following major improvements:
* You use one API for both global and regional applications. Where
you need to distinguish the scope, you specify a `Scope` parameter and set it to
`CLOUDFRONT` or `REGIONAL`.
* You can define a Web ACL or rule group with a single call, and
update it with a single call. You define all rule specifications in JSON format,
and pass them to your rule group or Web ACL calls.
* The limits AWS WAF places on the use of rules more closely
reflects the cost of running each type of rule. Rule groups include capacity
settings, so you know the maximum cost of a rule group when you use it.
"""
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Associates a Web ACL with a regional application resource, to protect the
resource. A regional application can be an Application Load Balancer (ALB), an
API Gateway REST API, or an AppSync GraphQL API.
For AWS CloudFront, don't use this call. Instead, use your CloudFront
distribution configuration. To associate a Web ACL, in the CloudFront call
`UpdateDistribution`, set the web ACL ID to the Amazon Resource Name (ARN) of
the Web ACL. For information, see
[UpdateDistribution](https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_UpdateDistribution.html).
"""
def associate_web_a_c_l(client, input, options \\ []) do
request(client, "AssociateWebACL", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Returns the web ACL capacity unit (WCU) requirements for a specified scope and
set of rules. You can use this to check the capacity requirements for the rules
you want to use in a `RuleGroup` or `WebACL`.
AWS WAF uses WCUs to calculate and control the operating resources that are used
to run your rules, rule groups, and web ACLs. AWS WAF calculates capacity
differently for each rule type, to reflect the relative cost of each rule.
Simple rules that cost little to run use fewer WCUs than more complex rules that
use more processing power. Rule group capacity is fixed at creation, which helps
users plan their web ACL WCU usage when they use a rule group. The WCU limit for
web ACLs is 1,500.
"""
def check_capacity(client, input, options \\ []) do
request(client, "CheckCapacity", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Creates an `IPSet`, which you use to identify web requests that originate from
specific IP addresses or ranges of IP addresses. For example, if you're
receiving a lot of requests from a range of IP addresses, you can configure AWS
WAF to block them using an IPSet that lists those IP addresses.
"""
def create_i_p_set(client, input, options \\ []) do
request(client, "CreateIPSet", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Creates a `RegexPatternSet`, which you reference in a
`RegexPatternSetReferenceStatement`, to have AWS WAF inspect a web request
component for the specified patterns.
"""
def create_regex_pattern_set(client, input, options \\ []) do
request(client, "CreateRegexPatternSet", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Creates a `RuleGroup` per the specifications provided.
A rule group defines a collection of rules to inspect and control web requests
that you can use in a `WebACL`. When you create a rule group, you define an
immutable capacity limit. If you update a rule group, you must stay within the
capacity. This allows others to reuse the rule group with confidence in its
capacity requirements.
"""
def create_rule_group(client, input, options \\ []) do
request(client, "CreateRuleGroup", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Creates a `WebACL` per the specifications provided.
A Web ACL defines a collection of rules to use to inspect and control web
requests. Each rule has an action defined (allow, block, or count) for requests
that match the statement of the rule. In the Web ACL, you assign a default
action to take (allow, block) for any request that does not match any of the
rules. The rules in a Web ACL can be a combination of the types `Rule`,
`RuleGroup`, and managed rule group. You can associate a Web ACL with one or
more AWS resources to protect. The resources can be Amazon CloudFront, an Amazon
API Gateway REST API, an Application Load Balancer, or an AWS AppSync GraphQL
API.
"""
def create_web_a_c_l(client, input, options \\ []) do
request(client, "CreateWebACL", input, options)
end
@doc """
Deletes all rule groups that are managed by AWS Firewall Manager for the
specified web ACL.
You can only use this if `ManagedByFirewallManager` is false in the specified
`WebACL`.
"""
def delete_firewall_manager_rule_groups(client, input, options \\ []) do
request(client, "DeleteFirewallManagerRuleGroups", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Deletes the specified `IPSet`.
"""
def delete_i_p_set(client, input, options \\ []) do
request(client, "DeleteIPSet", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Deletes the `LoggingConfiguration` from the specified web ACL.
"""
def delete_logging_configuration(client, input, options \\ []) do
request(client, "DeleteLoggingConfiguration", input, options)
end
@doc """
Permanently deletes an IAM policy from the specified rule group.
You must be the owner of the rule group to perform this operation.
"""
def delete_permission_policy(client, input, options \\ []) do
request(client, "DeletePermissionPolicy", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Deletes the specified `RegexPatternSet`.
"""
def delete_regex_pattern_set(client, input, options \\ []) do
request(client, "DeleteRegexPatternSet", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Deletes the specified `RuleGroup`.
"""
def delete_rule_group(client, input, options \\ []) do
request(client, "DeleteRuleGroup", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Deletes the specified `WebACL`.
You can only use this if `ManagedByFirewallManager` is false in the specified
`WebACL`.
"""
def delete_web_a_c_l(client, input, options \\ []) do
request(client, "DeleteWebACL", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Provides high-level information for a managed rule group, including descriptions
of the rules.
"""
def describe_managed_rule_group(client, input, options \\ []) do
request(client, "DescribeManagedRuleGroup", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Disassociates a Web ACL from a regional application resource. A regional
application can be an Application Load Balancer (ALB), an API Gateway REST API,
or an AppSync GraphQL API.
For AWS CloudFront, don't use this call. Instead, use your CloudFront
distribution configuration. To disassociate a Web ACL, provide an empty web ACL
ID in the CloudFront call `UpdateDistribution`. For information, see
[UpdateDistribution](https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_UpdateDistribution.html).
"""
def disassociate_web_a_c_l(client, input, options \\ []) do
request(client, "DisassociateWebACL", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Retrieves the specified `IPSet`.
"""
def get_i_p_set(client, input, options \\ []) do
request(client, "GetIPSet", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Returns the `LoggingConfiguration` for the specified web ACL.
"""
def get_logging_configuration(client, input, options \\ []) do
request(client, "GetLoggingConfiguration", input, options)
end
@doc """
Returns the IAM policy that is attached to the specified rule group.
You must be the owner of the rule group to perform this operation.
"""
def get_permission_policy(client, input, options \\ []) do
request(client, "GetPermissionPolicy", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Retrieves the keys that are currently blocked by a rate-based rule. The maximum
number of managed keys that can be blocked for a single rate-based rule is
10,000. If more than 10,000 addresses exceed the rate limit, those with the
highest rates are blocked.
"""
def get_rate_based_statement_managed_keys(client, input, options \\ []) do
request(client, "GetRateBasedStatementManagedKeys", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Retrieves the specified `RegexPatternSet`.
"""
def get_regex_pattern_set(client, input, options \\ []) do
request(client, "GetRegexPatternSet", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Retrieves the specified `RuleGroup`.
"""
def get_rule_group(client, input, options \\ []) do
request(client, "GetRuleGroup", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Gets detailed information about a specified number of requests--a sample--that
AWS WAF randomly selects from among the first 5,000 requests that your AWS
resource received during a time range that you choose. You can specify a sample
size of up to 500 requests, and you can specify any time range in the previous
three hours.
`GetSampledRequests` returns a time range, which is usually the time range that
you specified. However, if your resource (such as a CloudFront distribution)
received 5,000 requests before the specified time range elapsed,
`GetSampledRequests` returns an updated time range. This new time range
indicates the actual period during which AWS WAF selected the requests in the
sample.
"""
def get_sampled_requests(client, input, options \\ []) do
request(client, "GetSampledRequests", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Retrieves the specified `WebACL`.
"""
def get_web_a_c_l(client, input, options \\ []) do
request(client, "GetWebACL", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Retrieves the `WebACL` for the specified resource.
"""
def get_web_a_c_l_for_resource(client, input, options \\ []) do
request(client, "GetWebACLForResource", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Retrieves an array of managed rule groups that are available for you to use.
This list includes all AWS Managed Rules rule groups and the AWS Marketplace
managed rule groups that you're subscribed to.
"""
def list_available_managed_rule_groups(client, input, options \\ []) do
request(client, "ListAvailableManagedRuleGroups", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Retrieves an array of `IPSetSummary` objects for the IP sets that you manage.
"""
def list_i_p_sets(client, input, options \\ []) do
request(client, "ListIPSets", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Retrieves an array of your `LoggingConfiguration` objects.
"""
def list_logging_configurations(client, input, options \\ []) do
request(client, "ListLoggingConfigurations", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Retrieves an array of `RegexPatternSetSummary` objects for the regex pattern
sets that you manage.
"""
def list_regex_pattern_sets(client, input, options \\ []) do
request(client, "ListRegexPatternSets", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Retrieves an array of the Amazon Resource Names (ARNs) for the regional
resources that are associated with the specified web ACL. If you want the list
of AWS CloudFront resources, use the AWS CloudFront call
`ListDistributionsByWebACLId`.
"""
def list_resources_for_web_a_c_l(client, input, options \\ []) do
request(client, "ListResourcesForWebACL", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Retrieves an array of `RuleGroupSummary` objects for the rule groups that you
manage.
"""
def list_rule_groups(client, input, options \\ []) do
request(client, "ListRuleGroups", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Retrieves the `TagInfoForResource` for the specified resource. Tags are
key:value pairs that you can use to categorize and manage your resources, for
purposes like billing. For example, you might set the tag key to "customer" and
the value to the customer name or ID. You can specify one or more tags to add to
each AWS resource, up to 50 tags for a resource.
You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule
groups, IP sets, and regex pattern sets. You can't manage or view tags through
the AWS WAF console.
"""
def list_tags_for_resource(client, input, options \\ []) do
request(client, "ListTagsForResource", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Retrieves an array of `WebACLSummary` objects for the web ACLs that you manage.
"""
def list_web_a_c_ls(client, input, options \\ []) do
request(client, "ListWebACLs", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Enables the specified `LoggingConfiguration`, to start logging from a web ACL,
according to the configuration provided.
You can access information about all traffic that AWS WAF inspects using the
following steps:
1. Create an Amazon Kinesis Data Firehose.
Create the data firehose with a PUT source and in the Region that you are
operating. If you are capturing logs for Amazon CloudFront, always create the
firehose in US East (N. Virginia).
Give the data firehose a name that starts with the prefix `aws-waf-logs-`. For
example, `aws-waf-logs-us-east-2-analytics`.
Do not create the data firehose using a `Kinesis stream` as your source.
2. Associate that firehose to your web ACL using a
`PutLoggingConfiguration` request.
When you successfully enable logging using a `PutLoggingConfiguration` request,
AWS WAF will create a service linked role with the necessary permissions to
write logs to the Amazon Kinesis Data Firehose. For more information, see
[Logging Web ACL Traffic Information](https://docs.aws.amazon.com/waf/latest/developerguide/logging.html)
in the *AWS WAF Developer Guide*.
"""
def put_logging_configuration(client, input, options \\ []) do
request(client, "PutLoggingConfiguration", input, options)
end
@doc """
Attaches an IAM policy to the specified resource.
Use this to share a rule group across accounts.
You must be the owner of the rule group to perform this operation.
This action is subject to the following restrictions:
* You can attach only one policy with each `PutPermissionPolicy`
request.
* The ARN in the request must be a valid WAF `RuleGroup` ARN and the
rule group must exist in the same region.
* The user making the request must be the owner of the rule group.
"""
def put_permission_policy(client, input, options \\ []) do
request(client, "PutPermissionPolicy", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Associates tags with the specified AWS resource. Tags are key:value pairs that
you can use to categorize and manage your resources, for purposes like billing.
For example, you might set the tag key to "customer" and the value to the
customer name or ID. You can specify one or more tags to add to each AWS
resource, up to 50 tags for a resource.
You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule
groups, IP sets, and regex pattern sets. You can't manage or view tags through
the AWS WAF console.
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Disassociates tags from an AWS resource. Tags are key:value pairs that you can
associate with AWS resources. For example, the tag key might be "customer" and
the tag value might be "companyA." You can specify one or more tags to add to
each container. You can add up to 50 tags to each AWS resource.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Updates the specified `IPSet`.
"""
def update_i_p_set(client, input, options \\ []) do
request(client, "UpdateIPSet", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Updates the specified `RegexPatternSet`.
"""
def update_regex_pattern_set(client, input, options \\ []) do
request(client, "UpdateRegexPatternSet", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Updates the specified `RuleGroup`.
A rule group defines a collection of rules to inspect and control web requests
that you can use in a `WebACL`. When you create a rule group, you define an
immutable capacity limit. If you update a rule group, you must stay within the
capacity. This allows others to reuse the rule group with confidence in its
capacity requirements.
"""
def update_rule_group(client, input, options \\ []) do
request(client, "UpdateRuleGroup", input, options)
end
@doc """
This is the latest version of **AWS WAF**, named AWS WAFV2, released in
November, 2019.
For information, including how to migrate your AWS WAF resources from the prior
release, see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html).
Updates the specified `WebACL`.
A Web ACL defines a collection of rules to use to inspect and control web
requests. Each rule has an action defined (allow, block, or count) for requests
that match the statement of the rule. In the Web ACL, you assign a default
action to take (allow, block) for any request that does not match any of the
rules. The rules in a Web ACL can be a combination of the types `Rule`,
`RuleGroup`, and managed rule group. You can associate a Web ACL with one or
more AWS resources to protect. The resources can be Amazon CloudFront, an Amazon
API Gateway REST API, an Application Load Balancer, or an AWS AppSync GraphQL
API.
"""
def update_web_a_c_l(client, input, options \\ []) do
request(client, "UpdateWebACL", input, options)
end
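
# All operations funnel through this JSON-style request: the action name
# goes in the X-Amz-Target header, the input becomes the JSON payload, and
# the whole request is signed with AWS Signature Version 4.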
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "wafv2"}
host = build_host("wafv2", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSWAF_20190729.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
| lib/aws/generated/waf_v2.ex | 0.887089 | 0.571587 | waf_v2.ex | starcoder |
defmodule EQRCode.Encode do
@moduledoc """
Data encoding in Byte Mode.
"""
import Bitwise
@byte_mode 0b0100
@pad <<236, 17>>
@capacity_l [0, 17, 32, 53, 78, 106, 134, 154]
@ecc_l %{
1 => 19,
2 => 34,
3 => 55,
4 => 80,
5 => 108,
6 => 136,
7 => 156
}
@mask0 <<0x99999999999999666666666666669966666666659999999996699533333333332CCD332CCCCCCCCCCCCCCD333333333333332CCD332CCCCCCCCCCCCCCD333333333333332CCD332CCCCCCCCCCCCCCD333333333333332CCD332CCCCCCCCCCCCCCD333333333333332CCD332CCCCCCCCCCCCCCD33333333333333333332CCCCCCCCCD33333333::1072>>
@doc """
Encode the binary.
Example:
iex> EQRCode.Encode.encode("hello world!")
{1, [0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1,
0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1,
0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1,
1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0,
0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1,
1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0]}
"""
@spec encode(binary) :: {integer, [0 | 1]}
def encode(bin) do
version = version(bin)
encoded =
[<<@byte_mode::4>>, <<byte_size(bin)>>, bin, <<0::4>>]
|> Enum.flat_map(&bits/1)
|> pad_bytes(version)
{version, encoded}
end
@doc """
Encode the binary with custom pattern bits.
"""
@spec encode(binary, bitstring) :: {integer, [0 | 1]}
def encode(bin, bits) do
version = 5
n = byte_size(bin)
n1 = n + 2
n2 = @ecc_l[version] - n1
<<_::binary-size(n1), mask::binary-size(n2), _::binary>> = @mask0
encoded =
<<@byte_mode::4, n::8, bin::binary-size(n), 0::4, xor(bits, mask)::bits>>
|> bits()
|> pad_bytes(version)
{version, encoded}
end
defp xor(<<>>, _), do: <<>>
defp xor(_, <<>>), do: <<>>
defp xor(<<a::1, t1::bits>>, <<b::1, t2::bits>>) do
<<a ^^^ b::1, xor(t1, t2)::bits>>
end
@doc """
Returns the lowest version for the given binary.
Example:
iex> EQRCode.Encode.version("hello world!")
1
"""
@spec version(binary) :: integer
def version(bin) do
len = byte_size(bin)
Enum.find_index(@capacity_l, &(&1 >= len))
end
@doc """
Returns bits for any binary data.
Example:
iex> EQRCode.Encode.bits(<<123, 4>>)
[0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0]
"""
@spec bits(bitstring) :: [0 | 1]
def bits(bin) do
for <<b::1 <- bin>>, do: b
end
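
# Pads the bit list up to the version's data codeword capacity with the
# alternating pad bytes 236 (0b11101100) and 17 (0b00010001) defined by the
# QR specification.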
defp pad_bytes(list, version) do
n = @ecc_l[version] * 8 - length(list)
Stream.cycle(bits(@pad))
|> Stream.take(n)
|> (&Enum.concat(list, &1)).()
end
end
| lib/eqrcode/encode.ex | 0.6705 | 0.44559 | encode.ex | starcoder |
defmodule Sentry.Client do
@behaviour Sentry.HTTPClient
@moduledoc ~S"""
This module is the default client for sending an event to Sentry via HTTP.
It makes use of `Task.Supervisor` to allow sending tasks synchronously or asynchronously, and defaulting to asynchronous. See `Sentry.Client.send_event/2` for more information.
### Configuration
* `:before_send_event` - allows performing operations on the event before
it is sent. Accepts an anonymous function or a {module, function} tuple, and
the event will be passed as the only argument.
* `:after_send_event` - callback that is called after attempting to send an event.
Accepts an anonymous function or a {module, function} tuple. The result of the HTTP call as well as the event will be passed as arguments.
The return value of the callback is not returned.
Example configuration of putting Logger metadata in the extra context:
config :sentry,
before_send_event: fn(event) ->
metadata = Map.new(Logger.metadata)
%{event | extra: Map.merge(event.extra, metadata)}
end,
after_send_event: fn(event, result) ->
case result do
{:ok, id} ->
Logger.info("Successfully sent event!")
_ ->
Logger.info(fn -> "Did not successfully send event! #{inspect(event)}" end)
end
end
"""
alias Sentry.{Event, Util, Config}
require Logger
@type get_dsn :: {String.t, String.t, String.t}
@sentry_version 5
@max_attempts 4
@hackney_pool_name :sentry_pool
@sentry_client "sentry-elixir/#{Mix.Project.config()[:version]}"
@doc """
Attempts to send the event to the Sentry API up to 4 times with exponential backoff.
The event is dropped if all retries fail.
### Options
* `:result` - Allows specifying how the result should be returned. Options include `:sync`, `:none`, and `:async`. `:sync` will make the API call synchronously, and return `{:ok, event_id}` if successful. `:none` sends the event from an unlinked child process under `Sentry.TaskSupervisor` and will return `{:ok, ""}` regardless of the result. `:async` will start an unlinked task and return a tuple of `{:ok, Task.t}` on success where the Task can be awaited upon to receive the result asynchronously. When used in an OTP behaviour like GenServer, the task will send a message that needs to be matched with `GenServer.handle_info/2`. See `Task.Supervisor.async_nolink/2` for more information. `:async` is the default.
* `:sample_rate` - The sampling factor to apply to events. A value of 0.0 will deny sending any events, and a value of 1.0 will send 100% of events.
"""
@spec send_event(Event.t, Keyword.t) :: {:ok, Task.t | String.t} | :error | :unsampled
def send_event(%Event{} = event, opts \\ []) do
if Config.dsn() == "" do
:unsampled
else
result = Keyword.get(opts, :result, :async)
sample_rate = Keyword.get(opts, :sample_rate) || Config.sample_rate()
event = maybe_call_before_send_event(event)
if sample_event?(sample_rate) do
encode_and_send(event, result)
else
:unsampled
end
end
end
defp encode_and_send(event, result) do
case Poison.encode(event) do
{:ok, body} ->
do_send_event(event, body, result)
{:error, error} ->
log_api_error("Unable to encode Sentry error - #{inspect(error)}")
:error
end
end
defp do_send_event(event, body, :async) do
{endpoint, public_key, secret_key} = get_dsn!()
auth_headers = authorization_headers(public_key, secret_key)
{:ok, Task.Supervisor.async_nolink(Sentry.TaskSupervisor, fn ->
try_request(:post, endpoint, auth_headers, body)
|> maybe_call_after_send_event(event)
end)}
end
defp do_send_event(event, body, :sync) do
{endpoint, public_key, secret_key} = get_dsn!()
auth_headers = authorization_headers(public_key, secret_key)
try_request(:post, endpoint, auth_headers, body)
|> maybe_call_after_send_event(event)
end
defp do_send_event(event, body, :none) do
{endpoint, public_key, secret_key} = get_dsn!()
auth_headers = authorization_headers(public_key, secret_key)
Task.Supervisor.start_child(Sentry.TaskSupervisor, fn ->
try_request(:post, endpoint, auth_headers, body)
|> maybe_call_after_send_event(event)
end)
{:ok, ""}
end
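
# Retries with exponential backoff (see sleep/1 below), giving up with
# :error after @max_attempts tries.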
defp try_request(method, url, headers, body, current_attempt \\ 1)
defp try_request(_, _, _, _, current_attempt)
when current_attempt > @max_attempts, do: :error
defp try_request(method, url, headers, body, current_attempt) do
case request(method, url, headers, body) do
{:ok, id} -> {:ok, id}
_ ->
sleep(current_attempt)
try_request(method, url, headers, body, current_attempt + 1)
end
end
@doc """
Makes the HTTP request to Sentry using hackney.
Hackney options can be set via the `hackney_opts` configuration option.
"""
def request(method, url, headers, body) do
hackney_opts = Config.hackney_opts()
|> Keyword.put_new(:pool, @hackney_pool_name)
with {:ok, 200, _, client} <- :hackney.request(method, url, headers, body, hackney_opts),
{:ok, body} <- :hackney.body(client),
{:ok, json} <- Poison.decode(body) do
{:ok, Map.get(json, "id")}
else
{:ok, status, headers, client} ->
:hackney.skip_body(client)
error_header = :proplists.get_value("X-Sentry-Error", headers, "")
log_api_error("#{body}\nReceived #{status} from Sentry server: #{error_header}")
:error
_ ->
log_api_error(body)
:error
end
end
@doc """
Generates a Sentry API authorization header.
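
## Example

    # Hypothetical keys; the timestamp and client version vary per call.
    authorization_header("public", "secret")
    # => "Sentry sentry_version=5, sentry_client=..., sentry_timestamp=...,
    #     sentry_key=public, sentry_secret=secret"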
"""
@spec authorization_header(String.t, String.t) :: String.t
def authorization_header(public_key, secret_key) do
timestamp = Util.unix_timestamp()
data = [
sentry_version: @sentry_version,
sentry_client: @sentry_client,
sentry_timestamp: timestamp,
sentry_key: public_key,
sentry_secret: secret_key
]
query = data
|> Enum.map(fn {name, value} -> "#{name}=#{value}" end)
|> Enum.join(", ")
"Sentry " <> query
end
defp authorization_headers(public_key, secret_key) do
[
{"User-Agent", @sentry_client},
{"X-Sentry-Auth", authorization_header(public_key, secret_key)}
]
end
@doc """
Get a Sentry DSN which is simply a URI.
"""
@spec get_dsn! :: get_dsn
def get_dsn! do
# {PROTOCOL}://{PUBLIC_KEY}:{SECRET_KEY}@{HOST}/{PATH}{PROJECT_ID}
%URI{userinfo: userinfo, host: host, port: port, path: path, scheme: protocol} = URI.parse(Config.dsn())
[public_key, secret_key] = String.split(userinfo, ":", parts: 2)
[_, binary_project_id] = String.split(path, "/")
project_id = String.to_integer(binary_project_id)
endpoint = "#{protocol}://#{host}:#{port}/api/#{project_id}/store/"
{endpoint, public_key, secret_key}
end
def maybe_call_after_send_event(result, event) do
case Config.after_send_event() do
function when is_function(function, 2) ->
function.(event, result)
{module, function} ->
apply(module, function, [event, result])
nil ->
nil
_ ->
raise ArgumentError, message: ":after_send_event must be an anonymous function or a {Module, Function} tuple"
end
result
end
def maybe_call_before_send_event(event) do
case Config.before_send_event() do
function when is_function(function, 1) ->
function.(event)
{module, function} ->
apply(module, function, [event])
nil ->
event
_ ->
raise ArgumentError, message: ":before_send_event must be an anonymous function or a {Module, Function} tuple"
end
end
def hackney_pool_name do
@hackney_pool_name
end
defp log_api_error(body) do
Logger.warn(fn ->
["Failed to send Sentry event.", ?\n, body]
end)
end
defp sleep(attempt_number) do
# sleep 2^n seconds
:math.pow(2, attempt_number)
|> Kernel.*(1000)
|> Kernel.round()
|> :timer.sleep()
end
defp sample_event?(1), do: true
defp sample_event?(1.0), do: true
defp sample_event?(0), do: false
defp sample_event?(0.0), do: false
defp sample_event?(sample_rate) do
:rand.uniform < sample_rate
end
end
| lib/sentry/client.ex | 0.876944 | 0.454835 | client.ex | starcoder |
defmodule SymbolicExpression.Parser do
alias SymbolicExpression.Parser.State
require Logger
@whitespace [?\n, ?\s, ?\t]
@end_comment [?\n]
@string_terminals [?"]
@escaped_characters [?"]
@doc """
Parses an s-expression held in a string. Returns `{:ok, result}` on success,
`{:error, reason}` when the string does not contain a valid s-expression.
See [Wikipedia](https://en.wikipedia.org/wiki/S-expression) for more details
about s-expressions.
## Example
iex> alias SymbolicExpression.Parser
iex> Parser.parse ~S|(1 2 3)|
{:ok, [1, 2, 3]}
iex> Parser.parse "invalid"
{:error, :bad_arg}
"""
def parse(exp) when is_binary(exp) do
try do
{:ok, parse!(exp)}
rescue
error in [ArgumentError] ->
Logger.error "Failed to parse expression: '#{exp}' with error: '#{inspect error}'"
{:error, :bad_arg}
end
end
@doc """
Like `parse/1`, except raises an `ArgumentError` when the string does not
contain a valid s-expression.
## Example
iex> alias SymbolicExpression.Parser
iex> Parser.parse! ~S|(1 2 3)|
[1, 2, 3]
iex> try do
iex> Parser.parse! "invalid"
iex> rescue
iex> _ in [ArgumentError] ->
iex> :exception_raised
iex> end
:exception_raised
"""
def parse!(""), do: []
def parse!(exp) when is_binary(exp), do: _parse!(State.new exp)
@doc """
Like `parse/1` except the input is a file path instead of a binary.
"""
def parse_file(file) when is_binary(file) do
try do
{:ok, parse_file!(file)}
rescue
error in [ArgumentError] ->
Logger.error "Failed to parse expression in file: '#{file}' with error: '#{inspect error}'"
{:error, :bad_arg}
error in [File.Error] ->
Logger.error "Failed to parse expression in file: '#{file}' with error: '#{inspect error}'"
{:error, :bad_file}
end
end
@doc """
Like `parse_file/1` except raises `ArgumentError` when the string does not
contain a valid s-expression or `File.Error` if the file can't be read.
"""
def parse_file!(file) when is_binary(file) do
file |> Path.expand |> File.read! |> parse!
end
# Start comment.
defp _parse!(s = %State{expression: ";" <> rest, in_term: false, in_comment: false}) do
_parse! %State{s | expression: rest, in_comment: true}
end
# End comment.
defp _parse!(s = %State{expression: << c :: utf8 >> <> rest, in_comment: true}) when c in @end_comment do
_parse! %State{s | expression: rest, in_comment: false}
end
# In comment.
defp _parse!(s = %State{expression: << _c :: utf8 >> <> rest, in_comment: true}) do
_parse! %State{s | expression: rest}
end
# New scope.
defp _parse!(s = %State{expression: "(" <> rest, in_term: false, paren_count: count, result: result}) when count > 0 or result == [[]] do
_parse! %State{s | expression: rest, paren_count: count + 1, result: [[] | s.result]}
end
# End scope with no current term.
defp _parse!(s = %State{expression: ")" <> rest, term: "", in_term: false, paren_count: count, result: [first, second | tail]}) when count > 0 do
_parse! %State{s | expression: rest, paren_count: count - 1, result: [second ++ [first] | tail]}
end
# End scope with current term.
defp _parse!(s = %State{expression: ")" <> rest, in_term: false, paren_count: count, result: [first, second | tail]}) when count > 0 do
_parse! %State{s | expression: rest, term: "", paren_count: count - 1, result: [second ++ [first ++ [process s.term]] | tail]}
end
# Insignificant whitespace.
defp _parse!(s = %State{expression: << c :: utf8 >> <> rest, term: "", in_term: false}) when c in @whitespace do
_parse! %State{s | expression: rest}
end
# Significant whitespace.
defp _parse!(s = %State{expression: << c :: utf8 >> <> rest, in_term: false, result: [head | tail]}) when c in @whitespace do
_parse! %State{s | expression: rest, term: "", result: [head ++ [process s.term] | tail]}
end
# Open or close quoted string.
defp _parse!(s = %State{expression: << c :: utf8 >> <> rest}) when c in @string_terminals do
_parse! %State{s | expression: rest, term: s.term <> <<c>>, in_term: !s.in_term}
end
# Escaped characters.
defp _parse!(s = %State{expression: << ?\\, c :: utf8 >> <> rest}) when c in @escaped_characters do
_parse! %State{s | expression: rest, term: s.term <> <<c>>}
end
# Append character to current term.
defp _parse!(s = %State{expression: << c :: utf8 >> <> rest}) do
_parse! %State{s | expression: rest, term: s.term <> <<c>>}
end
# Base case.
defp _parse!(%State{expression: "", term: "", in_term: false, paren_count: 0, result: [[head | _]| _]}), do: head
# Catch all for errors.
defp _parse!(s) do
raise ArgumentError, message: """
Invalid s-expression with
remaining exp: #{inspect s.expression}
term: #{inspect s.term}
in term: #{inspect s.in_term}
result #{inspect s.result}
"""
end
defp process(term) do
process_int(term) || process_float(term) || process_quoted_string(term) || process_atom(term)
end
defp process_int(term) do
case Integer.parse(term) do
{int, ""} ->
int
_ ->
nil
end
end
defp process_float(term) do
case Float.parse(term) do
{float, ""} ->
float
_ ->
nil
end
end
defp process_quoted_string(term) do
case ~R/^["](.*)["]$/ |> Regex.run(term) do
[^term, match] ->
match
_ ->
nil
end
end
defp process_atom(term), do: String.to_atom(term)
end
|
lib/symbolic_expression/parser.ex
| 0.752286
| 0.60542
|
parser.ex
|
starcoder
|
defmodule TheFuzz.Util do
@moduledoc """
Utilities for TheFuzz.
"""
@doc """
Returns the length of a string; shorthand for `String.length/1`.
## Example
iex> TheFuzz.Util.len("Jason")
5
"""
def len(value), do: String.length(value)
@doc """
Checks to see if a string is alphabetic.
## Example
iex> TheFuzz.Util.is_alphabetic?("Jason5")
false
iex> TheFuzz.Util.is_alphabetic?("Jason")
true
"""
def is_alphabetic?(value) do
!Regex.match?(~r/[\W0-9]/, value)
end
@doc """
Removes adjacent duplicate characters from a string, except for the letter `c`, which is kept even when repeated.
## Example
iex> TheFuzz.Util.deduplicate("buzz")
"buz"
iex> TheFuzz.Util.deduplicate("accept")
"accept"
"""
def deduplicate(value) do
cond do
String.length(value) <= 1 ->
value
true ->
(String.codepoints(value)
|> Stream.chunk_every(2, 1, :discard)
|> Stream.filter(&(hd(&1) == "c" || hd(&1) != hd(tl(&1))))
|> Stream.map(&hd(&1))
|> Enum.to_list()
|> to_string) <> String.last(value)
end
end
@doc """
Finds the intersection of two lists. If Strings are provided, it uses the
codepoints of said string.
## Example
iex> TheFuzz.Util.intersect('context', 'contentcontent')
'contet'
iex> TheFuzz.Util.intersect("context", "contentcontent")
["c", "o", "n", "t", "e", "t"]
"""
def intersect(l1, l2) when is_binary(l1) and is_binary(l2) do
intersect(String.codepoints(l1), String.codepoints(l2))
end
def intersect(l1, l2), do: intersect(l1, l2, length(l1), length(l2), [])
defp intersect(_, _, s1, s2, acc) when s1 == 0 or s2 == 0, do: acc
defp intersect(l1, l2, s1, s2, acc) do
cond do
hd(l1) == hd(l2) ->
intersect(tl(l1), tl(l2), s1 - 1, s2 - 1, acc ++ [hd(l2)])
Enum.find_index(l1, &(&1 == hd(l2))) == nil ->
intersect(l1, tl(l2), s1, s2 - 1, acc)
true ->
cond do
max(s1, s2) == s1 ->
intersect(tl(l1), l2, s1 - 1, s2, acc)
true ->
intersect(l1, tl(l2), s1, s2 - 1, acc)
end
end
end
@doc """
[ngram tokenizes](http://en.wikipedia.org/wiki/N-gram) the string provided.
## Example
iex> TheFuzz.Util.ngram_tokenize("abcdefghijklmnopqrstuvwxyz", 2)
["ab", "bc", "cd", "de", "ef", "fg", "gh", "hi", "ij", "jk", "kl", "lm",
"mn", "no", "op", "pq", "qr", "rs", "st", "tu", "uv", "vw", "wx", "xy",
"yz"]
"""
def ngram_tokenize(string, n) when is_binary(string) do
ngram_tokenize(String.codepoints(string), n)
end
def ngram_tokenize(characters, n) do
case n <= 0 || length(characters) < n do
true -> nil
false -> Stream.chunk_every(characters, n, 1, :discard) |> Enum.map(&to_string(&1))
end
end
end
|
lib/the_fuzz/util.ex
| 0.747339
| 0.416174
|
util.ex
|
starcoder
|
defmodule CosmosDbEx.Response do
@moduledoc """
Formatted response from CosmosDb.
## Request Charge
This is the R/U (Request Unit) charge that the query cost to return the response from Cosmos Db.
In other words, this was the cost of all the database operations that had to happen in order
for CosmosDb to read or write to/from the database. For more information on Request Units please
see [Request Units in Azure Cosmos DB](https://docs.microsoft.com/en-us/azure/cosmos-db/request-units).
## Request Duration
This is the time, in milliseconds, that it took CosmosDb to execute the query sent.
## Body
This is the body of the response sent from Cosmos Db. It is expected that the body will be a map.
Additional items gathered from the response headers are placed in the `properties` field of the
`Response` struct. Convenience functions are provided for commonly used properties such as the
request charge, request duration, and continuation token.
"""
@enforce_keys [:body]
defstruct resource_id: nil,
count: 0,
body: nil,
properties: %{}
@type t :: %__MODULE__{
resource_id: String.t(),
count: integer(),
body: map(),
properties: map()
}
@doc """
Returns the request charge (Cosmos Db's Request Unit measurement) of the request. Returns nil if
no request charge is found in the response.
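## Example
    # Sketch; assumes `response` is a %CosmosDbEx.Response{} returned by a
    # previous CosmosDbEx call.
    ru = CosmosDbEx.Response.get_request_charge(response)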
"""
@spec get_request_charge(t()) :: float() | nil
def get_request_charge(%{properties: %{request_charge: request_charge}}) do
{ru, _} = Float.parse(request_charge)
ru
end
def get_request_charge(_), do: nil
@doc """
Returns the request duration, in milliseconds, of the request. Returns nil if no request_duration
is found.
"""
@spec get_request_duration(t()) :: String.t() | nil
def get_request_duration(%{properties: %{request_duration: request_duration}}) do
request_duration
end
def get_request_duration(_), do: nil
@doc """
Returns the continuation token of the request. This token can be sent with the next request to
retrieve the next page of results from the query.
# Example
iex> container = Container.new("TestItemsDb", "ItemsContainer")
iex> {:ok, response} = CosmosDbEx.get_documents(container)
iex> {:ok, response} = CosmosDbEx.get_documents(container, CosmosDbEx.Response.get_continuation_token(response))
> NOTE: Cosmos Db returns results in pages of up to a maximum of 1000 items.
Returns nil if no continuation token is found. Nil also signals that there are no more items left
from the query.
"""
@spec get_continuation_token(t()) :: map() | nil
def get_continuation_token(%{
properties: %{continuation_token: continuation_token}
}) do
continuation_token
end
def get_continuation_token(_), do: nil
end
|
lib/cosmos_db_ex/response.ex
| 0.859487
| 0.423995
|
response.ex
|
starcoder
|
defmodule Edeliver.Relup.Instructions.ResumeChannels do
@moduledoc """
This upgrade instruction resumes the websocket processes
connected to phoenix channels when the upgrade is done
to continue handling channel events. Use this instruction
at the end of the upgrade modification if the
`Edeliver.Relup.Instructions.SuspendChannels`
is used at the beginning. Make sure that it is used before
the
`Edeliver.Relup.Instructions.ResumeRanchAcceptors`
instruction to avoid attempting to resume recently started
websockets that were never suspended.
Suspending and resuming websocket processes for
phoenix channels requires a recent phoenix version
which handles sys events for websockets. It also
requires that the builtin phoenix pubsub backend
`Phoenix.PubSub.PG2` is used for the phoenix channels.
"""
use Edeliver.Relup.RunnableInstruction
alias Edeliver.Relup.Instructions.CheckRanchAcceptors
alias Edeliver.Relup.Instructions.CheckRanchConnections
@doc """
Returns name of the application.
This name is taken as argument for the `run/1` function and is required
to access the acceptor processes through the supervision tree
"""
def arguments(_instructions = %Instructions{}, _config = %{name: name}) when is_atom(name) do
name
end
def arguments(_instructions = %Instructions{}, _config = %{name: name}) when is_binary(name) do
name |> String.to_atom
end
@doc """
This module depends on the `Edeliver.Relup.Instructions.CheckRanchAcceptors` and
the `Edeliver.Relup.Instructions.CheckRanchConnections` module
which must be loaded before this instruction for upgrades and unloaded
after this instruction for downgrades.
"""
@spec dependencies() :: [Edeliver.Relup.Instructions.CheckRanchAcceptors]
def dependencies do
[Edeliver.Relup.Instructions.CheckRanchAcceptors, Edeliver.Relup.Instructions.CheckRanchConnections]
end
@doc """
Resumes a list of processes.
Because resuming a process might take a while, depending on the length
of its message queue or the duration of the operation it is currently processing, resuming is done
asynchronously for each process by spawning a new process which calls `:sys.resume/2` and then waiting
for all results before returning from this function. Be careful when using `:infinity` as timeout,
because this function might hang indefinitely if one of the processes does not handle sys events.
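## Example
    # Sketch; `pids` stands for the previously suspended websocket processes.
    case bulk_resume(pids, 5_000) do
      :ok -> :all_resumed
      :not_supported -> :resume_not_supported
      {:errors, count, errors} -> {:partially_resumed, count, errors}
    end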
"""
@spec bulk_resume(processes::[pid], timeout::pos_integer|:infinity) :: :ok | {:errors, count::pos_integer, [{pid::pid, reason::term}]} | :not_supported
def bulk_resume(processes, timeout \\ 1000) do
pids_and_monitor_refs = for pid <- processes do
spawned_pid = :proc_lib.spawn(fn ->
:ok = :sys.resume(pid, timeout)
end)
{pid, spawned_pid, :erlang.monitor(:process, spawned_pid)}
end
result = Enum.reduce(pids_and_monitor_refs, {0, 0, []}, fn({pid, spawned_pid, monitor_ref}, {errors_count, not_supported_count, errors}) ->
receive do
{:DOWN, ^monitor_ref, :process, ^spawned_pid, reason} ->
case reason do
:normal -> {errors_count, not_supported_count, errors}
error = {:noproc, {:sys, :resume, [^pid, ^timeout]}} -> {errors_count+1, not_supported_count+1, [{pid, error}|errors]}
error = {:timeout, {:sys, :resume, [^pid, ^timeout]}} -> {errors_count+1, not_supported_count+1, [{pid, error}|errors]}
error -> {errors_count+1, not_supported_count, [{pid, error}|errors]}
end
end
end)
case result do
{_errors_count = 0, _not_supported_count = 0, _errors = []} -> :ok
{not_supported_count, not_supported_count, _errors = [_|_]} when length(processes) == not_supported_count -> :not_supported
{errors_count, _not_supported_count, errors} -> {:errors, errors_count, Enum.reverse(errors)}
end
end
@doc """
Resumes all websocket channels
to continue handling channel events after the upgrade. This is possible
only in recent phoenix versions since handling sys events is required for resuming.
If an older version is used, a warning is printed that resuming is not supported.
"""
@spec run(otp_application_name::atom) :: :ok
def run(otp_application_name) do
info "Resuming phoenix websocket channels..."
ranch_listener_sup = CheckRanchAcceptors.ranch_listener_sup(otp_application_name)
assume true = is_pid(ranch_listener_sup), "Failed to resume phoenix websocket channels. Ranch listener supervisor not found."
ranch_connections_sup = CheckRanchConnections.ranch_connections_sup(ranch_listener_sup)
assume true = is_pid(ranch_connections_sup), "Failed to resume phoenix websocket channels. Ranch connections supervisor not found."
assume true = is_list(connections = CheckRanchConnections.ranch_connections(ranch_connections_sup)), "Failed to resume phoenix websocket channels. No connection processes found."
case CheckRanchConnections.websocket_channel_connections(otp_application_name, connections) do
[] -> info "No websocket connections for phoenix channels are running."
websocket_connections = [_|_] ->
websocket_connections_count = Enum.count(websocket_connections)
info "Resuming #{inspect websocket_connections_count} websocket connections..."
case bulk_resume(websocket_connections) do
:ok -> info "Resumed #{inspect websocket_connections_count} websocket connections."
:not_supported ->
warn "Resuming websocket connections for phoenix channels is not supported."
{:errors, errors_count, _errors} ->
succeeded_count = websocket_connections_count - errors_count
warn "Resumed #{inspect succeeded_count} of #{inspect websocket_connections_count} websocket connections. #{inspect errors_count} failed."
debug "#{inspect errors_count} not resumed websockets might still hang for a while or might have been crashed."
end
:not_detected ->
warn "Resuming websocket connections for phoenix channels is not supported because websocket connections cannot be detected."
end
end
end
|
lib/edeliver/relup/instructions/resume_channels.ex
| 0.696681
| 0.436742
|
resume_channels.ex
|
starcoder
|
defmodule Scenic.Primitive.Group do
@moduledoc """
A container to hold other primitives.
Any styles placed on a group will be inherited by the primitives in the
group. Any transforms placed on a group will be multiplied into the transforms
in the primitives in the group.
## Data
`uids`
The data for a group is a list of internal uids for the primitives it contains
## Styles
The group is special in that it accepts all styles and transforms, even if they
are non-standard. These are then inherited by any primitives, including SceneRefs
## Usage
You should add/modify primitives via the helper functions in
[`Scenic.Primitives`](Scenic.Primitives.html#group/3)
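## Example
A minimal sketch using the `group/3` helper from `Scenic.Primitives`; styles
and transforms set on the group are inherited by its children:
    import Scenic.Primitives
    graph =
      Scenic.Graph.build()
      |> group(fn g ->
        g
        |> text("Hello")
        |> text("World", translate: {0, 20})
      end, translate: {10, 10})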
"""
use Scenic.Primitive
alias Scenic.Primitive
# import IEx
# ============================================================================
# data verification and serialization
# --------------------------------------------------------
@doc false
def build(nil, opts), do: build([], opts)
def build(ids, opts) do
verify!(ids)
Primitive.build(__MODULE__, ids, opts)
end
# --------------------------------------------------------
@doc false
def info(data),
do: """
#{IO.ANSI.red()}#{__MODULE__} data must be a list of valid uids of other elements in the graph.
#{IO.ANSI.yellow()}Received: #{inspect(data)}
#{IO.ANSI.default_color()}
"""
# --------------------------------------------------------
@doc false
def verify(ids) when is_list(ids) do
if Enum.all?(ids, &is_integer/1), do: {:ok, ids}, else: :invalid_data
end
def verify(_), do: :invalid_data
# ============================================================================
# filter and gather styles
@doc """
Returns a list of styles recognized by this primitive.
"""
@spec valid_styles() :: [:all, ...]
def valid_styles(), do: [:all]
def filter_styles(styles) when is_map(styles), do: styles
# ============================================================================
# apis to manipulate the list of child ids
# ----------------------------------------------------------------------------
def insert_at(%Primitive{module: __MODULE__, data: uid_list} = p, index, uid) do
Map.put(
p,
:data,
List.insert_at(uid_list, index, uid)
)
end
# ----------------------------------------------------------------------------
def delete(%Primitive{module: __MODULE__, data: uid_list} = p, uid) do
Map.put(
p,
:data,
Enum.reject(uid_list, fn xid -> xid == uid end)
)
end
# ----------------------------------------------------------------------------
def increment(%Primitive{module: __MODULE__, data: uid_list} = p, offset) do
Map.put(
p,
:data,
Enum.map(uid_list, fn xid -> xid + offset end)
)
end
end
|
lib/scenic/primitive/group.ex
| 0.788217
| 0.442516
|
group.ex
|
starcoder
|
defmodule PixelFont.TableSource.Glyf.Item do
alias PixelFont.Glyph
alias PixelFont.Glyph.{BitmapData, CompositeData}
alias PixelFont.TableSource.Glyf.Simple
alias PixelFont.TableSource.Glyf.Composite
defstruct ~w(num_of_contours xmin ymin xmax ymax description)a
@type t :: %__MODULE__{
num_of_contours: integer(),
xmin: integer(),
ymin: integer(),
xmax: integer(),
ymax: integer(),
description: Simple.t() | Composite.t()
}
@spec new(Glyph.t()) :: t()
def new(glyph)
def new(%Glyph{data: %BitmapData{} = data}) do
%__MODULE__{
num_of_contours: length(data.contours),
xmin: data.xmin,
ymin: data.ymin,
xmax: data.xmax,
ymax: data.ymax,
description: Simple.new(data.contours)
}
end
def new(%Glyph{data: %CompositeData{} = data}) do
boundaries =
data.components
|> Enum.map(fn %{glyph: %Glyph{} = ref_glyph, x_offset: xoff, y_offset: yoff} ->
%BitmapData{xmin: xmin, ymin: ymin, xmax: xmax, ymax: ymax} = ref_glyph.data
{xmin + xoff, ymin + yoff, xmax + xoff, ymax + yoff}
end)
zero = fn -> 0 end
%__MODULE__{
num_of_contours: -1,
xmin: boundaries |> Enum.map(&elem(&1, 0)) |> Enum.min(zero),
ymin: boundaries |> Enum.map(&elem(&1, 1)) |> Enum.min(zero),
xmax: boundaries |> Enum.map(&elem(&1, 2)) |> Enum.max(zero),
ymax: boundaries |> Enum.map(&elem(&1, 3)) |> Enum.max(zero),
description: Composite.new(data.components)
}
end
@spec compile(t()) :: map()
def compile(item)
def compile(%{num_of_contours: 0}) do
%{
data: "",
real_size: 0,
padded_size: 0
}
end
def compile(item) do
data = [
<<item.num_of_contours::big-16>>,
<<item.xmin::big-16>>,
<<item.ymin::big-16>>,
<<item.xmax::big-16>>,
<<item.ymax::big-16>>,
compile_description(item.description)
]
data_bin = IO.iodata_to_binary(data)
size = byte_size(data_bin)
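# Pad the compiled record to a multiple of four bytes; `pad_size` is the
# number of zero bytes needed to reach the next 4-byte boundary.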
pad_size =
case rem(size, 4) do
0 -> 0
x -> 4 - x
end
pad = <<0::pad_size*8>>
%{
data: data_bin <> pad,
real_size: size,
padded_size: size + pad_size
}
end
defp compile_description(desc)
defp compile_description(%Simple{} = desc) do
%Simple{inst_size: inst_size} = desc
[
Enum.map(desc.last_points, &<<&1::big-16>>),
<<inst_size::big-16>>,
desc.inst,
desc.flags,
Enum.map(desc.x_coords, &<<&1::8>>),
Enum.map(desc.y_coords, &<<&1::8>>)
]
end
defp compile_description(%Composite{} = desc) do
desc.components
end
end
|
lib/pixel_font/table_source/glyf/item.ex
| 0.76555
| 0.403596
|
item.ex
|
starcoder
|
defmodule Plymio.Funcio.Index do
@moduledoc ~S"""
Utility Functions for Indices
## Documentation Terms
See also `Plymio.Funcio` for an overview and other documentation terms.
### *index*
An *index* is an integer
### *indices*
An *indices* is a list of *index*
### *index range*
An *index range* is a specification for an *indices*.
It can include integers, integer ranges, lists of integers or an
enumerable that realises to a list of integers.
Examples are:
0
-2
1 .. 5
[99, 1, 2, -1, -2, 4, -1, 0]
[0 .. 4, [5 , 6], 7 .. 9]
0 .. 9 |> Stream.map(&(&1))
[0 .. 4, [5 , 6], 7 .. 9, 0 .. 9, 0 .. 9 |> Stream.map(&(&1))]
"""
import Plymio.Funcio.Error,
only: [
new_error_result: 1
]
import Plymio.Fontais.Utility,
only: [
list_wrap_flat_just: 1
]
import Plymio.Fontais.Guard,
only: [
is_positive_integer: 1
]
import Plymio.Funcio.Enum.Map.Collate,
only: [
map_collate0_enum: 2
]
@type error :: Plymio.Funcio.error()
@type index :: Plymio.Funcio.index()
@type indices :: Plymio.Funcio.indices()
@type fun1_predicate :: Plymio.Funcio.fun1_predicate()
@error_text_index_invalid "index invalid"
@error_text_index_range_invalid "index range invalid"
@error_text_indices_invalid "indices invalid"
@doc ~S"""
`validate_index/1` takes a value and validates it is an integer, returning
`{:ok, index}`, otherwise `{:error, error}`
## Examples
iex> 1 |> validate_index
{:ok, 1}
iex> {:error, error} = [1,2,3] |> validate_index
...> error |> Exception.message
"index invalid, got: [1, 2, 3]"
iex> {:error, error} = :not_an_index |> validate_index
...> error |> Exception.message
"index invalid, got: :not_an_index"
"""
@since "0.1.0"
@spec validate_index(any) :: {:ok, index} | {:error, error}
def validate_index(index)
def validate_index(index) when is_integer(index) do
{:ok, index}
end
def validate_index(index) do
new_error_result(m: @error_text_index_invalid, v: index)
end
@doc false
def new_index_error_result(index)
def new_index_error_result(index) do
index
|> List.wrap()
|> length
|> case do
1 ->
new_error_result(m: @error_text_index_invalid, v: index |> hd)
_ ->
new_error_result(m: @error_text_indices_invalid, v: index)
end
end
@doc ~S"""
`validate_indices/1` takes a list and validates each element is an integer, returning
`{:ok, indices}`, otherwise `{:error, error}`
## Examples
iex> [1,2,3] |> validate_indices
{:ok, [1,2,3]}
iex> {:error, error} = 42 |> validate_indices
...> error |> Exception.message
"indices invalid, got: 42"
iex> {:error, error} = [1,:b,3] |> validate_indices
...> error |> Exception.message
"index invalid, got: :b"
iex> {:error, error} = [1,:b,:c] |> validate_indices
...> error |> Exception.message
"indices invalid, got: [:b, :c]"
iex> {:error, error} = 42 |> validate_indices
...> error |> Exception.message
"indices invalid, got: 42"
"""
@since "0.1.0"
@spec validate_indices(any) :: {:ok, indices} | {:error, error}
def validate_indices(indices)
def validate_indices(indices) when is_list(indices) do
indices
|> Enum.split_with(&is_integer/1)
|> case do
{indices, []} ->
{:ok, indices}
{_, others} ->
others |> new_index_error_result
end
end
def validate_indices(indices) do
new_error_result(m: @error_text_indices_invalid, v: indices)
end
@doc ~S"""
`validate_positive_indices/1` takes a list and validates each element is a postive integer,
returning `{:ok, indices}`, otherwise `{:error, error}`
## Examples
iex> [1,2,3] |> validate_positive_indices
{:ok, [1,2,3]}
iex> {:error, error} = [1,-1,2] |> validate_positive_indices
...> error |> Exception.message
"index invalid, got: -1"
iex> {:error, error} = 42 |> validate_positive_indices
...> error |> Exception.message
"indices invalid, got: 42"
iex> {:error, error} = [1,:b,3] |> validate_positive_indices
...> error |> Exception.message
"index invalid, got: :b"
iex> {:error, error} = [1,:b,:c] |> validate_positive_indices
...> error |> Exception.message
"indices invalid, got: [:b, :c]"
"""
@since "0.1.0"
@spec validate_positive_indices(any) :: {:ok, indices} | {:error, error}
def validate_positive_indices(indices)
def validate_positive_indices(indices) when is_list(indices) do
indices
|> Enum.split_with(&is_positive_integer/1)
|> case do
{indices, []} ->
{:ok, indices}
{_, others} ->
others |> new_index_error_result
end
end
def validate_positive_indices(indices) do
new_error_result(m: @error_text_indices_invalid, v: indices)
end
@doc ~S"""
`normalise_indices/1` calls `Plymio.Fontais.Utility.list_wrap_flat_just/1`
on its argument and then calls
`validate_indices/1`, returning `{:ok, indices}`, otherwise
`{:error, error}`
## Examples
iex> [1,2,3] |> normalise_indices
{:ok, [1,2,3]}
iex> 42 |> normalise_indices
{:ok, [42]}
iex> {:error, error} = [1,:b,3] |> normalise_indices
...> error |> Exception.message
"index invalid, got: :b"
iex> {:error, error} = [1,:b,:c] |> normalise_indices
...> error |> Exception.message
"indices invalid, got: [:b, :c]"
iex> {:error, error} = :not_an_index |> normalise_indices
...> error |> Exception.message
"index invalid, got: :not_an_index"
"""
@since "0.1.0"
@spec normalise_indices(any) :: {:ok, indices} | {:error, error}
def normalise_indices(indices) do
indices
|> list_wrap_flat_just
|> validate_indices
end
@doc ~S"""
`normalise_index_range/1` takes an *index range* and realises it to return `{:ok, indices}`.
## Examples
iex> 1 |> normalise_index_range
{:ok, [1]}
iex> -2 |> normalise_index_range
{:ok, [-2]}
iex> [1,2,3,3,1,2] |> normalise_index_range
{:ok, [1,2,3,3,1,2]}
iex> 0 .. 4 |> normalise_index_range
{:ok, [0,1,2,3,4]}
iex> 0 .. 9 |> Stream.map(&(&1)) |> normalise_index_range
{:ok, [0,1,2,3,4,5,6,7,8,9]}
iex> [0 .. 4, [5 , 6], 7 .. 9, 0 .. 9, 0 .. 9 |> Stream.map(&(&1))]
...> |> normalise_index_range
{:ok, [0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9]}
iex> {:error, error} = :not_valid |> normalise_index_range
...> error |> Exception.message
"index range invalid, got: :not_valid"
"""
@since "0.1.0"
@spec normalise_index_range(any) :: {:ok, indices} | {:error, error}
def normalise_index_range(indices)
def normalise_index_range(index_range) when is_integer(index_range) do
{:ok, [index_range]}
end
def normalise_index_range(%Stream{} = index_range) do
index_range
|> Enum.to_list()
|> normalise_index_range
end
def normalise_index_range(_.._ = index_range) do
index_range
|> Enum.to_list()
|> normalise_index_range
end
def normalise_index_range(index_range) when is_map(index_range) do
index_range
|> Map.keys()
|> normalise_index_range
end
def normalise_index_range(index_range)
when is_atom(index_range) do
index_range
|> case do
:first ->
0 |> normalise_index_range
:last ->
-1 |> normalise_index_range
x ->
new_error_result(m: @error_text_index_range_invalid, v: x)
end
end
def normalise_index_range(index_range) when is_list(index_range) do
cond do
Keyword.keyword?(index_range) ->
index_range
|> Keyword.values()
|> normalise_index_range
true ->
index_range
|> map_collate0_enum(fn index ->
index
|> normalise_index_range
|> case do
{:error, %{__struct__: _}} ->
new_error_result(m: @error_text_index_invalid, v: index)
{:ok, _} = result ->
result
end
end)
|> case do
{:error, %{__exception__: true}} = result -> result
{:ok, indices} -> indices |> normalise_indices
end
end
end
def normalise_index_range(index_range) do
new_error_result(m: @error_text_index_range_invalid, v: index_range)
end
@doc ~S"""
`create_predicate_indices/1` takes an *indices* and creates an arity 1 function that expects to be passed a `{value, index}` 2tuple and returns `true` if the `index` is in the *indices*, else `false`.
## Examples
iex> {:ok, fun} = [0,1,2] |> create_predicate_indices
...> true = {:x, 0} |> fun.()
...> true = {%{a: 1}, 2} |> fun.()
...> true = {42, 2} |> fun.()
...> {"HelloWorld", 4} |> fun.()
false
iex> {:error, error} = :not_valid |> create_predicate_indices
...> error |> Exception.message
"indices invalid, got: :not_valid"
"""
@since "0.1.0"
@spec create_predicate_indices(any) :: {:ok, fun1_predicate} | {:error, error}
def create_predicate_indices(indices) do
with {:ok, indices} <- indices |> validate_indices do
indices_map = indices |> Map.new(fn k -> {k, nil} end)
fun = fn
{_form, index} ->
indices_map |> Map.has_key?(index)
x ->
raise ArgumentError,
message: "predicate argument {form,index} invalid; got #{inspect(x)}"
end
{:ok, fun}
else
{:error, %{__exception__: true}} = result -> result
end
end
@doc ~S"""
`create_predicate_index_range/1` takes *index range* and creates an arity 1 function that expects to be passed a `{value, index}` 2tuple and returns `true` if the `index` is in the *index range*, else `false`.
If the *index range* is an arity 1 function, it is "wrapped" to ensure the result is always `true` or `false`.
If the *index range* is `nil` an always `true` predicate will be returned.
Note negative indices will cause an error.
## Examples
iex> {:ok, fun} = 42 |> create_predicate_index_range
...> true = {:x, 42} |> fun.()
...> {"HelloWorld", 4} |> fun.()
false
iex> {:ok, fun} = [0,1,2] |> create_predicate_index_range
...> true = {:x, 0} |> fun.()
...> true = {%{a: 1}, 2} |> fun.()
...> true = {42, 2} |> fun.()
...> {"HelloWorld", 4} |> fun.()
false
iex> {:error, error} = [-2,0,-1] |> create_predicate_index_range
...> error |> Exception.message
"indices invalid, got: [-2, -1]"
iex> {:ok, fun} = 0 .. 2 |> create_predicate_index_range
...> true = {:x, 0} |> fun.()
...> true = {%{a: 1}, 2} |> fun.()
...> true = {42, 2} |> fun.()
...> {"HelloWorld", 4} |> fun.()
false
iex> {:ok, fun} = fn _ -> false end |> create_predicate_index_range
...> false = {:x, 0} |> fun.()
...> false = {%{a: 1}, 2} |> fun.()
...> false = {42, 2} |> fun.()
...> {"HelloWorld", 4} |> fun.()
false
iex> {:ok, fun} = fn _ -> 42 end |> create_predicate_index_range
...> true = {:x, 0} |> fun.()
...> true = {%{a: 1}, 2} |> fun.()
...> true = {42, 2} |> fun.()
...> {"HelloWorld", 4} |> fun.()
true
iex> {:ok, fun} = nil |> create_predicate_index_range
...> true = {:x, 0} |> fun.()
...> true = {%{a: 1}, 2} |> fun.()
...> true = {42, 2} |> fun.()
...> {"HelloWorld", 4} |> fun.()
true
iex> {:error, error} = :not_valid |> create_predicate_index_range
...> error |> Exception.message
"index range invalid, got: :not_valid"
"""
@since "0.1.0"
@spec create_predicate_index_range(any) :: {:ok, fun1_predicate} | {:error, error}
def create_predicate_index_range(index_range)
# range = arity 1 fun
def create_predicate_index_range(index_range) when is_function(index_range, 1) do
# ensure true / false
fun = fn v ->
v
|> index_range.()
|> case do
x when x in [nil, false] -> false
_ -> true
end
end
{:ok, fun}
end
# nil => always true
def create_predicate_index_range(index_range) when is_nil(index_range) do
{:ok, fn _ -> true end}
end
def create_predicate_index_range(index_range) do
with {:ok, range_indices} <- index_range |> normalise_index_range,
{:ok, positive_indices} <- range_indices |> validate_positive_indices,
{:ok, _fun} = result <- positive_indices |> create_predicate_indices do
result
else
{:error, %{__exception__: true}} = result -> result
end
end
end
|
lib/funcio/index/index.ex
| 0.906866
| 0.501404
|
index.ex
|
starcoder
|
defmodule Milkpotion.Request do
@moduledoc """
This module is the main entry point for issuing requests to the
_Remember the Milk_ service. It rate-limits any request (if necessary)
and also ensures a proper error handling.
"""
require Logger
alias Milkpotion.Base.RateLimiter
@doc """
Issues a GET request to the given `url` with the provided http `headers`.
It returns either `{:ok, body}` where body contains the parsed JSON data,
or `{:error, cause, message}`. The cause will be an atom describing the
error in more detail. The possible values are:
- `:http`: This captures HTTP connection issues (e.g., timeouts) as well as
unexpected HTTP status codes.
- `:json`: The received data was malformed, or at least not of the expected
format.
- `:request`: The RTM service itself reported a failure; the accompanying
map contains the error code and message returned by the service.
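## Example
    # Sketch; the URL is illustrative and omits the required auth parameters.
    {:ok, body} = Milkpotion.Request.get("https://api.rememberthemilk.com/services/rest/?method=rtm.test.echo")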
"""
@spec get(binary, map) :: {:ok, map} | {:error, atom, binary | map}
def get(url, headers \\ %{}) do
with {_, response} <- RateLimiter.run(:get, url, "", headers),
{:ok, body} <- parse_http_response(response),
{:ok, _} = rtm <- parse_rtm_response(body), do: rtm
end
@spec parse_http_response(%HTTPoison.Response{}) :: {:ok, binary}
def parse_http_response(%HTTPoison.Response{status_code: 200, body: body}) do
{:ok, body}
end
@spec parse_http_response(%HTTPoison.Response{}) :: {:error, :http, binary}
def parse_http_response(%HTTPoison.Response{status_code: code}) do
error_message = "Unexpected: RTM service responded with #{code}."
_ = Logger.error error_message
{:error, :http, error_message}
end
@spec parse_http_response(%HTTPoison.Error{}) :: {:error, :http, binary}
def parse_http_response(%HTTPoison.Error{reason: reason}) do
_ = Logger.info "Could not fetch data. Reason: #{reason}"
{:error, :http, reason}
end
@spec parse_rtm_response(binary) :: {:ok, map} | {:error, atom, binary} | {:error, atom, map}
def parse_rtm_response(raw) do
ret = with {:ok, data} <- Poison.decode(raw),
{:ok, body} <- extract_rtm_body(data),
:ok <- extract_rtm_status(body) do
{:ok, body}
end
case ret do
{:ok, _} = success -> success
{:error, :invalid} -> {:error, :json, "Parse error: bad format"}
{:error, {:invalid, msg}} -> {:error, :json, "Parse error: #{msg}"}
{:error, _, _} = failure -> failure
end
end
@spec extract_rtm_body(map) :: {:ok, map} | {:error, :json, binary}
defp extract_rtm_body(data) when is_map(data) do
case Map.fetch(data, "rsp") do
{:ok, _} = success ->
success
:error ->
{:error, :json, "Bad response: it did not contain the field \"rsp\"."}
end
end
@spec extract_rtm_status(map) :: :ok | {:error, atom, binary | map}
defp extract_rtm_status(data) when is_map(data) do
case Map.fetch(data, "stat") do
{:ok, status} ->
case status do
"ok" ->
:ok
"fail" ->
case extract_rtm_error(data) do
{:ok, info} ->
{:error, :request, info}
{:error, _, _} = error ->
error
end
end
:error ->
{:error, :json, "Bad response: it did not contain the field \"stat\"."}
end
end
@spec extract_rtm_error(map) :: {:ok, map} | {:error, :json, binary}
defp extract_rtm_error(data) when is_map(data) do
ret = with {:ok, error} <- Map.fetch(data, "err"),
{:ok, code} <- Map.fetch(error, "code"),
{:ok, message} <- Map.fetch(error, "msg") do
{:ok, %{code: code, message: message}}
end
case ret do
{:ok, _} = success ->
success
:error ->
error_message = "Could not parse error information from response: #{inspect data}"
_ = Logger.error error_message
{:error, :json, error_message}
end
end
end
|
lib/milkpotion/request.ex
| 0.827096
| 0.41938
|
request.ex
|
starcoder
|
defmodule ABCI.Types.Timestamp do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
seconds: integer,
nanos: integer
}
defstruct [:seconds, :nanos]
field :seconds, 1, type: :int64
field :nanos, 2, type: :int32
end
defmodule ABCI.Types.KVPair do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
key: String.t(),
value: String.t()
}
defstruct [:key, :value]
field :key, 1, type: :bytes
field :value, 2, type: :bytes
end
defmodule ABCI.Types.KI64Pair do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
key: String.t(),
value: integer
}
defstruct [:key, :value]
field :key, 1, type: :bytes
field :value, 2, type: :int64
end
defmodule ABCI.Types.ProofOp do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
type: String.t(),
key: String.t(),
data: String.t()
}
defstruct [:type, :key, :data]
field :type, 1, type: :string
field :key, 2, type: :bytes
field :data, 3, type: :bytes
end
defmodule ABCI.Types.Proof do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
ops: [ABCI.Types.ProofOp.t()]
}
defstruct [:ops]
field :ops, 1, repeated: true, type: ABCI.Types.ProofOp
end
defmodule ABCI.Types.Request do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
value: {atom, any}
}
defstruct [:value]
oneof :value, 0
field :echo, 2, type: ABCI.Types.RequestEcho, oneof: 0
field :flush, 3, type: ABCI.Types.RequestFlush, oneof: 0
field :info, 4, type: ABCI.Types.RequestInfo, oneof: 0
field :set_option, 5, type: ABCI.Types.RequestSetOption, oneof: 0
field :init_chain, 6, type: ABCI.Types.RequestInitChain, oneof: 0
field :query, 7, type: ABCI.Types.RequestQuery, oneof: 0
field :begin_block, 8, type: ABCI.Types.RequestBeginBlock, oneof: 0
field :check_tx, 9, type: ABCI.Types.RequestCheckTx, oneof: 0
field :deliver_tx, 19, type: ABCI.Types.RequestDeliverTx, oneof: 0
field :end_block, 11, type: ABCI.Types.RequestEndBlock, oneof: 0
field :commit, 12, type: ABCI.Types.RequestCommit, oneof: 0
end
defmodule ABCI.Types.RequestEcho do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
message: String.t()
}
defstruct [:message]
field :message, 1, type: :string
end
defmodule ABCI.Types.RequestFlush do
@moduledoc false
use Protobuf, syntax: :proto3
defstruct []
end
defmodule ABCI.Types.RequestInfo do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
version: String.t(),
block_version: non_neg_integer,
p2p_version: non_neg_integer
}
defstruct [:version, :block_version, :p2p_version]
field :version, 1, type: :string
field :block_version, 2, type: :uint64
field :p2p_version, 3, type: :uint64
end
defmodule ABCI.Types.RequestSetOption do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
key: String.t(),
value: String.t()
}
defstruct [:key, :value]
field :key, 1, type: :string
field :value, 2, type: :string
end
defmodule ABCI.Types.RequestInitChain do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
time: ABCI.Types.Timestamp.t(),
chain_id: String.t(),
consensus_params: ABCI.Types.ConsensusParams.t(),
validators: [ABCI.Types.ValidatorUpdate.t()],
app_state_bytes: String.t()
}
defstruct [:time, :chain_id, :consensus_params, :validators, :app_state_bytes]
field :time, 1, type: ABCI.Types.Timestamp
field :chain_id, 2, type: :string
field :consensus_params, 3, type: ABCI.Types.ConsensusParams
field :validators, 4, repeated: true, type: ABCI.Types.ValidatorUpdate
field :app_state_bytes, 5, type: :bytes
end
defmodule ABCI.Types.RequestQuery do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
data: String.t(),
path: String.t(),
height: integer,
prove: boolean
}
defstruct [:data, :path, :height, :prove]
field :data, 1, type: :bytes
field :path, 2, type: :string
field :height, 3, type: :int64
field :prove, 4, type: :bool
end
defmodule ABCI.Types.RequestBeginBlock do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
hash: String.t(),
header: ABCI.Types.Header.t(),
last_commit_info: ABCI.Types.LastCommitInfo.t(),
byzantine_validators: [ABCI.Types.Evidence.t()]
}
defstruct [:hash, :header, :last_commit_info, :byzantine_validators]
field :hash, 1, type: :bytes
field :header, 2, type: ABCI.Types.Header
field :last_commit_info, 3, type: ABCI.Types.LastCommitInfo
field :byzantine_validators, 4, repeated: true, type: ABCI.Types.Evidence
end
defmodule ABCI.Types.RequestCheckTx do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
tx: String.t()
}
defstruct [:tx]
field :tx, 1, type: :bytes
end
defmodule ABCI.Types.RequestDeliverTx do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
tx: String.t()
}
defstruct [:tx]
field :tx, 1, type: :bytes
end
defmodule ABCI.Types.RequestEndBlock do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
height: integer
}
defstruct [:height]
field :height, 1, type: :int64
end
defmodule ABCI.Types.RequestCommit do
@moduledoc false
use Protobuf, syntax: :proto3
defstruct []
end
defmodule ABCI.Types.Response do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
value: {atom, any}
}
defstruct [:value]
oneof :value, 0
field :exception, 1, type: ABCI.Types.ResponseException, oneof: 0
field :echo, 2, type: ABCI.Types.ResponseEcho, oneof: 0
field :flush, 3, type: ABCI.Types.ResponseFlush, oneof: 0
field :info, 4, type: ABCI.Types.ResponseInfo, oneof: 0
field :set_option, 5, type: ABCI.Types.ResponseSetOption, oneof: 0
field :init_chain, 6, type: ABCI.Types.ResponseInitChain, oneof: 0
field :query, 7, type: ABCI.Types.ResponseQuery, oneof: 0
field :begin_block, 8, type: ABCI.Types.ResponseBeginBlock, oneof: 0
field :check_tx, 9, type: ABCI.Types.ResponseCheckTx, oneof: 0
field :deliver_tx, 10, type: ABCI.Types.ResponseDeliverTx, oneof: 0
field :end_block, 11, type: ABCI.Types.ResponseEndBlock, oneof: 0
field :commit, 12, type: ABCI.Types.ResponseCommit, oneof: 0
end
defmodule ABCI.Types.ResponseException do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
error: String.t()
}
defstruct [:error]
field :error, 1, type: :string
end
defmodule ABCI.Types.ResponseEcho do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
message: String.t()
}
defstruct [:message]
field :message, 1, type: :string
end
defmodule ABCI.Types.ResponseFlush do
@moduledoc false
use Protobuf, syntax: :proto3
defstruct []
end
defmodule ABCI.Types.ResponseInfo do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
data: String.t(),
version: String.t(),
app_version: non_neg_integer,
last_block_height: integer,
last_block_app_hash: String.t()
}
defstruct [:data, :version, :app_version, :last_block_height, :last_block_app_hash]
field :data, 1, type: :string
field :version, 2, type: :string
field :app_version, 3, type: :uint64
field :last_block_height, 4, type: :int64
field :last_block_app_hash, 5, type: :bytes
end
defmodule ABCI.Types.ResponseSetOption do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
code: non_neg_integer,
log: String.t(),
info: String.t()
}
defstruct [:code, :log, :info]
field :code, 1, type: :uint32
field :log, 3, type: :string
field :info, 4, type: :string
end
defmodule ABCI.Types.ResponseInitChain do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
consensus_params: ABCI.Types.ConsensusParams.t(),
validators: [ABCI.Types.ValidatorUpdate.t()]
}
defstruct [:consensus_params, :validators]
field :consensus_params, 1, type: ABCI.Types.ConsensusParams
field :validators, 2, repeated: true, type: ABCI.Types.ValidatorUpdate
end
defmodule ABCI.Types.ResponseQuery do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
code: non_neg_integer,
log: String.t(),
info: String.t(),
index: integer,
key: String.t(),
value: String.t(),
proof: ABCI.Types.Proof.t(),
height: integer,
codespace: String.t()
}
defstruct [:code, :log, :info, :index, :key, :value, :proof, :height, :codespace]
field :code, 1, type: :uint32
field :log, 3, type: :string
field :info, 4, type: :string
field :index, 5, type: :int64
field :key, 6, type: :bytes
field :value, 7, type: :bytes
field :proof, 8, type: ABCI.Types.Proof
field :height, 9, type: :int64
field :codespace, 10, type: :string
end
defmodule ABCI.Types.ResponseBeginBlock do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
tags: [ABCI.Types.KVPair.t()]
}
defstruct [:tags]
field :tags, 1, repeated: true, type: ABCI.Types.KVPair
end
defmodule ABCI.Types.ResponseCheckTx do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
code: non_neg_integer,
data: String.t(),
log: String.t(),
info: String.t(),
gas_wanted: integer,
gas_used: integer,
tags: [ABCI.Types.KVPair.t()],
codespace: String.t()
}
defstruct [:code, :data, :log, :info, :gas_wanted, :gas_used, :tags, :codespace]
field :code, 1, type: :uint32
field :data, 2, type: :bytes
field :log, 3, type: :string
field :info, 4, type: :string
field :gas_wanted, 5, type: :int64
field :gas_used, 6, type: :int64
field :tags, 7, repeated: true, type: ABCI.Types.KVPair
field :codespace, 8, type: :string
end
defmodule ABCI.Types.ResponseDeliverTx do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
code: non_neg_integer,
data: String.t(),
log: String.t(),
info: String.t(),
gas_wanted: integer,
gas_used: integer,
tags: [ABCI.Types.KVPair.t()],
codespace: String.t()
}
defstruct [:code, :data, :log, :info, :gas_wanted, :gas_used, :tags, :codespace]
field :code, 1, type: :uint32
field :data, 2, type: :bytes
field :log, 3, type: :string
field :info, 4, type: :string
field :gas_wanted, 5, type: :int64
field :gas_used, 6, type: :int64
field :tags, 7, repeated: true, type: ABCI.Types.KVPair
field :codespace, 8, type: :string
end
defmodule ABCI.Types.ResponseEndBlock do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
validator_updates: [ABCI.Types.ValidatorUpdate.t()],
consensus_param_updates: ABCI.Types.ConsensusParams.t(),
tags: [ABCI.Types.KVPair.t()]
}
defstruct [:validator_updates, :consensus_param_updates, :tags]
field :validator_updates, 1, repeated: true, type: ABCI.Types.ValidatorUpdate
field :consensus_param_updates, 2, type: ABCI.Types.ConsensusParams
field :tags, 3, repeated: true, type: ABCI.Types.KVPair
end
defmodule ABCI.Types.ResponseCommit do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
data: String.t()
}
defstruct [:data]
field :data, 2, type: :bytes
end
defmodule ABCI.Types.ConsensusParams do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
block_size: ABCI.Types.BlockSizeParams.t(),
evidence: ABCI.Types.EvidenceParams.t(),
validator: ABCI.Types.ValidatorParams.t()
}
defstruct [:block_size, :evidence, :validator]
field :block_size, 1, type: ABCI.Types.BlockSizeParams
field :evidence, 2, type: ABCI.Types.EvidenceParams
field :validator, 3, type: ABCI.Types.ValidatorParams
end
defmodule ABCI.Types.BlockSizeParams do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
max_bytes: integer,
max_gas: integer
}
defstruct [:max_bytes, :max_gas]
field :max_bytes, 1, type: :int64
field :max_gas, 2, type: :int64
end
defmodule ABCI.Types.EvidenceParams do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
max_age: integer
}
defstruct [:max_age]
field :max_age, 1, type: :int64
end
defmodule ABCI.Types.ValidatorParams do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
pub_key_types: [String.t()]
}
defstruct [:pub_key_types]
field :pub_key_types, 1, repeated: true, type: :string
end
defmodule ABCI.Types.LastCommitInfo do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
round: integer,
votes: [ABCI.Types.VoteInfo.t()]
}
defstruct [:round, :votes]
field :round, 1, type: :int32
field :votes, 2, repeated: true, type: ABCI.Types.VoteInfo
end
defmodule ABCI.Types.Header do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
version: ABCI.Types.Version.t(),
chain_id: String.t(),
height: integer,
time: ABCI.Types.Timestamp.t(),
num_txs: integer,
total_txs: integer,
last_block_id: ABCI.Types.BlockID.t(),
last_commit_hash: String.t(),
data_hash: String.t(),
validators_hash: String.t(),
next_validators_hash: String.t(),
consensus_hash: String.t(),
app_hash: String.t(),
last_results_hash: String.t(),
evidence_hash: String.t(),
proposer_address: String.t()
}
defstruct [
:version,
:chain_id,
:height,
:time,
:num_txs,
:total_txs,
:last_block_id,
:last_commit_hash,
:data_hash,
:validators_hash,
:next_validators_hash,
:consensus_hash,
:app_hash,
:last_results_hash,
:evidence_hash,
:proposer_address
]
field :version, 1, type: ABCI.Types.Version
field :chain_id, 2, type: :string
field :height, 3, type: :int64
field :time, 4, type: ABCI.Types.Timestamp
field :num_txs, 5, type: :int64
field :total_txs, 6, type: :int64
field :last_block_id, 7, type: ABCI.Types.BlockID
field :last_commit_hash, 8, type: :bytes
field :data_hash, 9, type: :bytes
field :validators_hash, 10, type: :bytes
field :next_validators_hash, 11, type: :bytes
field :consensus_hash, 12, type: :bytes
field :app_hash, 13, type: :bytes
field :last_results_hash, 14, type: :bytes
field :evidence_hash, 15, type: :bytes
field :proposer_address, 16, type: :bytes
end
defmodule ABCI.Types.Version do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
Block: non_neg_integer,
App: non_neg_integer
}
defstruct [:Block, :App]
field :Block, 1, type: :uint64
field :App, 2, type: :uint64
end
defmodule ABCI.Types.BlockID do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
hash: String.t(),
parts_header: ABCI.Types.PartSetHeader.t()
}
defstruct [:hash, :parts_header]
field :hash, 1, type: :bytes
field :parts_header, 2, type: ABCI.Types.PartSetHeader
end
defmodule ABCI.Types.PartSetHeader do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
total: integer,
hash: String.t()
}
defstruct [:total, :hash]
field :total, 1, type: :int32
field :hash, 2, type: :bytes
end
defmodule ABCI.Types.Validator do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
address: String.t(),
power: integer
}
defstruct [:address, :power]
field :address, 1, type: :bytes
field :power, 3, type: :int64
end
defmodule ABCI.Types.ValidatorUpdate do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
pub_key: ABCI.Types.PubKey.t(),
power: integer
}
defstruct [:pub_key, :power]
field :pub_key, 1, type: ABCI.Types.PubKey
field :power, 2, type: :int64
end
defmodule ABCI.Types.VoteInfo do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
validator: ABCI.Types.Validator.t(),
signed_last_block: boolean
}
defstruct [:validator, :signed_last_block]
field :validator, 1, type: ABCI.Types.Validator
field :signed_last_block, 2, type: :bool
end
defmodule ABCI.Types.PubKey do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
type: String.t(),
data: String.t()
}
defstruct [:type, :data]
field :type, 1, type: :string
field :data, 2, type: :bytes
end
defmodule ABCI.Types.Evidence do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
type: String.t(),
validator: ABCI.Types.Validator.t(),
height: integer,
time: ABCI.Types.Timestamp.t(),
total_voting_power: integer
}
defstruct [:type, :validator, :height, :time, :total_voting_power]
field :type, 1, type: :string
field :validator, 2, type: ABCI.Types.Validator
field :height, 3, type: :int64
field :time, 4, type: ABCI.Types.Timestamp
field :total_voting_power, 5, type: :int64
end
|
lib/abci/types/types.pb.ex
| 0.805135
| 0.491334
|
types.pb.ex
|
starcoder
|
defmodule Supervisor.Spec do
@moduledoc """
Convenience functions for defining a supervision specification.
## Example
By using the functions in this module one can define a supervisor
and start it with `Supervisor.start_link/2`:
import Supervisor.Spec
children = [
worker(MyWorker, [arg1, arg2, arg3]),
supervisor(MySupervisor, [arg1])
]
Supervisor.start_link(children, strategy: :one_for_one)
In many situations, it may be handy to define supervisors backed
by a module:
defmodule MySupervisor do
use Supervisor
def start_link(arg) do
Supervisor.start_link(__MODULE__, arg)
end
def init(arg) do
children = [
worker(MyWorker, [arg], restart: :temporary)
]
supervise(children, strategy: :simple_one_for_one)
end
end
Notice in this case we don't have to explicitly import
`Supervisor.Spec` as `use Supervisor` automatically does so.
Explicit supervisors as above are required when there is a need to:
1. Partially change the supervision tree during hot-code swaps.
2. Define supervisors inside other supervisors.
3. Perform actions inside the supervision `init/1` callback.
For example, you may want to start an ETS table that is linked to
the supervisor (i.e. if the supervision tree needs to be restarted,
the ETS table must be restarted too).
## Supervisor and worker options
In the example above, we defined workers and supervisors
and each accepts the following options:
* `:id` - a name used to identify the child specification
internally by the supervisor; defaults to the given module
name
* `:function` - the function to invoke on the child to start it
* `:restart` - defines when a terminated child process should be restarted
* `:shutdown` - defines how a child process should be terminated
* `:modules` - it should be a list with one element `[module]`,
where module is the name of the callback module only if the
child process is a `Supervisor` or `GenServer`; if the child
process is a `GenEvent`, modules should be `:dynamic`
### Restart values (:restart)
The following restart values are supported:
* `:permanent` - the child process is always restarted
* `:temporary` - the child process is never restarted (not even
when the supervisor's strategy is `:rest_for_one` or `:one_for_all`)
* `:transient` - the child process is restarted only if it
terminates abnormally, i.e. with another exit reason than
`:normal`, `:shutdown` or `{:shutdown, term}`
### Shutdown values (:shutdown)
The following shutdown values are supported:
* `:brutal_kill` - the child process is unconditionally terminated
using `exit(child, :kill)`.
* `:infinity` - if the child process is a supervisor, this is a mechanism
to give the subtree enough time to shutdown. It can also be used with
workers with care.
* Finally, the value can also be any integer meaning that the supervisor tells
the child process to terminate by calling `Process.exit(child, :shutdown)`
and then waits for an exit signal back. If no exit signal is received
within the specified time (in milliseconds), the child process is
unconditionally terminated using `Process.exit(child, :kill)`.
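For example, a worker that should be given five seconds to shut down
gracefully before being killed can be specified as:
    worker(MyWorker, [], shutdown: 5_000)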
"""
@typedoc "Supported strategies"
@type strategy :: :simple_one_for_one | :one_for_one | :one_for_all | :rest_for_one
@typedoc "Supported restart values"
@type restart :: :permanent | :transient | :temporary
@typedoc "Supported shutdown values"
@type shutdown :: :brutal_kill | :infinity | non_neg_integer
@typedoc "Supported worker values"
@type worker :: :worker | :supervisor
@typedoc "Supported module values"
@type modules :: :dynamic | [module]
@typedoc "Supported id values"
@type child_id :: term
@typedoc "The supervisor specification"
@type spec :: {child_id,
start_fun :: {module, atom, [term]},
restart,
shutdown,
worker,
modules}
@doc """
Receives a list of children (workers or supervisors) to
supervise and a set of options.
Returns a tuple containing the supervisor specification.
## Examples
supervise children, strategy: :one_for_one
## Options
* `:strategy` - the restart strategy option. It can be either
`:one_for_one`, `:rest_for_one`, `:one_for_all`, or
`:simple_one_for_one`. You can learn more about strategies
in the `Supervisor` module docs.
* `:max_restarts` - the maximum amount of restarts allowed in
a time frame. Defaults to 3.
* `:max_seconds` - the time frame in which `:max_restarts` applies.
Defaults to 5.
The `:strategy` option is required and by default maximum 3 restarts
are allowed within 5 seconds. Please check the `Supervisor` module for
a complete description of the available strategies.
"""
@spec supervise([spec], strategy: strategy,
max_restarts: non_neg_integer,
max_seconds: non_neg_integer) :: {:ok, tuple}
# TODO: Make it return a tuple of format {:ok, children, opts}
# TODO: Deprecate once the new tuple format has been established
def supervise(children, options) do
unless strategy = options[:strategy] do
raise ArgumentError, "expected :strategy option to be given"
end
maxR = Keyword.get(options, :max_restarts, 3)
maxS = Keyword.get(options, :max_seconds, 5)
assert_unique_ids(Enum.map(children, &elem(&1, 0)))
{:ok, {{strategy, maxR, maxS}, children}}
end
defp assert_unique_ids([id|rest]) do
if id in rest do
raise ArgumentError,
"duplicated id #{inspect id} found in the supervisor specification, " <>
"please explicitly pass the :id option when defining this worker/supervisor"
else
assert_unique_ids(rest)
end
end
defp assert_unique_ids([]) do
:ok
end
@doc """
Defines the given `module` as a worker which will be started
with the given arguments.
worker ExUnit.Runner, [], restart: :permanent
By default, the function `start_link` is invoked on the given
module. Overall, the default values for the options are:
[id: module,
function: :start_link,
restart: :permanent,
shutdown: 5000,
modules: [module]]
Check `Supervisor.Spec` module docs for more information on
the options.
"""
@spec worker(module, [term], [restart: restart, shutdown: shutdown,
id: term, function: atom, modules: modules]) :: spec
def worker(module, args, options \\ []) do
child(:worker, module, args, options)
end
@doc """
Defines the given `module` as a supervisor which will be started
with the given arguments.
supervisor ExUnit.Runner, [], restart: :permanent
By default, the function `start_link` is invoked on the given
module. Overall, the default values for the options are:
[id: module,
function: :start_link,
restart: :permanent,
shutdown: :infinity,
modules: [module]]
Check `Supervisor.Spec` module docs for more information on
the options.
"""
@spec supervisor(module, [term], [restart: restart, shutdown: shutdown,
id: term, function: atom, modules: modules]) :: spec
def supervisor(module, args, options \\ []) do
options = Keyword.put_new(options, :shutdown, :infinity)
child(:supervisor, module, args, options)
end
# TODO: Do and expose proper child validation
# TODO: Convert into map childspecs by v2.0?
defp child(type, module, args, options) do
id = Keyword.get(options, :id, module)
modules = Keyword.get(options, :modules, modules(module))
function = Keyword.get(options, :function, :start_link)
restart = Keyword.get(options, :restart, :permanent)
shutdown = Keyword.get(options, :shutdown, 5000)
{id, {module, function, args},
restart, shutdown, type, modules}
end
# TODO: Remove GenEvent when there is no more GenEvent v2.0
defp modules(GenEvent), do: :dynamic
defp modules(module), do: [module]
end
|
lib/elixir/lib/supervisor/spec.ex
| 0.823151
| 0.672251
|
spec.ex
|
starcoder
|
defmodule Primes do
@moduledoc """
This module defines functions for working with prime numbers.
"""
def is_prime(n), do: is_prime(n, stream())
def is_prime(2, _primes), do: true
def is_prime(n, _primes) when n < 2, do: false
def is_prime(n, primes) do
Enum.find_value(primes, false, fn p ->
cond do
rem(n, p) == 0 -> {:ok, false}
p > :math.sqrt(n) -> {:ok, true}
true -> false
end
end)
|> elem(1)
end
@doc """
Returns an infinite stream which generates the sequence of prime numbers.
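## Example
    iex> Primes.stream() |> Enum.take(5)
    [2, 3, 5, 7, 11]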
"""
@spec stream() :: Stream.t
def stream() do
Stream.unfold({%{}, 2}, fn {c, x} ->
{c, p} = next_prime(c, x)
{p, {c, p+1}}
end)
end
@doc """
Returns a stream of primes less than or equal to max, in ascending order.
Uses bit operations to achieve high performance with low memory requirements.
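## Example
    # The result is lazy; realise it with `Enum.to_list/1`.
    iex> Primes.list(10) |> Enum.to_list()
    [2, 3, 5, 7]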
"""
@spec list(pos_integer) :: Enumerable.t
def list(max) do
s = max - 1
fast_list(<<0::1, -1::size(s)>>, 2)
|> Stream.unfold(fn
<<bit::1, tail::bits>> -> {bit, tail}
<<>> -> nil
end)
|> Stream.transform(1, fn
0, n -> {[], n+1}
1, n -> {[n], n+1}
end)
end
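# Classic sieve over a bitstring: bit i (1-indexed) represents the integer i.
# Starting from x = 2, every bit still set marks a prime whose multiples are
# then cleared by flip_every/3.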
defp fast_list(bits, x, offset \\ 0)
defp fast_list(bits, x, offset) when bit_size(bits) >= x - offset do
y = x - 1
<<head::bits-size(y), bit::1, tail::bits>> = bits
if bit == 1 do
flip_every(tail, x, <<head::bits, bit::1>>)
|> fast_list(x+1, offset)
else
fast_list(bits, x+1, offset)
end
end
defp fast_list(bits, _x, _offset), do: bits
defp flip_every(bits, n, head) do
skip = n - 1
case bits do
<<chunk::bits-size(skip), _::1, tail::bits>> ->
flip_every(tail, n, <<head::bits, chunk::bits, 0::1>>)
bits ->
<<head::bits, bits::bits>>
end
end
@doc """
Calculates the prime factorization of the given integer.
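
For example (the result is lazy, so force it with `Enum.to_list/1`):

    Primes.factorize(12) |> Enum.to_list()
    #=> [2, 2, 3]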
"""
@spec factorize(pos_integer) :: Enumerable.t
@spec factorize(pos_integer, Enum.t) :: Enumerable.t
def factorize(n), do: factorize(n, stream())
def factorize(1, _), do: []
def factorize(n, primes) when n > 1 do
Stream.transform(primes, n, fn p, n ->
cond do
n == 1 -> {:halt, 1}
p > :math.sqrt(n) -> {[n], 1}
true -> factor_helper(n, p, 0)
end
end)
end
defp factor_helper(n, p, k) when rem(n, p) == 0, do: factor_helper(div(n, p), p, k+1)
defp factor_helper(n, p, k), do: {List.duplicate(p, k), n}
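# Incremental sieve of Eratosthenes: `c` maps upcoming composites to the list
# of primes that generate them, so `x` is prime exactly when absent from `c`.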
defp next_prime(c, x) do
if Map.has_key?(c, x) do
Map.get(c, x)
|> Enum.reduce(c, &Map.update(&2, x+&1, [&1], fn p -> p ++ [&1] end))
|> Map.delete(x)
|> next_prime(x+1)
else
{Map.put(c, x*x, [x]), x}
end
end
end
|
lib/primes.ex
| 0.770162
| 0.542621
|
primes.ex
|
starcoder
|
defmodule Resourceful.Error do
@moduledoc """
Errors in `Resourceful` follow a few conventions. This module contains
functions to help work with those conventions. Client-facing errors are
loosely inspired by and should be easily converted to [JSON:API-style
errors](https://jsonapi.org/format/#errors), however they should also be
suitable when JSON:API isn't used at all.
## Error Structure
All errors returned in `Resourceful` are expected to be two element tuples
where the first element is `:error`. This is a common Elixir and Erlang
convention, but for the purposes of this library it is strictly enforced. `Resourceful`
generates and expects one of two kinds of errors:
### Basic Errors
Basic errors are always a two element tuple where the second element is an
atom that relates to the specific kind of error returned. These are meant
for situations where context is either easily inferred, unnecessary, or
obvious in some other manner.
Example: `{:error, :basic_err}`
Basic errors that require context are often transformed into contextual errors
by higher level functions as they have context that lower level functions do
not.
### Contextual Errors
Contextual errors are always a two element tuple where the second element is
another two element tuple in which the first element is an atom and the second
element is a map containing contextual information such as user input, data
being validated, and/or the source of the error. The map's keys must be atoms.
Example: `{:error, {:context_err, %{input: "XYZ", source: ["email_address"]}}}`
Errors outside of this format are not expected to work with these functions.
#### Sources
While not strictly required by contextual errors, the `:source` key is used to
indicate an element in a complex data structure that is responsible for a
particular error. It must be a list of atoms, strings, or integers used to
navigate a tree of data. Non-nested values should still be in the form of a
list as prefixing sources is common. It's common for functions generating
errors to be ignorant of their full context and higher level functions to
prepend errors with their current location.
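For example, a minimal sketch (error type and source values are illustrative):

    {:error, {:type_cast_failure, %{source: [:age]}}}
    |> Resourceful.Error.prepend_source([:users, 0])
    #=> {:error, {:type_cast_failure, %{source: [:users, 0, :age]}}}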
#### Other Common Conventions
There are a few other keys that appear in errors regularly:
* `:detail`: A human friendly description about the error. If there is
information related to resolving the error, it belongs here.
* `:input`: A text representation of the actual input given by the client. It
should be as close as possible to the original. (In general, `IO.inspect/1` is
used.)
* `:key`: Not to be confused with `:source`, a `:key` key should always be
present when a lookup of data is done by some sort of key and there is a
failure.
* `:value`: Not to be confused with `:input`, a `:value` key should contain
the actual value in question rather than its textual representation.
**Note:** all of these keys have convenience functions as it is a very common
convention to create a new error with one of these keys or add one to an
existing error.
### Error Types
Both basic and contextual errors will contain an atom describing the specific
type of error. These should be thought of as error codes and should be unique
to the kinds of errors. For example, `:attribute_not_found` means that in some
context, an attribute for a given resource doesn't exist. This could be an
attempt to filter or sort. Either way, the error type remains the same. In
both contexts, this error means the same thing.
Contextual errors of a particular type should always contain at least some
expected keys in their context maps. For example, `:attribute_not_found`
should always contain a `:name` and may contain other information. More is
usually better when it comes to errors.
Keys should remain consistent in their use. `:invalid_filter_operator`, for
example, should always contain an `:attribute` key implying that the failure
was on a valid attribute whereas `:attribute_not_found` contains a `:key` key
implying it was never resolved to an actual attribute.
"""
@type basic() :: {:error, atom()}
@type contextual() :: {:error, {atom(), map()}}
@type t() :: basic() | contextual()
@type or_type() :: atom() | t()
@builtins %{
attribute_not_found: %{
detail:
"An attribute with the name `%{key}` could not be found for resource type `%{resource_type}`.",
title: "Attribute Could Not Be Found"
},
cannot_filter_by_attribute: %{
detail: "`%{attribute}` is a valid attribute but cannot be used to filter resource.",
title: "Resource Cannot Be Filtered By Attribute"
},
cannot_sort_by_attribute: %{
detail: "`%{attribute}` is a valid attribute but cannot be used to sort resource.",
title: "Resource Cannot Be Sorted By Attribute"
},
invalid_field: %{
detail: "`%{key}` is not a valid field name for resource type `%{resource_type}`.",
title: "Invalid Field"
},
invalid_field_type: %{
detail: "`%{key}` is not a valid resource type in this request.",
title: "Invalid Field Type"
},
invalid_filter: %{
title: "Invalid Filter"
},
invalid_filter_operator: %{
detail: "`%{operator}` is not a valid filter operator.",
title: "Invalid Filter Operator"
},
invalid_filter_value: %{
detail: "`%{value}` is not compatible with filter operator `%{operator}`.",
title: "Invalid Filter Value"
},
invalid_sorter: %{
title: "Invalid Sorter"
},
max_depth_exceeded: %{
detail:
"`%{resource_type}` does not support queries with a depth greater than `%{max_depth}`",
title: "Max Query Depth Exceeded"
},
max_filters_exceeded: %{
detail: "Resource cannot be filtered by more than %{max_allowed} filters.",
title: "Max Filters Exceeded"
},
max_sorters_exceeded: %{
detail: "Resource cannot be sortered by more than %{max_allowed} sorters.",
title: "Max Sorters Exceeded"
},
no_type_registry: %{
detail: "Relationships for resource type `%{resource_type}` have not been configured.",
title: "Relationships Not Configured"
},
relationship_nesting_not_allowed: %{
detail:
"Relationship `%{key}` for resource type `%{resource_type}` does not allow nested queries. In most cases, this is because a relationship is a to-many relationship.",
title: "Relationship Nesting Not Allowed"
},
relationship_not_found: %{
detail:
"A relationship with the name `%{key}` could not be found for resource type `%{resource_type}`.",
title: "Relationship Could Not Be Found"
},
type_cast_failure: %{
detail: "`%{input}` cannot be cast to type `%{type}`.",
title: "Type Cast Failure"
}
}
@error_type_defaults Enum.into(
Application.get_env(:resourceful, :error_type_defaults) || %{},
@builtins
)
@doc """
Mostly a convenience function to use instead of `list/1` with the option to
auto source errors as well. Additionally it will take a non-collection value
and convert it to a list.
Returns a list of errors.
"""
@spec all(any(), keyword()) :: list()
def all(errors, opts \\ [])
def all(%Ecto.Changeset{} = changeset, _opts), do: from_changeset(changeset)
def all(errors, opts) when is_list(errors) or is_map(errors) do
if Keyword.get(opts, :auto_source) do
auto_source(errors)
else
errors
end
|> list()
end
def all(value, opts), do: all([value], Keyword.put(opts, :auto_source, false))
@doc """
Recursively checks arbitrary data structures for any basic or contextual
errors.
"""
@spec any?(any()) :: boolean()
def any?({:error, _}), do: true
def any?(list) when is_list(list), do: Enum.any?(list, &any?/1)
def any?(%Ecto.Changeset{valid?: valid}), do: !valid
def any?(%{} = map) do
map
|> Map.values()
|> any?()
end
def any?(_), do: false
@doc """
Traverses an arbitrary data structure that may contain errors and prepends
`:source` data given the error's position in the data structure using either
an index from a list or a key from a map.
**Note:** This will not work for keyword lists. In order to infer source
information they must first be converted to maps.
Returns input data structure with errors modified to contain `:source` in
their context.
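
A minimal sketch (error and key names are illustrative):

    auto_source(%{name: {:error, :too_short}})
    #=> %{name: {:error, {:too_short, %{source: [:name]}}}}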
"""
@spec auto_source(t() | map() | list(), list()) :: any()
def auto_source(error_or_enum, prefix \\ [])
def auto_source(%{} = map, prefix) do
Map.new(map, fn {src, val} ->
{src, auto_source(val, prefix ++ [src])}
end)
end
def auto_source(list, prefix) when is_list(list) do
list
|> Stream.with_index()
|> Enum.map(fn {val, src} -> auto_source(val, prefix ++ [src]) end)
end
def auto_source({:error, _} = error, prefix), do: prepend_source(error, prefix)
def auto_source(non_error, _), do: non_error
@doc """
Extracts the context map from an error.
Returns a context map.
"""
@spec context(t() | map()) :: map()
def context({:error, {_, %{} = context}}), do: context
def context({:error, _}), do: %{}
def context(%{} = context), do: context
@doc """
Deletes `key` from an error's context map if present.
Returns an error tuple.
"""
@spec delete_context_key(t(), atom()) :: t()
def delete_context_key({:error, {type, %{} = context}}, key) do
{:error, {type, Map.delete(context, key)}}
end
def delete_context_key({:error, _} = error, _), do: error
@doc """
Converts errors from an Ecto Changeset into Resourceful errors. The `type` is
inferred from the `:validation` key as Resourceful tends to use longer names.
Rather than relying on separate input params, the `:input` is inserted from
`data`, and `:source` is also inferred.
"""
@spec from_changeset(%Ecto.Changeset{}) :: [contextual()]
def from_changeset(%Ecto.Changeset{data: data, errors: errors, valid?: false}) do
do_from_changeset(errors, data)
end
defp do_from_changeset(errors, %{} = data) when is_list(errors) do
Enum.map(errors, &do_from_changeset(&1, data))
end
defp do_from_changeset({source, {detail, context_list}}, %{} = data)
when is_atom(source) do
context_list
|> Map.new()
|> Map.merge(%{
detail: detail,
input: changeset_input(source, data),
source: [source]
})
|> changeset_error()
end
defp changeset_error(%{validation: :cast} = context) do
changeset_error(:type_cast_failure, Map.delete(context, :detail))
end
defp changeset_error(%{} = context) do
changeset_error(:input_validation_failure, context)
end
defp changeset_error(type, %{} = context) do
{:error, {type, Map.delete(context, :validation)}}
end
defp changeset_input(source, %{} = data) do
Map.get(data, to_string(source)) || Map.get(data, source)
end
@doc """
Many error types should, at a minimum, have an associated `:title`. If there
are regular context values, it should also include a `:detail` value as well.
Both of these keys provide extra information about the nature of the error
and can help the client understand the particulars of the provided context.
While it might not be readily obvious what `:key` means in an error, if it is
used in `:detail` it will help the client understand the significance.
Unlike the error's type itself--which realistically should serve as an error
code of sorts--the title should be more human readable and able to be
localized, although it should be consistent. Similarly, detail should be able
to be localized although it can change depending on the specifics of the error
or values in the context map.
This function handles injecting default `:title` and `:detail` items into the
context map if they are available for an error type and replacing context-
related bindings in messages. (See `message_with_context/2` for details.)
In the future, this is also where localization should happen.
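
A sketch using the built-in `:type_cast_failure` messages (values are
illustrative):

    humanize({:error, {:type_cast_failure, %{input: "abc", type: :integer}}})
    #=> {:error,
    #     {:type_cast_failure,
    #      %{detail: "`abc` cannot be cast to type `integer`.",
    #        input: "abc", title: "Type Cast Failure", type: :integer}}}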
"""
@spec humanize(t() | [t()], keyword()) :: contextual() | [contextual()]
def humanize(error, opts \\ [])
def humanize(errors, opts) when is_list(errors) do
Enum.map(errors, &humanize(&1, opts))
end
def humanize({:error, errors}, opts) when is_list(errors) do
{:error, humanize(errors, opts)}
end
def humanize({:error, {type, %{} = context}}, _opts) do
{:error,
{type,
Enum.reduce([:detail, :title], context, fn key, new_ctx ->
case Map.get(context, key) || default_type_message([type, key]) do
nil -> new_ctx
msg -> Map.put(new_ctx, key, message_with_context(msg, context))
end
end)}}
end
def humanize({:error, _} = error, opts) do
error
|> with_context()
|> humanize(opts)
end
defp default_type_message(path), do: get_in(@error_type_defaults, path)
@doc """
Transforms an arbitrary data structure that may contain errors into a single,
flat list of those errors. Non-error values are removed. Collections are
checked recursively.
Maps are given special treatment in that their values are checked but their
keys are discarded.
This format is meant to keep reading errors fairly simple and consistent at
the edge. Clients can rely on reading a single list of errors regardless of
whether traversing nested validation failures or handling simpler single
fault situations.
This function is also designed with another convention in mind: mixing
successes and failures in a single payload. A design goal of error use in this
library is to wait as late as possible before returning errors, so that a
single request can report the totality of its failures to the client: many
different paths can be evaluated, and any errors found along the way are
returned in full.
"""
@spec list(list() | map()) :: [t()]
def list(enum) when is_list(enum) or is_map(enum) do
enum
|> flatten_maps()
|> List.flatten()
|> Enum.filter(&any?/1)
end
defp flatten_maps(list) when is_list(list), do: Enum.map(list, &flatten_maps/1)
defp flatten_maps(%{} = map) do
map
|> Map.values()
|> flatten_maps()
end
defp flatten_maps(value), do: value
@doc """
Replaces context bindings in a message with atom keys in a context map.
A message of `"Invalid input %{input}."` would have `%{input}` replaced with
the value in the context map of `:input`.
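
For example:

    message_with_context("Invalid input %{input}.", %{input: "XYZ"})
    #=> "Invalid input XYZ."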
"""
@spec message_with_context(String.t(), map()) :: String.t()
def message_with_context(message, %{} = context) do
Regex.replace(~r/%\{(\w+)\}/, message, fn _, key ->
context
|> Map.get(String.to_atom(key))
|> to_string()
end)
end
@doc """
Recursively transforms arbitrary data structures containing `:ok` tuples with
just values. Values which are not in `:ok` tuples are left untouched.
It does not check for errors. If errors are included, they will remain in the
structure untouched. This function is designed to work on error free data and
is unlikely to be used on its own but rather with `or_ok/1`.
Keyword lists in which `:ok` appears alongside other keys won't be returned as
probably intended; keep this limitation in mind.
Returns the input data structure with all instances of `{:ok, value}` replaced
with `value`.
"""
@spec ok_value(any()) :: any()
def ok_value({:ok, value}), do: ok_value(value)
def ok_value(list) when is_list(list), do: Enum.map(list, &ok_value/1)
def ok_value(%Ecto.Changeset{changes: changes}), do: changes
def ok_value(%_{} = struct), do: struct
def ok_value(%{} = map), do: Map.new(map, fn {k, v} -> {k, ok_value(v)} end)
def ok_value(value), do: value
@doc """
Checks an arbitrary data structure for errors and returns either the errors
or valid data.
See `all/1`, `any?/1`, and `ok_value/1` for specific details as this function
combines the three into a common pattern. Return the errors if there are any
or the validated data.
Returns either a list of errors wrapped in an `:error` tuple or valid data
wrapped in an `:ok` tuple.
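
A minimal sketch (values are illustrative):

    or_ok([{:ok, 1}, {:ok, 2}])
    #=> {:ok, [1, 2]}

    or_ok([{:ok, 1}, {:error, :bad}])
    #=> {:error, [{:error, :bad}]}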
"""
def or_ok(value, opts \\ []) do
case any?(value) do
true -> {:error, all(value, opts)}
_ -> {:ok, ok_value(value)}
end
end
@doc """
Adds or prepends source context to an error. A common pattern when dealing
with sources in nested data structures being traversed recursively is for
the child structure to have no knowledge of the parent structure. Once the
child errors are resolved the parent can then prepend its location in the
structure to the child's errors.
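
A minimal sketch (error types are illustrative):

    prepend_source([{:error, :invalid}, {:error, :bad}], :name)
    #=> [{:error, {:invalid, %{source: [:name]}}},
    #    {:error, {:bad, %{source: [:name]}}}]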
"""
@spec prepend_source(or_type() | [or_type()], any()) :: contextual() | [contextual()]
def prepend_source(errors, prefix) when is_list(errors) do
Enum.map(errors, &prepend_source(&1, prefix))
end
def prepend_source({:error, {error, %{source: source} = context}}, prefix) do
{:error, {error, %{context | source: List.wrap(prefix) ++ source}}}
end
def prepend_source({:error, {error, %{} = context}}, prefix) do
{:error, {error, Map.put(context, :source, List.wrap(prefix))}}
end
def prepend_source(error_or_type, prefix) do
error_or_type
|> with_context()
|> prepend_source(prefix)
end
@doc """
Adds a context map to an error if it lacks one, converting a basic error to a
contextual error. It may also take a single atom to prevent error generating
code from having to constantly wrap errors in an `:error` tuple.
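
For example:

    with_context(:not_found)
    #=> {:error, {:not_found, %{}}}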
"""
@spec with_context(or_type()) :: contextual()
def with_context({:error, {type, %{}}} = error) when is_atom(type), do: error
def with_context({:error, type}) when is_atom(type), do: contextual_error(type)
def with_context(type) when is_atom(type), do: contextual_error(type)
defp contextual_error(type), do: {:error, {type, %{}}}
@doc """
Adds the specified context as an error's context. If the error already has a
context map the new context is merged.
"""
@spec with_context(or_type(), map()) :: contextual()
def with_context(error_or_type, %{} = context) do
error_or_type
|> with_context()
|> merge_context(context)
end
defp merge_context({:error, {type, %{} = context}}, new_context) do
{:error, {type, Map.merge(context, new_context)}}
end
@doc """
Adds a context map to an error if it lacks one and then puts the key and value
into that map.
"""
@spec with_context(or_type(), atom(), any()) :: contextual()
def with_context(error_or_type, key, value) do
error_or_type
|> with_context()
|> with_context_value(key, value)
end
defp with_context_value({:error, {type, %{} = context}}, key, value) do
{:error, {type, Map.put(context, key, value)}}
end
@doc """
Convenience function to create or modify an existing error with `:input`
context.
"""
@spec with_input(or_type(), any()) :: contextual()
def with_input(error_or_type, input) do
with_context(error_or_type, :input, input)
end
@doc """
Convenience function to create or modify an existing error with `:key`
context.
"""
@spec with_key(or_type(), any()) :: contextual()
def with_key(error_or_type, key), do: with_context(error_or_type, :key, key)
@doc """
Adds source context to an error and replaces `:source` if present.
"""
@spec with_source(or_type(), any(), map()) :: contextual()
def with_source(error_or_type, source, %{} = context \\ %{}) do
error_or_type
|> with_context(context)
|> delete_context_key(:source)
|> prepend_source(source)
end
end
|
lib/resourceful/error.ex
| 0.937204
| 0.665461
|
error.ex
|
starcoder
|
defmodule Exchange.Trade do
@moduledoc """
Placeholder to define trades
"""
alias Exchange.Order
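# Note: these defaults are evaluated once at compile time, so the default
# `trade_id` and `acknowledged_at` are shared placeholders; `generate_trade/4`
# always sets fresh values for both.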
defstruct trade_id: UUID.uuid1(),
ticker: nil,
currency: nil,
buyer_id: nil,
seller_id: nil,
buy_order_id: nil,
sell_order_id: nil,
price: nil,
size: nil,
buy_init_size: nil,
sell_init_size: nil,
type: :full_fill,
acknowledged_at: DateTime.utc_now() |> DateTime.to_unix(:nanosecond)
@spec decode_from_jason(map) :: %Exchange.Trade{}
@doc """
Decodes the payload to a Trade struct
## Parameters
- payload: map with necessary parameters to populate the struct
"""
def decode_from_jason(trade) do
%Exchange.Trade{
trade_id: Map.get(trade, :trade_id),
ticker: Map.get(trade, :ticker) |> String.to_atom(),
currency: Map.get(trade, :currency) |> String.to_atom(),
buyer_id: Map.get(trade, :buyer_id),
seller_id: Map.get(trade, :seller_id),
buy_order_id: Map.get(trade, :buy_order_id),
sell_order_id: Map.get(trade, :sell_order_id),
price: Map.get(trade, :price),
size: Map.get(trade, :size),
buy_init_size: Map.get(trade, :buy_init_size),
sell_init_size: Map.get(trade, :sell_init_size),
type: Map.get(trade, :type) |> String.to_atom(),
acknowledged_at: Map.get(trade, :acknowledged_at)
}
end
@doc """
Function that creates a trade given two matching orders
## Parameters
- order: Newly placed order
- matched_order: Order that is in the `Exchange.OrderBook` that matches the newly placed order
- type: Atom that can either be `:partial_fill` or `:full_fill`
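
## Example

A minimal sketch (field values are illustrative and only the fields read by
this function are set):

    buy = %Exchange.Order{order_id: "b1", trader_id: "alice", side: :buy,
                          ticker: :AUXLND, price: 1000, size: 100, initial_size: 100}
    sell = %Exchange.Order{order_id: "s1", trader_id: "bob", side: :sell,
                           ticker: :AUXLND, price: 990, size: 250, initial_size: 250}
    Exchange.Trade.generate_trade(buy, sell, :partial_fill, :gbp)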
"""
@spec generate_trade(
order :: Exchange.Order.order(),
matched_order :: Exchange.Order.order(),
type :: atom,
currency :: atom
) :: %Exchange.Trade{}
def generate_trade(
%Order{side: s1, ticker: t1} = order,
%Order{side: s2} = matched_order,
type,
currency
)
when s1 != s2 do
sides = get_sides(order, matched_order)
%Exchange.Trade{
trade_id: UUID.uuid1(),
buy_order_id: sides.buy_order_id,
buyer_id: sides.buyer_id,
sell_order_id: sides.sell_order_id,
seller_id: sides.seller_id,
buy_init_size: sides.buy_init_size,
sell_init_size: sides.sell_init_size,
size: min(order.size, matched_order.size),
price: matched_order.price,
type: type,
acknowledged_at: DateTime.utc_now() |> DateTime.to_unix(:nanosecond),
ticker: t1,
currency: currency
}
end
defp get_sides(order, matched_order) do
if order.side == :buy do
%{
buy_order_id: order.order_id,
buyer_id: order.trader_id,
sell_order_id: matched_order.order_id,
seller_id: matched_order.trader_id,
buy_init_size: order.initial_size,
sell_init_size: matched_order.initial_size
}
else
%{
sell_order_id: order.order_id,
seller_id: order.trader_id,
buy_order_id: matched_order.order_id,
buyer_id: matched_order.trader_id,
buy_init_size: matched_order.initial_size,
sell_init_size: order.initial_size
}
end
end
end
defimpl Jason.Encoder, for: Exchange.Trade do
def encode(value, opts) do
Jason.Encode.map(
Map.take(
value,
[
:trade_id,
:ticker,
:currency,
:buyer_id,
:seller_id,
:buy_order_id,
:sell_order_id,
:price,
:size,
:buy_init_size,
:sell_init_size,
:type,
:acknowledged_at
]
),
opts
)
end
end
|
lib/exchange/trade.ex
| 0.810966
| 0.401306
|
trade.ex
|
starcoder
|
defmodule Ecto.Query.Util do
@moduledoc """
This module provides utility functions for queries.
"""
alias Ecto.Query
@doc """
Look up a source with a variable.
"""
def find_source(sources, {:&, _, [ix]}) when is_tuple(sources) do
elem(sources, ix)
end
def find_source(sources, {:&, _, [ix]}) when is_list(sources) do
Enum.at(sources, ix)
end
@doc """
Look up the expression where the variable was bound.
"""
def source_expr(%Query{from: from}, {:&, _, [0]}) do
from
end
def source_expr(%Query{joins: joins}, {:&, _, [ix]}) do
Enum.at(joins, ix - 1)
end
@doc "Returns the source from a source tuple."
def source({source, _model}), do: source
@doc "Returns model from a source tuple or nil if there is none."
def model({_source, model}), do: model
# Converts internal type format to "typespec" format
@doc false
def type_to_ast({type, inner}), do: {type, [], [type_to_ast(inner)]}
def type_to_ast(type) when is_atom(type), do: {type, [], nil}
@doc false
defmacro types do
~w(boolean string integer float decimal binary datetime date time interval
uuid)a
end
@doc false
defmacro poly_types do
~w(array)a
end
# Takes an elixir value and returns its ecto type
# Only allows query literals
@doc false
def value_to_type(value)
def value_to_type(nil), do: {:ok, :any}
def value_to_type(value) when is_boolean(value), do: {:ok, :boolean}
def value_to_type(value) when is_binary(value), do: {:ok, :string}
def value_to_type(value) when is_integer(value), do: {:ok, :integer}
def value_to_type(value) when is_float(value), do: {:ok, :float}
def value_to_type(%Ecto.Tagged{value: binary, type: :binary}) do
case value_to_type(binary) do
{:ok, type} when type in [:binary, :string, :any] ->
{:ok, :binary}
{:error, _} = err ->
err
_ ->
{:error, "binary/1 argument has to be of binary type"}
end
end
def value_to_type(%Ecto.Tagged{value: binary, type: :uuid}) do
case value_to_type(binary) do
{:ok, type} when type in [:uuid, :string, :any] ->
if is_nil(binary) or byte_size(binary) == 16 do
{:ok, :uuid}
else
{:error, "uuid `#{inspect binary}` is not 16 bytes"}
end
{:error, _} = err ->
err
_ ->
{:error, "uuid/1 argument has to be of binary type"}
end
end
def value_to_type(%Ecto.Tagged{value: list, type: {:array, inner}}) do
if inner in types do
elem_types = Enum.map(list, &value_to_type/1)
unless is_nil(list) do
error =
Enum.find_value(elem_types, fn
{:ok, type} ->
unless type_eq?(inner, type) or type_castable?(type, inner) do
{:error, "all elements in array have to be of same type"}
end
{:error, _} = err ->
err
end)
end
error || {:ok, {:array, inner}}
else
{:error, "invalid type given to `array/2`: `#{inspect inner}`"}
end
end
def value_to_type(value), do: {:error, "unknown type of value `#{inspect value}`"}
# Takes an elixir value and returns its ecto type.
# Different to value_to_type/1 it also allows values
# that can be interpolated into the query
@doc false
def external_to_type(%Decimal{}), do: {:ok, :decimal}
def external_to_type(%Ecto.DateTime{} = dt) do
values = Map.delete(dt, :__struct__) |> Map.values
types = Enum.map(values, &external_to_type/1)
res = Enum.find_value(types, fn
{:ok, :integer} -> nil
{:error, _} = err -> err
_ -> {:error, "all datetime elements have to be of integer type"}
end)
res || {:ok, :datetime}
end
def external_to_type(%Ecto.Date{} = d) do
values = Map.delete(d, :__struct__) |> Map.values
types = Enum.map(values, &external_to_type/1)
res = Enum.find_value(types, fn
{:ok, :integer} -> nil
{:error, _} = err -> err
_ -> {:error, "all date elements have to be of integer type"}
end)
res || {:ok, :date}
end
def external_to_type(%Ecto.Time{} = t) do
values = Map.delete(t, :__struct__) |> Map.values
types = Enum.map(values, &external_to_type/1)
res = Enum.find_value(types, fn
{:ok, :integer} -> nil
{:error, _} = err -> err
_ -> {:error, "all time elements have to be of integer type"}
end)
res || {:ok, :time}
end
def external_to_type(%Ecto.Interval{} = dt) do
values = Map.delete(dt, :__struct__) |> Map.values
types = Enum.map(values, &external_to_type/1)
res = Enum.find_value(types, fn
{:ok, :integer} -> nil
{:error, _} = err -> err
_ -> {:error, "all interval elements have to be of integer type"}
end)
if res do
res
else
{:ok, :interval}
end
end
def external_to_type(value), do: value_to_type(value)
# Returns true if the two types are considered equal by the type system
# Note that this does not consider casting
@doc false
def type_eq?(_, :any), do: true
def type_eq?(:any, _), do: true
def type_eq?({outer, inner1}, {outer, inner2}), do: type_eq?(inner1, inner2)
def type_eq?(type, type), do: true
def type_eq?(_, _), do: false
# Returns true if the literal type can be inferred as the second type.
# A literal type is a type that does not require wrapping with
# %Ecto.Tagged{}.
@doc false
def type_castable?(:string, :binary), do: true
def type_castable?(:string, :uuid), do: true
def type_castable?(_, _), do: false
# Tries to cast the given value to the specified type.
# If value cannot be casted just return it.
@doc false
def try_cast(binary, :binary) when is_binary(binary) do
%Ecto.Tagged{value: binary, type: :binary}
end
def try_cast(binary, :uuid) when is_binary(binary) do
%Ecto.Tagged{value: binary, type: :uuid}
end
def try_cast(list, {:array, inner}) when is_list(list) do
%Ecto.Tagged{value: list, type: {:array, inner}}
end
def try_cast(value, _) do
value
end
# Get var for given model in query
def model_var(query, model) do
sources = Tuple.to_list(query.sources)
pos = Enum.find_index(sources, &(model(&1) == model))
{:&, [], [pos]}
end
# Find var in select clause. Returns a list of tuple and list indices to
# find the var.
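# Illustrative: locate_var({var, other_expr}, var) #=> [0], and nil is
# returned when the var does not appear in the expression.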
def locate_var({left, right}, var) do
locate_var({:{}, [], [left, right]}, var)
end
def locate_var({:{}, _, list}, var) do
locate_var(list, var)
end
def locate_var({:assoc, _, [left, _right]}, var) do
if left == var, do: []
end
def locate_var(list, var) when is_list(list) do
list = Stream.with_index(list)
res = Enum.find_value(list, fn {elem, ix} ->
if poss = locate_var(elem, var) do
{poss, ix}
else
nil
end
end)
case res do
{poss, pos} -> [pos|poss]
nil -> nil
end
end
def locate_var(expr, var) do
if expr == var, do: []
end
end
|
lib/ecto/query/util.ex
| 0.853303
| 0.486758
|
util.ex
|
starcoder
|
defmodule Leaderboard do
@moduledoc ~S"""
An implementation of a leaderboard (rank table) based on ETS tables.
It associates a key with a score and orders these records according to the
score. The score can be any term.
The leaderboard provides an API for inserting and deleting records as well
as functions for reading records in defined order.
## Usage

Once the leaderboard is started using `Leaderboard.start_link/2` with
a unique name for the table, it can be used to store and read records:

    {:ok, _pid} = Leaderboard.start_link(Leaderboard.Score)

    Leaderboard.insert(Leaderboard.Score, 1, "key1")
    Leaderboard.insert(Leaderboard.Score, 3, "key3")
    Leaderboard.insert(Leaderboard.Score, 2, "key2")

    Leaderboard.select(Leaderboard.Score, :descend, 2)
    #=> [{3, "key3"}, {2, "key2"}]

    Leaderboard.select(Leaderboard.Score, :ascend, 2)
    #=> [{1, "key1"}, {2, "key2"}]

Usually, the leaderboard is started as a part of a supervision tree:

    worker(Leaderboard, [Leaderboard.Score])

When a key is already present and it is inserted again, the score associated
with the given key gets updated (`insert/3` works as an update function as
well).

Note that all the write operations such as `insert/3` and `delete/2` (as
opposed to the read operations) are serialised via the `GenServer` process.
use GenServer
@typedoc """
Name of the leaderboard
"""
@type table_name :: atom
@typedoc """
Options used by the `start*` functions
"""
@type options :: GenServer.options
@typedoc """
Return values of `start*` functions
"""
@type on_start :: GenServer.on_start
@typedoc """
Score of a given key
"""
@type score :: term
@typedoc """
Key associated with a score
"""
@type key :: term
@typedoc """
Score and key together
"""
@type record :: {score, key}
@typedoc """
Match specification
"""
@type match_spec :: :ets.match_spec
@typedoc """
Order of returned records
"""
@type order :: :ascend | :descend
@typedoc """
The max number of records to return (or all of them)
"""
@type limit :: pos_integer | :all
@doc """
Starts `GenServer` process with link to the current process.
The `table_name` must be an atom, based on which ETS leaderboard tables
are created. The `GenServer` process is the owner of the ETS tables.
"""
@spec start_link(table_name, options) :: on_start
def start_link(table_name, options \\ []) do
GenServer.start_link(__MODULE__, [table_name], options)
end
@doc """
Starts `GenServer` process without links.
"""
@spec start(table_name, options) :: on_start
def start(table_name, options \\ []) do
GenServer.start(__MODULE__, [table_name], options)
end
@doc """
Deletes a record based on the `key`.
"""
@spec delete(table_name, key, timeout) :: boolean
def delete(table_name, key, timeout \\ 5000) do
server = Leaderboard.Table.server_pid(table_name)
GenServer.call(server, {:delete, key}, timeout)
end
@doc """
Deletes all the records.
"""
@spec delete_all(table_name, timeout) :: :ok
def delete_all(table_name, timeout \\ 5000) do
server = Leaderboard.Table.server_pid(table_name)
GenServer.call(server, :delete_all, timeout)
end
@doc """
Inserts a new record or updates the `score` of an existing `key`.
"""
@spec insert(table_name, score, key, timeout) :: :ok
def insert(table_name, score, key, timeout \\ 5000) do
server = Leaderboard.Table.server_pid(table_name)
GenServer.call(server, {:insert, score, key}, timeout)
end
@doc """
Returns a `score` associated with a `key`.
"""
@spec lookup(table_name, key) :: score | nil
def lookup(table_name, key) do
Leaderboard.Table.lookup(table_name, key)
end
@doc """
Returns all the values as defined in `match_spec`.
The returned values don't have to be records in form of
`{score, key}`. The values are matched using the `match_spec` and they
are ordered in specified `order`.
For example, the `match_spec` to return all the records is
`[{{:"$1"}, [], [:"$1"]}]`.
"""
@spec match(table_name, match_spec, order) :: [term]
def match(table_name, match_spec, order) do
score_table = Leaderboard.Table.score_table_name(table_name)
Leaderboard.Table.match(score_table, match_spec, order, :all)
end
@doc """
Behaves the same as `match/3`, but also has `limit` that defines the max
number of returned values.
"""
@spec match(table_name, match_spec, order, limit) :: [term]
def match(table_name, match_spec, order, limit) do
score_table = Leaderboard.Table.score_table_name(table_name)
Leaderboard.Table.match(score_table, match_spec, order, limit)
end
@doc """
Returns all the records ordered in specified `order`.
"""
@spec select(table_name, order) :: [record]
def select(table_name, order) do
score_table = Leaderboard.Table.score_table_name(table_name)
Leaderboard.Table.select(score_table, order, :all)
end
@doc """
Behaves the same as `select/2`, but also has `limit` that defines the max
number of returned records.
"""
@spec select(table_name, order, limit) :: [record]
def select(table_name, order, limit) do
score_table = Leaderboard.Table.score_table_name(table_name)
Leaderboard.Table.select(score_table, order, limit)
end
@doc """
Returns the number of records in the table.
"""
@spec size(table_name) :: non_neg_integer
def size(table_name) do
Leaderboard.Table.size(table_name)
end
def init([table_name]) do
score_table = Leaderboard.Table.init_score_table(table_name)
key_table = Leaderboard.Table.init_key_table(table_name, self())
{:ok, %{score_table: score_table, key_table: key_table}}
end
def handle_call({:insert, score, key}, _from,
%{score_table: score_table, key_table: key_table} = state) do
Leaderboard.Table.delete(score_table, key_table, key)
Leaderboard.Table.insert(score_table, key_table, score, key)
{:reply, :ok, state}
end
def handle_call({:delete, key}, _from,
%{score_table: score_table, key_table: key_table} = state) do
reply = Leaderboard.Table.delete(score_table, key_table, key)
{:reply, reply, state}
end
def handle_call(:delete_all, _from,
%{score_table: score_table, key_table: key_table} = state) do
Leaderboard.Table.delete_all(score_table, key_table)
{:reply, :ok, state}
end
end
defmodule Leaderboard.Table do
@moduledoc false
@server_key :"$server_pid"
@match_spec_all [{{:"$1"}, [], [:"$1"]}]
def init_score_table(key_table) do
table_name = score_table_name(key_table)
:ets.new(table_name, [:ordered_set, :protected, :named_table,
read_concurrency: true])
end
def init_key_table(key_table, server_pid) do
:ets.new(key_table, [:set, :protected, :named_table,
read_concurrency: true])
insert_server_pid(key_table, server_pid)
key_table
end
def server_pid(key_table) do
lookup_server_pid(key_table)
end
def score_table_name(key_table) do
# Append "Score" to key_table
Module.concat(key_table, "Score")
end
def delete(score_table, key_table, key) do
case :ets.lookup(key_table, key) do
[{^key, score}] ->
:ets.delete(key_table, key)
:ets.delete(score_table, {score, key})
true
[] ->
false
end
end
def delete_all(score_table, key_table) do
server_pid = lookup_server_pid(key_table)
:ets.delete_all_objects(score_table)
:ets.delete_all_objects(key_table)
insert_server_pid(key_table, server_pid)
end
def insert(score_table, key_table, score, key) do
# score_table has just key which is {score, key}, there is no value
# associated with the key.
:ets.insert(score_table, {{score, key}})
:ets.insert(key_table, {key, score})
end
def lookup(key_table, key) do
case :ets.lookup(key_table, key) do
[{^key, score}] -> score
[] -> nil
end
end
def match(score_table, match_spec, :descend, :all) do
score_table
|> :ets.select_reverse(match_spec)
|> match_result()
end
def match(score_table, match_spec, :descend, limit) do
score_table
|> :ets.select_reverse(match_spec, limit)
|> match_result()
end
def match(score_table, match_spec, :ascend, :all) do
score_table
|> :ets.select(match_spec)
|> match_result()
end
def match(score_table, match_spec, :ascend, limit) do
score_table
|> :ets.select(match_spec, limit)
|> match_result()
end
def select(score_table, :descend, 1) do
score_table
|> :ets.last()
|> single_select_result()
end
def select(score_table, :ascend, 1) do
score_table
|> :ets.first()
|> single_select_result()
end
def select(score_table, order, limit) do
match(score_table, @match_spec_all, order, limit)
end
def size(key_table) do
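# Subtract one to hide the internal server-pid bookkeeping entry.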
:ets.info(key_table, :size) - 1
end
defp insert_server_pid(key_table, server_pid) do
:ets.insert(key_table, {@server_key, server_pid})
end
defp lookup_server_pid(key_table) do
[{@server_key, pid}] = :ets.lookup(key_table, @server_key)
pid
end
defp single_select_result({_score, _key} = record), do: [record]
defp single_select_result(_), do: []
defp match_result({records, _cont}), do: records
defp match_result(records) when is_list(records), do: records
defp match_result(_), do: []
end
|
lib/leaderboard.ex
| 0.811452
| 0.577793
|
leaderboard.ex
|
starcoder
|
defmodule Canvas.Resources.Courses do
@moduledoc """
Provides functions to interact with the
[course endpoints](https://canvas.instructure.com/doc/api/courses).
"""
alias Canvas.{Client, Response}
alias Canvas.Resources.{Account, Course, EnrollmentTerm}
def list_your_courses() do
end
def list_courses_for_a_user() do
end
def create_a_new_course() do
end
def upload_a_file() do
end
def list_students() do
end
def list_users_in_course() do
end
def list_recently_logged_in_students() do
end
def get_single_user() do
end
def preview_processed_html() do
end
def course_activity_stream() do
end
def course_activity_stream_summary() do
end
def course_todo_items() do
end
def delete_or_conclude_a_course() do
end
def get_course_settings() do
end
def update_course_settings() do
end
@doc """
Return information on a single course.
See:
- https://canvas.instructure.com/doc/api/courses.html#method.courses.show
## Examples:
client = %Canvas.Client{access_token: "<PASSWORD>", base_url: "https://instructure.test"}
{:ok, response} = Canvas.Resources.Courses.get_a_single_course(client, 101)
"""
@spec get_a_single_course(Client.t(), String.t() | integer, Keyword.t()) ::
{:ok | :error, Response.t()}
def get_a_single_course(client, course_id, options \\ []) do
url = Client.versioned("/courses/#{course_id}")
Client.get(client, url, options)
|> Response.parse(%Course{term: %EnrollmentTerm{}, account: %Account{}})
end
@doc """
Return information on a single course within an account.
See:
- https://canvas.instructure.com/doc/api/courses.html#method.courses.show
## Examples:
client = %Canvas.Client{access_token: "<PASSWORD>", base_url: "https://instructure.test"}
{:ok, response} = Canvas.Resources.Courses.get_a_single_course_by_account(client, 1, 101)
"""
@spec get_a_single_course_by_account(
Client.t(),
String.t() | integer,
String.t() | integer,
Keyword.t()
) ::
{:ok | :error, Response.t()}
def get_a_single_course_by_account(client, account_id, course_id, options \\ []) do
url = Client.versioned("/accounts/#{account_id}/courses/#{course_id}")
Client.get(client, url, options)
|> Response.parse(%Course{term: %EnrollmentTerm{}, account: %Account{}})
end
def update_a_course() do
end
def update_courses() do
end
def reset_a_course() do
end
def get_effective_due_dates() do
end
def permissions() do
end
def get_course_copy_status() do
end
def copy_course_content() do
end
end
|
lib/canvas/resources/courses.ex
| 0.812459
| 0.523359
|
courses.ex
|
starcoder
|
defmodule Comeonin.Pbkdf2 do
@moduledoc """
Pbkdf2 is a password-based key derivation function
that uses a password, a variable-length salt and an iteration
count and applies a pseudorandom function to these to
produce a key.
The original implementation used SHA-1 as the pseudorandom function,
but this version uses HMAC-SHA-512.
"""
use Bitwise
alias Comeonin.Pbkdf2Base64
alias Comeonin.Config
alias Comeonin.Tools
@max_length bsl(1, 32) - 1
@salt_length 16
@doc """
Generate a salt for use with the `hashpass` function.
The minimum length of the salt is 16 and the maximum length
is 1024. The default is 16.
"""
def gen_salt(salt_length \\ @salt_length)
def gen_salt(salt_length) when salt_length in 16..1024 do
Tools.random_bytes(salt_length)
end
def gen_salt(_) do
raise ArgumentError, message: "The salt is the wrong length."
end
@doc """
Hash the password using pbkdf2_sha512.
"""
def hashpass(password, salt, rounds \\ Config.pbkdf2_rounds) do
if is_binary(salt) do
pbkdf2(password, salt, rounds, 64) |> format(salt, rounds)
else
raise ArgumentError, message: "Wrong type. The salt needs to be a string."
end
end
@doc """
Hash the password with a salt which is randomly generated.
To change the complexity (and the time taken) of the password hash
calculation, you need to change the value for `pbkdf2_rounds`
in the config file.
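
## Example

    hash = Comeonin.Pbkdf2.hashpwsalt("difficult2guess")
    Comeonin.Pbkdf2.checkpw("difficult2guess", hash)
    #=> true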
"""
def hashpwsalt(password) do
hashpass(password, gen_salt(), Config.pbkdf2_rounds)
end
defp format(hash, salt, rounds) do
"$pbkdf2-sha512$#{rounds}$#{Pbkdf2Base64.encode(salt)}$#{Pbkdf2Base64.encode(hash)}"
end
@doc """
Check the password.
The check is performed in constant time to avoid timing attacks.
"""
def checkpw(password, hash) when is_binary(password) and is_binary(hash) do
[_, _, rounds, salt, hash] = String.split(hash, "$")
pbkdf2(password, Pbkdf2Base64.decode(salt), String.to_integer(rounds), 64)
|> Pbkdf2Base64.encode
|> Tools.secure_check(hash)
end
def checkpw(_password, _hash) do
raise ArgumentError, message: "Wrong type. The password and hash need to be strings."
end
@doc """
Perform a dummy check for a user that does not exist.
This always returns false. The reason for implementing this check is
in order to make user enumeration by timing responses more difficult.
"""
def dummy_checkpw do
hashpwsalt("password")
false
end
defp pbkdf2(_password, _salt, _rounds, length) when length > @max_length do
raise ArgumentError, "length must be less than or equal to #{@max_length}"
end
defp pbkdf2(password, salt, rounds, length) when byte_size(salt) in 16..1024 do
pbkdf2(password, salt, rounds, length, 1, [], 0)
end
defp pbkdf2(_password, _salt, _rounds, _length) do
raise ArgumentError, message: "The salt is the wrong length."
end
defp pbkdf2(_password, _salt, _rounds, max_length, _block_index, acc, length)
when length >= max_length do
key = acc |> Enum.reverse |> IO.iodata_to_binary
<<bin::binary-size(max_length), _::binary>> = key
bin
end
defp pbkdf2(password, salt, rounds, max_length, block_index, acc, length) do
initial = :crypto.hmac(:sha512, password, <<salt::binary, block_index::integer-size(32)>>)
block = iterate(password, rounds - 1, initial, initial)
pbkdf2(password, salt, rounds, max_length, block_index + 1,
[block | acc], byte_size(block) + length)
end
defp iterate(_password, 0, _prev, acc), do: acc
defp iterate(password, round, prev, acc) do
next = :crypto.hmac(:sha512, password, prev)
iterate(password, round - 1, next, :crypto.exor(next, acc))
end
end
|
deps/comeonin/lib/comeonin/pbkdf2.ex
| 0.809427
| 0.515498
|
pbkdf2.ex
|
starcoder
|
defmodule Prometheus.InvalidValueError do
@moduledoc """
Raised when the given `value` is invalid, e.g. when you pass a negative number to
`Prometheus.Metric.Counter.inc/2`.
"""
defexception [:value, :orig_message]
def message(%{value: value, orig_message: message}) do
"Invalid value: #{inspect(value)} (#{message})."
end
end
defmodule Prometheus.InvalidMetricNameError do
@moduledoc """
Raised when the given metric `name` is invalid, i.e. can't be represented as a
printable utf-8 string that matches the `^[a-zA-Z_:][a-zA-Z0-9_:]*$` regular expression.
"""
defexception [:name]
def message(%{name: name}) do
"Invalid metric name: #{name}."
end
end
defmodule Prometheus.InvalidMetricLabelsError do
@moduledoc """
Raised when `labels` isn't a list.
"""
defexception [:labels]
def message(%{labels: labels}) do
"Invalid metric labels: #{labels}."
end
end
defmodule Prometheus.InvalidMetricHelpError do
@moduledoc """
Raised when the given metric `help` is invalid, i.e. isn't a printable utf-8 string.
"""
defexception [:help]
def message(%{help: help}) do
"Invalid metric help: #{help}."
end
end
defmodule Prometheus.InvalidMetricArityError do
@moduledoc """
Raised when metric arity is invalid e.g. counter metric was created with two labels but
only one label value is passed to `Prometheus.Metric.Counter.inc/2`.
"""
defexception [:present, :expected]
def message(%{present: present, expected: expected}) do
"Invalid metric arity: got #{present}, expected #{expected}."
end
end
defmodule Prometheus.UnknownMetricError do
defexception [:registry, :name]
def message(%{registry: registry, name: name}) do
"Unknown metric {registry: #{registry}, name: #{name}}."
end
end
defmodule Prometheus.InvalidLabelNameError do
@moduledoc """
Raised when a label `name` is invalid, i.e. can't be represented as a printable utf-8
string that matches the `^[a-zA-Z_][a-zA-Z0-9_]*$` regular expression, or starts with `__`.
Metric can impose further restrictions on label names.
"""
defexception [:name, :orig_message]
def message(%{name: name, orig_message: message}) do
"Invalid label name: #{name} (#{message})."
end
end
defmodule Prometheus.MFAlreadyExistsError do
@moduledoc """
Raised when one tries to create a metric in `registry` with a `name` that already exists.
"""
defexception [:registry, :name]
def message(%{registry: registry, name: name}) do
"Metric #{registry}:#{name} already exists."
end
end
defmodule Prometheus.NoBucketsError do
@moduledoc """
Raised by histogram constructors when buckets can't be found in the spec, or the
found value is an empty list.
"""
defexception [:buckets]
def message(%{buckets: buckets}) do
"Invalid histogram buckets: #{buckets}."
end
end
defmodule Prometheus.InvalidBucketsError do
@moduledoc """
Raised by histogram constructors when buckets are invalid i.e. not sorted in increasing
order or generator spec is unknown.
"""
defexception [:buckets, :orig_message]
def message(%{buckets: buckets, orig_message: message}) do
buckets = :io_lib.format("~p", [buckets])
"Invalid histogram buckets: #{buckets} (#{message})."
end
end
defmodule Prometheus.InvalidBoundError do
@moduledoc """
Raised by histogram constructors when bucket bound isn't a number.
"""
defexception [:bound]
def message(%{bound: bound}) do
"Invalid histogram bound: #{bound}."
end
end
defmodule Prometheus.MissingMetricSpecKeyError do
@moduledoc """
Raised when a required metric `spec` `key` is missing. All metrics
require at least a `name`, plus a `help` string when the metric is created.
Metrics can have their specific required keys.
"""
defexception [:key, :spec]
def message(%{key: key}) do
"Required key #{key} is missing from metric spec."
end
end
defmodule Prometheus.InvalidBlockArityError do
@moduledoc """
Raised when a fn passed as a block has more than 0 arguments.
"""
defexception [:args]
def message(%{args: args}) do
insp = Enum.map_join(args, ", ", &inspect/1)
"Fn with arity #{length(args)} (args: #{insp}) passed as block."
end
end
defmodule Prometheus.Error do
@moduledoc false
# credo:disable-for-this-file Credo.Check.Refactor.ABCSize
# credo:disable-for-this-file Credo.Check.Refactor.CyclomaticComplexity
def normalize(erlang_error) do
case erlang_error do
%ErlangError{original: original} ->
case original do
{:invalid_value, value, message} ->
%Prometheus.InvalidValueError{value: value, orig_message: message}
{:invalid_metric_name, name, _message} ->
%Prometheus.InvalidMetricNameError{name: name}
{:invalid_metric_help, help, _message} ->
%Prometheus.InvalidMetricHelpError{help: help}
{:invalid_metric_arity, present, expected} ->
%Prometheus.InvalidMetricArityError{present: present, expected: expected}
{:unknown_metric, registry, name} ->
%Prometheus.UnknownMetricError{registry: registry, name: name}
{:invalid_metric_labels, labels, _message} ->
%Prometheus.InvalidMetricLabelsError{labels: labels}
{:invalid_metric_label_name, name, message} ->
%Prometheus.InvalidLabelNameError{name: name, orig_message: message}
{:mf_already_exists, {registry, name}, _message} ->
%Prometheus.MFAlreadyExistsError{registry: registry, name: name}
{:no_buckets, buckets} ->
%Prometheus.NoBucketsError{buckets: buckets}
{:invalid_buckets, buckets, message} ->
%Prometheus.InvalidBucketsError{
buckets: buckets,
orig_message: message
}
{:invalid_bound, bound} ->
%Prometheus.InvalidBoundError{bound: bound}
{:missing_metric_spec_key, key, spec} ->
%Prometheus.MissingMetricSpecKeyError{key: key, spec: spec}
_ ->
erlang_error
end
_ ->
erlang_error
end
end
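# A usage sketch (illustrative; `:prometheus_counter` is the underlying Erlang
# library this wrapper targets):
#
#     require Prometheus.Error
#
#     Prometheus.Error.with_prometheus_error(
#       :prometheus_counter.inc(:default, :http_requests_total, [], 1)
#     )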
defmacro with_prometheus_error(block) do
quote do
try do
unquote(block)
rescue
e in ErlangError ->
reraise(
Prometheus.Error.normalize(e),
unquote(
if macro_exported?(Kernel.SpecialForms, :__STACKTRACE__, 0) do
quote(do: __STACKTRACE__)
else
quote(do: System.stacktrace())
end
)
)
end
end
end
end
|
astreu/deps/prometheus_ex/lib/prometheus/error.ex
| 0.84412
| 0.479565
|
error.ex
|
starcoder
|
defmodule SanbaseWeb.Graphql.Resolvers.EtherbiResolver do
require Logger
alias Sanbase.Model.{Infrastructure, ExchangeAddress}
@doc ~S"""
Return the token age consumed for the given slug and time period.
"""
def token_age_consumed(
_root,
%{slug: _slug, from: _from, to: _to, interval: _interval} = args,
_resolution
) do
SanbaseWeb.Graphql.Resolvers.MetricResolver.timeseries_data(
%{},
args,
%{source: %{metric: "age_destroyed"}}
)
|> Sanbase.Utils.Transform.duplicate_map_keys(:value, :burn_rate)
|> Sanbase.Utils.Transform.rename_map_keys(old_key: :value, new_key: :token_age_consumed)
end
@doc ~S"""
Return the average age of the tokens that were transacted for the given slug and time period.
"""
def average_token_age_consumed_in_days(
root,
%{slug: _, from: _, to: _, interval: _} = args,
resolution
) do
with {:ok, age_consumed} <- token_age_consumed(root, args, resolution),
{:ok, transaction_volume} <- transaction_volume(root, args, resolution) do
average_token_age_consumed_in_days =
Enum.zip(age_consumed, transaction_volume)
|> Enum.map(fn {%{token_age_consumed: token_age_consumed, datetime: datetime},
%{transaction_volume: trx_volume}} ->
%{
datetime: datetime,
token_age: token_age_in_days(token_age_consumed, trx_volume)
}
end)
{:ok, average_token_age_consumed_in_days}
end
end
def transaction_volume(
_root,
%{slug: _slug, from: _from, to: _to, interval: _interval} = args,
_resolution
) do
SanbaseWeb.Graphql.Resolvers.MetricResolver.timeseries_data(
%{},
args,
%{source: %{metric: "transaction_volume"}}
)
|> Sanbase.Utils.Transform.rename_map_keys(old_key: :value, new_key: :transaction_volume)
end
@doc ~S"""
Return the amount of tokens that were transacted in or out of an exchange wallet for a given slug
and time period
"""
def exchange_funds_flow(
_root,
%{slug: _slug, from: _from, to: _to, interval: _interval} = args,
_resolution
) do
SanbaseWeb.Graphql.Resolvers.MetricResolver.timeseries_data(
%{},
args,
%{source: %{metric: "exchange_balance"}}
)
|> Sanbase.Utils.Transform.rename_map_keys(old_key: :value, new_key: :in_out_difference)
end
def token_velocity(
_root,
%{slug: _slug, from: _from, to: _to, interval: _interval} = args,
_resolution
) do
SanbaseWeb.Graphql.Resolvers.MetricResolver.timeseries_data(
%{},
args,
%{source: %{metric: "velocity"}}
)
|> Sanbase.Utils.Transform.rename_map_keys(old_key: :value, new_key: :token_velocity)
end
def all_exchange_wallets(_root, _args, _resolution) do
{:ok, ExchangeAddress.all_exchange_wallets()}
end
def exchange_wallets(_root, %{slug: "ethereum"}, _resolution) do
{:ok, ExchangeAddress.exchange_wallets_by_infrastructure(Infrastructure.get("ETH"))}
end
def exchange_wallets(_root, %{slug: "bitcoin"}, _resolution) do
{:ok, ExchangeAddress.exchange_wallets_by_infrastructure(Infrastructure.get("BTC"))}
end
def exchange_wallets(_, _, _) do
{:error, "Currently only ethereum and bitcoin exchanges are supported"}
end
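# Average token age is total age consumed divided by transaction volume.
# Illustrative arithmetic: 1_000_000 token-days destroyed over a volume of
# 50_000 tokens gives an average token age of 20 days.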
defp token_age_in_days(token_age_consumed, trx_volume)
when token_age_consumed <= 0.1 or trx_volume <= 0.1 do
0
end
defp token_age_in_days(token_age_consumed, trx_volume) do
token_age_consumed / trx_volume
end
end
|
lib/sanbase_web/graphql/resolvers/etherbi_resolver.ex
| 0.812161
| 0.429728
|
etherbi_resolver.ex
|
starcoder
|
defmodule Frame do
defstruct type: :scored, rolls: []
end
defmodule BowlingGame do
defstruct frames: []
end
defmodule BowlingKata do
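# A usage sketch (illustrative): "X9/45" parses to a strike, a spare and an
# open frame, returned in play order:
#
#     BowlingKata.parse_input("X9/45")
#     #=> [%Frame{type: :strike, rolls: [10]},
#     #    %Frame{type: :spare, rolls: [9, 1]},
#     #    %Frame{type: :scored, rolls: [4, 5]}]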
def parse_input(""), do: []
def parse_input(:nil), do: []
def parse_input(rolls) do
rolls
|> String.upcase
|> String.graphemes
|> parse_frame(%BowlingGame{}, 1)
|> (&(&1.frames)).()
|> Enum.reverse
end
defp parse_frame([], game, _) do
game
end
defp parse_frame(["X", "X", "X" | rest], %BowlingGame{frames: frames}, 10) do
frame = %Frame{type: :strike, rolls: [10, 10, 10] }
game = %BowlingGame{frames: [frame | frames]}
parse_frame rest, game, 10 + 1
end
defp parse_frame(["X", r1, "/" | rest], %BowlingGame{frames: frames}, 10) do
{rvalue, _} = Integer.parse(r1)
frame = %Frame{type: :strike, rolls: [10, rvalue, 10 - rvalue] }
game = %BowlingGame{frames: [frame | frames]}
parse_frame rest, game, 10 + 1
end
defp parse_frame(["X", r1, r2 | rest], %BowlingGame{frames: frames}, 10) do
{rvalue1, _} = Integer.parse(r1)
{rvalue2, _} = Integer.parse(r2)
frame = %Frame{type: :strike, rolls: [10, rvalue1, rvalue2] }
game = %BowlingGame{frames: [frame | frames]}
parse_frame rest, game, 10 + 1
end
defp parse_frame([r1, "/", "X" | rest], %BowlingGame{frames: frames}, 10) do
{rvalue, _} = Integer.parse(r1)
frame = %Frame{type: :spare, rolls: [rvalue, 10 - rvalue, 10] }
game = %BowlingGame{frames: [frame | frames]}
parse_frame rest, game, 10 + 1
end
defp parse_frame([r1, "/", r2 | rest], %BowlingGame{frames: frames}, 10) do
{rvalue1, _} = Integer.parse(r1)
{rvalue2, _} = Integer.parse(r2)
frame = %Frame{type: :spare, rolls: [rvalue1, 10 - rvalue1, rvalue2] }
game = %BowlingGame{frames: [frame | frames]}
parse_frame rest, game, 10 + 1
end
defp parse_frame(["X" | rest], %BowlingGame{frames: frames}, frame_num) do
game = %BowlingGame{frames: [%Frame{type: :strike, rolls: [10]} | frames]}
parse_frame rest, game, frame_num + 1
end
defp parse_frame(["-", "-" | rest], %BowlingGame{frames: frames}, frame_num) do
game = %BowlingGame{frames: [%Frame{type: :scored, rolls: [0,0]} | frames]}
parse_frame rest, game, frame_num + 1
end
defp parse_frame([r1, "-" | rest], %BowlingGame{frames: frames}, frame_num) do
{rvalue, _} = Integer.parse(r1)
game = %BowlingGame{frames: [%Frame{type: :scored, rolls: [rvalue, 0]} | frames]}
parse_frame rest, game, frame_num + 1
end
defp parse_frame(["-", r2 | rest], %BowlingGame{frames: frames}, frame_num) do
{rvalue, _} = Integer.parse(r2)
game = %BowlingGame{frames: [%Frame{type: :scored, rolls: [0,rvalue]} | frames]}
parse_frame rest, game, frame_num + 1
end
defp parse_frame([r1, "/" | rest], %BowlingGame{frames: frames}, frame_num) do
{rvalue, _} = Integer.parse(r1)
frame = %Frame{type: :spare, rolls: [rvalue, 10 - rvalue]}
game = %BowlingGame{frames: [frame | frames]}
parse_frame rest, game, frame_num + 1
end
defp parse_frame([r1, r2 | rest], %BowlingGame{frames: frames}, frame_num) do
{r1value, _} = Integer.parse(r1)
{r2value, _} = Integer.parse(r2)
game = %BowlingGame{
frames: [%Frame{type: :scored, rolls: [r1value, r2value]} | frames]
}
parse_frame rest, game, frame_num + 1
end
end
|
lib/bowling_kata.ex
| 0.545286
| 0.718965
|
bowling_kata.ex
|
starcoder
|
defmodule MangoPay.User do
@moduledoc """
Functions for MangoPay [client](https://docs.mangopay.com/endpoints/v2.01/users#e253_the-user-object).
"""
use MangoPay.Query.Base
set_path "users"
@doc """
Get a user.
## Examples
{:ok, user} = MangoPay.User.get(id)
"""
def get id do
_get id
end
@doc """
Get a user.
## Examples
user = MangoPay.User.get!(id)
"""
def get! id do
_get! id
end
@doc """
List all users.
## Examples
query = %{
"Page": 1,
"Per_Page": 25,
"Sort": "CreationDate:DESC",
"BeforeDate": 1463440221,
"AfterDate": 1431817821
}
{:ok, users} = MangoPay.User.all query
"""
def all(query \\ %{}) do
_all(nil, query)
end
@doc """
List all users.
## Examples
query = %{
"Page": 1,
"Per_Page": 25,
"Sort": "CreationDate:DESC",
"BeforeDate": 1463440221,
"AfterDate": 1431817821
}
users = MangoPay.User.all! query
"""
def all!(query \\ %{}) do
_all!(nil, query)
end
@doc """
Get emoney.
## Examples
{:ok, client} = MangoPay.User.get_emoney("user_id")
"""
def get_emoney id do
_get [resource(), "#{id}", "emoney"]
end
@doc """
Get the e-money balance of a user.
## Examples
emoney = MangoPay.User.get_emoney!("user_id")
"""
def get_emoney! id do
_get! [resource(), "#{id}", "emoney"]
end
defmodule Natural do
@moduledoc """
Functions for MangoPay user natural.
MangoPay official API documentation: https://docs.mangopay.com/endpoints/v2.01/users#e253_the-user-object
"""
use MangoPay.Query.Base
set_path "users/natural"
@doc """
Create a user.
## Examples
params = %{
"Tag": "custom meta",
"FirstName": "Joe",
"LastName": "Blogs",
"Address": %{
"AddressLine1": "1 Mangopay Street",
"AddressLine2": "The Loop",
"City": "Paris",
"Region": "Ile de France",
"PostalCode": "75001",
"Country": "FR"
},
"Birthday": 1463496101,
"Nationality": "GB",
"CountryOfResidence": "FR",
"Occupation": "Carpenter",
"IncomeRange": 2,
"Email": "<EMAIL>",
"Capacity": "NORMAL"
}
{:ok, user} = MangoPay.User.Natural.create(params)
"""
def create params do
_create params
end
@doc """
Create a user.
## Examples
params = %{
"Tag": "custom meta",
"FirstName": "Joe",
"LastName": "Blogs",
"Address": %{
"AddressLine1": "1 Mangopay Street",
"AddressLine2": "The Loop",
"City": "Paris",
"Region": "Ile de France",
"PostalCode": "75001",
"Country": "FR"
},
"Birthday": 1463496101,
"Nationality": "GB",
"CountryOfResidence": "FR",
"Occupation": "Carpenter",
"IncomeRange": 2,
"Email": "<EMAIL>",
"Capacity": "NORMAL"
}
user = MangoPay.User.Natural.create!(params)
"""
def create! params do
_create! params
end
@doc """
Update a user.
## Examples
params = %{
"Tag": "custom meta",
"FirstName": "Joe",
"LastName": "Blogs",
"Address": %{
"AddressLine1": "1 Mangopay Street",
"AddressLine2": "The Loop",
"City": "Paris",
"Region": "Ile de France",
"PostalCode": "75001",
"Country": "FR"
},
"Birthday": 1463496101,
"Nationality": "GB",
"CountryOfResidence": "FR",
"Occupation": "Carpenter",
"IncomeRange": 2,
"Email": "<EMAIL>"
}
{:ok, user} = MangoPay.User.Natural.update(id, params)
"""
def update id, params do
_update params, id
end
@doc """
Update a user.
## Examples
params = %{
"Tag": "custom meta",
"FirstName": "Joe",
"LastName": "Blogs",
"Address": %{
"AddressLine1": "1 Mangopay Street",
"AddressLine2": "The Loop",
"City": "Paris",
"Region": "Ile de France",
"PostalCode": "75001",
"Country": "FR"
},
"Birthday": 1463496101,
"Nationality": "GB",
"CountryOfResidence": "FR",
"Occupation": "Carpenter",
"IncomeRange": 2,
"Email": "<EMAIL>"
}
user = MangoPay.User.Natural.update!(id, params)
"""
def update! id, params do
_update! params, id
end
end
defmodule Legal do
@moduledoc """
Functions for MangoPay user legal.
MangoPay official API documentation: https://docs.mangopay.com/endpoints/v2.01/users#e253_the-user-object
"""
use MangoPay.Query.Base
set_path "users/legal"
@doc """
Create a user.
## Examples
params = %{
"Tag": "custom meta",
"HeadquartersAddress": %{
"AddressLine1": "1 Mangopay Street",
"AddressLine2": "The Loop",
"City": "Paris",
"Region": "Ile de France",
"PostalCode": "75001",
"Country": "FR"
},
"LegalPersonType": "BUSINESS",
"Name": "<NAME>",
"LegalRepresentativeAddress": %{
"AddressLine1": "1 Mangopay Street",
"AddressLine2": "The Loop",
"City": "Paris",
"Region": "Ile de France",
"PostalCode": "75001",
"Country": "FR"
},
"LegalRepresentativeBirthday": 1463496101,
"LegalRepresentativeCountryOfResidence": "ES",
"LegalRepresentativeNationality": "FR",
"LegalRepresentativeEmail": "<EMAIL>",
"LegalRepresentativeFirstName": "Joe",
"LegalRepresentativeLastName": "Blogs",
"Email": "<EMAIL>",
"CompanyNumber": "LU72HN11"
}
{:ok, user} = MangoPay.User.Legal.create(params)
"""
def create params do
_create params
end
@doc """
Create a user.
## Examples
params = %{
"Tag": "custom meta",
"HeadquartersAddress": %{
"AddressLine1": "1 Mangopay Street",
"AddressLine2": "The Loop",
"City": "Paris",
"Region": "Ile de France",
"PostalCode": "75001",
"Country": "FR"
},
"LegalPersonType": "BUSINESS",
"Name": "<NAME>",
"LegalRepresentativeAddress": %{
"AddressLine1": "1 Mangopay Street",
"AddressLine2": "The Loop",
"City": "Paris",
"Region": "Ile de France",
"PostalCode": "75001",
"Country": "FR"
},
"LegalRepresentativeBirthday": 1463496101,
"LegalRepresentativeCountryOfResidence": "ES",
"LegalRepresentativeNationality": "FR",
"LegalRepresentativeEmail": "<EMAIL>",
"LegalRepresentativeFirstName": "Joe",
"LegalRepresentativeLastName": "Blogs",
"Email": "<EMAIL>",
"CompanyNumber": "LU72HN11"
}
user = MangoPay.User.Legal.create!(params)
"""
def create! params do
_create! params
end
@doc """
Update a user.
## Examples
params = %{
"Tag": "custom meta",
"HeadquartersAddress": "3 Mangopay Loop, Paris, 777",
"Name": "Mangopay Ltd",
"LegalRepresentativeAddress": "34bis Mangopay Circle, Paris, 777",
"LegalRepresentativeBirthday": 1463496101,
"LegalRepresentativeCountryOfResidence": "ES",
"LegalRepresentativeNationality": "FR",
"LegalRepresentativeEmail": "<EMAIL>",
"LegalRepresentativeFirstName": "Joe",
"LegalRepresentativeLastName": "Blogs",
"LegalPersonType": "BUSINESS",
"CompanyNumber": "LU72HN11"
}
{:ok, user} = MangoPay.User.Legal.update(id, params)
"""
def update id, params do
_update params, id
end
@doc """
Update a user.
## Examples
params = %{
"Tag": "custom meta",
"HeadquartersAddress": "3 Mangopay Loop, Paris, 777",
"Name": "Mangopay Ltd",
"LegalRepresentativeAddress": "34bis Mangopay Circle, Paris, 777",
"LegalRepresentativeBirthday": 1463496101,
"LegalRepresentativeCountryOfResidence": "ES",
"LegalRepresentativeNationality": "FR",
"LegalRepresentativeEmail": "<EMAIL>",
"LegalRepresentativeFirstName": "Joe",
"LegalRepresentativeLastName": "Blogs",
"LegalPersonType": "BUSINESS",
"CompanyNumber": "LU72HN11"
}
user = MangoPay.User.Legal.update!(id, params)
"""
def update! id, params do
_update! params, id
end
end
end
|
lib/mango_pay/user.ex
| 0.718989
| 0.439928
|
user.ex
|
starcoder
|
defmodule Phoenix.Logger do
@moduledoc """
Instrumenter to handle logging of various instrumentation events.
## Instrumentation
Phoenix uses the `:telemetry` library for instrumentation. The following events
are published by Phoenix with the following measurements and metadata:
* `[:phoenix, :endpoint, :start]` - dispatched by `Plug.Telemetry` in your
endpoint at the beginning of every request.
* Measurement: `%{time: System.monotonic_time}`
* Metadata: `%{conn: Plug.Conn.t, options: Keyword.t}`
* Options: `%{log: Logger.level | false}`
* Disable logging: In your endpoint `plug Plug.Telemetry, ..., log: Logger.level | false`
* `[:phoenix, :endpoint, :stop]` - dispatched by `Plug.Telemetry` in your
endpoint whenever the response is sent
* Measurement: `%{duration: native_time}`
* Metadata: `%{conn: Plug.Conn.t, options: Keyword.t}`
* Options: `%{log: Logger.level | false}`
* Disable logging: In your endpoint `plug Plug.Telemetry, ..., log: Logger.level | false`
* `[:phoenix, :router_dispatch, :start]` - dispatched by `Phoenix.Router`
before dispatching to a matched route
* Measurement: `%{system_time: System.system_time}`
* Metadata: `%{conn: Plug.Conn.t, route: binary, plug: module, plug_opts: term, path_params: map, pipe_through: [atom], log: Logger.level | false}`
* Disable logging: Pass `log: false` to the router macro, for example: `get("/page", PageController, :index, log: false)`
* `[:phoenix, :router_dispatch, :exception]` - dispatched by `Phoenix.Router`
after exceptions on dispatching a route
* Measurement: `%{duration: native_time}`
* Metadata: `%{kind: :throw | :error | :exit, reason: term(), stacktrace: Exception.stacktrace()}`
* Disable logging: This event is not logged
* `[:phoenix, :router_dispatch, :stop]` - dispatched by `Phoenix.Router`
after successfully dispatching a matched route
* Measurement: `%{duration: native_time}`
* Metadata: `%{conn: Plug.Conn.t, route: binary, plug: module, plug_opts: term, path_params: map, pipe_through: [atom], log: Logger.level | false}`
* Disable logging: This event is not logged
* `[:phoenix, :error_rendered]` - dispatched at the end of an error view being rendered
* Measurement: `%{duration: native_time}`
* Metadata: `%{status: Plug.Conn.status, kind: Exception.kind, reason: term, stacktrace: Exception.stacktrace}`
* Disable logging: Set `render_errors: [log: false]` on your endpoint configuration
* `[:phoenix, :socket_connected]` - dispatched by `Phoenix.Socket`, at the end of a socket connection
* Measurement: `%{duration: native_time}`
* Metadata: `%{endpoint: atom, transport: atom, params: term, connect_info: map, vsn: binary, user_socket: atom, result: :ok | :error, serializer: atom, log: Logger.level | false}`
* Disable logging: `use Phoenix.Socket, log: false` or `socket "/foo", MySocket, websocket: [log: false]` in your endpoint
* `[:phoenix, :channel_joined]` - dispatched at the end of a channel join
* Measurement: `%{duration: native_time}`
* Metadata: `%{params: term, socket: Phoenix.Socket.t}`
* Disable logging: This event cannot be disabled
* `[:phoenix, :channel_handled_in]` - dispatched at the end of a channel handle in
* Measurement: `%{duration: native_time}`
* Metadata: `%{event: binary, params: term, socket: Phoenix.Socket.t}`
* Disable logging: This event cannot be disabled
## Parameter filtering
When logging parameters, Phoenix can filter out sensitive parameters
such as passwords and tokens. Parameters to be filtered can be
added via the `:filter_parameters` option:
config :phoenix, :filter_parameters, ["password", "secret"]
With the configuration above, Phoenix will filter any parameter
that contains the terms `password` or `secret`. The match is
case sensitive.
Phoenix's default is `["password"]`.
Phoenix can filter all parameters by default and selectively keep
parameters. This can be configured like so:
config :phoenix, :filter_parameters, {:keep, ["id", "order"]}
With the configuration above, Phoenix will filter all parameters,
except those that match exactly `id` or `order`. If a kept parameter
matches, all parameters nested under that one will also be kept.
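For example (illustrative parameter maps, run against the `filter_values/2` helper below):
iex> Phoenix.Logger.filter_values(%{"password" => "secret", "user" => "jose"}, ["password"])
%{"password" => "[FILTERED]", "user" => "jose"}
iex> Phoenix.Logger.filter_values(%{"id" => "1", "token" => "abc"}, {:keep, ["id"]})
%{"id" => "1", "token" => "[FILTERED]"}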
## Disabling
When you are using a custom logging system, it is not always desirable to enable
`#{inspect __MODULE__}` by default. You can disable it altogether with:
config :phoenix, :logger, false
"""
require Logger
@doc false
def install do
handlers = %{
[:phoenix, :endpoint, :start] => &phoenix_endpoint_start/4,
[:phoenix, :endpoint, :stop] => &phoenix_endpoint_stop/4,
[:phoenix, :router_dispatch, :start] => &phoenix_router_dispatch_start/4,
[:phoenix, :error_rendered] => &phoenix_error_rendered/4,
[:phoenix, :socket_connected] => &phoenix_socket_connected/4,
[:phoenix, :channel_joined] => &phoenix_channel_joined/4,
[:phoenix, :channel_handled_in] => &phoenix_channel_handled_in/4
}
for {key, fun} <- handlers do
:telemetry.attach({__MODULE__, key}, key, fun, :ok)
end
end
@doc false
def duration(duration) do
duration = System.convert_time_unit(duration, :native, :microsecond)
if duration > 1000 do
[duration |> div(1000) |> Integer.to_string(), "ms"]
else
[Integer.to_string(duration), "µs"]
end
end
@doc false
def filter_values(values, params \\ Application.get_env(:phoenix, :filter_parameters, []))
def filter_values(values, {:discard, params}), do: discard_values(values, params)
def filter_values(values, {:keep, params}), do: keep_values(values, params)
def filter_values(values, params), do: discard_values(values, params)
defp discard_values(%{__struct__: mod} = struct, _params) when is_atom(mod) do
struct
end
defp discard_values(%{} = map, params) do
Enum.into(map, %{}, fn {k, v} ->
if is_binary(k) and String.contains?(k, params) do
{k, "[FILTERED]"}
else
{k, discard_values(v, params)}
end
end)
end
defp discard_values([_ | _] = list, params) do
Enum.map(list, &discard_values(&1, params))
end
defp discard_values(other, _params), do: other
defp keep_values(%{__struct__: mod}, _params) when is_atom(mod), do: "[FILTERED]"
defp keep_values(%{} = map, params) do
Enum.into(map, %{}, fn {k, v} ->
if is_binary(k) and k in params do
{k, discard_values(v, [])}
else
{k, keep_values(v, params)}
end
end)
end
defp keep_values([_ | _] = list, params) do
Enum.map(list, &keep_values(&1, params))
end
defp keep_values(_other, _params), do: "[FILTERED]"
## Event: [:phoenix, :endpoint, *]
defp phoenix_endpoint_start(_, _, %{conn: conn} = metadata, _) do
case metadata[:options][:log] do
false ->
:ok
level ->
Logger.log(level || :info, fn ->
%{method: method, request_path: request_path} = conn
[method, ?\s, request_path]
end)
end
end
defp phoenix_endpoint_stop(_, %{duration: duration}, %{conn: conn} = metadata, _) do
case metadata[:options][:log] do
false ->
:ok
level ->
Logger.log(level || :info, fn ->
%{status: status, state: state} = conn
status = Integer.to_string(status)
[connection_type(state), ?\s, status, " in ", duration(duration)]
end)
end
end
defp connection_type(:set_chunked), do: "Chunked"
defp connection_type(_), do: "Sent"
## Event: [:phoenix, :error_rendered]
defp phoenix_error_rendered(_, _, %{log: false}, _), do: :ok
defp phoenix_error_rendered(_, _, %{log: level, status: status, kind: kind, reason: reason}, _) do
Logger.log(level, fn ->
[
"Converted ",
Atom.to_string(kind),
?\s,
error_banner(kind, reason),
" to ",
Integer.to_string(status),
" response"
]
end)
end
defp error_banner(:error, %type{}), do: inspect(type)
defp error_banner(_kind, reason), do: inspect(reason)
## Event: [:phoenix, :router_dispatch, :start]
defp phoenix_router_dispatch_start(_, _, %{log: false}, _), do: :ok
defp phoenix_router_dispatch_start(_, _, metadata, _) do
%{log: level, conn: conn, pipe_through: pipe_through, plug: plug, plug_opts: plug_opts} =
metadata
Logger.log(level, fn ->
[
"Processing with ",
inspect(plug),
maybe_action(plug_opts),
?\n,
" Parameters: ",
params(conn.params),
?\n,
" Pipelines: ",
inspect(pipe_through)
]
end)
end
defp maybe_action(action) when is_atom(action), do: [?., Atom.to_string(action), ?/, ?2]
defp maybe_action(_), do: []
defp params(%Plug.Conn.Unfetched{}), do: "[UNFETCHED]"
defp params(params), do: params |> filter_values() |> inspect()
## Event: [:phoenix, :socket_connected]
defp phoenix_socket_connected(_, _, %{log: false}, _), do: :ok
defp phoenix_socket_connected(_, %{duration: duration}, %{log: level} = meta, _) do
Logger.log(level, fn ->
%{
transport: transport,
params: params,
user_socket: user_socket,
result: result,
serializer: serializer
} = meta
[
connect_result(result),
inspect(user_socket),
" in ",
duration(duration),
"\n Transport: ",
inspect(transport),
"\n Serializer: ",
inspect(serializer),
"\n Parameters: ",
inspect(filter_values(params))
]
end)
end
defp connect_result(:ok), do: "CONNECTED TO "
defp connect_result(:error), do: "REFUSED CONNECTION TO "
## Event: [:phoenix, :channel_joined]
def phoenix_channel_joined(_, %{duration: duration}, %{socket: socket} = metadata, _) do
channel_log(:log_join, socket, fn ->
%{result: result, params: params} = metadata
[
join_result(result),
socket.topic,
" in ",
duration(duration),
"\n Parameters: ",
inspect(filter_values(params))
]
end)
end
defp join_result(:ok), do: "JOINED "
defp join_result(:error), do: "REFUSED JOIN "
## Event: [:phoenix, :channel_handled_in]
def phoenix_channel_handled_in(_, %{duration: duration}, %{socket: socket} = metadata, _) do
channel_log(:log_handle_in, socket, fn ->
%{event: event, params: params} = metadata
[
"HANDLED ",
event,
" INCOMING ON ",
socket.topic,
" (",
inspect(socket.channel),
") in ",
duration(duration),
"\n Parameters: ",
inspect(filter_values(params))
]
end)
end
defp channel_log(_log_option, %{topic: "phoenix" <> _}, _fun), do: :ok
defp channel_log(log_option, %{private: private}, fun) do
if level = Map.get(private, log_option) do
Logger.log(level, fun)
end
end
end
|
lib/phoenix/logger.ex
| 0.876694
| 0.551151
|
logger.ex
|
starcoder
|
defmodule Lob do
require Chacha20
@moduledoc """
Length-Object-Binary (LOB) Packet Encoding
Data serialization, primarily in use by the [Telehash Project](http://telehash.org)
"""
@type maybe_binary :: binary | nil
@doc """
Decode a wire packet for consumption
The parts are returned in a struct compliant with the specification.
Errors reflecting improperly decoded JSON are stored in the `json` field.
"""
@spec decode(binary) :: Lob.DecodedPacket.t() | no_return
def decode(<<s::size(16), rest::binary>>), do: rest |> decode_rest(s)
@spec encode(maybe_binary | map, maybe_binary) :: binary | no_return
@doc """
Encode a head and body into a packet
The packet should be usable across any supported transport. May raise an
exception if the payload is too large or there are encoding errors.
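For example, the head size is written as a 16-bit big-endian prefix (here
`{"type":"ping"}` encodes to 15 bytes):
iex> packet = Lob.encode(%{"type" => "ping"}, <<1, 2, 3>>)
iex> <<head_size::size(16), _rest::binary>> = packet
iex> head_size
15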
"""
def encode(head, body) when is_nil(head), do: encode("", body)
def encode(head, body) when is_nil(body), do: encode(head, "")
def encode(head, body) when is_binary(head), do: head_size(head) <> head <> body
def encode(head, body) when is_map(head), do: encode(Jason.encode!(head), body)
defp head_size(s) when byte_size(s) <= 0xFFFF, do: <<byte_size(s)::size(16)>>
defp head_size(s) when byte_size(s) > 0xFFFF, do: raise("Head payload too large.")
@spec decode_rest(binary, char) :: Lob.DecodedPacket.t()
defp decode_rest(r, _s) when r == "", do: %Lob.DecodedPacket{}
defp decode_rest(r, s) when s == 0, do: Lob.DecodedPacket.__build__(nil, nil, r)
defp decode_rest(r, s) do
<<head::binary-size(s), body::binary>> = r
json =
if s <= 6 do
nil
else
case Jason.decode(head) do
{:ok, j} -> j
e -> e
end
end
Lob.DecodedPacket.__build__(head, json, body)
end
# SHA256 of "telehash"
defp cloak_key,
do:
<<215, 240, 229, 85, 84, 98, 65, 178, 169, 68, 236, 214, 208, 222, 102, 133, 106, 197, 11,
11, 171, 167, 106, 111, 90, 71, 130, 149, 108, 169, 69, 154>>
@spec cloak(binary) :: binary
@doc """
Cloak a packet to frustrate wire monitoring
Between 1 and 20 rounds of cloaking are applied at random. This also
serves to slightly obfuscate the message size.
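A cloak/decloak round trip (illustrative; relies on `decloak/1` below):
iex> packet = Lob.encode(%{"type" => "ping"}, <<>>)
iex> decoded = packet |> Lob.cloak() |> Lob.decloak()
iex> decoded.cloaked in 1..20
true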
"""
def cloak(b), do: cloak_loop(b, :rand.uniform(20))
defp cloak_loop(b, 0), do: b
defp cloak_loop(b, rounds) do
n = make_nonce()
cloak_loop(n <> Chacha20.crypt(b, cloak_key(), n), rounds - 1)
end
defp make_nonce do
n = :crypto.strong_rand_bytes(8)
case binary_part(n, 0, 1) do
<<0>> -> make_nonce()
_ -> n
end
end
@spec decloak(binary) :: Lob.DecodedPacket.t() | no_return
@doc """
De-cloak a cloaked packet.
Upon success, the decoded packet records the number of cloaking rounds that
were unfurled in the `cloaked` field.
"""
def decloak(b), do: decloak_loop(b, 0)
defp decloak_loop(<<0, _rest::binary>> = b, r), do: %{decode(b) | cloaked: r}
defp decloak_loop(<<nonce::binary-size(8), ct::binary>>, r),
do: decloak_loop(Chacha20.crypt(ct, cloak_key(), nonce), r + 1)
end
|
lib/lob.ex
| 0.854126
| 0.457864
|
lob.ex
|
starcoder
|
defmodule Google.Protobuf.Api do
@moduledoc false
alias Pbuf.Decoder
import Bitwise, only: [bsr: 2, band: 2]
@derive Jason.Encoder
defstruct [
name: "",
methods: [],
options: [],
version: "",
source_context: nil,
mixins: [],
syntax: 0
]
@type t :: %__MODULE__{
name: String.t,
methods: [Google.Protobuf.Method.t],
options: [Google.Protobuf.Option.t],
version: String.t,
source_context: Google.Protobuf.SourceContext.t,
mixins: [Google.Protobuf.Mixin.t],
syntax: Google.Protobuf.Syntax.t
}
@spec new(Enum.t) :: t
def new(data) do
struct(__MODULE__, data)
end
@spec encode_to_iodata!(t | map) :: iodata
def encode_to_iodata!(data) do
alias Elixir.Pbuf.Encoder
[
Encoder.field(:string, data.name, <<10>>),
Encoder.repeated_field(:struct, data.methods, <<18>>),
Encoder.repeated_field(:struct, data.options, <<26>>),
Encoder.field(:string, data.version, <<34>>),
Encoder.field(:struct, data.source_context, <<42>>),
Encoder.repeated_field(:struct, data.mixins, <<50>>),
Encoder.enum_field(Google.Protobuf.Syntax, data.syntax, <<56>>),
]
end
@spec encode!(t | map) :: binary
def encode!(data) do
:erlang.iolist_to_binary(encode_to_iodata!(data))
end
@spec decode!(binary) :: t
def decode!(data) do
Decoder.decode!(__MODULE__, data)
end
@spec decode(binary) :: {:ok, t} | :error
def decode(data) do
Decoder.decode(__MODULE__, data)
end
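# Example roundtrip (illustrative):
#
#   %Google.Protobuf.Api{name: "my.Api"}
#   |> Google.Protobuf.Api.encode!()
#   |> Google.Protobuf.Api.decode!()
#   #=> %Google.Protobuf.Api{name: "my.Api", ...}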
def decode(acc, <<10, data::binary>>) do
Decoder.field(:string, :name, acc, data)
end
def decode(acc, <<18, data::binary>>) do
Decoder.struct_field(Google.Protobuf.Method, :methods, acc, data)
end
def decode(acc, <<26, data::binary>>) do
Decoder.struct_field(Google.Protobuf.Option, :options, acc, data)
end
def decode(acc, <<34, data::binary>>) do
Decoder.field(:string, :version, acc, data)
end
def decode(acc, <<42, data::binary>>) do
Decoder.struct_field(Google.Protobuf.SourceContext, :source_context, acc, data)
end
def decode(acc, <<50, data::binary>>) do
Decoder.struct_field(Google.Protobuf.Mixin, :mixins, acc, data)
end
def decode(acc, <<56, data::binary>>) do
Decoder.enum_field(Google.Protobuf.Syntax, :syntax, acc, data)
end
# Failed to decode: either this is an unknown tag (which we can skip),
# or it is a known tag with the wrong wire type (which is an error).
def decode(acc, data) do
{prefix, data} = Decoder.varint(data)
tag = bsr(prefix, 3)
type = band(prefix, 7)
case tag in [1,2,3,4,5,6,7] do
false -> {acc, Decoder.skip(type, data)}
true ->
err = %Decoder.Error{
tag: tag,
module: __MODULE__,
message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
}
{:error, err}
end
end
def __finalize_decode__(args) do
struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
{:mixins, v}, acc -> Map.update(acc, :mixins, [v], fn e -> [v | e] end)
{:options, v}, acc -> Map.update(acc, :options, [v], fn e -> [v | e] end)
{:methods, v}, acc -> Map.update(acc, :methods, [v], fn e -> [v | e] end)
{k, v}, acc -> Map.put(acc, k, v)
end)
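# Repeated fields were accumulated by prepending during decode; reverse them to restore wire order.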
struct = Map.put(struct, :mixins, Elixir.Enum.reverse(struct.mixins))
struct = Map.put(struct, :options, Elixir.Enum.reverse(struct.options))
struct = Map.put(struct, :methods, Elixir.Enum.reverse(struct.methods))
struct
end
end
defmodule Google.Protobuf.Method do
@moduledoc false
alias Pbuf.Decoder
import Bitwise, only: [bsr: 2, band: 2]
@derive Jason.Encoder
defstruct [
name: "",
request_type_url: "",
request_streaming: false,
response_type_url: "",
response_streaming: false,
options: [],
syntax: 0
]
@type t :: %__MODULE__{
name: String.t,
request_type_url: String.t,
request_streaming: boolean,
response_type_url: String.t,
response_streaming: boolean,
options: [Google.Protobuf.Option.t],
syntax: Google.Protobuf.Syntax.t
}
@spec new(Enum.t) :: t
def new(data) do
struct(__MODULE__, data)
end
@spec encode_to_iodata!(t | map) :: iodata
def encode_to_iodata!(data) do
alias Elixir.Pbuf.Encoder
[
Encoder.field(:string, data.name, <<10>>),
Encoder.field(:string, data.request_type_url, <<18>>),
Encoder.field(:bool, data.request_streaming, <<24>>),
Encoder.field(:string, data.response_type_url, <<34>>),
Encoder.field(:bool, data.response_streaming, <<40>>),
Encoder.repeated_field(:struct, data.options, <<50>>),
Encoder.enum_field(Google.Protobuf.Syntax, data.syntax, <<56>>),
]
end
@spec encode!(t | map) :: binary
def encode!(data) do
:erlang.iolist_to_binary(encode_to_iodata!(data))
end
@spec decode!(binary) :: t
def decode!(data) do
Decoder.decode!(__MODULE__, data)
end
@spec decode(binary) :: {:ok, t} | :error
def decode(data) do
Decoder.decode(__MODULE__, data)
end
def decode(acc, <<10, data::binary>>) do
Decoder.field(:string, :name, acc, data)
end
def decode(acc, <<18, data::binary>>) do
Decoder.field(:string, :request_type_url, acc, data)
end
def decode(acc, <<24, data::binary>>) do
Decoder.field(:bool, :request_streaming, acc, data)
end
def decode(acc, <<34, data::binary>>) do
Decoder.field(:string, :response_type_url, acc, data)
end
def decode(acc, <<40, data::binary>>) do
Decoder.field(:bool, :response_streaming, acc, data)
end
def decode(acc, <<50, data::binary>>) do
Decoder.struct_field(Google.Protobuf.Option, :options, acc, data)
end
def decode(acc, <<56, data::binary>>) do
Decoder.enum_field(Google.Protobuf.Syntax, :syntax, acc, data)
end
# Failed to decode: either this is an unknown tag (which we can skip),
# or it is a known tag with the wrong wire type (which is an error).
def decode(acc, data) do
{prefix, data} = Decoder.varint(data)
tag = bsr(prefix, 3)
type = band(prefix, 7)
case tag in [1,2,3,4,5,6,7] do
false -> {acc, Decoder.skip(type, data)}
true ->
err = %Decoder.Error{
tag: tag,
module: __MODULE__,
message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
}
{:error, err}
end
end
def __finalize_decode__(args) do
struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
{:options, v}, acc -> Map.update(acc, :options, [v], fn e -> [v | e] end)
{k, v}, acc -> Map.put(acc, k, v)
end)
struct = Map.put(struct, :options, Elixir.Enum.reverse(struct.options))
struct
end
end
defmodule Google.Protobuf.Mixin do
@moduledoc false
alias Pbuf.Decoder
import Bitwise, only: [bsr: 2, band: 2]
@derive Jason.Encoder
defstruct [
name: "",
root: ""
]
@type t :: %__MODULE__{
name: String.t,
root: String.t
}
@spec new(Enum.t) :: t
def new(data) do
struct(__MODULE__, data)
end
@spec encode_to_iodata!(t | map) :: iodata
def encode_to_iodata!(data) do
alias Elixir.Pbuf.Encoder
[
Encoder.field(:string, data.name, <<10>>),
Encoder.field(:string, data.root, <<18>>),
]
end
@spec encode!(t | map) :: binary
def encode!(data) do
:erlang.iolist_to_binary(encode_to_iodata!(data))
end
@spec decode!(binary) :: t
def decode!(data) do
Decoder.decode!(__MODULE__, data)
end
@spec decode(binary) :: {:ok, t} | :error
def decode(data) do
Decoder.decode(__MODULE__, data)
end
def decode(acc, <<10, data::binary>>) do
Decoder.field(:string, :name, acc, data)
end
def decode(acc, <<18, data::binary>>) do
Decoder.field(:string, :root, acc, data)
end
# Failed to decode: either this is an unknown tag (which we can skip),
# or it is a known tag with the wrong wire type (which is an error).
def decode(acc, data) do
{prefix, data} = Decoder.varint(data)
tag = bsr(prefix, 3)
type = band(prefix, 7)
case tag in [1,2] do
false -> {acc, Decoder.skip(type, data)}
true ->
err = %Decoder.Error{
tag: tag,
module: __MODULE__,
message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
}
{:error, err}
end
end
def __finalize_decode__(args) do
struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
{k, v}, acc -> Map.put(acc, k, v)
end)
struct
end
end
|
lib/protoc/google/protobuf/api.pb.ex
| 0.748536
| 0.663955
|
api.pb.ex
|
starcoder
|
defmodule Membrane.RTP.H264.NAL.Header do
@moduledoc """
Defines a structure representing Network Abstraction Layer Unit Header
Defined in [RFC 6184](https://tools.ietf.org/html/rfc6184#section-5.3)
```
+---------------+
|0|1|2|3|4|5|6|7|
+-+-+-+-+-+-+-+-+
|F|NRI| Type |
+---------------+
```
"""
@typedoc """
NRI stands for nal_ref_idc. This value represents the importance of
the frame that is being parsed.
The higher the value, the more important the frame is (for example, key
frames have an NRI value of 3), while a value of 00 indicates that the
content of the NAL unit is not used to reconstruct reference pictures
for inter picture prediction. NAL units with an NRI of 0 can be discarded
without risking the integrity of the reference pictures, although these
payloads might contain metadata.
"""
@type nri :: 0..3
@typedoc """
Specifies the type of RBSP data structure contained in the NAL unit.
Types are defined as follows.
| ID | RBSP Type |
|----------|----------------|
| 0 | Unspecified |
| 1-23 | NAL unit types |
| 24 | STAP-A |
| 25 | STAP-B |
| 26 | MTAP-16 |
| 27 | MTAP-24 |
| 28 | FU-A |
| 29 | FU-B |
| 30-31    | Reserved       |
RBSP stands for Raw Byte Sequence Payload
RBSP types are described in detail [here](https://yumichan.net/video-processing/video-compression/introduction-to-h264-nal-unit)
"""
@type type :: 1..31
@type supported_types :: :stap_a | :fu_a | :single_nalu
@type unsupported_types :: :stap_b | :mtap_16 | :mtap_24 | :fu_b
@type types :: supported_types | unsupported_types | :reserved
defstruct [:nal_ref_idc, :type]
@type t :: %__MODULE__{
nal_ref_idc: nri(),
type: type()
}
@spec parse_unit_header(binary()) :: {:error, :malformed_data} | {:ok, {t(), binary()}}
def parse_unit_header(raw_nal)
def parse_unit_header(<<0::1, nri::2, type::5, rest::binary()>>) do
nal = %__MODULE__{
nal_ref_idc: nri,
type: type
}
{:ok, {nal, rest}}
end
# If first bit is not set to 0 packet is flagged as malformed
def parse_unit_header(_binary), do: {:error, :malformed_data}
@doc """
Adds NAL header to payload
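For example, with F=0, NRI=3 and type 5, the header byte is `0b01100101`:
iex> Membrane.RTP.H264.NAL.Header.add_header(<<1, 2>>, 0, 3, 5)
<<101, 1, 2>>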
"""
@spec add_header(binary(), 0 | 1, nri(), type()) :: binary()
def add_header(payload, reserved, nri, type),
do: <<reserved::1, nri::2, type::5>> <> payload
@doc """
Parses type stored in NAL Header
"""
@spec decode_type(t) :: types()
def decode_type(%__MODULE__{type: type}), do: do_decode_type(type)
defp do_decode_type(number) when number in 1..21, do: :single_nalu
defp do_decode_type(24), do: :stap_a
defp do_decode_type(25), do: :stap_b
defp do_decode_type(26), do: :mtap_16
defp do_decode_type(27), do: :mtap_24
defp do_decode_type(28), do: :fu_a
defp do_decode_type(29), do: :fu_b
defp do_decode_type(number) when number in 30..31 or number in [22, 23], do: :reserved
@doc """
Encodes given NAL type
"""
@spec encode_type(types()) :: type()
def encode_type(:single_nalu), do: 1
def encode_type(:stap_a), do: 24
def encode_type(:stap_b), do: 25
def encode_type(:mtap_16), do: 26
def encode_type(:mtap_24), do: 27
def encode_type(:fu_a), do: 28
def encode_type(:fu_b), do: 29
def encode_type(:reserved), do: 30
end
|
lib/rtp_h264/nal_header.ex
| 0.837786
| 0.932944
|
nal_header.ex
|
starcoder
|
defmodule Kino.Frame do
@moduledoc """
A widget wrapping a static output.
This widget serves as a placeholder for a regular output,
so that it can be dynamically replaced at any time.
Also see `Kino.animate/3` which offers a convenience on
top of this widget.
## Examples
widget = Kino.Frame.new() |> tap(&Kino.render/1)
for i <- 1..100 do
Kino.Frame.render(widget, i)
Process.sleep(50)
end
Or with a scheduled task in the background.
widget = Kino.Frame.new() |> tap(&Kino.render/1)
Kino.Frame.periodically(widget, 50, 0, fn i ->
Kino.Frame.render(widget, i)
{:cont, i + 1}
end)
"""
@doc false
use GenServer, restart: :temporary
defstruct [:pid]
@type t :: %__MODULE__{pid: pid()}
@typedoc false
@type state :: %{
parent_monitor_ref: reference(),
pids: list(pid()),
output: Kino.Output.t() | nil
}
@doc """
Starts a widget process.
"""
@spec new() :: t()
def new() do
parent = self()
opts = [parent: parent]
{:ok, pid} = DynamicSupervisor.start_child(Kino.WidgetSupervisor, {__MODULE__, opts})
%__MODULE__{pid: pid}
end
@doc false
def start_link(opts) do
GenServer.start_link(__MODULE__, opts)
end
@doc """
Renders the given term within the frame.
This works similarly to `Kino.render/1`, but the frame
widget only shows the last rendered result.
"""
@spec render(t(), term()) :: :ok
def render(widget, term) do
GenServer.cast(widget.pid, {:render, term})
end
@doc """
Registers a callback to run periodically in the widget process.
The callback is run every `interval_ms` milliseconds and receives
the accumulated value. The callback should return either of:
* `{:cont, acc}` - to continue with the new accumulated value
* `:halt` - to no longer schedule callback evaluation
The callback is run for the first time immediately upon registration.
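For example, a counter that halts after ten iterations (illustrative):
widget = Kino.Frame.new() |> tap(&Kino.render/1)
Kino.Frame.periodically(widget, 1000, 0, fn
  10 ->
    :halt
  i ->
    Kino.Frame.render(widget, i)
    {:cont, i + 1}
end)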
"""
@spec periodically(t(), pos_integer(), term(), (term() -> {:cont, term()} | :halt)) :: :ok
def periodically(widget, interval_ms, acc, fun) do
GenServer.cast(widget.pid, {:periodically, interval_ms, acc, fun})
end
@impl true
def init(opts) do
parent = Keyword.fetch!(opts, :parent)
parent_monitor_ref = Process.monitor(parent)
{:ok, %{parent_monitor_ref: parent_monitor_ref, pids: [], output: nil}}
end
@impl true
def handle_cast({:render, term}, state) do
output = Kino.Render.to_livebook(term)
for pid <- state.pids do
send(pid, {:render, %{output: output}})
end
state = %{state | output: output}
{:noreply, state}
end
def handle_cast({:periodically, interval_ms, acc, fun}, state) do
periodically_iter(interval_ms, acc, fun)
{:noreply, state}
end
@impl true
def handle_info({:connect, pid}, state) do
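# A new client connected to the widget; monitor it, send the most recent output, and track its pid.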
Process.monitor(pid)
send(pid, {:connect_reply, %{output: state.output}})
{:noreply, %{state | pids: [pid | state.pids]}}
end
def handle_info({:periodically_iter, interval_ms, acc, fun}, state) do
periodically_iter(interval_ms, acc, fun)
{:noreply, state}
end
def handle_info({:DOWN, ref, :process, _object, _reason}, %{parent_monitor_ref: ref} = state) do
{:stop, :shutdown, state}
end
def handle_info({:DOWN, _ref, :process, pid, _reason}, state) do
{:noreply, %{state | pids: List.delete(state.pids, pid)}}
end
defp periodically_iter(interval_ms, acc, fun) do
case fun.(acc) do
{:cont, acc} ->
Process.send_after(self(), {:periodically_iter, interval_ms, acc, fun}, interval_ms)
:halt ->
:ok
end
end
end
|
lib/kino/frame.ex
| 0.890464
| 0.623076
|
frame.ex
|
starcoder
|
defmodule Tirexs.Resources do
@moduledoc """
The intent is to provide an abstraction for dealing with ES resources.
The interface of this module is aware of Elasticsearch REST API conventions.
Meanwhile, `Tirexs.HTTP` provides just a general interface.
"""
import Tirexs.HTTP
@doc "the same as `Tirexs.HTTP.ok?(Tirexs.HTTP.head(path, uri))` expression. See `Tirexs.HTTP.ok?/1` and `Tirexs.HTTP.head/2`"
def exists?(path, uri), do: ok?(head(path, uri))
def exists?(url_or_path_or_uri), do: ok?(head(url_or_path_or_uri))
@doc "the same as `Tirexs.HTTP.head!(path, uri)` expression. See `Tirexs.HTTP.head!/2`"
def exists!(path, uri), do: head!(path, uri)
def exists!(url_or_path_or_uri), do: head!(url_or_path_or_uri)
@doc """
Composes an URN from parts into request ready path as a binary string.
## Examples:
iex> urn ["bear_test", "/_alias", ["2015", "2016"]]
"bear_test/_alias/2015,2016"
iex> urn [["bear_test", "another_bear_test"], "_refresh", { [ignore_unavailable: true] }]
"bear_test,another_bear_test/_refresh?ignore_unavailable=true"
iex> urn("bear_test", "bear_type", "10", "_explain?analyzer=some")
"bear_test/bear_type/10/_explain?analyzer=some"
Also see `Tirexs.HTTP.url/1`.
"""
def urn(part) when is_binary(part) do
normalize(part)
end
def urn(parts) when is_list(parts) do
Enum.map(parts, fn(part) -> normalize(part) end) |> Enum.join("/") |> String.replace("/?", "?")
end
def urn(a, b), do: urn([a ,b])
def urn(a, b, c), do: urn([a,b,c])
def urn(a, b, c, d), do: urn([a,b,c,d])
def urn(a, b, c, d, e), do: urn([a,b,c,d,e])
def urn(a, b, c, d, e, f), do: urn([a,b,c,d,e,f])
def urn(a, b, c, d, e, f, g), do: urn([a,b,c,d,e,f,g])
@doc false
def normalize(resource) when is_binary(resource) do
String.trim(resource) |> String.replace_prefix("/", "")
end
def normalize({ params }) do
"?" <> URI.encode_query(params)
end
def normalize(resource) do
pluralize(resource) |> normalize()
end
@doc false
def pluralize(resource) when is_integer(resource), do: to_string(resource)
def pluralize(resource) when is_binary(resource), do: resource
def pluralize(resource), do: Enum.join(resource, ",")
@doc """
Tries to bump a resource. This just makes a request, behaving like a proxy to
one of the available resource helpers. You're able to bump any resources defined in
`Tirexs.Resources.APIs`.
Let's consider the following use case:
iex> path = Tirexs.Resources.APIs._refresh(["bear_test", "duck_test"], { [force: false] })
"bear_test,duck_test/_refresh?force=false"
iex> Tirexs.HTTP.post(path)
{ :ok, 200, ... }
With bump, the same is:
iex> bump._refresh(["bear_test", "duck_test"], { [force: false] })
{ :ok, 200, ... }
Bumping resources that take a request body is also available:
iex> search = [query: [ term: [ user: "zatvobor" ] ] ]
iex> bump(search)._count("bear_test", "my_type")
{ :ok, 200, ... }
iex> payload = "{ \"index\": { \"_id\": \"2\" }}\n{ \"title\": \"My second blog post\" }\n"
iex> bump(payload)._bulk("website", "blog", { [refresh: true] })
{ :ok, 200, ... }
Play with resources you have and see what kind of HTTP verb is used.
"""
def bump(), do: __t(:bump)
def bump(%URI{} = uri), do: __t(:bump, [], uri)
def bump(body), do: __t(:bump, body)
def bump(body, %URI{} = uri), do: __t(:bump, body, uri)
def bump!(), do: __t(:bump!)
def bump!(%URI{} = uri), do: __t(:bump!, [], uri)
def bump!(body), do: __t(:bump!, body)
def bump!(body, %URI{} = uri), do: __t(:bump!, body, uri)
@doc false
def __c(urn, meta) when is_binary(urn) do
if ctx = Process.delete(:tirexs_resources_chain) do
args = case { urn, ctx[:body] } do
{ urn, [] } -> [ urn, ctx[:uri] ]
{ urn, body } -> [ urn, ctx[:uri], body ]
end
Kernel.apply(Tirexs.HTTP, meta[ctx[:label]], args)
else
urn
end
end
@doc false
defp __t(label, body \\ [], %URI{} = uri \\ Tirexs.ENV.get_uri_env()) do
Process.put(:tirexs_resources_chain, [label: label, body: body, uri: uri])
Tirexs.Resources.APIs
end
end
|
lib/tirexs/resources.ex
| 0.742141
| 0.479016
|
resources.ex
|
starcoder
|
defmodule AWS.DynamoDB do
@moduledoc """
Amazon DynamoDB
Amazon DynamoDB is a fully managed NoSQL database service that provides fast and
predictable performance with seamless scalability.
DynamoDB lets you offload the administrative burdens of operating and scaling a
distributed database, so that you don't have to worry about hardware
provisioning, setup and configuration, replication, software patching, or
cluster scaling.
With DynamoDB, you can create database tables that can store and retrieve any
amount of data, and serve any level of request traffic. You can scale up or
scale down your tables' throughput capacity without downtime or performance
degradation, and use the Amazon Web Services Management Console to monitor
resource utilization and performance metrics.
DynamoDB automatically spreads the data and traffic for your tables over a
sufficient number of servers to handle your throughput and storage requirements,
while maintaining consistent and fast performance. All of your data is stored on
solid state disks (SSDs) and automatically replicated across multiple
Availability Zones in an Amazon Web Services Region, providing built-in high
availability and data durability.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "DynamoDB",
api_version: "2012-08-10",
content_type: "application/x-amz-json-1.0",
credential_scope: nil,
endpoint_prefix: "dynamodb",
global?: false,
protocol: "json",
service_id: "DynamoDB",
signature_version: "v4",
signing_name: "dynamodb",
target_prefix: "DynamoDB_20120810"
}
end
@doc """
This operation allows you to perform batch reads or writes on data stored in
DynamoDB, using PartiQL.
The entire batch must consist of either read statements or write statements; you
cannot mix both in one batch.
"""
def batch_execute_statement(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchExecuteStatement", input, options)
end
@doc """
The `BatchGetItem` operation returns the attributes of one or more items from
one or more tables.
You identify requested items by primary key.
A single operation can retrieve up to 16 MB of data, which can contain as many
as 100 items. `BatchGetItem` returns a partial result if the response size limit
is exceeded, the table's provisioned throughput is exceeded, or an internal
processing failure occurs. If a partial result is returned, the operation
returns a value for `UnprocessedKeys`. You can use this value to retry the
operation starting with the next item to get.
If you request more than 100 items, `BatchGetItem` returns a
`ValidationException` with the message "Too many items requested for the
BatchGetItem call."
For example, if you ask to retrieve 100 items, but each individual item is 300
KB in size, the system returns 52 items (so as not to exceed the 16 MB limit).
It also returns an appropriate `UnprocessedKeys` value so you can get the next
page of results. If desired, your application can include its own logic to
assemble the pages of results into one dataset.
If *none* of the items can be processed due to insufficient provisioned
throughput on all of the tables in the request, then `BatchGetItem` returns a
`ProvisionedThroughputExceededException`. If *at least one* of the items is
successfully processed, then `BatchGetItem` completes successfully, while
returning the keys of the unread items in `UnprocessedKeys`.
If DynamoDB returns any unprocessed items, you should retry the batch operation
on those items. However, *we strongly recommend that you use an exponential
backoff algorithm*. If you retry the batch operation immediately, the underlying
read or write requests can still fail due to throttling on the individual
tables. If you delay the batch operation using exponential backoff, the
individual requests in the batch are much more likely to succeed.
For more information, see [Batch Operations and Error Handling](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations)
in the *Amazon DynamoDB Developer Guide*.
By default, `BatchGetItem` performs eventually consistent reads on every table
in the request. If you want strongly consistent reads instead, you can set
`ConsistentRead` to `true` for any or all tables.
In order to minimize response latency, `BatchGetItem` retrieves items in
parallel.
When designing your application, keep in mind that DynamoDB does not return
items in any particular order. To help parse the response by item, include the
primary key values for the items in your request in the `ProjectionExpression`
parameter.
If a requested item does not exist, it is not returned in the result. Requests
for nonexistent items consume the minimum read capacity units according to the
type of read. For more information, see [Working with Tables](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations)
in the *Amazon DynamoDB Developer Guide*.
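A minimal call sketch (the client setup, table name, and key below are
illustrative assumptions, not part of this module):
client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
{:ok, result, _http_response} =
  AWS.DynamoDB.batch_get_item(client, %{
    "RequestItems" => %{
      "Music" => %{"Keys" => [%{"Artist" => %{"S" => "No One You Know"}}]}
    }
  })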
"""
def batch_get_item(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchGetItem", input, options)
end
@doc """
The `BatchWriteItem` operation puts or deletes multiple items in one or more
tables.
A single call to `BatchWriteItem` can write up to 16 MB of data, which can
comprise as many as 25 put or delete requests. Individual items to be written
can be as large as 400 KB.
`BatchWriteItem` cannot update items. To update items, use the `UpdateItem`
action.
The individual `PutItem` and `DeleteItem` operations specified in
`BatchWriteItem` are atomic; however `BatchWriteItem` as a whole is not. If any
requested operations fail because the table's provisioned throughput is exceeded
or an internal processing failure occurs, the failed operations are returned in
the `UnprocessedItems` response parameter. You can investigate and optionally
resend the requests. Typically, you would call `BatchWriteItem` in a loop. Each
iteration would check for unprocessed items and submit a new `BatchWriteItem`
request with those unprocessed items until all items have been processed.
If *none* of the items can be processed due to insufficient provisioned
throughput on all of the tables in the request, then `BatchWriteItem` returns a
`ProvisionedThroughputExceededException`.
If DynamoDB returns any unprocessed items, you should retry the batch operation
on those items. However, *we strongly recommend that you use an exponential
backoff algorithm*. If you retry the batch operation immediately, the underlying
read or write requests can still fail due to throttling on the individual
tables. If you delay the batch operation using exponential backoff, the
individual requests in the batch are much more likely to succeed.
For more information, see [Batch Operations and Error Handling](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#Programming.Errors.BatchOperations)
in the *Amazon DynamoDB Developer Guide*.
With `BatchWriteItem`, you can efficiently write or delete large amounts of
data, such as from Amazon EMR, or copy data from another database into DynamoDB.
In order to improve performance with these large-scale operations,
`BatchWriteItem` does not behave in the same way as individual `PutItem` and
`DeleteItem` calls would. For example, you cannot specify conditions on
individual put and delete requests, and `BatchWriteItem` does not return deleted
items in the response.
If you use a programming language that supports concurrency, you can use threads
to write items in parallel. Your application must include the necessary logic to
manage the threads. With languages that don't support threading, you must update
or delete the specified items one at a time. In both situations,
`BatchWriteItem` performs the specified put and delete operations in parallel,
giving you the power of the thread pool approach without having to introduce
complexity into your application.
Parallel processing reduces latency, but each specified put and delete request
consumes the same number of write capacity units whether it is processed in
parallel or not. Delete operations on nonexistent items consume one write
capacity unit.
If one or more of the following is true, DynamoDB rejects the entire batch write
operation:
* One or more tables specified in the `BatchWriteItem` request does
not exist.
* Primary key attributes specified on an item in the request do not
match those in the corresponding table's primary key schema.
* You try to perform multiple operations on the same item in the
same `BatchWriteItem` request. For example, you cannot put and delete the same
item in the same `BatchWriteItem` request.
* Your request contains at least two items with identical hash and
range keys (which essentially is two put operations).
* There are more than 25 requests in the batch.
* Any individual item in a batch exceeds 400 KB.
* The total request size exceeds 16 MB.
"""
def batch_write_item(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "BatchWriteItem", input, options)
end
@doc """
Creates a backup for an existing table.
Each time you create an on-demand backup, the entire table data is backed up.
There is no limit to the number of on-demand backups that can be taken.
When you create an on-demand backup, a time marker of the request is cataloged,
and the backup is created asynchronously, by applying all changes until the time
of the request to the last full table snapshot. Backup requests are processed
instantaneously and become available for restore within minutes.
You can call `CreateBackup` at a maximum rate of 50 times per second.
All backups in DynamoDB work without consuming any provisioned throughput on the
table.
If you submit a backup request on 2018-12-14 at 14:25:00, the backup is
guaranteed to contain all data committed to the table up to 14:24:00, and data
committed after 14:26:00 will not be. The backup might contain data
modifications made between 14:24:00 and 14:26:00. On-demand backup does not
support causal consistency.
Along with data, the following are also included on the backups:
* Global secondary indexes (GSIs)
* Local secondary indexes (LSIs)
* Streams
* Provisioned read and write capacity
"""
def create_backup(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateBackup", input, options)
end
@doc """
Creates a global table from an existing table.
A global table creates a replication relationship between two or more DynamoDB
tables with the same table name in the provided Regions.
This operation only applies to [Version 2017.11.29](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html)
of global tables.
If you want to add a new replica table to a global table, each of the following
conditions must be true:
* The table must have the same primary key as all of the other
replicas.
* The table must have the same name as all of the other replicas.
* The table must have DynamoDB Streams enabled, with the stream
containing both the new and the old images of the item.
* None of the replica tables in the global table can contain any
data.
If global secondary indexes are specified, then the following conditions must
also be met:
* The global secondary indexes must have the same name.
* The global secondary indexes must have the same hash key and sort
key (if present).
If local secondary indexes are specified, then the following conditions must
also be met:
* The local secondary indexes must have the same name.
* The local secondary indexes must have the same hash key and sort
key (if present).
Write capacity settings should be set consistently across your replica tables
and secondary indexes. DynamoDB strongly recommends enabling auto scaling to
manage the write capacity settings for all of your global tables replicas and
indexes.
If you prefer to manage write capacity settings manually, you should provision
equal replicated write capacity units to your replica tables. You should also
provision equal replicated write capacity units to matching secondary indexes
across your global table.
"""
def create_global_table(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateGlobalTable", input, options)
end
@doc """
The `CreateTable` operation adds a new table to your account.
In an Amazon Web Services account, table names must be unique within each
Region. That is, you can have two tables with same name if you create the tables
in different Regions.
`CreateTable` is an asynchronous operation. Upon receiving a `CreateTable`
request, DynamoDB immediately returns a response with a `TableStatus` of
`CREATING`. After the table is created, DynamoDB sets the `TableStatus` to
`ACTIVE`. You can perform read and write operations only on an `ACTIVE` table.
You can optionally define secondary indexes on the new table, as part of the
`CreateTable` operation. If you want to create multiple tables with secondary
indexes on them, you must create the tables sequentially. Only one table with
secondary indexes can be in the `CREATING` state at any given time.
You can use the `DescribeTable` action to check the table status.
"""
def create_table(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateTable", input, options)
end
@doc """
Deletes an existing backup of a table.
You can call `DeleteBackup` at a maximum rate of 10 times per second.
"""
def delete_backup(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteBackup", input, options)
end
@doc """
Deletes a single item in a table by primary key.
You can perform a conditional delete operation that deletes the item if it
exists, or if it has an expected attribute value.
In addition to deleting an item, you can also return the item's attribute values
in the same operation, using the `ReturnValues` parameter.
Unless you specify conditions, the `DeleteItem` is an idempotent operation;
running it multiple times on the same item or attribute does *not* result in an
error response.
Conditional deletes are useful for deleting items only if specific conditions
are met. If those conditions are met, DynamoDB performs the delete. Otherwise,
the item is not deleted.
"""
def delete_item(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteItem", input, options)
end
@doc """
The `DeleteTable` operation deletes a table and all of its items.
After a `DeleteTable` request, the specified table is in the `DELETING` state
until DynamoDB completes the deletion. If the table is in the `ACTIVE` state,
you can delete it. If a table is in `CREATING` or `UPDATING` states, then
DynamoDB returns a `ResourceInUseException`. If the specified table does not
exist, DynamoDB returns a `ResourceNotFoundException`. If table is already in
the `DELETING` state, no error is returned.
DynamoDB might continue to accept data read and write operations, such as
`GetItem` and `PutItem`, on a table in the `DELETING` state until the table
deletion is complete.
When you delete a table, any indexes on that table are also deleted.
If you have DynamoDB Streams enabled on the table, then the corresponding stream
on that table goes into the `DISABLED` state, and the stream is automatically
deleted after 24 hours.
Use the `DescribeTable` action to check the status of the table.
"""
def delete_table(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteTable", input, options)
end
@doc """
Describes an existing backup of a table.
You can call `DescribeBackup` at a maximum rate of 10 times per second.
"""
def describe_backup(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeBackup", input, options)
end
@doc """
Checks the status of continuous backups and point in time recovery on the
specified table.
Continuous backups are `ENABLED` on all tables at table creation. If point in
time recovery is enabled, `PointInTimeRecoveryStatus` will be set to ENABLED.
After continuous backups and point in time recovery are enabled, you can restore
to any point in time within `EarliestRestorableDateTime` and
`LatestRestorableDateTime`.
`LatestRestorableDateTime` is typically 5 minutes before the current time. You
can restore your table to any point in time during the last 35 days.
You can call `DescribeContinuousBackups` at a maximum rate of 10 times per
second.
"""
def describe_continuous_backups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeContinuousBackups", input, options)
end
@doc """
Returns information about contributor insights, for a given table or global
secondary index.
"""
def describe_contributor_insights(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeContributorInsights", input, options)
end
@doc """
Returns the regional endpoint information.
"""
def describe_endpoints(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEndpoints", input, options)
end
@doc """
Describes an existing table export.
"""
def describe_export(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeExport", input, options)
end
@doc """
Returns information about the specified global table.
This operation only applies to [Version 2017.11.29](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html)
of global tables. If you are using global tables [Version 2019.11.21](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html)
you can use
[DescribeTable](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DescribeTable.html)
instead.
"""
def describe_global_table(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeGlobalTable", input, options)
end
@doc """
Describes Region-specific settings for a global table.
This operation only applies to [Version 2017.11.29](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html)
of global tables.
"""
def describe_global_table_settings(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeGlobalTableSettings", input, options)
end
@doc """
Returns information about the status of Kinesis streaming.
"""
def describe_kinesis_streaming_destination(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DescribeKinesisStreamingDestination",
input,
options
)
end
@doc """
Returns the current provisioned-capacity quotas for your Amazon Web Services
account in a Region, both for the Region as a whole and for any one DynamoDB
table that you create there.
When you establish an Amazon Web Services account, the account has initial
quotas on the maximum read capacity units and write capacity units that you can
provision across all of your DynamoDB tables in a given Region. Also, there are
per-table quotas that apply when you create a table there. For more information,
see [Service, Account, and Table Quotas](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
page in the *Amazon DynamoDB Developer Guide*.
Although you can increase these quotas by filing a case at [Amazon Web Services Support Center](https://console.aws.amazon.com/support/home#/), obtaining the
increase is not instantaneous. The `DescribeLimits` action lets you write code
to compare the capacity you are currently using to those quotas imposed by your
account so that you have enough time to apply for an increase before you hit a
quota.
For example, you could use one of the Amazon Web Services SDKs to do the
following:
1. Call `DescribeLimits` for a particular Region to obtain your
current account quotas on provisioned capacity there.
2. Create a variable to hold the aggregate read capacity units
provisioned for all your tables in that Region, and one to hold the aggregate
write capacity units. Zero them both.
3. Call `ListTables` to obtain a list of all your DynamoDB tables.
4. For each table name listed by `ListTables`, do the following:
* Call `DescribeTable` with the table name.
* Use the data returned by `DescribeTable` to add the
read capacity units and write capacity units provisioned for the table itself to
your variables.
* If the table has one or more global secondary indexes
(GSIs), loop over these GSIs and add their provisioned capacity values to your
variables as well.
5. Report the account quotas for that Region returned by
`DescribeLimits`, along with the total current provisioned capacity levels you
have calculated.
This will let you see whether you are getting close to your account-level
quotas.
The per-table quotas apply only when you are creating a new table. They restrict
the sum of the provisioned capacity of the new table itself and all its global
secondary indexes.
For existing tables and their GSIs, DynamoDB doesn't let you increase
provisioned capacity extremely rapidly, but the only quota that applies is that
the aggregate provisioned capacity over all your tables and GSIs cannot exceed
either of the per-account quotas.
`DescribeLimits` should only be called periodically. You can expect throttling
errors if you call it more than once in a minute.
The `DescribeLimits` Request element has no content.
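
## Example

A minimal sketch of the workflow above, assuming the usual
`{:ok, result, http_response}` return shape of this client. Key names follow
the DynamoDB JSON wire format; for brevity, only the first page of table
names and provisioned-mode tables are handled:

```
{:ok, %{"TableNames" => names}, _} = list_tables(client, %{})

{reads, writes} =
  Enum.reduce(names, {0, 0}, fn name, {r, w} ->
    {:ok, %{"Table" => table}, _} = describe_table(client, %{"TableName" => name})
    base = table["ProvisionedThroughput"]
    gsis = Map.get(table, "GlobalSecondaryIndexes", [])

    gsi_r = gsis |> Enum.map(& &1["ProvisionedThroughput"]["ReadCapacityUnits"]) |> Enum.sum()
    gsi_w = gsis |> Enum.map(& &1["ProvisionedThroughput"]["WriteCapacityUnits"]) |> Enum.sum()

    {r + base["ReadCapacityUnits"] + gsi_r, w + base["WriteCapacityUnits"] + gsi_w}
  end)

# Compare the totals against the account-level quotas:
{:ok, limits, _} = describe_limits(client, %{})
reads_left = limits["AccountMaxReadCapacityUnits"] - reads
writes_left = limits["AccountMaxWriteCapacityUnits"] - writes
```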
"""
def describe_limits(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLimits", input, options)
end
@doc """
Returns information about the table, including the current status of the table,
when it was created, the primary key schema, and any indexes on the table.
If you issue a `DescribeTable` request immediately after a `CreateTable`
request, DynamoDB might return a `ResourceNotFoundException`. This is because
`DescribeTable` uses an eventually consistent query, and the metadata for your
table might not be available at that moment. Wait for a few seconds, and then
try the `DescribeTable` request again.
"""
def describe_table(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTable", input, options)
end
@doc """
Describes auto scaling settings across replicas of the global table at once.
This operation only applies to [Version 2019.11.21](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html)
of global tables.
"""
def describe_table_replica_auto_scaling(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTableReplicaAutoScaling", input, options)
end
@doc """
Gives a description of the Time to Live (TTL) status on the specified table.
"""
def describe_time_to_live(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTimeToLive", input, options)
end
@doc """
Stops replication from the DynamoDB table to the Kinesis data stream.
This is done without deleting either of the resources.
"""
def disable_kinesis_streaming_destination(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisableKinesisStreamingDestination", input, options)
end
@doc """
Starts table data replication to the specified Kinesis data stream at a
timestamp chosen during the enable workflow.
If this operation doesn't return results immediately, use
DescribeKinesisStreamingDestination to check if streaming to the Kinesis data
stream is ACTIVE.
"""
def enable_kinesis_streaming_destination(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "EnableKinesisStreamingDestination", input, options)
end
@doc """
This operation allows you to perform reads and singleton writes on data stored
in DynamoDB, using PartiQL.
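
## Example

A hedged sketch; the table name and the `{:ok, result, http_response}` return
shape are assumptions:

```
input = %{"Statement" => "SELECT * FROM Music WHERE Artist = 'No One You Know'"}
{:ok, %{"Items" => items}, _} = execute_statement(client, input)
```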
"""
def execute_statement(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ExecuteStatement", input, options)
end
@doc """
This operation allows you to perform transactional reads or writes on data
stored in DynamoDB, using PartiQL.
The entire transaction must consist of either read statements or write
statements; you cannot mix both in one transaction. The EXISTS function is an
exception and can be used to check the condition of specific attributes of the
item in a similar manner to `ConditionCheck` in the
[TransactWriteItems](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/transaction-apis.html#transaction-apis-txwriteitems)
API.
"""
def execute_transaction(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ExecuteTransaction", input, options)
end
@doc """
Exports table data to an S3 bucket.
The table must have point in time recovery enabled, and you can export data from
any time within the point in time recovery window.
"""
def export_table_to_point_in_time(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ExportTableToPointInTime", input, options)
end
@doc """
The `GetItem` operation returns a set of attributes for the item with the given
primary key.
If there is no matching item, `GetItem` does not return any data and there will
be no `Item` element in the response.
`GetItem` provides an eventually consistent read by default. If your application
requires a strongly consistent read, set `ConsistentRead` to `true`. Although a
strongly consistent read might take more time than an eventually consistent
read, it always returns the last updated value.
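
## Example

A hedged sketch using hypothetical table and key names; `ConsistentRead`
trades extra read capacity for a strongly consistent result:

```
input = %{
  "TableName" => "Music",
  "Key" => %{"Artist" => %{"S" => "No One You Know"}},
  "ConsistentRead" => true
}

{:ok, %{"Item" => item}, _} = get_item(client, input)
```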
"""
def get_item(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetItem", input, options)
end
@doc """
List backups associated with an Amazon Web Services account.
To list backups for a given table, specify `TableName`. `ListBackups` returns a
paginated list of results with at most 1 MB worth of items in a page. You can
also specify a maximum number of entries to be returned in a page.
In the request, start time is inclusive, but end time is exclusive. Note that
these boundaries are for the time at which the original backup was requested.
You can call `ListBackups` a maximum of five times per second.
"""
def list_backups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListBackups", input, options)
end
@doc """
Returns a list of ContributorInsightsSummary for a table and all its global
secondary indexes.
"""
def list_contributor_insights(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListContributorInsights", input, options)
end
@doc """
Lists completed exports within the past 90 days.
"""
def list_exports(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListExports", input, options)
end
@doc """
Lists all global tables that have a replica in the specified Region.
This operation only applies to [Version 2017.11.29](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html)
of global tables.
"""
def list_global_tables(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListGlobalTables", input, options)
end
@doc """
Returns an array of table names associated with the current account and
endpoint.
The output from `ListTables` is paginated, with each page returning a maximum of
100 table names.
"""
def list_tables(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTables", input, options)
end
@doc """
List all tags on an Amazon DynamoDB resource.
You can call ListTagsOfResource up to 10 times per second, per account.
For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
in the *Amazon DynamoDB Developer Guide*.
"""
def list_tags_of_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsOfResource", input, options)
end
@doc """
Creates a new item, or replaces an old item with a new item.
If an item that has the same primary key as the new item already exists in the
specified table, the new item completely replaces the existing item. You can
perform a conditional put operation (add a new item if one with the specified
primary key doesn't exist), or replace an existing item if it has certain
attribute values. You can return the item's attribute values in the same
operation, using the `ReturnValues` parameter.
This topic provides general information about the `PutItem` API.
For information on how to call the `PutItem` API using the Amazon Web Services
SDK in specific languages, see the following:
[ PutItem in the Command Line Interface](http://docs.aws.amazon.com/goto/aws-cli/dynamodb-2012-08-10/PutItem)
[ PutItem in the SDK for .NET](http://docs.aws.amazon.com/goto/DotNetSDKV3/dynamodb-2012-08-10/PutItem)
[ PutItem in the SDK for C++](http://docs.aws.amazon.com/goto/SdkForCpp/dynamodb-2012-08-10/PutItem)
[ PutItem in the SDK for Go](http://docs.aws.amazon.com/goto/SdkForGoV1/dynamodb-2012-08-10/PutItem)
[ PutItem in the SDK for Java](http://docs.aws.amazon.com/goto/SdkForJava/dynamodb-2012-08-10/PutItem)
[ PutItem in the SDK for JavaScript](http://docs.aws.amazon.com/goto/AWSJavaScriptSDK/dynamodb-2012-08-10/PutItem)
[ PutItem in the SDK for PHP V3](http://docs.aws.amazon.com/goto/SdkForPHPV3/dynamodb-2012-08-10/PutItem)
[ PutItem in the SDK for Python (Boto)](http://docs.aws.amazon.com/goto/boto3/dynamodb-2012-08-10/PutItem)
[ PutItem in the SDK for Ruby V2](http://docs.aws.amazon.com/goto/SdkForRubyV2/dynamodb-2012-08-10/PutItem)
When you add an item, the primary key attributes are the only required
attributes. Attribute values cannot be null.
Empty String and Binary attribute values are allowed. Attribute values of type
String and Binary must have a length greater than zero if the attribute is used
as a key attribute for a table or index. Set type attributes cannot be empty.
Invalid requests with empty values are rejected with a `ValidationException`.
To prevent a new item from replacing an existing item, use a conditional
expression that contains the `attribute_not_exists` function with the name of
the attribute being used as the partition key for the table. Since every record
must contain that attribute, the `attribute_not_exists` function will only
succeed if no matching item exists.
For more information about `PutItem`, see [Working with Items](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html)
in the *Amazon DynamoDB Developer Guide*.
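
## Example

A hedged sketch of the conditional put described above; table and attribute
names are hypothetical. The write fails with a
`ConditionalCheckFailedException` if an item with this partition key already
exists:

```
input = %{
  "TableName" => "Music",
  "Item" => %{
    "Artist" => %{"S" => "No One You Know"},
    "AlbumTitle" => %{"S" => "Somewhat Famous"}
  },
  "ConditionExpression" => "attribute_not_exists(Artist)"
}

{:ok, _result, _} = put_item(client, input)
```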
"""
def put_item(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutItem", input, options)
end
@doc """
The `Query` operation finds items based on primary key values. You must provide
the name of the partition key attribute and a single value for that attribute.
`Query` returns all items with that partition key value. Optionally, you can
provide a sort key attribute and use a comparison operator to refine the search
results.
Use the `KeyConditionExpression` parameter to provide a specific value for the
partition key. The `Query` operation will return all of the items from the table
or index with that partition key value. You can optionally narrow the scope of
the `Query` operation by specifying a sort key value and a comparison operator
in `KeyConditionExpression`. To further refine the `Query` results, you can
optionally provide a `FilterExpression`. A `FilterExpression` determines which
items within the results should be returned to you. All of the other results are
discarded.
A `Query` operation always returns a result set. If no matching items are found,
the result set will be empty. Queries that do not return results consume the
minimum number of read capacity units for that type of read operation.
DynamoDB calculates the number of read capacity units consumed based on item
size, not on the amount of data that is returned to an application. The number
of capacity units consumed will be the same whether you request all of the
attributes (the default behavior) or just some of them (using a projection
expression). The number will also be the same whether or not you use a
`FilterExpression`.
`Query` results are always sorted by the sort key value. If the data type of the
sort key is Number, the results are returned in numeric order; otherwise, the
results are returned in order of UTF-8 bytes. By default, the sort order is
ascending. To reverse the order, set the `ScanIndexForward` parameter to false.
A single `Query` operation will read up to the maximum number of items set (if
using the `Limit` parameter) or a maximum of 1 MB of data and then apply any
filtering to the results using `FilterExpression`. If `LastEvaluatedKey` is
present in the response, you will need to paginate the result set. For more
information, see [Paginating the Results](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination)
in the *Amazon DynamoDB Developer Guide*.
`FilterExpression` is applied after a `Query` finishes, but before the results
are returned. A `FilterExpression` cannot contain partition key or sort key
attributes. You need to specify those attributes in the
`KeyConditionExpression`.
A `Query` operation can return an empty result set and a `LastEvaluatedKey` if
all the items read for the page of results are filtered out.
You can query a table, a local secondary index, or a global secondary index. For
a query on a table or on a local secondary index, you can set the
`ConsistentRead` parameter to `true` and obtain a strongly consistent result.
Global secondary indexes support eventually consistent reads only, so do not
specify `ConsistentRead` when querying a global secondary index.
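
## Example

A hedged pagination sketch with hypothetical names: each page's
`LastEvaluatedKey` becomes the `ExclusiveStartKey` of the next request.

```
base = %{
  "TableName" => "Music",
  "KeyConditionExpression" => "Artist = :a",
  "ExpressionAttributeValues" => %{":a" => %{"S" => "No One You Know"}}
}

items =
  base
  |> Stream.unfold(fn
    nil ->
      nil

    input ->
      {:ok, page, _} = query(client, input)

      case page do
        %{"LastEvaluatedKey" => key} -> {page["Items"], Map.put(base, "ExclusiveStartKey", key)}
        _ -> {page["Items"], nil}
      end
  end)
  |> Enum.concat()
```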
"""
def query(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "Query", input, options)
end
@doc """
Creates a new table from an existing backup.
Any number of users can execute up to 4 concurrent restores (any type of
restore) in a given account.
You can call `RestoreTableFromBackup` at a maximum rate of 10 times per second.
You must manually set up the following on the restored table:
* Auto scaling policies
* IAM policies
* Amazon CloudWatch metrics and alarms
* Tags
* Stream settings
* Time to Live (TTL) settings
"""
def restore_table_from_backup(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RestoreTableFromBackup", input, options)
end
@doc """
Restores the specified table to the specified point in time within
`EarliestRestorableDateTime` and `LatestRestorableDateTime`.
You can restore your table to any point in time during the last 35 days. Any
number of users can execute up to 4 concurrent restores (any type of restore) in
a given account.
When you restore using point in time recovery, DynamoDB restores your table data
to the state based on the selected date and time (day:hour:minute:second) to a
new table.
Along with data, the following are also included on the new restored table using
point in time recovery:
* Global secondary indexes (GSIs)
* Local secondary indexes (LSIs)
* Provisioned read and write capacity
* Encryption settings
All these settings come from the current settings of the source table at the
time of restore.
You must manually set up the following on the restored table:
* Auto scaling policies
* IAM policies
* Amazon CloudWatch metrics and alarms
* Tags
* Stream settings
* Time to Live (TTL) settings
* Point in time recovery settings
"""
def restore_table_to_point_in_time(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RestoreTableToPointInTime", input, options)
end
@doc """
The `Scan` operation returns one or more items and item attributes by accessing
every item in a table or a secondary index.
To have DynamoDB return fewer items, you can provide a `FilterExpression`
operation.
If the total number of scanned items exceeds the maximum dataset size limit of 1
MB, the scan stops and results are returned to the user as a `LastEvaluatedKey`
value to continue the scan in a subsequent operation. The results also include
the number of items exceeding the limit. A scan can result in no table data
meeting the filter criteria.
A single `Scan` operation reads up to the maximum number of items set (if using
the `Limit` parameter) or a maximum of 1 MB of data and then applies any filtering
to the results using `FilterExpression`. If `LastEvaluatedKey` is present in the
response, you need to paginate the result set. For more information, see
[Paginating the Results](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination)
in the *Amazon DynamoDB Developer Guide*.
`Scan` operations proceed sequentially; however, for faster performance on a
large table or secondary index, applications can request a parallel `Scan`
operation by providing the `Segment` and `TotalSegments` parameters. For more
information, see [Parallel Scan](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan)
in the *Amazon DynamoDB Developer Guide*.
`Scan` uses eventually consistent reads when accessing the data in a table;
therefore, the result set might not include the changes to data in the table
immediately before the operation began. If you need a consistent copy of the
data, as of the time that the `Scan` begins, you can set the `ConsistentRead`
parameter to `true`.
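
## Example

A hedged sketch of a parallel scan with four workers; the table name is
hypothetical and, for brevity, only the first page of each segment is read:

```
total_segments = 4

items =
  0..(total_segments - 1)
  |> Task.async_stream(
    fn segment ->
      input = %{
        "TableName" => "Music",
        "Segment" => segment,
        "TotalSegments" => total_segments
      }

      {:ok, %{"Items" => items}, _} = scan(client, input)
      items
    end,
    timeout: :infinity
  )
  |> Enum.flat_map(fn {:ok, items} -> items end)
```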
"""
def scan(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "Scan", input, options)
end
@doc """
Associate a set of tags with an Amazon DynamoDB resource.
You can then activate these user-defined tags so that they appear on the Billing
and Cost Management console for cost allocation tracking. You can call
TagResource up to five times per second, per account.
For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
in the *Amazon DynamoDB Developer Guide*.
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
@doc """
`TransactGetItems` is a synchronous operation that atomically retrieves multiple
items from one or more tables (but not from indexes) in a single account and
Region.
A `TransactGetItems` call can contain up to 25 `TransactGetItem` objects, each
of which contains a `Get` structure that specifies an item to retrieve from a
table in the account and Region. A call to `TransactGetItems` cannot retrieve
items from tables in more than one Amazon Web Services account or Region. The
aggregate size of the items in the transaction cannot exceed 4 MB.
DynamoDB rejects the entire `TransactGetItems` request if any of the following
is true:
* A conflicting operation is in the process of updating an item to
be read.
* There is insufficient provisioned capacity for the transaction to
be completed.
* There is a user error, such as an invalid data format.
* The aggregate size of the items in the transaction exceeds 4 MB.
"""
def transact_get_items(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TransactGetItems", input, options)
end
@doc """
`TransactWriteItems` is a synchronous write operation that groups up to 25
action requests.
These actions can target items in different tables, but not in different Amazon
Web Services accounts or Regions, and no two actions can target the same item.
For example, you cannot both `ConditionCheck` and `Update` the same item. The
aggregate size of the items in the transaction cannot exceed 4 MB.
The actions are completed atomically so that either all of them succeed, or all
of them fail. They are defined by the following objects:
* `Put` — Initiates a `PutItem` operation to write a new item.
This structure specifies the primary key of the item to be written, the name of
the table to write it in, an optional condition expression that must be
satisfied for the write to succeed, a list of the item's attributes, and a field
indicating whether to retrieve the item's attributes if the condition is not
met.
* `Update` — Initiates an `UpdateItem` operation to update an
existing item. This structure specifies the primary key of the item to be
updated, the name of the table where it resides, an optional condition
expression that must be satisfied for the update to succeed, an expression that
defines one or more attributes to be updated, and a field indicating whether to
retrieve the item's attributes if the condition is not met.
* `Delete` — Initiates a `DeleteItem` operation to delete an
existing item. This structure specifies the primary key of the item to be
deleted, the name of the table where it resides, an optional condition
expression that must be satisfied for the deletion to succeed, and a field
indicating whether to retrieve the item's attributes if the condition is not
met.
* `ConditionCheck` — Applies a condition to an item that is not
being modified by the transaction. This structure specifies the primary key of
the item to be checked, the name of the table where it resides, a condition
expression that must be satisfied for the transaction to succeed, and a field
indicating whether to retrieve the item's attributes if the condition is not
met.
DynamoDB rejects the entire `TransactWriteItems` request if any of the following
is true:
* A condition in one of the condition expressions is not met.
* An ongoing operation is in the process of updating the same item.
* There is insufficient provisioned capacity for the transaction to
be completed.
* An item size becomes too large (bigger than 400 KB), a local
secondary index (LSI) becomes too large, or a similar validation error occurs
because of changes made by the transaction.
* The aggregate size of the items in the transaction exceeds 4 MB.
* There is a user error, such as an invalid data format.
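
## Example

A hedged sketch pairing a `ConditionCheck` with a `Put` so the order is only
written when the customer exists; table names and ids are hypothetical. Both
actions succeed or both fail:

```
input = %{
  "TransactItems" => [
    %{
      "ConditionCheck" => %{
        "TableName" => "Customers",
        "Key" => %{"CustomerId" => %{"S" => "09e8e9c8"}},
        "ConditionExpression" => "attribute_exists(CustomerId)"
      }
    },
    %{
      "Put" => %{
        "TableName" => "Orders",
        "Item" => %{
          "OrderId" => %{"S" => "695e1a67"},
          "CustomerId" => %{"S" => "09e8e9c8"}
        }
      }
    }
  ]
}

{:ok, _result, _} = transact_write_items(client, input)
```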
"""
def transact_write_items(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TransactWriteItems", input, options)
end
@doc """
Removes the association of tags from an Amazon DynamoDB resource.
You can call `UntagResource` up to five times per second, per account.
For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
in the *Amazon DynamoDB Developer Guide*.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
@doc """
`UpdateContinuousBackups` enables or disables point in time recovery for the
specified table.
A successful `UpdateContinuousBackups` call returns the current
`ContinuousBackupsDescription`. Continuous backups are `ENABLED` on all tables
at table creation. If point in time recovery is enabled,
`PointInTimeRecoveryStatus` will be set to `ENABLED`.
Once continuous backups and point in time recovery are enabled, you can restore
to any point in time within `EarliestRestorableDateTime` and
`LatestRestorableDateTime`.
`LatestRestorableDateTime` is typically 5 minutes before the current time. You
can restore your table to any point in time during the last 35 days.
"""
def update_continuous_backups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateContinuousBackups", input, options)
end
@doc """
Updates the status for contributor insights for a specific table or index.
CloudWatch Contributor Insights for DynamoDB graphs display the partition key
and (if applicable) sort key of frequently accessed items and frequently
throttled items in plaintext. If you require the use of AWS Key Management
Service (KMS) to encrypt this table’s partition key and sort key data with an
AWS managed key or customer managed key, you should not enable CloudWatch
Contributor Insights for DynamoDB for this table.
"""
def update_contributor_insights(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateContributorInsights", input, options)
end
@doc """
Adds or removes replicas in the specified global table.
The global table must already exist to be able to use this operation. Any
replica to be added must be empty, have the same name as the global table, have
the same key schema, have DynamoDB Streams enabled, and have the same
provisioned and maximum write capacity units.
Although you can use `UpdateGlobalTable` to add replicas and remove replicas in
a single request, for simplicity we recommend that you issue separate requests
for adding or removing replicas.
If global secondary indexes are specified, then the following conditions must
also be met:
* The global secondary indexes must have the same name.
* The global secondary indexes must have the same hash key and sort
key (if present).
* The global secondary indexes must have the same provisioned and
maximum write capacity units.
"""
def update_global_table(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateGlobalTable", input, options)
end
@doc """
Updates settings for a global table.
"""
def update_global_table_settings(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateGlobalTableSettings", input, options)
end
@doc """
Edits an existing item's attributes, or adds a new item to the table if it does
not already exist.
You can put, delete, or add attribute values. You can also perform a conditional
update on an existing item (insert a new attribute name-value pair if it doesn't
exist, or replace an existing name-value pair if it has certain expected
attribute values).
You can also return the item's attribute values in the same `UpdateItem`
operation using the `ReturnValues` parameter.
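
## Example

A hedged sketch of an atomic counter update with hypothetical names;
`ReturnValues: "UPDATED_NEW"` returns only the attributes changed by the
update:

```
input = %{
  "TableName" => "Music",
  "Key" => %{"Artist" => %{"S" => "No One You Know"}},
  "UpdateExpression" => "SET Plays = if_not_exists(Plays, :zero) + :incr",
  "ExpressionAttributeValues" => %{":incr" => %{"N" => "1"}, ":zero" => %{"N" => "0"}},
  "ReturnValues" => "UPDATED_NEW"
}

{:ok, %{"Attributes" => attributes}, _} = update_item(client, input)
```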
"""
def update_item(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateItem", input, options)
end
@doc """
Modifies the provisioned throughput settings, global secondary indexes, or
DynamoDB Streams settings for a given table.
You can only perform one of the following operations at once:
* Modify the provisioned throughput settings of the table.
* Enable or disable DynamoDB Streams on the table.
* Remove a global secondary index from the table.
* Create a new global secondary index on the table. After the index
begins backfilling, you can use `UpdateTable` to perform other operations.
`UpdateTable` is an asynchronous operation; while it is executing, the table
status changes from `ACTIVE` to `UPDATING`. While it is `UPDATING`, you cannot
issue another `UpdateTable` request. When the table returns to the `ACTIVE`
state, the `UpdateTable` operation is complete.
"""
def update_table(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateTable", input, options)
end
@doc """
Updates auto scaling settings on your global tables at once.
This operation only applies to [Version 2019.11.21](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html)
of global tables.
"""
def update_table_replica_auto_scaling(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateTableReplicaAutoScaling", input, options)
end
@doc """
The `UpdateTimeToLive` method enables or disables Time to Live (TTL) for the
specified table.
A successful `UpdateTimeToLive` call returns the current
`TimeToLiveSpecification`. It can take up to one hour for the change to fully
process. Any additional `UpdateTimeToLive` calls for the same table during this
one-hour duration result in a `ValidationException`.
TTL compares the current time in epoch time format to the time stored in the TTL
attribute of an item. If the epoch time value stored in the attribute is less
than the current time, the item is marked as expired and subsequently deleted.
The epoch time format is the number of seconds elapsed since 12:00:00 AM January
1, 1970 UTC.
DynamoDB deletes expired items on a best-effort basis to ensure availability of
throughput for other data operations.
DynamoDB typically deletes expired items within two days of expiration. The
exact duration within which an item gets deleted after expiration is specific to
the nature of the workload. Items that have expired and not been deleted will
still show up in reads, queries, and scans.
As items are deleted, they are removed from any local secondary index and global
secondary index immediately in the same eventually consistent way as a standard
delete operation.
For more information, see [Time To Live](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html)
in the *Amazon DynamoDB Developer Guide*.
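
## Example

A hedged sketch enabling TTL on a hypothetical `ExpiresAt` attribute; items
then carry an epoch-seconds timestamp in that attribute:

```
input = %{
  "TableName" => "Sessions",
  "TimeToLiveSpecification" => %{"AttributeName" => "ExpiresAt", "Enabled" => true}
}

{:ok, _result, _} = update_time_to_live(client, input)

# An item written with this attribute expires roughly 24 hours from now:
expires_at = System.os_time(:second) + 24 * 60 * 60
```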
"""
def update_time_to_live(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateTimeToLive", input, options)
end
end
# File: lib/aws/generated/dynamodb.ex (starcoder)
defmodule Snitch.Tools.Helper.Zone do
@moduledoc """
Test helpers to insert zones and zone members.
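
A hedged end-to-end sketch (ISO codes and zone data are hypothetical):

```
countries = countries_with_manifest(~w(IN US))
zones = zones_with_manifest(%{"domestic" => %{zone_type: "C", description: "home"}})
zone_members(Enum.zip(zones, [countries]))
```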
"""
alias Snitch.Data.Schema.{Country, CountryZoneMember, State, StateZoneMember, Zone}
alias Snitch.Core.Tools.MultiTenancy.Repo
@zone %{
name: nil,
description: nil,
zone_type: nil,
inserted_at: DateTime.utc_now(),
updated_at: DateTime.utc_now()
}
@state %{
name: nil,
code: nil,
country_id: nil,
inserted_at: DateTime.utc_now(),
updated_at: DateTime.utc_now()
}
@country %{
iso_name: nil,
iso: nil,
iso3: nil,
name: nil,
numcode: nil,
inserted_at: DateTime.utc_now(),
updated_at: DateTime.utc_now()
}
@state_zone_member %{
state_id: nil,
zone_id: nil,
inserted_at: DateTime.utc_now(),
updated_at: DateTime.utc_now()
}
@country_zone_member %{
country_id: nil,
zone_id: nil,
inserted_at: DateTime.utc_now(),
updated_at: DateTime.utc_now()
}
def countries_with_manifest(manifest) do
cs =
Enum.map(manifest, fn iso ->
%{@country | iso: iso, iso3: iso <> "_", name: iso, numcode: iso}
end)
{_, countries} = Repo.insert_all(Country, cs, on_conflict: :nothing, returning: true)
countries
end
def states_with_manifest(manifest) do
ss =
Enum.map(manifest, fn {name, code, country} ->
%{@state | country_id: country.id, name: name, code: code}
end)
{_, states} = Repo.insert_all(State, ss, on_conflict: :nothing, returning: true)
states
end
def zone_members(manifest) do
zm =
manifest
|> Enum.map(fn
{%{zone_type: "S"} = zone, states} ->
Enum.map(states, fn state ->
%{@state_zone_member | zone_id: zone.id, state_id: state.id}
end)
{%{zone_type: "C"} = zone, countries} ->
Enum.map(countries, fn country ->
%{@country_zone_member | zone_id: zone.id, country_id: country.id}
end)
end)
|> List.flatten()
szm = Enum.filter(zm, fn member -> Map.has_key?(member, :state_id) end)
czm = Enum.filter(zm, fn member -> Map.has_key?(member, :country_id) end)
{_, state_members} =
Repo.insert_all(StateZoneMember, szm, on_conflict: :nothing, returning: true)
{_, country_members} =
Repo.insert_all(CountryZoneMember, czm, on_conflict: :nothing, returning: true)
{state_members, country_members}
end
@doc """
Creates zones according to the manifest.
## Sample manifest
```
%{
"domestic" => %{zone_type: "S", description: "something"}
}
```
"""
def zones_with_manifest(manifest) do
zones =
Enum.map(manifest, fn {name, params} ->
Map.merge(%{@zone | name: name}, params)
end)
{_, zones} = Repo.insert_all(Zone, zones, on_conflict: :nothing, returning: true)
zones
end
end
# File: apps/snitch_core/lib/core/tools/helpers/zone.ex (starcoder)
defmodule Tirexs.Query do
#http://www.elasticsearch.org/guide/reference/query-dsl/
@moduledoc false
import Tirexs.DSL.Logic
import Tirexs.Query.Logic
import Tirexs.ElasticSearch
require Record
Record.defrecord :record_result, [count: 0, max_score: nil, facets: [], hits: [], _scroll_id: nil, aggregations: []]
@doc false
defmacro query([do: block]) do
[query: extract(block)]
end
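# A hedged usage sketch of the `query` macro (field and value are
# hypothetical; the keyword output mirrors the Elasticsearch query DSL):
#
#     query do
#       term("user", "kimchy")
#     end
#     #=> [query: [term: [user: "kimchy"]]]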
@doc false
def _query(options, query_opts \\ []) do
{query_opts, options} = split_options(options, query_opts)
[query: extract(options) ++ query_opts]
end
# In modern Elixir, variables rebound inside `if` do not leak to the
# enclosing scope, so the original in-place reassignment pattern was a
# no-op; this helper returns the `{opts, options}` split explicitly and
# is used by all the option-taking DSL functions below.
defp split_options(options, default_opts) do
if is_list(options) do
{Enum.fetch!(options, 0), extract_do(options, 1)}
else
{default_opts, options}
end
end
@doc false
# `Dict` is deprecated/removed in modern Elixir; `Keyword.put/3` behaves
# identically for the keyword lists built here (applied throughout).
def match(options) do
case options do
[options] -> [match: extract(extract_do([options]))]
_ ->
[field, value, options] = extract_options(options)
[match: Keyword.put([], to_atom(field), [query: value] ++ options)]
end
end
@doc false
def range(options) do
[field, value, _] = extract_options(options)
[range: Keyword.put([], to_atom(field), value)]
end
@doc false
def multi_match(options) do
[query, fields, _] = extract_options(options)
[multi_match: [query: query, fields: fields]]
end
@doc false
def boosting(options, boosting_opts \\ []) do
{boosting_opts, options} = split_options(options, boosting_opts)
[boosting: extract(options) ++ boosting_opts]
end
@doc false
def ids(options) do
[type, values, _] = extract_options(options)
[ids: [type: type, values: values]]
end
@doc false
def query_string(options) do
[query, options, _] = extract_options(options)
[query_string: [query: query] ++ options]
end
@doc false
def custom_score(options, custom_score_opts \\ []) do
{custom_score_opts, options} = split_options(options, custom_score_opts)
[custom_score: extract(options) ++ custom_score_opts]
end
@doc false
def custom_boost_factor(options, custom_boost_factor_opts \\ []) do
{custom_boost_factor_opts, options} = split_options(options, custom_boost_factor_opts)
[custom_boost_factor: extract(options) ++ custom_boost_factor_opts]
end
@doc false
def constant_score(options, constant_score_opts \\ []) do
{constant_score_opts, options} = split_options(options, constant_score_opts)
[constant_score: extract(options) ++ constant_score_opts]
end
@doc false
def dis_max(options, dis_max_opts \\ []) do
{dis_max_opts, options} = split_options(options, dis_max_opts)
[dis_max: extract(options) ++ dis_max_opts]
end
@doc false
def term(options) do
[field, values, options] = extract_options(options)
[term: Keyword.put(options, to_atom(field), values)]
end
@doc false
def field(options) do
[field, values, _] = extract_options(options)
[field: Keyword.put([], to_atom(field), values)]
end
@doc false
def flt(options) do
[value, fields, options] = extract_options(options)
[fuzzy_like_this: [like_text: value, fields: fields] ++ options]
end
@doc false
def flt_field(options) do
[field, options, _] = extract_options(options)
[fuzzy_like_this_field: Keyword.put([], to_atom(field), options)]
end
@doc false
def fuzzy(options) do
[field, values, _] = extract_options(options)
[fuzzy: Keyword.put([], to_atom(field), values)]
end
@doc false
def has_child(options, has_child_opts \\ []) do
{has_child_opts, options} = split_options(options, has_child_opts)
[has_child: extract(options) ++ has_child_opts]
end
@doc false
def has_parent(options, has_parent_opts \\ []) do
{has_parent_opts, options} = split_options(options, has_parent_opts)
[has_parent: extract(options) ++ has_parent_opts]
end
@doc false
def match_all(options) do
Keyword.put([], :match_all, options)
end
@doc false
def mlt(options) do
[value, fields, options] = extract_options(options)
[more_like_this: [like_text: value, fields: fields] ++ options]
end
@doc false
def mlt_field(options) do
[field, options, _] = extract_options(options)
[more_like_this_field: Keyword.put([], to_atom(field), options)]
end
@doc false
def prefix(options) do
[field, values, _] = extract_options(options)
[prefix: Keyword.put([], to_atom(field), values)]
end
@doc false
def span_first(options, span_first_opts \\ []) do
{span_first_opts, options} = split_options(options, span_first_opts)
[span_first: extract(options) ++ span_first_opts]
end
@doc false
def span_term(options) do
[field, options, _] = extract_options(options)
[span_term: Keyword.put([], to_atom(field), options)]
end
@doc false
def span_near(options, span_near_opts \\ []) do
{span_near_opts, options} = split_options(options, span_near_opts)
[span_near: extract(options) ++ span_near_opts]
end
@doc false
def span_not(options, span_not_opts \\ []) do
{span_not_opts, options} = split_options(options, span_not_opts)
[span_not: extract(options) ++ span_not_opts]
end
@doc false
def span_or(options, span_or_opts \\ []) do
{span_or_opts, options} = split_options(options, span_or_opts)
[span_or: extract(options) ++ span_or_opts]
end
@doc false
def terms(options) do
[field, value, options] = extract_options(options)
[terms: Keyword.put([], to_atom(field), value) ++ options]
end
@doc false
def top_children(options, top_children_opts \\ []) do
{top_children_opts, options} = split_options(options, top_children_opts)
[top_children: extract(options) ++ top_children_opts]
end
@doc false
def wildcard(options) do
[field, options, _] = extract_options(options)
[wildcard: Keyword.put([], to_atom(field), options)]
end
@doc false
def indices(options, indices_opts \\ []) do
{indices_opts, options} = split_options(options, indices_opts)
[indices: extract(options) ++ indices_opts]
end
@doc false
def text(options) do
[field, values, _] = extract_options(options)
[text: Keyword.put([], to_atom(field), values)]
end
@doc false
def geo_shape(options, geo_shape_opts \\ []) do
{geo_shape_opts, options} = split_options(options, geo_shape_opts)
[geo_shape: extract(options) ++ geo_shape_opts]
end
@doc false
def nested(options, nested_opts \\ []) do
{nested_opts, options} = split_options(options, nested_opts)
[nested: extract(options) ++ nested_opts]
end
@doc false
def rescore_query(options, rescore_opts \\ []) do
{rescore_opts, options} = split_options(options, rescore_opts)
[rescore_query: extract(options) ++ rescore_opts]
end
@doc false
def facet_filter(options, facet_opts \\ []) do
{facet_opts, options} = split_options(options, facet_opts)
[facet_filter: extract(options) ++ facet_opts]
end
@doc false
def custom_filters_score(options, custom_filters_score_opts \\ []) do
{custom_filters_score_opts, options} = split_options(options, custom_filters_score_opts)
custom_filters_score = extract(options) ++ custom_filters_score_opts
query = [query: custom_filters_score[:query]]
filters = custom_filters_score[:filters]
query = Keyword.put(query, :filters, without_array(filters, []))
[custom_filters_score: query ++ custom_filters_score_opts]
end
@doc false
def boost(options) do
[value, _, _] = extract_options(options)
[boost: value]
end
@doc false
def group(options, _object_opts \\ []) do
options = if is_list(options), do: extract_do(options, 1), else: options
[extract(options)]
end
@doc false
def bool(block) do
[bool: extract(block)]
end
@doc false
def must(block) do
[must: to_array(extract(block))]
end
@doc false
def should(block) do
[should: to_array(extract(block))]
end
@doc false
def must_not(block) do
[must_not: to_array(extract(block))]
end
@doc false
def positive(options) do
[positive: extract(extract_do(options))]
end
@doc false
def negative(options) do
[negative: extract(extract_do(options))]
end
@doc false
def queries(options) do
[queries: to_array(extract(options))]
end
@doc false
def location(options, location_opts \\ []) do
{location_opts, options} = split_options(options, location_opts)
[location: extract(options) ++ location_opts]
end
@doc false
def shape(options) do
[shape: options]
end
@doc false
def indexed_shape(options) do
[indexed_shape: options]
end
@doc false
def no_match_query(options) when is_binary(options) do
[no_match_query: options]
end
@doc false
def no_match_query(options) do
[no_match_query: extract(options)]
end
@doc false
def clauses(options) do
[clauses: to_array(extract(options))]
end
@doc false
def include(options) do
[include: extract(options[:do])]
end
@doc false
def exclude(options) do
[exclude: extract(options[:do])]
end
@doc false
def text_phrase(options) do
[field, values, _] = extract_options(options)
[text_phrase: Keyword.put([], to_atom(field), values)]
end
@doc false
def text_phrase_prefix(options) do
[field, values, _] = extract_options(options)
[text_phrase_prefix: Keyword.put([], to_atom(field), values)]
end
@doc false
def create_resource(definition) do
create_resource(definition, record_config())
end
@doc false
def create_resource(definition, settings, opts\\[]) do
url = if definition[:type] do
"#{definition[:index]}/#{definition[:type]}"
else
"#{definition[:index]}"
end
{ url, json } = { "#{url}/_search" <> to_param(opts, ""), to_resource_json(definition) }
case Tirexs.ElasticSearch.post(url, json, settings) do
{:ok, _, result} ->
count = result[:hits][:total]
hits = result[:hits][:hits]
facets = result[:facets]
max_score = result[:hits][:max_score]
scroll_id = result[:_scroll_id]
aggregations = result[:aggregations]
record_result(count: count, hits: hits, facets: facets, max_score: max_score, _scroll_id: scroll_id, aggregations: aggregations)
result -> result
end
end
@doc false
def to_resource_json(definition), do: JSEX.encode!(definition[:search])
end
# File: lib/tirexs/query.ex (starcoder)
defmodule Legend.Hook do
@moduledoc """
Named hooks that run around stage events.
Each hook carries an optional `filter` deciding whether it runs for a given
event, and a `fun` receiving the event and the current hook state. Hooks
defined on a stage are merged with hooks passed via execute options, which
may override them by name.
"""
alias Legend.{Event, Stage, Utils}
@typedoc """
Arbitrary state threaded through successive hook invocations.
"""
@type hook_state :: term
@typedoc """
The return value of a hook function, optionally carrying updated hook state.
"""
@type hook_result ::
:ok |
{:ok, hook_state} |
{:error, reason :: term} |
{:error, reason :: term, hook_state}
@typedoc """
An event, optionally paired with the result of the hook that produced it.
"""
@type hook_context :: {Event.t, hook_result} | Event.t
@typedoc """
Accumulator threaded through `step/3`: the hooks still to run and the
effects produced so far.
"""
@type accumulator :: %{
hooks_left: [t],
effects_so_far: Legend.effects
}
@typedoc """
A named hook: an optional `filter` predicate plus the function to execute.
"""
@type t :: %__MODULE__{
name: Legend.name,
filter: (Event.t, hook_state -> boolean) | nil,
fun: (Event.t, hook_state -> hook_result)
}
defstruct [
name: nil,
filter: nil,
fun: nil
]
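# A hedged sketch of a hook definition (names and logic are hypothetical):
#
#     %Legend.Hook{
#       name: :audit,
#       filter: fn %Legend.Event{name: [status | _]}, _state -> status == :completed end,
#       fun: fn event, state -> {:ok, [event.name | state || []]} end
#     }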
@doc """
Merges the hooks defined on `stage` with any hooks given in `opts`.
An `opts` hook with the same name as a stage hook is skipped unless its
`:override?` option is set.
"""
@spec merge_hooks(Legend.stage, Stage.execute_opts) :: [t]
def merge_hooks(stage, opts \\ [])
def merge_hooks(stage, opts) when is_list(stage) do
stage
|> Keyword.get(:hooks, [])
|> reduce_hooks(opts)
end
def merge_hooks(stage, opts) when is_atom(stage) do
stage
|> apply(:list_hooks, [])
|> reduce_hooks(opts)
end
@spec reduce_hooks([t], Stage.execute_opts) :: [t]
defp reduce_hooks(stage_hooks, opts) do
opts
|> Keyword.get(:hooks, [])
|> Enum.reduce(stage_hooks, fn
{h, hopts}, hs -> add_hook(h, hs, hopts)
h, hs -> add_hook(h, hs)
end)
end
@spec add_hook(t, [t], Stage.hook_opts) :: [t]
defp add_hook(new_hook, hooks, opts \\ []) do
with %__MODULE__{} <- Enum.find(hooks, fn h -> h.name == new_hook.name end),
{false, _} <- Keyword.pop(opts, :override?, false) do
hooks
else
_ -> [new_hook | hooks]
end
end
@doc """
Runs the next pending hook against `event`, first folding any hook result
carried in the event's context back into the accumulator.
Returns `{next_event | nil, accumulator}`.
"""
@spec step(Event.t, accumulator, Stage.step_options) :: {Event.t | nil, accumulator}
def step(event, state, opts \\ [])
def step(event, %{hooks_left: []} = s, opts) do
case maybe_update_hook_state(event.context, s) do
{:ok, new_state} ->
{nil, new_state}
{:error, _reason, new_state} ->
handle_hook_error(event.context, new_state, opts)
end
end
def step(event, %{hooks_left: [h|hs]} = s, opts) do
case maybe_update_hook_state(event.context, s) do
{:ok, new_state} ->
{maybe_execute_hook(h, event, new_state, opts), %{new_state | hooks_left: hs}}
{:error, _reason, new_state} ->
handle_hook_error(event.context, new_state, opts)
end
end
def step(_event, state, _opts), do: {nil, state}
@spec maybe_update_hook_state(hook_context, accumulator) ::
{:ok, accumulator} |
{:error, reason :: term, accumulator}
defp maybe_update_hook_state({_, hook_result}, state) do
update_hook_state(hook_result, state)
end
defp maybe_update_hook_state(_, state), do: {:ok, state}
@spec update_hook_state(hook_result, accumulator) ::
{:ok, accumulator} |
{:error, reason :: term, accumulator}
defp update_hook_state(:ok, state), do: {:ok, state}
defp update_hook_state({:ok, hook_state}, state) do
{:ok, put_in(state, [:effects_so_far, :__hookstate__], hook_state)}
end
defp update_hook_state({:error, reason}, state),
do: {:error, reason, state}
defp update_hook_state({:error, reason, hook_state}, state) do
{:error, reason, put_in(state, [:effects_so_far, :__hookstate__], hook_state)}
end
@spec handle_hook_error(hook_context, accumulator, Stage.step_options) ::
{Event.t | nil, accumulator}
defp handle_hook_error({event, {:error, reason, _}}, state, opts),
do: handle_hook_error({event, {:error, reason}}, state, opts)
defp handle_hook_error({%Event{name: [_, _, :compensation]}, _}, %{hooks_left: []} = state, _opts) do
{nil, state}
end
defp handle_hook_error({%Event{name: [_, _, :compensation]} = e, _}, %{hooks_left: [h|hs]} = s, opts) do
{maybe_execute_hook(h, e, s, opts), %{s | hooks_left: hs}}
end
defp handle_hook_error({event, {:error, reason}}, state, _opts) do
%{effects_so_far: effects_so_far} = state
# TODO: this needs to work for all stages, not just the leaves...
# probably need to change the spec to return an `error` response
# then have the calling code figure out what event that is...
{
Event.update(event, event: [:starting, :compensation],
context: {reason,
Event.get_effect(event, effects_so_far),
effects_so_far}),
state
}
end
@doc """
Executes `hook` for `event` when its filter passes, tagging the event as
`:completed` or `:skipped` accordingly.
"""
@spec maybe_execute_hook(t, Event.t, accumulator, Stage.step_options) :: Event.t
def maybe_execute_hook(hook, event, state, opts) do
hook_state = get_in(state, [:effects_so_far, :__hookstate__])
case Utils.execute(hook.filter, [event, hook_state]) do
{:ok, true} ->
result = execute_hook(hook, event, hook_state, opts)
Event.update(event, name: [:completed, :hook, hook.name],
context: {event, result})
_ ->
Event.update(event, name: [:skipped, :hook, hook.name],
context: event)
end
end
@doc """
Invokes the hook function with `event` and `state`, honoring the `:dry_run?`
and `:timeout` options, and normalizes unexpected returns to an error.
"""
@spec execute_hook(t, Event.t, hook_state, Stage.step_options) :: hook_result
def execute_hook(hook, event, state, opts \\ []) do
if Keyword.get(opts, :dry_run?, false) do
Keyword.get(opts, :dry_run_result, :ok)
else
timeout = Keyword.get(opts, :timeout, :infinity)
case Utils.execute(hook.fun, [event, state], timeout) do
:ok -> :ok
{:ok, hook_state} -> {:ok, hook_state}
{:error, reason} -> {:error, reason}
{:error, reason, hook_state} -> {:error, reason, hook_state}
otherwise -> {:error, {:unsupported_hook_result_form, otherwise}}
end
end
end
end
# File: lib/legend/hook.ex (starcoder)
defimpl Timex.Protocol, for: Date do
@moduledoc """
This module represents all functions specific to creating/manipulating/comparing Dates (year/month/day)
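
A hedged usage sketch (calls go through the `Timex` facade, which dispatches
to this protocol implementation):

    Timex.shift(~D[2021-02-27], days: 3)
    #=> ~D[2021-03-02]
    Timex.end_of_month(~D[2021-02-14])
    #=> ~D[2021-02-28]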
"""
use Timex.Constants
import Timex.Macros
alias Timex.Types
@epoch_seconds :calendar.datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}})
@spec to_julian(Date.t()) :: float
def to_julian(%Date{:year => y, :month => m, :day => d}) do
Timex.Calendar.Julian.julian_date(y, m, d)
end
@spec to_gregorian_seconds(Date.t()) :: non_neg_integer
def to_gregorian_seconds(date), do: to_seconds(date, :zero)
@spec to_gregorian_microseconds(Date.t()) :: non_neg_integer
def to_gregorian_microseconds(date), do: to_seconds(date, :zero) * (1_000 * 1_000)
@spec to_unix(Date.t()) :: non_neg_integer
def to_unix(date), do: trunc(to_seconds(date, :epoch))
@spec to_date(Date.t()) :: Date.t()
def to_date(date), do: date
@spec to_datetime(Date.t(), timezone :: Types.valid_timezone()) :: DateTime.t() | {:error, term}
def to_datetime(%Date{:year => y, :month => m, :day => d}, timezone) do
case Timex.DateTime.Helpers.construct({{y, m, d}, {0, 0, 0, 0}}, 0, timezone, :wall) do
{:error, _} ->
# This happens for date/times that fall on a timezone boundary and don't exist,
# advance forward an hour and try again
Timex.DateTime.Helpers.construct({{y, m, d}, {1, 0, 0, 0}}, 0, timezone, :wall)
datetime ->
datetime
end
end
@spec to_naive_datetime(Date.t()) :: NaiveDateTime.t()
def to_naive_datetime(%Date{:year => y, :month => m, :day => d}) do
%NaiveDateTime{year: y, month: m, day: d, hour: 0, minute: 0, second: 0, microsecond: {0, 0}}
end
@spec to_erl(Date.t()) :: Types.date()
def to_erl(%Date{year: y, month: m, day: d}), do: {y, m, d}
@spec century(Date.t()) :: non_neg_integer
def century(%Date{:year => year}), do: Timex.century(year)
@spec is_leap?(Date.t()) :: boolean
def is_leap?(%Date{year: year}), do: :calendar.is_leap_year(year)
@spec beginning_of_day(Date.t()) :: Date.t()
def beginning_of_day(%Date{} = date), do: date
@spec end_of_day(Date.t()) :: Date.t()
def end_of_day(%Date{} = date), do: date
@spec beginning_of_week(Date.t(), Types.weekstart()) :: Date.t()
def beginning_of_week(%Date{} = date, weekstart) do
case Timex.days_to_beginning_of_week(date, weekstart) do
{:error, _} = err -> err
days -> shift(date, days: -days)
end
end
@spec end_of_week(Date.t(), Types.weekstart()) :: Date.t()
def end_of_week(%Date{} = date, weekstart) do
case Timex.days_to_end_of_week(date, weekstart) do
{:error, _} = err ->
err
days_to_end ->
shift(date, days: days_to_end)
end
end
@spec beginning_of_year(Date.t()) :: Date.t()
def beginning_of_year(%Date{} = date),
do: %{date | :month => 1, :day => 1}
@spec end_of_year(Date.t()) :: Date.t()
def end_of_year(%Date{} = date),
do: %{date | :month => 12, :day => 31}
@spec beginning_of_quarter(Date.t()) :: Date.t()
def beginning_of_quarter(%Date{month: month} = date) do
month = 1 + 3 * (Timex.quarter(month) - 1)
%{date | :month => month, :day => 1}
end
@spec end_of_quarter(Date.t()) :: Date.t()
def end_of_quarter(%Date{month: month} = date) do
month = 3 * Timex.quarter(month)
end_of_month(%{date | :month => month, :day => 1})
end
@spec beginning_of_month(Date.t()) :: Date.t()
def beginning_of_month(%Date{} = date),
do: %{date | :day => 1}
@spec end_of_month(Date.t()) :: Date.t()
def end_of_month(%Date{} = date),
do: %{date | :day => days_in_month(date)}
@spec quarter(Date.t()) :: 1..4
def quarter(%Date{month: month}), do: Timex.quarter(month)
def days_in_month(%Date{:year => y, :month => m}), do: Timex.days_in_month(y, m)
def week_of_month(%Date{:year => y, :month => m, :day => d}), do: Timex.week_of_month(y, m, d)
def weekday(%Date{:year => y, :month => m, :day => d}), do: :calendar.day_of_the_week({y, m, d})
def day(%Date{} = date),
do: 1 + Timex.diff(date, %Date{:year => date.year, :month => 1, :day => 1}, :days)
def is_valid?(%Date{:year => y, :month => m, :day => d}) do
:calendar.valid_date({y, m, d})
end
def iso_week(%Date{:year => y, :month => m, :day => d}),
do: Timex.iso_week(y, m, d)
def from_iso_day(%Date{year: year}, day) when is_day_of_year(day) do
{year, month, day_of_month} = Timex.Helpers.iso_day_to_date_tuple(year, day)
%Date{year: year, month: month, day: day_of_month}
end
@doc """
See docs for Timex.set/2 for details.
"""
@spec set(Date.t(), list({atom(), term})) :: Date.t() | {:error, term}
def set(%Date{} = date, options) do
validate? = Keyword.get(options, :validate, true)
Enum.reduce(options, date, fn
_option, {:error, _} = err ->
err
option, %Date{} = result ->
case option do
{:validate, _} ->
result
{:datetime, {{y, m, d}, {_, _, _}}} ->
if validate? do
%{
result
| :year => Timex.normalize(:year, y),
:month => Timex.normalize(:month, m),
:day => Timex.normalize(:day, {y, m, d})
}
else
%{result | :year => y, :month => m, :day => d}
end
{:date, {y, m, d}} ->
if validate? do
{yn, mn, dn} = Timex.normalize(:date, {y, m, d})
%{result | :year => yn, :month => mn, :day => dn}
else
%{result | :year => y, :month => m, :day => d}
end
{:day, d} ->
if validate? do
%{result | :day => Timex.normalize(:day, {result.year, result.month, d})}
else
%{result | :day => d}
end
{name, val} when name in [:year, :month] ->
if validate? do
Map.put(result, name, Timex.normalize(name, val))
else
Map.put(result, name, val)
end
{name, _} when name in [:time, :timezone, :hour, :minute, :second, :microsecond] ->
result
{option_name, _} ->
{:error, {:invalid_option, option_name}}
end
end)
end
@spec shift(Date.t(), list({atom(), term})) :: Date.t() | {:error, term}
def shift(%Date{} = date, [{_, 0}]), do: date
def shift(%Date{} = date, options) do
allowed_options =
Enum.filter(options, fn
{:hours, value} when value >= 24 or value <= -24 ->
true
{:hours, _} ->
false
{:minutes, value} when value >= 24 * 60 or value <= -24 * 60 ->
true
{:minutes, _} ->
false
{:seconds, value} when value >= 24 * 60 * 60 or value <= -24 * 60 * 60 ->
true
{:seconds, _} ->
false
{:milliseconds, value}
when value >= 24 * 60 * 60 * 1000 or value <= -24 * 60 * 60 * 1000 ->
true
{:milliseconds, _} ->
false
{:microseconds, {value, _}}
when value >= 24 * 60 * 60 * 1000 * 1000 or value <= -24 * 60 * 60 * 1000 * 1000 ->
true
{:microseconds, value}
when value >= 24 * 60 * 60 * 1000 * 1000 or value <= -24 * 60 * 60 * 1000 * 1000 ->
true
{:microseconds, _} ->
false
{_type, _value} ->
true
end)
case Timex.shift(to_naive_datetime(date), allowed_options) do
{:error, _} = err ->
err
%NaiveDateTime{:year => y, :month => m, :day => d} ->
%Date{year: y, month: m, day: d}
end
end
def shift(_, _), do: {:error, :badarg}
defp to_seconds(%Date{year: y, month: m, day: d}, :zero),
do: :calendar.datetime_to_gregorian_seconds({{y, m, d}, {0, 0, 0}})
defp to_seconds(%Date{year: y, month: m, day: d}, :epoch),
do: :calendar.datetime_to_gregorian_seconds({{y, m, d}, {0, 0, 0}}) - @epoch_seconds
end
# File: lib/date/date.ex (starcoder)
defmodule Expublish.Options do
@moduledoc """
Validate and parse mix task arguments.
"""
require Logger
@defaults %{
allow_untracked: false,
as_major: false,
as_minor: false,
changelog_date_time: false,
disable_publish: false,
disable_push: false,
disable_test: false,
dry_run: false,
help: false,
branch: "master",
remote: "origin",
tag_prefix: "v",
commit_prefix: "Version release",
version_file: "mix.exs"
}
@aliases [
h: :help,
d: :dry_run
]
@invalid_as_option_levels [:stable, :major, :minor, :patch]
@type t :: %__MODULE__{}
defstruct Enum.into(@defaults, [])
@doc """
Default options used for every run.
```
%Expublish.Options{
#{
@defaults
|> Enum.map(fn {k, v} -> " #{k}: #{inspect(v)}" end)
|> Enum.join(",\n")
}
}
```
"""
@spec defaults :: t()
def defaults,
do: struct(__MODULE__, @defaults)
@doc """
Parse mix task arguments and merge with default options.
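
For example (a hedged sketch; result abbreviated):

```
parse(["--dry-run", "--tag-prefix=rel-"])
#=> %Expublish.Options{dry_run: true, tag_prefix: "rel-", ...}
```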
"""
@spec parse(list(String.t())) :: struct()
def parse(args) do
process_options(
OptionParser.parse(args,
aliases: @aliases,
strict: typed_options_from_default()
)
)
end
@type level() :: :major | :minor | :patch | :rc | :beta | :alpha | :stable
@doc """
Validates the combination of options and version level.
Returns `:ok` or an error message.
"""
@spec validate(__MODULE__.t(), level()) :: :ok | String.t()
def validate(%__MODULE__{as_major: true}, level) when level in @invalid_as_option_levels do
"Invalid task invocation. Cannot use --as-major for #{level} version increase."
end
def validate(%__MODULE__{as_minor: true}, level) when level in @invalid_as_option_levels do
"Invalid task invocation. Cannot use --as-minor for #{level} version increase."
end
def validate(_options, _level), do: :ok
@doc """
Print help to stdout.
"""
@spec print_help() :: :ok
def print_help do
IO.puts(help_string())
:ok
end
@doc false
def print_help?(%{help: help}), do: help
@doc false
def git_tag_prefix(%{tag_prefix: tag_prefix}), do: sanitize(tag_prefix)
@doc false
def git_commit_prefix(%{commit_prefix: commit_prefix}), do: sanitize(commit_prefix)
defp process_options({options, _, []}) do
options = Map.merge(defaults(), Enum.into(options, %{}))
if print_help?(options) do
print_help()
exit(:shutdown)
end
options
end
defp process_options({_, _, errors}) do
option = if length(errors) == 1, do: "option", else: "options"
invalid_options = errors |> Enum.map(fn {option, _} -> option end) |> Enum.join(", ")
Logger.error("Invalid #{option}: #{invalid_options}. Abort.")
exit({:shutdown, 1})
end
defp sanitize(string) do
string
|> String.replace("\"", "")
|> String.replace("'", "")
|> String.trim()
end
defp typed_options_from_default do
@defaults
|> Enum.map(fn {k, v} -> {k, to_option_type(v)} end)
|> Enum.into([])
end
defp to_option_type(default) when is_boolean(default), do: :boolean
defp to_option_type(default) when is_binary(default), do: :string
defp help_string do
"""
Usage: mix expublish.[level] [options]
level:
major - Publish next major version
minor - Publish next minor version
patch - Publish next patch version
stable - Publish current stable version from pre-release
rc - Publish release-candidate pre-release of next patch version
beta - Publish beta pre-release of next patch version
alpha - Publish alpha pre-release of next patch version
options:
-d, --dry-run - Perform dry run (no writes, no commits)
--allow-untracked - Allow untracked files during release
--as-major - Only for pre-release level
--as-minor - Only for pre-release level
--disable-publish - Disable hex publish
--disable-push - Disable git push
--disable-test - Disable test run
--changelog-date-time - Use date-time instead of date in new changelog entry
--branch=string - Remote branch to push to, default: #{
inspect(Map.get(@defaults, :branch))
}
--remote=string - Remote name to push to, default: #{
inspect(Map.get(@defaults, :remote))
}
--commit-prefix=string - Custom commit prefix, default: #{
inspect(Map.get(@defaults, :commit_prefix))
}
--tag-prefix=string - Custom tag prefix, default: #{
inspect(Map.get(@defaults, :tag_prefix))
}
--version-file=string - When working with a separate version file
"""
end
end
|
lib/expublish/options.ex
|
defmodule State.Prediction do
@moduledoc "State for Predictions"
use State.Server,
indices: [:stop_id, :trip_id, :route_id, :route_pattern_id],
parser: Parse.TripUpdates,
recordable: Model.Prediction,
hibernate: false
@doc """
Selects a distinct group of Prediction state sources, with filtering.
## Examples
iex> [
State.Prediction
]
|> State.Prediction.select_grouped(matchers, index, opts)
[...]
"""
import Parse.Time, only: [service_date: 1]
import State.Route, only: [by_types: 1]
def select_grouped(sources, matchers, index, opts \\ []) do
sources
|> Stream.flat_map(&apply(&1, :select, [matchers, index]))
|> Enum.uniq_by(&prediction_key/1)
|> State.all(opts)
end
def filter_by_route_type(predictions, nil), do: predictions
def filter_by_route_type(predictions, []), do: predictions
def filter_by_route_type(predictions, route_types) do
route_ids =
route_types
|> by_types()
|> MapSet.new(& &1.id)
Enum.filter(predictions, &(&1.route_id in route_ids))
end
defp prediction_key(%Model.Prediction{stop_sequence: stop_seq} = mod) do
{stop_seq, mod.stop_id, mod.route_id, mod.trip_id, mod.direction_id}
end
@spec by_stop_route(Model.Stop.id(), Model.Route.id()) :: [Model.Prediction.t()]
def by_stop_route(stop_id, route_id) do
match(%{stop_id: stop_id, route_id: route_id}, :stop_id)
end
@impl State.Server
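# Enriches each incoming prediction with its trip data: marks whether the
# trip matched, backfills a missing direction_id, and expands the prediction
# into one copy per matching alternate trip.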
def pre_insert_hook(prediction) do
trips =
case prediction do
%{trip_id: trip_id} when is_binary(trip_id) ->
State.Trip.by_id(trip_id)
_ ->
[]
end
prediction
|> fill_trip_match(trips)
|> fill_missing_direction_ids(trips)
|> update_route_from_alternate_trips(trips)
end
defp fill_trip_match(prediction, [_ | _]) do
%{prediction | trip_match?: true}
end
defp fill_trip_match(prediction, []) do
prediction
end
defp fill_missing_direction_ids(%{direction_id: direction_id} = prediction, _trips)
when is_integer(direction_id) do
prediction
end
defp fill_missing_direction_ids(
prediction,
trips
) do
case trips do
[%{direction_id: direction} | _] -> %{prediction | direction_id: direction}
_ -> prediction
end
end
defp update_route_from_alternate_trips(prediction, [_ | _] = trips) do
for trip <- trips do
%{prediction | route_id: trip.route_id, route_pattern_id: trip.route_pattern_id}
end
end
defp update_route_from_alternate_trips(prediction, _trips) do
[prediction]
end
@spec prediction_for(Model.Schedule.t(), Date.t()) :: Model.Prediction.t() | nil
def prediction_for(%Model.Schedule{} = schedule, %Date{} = date) do
stop_ids =
case State.Stop.siblings(schedule.stop_id) do
[_ | _] = stops -> Enum.map(stops, & &1.id)
[] -> [schedule.stop_id]
end
queries =
for stop_id <- stop_ids do
%{
trip_id: schedule.trip_id,
stop_id: stop_id,
stop_sequence: schedule.stop_sequence
}
end
[
State.Prediction
]
|> State.Prediction.select_grouped(
queries,
:stop_id
)
|> Enum.find(&on_day?(&1, date))
end
@spec prediction_for_many([Model.Schedule.t()], Date.t()) :: map
def prediction_for_many(schedules, %Date{} = date) do
Map.new(schedules, &{{&1.trip_id, &1.stop_sequence}, prediction_for(&1, date)})
end
@spec on_day?(Model.Prediction.t(), Date.t()) :: boolean()
defp on_day?(prediction, date) do
[:arrival_time, :departure_time]
|> Enum.any?(fn time_key ->
case Map.get(prediction, time_key) do
%DateTime{} = dt ->
dt
|> service_date
|> Kernel.==(date)
nil ->
false
end
end)
end
end
|
apps/state/lib/state/prediction.ex
|
defmodule Cloudinary.Format do
@moduledoc """
The Cloudinary-supported formats of images, videos and audio.
## Official documentation
* https://cloudinary.com/documentation/image_transformations#supported_image_formats
* https://cloudinary.com/documentation/video_manipulation_and_delivery#supported_video_formats
* https://cloudinary.com/documentation/audio_transformations#supported_audio_formats
"""
@typedoc """
The Cloudinary-supported formats of images, videos and audio.
"""
@type t ::
:ai
| :gif
| :webp
| :bmp
| :djvu
| :ps
| :ept
| :eps
| :eps3
| :fbx
| :flif
| :gltf
| :heif
| :heic
| :ico
| :indd
| :jpg
| :jpe
| :jpeg
| :jp2
| :wdp
| :jxr
| :hdp
| :pdf
| :png
| :psd
| :arw
| :cr2
| :svg
| :tga
| :tif
| :tiff
| :"3g2"
| :"3gp"
| :avi
| :flv
| :m3u8
| :ts
| :m2ts
| :mts
| :mov
| :mkv
| :mp4
| :mpeg
| :mpd
| :mxf
| :ogv
| :webm
| :wmv
| :aac
| :aiff
| :amr
| :flac
| :m4a
| :mp3
| :ogg
| :opus
| :wav
@doc """
Returns true if the format of the image, video or audio is supported by Cloudinary.
## Example
    iex> require Cloudinary.Format
    iex> Cloudinary.Format.is_supported(:png)
    true
    iex> Cloudinary.Format.is_supported(:txt)
    false
"""
defguard is_supported(format)
when format in [
:ai,
:gif,
:webp,
:bmp,
:djvu,
:ps,
:ept,
:eps,
:eps3,
:fbx,
:flif,
:gltf,
:heif,
:heic,
:ico,
:indd,
:jpg,
:jpe,
:jpeg,
:jp2,
:wdp,
:jxr,
:hdp,
:pdf,
:png,
:psd,
:arw,
:cr2,
:svg,
:tga,
:tif,
:tiff,
:"3g2",
:"3gp",
:avi,
:flv,
:m3u8,
:ts,
:m2ts,
:mts,
:mov,
:mkv,
:mp4,
:mpeg,
:mpd,
:mxf,
:ogv,
:webm,
:wmv,
:aac,
:aiff,
:amr,
:flac,
:m4a,
:mp3,
:ogg,
:opus,
:wav
]
end
|
lib/cloudinary/format.ex
|
defmodule Membrane.WAV.Serializer do
@moduledoc """
Element responsible for raw audio serialization to WAV format.
Creates a WAV header (described in `Membrane.WAV.Parser`) from the received caps
and puts it before the audio samples. The element assumes that the audio is in PCM format.
`File length` and `data length` can be calculated only after processing all samples, so
these values are invalid (always set to 0). Use `Membrane.WAV.Postprocessing.fix_wav_header/1`
to fix them.
The element has one option - `frames_per_buffer`. It specifies the number of frames sent in
one buffer when the demand unit on the output is `:buffers`. One frame contains
`bits per sample` x `number of channels` bits.
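For example, with 16-bit stereo audio one frame is 2 channels x 16 bits = 32 bits
(4 bytes), so the default of 2048 frames per buffer converts each demanded buffer
into a demand for 8192 bytes on the input.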
"""
use Membrane.Filter
alias Membrane.Buffer
alias Membrane.Caps.Audio.Raw, as: Caps
alias Membrane.Caps.Audio.Raw.Format
@file_length 0
@data_length 0
@audio_format 1
@format_chunk_length 16
def_options frames_per_buffer: [
type: :integer,
spec: pos_integer(),
description: """
Assumed number of raw audio frames in each buffer.
Used when converting demand from buffers into bytes.
""",
default: 2048
]
def_output_pad :output,
mode: :pull,
availability: :always,
caps: :any
def_input_pad :input,
mode: :pull,
availability: :always,
demand_unit: :bytes,
caps: Caps
@impl true
def handle_init(options) do
state =
options
|> Map.from_struct()
|> Map.put(:header_created, false)
{:ok, state}
end
@impl true
def handle_caps(:input, caps, _context, state) do
buffer = %Buffer{payload: create_header(caps)}
state = %{state | header_created: true}
{{:ok, caps: {:output, caps}, buffer: {:output, buffer}, redemand: :output}, state}
end
@impl true
def handle_demand(:output, _size, _unit, _context, %{header_created: false} = state) do
{:ok, state}
end
def handle_demand(:output, size, :bytes, _context, %{header_created: true} = state) do
{{:ok, demand: {:input, size}}, state}
end
def handle_demand(
:output,
buffers_count,
:buffers,
context,
%{header_created: true, frames_per_buffer: frames} = state
) do
caps = context.pads.output.caps
demand_size = Caps.frames_to_bytes(frames, caps) * buffers_count
{{:ok, demand: {:input, demand_size}}, state}
end
@impl true
def handle_process(:input, buffer, _context, %{header_created: true} = state) do
{{:ok, buffer: {:output, buffer}, redemand: :output}, state}
end
def handle_process(:input, _buffer, _context, %{header_created: false}) do
raise(RuntimeError, "buffer received before caps, so the header is not created yet")
end
defp create_header(%Caps{channels: channels, sample_rate: sample_rate, format: format}) do
{_signedness, bits_per_sample, _endianness} = Format.to_tuple(format)
data_transmission_rate = ceil(channels * sample_rate * bits_per_sample / 8)
block_alignment_unit = ceil(channels * bits_per_sample / 8)
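# Standard 44-byte RIFF/WAVE header; all multi-byte fields are little-endian,
# and the two length fields are placeholders (see the moduledoc).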
<<
"RIFF",
@file_length::32-little,
"WAVE",
"fmt ",
@format_chunk_length::32-little,
@audio_format::16-little,
channels::16-little,
sample_rate::32-little,
data_transmission_rate::32-little,
block_alignment_unit::16-little,
bits_per_sample::16-little,
"data",
@data_length::32-little
>>
end
end
|
lib/membrane_wav/serializer.ex
|
defmodule VegaLite do
@moduledoc """
Elixir bindings to [Vega-Lite](https://vega.github.io/vega-lite).
Vega-Lite offers a high-level grammar for composing interactive graphics,
where every graphic is specified in a declarative fashion relying solely
on JSON syntax. To learn more about Vega-Lite please refer to
the [documentation](https://vega.github.io/vega-lite/docs)
and explore numerous [examples](https://vega.github.io/vega-lite/examples).
This package offers a tiny layer of functionality that makes it easier
to build a Vega-Lite graphics specification.
## Composing graphics
We offer a lightweight pipeline API akin to the JSON specification.
Translating existing Vega-Lite specifications to this API
should be very intuitive in most cases.
Composing a basic Vega-Lite graphic usually consists of the following steps:
alias VegaLite, as: Vl
# Initialize the specification, optionally with some top-level properties
Vl.new(width: 400, height: 400)
# Specify data source for the graphic, see the data_from_* functions
|> Vl.data_from_series(iteration: 1..100, score: 1..100)
# |> Vl.data_from_values([%{iteration: 1, score: 1}, ...])
# |> Vl.data_from_url("...")
# Pick a visual mark for the graphic
|> Vl.mark(:line)
# |> Vl.mark(:point, tooltip: true)
# Map data fields to visual properties of the mark, like position or shape
|> Vl.encode_field(:x, "iteration", type: :quantitative)
|> Vl.encode_field(:y, "score", type: :quantitative)
# |> Vl.encode(:color, "country", type: :nominal)
# |> Vl.encode(:size, "count", type: :quantitative)
Then, you can compose multiple graphics using `layers/2`, `concat/3`,
`repeat/3` or `facet/3`.
Vl.new()
|> Vl.data_from_url("https://vega.github.io/editor/data/weather.csv")
|> Vl.transform(filter: "datum.location == 'Seattle'")
|> Vl.concat([
Vl.new()
|> Vl.mark(:bar)
|> Vl.encode_field(:x, "date", time_unit: :month, type: :ordinal)
|> Vl.encode_field(:y, "precipitation", aggregate: :mean),
Vl.new()
|> Vl.mark(:point)
|> Vl.encode_field(:x, "temp_min", bin: true)
|> Vl.encode_field(:y, "temp_max", bin: true)
|> Vl.encode(:size, aggregate: :count)
])
Additionally, you can use `transform/2` to preprocess the data,
`param/3` for introducing interactivity and `config/2` for
global customization.
### Using JSON specification
Alternatively you can parse a Vega-Lite JSON specification directly.
This approach makes it easy to explore numerous examples available online.
alias VegaLite, as: Vl
Vl.from_json(\"\"\"
{
"data": { "url": "https://vega.github.io/editor/data/cars.json" },
"mark": "point",
"encoding": {
"x": { "field": "Horsepower", "type": "quantitative" },
"y": { "field": "Miles_per_Gallon", "type": "quantitative" }
}
}
\"\"\")
The result of `VegaLite.from_json/1` function can then be passed
through any other function to further customize the specification.
In particular, it may be useful to parse a JSON specification
and add your custom data with `VegaLite.data_from_values/3`
or `VegaLite.data_from_series/3`.
## Options
Most `VegaLite` functions accept an optional list of options,
which are converted directly as the specification properties.
To provide a more Elixir-friendly experience, the options
are automatically normalized, so you can use keyword lists
and snake-case atom keys.
"""
@schema_url "https://vega.github.io/schema/vega-lite/v5.json"
defstruct spec: %{"$schema" => @schema_url}
alias VegaLite.Utils
@type t :: %__MODULE__{
spec: spec()
}
@type spec :: map()
@doc """
Returns a new specification wrapped in the `VegaLite` struct.
All provided options are converted to top-level properties
of the specification.
## Examples
Vl.new(
title: "My graph",
width: 200,
height: 200
)
|> ...
See [the docs](https://vega.github.io/vega-lite/docs/spec.html) for more details.
"""
@spec new(keyword()) :: t()
def new(opts \\ []) do
vl = %VegaLite{}
vl_props = opts_to_vl_props(opts)
update_in(vl.spec, fn spec -> Map.merge(spec, vl_props) end)
end
@compile {:no_warn_undefined, {Jason, :decode!, 1}}
@compile {:no_warn_undefined, {Jason, :encode!, 1}}
@doc """
Parses the given Vega-Lite JSON specification
and wraps in the `VegaLite` struct for further processing.
## Examples
Vl.from_json(\"\"\"
{
"data": { "url": "https://vega.github.io/editor/data/cars.json" },
"mark": "point",
"encoding": {
"x": { "field": "Horsepower", "type": "quantitative" },
"y": { "field": "Miles_per_Gallon", "type": "quantitative" }
}
}
\"\"\")
See [the docs](https://vega.github.io/vega-lite/docs/spec.html) for more details.
"""
@spec from_json(String.t()) :: t()
def from_json(json) do
Utils.assert_jason!("from_json/1")
json
|> Jason.decode!()
|> from_spec()
end
@doc """
Wraps the given Vega-Lite specification in the `VegaLite`
struct for further processing.
There is also `from_json/1` that handles JSON parsing for you.
See [the docs](https://vega.github.io/vega-lite/docs/spec.html) for more details.
"""
@spec from_spec(spec()) :: t()
def from_spec(spec) do
%VegaLite{spec: spec}
end
@doc """
Returns the underlying Vega-Lite specification.
The result is a nested Elixir data structure that serializes
to the Vega-Lite JSON specification.
See [the docs](https://vega.github.io/vega-lite/docs/spec.html) for more details.
"""
@spec to_spec(t()) :: spec()
def to_spec(vl) do
vl.spec
end
@doc """
Sets data properties in the specification.
Defining the data source is usually the first step
when building a graphic. For most use cases it's preferable
to use more specific functions like `data_from_url/3`, `data_from_values/3`,
or `data_from_series/3`.
All provided options are converted to data properties.
## Examples
Vl.new()
|> Vl.data(sequence: [start: 0, stop: 12.7, step: 0.1, as: "x"])
|> ...
See [the docs](https://vega.github.io/vega-lite/docs/data.html) for more details.
"""
@spec data(t(), keyword()) :: t()
def data(vl, opts) do
validate_at_least_one!(opts, "data property")
update_in(vl.spec, fn spec ->
vl_props = opts_to_vl_props(opts)
Map.put(spec, "data", vl_props)
end)
end
defp validate_at_least_one!(opts, name) do
if not is_list(opts) do
raise ArgumentError, "expected opts to be a list, got: #{inspect(opts)}"
end
if opts == [] do
raise ArgumentError, "expected at least one #{name}, but none was given"
end
end
@doc """
Sets data URL in the specification.
The URL should be accessible by whichever client renders
the specification, so preferably an absolute one.
All provided options are converted to data properties.
## Examples
Vl.new()
|> Vl.data_from_url("https://vega.github.io/editor/data/penguins.json")
|> ...
Vl.new()
|> Vl.data_from_url("https://vega.github.io/editor/data/stocks.csv", format: :csv)
|> ...
See [the docs](https://vega.github.io/vega-lite/docs/data.html#url) for more details.
"""
@spec data_from_url(t(), String.t(), keyword()) :: t()
def data_from_url(vl, url, opts \\ []) when is_binary(url) do
opts = put_in(opts[:url], url)
data(vl, opts)
end
@doc """
Sets inline data in the specification.
`values` should be an enumerable of data records,
where each record is a key-value structure.
All provided options are converted to data properties.
## Examples
data = [
%{"category" => "A", "score" => 28},
%{"category" => "B", "score" => 55}
]
Vl.new()
|> Vl.data_from_values(data)
|> ...
See [the docs](https://vega.github.io/vega-lite/docs/data.html#inline) for more details.
"""
@spec data_from_values(t(), Enumerable.t(), keyword()) :: t()
def data_from_values(vl, values, opts \\ []) do
values =
Enum.map(values, fn value ->
Map.new(value, fn {key, value} ->
{to_string(key), value}
end)
end)
opts = put_in(opts[:values], values)
data(vl, opts)
end
@doc """
Sets inline data in the specification.
This is an alternative to `data_from_values/3`,
useful when you have a separate list of values
for each data column.
## Examples
xs = 1..100
ys = 1..100
Vl.new()
|> Vl.data_from_series(x: xs, y: ys)
|> ...
"""
@spec data_from_series(t(), Enumerable.t(), keyword()) :: t()
def data_from_series(vl, series, opts \\ []) do
{keys, value_series} = Enum.unzip(series)
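# Transpose the column-oriented series into row-oriented records,
# e.g. [x: [1, 2], y: [10, 20]] becomes [[x: 1, y: 10], [x: 2, y: 20]]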
values =
value_series
|> Enum.zip()
|> Enum.map(fn row ->
row = Tuple.to_list(row)
Enum.zip(keys, row)
end)
data_from_values(vl, values, opts)
end
@channels ~w(
x y x2 y2 x_error y_error x_error2 y_error2
theta theta2 radius radius2
longitude latitude longitude2 latitude2
angle color fill stroke opacity fill_opacity stroke_opacity shape size stroke_dash stroke_width
text tooltip
href
description
detail
key
order
facet row column
)a
@doc """
Adds an encoding entry to the specification.
Visual channel represents a property of a visual mark,
for instance the `:x` and `:y` channels specify where
a point should be placed.
Encoding defines the source of values for those channels.
In most cases you want to map specific data field
to visual channels, prefer the `encode_field/4` function for that.
All provided options are converted to channel properties.
## Examples
Vl.new()
|> Vl.encode(:x, value: 2)
|> ...
Vl.new()
|> Vl.encode(:y, aggregate: :count, type: :quantitative)
|> ...
Vl.new()
|> Vl.encode(:y, field: "price")
|> ...
See [the docs](https://vega.github.io/vega-lite/docs/encoding.html) for more details.
"""
@spec encode(t(), atom(), keyword()) :: t()
def encode(vl, channel, opts) do
validate_channel!(channel)
if not Enum.any?([:field, :value, :datum], &Keyword.has_key?(opts, &1)) and
opts[:aggregate] != :count do
raise ArgumentError,
"channel definition must include one of the following keys: :field, :value, :datum, but none was given"
end
update_in(vl.spec, fn spec ->
vl_channel = to_vl_key(channel)
vl_props = opts_to_vl_props(opts)
encoding =
spec
|> Map.get("encoding", %{})
|> Map.put(vl_channel, vl_props)
Map.put(spec, "encoding", encoding)
end)
end
defp validate_channel!(channel) do
validate_inclusion!(@channels, channel, "channel")
end
defp validate_inclusion!(list, value, name) do
if value not in list do
list_str = list |> Enum.map(&inspect/1) |> Enum.join(", ")
raise ArgumentError,
"unknown #{name}, expected one of #{list_str}, got: #{inspect(value)}"
end
end
@doc """
Adds field encoding entry to the specification.
A shorthand for `encode/3`, mapping a data field to a visual channel.
For example, if the data has `"price"` and `"time"` fields,
you could map `"time"` to the `:x` channel and `"price"`
to the `:y` channel. This, combined with a line mark,
would then result in price-over-time plot.
All provided options are converted to channel properties.
## Types
Field data type is automatically inferred, but oftentimes
needs to be specified explicitly to get the desired result.
The `:type` option can be either of:
* `:quantitative` - when the field expresses some kind of quantity, typically numerical
* `:temporal` - when the field represents a point in time
* `:nominal` - when the field represents a category
* `:ordinal` - when the field represents a ranked order.
It is similar to `:nominal`, but there is a clear order of values
* `:geojson` - when the field represents a geographic shape
adhering to the [GeoJSON](https://geojson.org) specification
See [the docs](https://vega.github.io/vega-lite/docs/type.html) for more details on types.
## Examples
Vl.new()
|> Vl.data_from_values(...)
|> Vl.mark(:point)
|> Vl.encode_field(:x, "time", type: :temporal)
|> Vl.encode_field(:y, "price", type: :quantitative)
|> Vl.encode_field(:color, "country", type: :nominal)
|> Vl.encode_field(:size, "count", type: :quantitative)
|> ...
Vl.new()
|> Vl.encode_field(:x, "date", time_unit: :month, title: "Month")
|> Vl.encode_field(:y, "price", type: :quantitative, aggregate: :mean, title: "Mean product price")
|> ...
See [the docs](https://vega.github.io/vega-lite/docs/encoding.html#field-def) for more details.
"""
@spec encode_field(t(), atom(), String.t(), keyword()) :: t()
def encode_field(vl, channel, field, opts \\ []) do
if not is_binary(field) do
raise ArgumentError, "field must be a string, got: #{inspect(field)}"
end
opts = put_in(opts[:field], field)
encode(vl, channel, opts)
end
@doc """
Adds repeated field encoding entry to the specification.
A shorthand for `encode/3`, mapping a field to a visual channel,
as given by the repeat operator.
Repeat type must be either `:repeat`, `:row`, `:column` or `:layer`
and correspond to the repeat definition.
All provided options are converted to channel properties.
## Examples
See `repeat/3` to see the full picture.
See [the docs](https://vega.github.io/vega-lite/docs/repeat.html) for more details.
"""
@spec encode_repeat(t(), atom(), :repeat | :row | :column | :layer, keyword()) :: t()
def encode_repeat(vl, channel, repeat_type, opts \\ []) do
validate_inclusion!([:repeat, :row, :column, :layer], repeat_type, "repeat type")
opts = Keyword.put(opts, :field, repeat: repeat_type)
encode(vl, channel, opts)
end
@mark_types ~w(
arc area bar boxplot circle errorband errorbar geoshape image line point rect rule square text tick trail
)a
@doc """
Sets mark type in the specification.
Mark is a predefined visual object like a point or a line.
Visual properties of the mark are defined by encoding.
All provided options are converted to mark properties.
## Examples
Vl.new()
|> Vl.mark(:point)
|> ...
Vl.new()
|> Vl.mark(:point, tooltip: true)
|> ...
See [the docs](https://vega.github.io/vega-lite/docs/mark.html) for more details.
"""
@spec mark(t(), atom(), keyword()) :: t()
def mark(vl, type, opts \\ [])
def mark(vl, type, []) do
validate_blank_view!(vl, "cannot add mark to the view")
validate_inclusion!(@mark_types, type, "mark type")
update_in(vl.spec, fn spec ->
vl_type = to_vl_key(type)
Map.put(spec, "mark", vl_type)
end)
end
def mark(vl, type, opts) do
validate_blank_view!(vl, "cannot add mark to the view")
validate_inclusion!(@mark_types, type, "mark type")
update_in(vl.spec, fn spec ->
vl_type = to_vl_key(type)
vl_props =
opts
|> opts_to_vl_props()
|> Map.put("type", vl_type)
Map.put(spec, "mark", vl_props)
end)
end
@doc """
Adds a transformation to the specification.
Transformation describes an operation on data,
like calculating new fields, aggregating or filtering.
All provided options are converted to transform properties.
## Examples
Vl.new()
|> Vl.data_from_values(...)
|> Vl.transform(calculate: "sin(datum.x)", as: "sin_x")
|> ...
Vl.new()
|> Vl.data_from_values(...)
|> Vl.transform(filter: "datum.height > 150")
|> ...
Vl.new()
|> Vl.data_from_values(...)
|> Vl.transform(regression: "price", on: "date")
|> ...
See [the docs](https://vega.github.io/vega-lite/docs/transform.html) for more details.
"""
@spec transform(t(), keyword()) :: t()
def transform(vl, opts) do
validate_at_least_one!(opts, "transform property")
update_in(vl.spec, fn spec ->
transforms = Map.get(spec, "transform", [])
transform = opts_to_vl_props(opts)
Map.put(spec, "transform", transforms ++ [transform])
end)
end
@doc """
Adds a parameter to the specification.
Parameters are the basic building blocks for introducing
interactions to graphics.
All provided options are converted to parameter properties.
## Examples
Vl.new()
|> Vl.data_from_values(...)
|> Vl.concat([
Vl.new()
# Define a parameter named "brush", whose value is a user-selected interval on the x axis
|> Vl.param("brush", select: [type: :interval, encodings: [:x]])
|> Vl.mark(:area)
|> Vl.encode_field(:x, "date", type: :temporal)
|> ...,
Vl.new()
|> Vl.mark(:area)
# Use the "brush" parameter value to limit the domain of this view
|> Vl.encode_field(:x, "date", type: :temporal, scale: [domain: [param: "brush"]])
|> ...
])
See [the docs](https://vega.github.io/vega-lite/docs/parameter.html) for more details.
"""
@spec param(t(), String.t(), keyword()) :: t()
def param(vl, name, opts) do
validate_at_least_one!(opts, "parameter property")
update_in(vl.spec, fn spec ->
params = Map.get(spec, "params", [])
param =
opts
|> opts_to_vl_props()
|> Map.put("name", name)
Map.put(spec, "params", params ++ [param])
end)
end
@doc """
Adds view configuration to the specification.
Configuration allows for setting general properties of the visualization.
All provided options are converted to configuration properties
and merged with the existing configuration in a shallow manner.
## Examples
Vl.new()
|> ...
|> Vl.config(
view: [stroke: :transparent],
padding: 100,
background: "#333333"
)
See [the docs](https://vega.github.io/vega-lite/docs/config.html) for more details.
"""
@spec config(t(), keyword()) :: t()
def config(vl, opts) do
validate_at_least_one!(opts, "config property")
update_in(vl.spec, fn spec ->
vl_props = opts_to_vl_props(opts)
config =
spec
|> Map.get("config", %{})
|> Map.merge(vl_props)
Map.put(spec, "config", config)
end)
end
@doc """
Adds a projection spec to the specification.
Projection maps longitude and latitude pairs to x, y coordinates.
## Examples
Vl.new()
|> Vl.data_from_values(...)
|> Vl.projection(type: :albers_usa)
|> Vl.mark(:circle)
|> Vl.encode_field(:longitude, "longitude", type: :quantitative)
|> Vl.encode_field(:latitude, "latitude", type: :quantitative)
See [the docs](https://vega.github.io/vega-lite/docs/projection.html) for more details.
"""
@spec projection(t(), keyword()) :: t()
def projection(vl, opts) do
validate_at_least_one!(opts, "projection property")
update_in(vl.spec, fn spec ->
vl_props = opts_to_vl_props(opts)
Map.put(spec, "projection", vl_props)
end)
end
@doc """
Builds a layered multi-view specification from the given
list of single view specifications.
## Examples
Vl.new()
|> Vl.data_from_values(...)
|> Vl.layers([
Vl.new()
|> Vl.mark(:line)
|> Vl.encode_field(:x, ...)
|> Vl.encode_field(:y, ...),
Vl.new()
|> Vl.mark(:rule)
|> Vl.encode_field(:y, ...)
|> Vl.encode(:size, value: 2)
])
Vl.new()
|> Vl.data_from_values(...)
# Note: top-level data, encoding, transforms are inherited
# by the child views unless overridden
|> Vl.encode_field(:x, ...)
|> Vl.layers([
...
])
See [the docs](https://vega.github.io/vega-lite/docs/layer.html) for more details.
"""
@spec layers(t(), list(t())) :: t()
def layers(vl, child_views) do
multi_view_from_children(vl, child_views, "layer", "cannot build a layered view")
end
@doc """
Builds a concatenated multi-view specification from
the given list of single view specifications.
The concat type must be either `:wrappable` (default), `:horizontal` or `:vertical`.
## Examples
Vl.new()
|> Vl.data_from_values(...)
|> Vl.concat([
Vl.new()
|> ...,
Vl.new()
|> ...,
Vl.new()
|> ...
])
Vl.new()
|> Vl.data_from_values(...)
|> Vl.concat(
[
Vl.new()
|> ...,
Vl.new()
|> ...
],
:horizontal
)
See [the docs](https://vega.github.io/vega-lite/docs/concat.html) for more details.
"""
@spec concat(t(), list(t()), :wrappable | :horizontal | :vertical) :: t()
def concat(vl, child_views, type \\ :wrappable) do
vl_key =
case type do
:wrappable ->
"concat"
:horizontal ->
"hconcat"
:vertical ->
"vconcat"
type ->
raise ArgumentError,
"invalid concat type, expected :wrappable, :horizontal or :vertical, got: #{inspect(type)}"
end
multi_view_from_children(vl, child_views, vl_key, "cannot build a concatenated view")
end
defp multi_view_from_children(vl, child_views, vl_key, error_message) do
validate_blank_view!(vl, error_message)
child_specs = Enum.map(child_views, &to_child_view_spec!/1)
update_in(vl.spec, fn spec ->
Map.put(spec, vl_key, child_specs)
end)
end
@doc """
Builds a facet multi-view specification from the given
single-view template.
Facet definition must be either a [field definition](https://vega.github.io/vega-lite/docs/facet.html#field-def)
or a [row/column mapping](https://vega.github.io/vega-lite/docs/facet.html#mapping).
Note that you can also create facet graphics by using
the `:facet`, `:column` and `:row` encoding channels.
## Examples
Vl.new()
|> Vl.data_from_values(...)
|> Vl.facet(
[field: "country"],
Vl.new()
|> Vl.mark(:bar)
|> Vl.encode_field(:x, ...)
|> Vl.encode_field(:y, ...)
)
Vl.new()
|> Vl.data_from_values(...)
|> Vl.facet(
[
row: [field: "country", title: "Country"],
column: [field: "year", title: "Year"]
],
Vl.new()
|> Vl.mark(:bar)
|> Vl.encode_field(:x, ...)
|> Vl.encode_field(:y, ...)
)
See [the docs](https://vega.github.io/vega-lite/docs/facet.html#facet-operator) for more details.
"""
@spec facet(t(), keyword(), t()) :: t()
def facet(vl, facet_def, child_view) do
validate_blank_view!(vl, "cannot build a facet view")
vl_facet =
cond do
Keyword.keyword?(facet_def) and
Enum.any?([:field, :row, :column], &Keyword.has_key?(facet_def, &1)) ->
opts_to_vl_props(facet_def)
true ->
raise ArgumentError,
"facet definition must be either a field definition (keywrod list with the :field key) or a mapping with :row/:column keys, got: #{inspect(facet_def)}"
end
child_spec = to_child_view_spec!(child_view)
update_in(vl.spec, fn spec ->
spec
|> Map.put("facet", vl_facet)
|> Map.put("spec", child_spec)
end)
end
@doc """
Builds a repeated multi-view specification from the given
single-view template.
Repeat definition must be either a list of fields
or a [row/column/layer mapping](https://vega.github.io/vega-lite/docs/repeat.html#repeat-mapping).
Then some channels can be bound to a repeated field using `encode_repeat/4`.
## Examples
# Simple repeat
Vl.new()
|> Vl.data_from_values(...)
|> Vl.repeat(
["temp_max", "precipitation", "wind"],
Vl.new()
|> Vl.mark(:line)
|> Vl.encode_field(:x, "date", time_unit: :month)
# The graphic will be repeated with :y mapped to "temp_max",
# "precipitation" and "wind" respectively
|> Vl.encode_repeat(:y, :repeat, aggregate: :mean)
)
# Grid repeat
Vl.new()
|> Vl.data_from_values(...)
|> Vl.repeat(
[
row: [
"beak_length",
"beak_depth",
"flipper_length",
"body_mass"
],
column: [
"body_mass",
"flipper_length",
"beak_depth",
"beak_length"
]
],
Vl.new()
|> Vl.mark(:point)
# The graphic will be repeated for every combination of :x and :y
# taken from the :row and :column lists above
|> Vl.encode_repeat(:x, :column, type: :quantitative)
|> Vl.encode_repeat(:y, :row, type: :quantitative)
)
See [the docs](https://vega.github.io/vega-lite/docs/repeat.html) for more details.
"""
@spec repeat(t(), keyword(), t()) :: t()
def repeat(vl, repeat_def, child_view) do
validate_blank_view!(vl, "cannot build a repeated view")
vl_repeat =
cond do
is_list(repeat_def) and Enum.all?(repeat_def, &is_binary/1) ->
repeat_def
Keyword.keyword?(repeat_def) and
Enum.any?([:row, :column, :layer], &Keyword.has_key?(repeat_def, &1)) ->
opts_to_vl_props(repeat_def)
true ->
raise ArgumentError,
"repeat definition must be either list of fields or a mapping with :row/:column/:layer keys, got: #{inspect(repeat_def)}"
end
child_spec = to_child_view_spec!(child_view)
update_in(vl.spec, fn spec ->
spec
|> Map.put("repeat", vl_repeat)
|> Map.put("spec", child_spec)
end)
end
@single_view_only_keys ~w(mark)a
@multi_view_only_keys ~w(layer hconcat vconcat concat repeat facet spec)a
# Validates if the given specification is already either single-view or multi-view
defp validate_blank_view!(vl, error_message) do
for key <- @single_view_only_keys, Map.has_key?(vl.spec, to_vl_key(key)) do
raise ArgumentError,
"#{error_message}, because it is already a single-view specification (has the #{inspect(key)} key defined)"
end
for key <- @multi_view_only_keys, Map.has_key?(vl.spec, to_vl_key(key)) do
raise ArgumentError,
"#{error_message}, because it is already a multi-view specification (has the #{inspect(key)} key defined)"
end
end
@top_level_keys ~w($schema background padding autosize config usermeta)a
defp to_child_view_spec!(vl) do
spec = vl |> to_spec() |> Map.delete("$schema")
for key <- @top_level_keys, Map.has_key?(spec, to_vl_key(key)) do
raise ArgumentError,
"child view specification cannot have top-level keys, found: #{inspect(key)}"
end
spec
end
@resolve_keys ~w(scale axis legend)a
@doc """
Adds a resolve entry to the specification.
Resolution defines how multi-view graphics are combined
with regard to scales, axis and legend.
## Example
Vl.new()
|> Vl.data_from_values(...)
|> Vl.layers([
Vl.new()
|> ...,
Vl.new()
|> ...
])
|> Vl.resolve(:scale, y: :independent)
See [the docs](https://vega.github.io/vega-lite/docs/resolve.html) for more details.
"""
@spec resolve(t(), atom(), keyword()) :: t()
def resolve(vl, key, opts) do
validate_inclusion!(@resolve_keys, key, "resolution key")
validate_at_least_one!(opts, "resolve property")
for {channel, resolution} <- opts do
validate_inclusion!(@channels, channel, "resolution channel")
validate_inclusion!([:shared, :independent], resolution, "resolution type")
end
update_in(vl.spec, fn spec ->
vl_key = to_vl_key(key)
vl_props = opts_to_vl_props(opts)
config =
spec
|> Map.get("resolve", %{})
|> Map.put(vl_key, vl_props)
Map.put(spec, "resolve", config)
end)
end
# Helpers
defp opts_to_vl_props(opts) do
opts |> Map.new() |> to_vl()
end
defp to_vl(value) when value in [true, false, nil], do: value
defp to_vl(atom) when is_atom(atom), do: to_vl_key(atom)
defp to_vl(map) when is_map(map) do
Map.new(map, fn {key, value} ->
{to_vl(key), to_vl(value)}
end)
end
defp to_vl([{key, _} | _] = keyword) when is_atom(key) do
Map.new(keyword, fn {key, value} ->
{to_vl(key), to_vl(value)}
end)
end
defp to_vl(list) when is_list(list) do
Enum.map(list, &to_vl/1)
end
defp to_vl(value), do: value
defp to_vl_key(key) when is_atom(key) do
key |> to_string() |> snake_to_camel()
end
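# Vega-Lite expects camelCase keys, so atom keys like :stroke_width
# are serialized as "strokeWidth".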
defp snake_to_camel(string) do
[part | parts] = String.split(string, "_")
Enum.join([String.downcase(part, :ascii) | Enum.map(parts, &String.capitalize(&1, :ascii))])
end
end
|
lib/vega_lite.ex
|
defmodule Bacen.CCS.Message do
@moduledoc """
The base message for CCS messages.
This part of the XML is required for any message
sent to or received from Bacen's system.
An example of this message in XML:
```xml
<?xml version="1.0"?>
<CCSDOC>
<BCARQ>
<IdentdEmissor>12345678</IdentdEmissor>
<IdentdDestinatario>87654321</IdentdDestinatario>
<NomArq>ACCS001</NomArq>
<NumRemessaArq>000000000000</NumRemessaArq>
</BCARQ>
<SISARQ>
<!-- Any ACCS/CCS messsage here -->
</SISARQ>
</CCSDOC>
```
"""
use Ecto.Schema
import Ecto.Changeset
alias Bacen.CCS.MessageBody
@typedoc """
The base message from CCS
"""
@type t :: %__MODULE__{}
@message_fields ~w(body)a
@header_fields ~w(issuer_id recipient_id file_name file_id)a
@header_fields_source_sequence ~w(IdentdEmissor IdentdDestinatario NomArq NumRemessaArq)a
@primary_key false
embedded_schema do
embeds_one :message, BaseMessage, source: :CCSDOC, primary_key: false do
embeds_one :header, Header, source: :BCARQ, primary_key: false do
field :issuer_id, :string, source: :IdentdEmissor
field :recipient_id, :string, source: :IdentdDestinatario
field :file_name, :string, source: :NomArq
field :file_id, :string, source: :NumRemessaArq
end
field :body, MessageBody, source: :SISARQ
end
end
@doc """
Creates a new CCS message from given attributes.
"""
@spec new(map()) :: {:ok, t()} | {:error, Ecto.Changeset.t()}
def new(attrs) when is_map(attrs) do
attrs
|> changeset()
|> apply_action(:insert)
end
@doc false
def changeset(schema \\ %__MODULE__{}, attrs) when is_map(attrs) do
schema
|> cast(attrs, [])
|> cast_embed(:message, with: &message_changeset/2, required: true)
end
@doc false
def message_changeset(message, attrs) when is_map(attrs) do
message
|> cast(attrs, @message_fields)
|> cast_embed(:header, with: &header_changeset/2, required: true)
|> validate_required(@message_fields)
end
@doc false
def header_changeset(header, attrs) when is_map(attrs) do
header
|> cast(attrs, @header_fields)
|> validate_required(@header_fields)
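# The format regexes below are unanchored; exact lengths are already enforced
# by validate_length/3, so together the checks require all-digit values.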
|> validate_length(:issuer_id, is: 8)
|> validate_format(:issuer_id, ~r/[0-9]{8}/)
|> validate_length(:recipient_id, is: 8)
|> validate_format(:recipient_id, ~r/[0-9]{8}/)
|> validate_length(:file_id, is: 12)
|> validate_format(:file_id, ~r/[0-9]{12}/)
|> validate_length(:file_name, max: 80)
end
@doc """
Returns the field sequence for given root xml element
## Examples
iex> Bacen.CCS.Message.sequence(:BCARQ)
[:IdentdEmissor, :IdentdDestinatario, :NomArq, :NumRemessaArq]
"""
@spec sequence(:BCARQ) :: list(atom())
def sequence(:BCARQ), do: @header_fields_source_sequence
@doc """
Returns the module name from `BaseMessage` body
## Examples
iex> alias Bacen.CCS.Message
iex> alias Bacen.CCS.ACCS002
iex> message = %Message{message: %Message.BaseMessage{body: %ACCS002{}}}
iex> Message.get_schema_from_body(message)
Bacen.CCS.ACCS002
"""
@spec get_schema_from_body(t()) :: Bacen.CCS.schemas()
def get_schema_from_body(%__MODULE__{message: %__MODULE__.BaseMessage{body: %module{}}}),
do: module
end
|
lib/bacen/ccs/message.ex
|
defmodule Ibanity.Sandbox.FinancialInstitutionAccount do
@moduledoc """
[Financial institution account](https://documentation.ibanity.com/xs2a/api#financial-institution-account) API wrapper
"""
use Ibanity.Resource
defstruct id: nil,
available_balance: nil,
currency: nil,
current_balance: nil,
description: nil,
reference: nil,
reference_type: nil,
subtype: nil,
created_at: nil,
updated_at: nil,
internal_reference: nil,
product: nil,
holder_name: nil,
current_balance_changed_at: nil,
current_balance_variation_observed_at: nil,
current_balance_reference_date: nil,
available_balance_changed_at: nil,
available_balance_variation_observed_at: nil,
available_balance_reference_date: nil,
authorized_at: nil,
authorization_expiration_expected_at: nil,
financial_institution_user: nil,
financial_institution_user_id: nil,
financial_institution_id: nil,
transactions: nil
@api_schema_path ["sandbox", "financialInstitution", "financialInstitutionAccounts"]
@resource_type "financial_institution_account"
@doc """
[Creates a new financial institution account](https://documentation.ibanity.com/xs2a/api#create-financial-institution-account).
Returns `{:ok, account}` when successful, `{:error, reason}` otherwise.
## Example
iex> [
...> subtype: "checking",
...> reference: "BE456789012",
...> reference_type: "IBAN",
...> description: "Savings account",
...> currency: "EUR"
...> ]
...> |> Request.attributes
...> |> Request.id(:financial_institution_id, "b5d6c5f9-e1d2-4cd1-a2aa-7baf964f7bf7")
...> |> Request.id(:financial_institution_user_id, "a64f42ec-c801-41a7-8801-0f815ca42e9e")
...> |> FinancialInstitutionAccount.create
{:ok, %Ibanity.FinancialInstitutionAccount{id: "3034fe85-29ee-4ebc-9a2d-33df4e2f4602", ...}}
"""
def create(%Request{} = request) do
request
|> Request.id(:id, "")
|> Request.resource_type(@resource_type)
|> Client.execute(:post, @api_schema_path)
end
@doc """
[List all accounts](https://documentation.ibanity.com/xs2a/api#list-financial-institution-accounts)
belonging to a user, within a financial institution.
Returns `{:ok, accounts}` where `accounts` is an `Ibanity.Collection` whose items are `Ibanity.FinancialInstitutionAccount` structs.
"""
def list(institution_id, user_id) do
Request.id(:financial_institution_id, institution_id)
|> Request.id(:financial_institution_user_id, user_id)
|> list
end
@doc """
[List all accounts](https://documentation.ibanity.com/xs2a/api#list-financial-institution-accounts)
belonging to a user, within a financial institution.
Returns `{:ok, accounts}` where `accounts` is an `Ibanity.Collection` whose items are `Ibanity.FinancialInstitutionAccount` structs.
## Examples
iex> Request.id(:financial_institution_id, "b5d6c5f9-e1d2-4cd1-a2aa-7baf964f7bf7")
...> |> Request.id(:financial_institution_user_id, "a64f42ec-c801-41a7-8801-0f815ca42e9e")
...> |> FinancialInstitutionAccount.list
{:ok, %Ibanity.Collection{items: [%Ibanity.FinancialInstitutionAccount{...}], ...}}
"""
def list(%Request{} = request) do
request
|> Request.id(:id, "")
|> Client.execute(:get, @api_schema_path)
end
@doc """
[Retrieves an account](https://documentation.ibanity.com/xs2a/api#get-financial-institution-account)
belonging to a user and a financial institution.
Returns `{:ok, account}` when successful, `{:error, reason}` otherwise.
## Example
iex> FinancialInstitutionAccount.find(
...> "b5d6c5f9-e1d2-4cd1-a2aa-7baf964f7bf7",
...> "a64f42ec-c801-41a7-8801-0f815ca42e9e",
...> "b1c1e046-309b-49b8-bc5d-c4b1f82f61a7",
...> )
{:ok, %Ibanity.FinancialInstitutionAccount{id: "b1c1e046-309b-49b8-bc5d-c4b1f82f61a7", ...}}
"""
def find(institution_id, user_id, account_id) do
Request.id(:financial_institution_id, institution_id)
|> Request.id(:financial_institution_user_id, user_id)
|> Request.id(:id, account_id)
|> find
end
@doc """
[Retrieves an account](https://documentation.ibanity.com/xs2a/api#get-financial-institution-account)
belonging to a user and a financial institution.
Returns `{:ok, account}` when successful, `{:error, reason}` otherwise.
## Example
iex> Request.id(:financial_institution_id, "b5d6c5f9-e1d2-4cd1-a2aa-7baf964f7bf7")
...> |> Request.id(:financial_institution_user_id, "a64f42ec-c801-41a7-8801-0f815ca42e9e")
...> |> Request.id(:id, "b1c1e046-309b-49b8-bc5d-c4b1f82f61a7")
...> |> FinancialInstitutionAccount.find
{:ok, %Ibanity.FinancialInstitutionAccount{id: "b1c1e046-309b-49b8-bc5d-c4b1f82f61a7", ...}}
"""
def find(%Request{} = request) do
request
|> Client.execute(:get, @api_schema_path)
end
@doc """
[Deletes an account](https://documentation.ibanity.com/xs2a/api#delete-financial-institution-account)
belonging to a user and a financial institution.
Returns `{:ok, account}` when successful, `{:error, reason}` otherwise.
## Example
iex> Ibanity.FinancialInstitutionAccount.delete(
...> "b5d6c5f9-e1d2-4cd1-a2aa-7baf964f7bf7",
...> "a64f42ec-c801-41a7-8801-0f815ca42e9e",
...> "b1c1e046-309b-49b8-bc5d-c4b1f82f61a7",
...> )
{:ok, %FinancialInstitutionAccount{id: "b1c1e046-309b-49b8-bc5d-c4b1f82f61a7", ...}}
"""
def delete(institution_id, user_id, account_id) do
Request.id(:financial_institution_id, institution_id)
|> Request.id(:financial_institution_user_id, user_id)
|> Request.id(:id, account_id)
|> delete
end
@doc """
[Deletes an account](https://documentation.ibanity.com/xs2a/api#delete-financial-institution-account)
belonging to a user and a financial institution.
Returns `{:ok, account}` when successful, `{:error, reason}` otherwise.
## Example
iex> Request.id(:financial_institution_id, "b5d6c5f9-e1d2-4cd1-a2aa-7baf964f7bf7")
...> |> Request.id(:financial_institution_user_id, "a64f42ec-c801-41a7-8801-0f815ca42e9e")
...> |> Request.id(:id, "b1c1e046-309b-49b8-bc5d-c4b1f82f61a7")
...> |> FinancialInstitutionAccount.delete
{:ok, %Ibanity.FinancialInstitutionAccount{id: "b1c1e046-309b-49b8-bc5d-c4b1f82f61a7", ...}}
"""
def delete(%Request{} = request) do
request
|> Client.execute(:delete, @api_schema_path)
end
@doc """
Fetches the transactions associated to this account.
Returns:
* `{:ok, transactions}` if successful, where `transactions` is an `Ibanity.Collection`
* `nil` if no transaction link was set on the structure
* `{:error, reason}` otherwise
"""
def transactions(%__MODULE__{} = account) do
if account.transactions, do: Client.get(account.transactions), else: nil
end
@doc """
Fetches the financial institution user this account belongs to.
Returns:
* `{:ok, institution}` if successful,
* `nil` if no financial institution user link was set on the structure
* `{:error, reason}` otherwise
"""
def financial_institution_user(%__MODULE__{} = account) do
if account.financial_institution_user,
do: Client.get(account.financial_institution_user),
else: nil
end
@doc false
def key_mapping do
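# Each entry maps a struct field to a {JSON path, type} pair used when
# deserializing API responses, e.g. :available_balance is read from
# attributes.availableBalance as a float.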
[
id: {~w(id), :string},
available_balance: {~w(attributes availableBalance), :float},
currency: {~w(attributes currency), :string},
current_balance: {~w(attributes currentBalance), :float},
description: {~w(attributes description), :string},
reference: {~w(attributes reference), :string},
reference_type: {~w(attributes referenceType), :string},
subtype: {~w(attributes subtype), :string},
internal_reference: {~w(attributes internalReference), :string},
product: {~w(attributes product), :string},
holder_name: {~w(attributes holderName), :string},
current_balance_changed_at: {~w(attributes currentBalanceChangedAt), :datetime},
current_balance_variation_observed_at: {~w(attributes currentBalanceVariationObservedAt), :datetime},
current_balance_reference_date: {~w(attributes currentBalanceReferenceDate), :datetime},
available_balance_changed_at: {~w(attributes availableBalanceChangedAt), :datetime},
available_balance_variation_observed_at: {~w(attributes availableBalanceVariationObservedAt), :datetime},
available_balance_reference_date: {~w(attributes availableBalanceReferenceDate), :datetime},
authorized_at: {~w(attributes authorizedAt), :datetime},
authorization_expiration_expected_at: {~w(attributes authorizationExpirationExpectedAt), :datetime},
transactions: {~w(relationships financialInstitutionTransactions links related), :string},
financial_institution_user:
{~w(relationships financialInstitutionUser links related), :string},
financial_institution_user_id:
{~w(relationships financialInstitutionUser data id), :string},
financial_institution_id: {~w(relationships financialInstitution data id), :string},
created_at: {~w(attributes createdAt), :datetime},
updated_at: {~w(attributes updatedAt), :datetime}
]
end
end
|
lib/ibanity/api/sandbox/financial_institution_account.ex
|
defmodule Tablespoon.Communicator.Btd do
@moduledoc """
Communication with the Boston Transportation Department (BTD).
The communication is over NTCIP1211 Extended packets.
- group: passed-in
- id: always 0
- id in message: increases with each request, up to 255 where it wraps back to 1
- vehicle_id: the vehicle's ID
- vehicle_class: always 2
- vehicle_class_level: always 0
- strategy: 1 - North, 2 - East, 3 - South, 4 - West
- time_of_service_desired: always 0
- time_of_estimated_departure: always 0
- intersection_id: passed-in
"""
@behaviour Tablespoon.Communicator
alias Tablespoon.Protocol.NTCIP1211Extended, as: NTCIP
alias Tablespoon.{Query, Transport, UniqueRangeCounter}
require Logger
@enforce_keys [:transport, :group, :intersection_id, :ref]
defstruct @enforce_keys ++ [timeout: 5_000, next_id: 1, in_flight: %{}]
@impl Tablespoon.Communicator
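# Builds the communicator struct; `:group` and `:intersection_id` are required
# by @enforce_keys, e.g. new(transport, group: 22, intersection_id: 1001)
# (values here are illustrative only).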
def new(transport, opts) do
struct!(__MODULE__, [transport: transport, ref: make_ref()] ++ opts)
end
@impl Tablespoon.Communicator
def connect(%__MODULE__{} = comm) do
with {:ok, transport} <- Transport.connect(comm.transport) do
{:ok, %{comm | transport: transport}, []}
end
end
@impl Tablespoon.Communicator
def close(%__MODULE__{} = comm) do
# send cancellations for requests
{comm, events} =
comm.in_flight
|> Map.values()
|> Enum.reduce({comm, []}, fn {q, timer}, {comm, events} ->
_ = Process.cancel_timer(timer)
events = [{:failed, q, :close} | events]
with %{type: :request} <- q,
q = %{q | type: :cancel},
{:ok, transport} <- raw_send(comm, q) do
{%{comm | transport: transport}, events}
else
_ -> {comm, events}
end
end)
events = Enum.reverse(events)
transport = Transport.close(comm.transport)
comm = %{comm | transport: transport, next_id: 1, in_flight: %{}}
{:ok, comm, events}
end
@impl Tablespoon.Communicator
def send(%__MODULE__{} = comm, %Query{} = q) do
case raw_send(comm, q) do
{:ok, transport} ->
# send ourselves a message to bail out if we don't get a response
timer = send_after(self(), {comm.ref, :timeout, comm.next_id, q}, comm.timeout)
in_flight = Map.put(comm.in_flight, comm.next_id, {q, timer})
{:ok,
%{comm | next_id: next_id(comm.next_id), in_flight: in_flight, transport: transport}, []}
{:error, e} ->
{:ok, comm, [{:failed, q, e}]}
end
end
@impl Tablespoon.Communicator
def stream(%__MODULE__{ref: ref} = comm, {ref, :timeout, id, q}) do
case Map.get(comm.in_flight, id) do
{^q, _} ->
in_flight = Map.delete(comm.in_flight, id)
comm = %{comm | in_flight: in_flight}
{:ok, comm, [{:failed, q, :timeout}]}
_ ->
:unknown
end
end
def stream(%__MODULE__{}, {ref, :timeout, _id, _}) when is_reference(ref) do
# timeout from an earlier version of this connection
:unknown
end
def stream(%__MODULE__{} = comm, message) do
with {:ok, transport, results} <- Transport.stream(comm.transport, message) do
comm = %{comm | transport: transport}
Enum.reduce_while(results, {:ok, comm, []}, &handle_stream_results/2)
end
end
defp raw_send(comm, q) do
# ensure the request ID is always one byte
request_id = UniqueRangeCounter.unique_integer(:btd_request_id, -128, 127)
ntcip =
NTCIP.encode(%NTCIP{
group: comm.group,
pdu_type: :set,
request_id: request_id,
message: ntcip_message(comm, q)
})
Transport.send(comm.transport, ntcip)
end
defp ntcip_message(comm, %{type: :request} = q) do
%NTCIP.PriorityRequest{
id: comm.next_id,
vehicle_id: q.vehicle_id,
vehicle_class: 2,
vehicle_class_level: 0,
strategy: ntcip_strategy(q.approach),
time_of_service_desired: 0,
time_of_estimated_departure: 0,
intersection_id: comm.intersection_id
}
end
defp ntcip_message(comm, %{type: :cancel} = q) do
%NTCIP.PriorityCancel{
id: comm.next_id,
vehicle_id: q.vehicle_id,
vehicle_class: 2,
vehicle_class_level: 0,
strategy: ntcip_strategy(q.approach),
intersection_id: comm.intersection_id
}
end
defp ntcip_strategy(:north), do: 1
defp ntcip_strategy(:east), do: 2
defp ntcip_strategy(:south), do: 3
defp ntcip_strategy(:west), do: 4
@doc """
Return the next valid ID, given the current ID.
iex> next_id(1)
2
iex> next_id(254)
255
iex> next_id(255)
1
"""
def next_id(int) when int < 255 do
int + 1
end
def next_id(_) do
1
end
defp handle_stream_results({:data, binary}, {:ok, comm, events}) do
case NTCIP.decode(binary) do
{:ok, ntcip} ->
handle_ntcip(comm, ntcip, events)
{:error, e} ->
_ =
Logger.warn(fn ->
"unexpected error decoding NTCIP comm=#{inspect(comm)} error=#{inspect(e)} body=#{inspect(binary, limit: :infinity)}"
end)
{:cont, {:ok, comm, events}}
end
end
defp handle_stream_results(:closed, {:ok, comm, events}) do
failed =
for {q, timer} <- Map.values(comm.in_flight) do
_ = Process.cancel_timer(timer)
{:failed, q, :closed}
end
comm = %{comm | next_id: 1, in_flight: %{}}
{:halt, {:ok, comm, events ++ failed ++ [{:error, :closed}]}}
end
defp handle_ntcip(%{group: group} = comm, %{group: group, pdu_type: :response} = ntcip, events) do
case Map.pop(comm.in_flight, ntcip.message.id) do
{nil, _} ->
# we got a response to a message we weren't waiting for. This isn't a
# big deal, as we'll have already sent a :timeout reply if it was a
# message we wanted.
_ =
Logger.debug(fn ->
"unexpected response for message comm=#{inspect(comm)} message=#{inspect(ntcip)}"
end)
{:cont, {:ok, comm, events}}
{{query, timer}, in_flight} ->
_ = Process.cancel_timer(timer)
events = [sent: query] ++ events
comm = %{comm | in_flight: in_flight}
{:cont, {:ok, comm, events}}
end
end
defp handle_ntcip(comm, ntcip, events) do
_ =
Logger.warn(fn ->
"unexpected NTCIP1211 message comm=#{inspect(comm)} message=#{inspect(ntcip)}"
end)
{:cont, {:ok, comm, events}}
end
defp send_after(pid, message, after_time) when after_time > 0 do
Process.send_after(pid, message, after_time)
end
defp send_after(pid, message, _after_time) do
Kernel.send(pid, message)
# fake timer
make_ref()
end
end
|
lib/tablespoon/communicator/btd.ex
|
alias Graphqexl.Schema
alias Graphqexl.Utils.FakeData
defmodule Graphqexl.Schema.Executable do
@moduledoc """
Establishes a `GenServer` to cache the loaded GraphQL schema.
Future improvement: use the GenServer as a basis for hot-reloading
"""
require Logger
use GenServer
# TODO: pull schema_path from the init arg / env
@schema_path "./lib/graphqexl/utils/schema.gql"
@table :schema
@doc """
Handle request messages from external processes for a `:get` operation,
returning the executable schema. Callback for the GenServer's `call` handling.
Returns: `t:Graphqexl.Schema.t/0`
"""
@doc since: "0.1.0"
@spec handle_call(:get, GenServer.from, term) :: {:reply, Schema.t, nil}
def handle_call(:get, _, _), do: {:reply, get_schema(), nil}
@doc """
Loads, parses and uses `:ets` to cache the configured schema definition.
`c:GenServer.init/1` callback implementation, called at application bootstrap.
Returns:
`{:ok, nil}` when successful
`{:stop, reason}` when unsuccessful, where `reason` is a `t:String.t/0`
"""
@doc since: "0.1.0"
@spec init(term) :: {:ok, nil} | {:stop, String.t}
def init(init_arg) do
"Loading schema from #{@schema_path}" |> Logger.debug
init_arg
|> cache_init
|> load_schema
|> cache_put
|> succeed
end
@doc """
Starts the schema cache. Triggers the `c:GenServer.init/1` callback and blocks until the callback
returns. Returns a tuple with contents dependent on the success state of this function and the
`c:GenServer.init/1` callback.
Returns
`{:ok, pid}`
When successful, where `pid` is the resulting process
`{:error, {:already_started, pid} | term}`
When there is an error. If there is already a process for this application, the first key will
be `:already_started` and `pid` will be the existing process.
`:ignore`
When this child should be ignored by the containing supervisor tree
"""
def start_link(init_arg), do: GenServer.start_link(__MODULE__, init_arg, name: ExecutableSchema)
defp cache_get(key), do: @table |> :ets.lookup_element(key, 2)
defp cache_init(_arg), do: @table |> :ets.new([:named_table])
defp cache_put(value), do: :schema |> cache_put(value)
defp cache_put(key, value), do: @table |> :ets.insert({key, value})
defp load_schema(_cache) do
@schema_path
|> File.read
|> elem(1)
|> schema
end
defp create_post(_parent, _args, _context), do: FakeData.posts |> Enum.random
defp get_post(_parent, args, _context), do: args.id |> FakeData.post
defp get_user_comments(_parent, args, _context), do: args.userId |> FakeData.user_comments
defp get_schema, do: cache_get(:schema)
defp schema(gql) do
gql |> Schema.executable(%{
createPost: &create_post/3,
getPost: &get_post/3,
getUserComments: &get_user_comments/3,
})
end
defp succeed(false) do
"Failed to load executable schema" |> Logger.info
{:stop, "Could not load executable schema"}
end
defp succeed(true) do
"Loaded executable schema" |> Logger.info
:schema |> cache_get |> inspect |> Logger.debug
{:ok, nil}
end
end
|
lib/graphqexl/schema/executable.ex
|
defmodule VectorClock do
@moduledoc """
Elixir implementation of vector clocks.
## About
Vector clocks are used in distributed systems as a way of maintaining a
logical ordering of events. A vector clock consists of a list of dots,
with each dot representing a node in a distributed system. A dot consists
of an identifier for the node, its current count, and a timestamp from
the last time it was incremented. When a node sends an event to another
node, it increments its own dot in its vector clock and sends the clock
alongside the message. A node receiving a message can determine whether
it has already seen the effect of that message by comparing its vector clock
with the received vector clock.
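## Example
A small sketch of the intended flow (node names are illustrative):

    a = VectorClock.fresh() |> VectorClock.increment(:node_a)
    b = VectorClock.increment(a, :node_b)
    VectorClock.descends(b, a) #=> true
    VectorClock.descends(a, b) #=> false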
## Source
Based on the erlang version from
[`:riak_core`](https://github.com/basho/riak_core/blob/develop/src/vclock.erl).
"""
@opaque t :: [dot]
@opaque dot :: __MODULE__.Dot.t
@opaque pure_dot :: __MODULE__.PureDot.t
@type vclock_node :: term
@type counter :: integer
@type timestamp :: integer
defmodule Dot do
@moduledoc false
@type t :: %__MODULE__{
node: VectorClock.vclock_node,
counter: VectorClock.counter,
timestamp: VectorClock.timestamp
}
defstruct [:node, :counter, :timestamp]
end
defmodule PureDot do
@moduledoc false
@type t :: %__MODULE__{
node: VectorClock.vclock_node,
counter: VectorClock.counter
}
defstruct [:node, :counter]
end
@doc """
Create a new empty vector clock.
"""
@spec fresh() :: t
def fresh, do: []
@doc """
Create a new vector clock with an initial dot.
"""
@spec fresh(vclock_node, counter) :: t
def fresh(node, count) do
[%Dot{node: node, counter: count, timestamp: timestamp()}]
end
@doc """
Check if vector clock `va` is a descendent of vector clock `vb`.
"""
@spec descends(t, t) :: boolean
def descends(_va, []) do
true # all vector clocks descend from the empty vector clock
end
def descends(va, vb) do
[%{node: node_b, counter: counter_b}|rest_b] = vb
case find_dot(va, node_b) do
nil ->
false
%{counter: counter_a} ->
(counter_a >= counter_b) and descends(va, rest_b)
end
end
@doc """
Check whether a vector clock descends from a given dot.
"""
@spec descends_dot(t, dot) :: boolean
def descends_dot(vclock, dot) do
descends(vclock, [dot])
end
@doc """
Converts a dot to a pure dot, for when timestamp data is not needed.
"""
@spec pure_dot(dot) :: pure_dot
def pure_dot(dot) do
%PureDot{node: dot.node, counter: dot.counter}
end
@doc """
Checks if vector clock `va` strictly dominates vector clock `vb`.
A vector clock is said to dominate another when it represents a later
logical time than the other.
"""
@spec dominates(t, t) :: boolean
def dominates(va, vb) do
descends(va, vb) and not descends(vb, va)
end
@doc """
Combines a list of vector clocks into their least possible common descendant.
"""
@spec merge([t]) :: t
def merge([]), do: []
def merge([single]), do: single
def merge([head|tail]), do: merge(tail, sort_vclock(head))
defp merge([], vclock), do: vclock
defp merge([va|rest], vclock) do
sorted_vclock = sort_vclock(va)
merge(rest, merge(sorted_vclock, vclock, []))
end
defp merge([], [], acc), do: :lists.reverse(acc)
defp merge([], left, acc), do: :lists.reverse(acc, left)
defp merge(left, [], acc), do: :lists.reverse(acc, left)
defp merge([%{node: node1} = dot|vclock],
n = [%{node: node2}|_], acc) when node1 < node2 do
merge(vclock, n, [dot|acc])
end
defp merge(v = [%{node: node1}|_],
[%{node: node2} = dot|nclock], acc) when node1 > node2 do
merge(v, nclock, [dot|acc])
end
defp merge([vdot|vclock], [ndot|nclock], acc) do
{counter, timestamp} = cond do
vdot.counter > ndot.counter -> {vdot.counter, vdot.timestamp}
vdot.counter < ndot.counter -> {ndot.counter, ndot.timestamp}
true -> {vdot.counter, max(vdot.timestamp, ndot.timestamp)}
end
merge(vclock, nclock, [%{vdot| counter: counter, timestamp: timestamp}|acc])
end
@doc """
Get the counter value from a vector clock for a specific node.
"""
@spec get_counter(t, vclock_node) :: counter
def get_counter(vclock, node) do
case find_dot(vclock, node) do
nil -> 0
dot -> dot.counter
end
end
@doc """
Get the timestamp value from a vector clock for a specific node.
"""
@spec get_timestamp(t, vclock_node) :: timestamp | nil
def get_timestamp(vclock, node) do
case find_dot(vclock, node) do
nil -> nil
dot -> dot.timestamp
end
end
@doc """
Get the dot entry from a vector clock for a specific node.
"""
@spec get_dot(t, vclock_node) :: {:ok, dot} | {:error, :not_found}
def get_dot(vclock, node) do
case find_dot(vclock, node) do
nil -> {:error, :not_found}
dot -> {:ok, dot}
end
end
@doc """
Checks if the given argument is a valid dot.
"""
@spec valid_dot?(term) :: boolean
def valid_dot?(%Dot{counter: cnt, timestamp: ts}) when is_integer(cnt) and is_integer(ts) do
true
end
def valid_dot?(_), do: false
@doc """
Increment the vector clock at node.
"""
@spec increment(t, vclock_node) :: t
def increment(vclock, node) do
increment(vclock, node, timestamp())
end
@doc """
Increment the vector clock at node.
"""
@spec increment(t, vclock_node, timestamp) :: t
def increment(vclock, node, timestamp) do
{new_vclock, new_counter, new_timestamp} = case nodetake(vclock, node) do
false ->
{vclock, 1, timestamp}
{dot, mod_vclock} ->
{mod_vclock, dot.counter + 1, timestamp}
end
[%Dot{node: node, counter: new_counter, timestamp: new_timestamp}|new_vclock]
end
@doc """
Get all nodes in the vector clock.
"""
@spec all_nodes(t) :: [vclock_node]
def all_nodes(vclock) do
for %{node: node} <- vclock, do: node
end
@days_from_gregorian_base_to_epoch (1970*365+478)
@seconds_from_gregorian_base_to_epoch (@days_from_gregorian_base_to_epoch*24*60*60)
@doc """
Current timestamp for a vector clock.
"""
@spec timestamp() :: timestamp
def timestamp do
{mega, sec, _} = :os.timestamp()
@seconds_from_gregorian_base_to_epoch + mega*1000000 + sec
end
@doc """
Compares vector clocks for equality.
"""
@spec equal?(t, t) :: boolean
def equal?(va, vb) do
sort_vclock(va) === sort_vclock(vb)
end
# TODO: what should the default values be?
@doc """
Prunes a vector clock based on various parameters.
Vector clocks get pruned when they are either considered too large
or when their oldest dot is too old. Entries are removed one by one
from the oldest end until neither of the two conditions is met.
## Options
* `:small_vclock` - vector clocks at or below this size are never pruned.
* `:young_vclock` - if the difference between `now` and the timestamp on
the oldest dot is below this value, the vector clock is not pruned.
* `:big_vclock` - vector clocks larger than this will be pruned.
* `:old_vclock` - vector clocks whose oldest dot is older than `now` minus
this value will be pruned.
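## Example
Given some `clock`, a sketch of an aggressive policy that keeps at most one
dot (the option values here are illustrative, not recommended defaults):
    VectorClock.prune(clock, VectorClock.timestamp(),
      small_vclock: 1, young_vclock: 0, big_vclock: 1, old_vclock: 0)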
"""
@spec prune(t, timestamp, Keyword.t) :: t
def prune(vclock, now, opts \\ []) do
sorted_vclock = Enum.sort(vclock, fn dot_a, dot_b ->
{dot_a.timestamp, dot_a.node} < {dot_b.timestamp, dot_b.node}
end)
prune_small(sorted_vclock, now, opts)
end
defp prune_small(vclock, now, opts) do
case length(vclock) <= Keyword.get(opts, :small_vclock, 100) do
true -> vclock
false -> prune_young(vclock, now, opts)
end
end
defp prune_young(vclock, now, opts) do
%{timestamp: head_time} = hd(vclock)
case (now - head_time) < Keyword.get(opts, :young_vclock, 1_000) do
true -> vclock
false -> prune_big_or_old(vclock, now, head_time, opts)
end
end
defp prune_big_or_old(vclock, now, head_time, opts) do
case (length(vclock) > Keyword.get(opts, :big_vclock, 10_000)) or
((now - head_time) > Keyword.get(opts, :old_vclock, 100_000)) do
true -> prune_small(tl(vclock), now, opts)
false -> vclock
end
end
# private helpers
defp sort_vclock(vclock) do
Enum.sort_by(vclock, &Map.get(&1, :node))
end
defp find_dot(vclock, node) do
Enum.find(vclock, fn dot -> dot.node === node end)
end
defp nodetake(vclock, node, acc \\ [])
defp nodetake([], _node, _acc) do
false
end
defp nodetake([%{node: node} = dot|rest], node, acc) do
{dot, :lists.reverse(acc, rest)}
end
defp nodetake([dot|rest], node, acc) do
nodetake(rest, node, [dot|acc])
end
end
|
lib/vector_clock.ex
| 0.922058
| 0.790207
|
vector_clock.ex
|
starcoder
|
defmodule BroadwayRabbitMQ.AmqpClient do
@moduledoc false
alias AMQP.{
Connection,
Channel,
Basic,
Queue
}
require Logger
@behaviour BroadwayRabbitMQ.RabbitmqClient
@connection_opts_schema [
username: [type: :any],
password: [type: :any],
virtual_host: [type: :any],
host: [type: :any],
port: [type: :any],
channel_max: [type: :any],
frame_max: [type: :any],
heartbeat: [type: :any],
connection_timeout: [type: :any],
ssl_options: [type: :any],
client_properties: [type: :any],
socket_options: [type: :any],
auth_mechanisms: [type: :any]
]
@binding_opts_schema [
routing_key: [type: :any],
no_wait: [type: :any],
arguments: [type: :any]
]
@opts_schema [
queue: [
type: :string,
required: true,
doc: """
The name of the queue. If `""`, then the queue name will
be autogenerated by the server, but for this to work you have to declare
the queue through the `:declare` option.
"""
],
connection: [
type:
{:or,
[
{:custom, __MODULE__, :__validate_amqp_uri__, []},
keyword_list: @connection_opts_schema
]},
default: [],
doc: """
Defines an AMQP URI or a set of options used by
the RabbitMQ client to open the connection with the RabbitMQ broker. See
`AMQP.Connection.open/1` for the full list of options.
"""
],
qos: [
type: :keyword_list,
keys: [
prefetch_size: [type: :non_neg_integer],
prefetch_count: [type: :non_neg_integer, default: 50]
],
default: [],
doc: """
Defines a set of prefetch options used by the RabbitMQ client.
See `AMQP.Basic.qos/2` for the full list of options. Note that the
`:global` option is not supported by Broadway since each producer holds only one
channel per connection.
"""
],
name: [
type: {:or, [:string, {:in, [:undefined]}]},
default: :undefined,
doc: """
The name of the AMQP connection to use.
"""
],
backoff_min: [
type: :non_neg_integer,
doc: """
The minimum backoff interval (default: `1_000`).
"""
],
backoff_max: [
type: :non_neg_integer,
doc: """
The maximum backoff interval (default: `30_000`).
"""
],
backoff_type: [
type: {:in, [:exp, :rand, :rand_exp, :stop]},
doc: """
The backoff strategy. `:stop` for no backoff and
to stop, `:exp` for exponential, `:rand` for random and `:rand_exp` for
random exponential (default: `:rand_exp`).
"""
],
metadata: [
type: {:list, :atom},
default: [],
doc: """
The list of AMQP metadata fields to copy (default: `[]`). Note
that every `Broadway.Message` contains an `:amqp_channel` in its `metadata` field.
See the "Metadata" section.
"""
],
declare: [
type: :keyword_list,
keys: [
durable: [type: :any, doc: false],
auto_delete: [type: :any, doc: false],
exclusive: [type: :any, doc: false],
passive: [type: :any, doc: false],
no_wait: [type: :any, doc: false],
arguments: [type: :any, doc: false]
],
doc: """
A list of options used to declare the `:queue`. The
queue is only declared (and possibly created if not already there) if this
option is present and not `nil`. Note that if you use `""` as the queue
name (which means that the queue name will be autogenerated on the server),
then every producer stage will declare a different queue. If you want all
producer stages to consume from the same queue, use a specific queue name.
You can still declare the same queue as many times as you want because
queue creation is idempotent (as long as you don't use the `passive: true`
option). For the available options, see `AMQP.Queue.declare/3`.
"""
],
bindings: [
type: {:list, {:custom, __MODULE__, :__validate_binding__, []}},
default: [],
doc: """
A list of bindings for the `:queue`. This option
allows you to bind the queue to one or more exchanges. Each binding is a tuple
`{exchange_name, binding_options}` where so that the queue will be bound
to `exchange_name` through `AMQP.Queue.bind/4` using `binding_options` as
the options. Bindings are idempotent so you can bind the same queue to the
same exchange multiple times.
"""
],
merge_options: [
type: {:fun, 1},
doc: """
A function that takes the index of the producer in the
Broadway topology and returns a keyword list of options. The returned options
are merged with the other options given to the producer. This option is useful
to dynamically change options based on the index of the producer. For example,
you can use this option to "shard" load between a few queues where a subset of
the producer stages is connected to each queue, or to connect producers to
different RabbitMQ nodes (for example through partitioning). Note that the options
are evaluated every time a connection is established (for example, in case
of disconnections). This means that you can also use this option to choose
different options on every reconnection. This can be particularly useful
if you have multiple RabbitMQ URLs: in that case, you can reconnect to a different
URL every time you reconnect to RabbitMQ, which avoids the case where the
producer tries to always reconnect to a URL that is down.
"""
],
after_connect: [
type: {:fun, 1},
doc: """
A function that takes the AMQP channel that the producer
is connected to and can run arbitrary setup. This is useful for declaring
complex RabbitMQ topologies with possibly multiple queues, bindings, or
exchanges. RabbitMQ declarations are generally idempotent so running this
function from all producer stages after every time they connect is likely
fine. This function can return `:ok` if everything went well or `{:error, reason}`.
In the error case, the producer will consider the connection failed and
will try to reconnect later (same behavior as when the connection drops, for example).
This function is run **before** declaring and binding queues according to
the `:declare` and `:bindings` options (described above).
"""
],
consume_options: [
type: :keyword_list,
default: [],
doc: """
Options passed down to `AMQP.Basic.consume/4`. Not all options supported by
`AMQP.Basic.consume/4` are available here as some options would conflict with
the internal implementation of this producer.
""",
keys: [
consumer_tag: [type: :string],
no_local: [type: :boolean],
no_ack: [type: :boolean],
exclusive: [type: :boolean],
arguments: [type: :any]
]
],
broadway: [type: :any, doc: false]
]
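# Illustrative `:merge_options` usage (the queue names are hypothetical):
#
#     merge_options: fn index ->
#       [queue: "queue_#{rem(index, 2)}"]
#     end
#
# This spreads producer stages across two queues based on each producer's
# index in the Broadway topology.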
@doc false
def __opts_schema__, do: @opts_schema
@impl true
def init(opts) do
with {:ok, opts} <- validate_merge_opts(opts),
{:ok, opts} <- NimbleOptions.validate(opts, @opts_schema),
:ok <- validate_declare_opts(opts[:declare], opts[:queue]) do
{:ok,
%{
connection: Keyword.fetch!(opts, :connection),
queue: Keyword.fetch!(opts, :queue),
name: Keyword.fetch!(opts, :name),
declare_opts: Keyword.get(opts, :declare, nil),
bindings: Keyword.fetch!(opts, :bindings),
qos: Keyword.fetch!(opts, :qos),
metadata: Keyword.fetch!(opts, :metadata),
consume_options: Keyword.fetch!(opts, :consume_options),
after_connect: Keyword.get(opts, :after_connect, fn _channel -> :ok end)
}}
else
{:error, %NimbleOptions.ValidationError{} = error} -> {:error, Exception.message(error)}
{:error, message} when is_binary(message) -> {:error, message}
end
end
# This function should return "{:ok, channel}" if successful. If failing to setup a channel, a
# connection, or if some network error happens at any point, this should close the connection it
# opened.
@impl true
def setup_channel(config) do
{name, config} = Map.pop(config, :name, :undefined)
telemetry_meta = %{connection: config.connection, connection_name: name}
case :telemetry.span([:broadway_rabbitmq, :amqp, :open_connection], telemetry_meta, fn ->
{Connection.open(config.connection, name), telemetry_meta}
end) do
{:ok, conn} ->
# We need to link so that if our process crashes, the AMQP connection will go
# down. We're trapping exits in the producer anyways so on our end this looks
# like a monitor, pretty much.
true = Process.link(conn.pid)
with {:ok, channel} <- Channel.open(conn),
:ok <- call_after_connect(config, channel),
:ok <- Basic.qos(channel, config.qos),
{:ok, queue} <- maybe_declare_queue(channel, config.queue, config.declare_opts),
:ok <- maybe_bind_queue(channel, queue, config.bindings) do
{:ok, channel}
else
{:error, reason} ->
# We don't terminate the caller process when something fails, but just reconnect
# later. So if opening the connection works, but any other step fails (like opening
# the channel), we need to close the connection, or otherwise we would leave the
# connection open and leak it. In amqp_client, closing the connection also closes
# everything related to it (like the channel), so we're good.
_ = Connection.close(conn)
{:error, reason}
end
{:error, reason} ->
{:error, reason}
end
catch
:exit, {:timeout, {:gen_server, :call, [amqp_conn_pid, :connect, timeout]}}
when is_integer(timeout) ->
# Make absolutely sure that this connection doesn't get established *after* the gen_server
# call timeout triggers and becomes a zombie connection.
true = Process.exit(amqp_conn_pid, :kill)
{:error, :timeout}
end
defp call_after_connect(config, channel) do
case config.after_connect.(channel) do
:ok ->
:ok
{:error, reason} ->
{:error, reason}
other ->
raise "unexpected return value from the :after_connect function: #{inspect(other)}"
end
end
defp maybe_declare_queue(_channel, queue, _declare_opts = nil) do
{:ok, queue}
end
defp maybe_declare_queue(channel, queue, declare_opts) do
with {:ok, %{queue: queue}} <- Queue.declare(channel, queue, declare_opts) do
{:ok, queue}
end
end
defp maybe_bind_queue(_channel, _queue, _bindings = []) do
:ok
end
defp maybe_bind_queue(channel, queue, [{exchange, opts} | bindings]) do
case Queue.bind(channel, queue, exchange, opts) do
:ok -> maybe_bind_queue(channel, queue, bindings)
{:error, reason} -> {:error, reason}
end
end
@impl true
def ack(channel, delivery_tag) do
:telemetry.span([:broadway_rabbitmq, :amqp, :ack], _meta = %{}, fn ->
{Basic.ack(channel, delivery_tag), _meta = %{}}
end)
end
@impl true
def reject(channel, delivery_tag, opts) do
:telemetry.span([:broadway_rabbitmq, :amqp, :reject], %{requeue: opts[:requeue]}, fn ->
{Basic.reject(channel, delivery_tag, opts), _meta = %{}}
end)
end
@impl true
def consume(channel, %{queue: queue, consume_options: consume_options} = _config) do
{:ok, consumer_tag} = Basic.consume(channel, queue, _consumer_pid = self(), consume_options)
consumer_tag
end
@impl true
def cancel(channel, consumer_tag) do
Basic.cancel(channel, consumer_tag)
end
@impl true
def close_connection(conn) do
if Process.alive?(conn.pid) do
Connection.close(conn)
else
:ok
end
end
defp validate_merge_opts(opts) do
case Keyword.fetch(opts, :merge_options) do
{:ok, fun} when is_function(fun, 1) ->
index = opts[:broadway][:index] || raise "missing broadway index"
merge_opts = fun.(index)
if Keyword.keyword?(merge_opts) do
{:ok, Keyword.merge(opts, merge_opts)}
else
message =
"The :merge_options function should return a keyword list, " <>
"got: #{inspect(merge_opts)}"
{:error, message}
end
{:ok, other} ->
{:error, ":merge_options must be a function with arity 1, got: #{inspect(other)}"}
:error ->
{:ok, opts}
end
end
def __validate_amqp_uri__(uri) do
case uri |> to_charlist() |> :amqp_uri.parse() do
{:ok, _amqp_params} -> {:ok, uri}
{:error, reason} -> {:error, "failed parsing AMQP URI: #{inspect(reason)}"}
end
end
defp validate_declare_opts(declare_opts, queue) do
if queue == "" and is_nil(declare_opts) do
{:error, "can't use \"\" (server autogenerate) as the queue name without the :declare"}
else
:ok
end
end
def __validate_binding__({exchange, binding_opts}) when is_binary(exchange) do
case NimbleOptions.validate(binding_opts, @binding_opts_schema) do
{:ok, validated_binding_opts} -> {:ok, {exchange, validated_binding_opts}}
{:error, reason} -> {:error, reason}
end
end
def __validate_binding__(other) do
{:error, "expected binding to be a {exchange, opts} tuple, got: #{inspect(other)}"}
end
end
|
lib/broadway_rabbitmq/amqp_client.ex
| 0.850344
| 0.521288
|
amqp_client.ex
|
starcoder
|
defmodule ExRabbitMQ.Producer do
@moduledoc """
A behaviour module that abstracts away the handling of RabbitMQ connections and channels.
It also provides hooks to allow the programmer to publish a message without having to directly
access the AMQP interfaces.
For a connection configuration example see `ExRabbitMQ.Config.Connection`.
#### Example usage for a producer implementing a `GenServer`
```elixir
defmodule MyExRabbitMQProducer do
@module __MODULE__
use GenServer
use ExRabbitMQ.Producer
def start_link do
GenServer.start_link(@module, :ok)
end
def init(state) do
new_state =
xrmq_init(:my_connection_config, state)
|> xrmq_extract_state()
{:ok, new_state}
end
def handle_cast({:publish, something}, state) do
xrmq_basic_publish(something, "", "my_queue")
{:noreply, state}
end
# optional override when there is a need to set up the channel right after the connection has been established.
def xrmq_channel_setup(channel, state) do
# any other channel setup goes here...
{:ok, state}
end
end
```
"""
alias ExRabbitMQ.AST.Common, as: CommonAST
alias ExRabbitMQ.Config.SessionConfig, as: SessionConfig
require ExRabbitMQ.AST.Common
require ExRabbitMQ.AST.Producer.GenServer
@doc """
Setup the process for producing messages on RabbitMQ.
Initiates a connection or reuses an existing one.
When a connection is established then a new channel is opened.
Next, `c:xrmq_channel_setup/2` is called to do any extra work on the opened channel.
The function accepts the following arguments:
* `connection` - The configuration information for the RabbitMQ connection.
It can either be an `ExRabbitMQ.Config.Connection` struct or an atom that will be used as the `key` for reading
the `:exrabbitmq` configuration from the environment.
For more information on how to configure the connection, check `ExRabbitMQ.Config.Connection`.
* `state` - The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_init(CommonAST.connection(), atom | SessionConfig.t(), term) ::
CommonAST.result()
@doc """
This helper function tries to use `c:xrmq_init/3` to set up a connection to RabbitMQ.
In case that fails, it tries again after a configured interval.
The interval can be configured by writing:
```elixir
config :exrabbitmq, :try_init_interval, <THE INTERVAL BETWEEN CONNECTION RETRIES IN MILLISECONDS>
```
The simplest way to use this is to add the following as part of the `GenServer.init/1` callback result:
```elixir
ExRabbitMQ.continue_tuple_try_init(connection_config, session_config, nil)
```
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_try_init(CommonAST.connection(), CommonAST.queue(), term) :: CommonAST.result()
@doc """
This overridable callback is called by `c:xrmq_try_init/3` just before a new connection attempt is made.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_on_try_init(term) :: term
@doc """
This overridable callback is called by `c:xrmq_try_init/3` when a new connection has been established.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
The return value of this callback tells the caller how to continue.
If `{:cont, state}` is returned, the caller will continue with `{:noreply, state}`.
If `{:halt, reason, state}` is returned, the caller will continue with `{:stop, reason, state}`.
By default, the return value of this callback is `{:cont, state}`.
"""
@callback xrmq_on_try_init_success(term) :: {:cont, term} | {:halt, term, term}
@doc """
This overridable callback is called by `c:xrmq_try_init/3` when a new connection could not be established
but a new attempt can be made (i.e., waiting for a connection to become available).
The error that occurred, as well as the wrapper process's state, is passed in to allow the callback to mutate
it if overridden.
The return value of this callback tells the caller how to continue.
If `{:cont, state}` is returned, the caller will continue with `{:noreply, state}`.
If `{:halt, reason, state}` is returned, the caller will continue with `{:stop, reason, state}`.
By default, the return value of this callback is `{:cont, state}`.
"""
@callback xrmq_on_try_init_error_retry(term, term) :: {:cont, term} | {:halt, term, term}
@doc """
This overridable callback is called by `c:xrmq_try_init/3` when a new connection could not be established
and the error is not normally recoverable (i.e., an error not related to a connection being currently unavailable).
The error that occurred, as well as the wrapper process's state, is passed in to allow the callback to mutate
it if overridden.
The return value of this callback tells the caller how to continue.
If `{:cont, state}` is returned, the caller will continue with `{:noreply, state}`.
If `{:halt, reason, state}` is returned, the caller will continue with `{:stop, reason, state}`.
By default, the return value of this callback is `{:halt, reason, state}`.
"""
@callback xrmq_on_try_init_error(term, term) :: {:cont, term} | {:halt, term, term}
@doc false
@callback xrmq_session_setup(AMQP.Channel.t(), atom | SessionConfig.t(), term) ::
CommonAST.result()
@doc """
This hook is called when a connection has been established and a new channel has been opened.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_channel_setup(AMQP.Channel.t(), term) :: CommonAST.result()
@doc """
This hook is called when a connection has been established and a new channel has been opened,
right after `c:xrmq_channel_setup/2`.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_channel_open(AMQP.Channel.t(), term) :: CommonAST.result()
@doc """
This overridable function publishes the **binary** `payload` to the `exchange` using the provided `routing_key`.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_basic_publish(String.t(), String.t(), String.t(), [term]) ::
CommonAST.basic_publish_result()
@doc """
Helper function that extracts the `state` argument from the passed in tuple.
"""
@callback xrmq_extract_state({:ok, term} | {:error, term, term}) :: state :: term
@doc """
This overridable hook is called when a new connection is established.
It is passed the connection struct, and the wrapper process's state is passed in to allow the callback
to mutate it if overridden.
"""
@callback xrmq_on_connection_opened(AMQP.Connection.t(), term) :: term
@doc """
This overridable hook is called when an already established connection has just been re-established.
It is passed the connection struct, and the wrapper process's state is passed in to allow the callback
to mutate it if overridden.
"""
@callback xrmq_on_connection_reopened(AMQP.Connection.t(), term) :: term
@doc """
This overridable hook is called when an already established connection is dropped.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_on_connection_closed(term) :: term
@doc """
This overridable hook is called when a connection is (re-)established and there are buffered messages to send.
Message buffering (disabled by default) can be enabled by writing:
```elixir
# this is a compile time constant
config :exrabbitmq, :message_buffering_enabled, true
```
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_flush_buffered_messages([term], term) :: term
defmacro __using__(_) do
inner_ast = ExRabbitMQ.AST.Producer.GenServer.ast()
common_ast = ExRabbitMQ.AST.Common.ast()
quote location: :keep do
require ExRabbitMQ.Logger, as: XRMQLogger
alias ExRabbitMQ.Config.Connection, as: XRMQConnectionConfig
alias ExRabbitMQ.Config.Session, as: XRMQSessionConfig
unquote(inner_ast)
def xrmq_init(connection_config, session_config \\ nil, state) do
connection_config = XRMQConnectionConfig.get(connection_config)
session_config = XRMQSessionConfig.get(session_config)
case xrmq_connection_setup(connection_config) do
:ok ->
XRMQState.set_session_config(session_config)
case xrmq_open_channel_setup(state) do
{:ok, state} ->
state = xrmq_flush_buffered_messages(state)
{:ok, state}
error ->
error
end
{:error, reason} ->
XRMQState.set_connection_status(:disconnected)
{:error, reason, state}
end
end
def xrmq_try_init(connection_config, session_config \\ nil, state) do
xrmq_try_init_producer({connection_config, session_config}, state)
end
def xrmq_open_channel_setup(state) do
case xrmq_open_channel(state) do
{:ok, state} ->
{channel, _} = XRMQState.get_channel_info()
session_config = XRMQState.get_session_config()
xrmq_session_setup(channel, session_config, state)
{:error, _reason, _state} = error ->
error
{:error, reason} ->
{:error, reason, state}
error ->
{:error, error, state}
end
end
unquote(common_ast)
defp xrmq_try_init_producer({connection_config_spec, session_config_spec} = opts, state) do
connection_config_spec
|> xrmq_init(session_config_spec, state)
|> xrmq_try_init_inner(opts)
end
defp xrmq_try_init_producer(connection_config_spec, state) do
connection_config_spec
|> xrmq_init(nil, state)
|> xrmq_try_init_inner(connection_config_spec)
end
end
end
end
|
lib/ex_rabbit_m_q/producer.ex
| 0.91339
| 0.83622
|
producer.ex
|
starcoder
|
defmodule RIG.Session do
@moduledoc """
A session is defined by a user's JWT.
Client connections with the same JWT are associated to the same session. That is, if
a user uses the same JWT to connect to RIG multiple times (e.g., using multiple
devices), all of those connections are associated to the same session.
Sessions can be blacklisted, which makes them illegal to use for a specified amount
of time. Established connections related to a blacklisted session are terminated
automatically.
By default, the session identifier within a JWT is the `jti` claim. You can change
this using the `JWT_SESSION_FIELD` environment variable.
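## Example
A sketch (the session name is illustrative):
    RIG.Session.blacklist("session-abc", _validity_period_s = 60)
    RIG.Session.blacklisted?("session-abc")
    #=> true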
"""
use Rig.Config, [:jwt_session_field]
alias JSONPointer
alias RIG.DistributedSet
alias __MODULE__.Connection
require Logger
@type session_name_t :: String.t()
@type validity_period_t :: pos_integer()
@blacklist_server SessionBlacklist
# ---
@doc "Disallow sessions with the given name for a specific amount of time."
@spec blacklist(session_name_t, validity_period_t) :: nil
def blacklist(session_name, validity_period_s) do
DistributedSet.add(@blacklist_server, session_name, validity_period_s)
Connection.terminate_all_associated_to(session_name)
end
# ---
@doc "Check whether a session name has been disallowed."
@spec blacklisted?(session_name_t) :: boolean
def blacklisted?(session_name) do
DistributedSet.has?(@blacklist_server, session_name)
end
# ---
@doc """
Infers the session name from JWT claims.
- `claims`: The JWT claims map. The claim used to identify a session in an
authorization token is defined by the `:jwt_session_field` in the module
configuration.
"""
@spec from_claims(claims :: map()) :: Result.t(session_name_t, String.t())
def from_claims(claims) do
%{jwt_session_field: jwt_session_field} = config()
JSONPointer.get(claims, jwt_session_field)
end
# ---
@doc """
Associates a connection process to a session identifier.
- `session_name`: If the session with the given name doesn't exist yet, it will be
created.
- `pid`: The client connection process. Once the associated session is terminated,
this process will receive a `{:session_killed, <session name>}` message.
"""
@spec register_connection(session_name_t, pid()) :: :ok
def register_connection(session_name, connection_pid) do
Connection.associate_session(connection_pid, session_name)
Logger.debug(fn ->
"Connection #{inspect(connection_pid)} is now associated to session #{inspect(session_name)}"
end)
:ok
end
end
|
lib/rig/session.ex
| 0.810366
| 0.537102
|
session.ex
|
starcoder
|
defmodule Solid.Tag do
@moduledoc """
Control flow tags can change the information Liquid shows using programming logic.
More info: https://shopify.github.io/liquid/tags/control-flow/
"""
alias Solid.{Expression, Argument, Context}
@doc """
Evaluate a tag and return the condition that succeeded or nil
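## Example
A sketch of evaluating a custom tag; the AST shape and the `MyTag` module
(implementing `render/2`) are assumptions based on the `custom_tag` clause
below:
    tags = %{"mytag" => MyTag}
    Solid.Tag.eval([custom_tag: ["mytag"]], context, tags: tags)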
"""
@spec eval(any, Context.t(), keyword()) :: {iolist | nil, Context.t()}
def eval(tag, context, options) do
case do_eval(tag, context, options) do
{text, context} -> {text, context}
text -> {text, context}
end
end
defp do_eval([], _context, _options), do: nil
defp do_eval([cycle_exp: cycle], context, _options) do
{context, result} = Context.run_cycle(context, cycle)
{[text: result], context}
end
defp do_eval([custom_tag: tag], context, options) do
[tag_name | tag_data] = tag
tags = Keyword.get(options, :tags, %{})
result =
if(Map.has_key?(tags, tag_name)) do
[text: tags[tag_name].render(context, tag_data)]
else
nil
end
{result, context}
end
defp do_eval([{:if_exp, exp} | _] = tag, context, _options) do
if eval_expression(exp[:expression], context), do: throw({:result, exp})
elsif_exps = tag[:elsif_exps]
if elsif_exps do
result = Enum.find(elsif_exps, &eval_elsif(&1, context))
if result, do: throw({:result, elem(result, 1)})
end
else_exp = tag[:else_exp]
if else_exp, do: throw({:result, else_exp})
catch
{:result, result} -> result[:result]
end
defp do_eval([{:unless_exp, exp} | _] = tag, context, _options) do
unless eval_expression(exp[:expression], context), do: throw({:result, exp})
elsif_exps = tag[:elsif_exps]
if elsif_exps do
result = Enum.find(elsif_exps, &eval_elsif(&1, context))
if result, do: throw({:result, elem(result, 1)})
end
else_exp = tag[:else_exp]
if else_exp, do: throw({:result, else_exp})
catch
{:result, result} -> result[:result]
end
defp do_eval([{:case_exp, field} | [{:whens, when_map} | _]] = tag, context, _options) do
result = when_map[Argument.get(field, context)]
if result do
result
else
tag[:else_exp][:result]
end
end
defp do_eval(
[assign_exp: [field: [field_name], argument: argument, filters: filters]],
context,
_options
) do
new_value = Argument.get(argument, context, filters: filters)
context = %{context | vars: Map.put(context.vars, field_name, new_value)}
{nil, context}
end
defp do_eval(
[capture_exp: [field: [field_name], result: result]],
context,
_options
) do
{captured, context} = Solid.render(result, context)
context = %{
context
| vars: Map.put(context.vars, field_name, captured)
}
{nil, context}
end
defp do_eval([counter_exp: [{operation, default}, field]], context, _options) do
value = Argument.get([field], context, scopes: [:counter_vars]) || default
{:field, [field_name]} = field
context = %{
context
| counter_vars: Map.put(context.counter_vars, field_name, value + operation)
}
{[text: to_string(value)], context}
end
defp do_eval([break_exp: _], context, _options) do
throw({:break_exp, [], context})
end
defp do_eval([continue_exp: _], context, _options) do
throw({:continue_exp, [], context})
end
defp do_eval(
[
for_exp:
[
{:field, [enumerable_key]},
{:enumerable, enumerable},
{:parameters, parameters} | _
] = exp
],
context,
_options
) do
enumerable =
enumerable
|> enumerable(context)
|> apply_parameters(parameters)
do_for(enumerable_key, enumerable, exp, context)
end
defp do_eval([raw_exp: raw], context, _options) do
{[text: raw], context}
end
defp do_for(_, [], exp, context) do
exp = Keyword.get(exp, :else_exp)
{exp[:result], context}
end
defp do_for(enumerable_key, enumerable, exp, context) do
exp = Keyword.get(exp, :result)
length = Enum.count(enumerable)
{result, context} =
enumerable
|> Enum.with_index(0)
|> Enum.reduce({[], context}, fn {v, index}, {acc_result, acc_context_initial} ->
acc_context =
acc_context_initial
|> set_enumerable_value(enumerable_key, v)
|> maybe_put_forloop_map(enumerable_key, index, length)
try do
{result, acc_context} = Solid.render(exp, acc_context)
acc_context = restore_initial_forloop_value(acc_context, acc_context_initial)
{[result | acc_result], acc_context}
catch
{:break_exp, partial_result, context} ->
throw({:result, [partial_result | acc_result], context})
{:continue_exp, partial_result, context} ->
{[partial_result | acc_result], context}
end
end)
context = %{context | iteration_vars: Map.delete(context.iteration_vars, enumerable_key)}
{[text: Enum.reverse(result)], context}
catch
{:result, result, context} ->
context = %{context | iteration_vars: Map.delete(context.iteration_vars, enumerable_key)}
{[text: Enum.reverse(result)], context}
end
defp set_enumerable_value(acc_context, key, value) do
iteration_vars = Map.put(acc_context.iteration_vars, key, value)
%{acc_context | iteration_vars: iteration_vars}
end
defp maybe_put_forloop_map(acc_context, key, index, length) when key != "forloop" do
map = build_forloop_map(index, length)
iteration_vars = Map.put(acc_context.iteration_vars, "forloop", map)
%{acc_context | iteration_vars: iteration_vars}
end
defp maybe_put_forloop_map(acc_context, _key, _index, _length) do
acc_context
end
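# Mirrors Liquid's `forloop` object: 1-based `index`, 0-based `index0`,
# their reverse counterparts, `first`/`last` flags, and the total `length`.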
defp build_forloop_map(index, length) do
%{
"index" => index + 1,
"index0" => index,
"rindex" => length - index,
"rindex0" => length - index - 1,
"first" => index == 0,
"last" => length == index + 1,
"length" => length
}
end
defp restore_initial_forloop_value(acc_context, %{
iteration_vars: %{"forloop" => initial_forloop}
}) do
iteration_vars = Map.put(acc_context.iteration_vars, "forloop", initial_forloop)
%{acc_context | iteration_vars: iteration_vars}
end
defp restore_initial_forloop_value(acc_context, _) do
acc_context
end
defp enumerable([range: [first: first, last: last]], context) do
first = integer_or_field(first, context)
last = integer_or_field(last, context)
first..last
end
defp enumerable(field, context), do: Argument.get(field, context) || []
defp apply_parameters(enumerable, parameters) do
enumerable
|> offset(parameters)
|> limit(parameters)
|> reversed(parameters)
end
defp offset(enumerable, %{offset: offset}) do
Enum.slice(enumerable, offset..-1)
end
defp offset(enumerable, _), do: enumerable
defp limit(enumerable, %{limit: limit}) do
Enum.slice(enumerable, 0..(limit - 1))
end
defp limit(enumerable, _), do: enumerable
defp reversed(enumerable, %{reversed: _}) do
Enum.reverse(enumerable)
end
defp reversed(enumerable, _), do: enumerable
defp integer_or_field(value, _context) when is_integer(value), do: value
defp integer_or_field(field, context), do: Argument.get([field], context)
defp eval_elsif({:elsif_exp, elsif_exp}, context) do
eval_expression(elsif_exp[:expression], context)
end
defp eval_expression(exps, context), do: Expression.eval(exps, context)
end
|
lib/solid/tag.ex
| 0.69946
| 0.451085
|
tag.ex
|
starcoder
|
defmodule Formex.Ecto.ChangesetValidator do
@behaviour Formex.Validator
alias Formex.Form
alias Formex.Field
alias Ecto.Changeset
@moduledoc """
Changeset validator adapter for Formex.
It was created to make use of validation functions included in `Ecto.Changeset`. This module
creates a fake changeset on which to apply the validation rules.
You don't need to use this validator - any
[validator](https://hexdocs.pm/formex/Formex.Validator.html) works with Ecto schemas.
You can also add errors in the `c:Formex.Ecto.modify_changeset/2` callback, which modifies real
changeset.
# Limitations
* can be used only with Ecto schemas.
* `length` validation for collections doesn't work.
Maybe there is a way to fix it. If you need this now - use Vex validator instead.
# Installation
See `Formex.Validator` docs.
# Usage
```
defmodule App.UserType do
use Formex.Type
use Formex.Ecto.Type
use Formex.Ecto.ChangesetValidator # <- add this
```
```
def build_form(form) do
form
|> add(:username, :text_input, validation: [
:required
])
|> add(:email, :text_input, validation: [
required: [message: "give me your email!"],
format: [arg: ~r/@/]
])
|> add(:age, :text_input, validation: [
:required,
inclusion: [arg: 13..100, message: "you must be 13."]
])
end
```
Keys from `validation` list are converted to `validate_` functions from
`Ecto.Changeset`. For example `required` -> `Ecto.Changeset.validate_required/3`.
The value is a list of options. If a function requires an additional argument
(e.g. `Ecto.Changeset.validate_format/4` needs the format as its third argument),
it must be passed as the `:arg` option.
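For example (illustrative), this entry on the `:email` field:
    validation: [format: [arg: ~r/@/, message: "invalid email"]]
becomes the call:
    Ecto.Changeset.validate_format(changeset, :email, ~r/@/, message: "invalid email")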
"""
defmacro __using__([]) do
quote do
import Ecto.Changeset
end
end
@spec validate(Form.t()) :: Form.t()
def validate(form) do
# the `create_changeset_for_validation` creates changeset without collections
# `length` doesn't validate empty collections so we don't need them
changeset = Formex.Ecto.Changeset.create_changeset_for_validation(form)
errors_fields =
form
|> Form.get_fields_validatable()
|> Enum.flat_map(fn item ->
validate_field(changeset, item)
end)
errors =
errors_fields
|> Enum.reduce([], fn {key, val}, acc ->
Keyword.update(acc, key, [val], &[val | &1])
end)
form
|> Map.put(:errors, errors)
end
@spec validate_field(changeset :: Changeset.t(), field :: Field.t()) :: Keyword.t()
defp validate_field(changeset, field) do
field.validation
|> Enum.reduce(changeset, fn validation, changeset ->
{name, opts} =
case validation do
{name, opts} when is_list(opts) ->
{name, opts}
name ->
{name, []}
end
{arg, opts} = Keyword.pop(opts, :arg)
args =
if arg do
[changeset, field.name, arg, opts]
else
[changeset, field.name, opts]
end
name = ("validate_" <> to_string(name)) |> String.to_atom()
apply(Changeset, name, args)
end)
|> Map.get(:errors)
end
end
|
lib/changeset_validator.ex
| 0.846514
| 0.444444
|
changeset_validator.ex
|
starcoder
|
defmodule GGity.Shapes do
@moduledoc false
alias GGity.Draw
@shape_names %{
square_open: 0,
circle_open: 1,
triangle_open: 2,
plus: 3,
cross: 4,
diamond_open: 5,
triangle_down_open: 6,
square_cross: 7,
asterisk: 8,
diamond_plus: 9,
circle_plus: 10,
star: 11,
square_plus: 12,
circle_cross: 13,
square_triangle: 14,
triangle_square: 14,
square: 15,
circle_small: 16,
triangle: 17,
diamond: 18,
circle: 19,
bullet: 20,
circle_filled: 21,
square_filled: 22,
diamond_filled: 23,
triangle_filled: 24,
triangle_down_filled: 25
}
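# Illustrative usage (the option names are assumptions based on the helpers
# below): draw a filled circle centered at {30, 30}, sized for an area of 25.
#
#     GGity.Shapes.draw(:circle, {30, 30}, 25, color: "blue")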
@doc false
@spec draw(binary() | atom() | non_neg_integer(), {number(), number()}, number(), keyword()) ::
iolist()
def draw(character, {x, y}, size, options) when is_binary(character) do
# Fall back to the original options when no :color key is present;
# without the else branch, options would rebind to nil.
options =
  if Keyword.has_key?(options, :color) do
    options
    |> Keyword.put(:fill, options[:color])
    |> Keyword.delete(:color)
  else
    options
  end
Draw.text(character, [
{:x, x},
{:y, y},
{:font_size, :math.sqrt(size)},
{:text_anchor, "middle"},
{:dominant_baseline, "middle"} | options
])
end
def draw(name, {x, y}, area, attributes) when is_map_key(@shape_names, name) do
draw(@shape_names[name], {x, y}, area, attributes)
end
def draw(0, {x, y}, area, attributes) do
attributes
|> make_transparent()
|> replace_color_with_stroke()
|> square()
|> wrap_svg({x, y}, size_for(:square, area))
end
def draw(1, {x, y}, area, attributes) do
attributes
|> make_transparent()
|> replace_color_with_stroke()
|> circle()
|> wrap_svg({x, y}, size_for(:circle, area))
end
def draw(2, {x, y}, area, attributes) do
attributes
|> make_transparent()
|> replace_color_with_stroke()
|> triangle()
|> wrap_svg({x, y}, size_for(:triangle, area))
end
def draw(3, {x, y}, area, attributes) do
attributes
|> replace_color_with_stroke()
|> plus()
|> wrap_svg({x, y}, size_for(:square, area))
end
def draw(4, {x, y}, area, attributes) do
attributes
|> replace_color_with_stroke()
|> cross()
|> wrap_svg({x, y}, size_for(:square, area))
end
def draw(5, {x, y}, area, attributes) do
attributes
|> make_transparent()
|> replace_color_with_stroke()
|> diamond()
|> wrap_svg({x, y}, size_for(:square, area))
end
def draw(6, {x, y}, area, attributes) do
attributes
|> make_transparent()
|> replace_color_with_stroke()
|> flip()
|> triangle()
|> wrap_svg({x, y}, size_for(:triangle, area))
end
def draw(7, {x, y}, area, attributes) do
attributes =
attributes
|> make_transparent()
|> replace_color_with_stroke()
square = square(attributes)
cross = cross(attributes)
wrap_svg([square, cross], {x, y}, size_for(:square, area))
end
def draw(8, {x, y}, area, attributes) do
attributes =
attributes
|> make_transparent()
|> replace_color_with_stroke()
cross = cross(attributes)
plus = plus(attributes)
wrap_svg([cross, plus], {x, y}, size_for(:square, area))
end
def draw(9, {x, y}, area, attributes) do
attributes =
attributes
|> make_transparent()
|> replace_color_with_stroke()
diamond = diamond(attributes)
plus = plus(attributes)
wrap_svg([diamond, plus], {x, y}, size_for(:square, area))
end
def draw(10, {x, y}, area, attributes) do
attributes =
attributes
|> make_transparent()
|> replace_color_with_stroke()
circle = circle(attributes)
plus = plus(attributes)
wrap_svg([circle, plus], {x, y}, size_for(:circle, area))
end
def draw(11, {x, y}, area, attributes) do
attributes =
attributes
|> make_transparent()
|> replace_color_with_stroke()
triangle = triangle([{:transform, "translate(0, -1)"} | attributes])
triangle_down =
attributes
|> flip()
|> triangle()
size = size_for(:triangle, area)
Draw.svg(
[triangle, triangle_down],
viewBox: "0 -1 10 11",
x: x,
y: y,
height: to_string(size),
width: to_string(size)
)
end
def draw(12, {x, y}, area, attributes) do
attributes =
attributes
|> make_transparent()
|> replace_color_with_stroke()
square = square(attributes)
plus = plus(attributes)
wrap_svg([square, plus], {x, y}, size_for(:square, area))
end
def draw(13, {x, y}, area, attributes) do
attributes =
attributes
|> make_transparent()
|> replace_color_with_stroke()
circle = circle(attributes)
cross = cross(attributes)
wrap_svg([circle, cross], {x, y}, size_for(:circle, area))
end
def draw(14, {x, y}, area, attributes) do
attributes =
attributes
|> make_transparent()
|> replace_color_with_stroke()
square = square(attributes)
triangle = triangle(attributes)
wrap_svg([square, triangle], {x, y}, size_for(:square, area))
end
def draw(15, {x, y}, area, attributes) do
attributes
|> replace_color_with_fill()
|> square()
|> wrap_svg({x, y}, size_for(:square, area))
end
def draw(16, {x, y}, area, attributes) do
# MAGIC NUMBER - I don't know how exactly 16 vs. 19 vs. 20 relate
attributes
|> replace_color_with_fill()
|> circle()
|> wrap_svg({x, y}, size_for(:circle, area) * 0.8)
end
def draw(17, {x, y}, area, attributes) do
attributes
|> replace_color_with_fill()
|> triangle()
|> wrap_svg({x, y}, size_for(:triangle, area))
end
def draw(18, {x, y}, area, attributes) do
attributes
|> replace_color_with_fill()
|> diamond()
|> wrap_svg({x, y}, size_for(:square, area))
end
def draw(19, {x, y}, area, attributes) do
attributes
|> replace_color_with_fill()
|> circle()
|> wrap_svg({x, y}, size_for(:circle, area))
end
def draw(20, {x, y}, area, attributes) do
# MAGIC NUMBER - I don't know how exactly 16 vs. 19 vs. 20 relate
attributes
|> replace_color_with_fill()
|> circle()
|> wrap_svg({x, y}, size_for(:circle, area) * 0.5)
end
def draw(21, {x, y}, area, attributes) do
attributes
|> replace_color_with_stroke()
|> circle()
|> wrap_svg({x, y}, size_for(:circle, area))
end
def draw(22, {x, y}, area, attributes) do
attributes
|> replace_color_with_stroke()
|> square()
|> wrap_svg({x, y}, size_for(:square, area))
end
def draw(23, {x, y}, area, attributes) do
attributes
|> replace_color_with_stroke()
|> diamond()
|> wrap_svg({x, y}, size_for(:square, area))
end
def draw(24, {x, y}, area, attributes) do
attributes
|> replace_color_with_stroke()
|> Keyword.put(:size, :math.sqrt(area * 2))
|> triangle()
|> wrap_svg({x, y}, size_for(:triangle, area))
end
def draw(25, {x, y}, area, attributes) do
attributes
|> replace_color_with_stroke()
|> flip()
|> triangle()
|> wrap_svg({x, y}, size_for(:triangle, area))
end
defp square(attributes) do
attributes
|> Keyword.merge(height: "10", width: "10")
|> Draw.rect()
end
defp circle(attributes) do
Draw.circle({5, 5}, 4, attributes)
end
defp triangle(attributes) do
Draw.polygon("5,0 10,10 0,10", attributes)
end
defp plus(attributes) do
horizontal_line_points = [x1: "0", x2: "10", y1: "5", y2: "5"]
vertical_line_points = [y1: "0", y2: "10", x1: "5", x2: "5"]
horizontal = horizontal_line_points ++ attributes
vertical = vertical_line_points ++ attributes
[Draw.line(horizontal), Draw.line(vertical)]
end
defp cross(attributes) do
bottom_left_line = [x1: "0", x2: "10", y1: "10", y2: "0"]
top_left_line = [x1: "0", x2: "10", y1: "0", y2: "10"]
bottom_left = bottom_left_line ++ attributes
top_left = top_left_line ++ attributes
[Draw.line(bottom_left), Draw.line(top_left)]
end
defp diamond(attributes) do
Draw.polygon("5,0 10,5 5,10 0,5", attributes)
end
defp make_transparent(attributes) do
Keyword.merge(attributes, fill_opacity: 0)
end
defp replace_color_with_stroke(attributes) do
attributes
|> Keyword.merge(stroke: attributes[:color])
|> Keyword.delete(:color)
end
defp replace_color_with_fill(attributes) do
attributes
|> Keyword.merge(fill: attributes[:color])
|> Keyword.delete(:color)
end
defp flip(attributes) do
Keyword.merge(attributes, transform: "rotate(180, 5, 5)")
end
defp wrap_svg(shapes, {x, y}, size) do
Draw.svg(
shapes,
viewBox: "0 0 10 10",
x: x - size / 2,
y: y - size / 2,
height: to_string(size),
width: to_string(size)
)
end
defp size_for(_shape, area) do
:math.sqrt(area)
end
# defp size_for(:square, area) do
# :math.sqrt(area)
# end
# defp size_for(:triangle, area) do
# :math.sqrt(area)
# end
# defp size_for(:circle, area) do
# 2 * :math.sqrt(area / :math.pi())
# end
end
|
lib/ggity/shapes.ex
| 0.812272
| 0.401658
|
shapes.ex
|
starcoder
|
defmodule GenUtil.KeyVal do
@moduledoc """
Helpers for collections of key-value pairs.
"""
@compile {:inline, fetch: 2, fetch!: 2, get: 2, put: 3, delete: 2, has_key?: 2, replace!: 3}
@type key :: term()
@type value :: term()
@typedoc """
A pair of values in a 2-tuple.
"""
@type pair() :: {key(), value()}
@typedoc """
A map or list of pairs.
"""
@type t() :: map() | list(pair)
@doc """
Fetches the value for a specific `key` in the given `map`.
If `map` contains the given `key` then its value is returned in the shape of `{:ok, value}`.
If `map` doesn't contain `key`, `:error` is returned.
## Examples
iex> KeyVal.fetch(%{a: 1}, :a)
{:ok, 1}
iex> KeyVal.fetch(%{"one" => 1}, "one")
{:ok, 1}
iex> KeyVal.fetch(%{1 => "one"}, 1)
{:ok, "one"}
iex> KeyVal.fetch(%{a: 1}, :b)
:error
iex> KeyVal.fetch([a: 1], :a)
{:ok, 1}
iex> KeyVal.fetch([{"one", 1}], "one")
{:ok, 1}
iex> KeyVal.fetch([{1, "one"}], 1)
{:ok, "one"}
iex> KeyVal.fetch([a: 1], :b)
:error
"""
@spec fetch(t(), key()) :: {:ok, value()} | :error
def fetch(props, key) when is_list(props) do
case :proplists.lookup(key, props) do
:none -> :error
{^key, value} -> {:ok, value}
end
end
def fetch(map, key) when is_map(map), do: Map.fetch(map, key)
@spec fetch!(t(), key()) :: value() | no_return()
def fetch!(keyval, key) do
case fetch(keyval, key) do
{:ok, val} -> val
:error -> raise %KeyError{key: key, term: keyval}
end
end
@doc """
Gets the value for the given `key` from the `keyval`.
If `key` is present in the `keyval` then its value is
returned. Otherwise, `default` is returned.
If `default` is not provided, `nil` is used.
## Examples
iex> KeyVal.get(%{}, :a)
nil
iex> KeyVal.get(%{a: 1}, :a)
1
iex> KeyVal.get([a: 1], :b)
nil
iex> KeyVal.get([a: 1], :b, 3)
3
"""
@spec get(t(), key(), value()) :: any()
def get(keyval, key, default \\ nil) do
case fetch(keyval, key) do
{:ok, val} ->
val
:error ->
default
end
end
@doc """
Returns whether the given `key` exists in the given `keyval`.
"""
@spec has_key?(t(), key()) :: boolean()
def has_key?(map, key) when is_map(map), do: Map.has_key?(map, key)
def has_key?(props, key) when is_list(props), do: :proplists.is_defined(key, props)
@doc """
Sets a `key` to the given `value`.
"""
@spec put(t(), key(), value()) :: t()
def put(map, key, value) when is_map(map) do
Map.put(map, key, value)
end
def put(props, key, value) when is_list(props) do
[{key, value} | delete(props, key)]
end
@doc """
Returns a list of all the values for the given `key`.
"""
@spec get_all(t(), key()) :: list(value())
def get_all(props, key) when is_list(props) do
props
|> :proplists.lookup_all(key)
|> Enum.map(fn {_k, val} -> val end)
end
def get_all(map, key) when is_map(map) do
case Map.fetch(map, key) do
{:ok, val} -> [val]
:error -> []
end
end
@doc """
Deletes the entry in `keyval` for a specific `key`.
If the `key` does not exist, returns `keyval` unchanged.
Inlined by the compiler.
## Examples
iex> KeyVal.delete(%{a: 1, b: 2}, :a)
%{b: 2}
iex> KeyVal.delete(%{b: 2}, :a)
%{b: 2}
iex> KeyVal.delete([a: 1, b: 2], :b)
[a: 1]
iex> KeyVal.delete([a: 1, b: 2], :c)
[a: 1, b: 2]
"""
def delete(props, key) when is_list(props) do
:proplists.delete(key, props)
end
def delete(map, key) when is_map(map) do
Map.delete(map, key)
end
@doc """
Puts a value under `key` only if the `key` already exists in `keyval`.
If `key` is not present in `keyval`, a `KeyError` exception is raised.
Inlined by the compiler.
## Examples
iex> KeyVal.replace!(%{a: 1, b: 2}, :a, 3)
%{a: 3, b: 2}
iex> KeyVal.replace!(%{a: 1}, :b, 2)
** (KeyError) key :b not found in: %{a: 1}
iex> KeyVal.replace!([a: 1, b: 2], :a, 3)
[a: 3, b: 2]
iex> KeyVal.replace!([{"one", 1}, {"two", 2}, {"one", 3}], "one", "it was one!")
[{"one", "it was one!"}, {"two", 2}, {"one", "it was one!"}]
iex> KeyVal.replace!([a: 1], :b, 2)
** (KeyError) key :b not found in: [a: 1]
"""
@spec replace!(t(), key(), value()) :: t()
def replace!(map, key, value) when is_map(map) do
:maps.update(key, value, map)
end
def replace!(props, key, value) when is_list(props) do
do_replace(props, key, value, false)
catch
:no_key ->
raise %KeyError{key: key, term: props}
end
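# Walks the proplist once, replacing every pair whose key matches; the
# boolean flag tracks whether a match was seen so that `KeyError` is raised
# (via `throw`/`catch`) only when the key never appears.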
defp do_replace([], _key, _value, false) do
throw(:no_key)
end
defp do_replace([], _key, _value, true) do
[]
end
defp do_replace([{key, _} | rest], key, value, _seen_key?) do
[{key, value} | do_replace(rest, key, value, true)]
end
defp do_replace([item | rest], key, value, seen_key?) do
[item | do_replace(rest, key, value, seen_key?)]
end
@doc """
Copies the key-value pair of `key` from `source` into `dest` only if
`key` exists in `source`.
Returns `dest` either unchanged (if `key` is not in `source`) or
with the key-value pair of `key` from `source` added.
## Examples
iex> KeyVal.put_copy(%{}, %{a: "a", b: "b"}, :a)
%{a: "a"}
iex> KeyVal.put_copy([], [a: "a", b: "b"], :a)
[a: "a"]
iex> KeyVal.put_copy(%{}, %{a: "a", b: "b"}, :c)
%{}
iex> KeyVal.put_copy([], [a: "a", b: "b"], :c)
[]
iex> KeyVal.put_copy(%{a: "z"}, %{a: "a", b: "b"}, :a)
%{a: "a"}
iex> KeyVal.put_copy([a: "z"], [a: "a", b: "b"], :a)
[a: "a"]
"""
def put_copy(dest, source, key) do
source
|> fetch(key)
|> case do
{:ok, value} ->
put(dest, key, value)
:error ->
dest
end
end
@doc """
Copies the key-value pair of `key` from `source` into `dest`, raising if
`key` does not exist in `source`.
Returns `dest` with the key-value pair of `key` from `source` added,
or raises a `KeyError`.
## Examples
iex> KeyVal.put_copy!(%{}, %{a: "a", b: "b"}, :a)
%{a: "a"}
iex> KeyVal.put_copy!([], [a: "a", b: "b"], :a)
[a: "a"]
iex> KeyVal.put_copy!(%{}, %{a: "a", b: "b"}, :c)
** (KeyError) key :c not found in: %{a: "a", b: "b"}
iex> KeyVal.put_copy!([], [a: "a", b: "b"], :c)
** (KeyError) key :c not found in: [a: "a", b: "b"]
iex> KeyVal.put_copy!(%{a: "z"}, %{a: "a", b: "b"}, :a)
%{a: "a"}
iex> KeyVal.put_copy!([a: "z"], [a: "a", b: "b"], :a)
[a: "a"]
"""
def put_copy!(dest, source, key) do
put(dest, key, fetch!(source, key))
end
end
|
lib/gen_util/key_val.ex
| 0.889102
| 0.592018
|
key_val.ex
|
starcoder
|
defmodule Day23 do
def part1(input) do
boot_nics(input) |> simple_nat
end
def part2(input) do
boot_nics(input) |> nat
end
defp boot_nics(input) do
0..49
|> Enum.map(fn address ->
nic = Intcode.new(input)
Intcode.set_sink(nic, self())
send(nic, [address])
Intcode.go(nic)
{address, nic}
end)
|> Map.new
end
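# Part 1: route packets between NICs until one is addressed to 255, then
# return that packet's Y value.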
defp simple_nat(nics) do
receive do
[255, _, y] ->
y
[to, x, y] ->
send(Map.fetch!(nics, to), [x, y])
simple_nat(nics)
other ->
other
end
end
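# Part 2: packets addressed to 255 go to the NAT. When the whole network is
# idle, the NAT resends the last captured packet to NIC 0; the answer is the
# first Y value delivered twice in a row.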
defp nat(nics, last \\ nil, last_idle \\ nil) do
receive do
[255, x, y] ->
nat(nics, [x, y], last_idle)
[to, x, y] ->
send(Map.fetch!(nics, to), [x, y])
nat(nics, last, last_idle)
after 0 ->
# Could be idle, but could be that some
# NICs are still executing. Query all of them
# to make sure.
case all_idle?(nics) do
true ->
send(Map.fetch!(nics, 0), last)
case {last, last_idle} do
{[_, y], [_, y]} ->
y
{_, _} ->
nat(nics, last, last)
end
false ->
nat(nics, last, last_idle)
end
end
end
defp all_idle?(nics) do
nics = Map.values(nics)
Enum.each(nics, fn nic -> send(nic, {:is_idle, self()}) end)
Enum.map(nics, fn _nic ->
receive do
{:idle, is_idle} -> is_idle
end
end)
|> Enum.all?(& &1)
end
end
defmodule Intcode do
def new(program) do
spawn(fn -> machine(program) end)
end
def set_sink(machine, sink) do
send(machine, {:set_sink, sink})
end
def write_memory(machine, addr, val) do
send(machine, {:write_memory, addr, val})
end
def go(machine) do
send(machine, {:go, self()})
end
def terminate(machine) do
send(machine, :terminate)
end
defp machine(input) do
memory = read_program(input)
memory = Map.put(memory, :output, [])
memory = Map.put(memory, :input, :queue.new())
memory = Map.put(memory, :is_idle, false)
machine_loop(memory)
end
defp machine_loop(memory) do
receive do
{:set_sink, sink} ->
memory = Map.put(memory, :sink, sink)
machine_loop(memory)
{:write_memory, addr, val} ->
memory = write(memory, addr, val)
machine_loop(memory)
{:go, from} ->
memory = execute(memory)
send(from, {:halted, self()})
machine_loop(memory)
:terminate ->
nil
end
end
defp execute(memory, ip \\ 0) do
{opcode, modes} = fetch_opcode(memory, ip)
case opcode do
1 ->
memory = exec_arith_op(&+/2, modes, memory, ip)
execute(memory, ip + 4)
2 ->
memory = exec_arith_op(&*/2, modes, memory, ip)
execute(memory, ip + 4)
3 ->
memory = exec_input(modes, memory, ip)
execute(memory, ip + 2)
4 ->
memory = exec_output(modes, memory, ip)
execute(memory, ip + 2)
5 ->
ip = exec_if(&(&1 !== 0), modes, memory, ip)
execute(memory, ip)
6 ->
ip = exec_if(&(&1 === 0), modes, memory, ip)
execute(memory, ip)
7 ->
memory = exec_cond(&(&1 < &2), modes, memory, ip)
execute(memory, ip + 4)
8 ->
memory = exec_cond(&(&1 === &2), modes, memory, ip)
execute(memory, ip + 4)
9 ->
memory = exec_inc_rel_base(modes, memory, ip)
execute(memory, ip + 2)
99 ->
memory
end
end
defp exec_arith_op(op, modes, memory, ip) do
[in1, in2] = read_operand_values(memory, ip + 1, modes, 2)
out_addr = read_out_address(memory, div(modes, 100), ip + 3)
result = op.(in1, in2)
write(memory, out_addr, result)
end
defp exec_input(modes, memory, ip) do
out_addr = read_out_address(memory, modes, ip + 1)
q = Map.fetch!(memory, :input)
q = fill_queue(q, memory.is_idle)
case :queue.out(q) do
{{:value, value}, q} ->
memory = write(memory, out_addr, value)
%{memory | input: q, is_idle: false}
{:empty, q} ->
memory = write(memory, out_addr, -1)
%{memory | input: q, is_idle: true}
end
end
defp fill_queue(q, is_idle) do
receive do
[_ | _] = input ->
q = Enum.reduce(input, q, & :queue.in(&1, &2))
fill_queue(q, is_idle)
{:is_idle, reply_to} ->
send(reply_to, {:idle, :queue.is_empty(q) and is_idle})
fill_queue(q, is_idle)
after 0 ->
q
end
end
defp exec_output(modes, memory, ip) do
[value] = read_operand_values(memory, ip + 1, modes, 1)
case memory do
%{:output => [b, a]} ->
sink = Map.fetch!(memory, :sink)
send(sink, [a, b, value])
%{memory | output: [], is_idle: false }
%{:output => output} ->
%{memory | output: [value | output], is_idle: false}
end
end
defp exec_if(op, modes, memory, ip) do
[value, new_ip] = read_operand_values(memory, ip + 1, modes, 2)
case op.(value) do
true -> new_ip
false -> ip + 3
end
end
defp exec_cond(op, modes, memory, ip) do
[operand1, operand2] = read_operand_values(memory, ip + 1, modes, 2)
out_addr = read_out_address(memory, div(modes, 100), ip + 3)
result = case op.(operand1, operand2) do
true -> 1
false -> 0
end
write(memory, out_addr, result)
end
defp exec_inc_rel_base(modes, memory, ip) do
[offset] = read_operand_values(memory, ip + 1, modes, 1)
base = get_rel_base(memory) + offset
Map.put(memory, :rel_base, base)
end
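# Parameter modes are packed as decimal digits above the two-digit opcode,
# least-significant digit first: 0 = position (dereference), 1 = immediate,
# 2 = relative to the current relative base.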
defp read_operand_values(_memory, _addr, _modes, 0), do: []
defp read_operand_values(memory, addr, modes, n) do
operand = read(memory, addr)
operand = case rem(modes, 10) do
0 -> read(memory, operand)
1 -> operand
2 -> read(memory, operand + get_rel_base(memory))
end
[operand | read_operand_values(memory, addr + 1, div(modes, 10), n - 1)]
end
defp read_out_address(memory, modes, addr) do
out_addr = read(memory, addr)
case modes do
0 -> out_addr
2 -> get_rel_base(memory) + out_addr
end
end
defp fetch_opcode(memory, ip) do
opcode = read(memory, ip)
modes = div(opcode, 100)
opcode = rem(opcode, 100)
{opcode, modes}
end
defp get_rel_base(memory) do
Map.get(memory, :rel_base, 0)
end
defp read(memory, addr) do
Map.get(memory, addr, 0)
end
defp write(memory, addr, value) do
Map.put(memory, addr, value)
end
defp read_program(input) do
String.split(input, ",")
|> Stream.map(&String.to_integer/1)
|> Stream.with_index
|> Stream.map(fn {code, index} -> {index, code} end)
|> Map.new
end
end
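
# A hedged usage sketch of the message protocol above. The program string is
# illustrative: opcode 3 reads one input into address 9, three opcode-4
# instructions echo it (the third flushes the two buffered values plus the
# new one to the sink as a packet), and opcode 99 halts.
machine = Intcode.new("3,9,4,9,4,9,4,9,99,0")
Intcode.set_sink(machine, self())
send(machine, [42])
Intcode.go(machine)

receive do
  [a, b, c] -> IO.inspect({a, b, c}) # {42, 42, 42}
end

Intcode.terminate(machine)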
|
day23/lib/day23.ex
| 0.569733
| 0.447823
|
day23.ex
|
starcoder
|
defmodule EWallet.TransactionConsumptionFetcher do
@moduledoc """
Handles any kind of retrieval/fetching for the TransactionConsumptionGate.
All functions here are only meant to load and format data related to
transaction consumptions.
"""
alias EWalletDB.{Transaction, TransactionConsumption}
  @spec get(String.t() | nil) ::
{:ok, %TransactionConsumption{}}
| {:error, :transaction_consumption_not_found}
def get(nil), do: {:error, :transaction_consumption_not_found}
def get(id) do
%{id: id}
|> get_by()
|> return_consumption()
end
defp return_consumption(nil), do: {:error, :transaction_consumption_not_found}
defp return_consumption(consumption), do: {:ok, consumption}
@spec idempotent_fetch(String.t()) ::
{:ok, nil}
| {:idempotent_call, %TransactionConsumption{}}
| {:error, %TransactionConsumption{}, atom(), String.t()}
| {:error, %TransactionConsumption{}, String.t(), String.t()}
def idempotent_fetch(idempotency_token) do
%{idempotency_token: idempotency_token}
|> get_by()
|> return_idempotent()
end
defp get_by(attrs) do
TransactionConsumption.get_by(
attrs,
preload: [
:account,
:user,
:wallet,
:token,
:transaction_request,
:transaction,
:exchange_account,
:exchange_wallet
]
)
end
defp return_idempotent(nil), do: {:ok, nil}
defp return_idempotent(%TransactionConsumption{transaction: nil} = consumption) do
{:idempotent_call, consumption}
end
defp return_idempotent(%TransactionConsumption{transaction: transaction} = consumption) do
return_transaction_result(consumption, failed_transaction: Transaction.failed?(transaction))
end
defp return_transaction_result(consumption, failed_transaction: true) do
{code, description} = Transaction.get_error(consumption.transaction)
{:error, consumption, code, description}
end
defp return_transaction_result(consumption, failed_transaction: false) do
{:idempotent_call, consumption}
end
end
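
# A hedged caller-side sketch for idempotent_fetch/1 above. The token value
# and the :proceed atom are illustrative stand-ins for the caller's logic.
case EWallet.TransactionConsumptionFetcher.idempotent_fetch("some-idempotency-token") do
  {:ok, nil} -> :proceed
  {:idempotent_call, consumption} -> {:ok, consumption}
  {:error, _consumption, code, description} -> {:error, code, description}
end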
|
apps/ewallet/lib/ewallet/fetchers/transaction_consumption_fetcher.ex
| 0.817611
| 0.416886
|
transaction_consumption_fetcher.ex
|
starcoder
|
defmodule Ctrl do
@tag_op :|
# We will simply transform the AST in the form of a regular `with` call.
defmacro ctrl([{:do, do_block} | else_catch_rescue] = _input) do
# IO.inspect _input
{main_block, meta} =
case do_block do
{:__block__, meta, exprs} when is_list(exprs) ->
{exprs, meta}
other ->
{[other], []}
end
{with_clauses, body} = split_body(main_block)
# handle the tag operator to tag responses
with_clauses = Enum.map(with_clauses, &wrap_tag/1)
body = {:__block__, [], body}
else_catch_rescue =
case Keyword.get(else_catch_rescue, :else, nil) do
nil ->
else_catch_rescue
elses ->
elses = elses |> Enum.map(&unwrap_tag/1)
:lists.keyreplace(:else, 1, else_catch_rescue, {:else, elses})
end
with_body = with_clauses ++ [[{:do, body} | else_catch_rescue]]
ast = {:with, meta, with_body}
# ast |> Macro.to_string |> IO.puts
ast
end
  # Ctrl allows the last clause to have an arrow `<-`, but `with` blocks do
  # not accept those inside their `do` block. So we split the body after the
  # last arrow expression, and if there is no body after the last `<-`, we
  # just use the left operand of the last arrow.
  # Caution: the clause will go into the `else` block if it does not match,
  # like any other `<-` clause.
defp split_body(exprs) do
    # arrow_clauses_reversed can contain other clauses, but ends with the
    # last arrow clause.
{body_reversed, arrow_clauses_reversed} =
exprs
|> :lists.reverse
|> Enum.split_while(fn(expr) -> not arrow?(expr) end)
body_reversed_nonempty =
case body_reversed do
# the last clause is an arrow, so we must invent a body for the with
# clause
[] ->
{:<-, _, [left, _]} = hd(arrow_clauses_reversed)
[left |> cleanup_last_clause]
non_empty ->
non_empty
end
{
arrow_clauses_reversed |> :lists.reverse,
body_reversed_nonempty |> :lists.reverse
}
end
defp arrow?({:<-, _, _}), do: true
defp arrow?(_), do: false
defp wrap_tag({:<-, meta, [left, right]} = _clause) do
{left2, right2} =
case left do
{@tag_op, _meta, [tag, inside]} when is_atom(tag) ->
{{tag, inside}, {tag, right}}
{:when, when_meta, [{@tag_op, _meta, [tag, inside]}, when_right]} when is_atom(tag) ->
l = {:when, when_meta, [{tag, inside}, when_right]}
{l, {tag, right}}
_normal ->
{left, right}
end
{:<-, meta, [left2, right2]}
end
defp wrap_tag(clause) do
clause
end
# If the last clause is set in the body, we must remove tags and guards
defp cleanup_last_clause({@tag_op, _, [_tag, content]}),
do: cleanup_last_clause(content)
  defp cleanup_last_clause({:when, _, [content, _guards]}),
    do: cleanup_last_clause(content)
defp cleanup_last_clause(content),
do: content
defp unwrap_tag({:->, meta, [left_match, right]}) do
left =
case left_match do
        # only unwrap literal-atom tags; leave cons patterns like [h | t] alone
        [{@tag_op, _, [tag, value]}] when is_atom(tag) ->
          [{tag, value}]
        [{:when, when_meta, [{@tag_op, _meta, [tag, inside]}, when_right]}] when is_atom(tag) ->
          # keep the guard, now matching on the untagged {tag, inside} tuple
          [{:when, when_meta, [{tag, inside}, when_right]}]
_untagged ->
left_match
end
{:->, meta, [left, right]}
end
end
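
# A hedged usage sketch of the `ctrl` macro above. The tags and helper
# functions are illustrative: `:tag | pattern <- expr` is rewritten to
# `{:tag, pattern} <- {:tag, expr}`, so a failed match arrives in `else`
# as `{:tag, value}` and can be matched back with the same `:tag | value`
# syntax.
defmodule CtrlExample do
  require Ctrl

  def run(input) do
    Ctrl.ctrl do
      :parse | {:ok, n} <- parse(input)
      :double | {:ok, m} <- double(n)
      {:ok, m}
    else
      :parse | reason -> {:error, :parse, reason}
      :double | reason -> {:error, :double, reason}
    end
  end

  defp parse(n) when is_integer(n), do: {:ok, n}
  defp parse(other), do: {:not_an_integer, other}

  defp double(n) when n < 1_000, do: {:ok, n * 2}
  defp double(n), do: {:too_large, n}
end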
|
lib/ctrl.ex
| 0.62395
| 0.500916
|
ctrl.ex
|
starcoder
|
defmodule Membrane.Hackney.Sink do
@moduledoc """
  An element that uploads data over HTTP(S) using Hackney.
"""
use Membrane.Sink
use Membrane.Log, tags: :membrane_hackney_sink
alias Membrane.Buffer
import Mockery.Macro
def_input_pad :input, caps: :any, demand_unit: :bytes
def_options location: [
type: :string,
description: """
The URL of a request
"""
],
method: [
type: :atom,
spec: :post | :put | :patch,
description: "HTTP method that will be used when making a request",
default: :post
],
headers: [
type: :keyword,
description:
"List of additional request headers in format accepted by `:hackney.request/5`",
default: []
],
hackney_opts: [
type: :keyword,
description:
"Additional options for Hackney in format accepted by `:hackney.request/5`",
default: []
],
demand_size: [
type: :integer,
description: "The size of the demand made after each write",
default: 1024
]
defmodule Response do
@moduledoc """
Struct containing HTTP response sent to pipeline via notification after the upload is finished.
"""
@type t :: %__MODULE__{
status: non_neg_integer(),
headers: [{String.t(), String.t()}],
body: String.t()
}
@enforce_keys [:status, :headers, :body]
defstruct @enforce_keys
end
@impl true
def handle_init(opts) do
state = opts |> Map.from_struct() |> Map.merge(%{conn_ref: nil})
{:ok, state}
end
@impl true
def handle_prepared_to_playing(_ctx, state) do
{:ok, conn_ref} =
mockable(:hackney).request(
state.method,
state.location,
state.headers,
:stream,
state.hackney_opts
)
{{:ok, demand: {:input, state.demand_size}}, %{state | conn_ref: conn_ref}}
end
@impl true
def handle_playing_to_prepared(_ctx, state) do
mockable(:hackney).close(state.conn_ref)
{:ok, %{state | conn_ref: nil}}
end
@impl true
def handle_write(:input, %Buffer{payload: payload}, _ctx, state) do
mockable(:hackney).send_body(state.conn_ref, payload)
{{:ok, demand: {:input, state.demand_size}}, state}
end
@impl true
def handle_end_of_stream(:input, _ctx, %{conn_ref: conn_ref} = state) do
{:ok, status, headers, conn_ref} = mockable(:hackney).start_response(conn_ref)
{:ok, body} = mockable(:hackney).body(conn_ref)
response_notification = %__MODULE__.Response{status: status, headers: headers, body: body}
{{:ok, notify: response_notification, notify: {:end_of_stream, :input}}, state}
end
end
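
# A hedged configuration sketch for the element above. The child name, URL,
# and header are illustrative, and the surrounding pipeline-spec structure
# depends on the Membrane Core version in use; only the struct fields come
# from def_options above.
children = [
  http_sink: %Membrane.Hackney.Sink{
    location: "https://example.com/upload",
    method: :put,
    headers: [{"content-type", "application/octet-stream"}]
  }
]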
|
lib/membrane_hackney_plugin/sink.ex
| 0.879923
| 0.447641
|
sink.ex
|
starcoder
|
defmodule Membrane.Audiometer.Peakmeter do
@moduledoc """
  This element computes peaks in each channel of the given signal at
  regular time intervals, regardless of whether it receives data or not.
  It uses Erlang's `:timer.send_interval/2`, which might not provide
  perfect accuracy.
  It accepts audio samples in any format supported by the
  `Membrane.RawAudio` module.
  It periodically emits notifications in the following format:
  * `{:audiometer, :underrun}` - if there was not enough data to
    compute the audio level within the given interval,
  * `{:amplitudes, amplitudes}` - where `amplitudes` is a list of the
    amplitudes found in the data received since the last tick (see
    `t:amplitude_t/0` for the value format).
See `options/0` for available options.
"""
use Membrane.Filter
alias __MODULE__.Amplitude
alias Membrane.Element.PadData
alias Membrane.RawAudio
@type amplitude_t :: [number | :infinity | :clip]
def_input_pad :input,
availability: :always,
mode: :pull,
caps: RawAudio,
demand_unit: :buffers,
demand_mode: :auto
def_output_pad :output,
availability: :always,
mode: :pull,
demand_mode: :auto,
caps: RawAudio
def_options interval: [
type: :integer,
description: """
How often peakmeter should emit messages containing sound level (in Membrane.Time units).
""",
default: 50 |> Membrane.Time.milliseconds()
]
# Private API
@impl true
def handle_init(%__MODULE__{interval: interval}) do
state = %{
interval: interval,
queue: <<>>
}
{:ok, state}
end
@impl true
def handle_prepared_to_playing(_ctx, state) do
{{:ok, start_timer: {:timer, state.interval}}, state}
end
@impl true
def handle_prepared_to_stopped(_ctx, state) do
{{:ok, stop_timer: :timer}, state}
end
@impl true
def handle_process(
:input,
%Membrane.Buffer{payload: payload} = buffer,
_context,
state
) do
new_state = %{state | queue: state.queue <> payload}
{{:ok, buffer: {:output, buffer}}, new_state}
end
@impl true
def handle_tick(:timer, %{pads: %{input: %PadData{caps: nil}}}, state) do
    {{:ok, notify: {:audiometer, :underrun}}, state}
end
def handle_tick(:timer, %{pads: %{input: %PadData{caps: caps}}}, state) do
frame_size = RawAudio.frame_size(caps)
if byte_size(state.queue) < frame_size do
{{:ok, notify: {:audiometer, :underrun}}, state}
else
{:ok, {amplitudes, rest}} = Amplitude.find_amplitudes(state.queue, caps)
{{:ok, notify: {:amplitudes, amplitudes}}, %{state | queue: rest}}
end
end
end
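
# A hedged arithmetic sketch of the underrun rule in handle_tick/3 above:
# with an (illustrative) 16-bit stereo stream a frame is 4 bytes, so a
# 3-byte queue cannot yield a full frame and the element reports underrun.
frame_size = 4
queue = <<1, 2, 3>>
byte_size(queue) < frame_size
#=> true, which triggers the {:audiometer, :underrun} notification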
|
lib/membrane_element_audiometer/peakmeter.ex
| 0.912787
| 0.556008
|
peakmeter.ex
|
starcoder
|
defmodule Beamchmark.Suite.Measurements.SchedulerInfo do
@moduledoc """
Module representing different statistics about scheduler usage.
"""
use Bunch.Access
alias Beamchmark.Math
@type sched_usage_t :: %{
(sched_id :: integer()) =>
{util :: float(), percent :: Math.percent_t() | Math.percent_diff_t()}
}
@type total_sched_usage_t ::
{util :: float(), percent :: Math.percent_t() | Math.percent_diff_t()}
@type weighted_sched_usage_t ::
{util :: float(), percent :: Math.percent_t() | Math.percent_diff_t()}
@type t :: %__MODULE__{
normal: sched_usage_t(),
cpu: sched_usage_t(),
io: sched_usage_t(),
total_normal: total_sched_usage_t(),
total_cpu: total_sched_usage_t(),
total_io: total_sched_usage_t(),
total: total_sched_usage_t(),
weighted: weighted_sched_usage_t()
}
defstruct normal: %{},
cpu: %{},
io: %{},
total_normal: {0, 0},
total_cpu: {0, 0},
total_io: {0, 0},
total: {0, 0},
weighted: {0, 0}
  # Converts the output of `:scheduler.utilization/1` to `SchedulerInfo.t()`.
@spec from_sched_util_result(any()) :: t()
def from_sched_util_result(sched_util_result) do
scheduler_info =
sched_util_result
|> Enum.reduce(%__MODULE__{}, fn
{sched_type, sched_id, util, percent}, scheduler_info
when sched_type in [:normal, :cpu, :io] ->
# convert from charlist to string, remove trailing percent sign and convert to float
percent = String.slice("#{percent}", 0..-2//1) |> String.to_float()
put_in(scheduler_info, [sched_type, sched_id], {util, percent})
{type, util, percent}, scheduler_info when type in [:total, :weighted] ->
percent = String.slice("#{percent}", 0..-2//1) |> String.to_float()
put_in(scheduler_info[type], {util, percent})
end)
total_normal = typed_total(scheduler_info.normal)
total_cpu = typed_total(scheduler_info.cpu)
total_io = typed_total(scheduler_info.io)
%__MODULE__{
scheduler_info
| total_normal: total_normal,
total_cpu: total_cpu,
total_io: total_io
}
end
@spec diff(t(), t()) :: t()
def diff(base, new) do
normal_diff = sched_usage_diff(base.normal, new.normal)
cpu_diff = sched_usage_diff(base.cpu, new.cpu)
io_diff = sched_usage_diff(base.io, new.io)
total_normal_diff = sched_usage_diff(base.total_normal, new.total_normal)
total_cpu_diff = sched_usage_diff(base.total_cpu, new.total_cpu)
total_io_diff = sched_usage_diff(base.total_io, new.total_io)
total_diff = sched_usage_diff(base.total, new.total)
weighted_diff = sched_usage_diff(base.weighted, new.weighted)
%__MODULE__{
normal: normal_diff,
cpu: cpu_diff,
io: io_diff,
total_normal: total_normal_diff,
total_cpu: total_cpu_diff,
total_io: total_io_diff,
total: total_diff,
weighted: weighted_diff
}
end
defp typed_total(scheduler_usage) do
count = scheduler_usage |> Map.keys() |> Enum.count()
if count != 0 do
util_sum =
scheduler_usage
|> Map.values()
|> Enum.reduce(0, fn {util, _percent}, util_sum ->
util_sum + util
end)
{util_sum / count, Float.round(util_sum / count * 100, 1)}
else
{0, 0}
end
end
defp sched_usage_diff(base, new) when is_map(base) and is_map(new) do
Enum.zip(base, new)
|> Map.new(fn
{{sched_id, {base_util, base_percent}}, {sched_id, {new_util, new_percent}}} ->
{sched_id, {new_util - base_util, Math.percent_diff(base_percent, new_percent)}}
end)
end
defp sched_usage_diff({base_util, base_percent}, {new_util, new_percent}),
do: {new_util - base_util, Math.percent_diff(base_percent, new_percent)}
end
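
# A hedged arithmetic sketch of typed_total/1 above (utilizations are
# illustrative): two schedulers at 0.25 and 0.75 average to 0.5, reported
# as {0.5, 50.0}.
usage = %{1 => {0.25, 25.0}, 2 => {0.75, 75.0}}
count = map_size(usage)
util_sum = usage |> Map.values() |> Enum.reduce(0, fn {util, _percent}, acc -> acc + util end)
{util_sum / count, Float.round(util_sum / count * 100, 1)}
#=> {0.5, 50.0}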
|
lib/beamchmark/suite/measurements/scheduler_info.ex
| 0.820541
| 0.530236
|
scheduler_info.ex
|
starcoder
|
defmodule Stripe.Account do
@moduledoc """
Work with Stripe account objects.
  You can:
  - Retrieve your own account
  - Retrieve an account with a specified `id`
  - Create, update, and list accounts
  This module does not yet support managed accounts.
Stripe API reference: https://stripe.com/docs/api#account
"""
@type t :: %__MODULE__{}
defstruct [
:id, :object,
:business_name, :business_primary_color, :business_url,
:charges_enabled, :country, :default_currency, :details_submitted,
:display_name, :email, :legal_entity, :external_accounts, :managed,
:metadata, :statement_descriptor, :support_email, :support_phone,
:support_url, :timezone, :tos_acceptance, :transfers_enabled,
:payouts_enabled, :verification
]
@singular_endpoint "account"
@plural_endpoint "accounts"
@address_map %{
city: [:create, :retrieve, :update],
country: [:create, :retrieve, :update],
line1: [:create, :retrieve, :update],
line2: [:create, :retrieve, :update],
postal_code: [:create, :retrieve, :update],
state: [:create, :retrieve, :update]
}
@address_kana_kanji_map %{ # Japan only
city: [:create, :retrieve, :update],
country: [:create, :retrieve, :update],
line1: [:create, :retrieve, :update],
line2: [:create, :retrieve, :update],
postal_code: [:create, :retrieve, :update],
state: [:create, :retrieve, :update],
town: [:create, :retrieve, :update]
}
@dob_map %{
day: [:create, :retrieve, :update],
month: [:create, :retrieve, :update],
year: [:create, :retrieve, :update]
}
@schema %{
business_logo: [:create, :retrieve, :update],
business_name: [:create, :retrieve, :update],
business_primary_color: [:create, :retrieve, :update],
business_url: [:create, :retrieve, :update],
country: [:create, :retrieve],
debit_negative_balances: [:create, :retrieve, :update],
decline_charge_on: %{
avs_failure: [:create, :retrieve, :update],
cvc_failure: [:create, :retrieve, :update]
},
default_currency: [:create, :retrieve, :update],
email: [:create, :retrieve, :update],
# TODO: Add ability to have nested external_account object OR the
# token string – can accomplish with a tuple and matching on that.
external_account: [:create, :retrieve, :update],
external_accounts: [:retrieve],
id: [:retrieve],
legal_entity: %{
address: @address_map,
address_kana: @address_kana_kanji_map, # Japan only
address_kanji: @address_kana_kanji_map, # Japan only
business_name: [:create, :retrieve, :update],
business_name_kana: [:create, :retrieve, :update], # Japan only
business_name_kanji: [:create, :retrieve, :update], # Japan only
business_tax_id: [:create, :update],
business_tax_id_provided: [:retrieve],
business_vat_id: [:create, :update],
business_vat_id_provided: [:retrieve],
dob: @dob_map,
first_name: [:create, :retrieve, :update],
first_name_kana: [:create, :retrieve, :update], # Japan only
first_name_kanji: [:create, :retrieve, :update], # Japan only
gender: [:create, :retrieve, :update], # "male" or "female"
last_name: [:create, :retrieve, :update],
last_name_kana: [:create, :retrieve, :update], # Japan only
last_name_kanji: [:create, :retrieve, :update], # Japan only
maiden_name: [:create, :retrieve, :update],
personal_address: @address_map,
personal_address_kana: @address_kana_kanji_map, # Japan only
personal_address_kanji: @address_kana_kanji_map, # Japan only
personal_id_number: [:create, :update],
personal_id_number_provided: [:retrieve],
phone_number: [:create, :retrieve, :update],
ssn_last_4: [:create, :update], # US only
ssn_last_4_provided: [:retrieve],
type: [:create, :update, :retrieve], # "individual" or "company"
verification: %{
details: [:retrieve],
details_code: [:retrieve],
document: [:create, :retrieve, :update],
status: [:retrieve],
}
},
managed: [:create, :retrieve],
metadata: [:create, :retrieve, :update],
object: [:retrieve],
product_description: [:create, :retrieve, :update],
statement_descriptor: [:create, :retrieve, :update],
support_email: [:create, :retrieve, :update],
support_phone: [:create, :retrieve, :update],
support_url: [:create, :retrieve, :update],
timezone: [:retrieve],
tos_acceptance: %{
date: [:create, :retrieve, :update],
ip: [:create, :retrieve, :update],
user_agent: [:create, :retrieve, :update]
},
payouts_enabled: [:retrieve],
transfer_schedule: %{
delay_days: [:create, :retrieve, :update],
interval: [:create, :retrieve, :update],
monthly_anchor: [:create, :retrieve, :update],
weekly_anchor: [:create, :retrieve, :update]
},
transfer_statement_descriptor: [:create, :retrieve, :update],
verification: %{
disabled_reason: [:retrieve],
due_by: [:retrieve],
fields_needed: [:retrieve]
}
}
@doc """
Schema map indicating when a particular argument can be created on, retrieved
from, or updated on the Stripe API.
"""
@spec schema :: map
def schema, do: @schema
@nullable_keys [
:metadata
]
@doc """
Create an account.
"""
@spec create(map, Keyword.t) :: {:ok, t} | {:error, Stripe.api_error_struct}
def create(changes, opts \\ []) do
Stripe.Request.create(@plural_endpoint, changes, @schema, opts)
end
@doc """
Retrieve your own account without options.
"""
@spec retrieve :: {:ok, t} | {:error, Stripe.api_error_struct}
def retrieve, do: retrieve([])
@doc """
Retrieve your own account with options.
"""
@spec retrieve(list) :: {:ok, t} | {:error, Stripe.api_error_struct}
def retrieve(opts) when is_list(opts), do: do_retrieve(@singular_endpoint, opts)
@doc """
Retrieve an account with a specified `id`.
"""
@spec retrieve(binary, list) :: {:ok, t} | {:error, Stripe.api_error_struct}
def retrieve(id, opts \\ []), do: do_retrieve(@plural_endpoint <> "/" <> id, opts)
@spec do_retrieve(String.t, list) :: {:ok, t} | {:error, Stripe.api_error_struct}
defp do_retrieve(endpoint, opts), do: Stripe.Request.retrieve(endpoint, opts)
@doc """
Update an account.
Takes the `id` and a map of changes.
"""
@spec update(binary, map, list) :: {:ok, t} | {:error, Stripe.api_error_struct}
def update(id, changes, opts \\ []) do
endpoint = @plural_endpoint <> "/" <> id
Stripe.Request.update(endpoint, changes, @schema, @nullable_keys, opts)
end
@doc """
List all connected accounts.
"""
@spec list(map, Keyword.t) :: {:ok, Stripe.List.t} | {:error, Stripe.api_error_struct}
def list(params \\ %{}, opts \\ []) do
endpoint = @plural_endpoint
Stripe.Request.retrieve(params, endpoint, opts)
end
end
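
# A hedged usage sketch for the module above (account id and field value are
# illustrative; API-key configuration and error handling are omitted):
{:ok, _own_account} = Stripe.Account.retrieve()
{:ok, account} = Stripe.Account.retrieve("acct_123")
{:ok, _updated} = Stripe.Account.update(account.id, %{business_name: "Acme"})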
|
lib/stripe/account.ex
| 0.548069
| 0.603815
|
account.ex
|
starcoder
|
defmodule Geometry.MultiLineStringZ do
@moduledoc """
  A set of line-strings of type `Geometry.LineStringZ`.
  `MultiLineStringZ` implements the protocols `Enumerable` and `Collectable`.
## Examples
iex> Enum.map(
...> MultiLineStringZ.new([
...> LineStringZ.new([
...> PointZ.new(1, 2, 3),
...> PointZ.new(3, 4, 5)
...> ]),
...> LineStringZ.new([
...> PointZ.new(1, 2, 3),
...> PointZ.new(11, 12, 13),
...> PointZ.new(13, 14, 15)
...> ])
...> ]),
      ...>   fn line_string -> length(line_string) end
...> )
[2, 3]
iex> Enum.into(
...> [LineStringZ.new([PointZ.new(1, 2, 3), PointZ.new(5, 6, 7)])],
...> MultiLineStringZ.new())
%MultiLineStringZ{
line_strings:
MapSet.new([
[[1, 2, 3], [5, 6, 7]]
])
}
"""
alias Geometry.{GeoJson, LineStringZ, MultiLineStringZ, PointZ, WKB, WKT}
defstruct line_strings: MapSet.new()
@type t :: %MultiLineStringZ{line_strings: MapSet.t(Geometry.coordinates())}
@doc """
Creates an empty `MultiLineStringZ`.
## Examples
iex> MultiLineStringZ.new()
%MultiLineStringZ{line_strings: MapSet.new()}
"""
@spec new :: t()
def new, do: %MultiLineStringZ{}
@doc """
  Creates a `MultiLineStringZ` from the given `Geometry.LineStringZ`s.
## Examples
iex> MultiLineStringZ.new([
...> LineStringZ.new([
...> PointZ.new(1, 2, 3),
...> PointZ.new(2, 3, 4),
...> PointZ.new(3, 4, 5)
...> ]),
...> LineStringZ.new([
...> PointZ.new(10, 20, 30),
...> PointZ.new(30, 40, 50)
...> ]),
...> LineStringZ.new([
...> PointZ.new(10, 20, 30),
...> PointZ.new(30, 40, 50)
...> ])
...> ])
%Geometry.MultiLineStringZ{
line_strings:
MapSet.new([
[[1, 2, 3], [2, 3, 4], [3, 4, 5]],
[[10, 20, 30], [30, 40, 50]]
])
}
iex> MultiLineStringZ.new([])
%MultiLineStringZ{line_strings: MapSet.new()}
"""
@spec new([LineStringZ.t()]) :: t()
def new([]), do: %MultiLineStringZ{}
def new(line_strings) do
%MultiLineStringZ{
line_strings:
Enum.into(line_strings, MapSet.new(), fn line_string -> line_string.points end)
}
end
@doc """
Returns `true` if the given `MultiLineStringZ` is empty.
## Examples
iex> MultiLineStringZ.empty?(MultiLineStringZ.new())
true
iex> MultiLineStringZ.empty?(
...> MultiLineStringZ.new([
...> LineStringZ.new([PointZ.new(1, 2, 3), PointZ.new(3, 4, 5)])
...> ])
...> )
false
"""
@spec empty?(t()) :: boolean
def empty?(%MultiLineStringZ{} = multi_line_string),
do: Enum.empty?(multi_line_string.line_strings)
@doc """
Creates a `MultiLineStringZ` from the given coordinates.
## Examples
iex> MultiLineStringZ.from_coordinates([
...> [[-1, 1, 1], [2, 2, 2], [-3, 3, 3]],
...> [[-10, 10, 10], [-20, 20, 20]]
...> ])
%MultiLineStringZ{
line_strings:
MapSet.new([
[[-1, 1, 1], [2, 2, 2], [-3, 3, 3]],
[[-10, 10, 10], [-20, 20, 20]]
])
}
"""
@spec from_coordinates([Geometry.coordinate()]) :: t()
def from_coordinates(coordinates) do
%MultiLineStringZ{line_strings: MapSet.new(coordinates)}
end
@doc """
Returns an `:ok` tuple with the `MultiLineStringZ` from the given GeoJSON
term. Otherwise returns an `:error` tuple.
## Examples
iex> ~s(
...> {
...> "type": "MultiLineString",
...> "coordinates": [
...> [[-1, 1, 1], [2, 2, 2], [-3, 3, 3]],
...> [[-10, 10, 10], [-20, 20, 20]]
...> ]
...> }
...> )
iex> |> Jason.decode!()
iex> |> MultiLineStringZ.from_geo_json()
{:ok,
%Geometry.MultiLineStringZ{
line_strings:
MapSet.new([
[[-10, 10, 10], [-20, 20, 20]],
[[-1, 1, 1], [2, 2, 2], [-3, 3, 3]]
])
}}
"""
@spec from_geo_json(Geometry.geo_json_term()) :: {:ok, t()} | Geometry.geo_json_error()
def from_geo_json(json), do: GeoJson.to_multi_line_string(json, MultiLineStringZ)
@doc """
The same as `from_geo_json/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_geo_json!(Geometry.geo_json_term()) :: t()
def from_geo_json!(json) do
case GeoJson.to_multi_line_string(json, MultiLineStringZ) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the GeoJSON term of a `MultiLineStringZ`.
There are no guarantees about the order of line-strings in the returned
`coordinates`.
## Examples
```elixir
[
[[-1, 1, 1], [2, 2, 2], [-3, 3, 3]],
[[-10, 10, 10], [-20, 20, 20]]
]
|> MultiLineStringZ.from_coordinates()
MultiLineStringZ.to_geo_json(
MultiLineStringZ.new([
LineStringZ.new([
PointZ.new(-1, 1, 1),
PointZ.new(2, 2, 2),
PointZ.new(-3, 3, 3)
]),
LineStringZ.new([
PointZ.new(-10, 10, 10),
PointZ.new(-20, 20, 20)
])
])
)
# =>
# %{
# "type" => "MultiLineString",
# "coordinates" => [
# [[-1, 1, 1], [2, 2, 2], [-3, 3, 3]],
# [[-10, 10, 10], [-20, 20, 20]]
# ]
# }
```
"""
@spec to_geo_json(t()) :: Geometry.geo_json_term()
def to_geo_json(%MultiLineStringZ{line_strings: line_strings}) do
%{
"type" => "MultiLineString",
"coordinates" => MapSet.to_list(line_strings)
}
end
@doc """
Returns an `:ok` tuple with the `MultiLineStringZ` from the given WKT string.
Otherwise returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
## Examples
iex> MultiLineStringZ.from_wkt("
...> SRID=1234;MultiLineString Z (
...> (10 20 10, 20 10 35, 20 40 10),
...> (40 30 10, 30 30 25)
...> )
...> ")
{:ok, {
%MultiLineStringZ{
line_strings:
MapSet.new([
[[10, 20, 10], [20, 10, 35], [20, 40, 10]],
[[40, 30, 10], [30, 30, 25]]
])
},
1234
}}
iex> MultiLineStringZ.from_wkt("MultiLineString Z EMPTY")
{:ok, %MultiLineStringZ{}}
"""
@spec from_wkt(Geometry.wkt()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkt_error()
def from_wkt(wkt), do: WKT.to_geometry(wkt, MultiLineStringZ)
@doc """
The same as `from_wkt/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkt!(Geometry.wkt()) :: t() | {t(), Geometry.srid()}
def from_wkt!(wkt) do
case WKT.to_geometry(wkt, MultiLineStringZ) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the WKT representation for a `MultiLineStringZ`. With option `:srid`
an EWKT representation with the SRID is returned.
There are no guarantees about the order of line-strings in the returned
WKT-string.
## Examples
```elixir
MultiLineStringZ.to_wkt(MultiLineStringZ.new())
# => "MultiLineString Z EMPTY"
MultiLineStringZ.to_wkt(
MultiLineStringZ.new([
      LineStringZ.new(
[PointZ.new(7.1, 8.1, 1.1), PointZ.new(9.2, 5.2, 2.2)]
),
      LineStringZ.new(
[PointZ.new(5.5, 9.2, 3.1), PointZ.new(1.2, 3.2, 4.2)]
)
])
)
# Returns a string without any \\n or extra spaces (formatted just for readability):
# MultiLineString Z (
# (5.5 9.2 3.1, 1.2 3.2 4.2),
# (7.1 8.1 1.1, 9.2 5.2 2.2)
# )
MultiLineStringZ.to_wkt(
MultiLineStringZ.new([
      LineStringZ.new(
[PointZ.new(7.1, 8.1, 1.1), PointZ.new(9.2, 5.2, 2.2)]
),
      LineStringZ.new(
[PointZ.new(5.5, 9.2, 3.1), PointZ.new(1.2, 3.2, 4.2)]
)
]),
srid: 555
)
# Returns a string without any \\n or extra spaces (formatted just for readability):
# SRID=555;MultiLineString Z (
# (5.5 9.2 3.1, 1.2 3.2 4.2),
# (7.1 8.1 1.1, 9.2 5.2 2.2)
# )
```
"""
@spec to_wkt(t(), opts) :: Geometry.wkt()
when opts: [srid: Geometry.srid()]
def to_wkt(%MultiLineStringZ{line_strings: line_strings}, opts \\ []) do
WKT.to_ewkt(
<<
"MultiLineString Z ",
line_strings |> MapSet.to_list() |> to_wkt_line_strings()::binary()
>>,
opts
)
end
@doc """
Returns the WKB representation for a `MultiLineStringZ`.
With option `:srid` an EWKB representation with the SRID is returned.
The option `endian` indicates whether `:xdr` big endian or `:ndr` little
endian is returned. The default is `:xdr`.
The `:mode` determines whether a hex-string or binary is returned. The default
is `:binary`.
An example of a simpler geometry can be found in the description for the
`Geometry.PointZ.to_wkb/1` function.
"""
@spec to_wkb(t(), opts) :: Geometry.wkb()
when opts: [endian: Geometry.endian(), srid: Geometry.srid(), mode: Geometry.mode()]
def to_wkb(%MultiLineStringZ{} = multi_line_string, opts \\ []) do
endian = Keyword.get(opts, :endian, Geometry.default_endian())
mode = Keyword.get(opts, :mode, Geometry.default_mode())
srid = Keyword.get(opts, :srid)
to_wkb(multi_line_string, srid, endian, mode)
end
@doc """
Returns an `:ok` tuple with the `MultiLineStringZ` from the given WKB string. Otherwise
returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
An example of a simpler geometry can be found in the description for the
`Geometry.PointZ.from_wkb/2` function.
"""
@spec from_wkb(Geometry.wkb(), Geometry.mode()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkb_error()
def from_wkb(wkb, mode \\ :binary), do: WKB.to_geometry(wkb, mode, MultiLineStringZ)
@doc """
The same as `from_wkb/2`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkb!(Geometry.wkb(), Geometry.mode()) :: t() | {t(), Geometry.srid()}
def from_wkb!(wkb, mode \\ :binary) do
case WKB.to_geometry(wkb, mode, MultiLineStringZ) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the number of elements in `MultiLineStringZ`.
## Examples
iex> MultiLineStringZ.size(
...> MultiLineStringZ.new([
...> LineStringZ.new([
...> PointZ.new(11, 12, 13),
...> PointZ.new(21, 22, 23)
...> ]),
...> LineStringZ.new([
...> PointZ.new(31, 32, 33),
...> PointZ.new(41, 42, 43)
...> ])
...> ])
...> )
2
"""
@spec size(t()) :: non_neg_integer()
def size(%MultiLineStringZ{line_strings: line_strings}), do: MapSet.size(line_strings)
@doc """
Checks if `MultiLineStringZ` contains `line_string`.
## Examples
iex> MultiLineStringZ.member?(
...> MultiLineStringZ.new([
...> LineStringZ.new([
...> PointZ.new(11, 12, 13),
...> PointZ.new(21, 22, 23)
...> ]),
...> LineStringZ.new([
...> PointZ.new(31, 32, 33),
...> PointZ.new(41, 42, 43)
...> ])
...> ]),
...> LineStringZ.new([
...> PointZ.new(31, 32, 33),
...> PointZ.new(41, 42, 43)
...> ])
...> )
true
iex> MultiLineStringZ.member?(
...> MultiLineStringZ.new([
...> LineStringZ.new([
...> PointZ.new(11, 12, 13),
...> PointZ.new(21, 22, 23)
...> ]),
...> LineStringZ.new([
...> PointZ.new(31, 32, 33),
...> PointZ.new(41, 42, 43)
...> ])
...> ]),
...> LineStringZ.new([
...> PointZ.new(11, 12, 13),
...> PointZ.new(41, 42, 43)
...> ])
...> )
false
"""
@spec member?(t(), LineStringZ.t()) :: boolean()
def member?(%MultiLineStringZ{line_strings: line_strings}, %LineStringZ{points: points}) do
MapSet.member?(line_strings, points)
end
@doc """
Converts `MultiLineStringZ` to a list.
"""
@spec to_list(t()) :: [PointZ.t()]
def to_list(%MultiLineStringZ{line_strings: line_strings}), do: MapSet.to_list(line_strings)
@compile {:inline, to_wkt_line_strings: 1}
defp to_wkt_line_strings([]), do: "EMPTY"
defp to_wkt_line_strings([line_string | line_strings]) do
<<"(",
Enum.reduce(line_strings, LineStringZ.to_wkt_points(line_string), fn line_string, acc ->
<<acc::binary(), ", ", LineStringZ.to_wkt_points(line_string)::binary()>>
end)::binary(), ")">>
end
@doc false
@compile {:inline, to_wkb: 4}
@spec to_wkb(t(), srid, endian, mode) :: wkb
when srid: Geometry.srid() | nil,
endian: Geometry.endian(),
mode: Geometry.mode(),
wkb: Geometry.wkb()
def to_wkb(%MultiLineStringZ{line_strings: line_strings}, srid, endian, mode) do
<<
WKB.byte_order(endian, mode)::binary(),
wkb_code(endian, not is_nil(srid), mode)::binary(),
WKB.srid(srid, endian, mode)::binary(),
to_wkb_line_strings(line_strings, endian, mode)::binary()
>>
end
@compile {:inline, to_wkb_line_strings: 3}
defp to_wkb_line_strings(line_strings, endian, mode) do
Enum.reduce(line_strings, WKB.length(line_strings, endian, mode), fn line_string, acc ->
<<acc::binary(), LineStringZ.to_wkb(line_string, nil, endian, mode)::binary()>>
end)
end
@compile {:inline, wkb_code: 3}
defp wkb_code(endian, srid?, :hex) do
case {endian, srid?} do
{:xdr, false} -> "80000005"
{:ndr, false} -> "05000080"
{:xdr, true} -> "A0000005"
{:ndr, true} -> "050000A0"
end
end
defp wkb_code(endian, srid?, :binary) do
case {endian, srid?} do
{:xdr, false} -> <<0x80000005::big-integer-size(32)>>
{:ndr, false} -> <<0x80000005::little-integer-size(32)>>
{:xdr, true} -> <<0xA0000005::big-integer-size(32)>>
{:ndr, true} -> <<0xA0000005::little-integer-size(32)>>
end
end
defimpl Enumerable do
# credo:disable-for-next-line Credo.Check.Readability.Specs
def count(multi_line_string) do
{:ok, MultiLineStringZ.size(multi_line_string)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def member?(multi_line_string, val) do
{:ok, MultiLineStringZ.member?(multi_line_string, val)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def slice(multi_line_string) do
size = MultiLineStringZ.size(multi_line_string)
{:ok, size,
&Enumerable.List.slice(MultiLineStringZ.to_list(multi_line_string), &1, &2, size)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def reduce(multi_line_string, acc, fun) do
Enumerable.List.reduce(MultiLineStringZ.to_list(multi_line_string), acc, fun)
end
end
defimpl Collectable do
# credo:disable-for-next-line Credo.Check.Readability.Specs
def into(%MultiLineStringZ{line_strings: line_strings}) do
fun = fn
list, {:cont, x} ->
[{x, []} | list]
list, :done ->
map =
Map.merge(
line_strings.map,
Enum.into(list, %{}, fn {line_string, []} -> {line_string.points, []} end)
)
%MultiLineStringZ{line_strings: %{line_strings | map: map}}
_list, :halt ->
:ok
end
{[], fun}
end
end
end
|
lib/geometry/multi_line_string_z.ex
| 0.930126
| 0.568775
|
multi_line_string_z.ex
|
starcoder
|
defmodule ListToCsv do
@moduledoc """
`ListToCsv` is main module of this library.
"""
alias ListToCsv.Key
alias ListToCsv.Option
@type target() :: map() | struct() | keyword()
@doc """
Returns a list with header and body rows
## Options
See `ListToCsv.Option` for details.
- `:headers` - (list(string)) Optional.
- `:Keys` - (list(Key.many())) Required.
Keys can be atoms, strings, numbers, or functions.
- `:length` - (list({Key.many(), length}) | nil) Optional.
The length of the list can be variable, so if it is not fixed, the result
value is not constant width.
## Examples
iex> ListToCsv.parse([%{name: "bob"}], headers: ["name"], keys: [:name])
[["name"], ["bob"]]
iex> ListToCsv.parse([%{name: "bob"}], keys: [:name])
[["bob"]]
iex> ListToCsv.parse(
...> [
...> %{
...> name: "name1",
...> items: [
...> %{title: "title1", code: "code1"},
...> %{title: "title2", code: "code2"},
...> %{title: "title3", code: "code3"}
...> ]
...> },
...> %{
...> name: "name2",
...> items: [
...> %{title: "title4", code: "code4"},
...> %{title: "title5", code: "code5"},
...> %{title: "title6", code: "code6"},
...> %{title: "title7", code: "code7"},
...> %{title: "title8", code: "code8"}
...> ]
...> }
...> ],
...> headers: [
...> "名前",
...> "アイテム#名",
...> "アイテム#コード",
...> "item overflow?"
...> ],
...> keys: [
...> :name,
...> [:items, :N, :title],
...> [:items, :N, :code],
...> [:items, &(length(&1) > 4)]
...> ],
...> length: [items: 4]
...> )
[
["名前", "アイテム1名", "アイテム1コード", "アイテム2名", "アイテム2コード", "アイテム3名", "アイテム3コード", "アイテム4名", "アイテム4コード", "item overflow?"],
["name1", "title1", "code1", "title2", "code2", "title3", "code3", "", "", "false"],
["name2", "title4", "code4", "title5", "code5", "title6", "code6", "title7", "code7", "true"]
]
iex> ListToCsv.parse(
...> [
...> %{
...> name: "name1",
...> items: [
...> %{title: "title1", code: "code1"},
...> %{title: "title2", code: "code2"},
...> %{title: "title3", code: "code3"}
...> ]
...> },
...> %{
...> name: "name2",
...> items: [
...> %{title: "title4", code: "code4"},
...> %{title: "title5", code: "code5"},
...> %{title: "title6", code: "code6"},
...> %{title: "title7", code: "code7"},
...> %{title: "title8", code: "code8"}
...> ]
...> }
...> ],
...> keys: [
...> :name,
...> [:items, :N, :title],
...> [:items, :N, :code],
...> [:items, &(length(&1) > 4)]
...> ],
...> length: [items: 4]
...> )
[
["name1", "title1", "code1", "title2", "code2", "title3", "code3", "", "", "false"],
["name2", "title4", "code4", "title5", "code5", "title6", "code6", "title7", "code7", "true"]
]
"""
@spec parse(list(target()), Option.t()) :: list(list(String.t()))
def parse(list, options) do
case options[:headers] do
nil ->
parse_rows(list, Option.expand(options))
_ ->
{header_list, keys_list} = Option.expand(options) |> Enum.unzip()
[header_list | parse_rows(list, keys_list)]
end
end
  @spec parse_rows(list(target()), list(Key.many())) :: list(list(String.t()))
def parse_rows(list, keys_list), do: Enum.map(list, &parse_row(&1, keys_list))
@spec parse_row(target(), list(Key.many())) :: list(String.t())
def parse_row(map, keys_list), do: Enum.map(keys_list, &parse_cell(map, &1))
@spec parse_cell(any(), Key.many()) :: String.t()
def parse_cell(map, key), do: "#{get(map, key)}"
@spec get(any(), Key.many()) :: any()
def get(map, key) when not is_list(key), do: get(map, [key])
def get(map, [{fun, keys} | rest]) when is_function(fun) and is_list(keys) do
apply(fun, Enum.map(keys, &get(map, &1))) |> get(rest)
end
def get(tuple, [key | rest]) when is_integer(key) and is_tuple(tuple) do
elem(tuple, key - 1) |> get(rest)
rescue
ArgumentError -> nil |> get(rest)
end
def get(list, [key | rest]) when is_integer(key) do
List.pop_at(list || [], key - 1) |> elem(0) |> get(rest)
end
def get(map, [key | rest]) when is_function(key), do: get(key.(map), rest)
def get(map, [key | rest]) when is_struct(map), do: get(Map.get(map, key), rest)
def get(map, [key | rest]), do: get(map[key], rest)
def get(map, []), do: map
end
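
# A hedged sketch of ListToCsv.get/2 above: integer keys are 1-based and work
# on both lists and tuples, and function keys transform the current value
# (all data below is illustrative).
ListToCsv.get(%{items: [%{name: "a"}, %{name: "b"}]}, [:items, 2, :name])
#=> "b"
ListToCsv.get({10, 20, 30}, [2])
#=> 20
ListToCsv.get(%{name: "bob"}, [:name, &String.upcase/1])
#=> "BOB"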
|
lib/list_to_csv.ex
| 0.819893
| 0.472866
|
list_to_csv.ex
|
starcoder
|
defmodule ForgeAbi.Direction do
@moduledoc false
use Protobuf, enum: true, syntax: :proto3
@type t :: integer | :mutual | :one_way | :union
field :mutual, 0
field :one_way, 1
field :union, 2
end
defmodule ForgeAbi.Validity do
@moduledoc false
use Protobuf, enum: true, syntax: :proto3
@type t :: integer | :both | :valid | :invalid
field :both, 0
field :valid, 1
field :invalid, 2
end
defmodule ForgeAbi.PageOrder do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
field: String.t(),
type: String.t()
}
defstruct [:field, :type]
field :field, 1, type: :string
field :type, 2, type: :string
end
defmodule ForgeAbi.PageInput do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
cursor: String.t(),
size: non_neg_integer,
order: [ForgeAbi.PageOrder.t()]
}
defstruct [:cursor, :size, :order]
field :cursor, 1, type: :string
field :size, 2, type: :uint32
field :order, 3, repeated: true, type: ForgeAbi.PageOrder
end
defmodule ForgeAbi.TypeFilter do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
types: [String.t()]
}
defstruct [:types]
field :types, 1, repeated: true, type: :string
end
defmodule ForgeAbi.TimeFilter do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
start_date_time: String.t(),
end_date_time: String.t()
}
defstruct [:start_date_time, :end_date_time]
field :start_date_time, 1, type: :string
field :end_date_time, 2, type: :string
end
defmodule ForgeAbi.AddressFilter do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
sender: String.t(),
receiver: String.t(),
direction: ForgeAbi.Direction.t()
}
defstruct [:sender, :receiver, :direction]
field :sender, 1, type: :string
field :receiver, 2, type: :string
field :direction, 3, type: ForgeAbi.Direction, enum: true
end
defmodule ForgeAbi.PageInfo do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
cursor: String.t(),
next: boolean,
total: non_neg_integer
}
defstruct [:cursor, :next, :total]
field :cursor, 1, type: :string
field :next, 2, type: :bool
field :total, 3, type: :uint32
end
defmodule ForgeAbi.IndexedTransaction do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
hash: String.t(),
sender: String.t(),
receiver: String.t(),
time: String.t(),
type: String.t(),
tx: ForgeAbi.Transaction.t() | nil,
valid: boolean,
code: ForgeAbi.StatusCode.t()
}
defstruct [:hash, :sender, :receiver, :time, :type, :tx, :valid, :code]
field :hash, 1, type: :string
field :sender, 2, type: :string
field :receiver, 3, type: :string
field :time, 4, type: :string
field :type, 5, type: :string
field :tx, 6, type: ForgeAbi.Transaction
field :valid, 20, type: :bool
field :code, 21, type: ForgeAbi.StatusCode, enum: true
end
defmodule ForgeAbi.IndexedAccountState do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
address: String.t(),
balance: ForgeAbi.BigUint.t() | nil,
num_assets: non_neg_integer,
num_txs: non_neg_integer,
nonce: non_neg_integer,
genesis_time: String.t(),
renaissance_time: String.t(),
moniker: String.t(),
migrated_from: String.t(),
migrated_to: String.t(),
total_received_stakes: ForgeAbi.BigUint.t() | nil,
total_stakes: ForgeAbi.BigUint.t() | nil,
total_unstakes: ForgeAbi.BigUint.t() | nil,
recent_num_txs: [non_neg_integer]
}
defstruct [
:address,
:balance,
:num_assets,
:num_txs,
:nonce,
:genesis_time,
:renaissance_time,
:moniker,
:migrated_from,
:migrated_to,
:total_received_stakes,
:total_stakes,
:total_unstakes,
:recent_num_txs
]
field :address, 1, type: :string
field :balance, 2, type: ForgeAbi.BigUint
field :num_assets, 3, type: :uint64
field :num_txs, 4, type: :uint64
field :nonce, 5, type: :uint64
field :genesis_time, 6, type: :string
field :renaissance_time, 7, type: :string
field :moniker, 8, type: :string
field :migrated_from, 9, type: :string
field :migrated_to, 10, type: :string
field :total_received_stakes, 11, type: ForgeAbi.BigUint
field :total_stakes, 12, type: ForgeAbi.BigUint
field :total_unstakes, 13, type: ForgeAbi.BigUint
field :recent_num_txs, 14, repeated: true, type: :uint64
end
defmodule ForgeAbi.IndexedAssetState do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
address: String.t(),
owner: String.t(),
genesis_time: String.t(),
renaissance_time: String.t(),
moniker: String.t(),
readonly: boolean,
consumed_time: String.t(),
issuer: String.t(),
parent: String.t(),
transferrable: boolean,
ttl: non_neg_integer,
data: Google.Protobuf.Any.t() | nil
}
defstruct [
:address,
:owner,
:genesis_time,
:renaissance_time,
:moniker,
:readonly,
:consumed_time,
:issuer,
:parent,
:transferrable,
:ttl,
:data
]
field :address, 1, type: :string
field :owner, 2, type: :string
field :genesis_time, 3, type: :string
field :renaissance_time, 4, type: :string
field :moniker, 5, type: :string
field :readonly, 6, type: :bool
field :consumed_time, 7, type: :string
field :issuer, 8, type: :string
field :parent, 9, type: :string
field :transferrable, 10, type: :bool
field :ttl, 11, type: :uint64
field :data, 50, type: Google.Protobuf.Any
end
defmodule ForgeAbi.IndexedStakeState do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
address: String.t(),
balance: ForgeAbi.BigUint.t() | nil,
sender: String.t(),
receiver: String.t(),
genesis_time: String.t(),
renaissance_time: String.t(),
message: String.t(),
type: non_neg_integer
}
defstruct [
:address,
:balance,
:sender,
:receiver,
:genesis_time,
:renaissance_time,
:message,
:type
]
field :address, 1, type: :string
field :balance, 2, type: ForgeAbi.BigUint
field :sender, 3, type: :string
field :receiver, 4, type: :string
field :genesis_time, 5, type: :string
field :renaissance_time, 6, type: :string
field :message, 7, type: :string
field :type, 8, type: :uint32
end
defmodule ForgeAbi.IndexedBlock do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
height: non_neg_integer,
time: String.t(),
proposer: String.t(),
num_txs: non_neg_integer,
num_invalid_txs: non_neg_integer
}
defstruct [:height, :time, :proposer, :num_txs, :num_invalid_txs]
field :height, 1, type: :uint64
field :time, 2, type: :string
field :proposer, 3, type: :string
field :num_txs, 4, type: :uint64
field :num_invalid_txs, 5, type: :uint64
end
defmodule ForgeAbi.HealthStatus do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
consensus: ForgeAbi.ConsensusStatus.t() | nil,
network: ForgeAbi.NetworkStatus.t() | nil,
storage: ForgeAbi.StorageStatus.t() | nil,
forge: ForgeAbi.ForgeStatus.t() | nil
}
defstruct [:consensus, :network, :storage, :forge]
field :consensus, 1, type: ForgeAbi.ConsensusStatus
field :network, 2, type: ForgeAbi.NetworkStatus
field :storage, 3, type: ForgeAbi.StorageStatus
field :forge, 4, type: ForgeAbi.ForgeStatus
end
defmodule ForgeAbi.ConsensusStatus do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
health: boolean,
synced: boolean,
block_height: non_neg_integer
}
defstruct [:health, :synced, :block_height]
field :health, 1, type: :bool
field :synced, 2, type: :bool
field :block_height, 3, type: :uint64
end
defmodule ForgeAbi.NetworkStatus do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
health: boolean,
num_peers: non_neg_integer
}
defstruct [:health, :num_peers]
field :health, 1, type: :bool
field :num_peers, 2, type: :uint32
end
defmodule ForgeAbi.StorageStatus do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
health: boolean,
indexer_server: String.t(),
state_db: String.t(),
disk_space: ForgeAbi.DiskSpaceStatus.t() | nil
}
defstruct [:health, :indexer_server, :state_db, :disk_space]
field :health, 1, type: :bool
field :indexer_server, 2, type: :string
field :state_db, 3, type: :string
field :disk_space, 4, type: ForgeAbi.DiskSpaceStatus
end
defmodule ForgeAbi.DiskSpaceStatus do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
forge_usage: String.t(),
total: String.t()
}
defstruct [:forge_usage, :total]
field :forge_usage, 1, type: :string
field :total, 2, type: :string
end
defmodule ForgeAbi.ForgeStatus do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
health: boolean,
abi_server: String.t(),
forge_web: String.t(),
abci_server: ForgeAbi.AbciServerStatus.t() | nil
}
defstruct [:health, :abi_server, :forge_web, :abci_server]
field :health, 1, type: :bool
field :abi_server, 2, type: :string
field :forge_web, 3, type: :string
field :abci_server, 4, type: ForgeAbi.AbciServerStatus
end
defmodule ForgeAbi.AbciServerStatus do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
abci_consensus: String.t(),
abci_info: String.t()
}
defstruct [:abci_consensus, :abci_info]
field :abci_consensus, 1, type: :string
field :abci_info, 2, type: :string
end
defmodule ForgeAbi.ValidityFilter do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
validity: ForgeAbi.Validity.t()
}
defstruct [:validity]
field :validity, 1, type: ForgeAbi.Validity, enum: true
end
defmodule ForgeAbi.RangeFilter do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
from: non_neg_integer,
to: non_neg_integer
}
defstruct [:from, :to]
field :from, 1, type: :uint64
field :to, 2, type: :uint64
end
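
# A hedged construction sketch for the generated structs above (field values
# are illustrative):
%ForgeAbi.PageInput{
  cursor: "",
  size: 20,
  order: [%ForgeAbi.PageOrder{field: "time", type: "desc"}]
}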
|
lib/protobuf/gen/trace_type.pb.ex
| 0.821331
| 0.635194
|
trace_type.pb.ex
|
starcoder
|
defmodule Patch.Macro do
@doc """
Utility function that acts like `inspect/1` but prints out the Macro as code.
"""
@spec debug(ast :: Macro.t()) :: Macro.t()
def debug(ast) do
ast
|> Macro.to_string()
|> IO.puts()
ast
end
@doc """
  Performs a non-hygienic match.
  If the match succeeds, true is returned; otherwise a MatchError is raised.
  Since the match is non-hygienic, pins can be used from the user scope and
  binds will affect the user scope.
"""
@spec match(pattern :: Macro.t(), expression :: Macro.t()) :: Macro.t()
defmacro match(pattern, expression) do
user_pattern = user_variables(pattern)
pattern_expression = pattern_expression(pattern)
variables = variables(pattern)
quote generated: true do
unquote(pattern_expression) =
case unquote(expression) do
unquote(user_pattern) ->
_ = unquote(variables)
unquote(expression)
_ ->
raise MatchError, term: unquote(expression)
end
_ = unquote(variables)
true
end
end
@doc """
  Performs a match, returning true if the pattern matches, false otherwise.
"""
@spec match?(pattern :: Macro.t(), expression :: Macro.t()) :: Macro.t()
defmacro match?(pattern, expression) do
quote generated: true do
try do
Patch.Macro.match(unquote(pattern), unquote(expression))
true
rescue
MatchError ->
false
end
end
end
## Private
defp pattern_expression(pattern) do
Macro.prewalk(pattern, fn
{:^, _, [{name, meta, _}]} ->
{name, meta, nil}
{:_, _, _} ->
unique_variable()
node ->
node
end)
end
defp unique_variable do
{:"_ignore#{:erlang.unique_integer([:positive])}", [generated: true], nil}
end
defp user_variables(pattern) do
Macro.prewalk(pattern, fn
{name, meta, context} when is_atom(name) and is_atom(context) ->
{name, meta, nil}
node ->
node
end)
end
defp variables(pattern) do
pattern
|> Macro.prewalk([], fn
{:_, _, _} = node, acc ->
{node, acc}
{:@, anno, _}, acc ->
# Replace module attribute with wildcard so we don't convert into a variable
{{:_, anno, nil}, acc}
{name, meta, context} = node, acc when is_atom(name) and is_atom(context) ->
ignored? =
name
|> Atom.to_string()
|> String.starts_with?("_")
if ignored? do
{node, acc}
else
{node, [{name, meta, nil} | acc]}
end
node, acc ->
{node, acc}
end)
|> elem(1)
end
end
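
# A hedged sketch of Patch.Macro.match?/2 above (values illustrative): pins
# are resolved in the caller's scope.
require Patch.Macro
expected = 1
Patch.Macro.match?({^expected, _rest}, {1, 2})
#=> true
Patch.Macro.match?({^expected, _rest}, {3, 4})
#=> false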
|
lib/patch/macro.ex
| 0.720958
| 0.514583
|
macro.ex
|
starcoder
|
defmodule Radixir.Core.Request.BuildTransaction.Operation.DataObject.TokenData do
@moduledoc false
# @moduledoc """
# Methods to create each map in `TokenData` map.
# """
alias Radixir.StitchPlan
@type stitch_plans :: list(keyword)
@type params :: keyword
@doc """
Generates stitch plan for `type` map in `TokenData` map. Value is set to `TokenData`.
## Parameters
- `stitch_plans`: On-going stitch plans that will be stitched into a map.
"""
@spec type(stitch_plans) :: stitch_plans
def type(stitch_plans) do
StitchPlan.type(stitch_plans, [type: "TokenData"], [:data, :data_object])
end
@doc """
Generates stitch plan for `granularity` map in `TokenData` map.
## Parameters
- `stitch_plans`: On-going stitch plans that will be stitched into a map.
- `params`: Keyword list that contains:
- `granularity` (required, string): Granularity.
"""
@spec granularity(stitch_plans, params) :: stitch_plans
def granularity(stitch_plans, params) do
schema = [
granularity: [
type: :string,
required: true
]
]
granularity =
NimbleOptions.validate!(params, schema)
|> Keyword.get(:granularity)
stitch_plan = [[keys: [:data, :data_object, :granularity], value: granularity]]
stitch_plan ++ stitch_plans
end
@doc """
Generates stitch plan for `is_mutable` map in `TokenData` map.
## Parameters
- `stitch_plans`: On-going stitch plans that will be stitched into a map.
- `params`: Keyword list that contains:
    - `is_mutable` (required, boolean): Whether the token is mutable.
"""
@spec is_mutable(stitch_plans, params) :: stitch_plans
def is_mutable(stitch_plans, params) do
schema = [
is_mutable: [
type: :boolean,
required: true
]
]
is_mutable =
NimbleOptions.validate!(params, schema)
|> Keyword.get(:is_mutable)
stitch_plan = [[keys: [:data, :data_object, :is_mutable], value: is_mutable]]
stitch_plan ++ stitch_plans
end
@doc """
Generates stitch plan for `owner` map in `TokenData` map.
## Parameters
- `stitch_plans`: On-going stitch plans that will be stitched into a map.
- `params`: Keyword list that contains:
- `address` (required, string): Owner address.
"""
@spec owner(stitch_plans, params) :: stitch_plans
def owner(stitch_plans, params) do
StitchPlan.owner(stitch_plans, params, [:data, :data_object])
end
@doc """
Generates stitch plan for `sub_entity` map in `TokenData` map.
## Parameters
- `stitch_plans`: On-going stitch plans that will be stitched into a map.
- `params`: Keyword list that contains:
- `sub_entity_address` (required, string): Sub Entity address.
- `validator_address` (optional, string): Validator address.
- `epoch_unlock` (optional, integer): Epoch unlock.
"""
@spec sub_entity(stitch_plans, params) :: stitch_plans
def sub_entity(stitch_plans, params) do
StitchPlan.sub_entity(stitch_plans, params, [:data, :data_object, :owner])
end
end
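
# A hedged sketch of chaining the stitch-plan builders above (all values are
# illustrative placeholders):
alias Radixir.Core.Request.BuildTransaction.Operation.DataObject.TokenData

[]
|> TokenData.type()
|> TokenData.granularity(granularity: "1")
|> TokenData.is_mutable(is_mutable: true)
|> TokenData.owner(address: "rdx1_illustrative_address")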
|
lib/radixir/core/request/build_transaction/operation/data_object/token_data.ex
| 0.896668
| 0.482673
|
token_data.ex
|
starcoder
|
defmodule Chronos do
import Chronos.Validation
@datetime1970 {{1970, 1, 1}, {0, 0, 0}}
@doc """
Chronos is an Elixir library for working with dates and times.
iex(1)> Chronos.today
{2013, 8, 21}
"""
def today, do: :erlang.date
def now, do: :calendar.now_to_datetime(:erlang.timestamp)
@doc """
The epoch_time/1 function returns the number of seconds since January 1, 1970 00:00:00.
  If the date is prior to January 1, 1970, the integer will be negative.
  iex(1)> Chronos.epoch_time({{2012, 12, 21}, {12, 30, 55}})
  1356093055
"""
def epoch_time({y, m, d}), do: epoch_time({{y, m, d}, {0,0,0}})
def epoch_time(datetime) do
datetime_to_seconds(datetime) - datetime_to_seconds(@datetime1970)
end
def datetime_to_seconds(datetime), do: :calendar.datetime_to_gregorian_seconds(datetime)
@doc """
  The from_epoch_time/1 function converts an epoch time to a datetime tuple.
  iex(1)> Chronos.from_epoch_time(1356048000)
  {{2012, 12, 21}, {0, 0, 0}}
"""
def from_epoch_time(timestamp) do
timestamp
|> Kernel.+(datetime_to_seconds(@datetime1970))
|> :calendar.gregorian_seconds_to_datetime
end
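  # Hedged round-trip sketch for the two functions above (the timestamp is the
  # same one used in the examples):
  #
  #     Chronos.epoch_time({{2012, 12, 21}, {0, 0, 0}})  #=> 1356048000
  #     Chronos.from_epoch_time(1356048000)              #=> {{2012, 12, 21}, {0, 0, 0}}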
@doc """
The year function allows you to extract the year from a date tuple
iex(1)> Chronos.year({2013, 8, 21})
2013
iex(2)> {2012, 12, 21} |> Chronos.year
2012
"""
def year(date \\ today()) do
date
|> validate()
|> _extract_seg(:year)
end
@doc """
The month function allows you to extract the month from a date tuple
iex(1)> Chronos.month({2013, 8, 21})
8
iex(2)> {2012, 12, 21} |> Chronos.month
  12
"""
def month(date \\ today()) do
date
|> validate()
|> _extract_seg(:month)
end
@doc """
The day function allows you to extract the day from a date tuple
iex(1)> Chronos.day({2013, 8, 21})
21
iex(2)> {2012, 12, 21} |> Chronos.day
21
"""
def day(date \\ today()) do
date
|> validate()
|> _extract_seg(:day)
end
@doc """
The hour function allows you to extract the hour from a date/time tuple
iex> Chronos.hour({{2013, 8, 21}, {13, 34, 45}})
13
iex> {{2013, 8, 21}, {13, 34, 45}} |> Chronos.hour
13
"""
def hour(datetime \\ now()) do
datetime
|> validate()
|> _extract_seg(:hour)
end
@doc """
The min function allows you to extract the minutes from a date/time tuple
iex> Chronos.min({{2013, 8, 21}, {13, 34, 45}})
34
iex> {{2013, 8, 21}, {13, 34, 45}} |> Chronos.min
34
"""
def min(datetime \\ now()) do
datetime
|> validate()
|> _extract_seg(:min)
end
@doc """
The sec function allows you to extract the seconds from a date/time tuple
iex> Chronos.sec({{2013, 8, 21}, {13, 34, 45}})
45
iex> {{2013, 8, 21}, {13, 34, 45}} |> Chronos.sec
45
"""
def sec(datetime \\ now()) do
datetime
|> validate()
|> _extract_seg(:sec)
end
@doc """
Returns an integer representing the day of the week, 1..7, with Monday == 1.
iex(1)> Chronos.wday({2013, 8, 21})
3
"""
def wday(date \\ today()), do: :calendar.day_of_the_week(date)
def sunday?(date \\ today()), do: wday(date) == 7
def monday?(date \\ today()), do: wday(date) == 1
def tuesday?(date \\ today()), do: wday(date) == 2
def wednesday?(date \\ today()), do: wday(date) == 3
def thursday?(date \\ today()), do: wday(date) == 4
def friday?(date \\ today()), do: wday(date) == 5
def saturday?(date \\ today()), do: wday(date) == 6
@doc """
The yday function allows you to extract the day of the year (1-366) from a
date tuple
iex(1)> Chronos.yday({2013, 8, 21})
233
  iex(2)> {2012, 12, 21} |> Chronos.yday
356
"""
def yday(date \\ today()) do
yd =
date
|> validate()
|> _extract_seg(:year)
|> :calendar.date_to_gregorian_days(1,1)
:calendar.date_to_gregorian_days(date) - yd + 1
end
@doc """
The yesterday function is based on the current date
iex(1)> Chronos.yesterday
{2013, 8, 20}
or you can pass it a date:
iex(2)> {2012, 12, 21} |> Chronos.yesterday
{2012, 12, 20}
"""
def yesterday(date \\ today()), do: calculate_date_for_days(date, -1)
@doc """
The tomorrow function is based on the current date
iex(1)> Chronos.tomorrow
{2013, 8, 22}
or you can pass it a date:
iex(2)> {2012, 12, 21} |> Chronos.tomorrow
{2012, 12, 22}
"""
def tomorrow(date \\ today()), do: calculate_date_for_days(date, 1)
@doc """
  The beginning_of_week/2 function returns the date of the starting day of the week for the given date.
  It defaults to today for the given date and Monday (1) for the starting day of the week.
  Mon = 1, Tue = 2, Wed = 3, Thu = 4, Fri = 5, Sat = 6, Sun = 7
If today is {2012,12,21}
iex(1)> Chronos.beginning_of_week
{2012,12,17}
iex(2)> Chronos.beginning_of_week({2015,1,20})
{2015,1,19}
iex(3)> Chronos.beginning_of_week({2015,1,20},3)
{2015,1,14}
"""
  def beginning_of_week(date \\ today(), start_day \\ 1) do
    days = [1, 2, 3, 4, 5, 6, 7]
    offset = start_day - 1
    # rotate the week so it begins on start_day
    days = Enum.drop(days, offset) ++ Enum.take(days, offset)
    Enum.find_index(days, &(&1 == wday(date))) |> days_ago(date)
  end
@doc """
  The end_of_week/2 function returns the date of the ending day of the week for the given date.
  It defaults to today for the given date and Sunday (7) for the ending day of the week.
  Mon = 1, Tue = 2, Wed = 3, Thu = 4, Fri = 5, Sat = 6, Sun = 7
If today is {2012,12,21}
iex(1)> Chronos.end_of_week
{2012,12,23}
iex(2)> Chronos.end_of_week({2015,1,20})
{2015,1,25}
iex(3)> Chronos.end_of_week({2015,1,20},3)
{2015,1,21}
"""
def end_of_week(date \\ today(), end_day \\ 7) do
days = [1, 2, 3, 4, 5, 6, 7]
offset = wday(date) - 1
# rotate the list so that the current weekday comes first
days = Enum.drop(days, offset) ++ Enum.take(days, offset)
Enum.find_index(days, &(&1 == end_day)) |> days_from(date)
end
@doc """
The following functions all have similar behavior. The days_ago/2 and weeks_ago/2
functions take an integer representing the number of days or weeks in the past and
return the corresponding date. There is an optional argument for a date to base the
calculation on, but if no date is provided then the current date is used.
iex(1)> Chronos.days_ago(5)
{2013, 8, 16}
iex(2)> Chronos.weeks_ago(3)
{2013, 7, 31}
The days_from/2 and weeks_from/2 functions return a future date calculated by the number
of days or weeks. There is an optional argument for a date to base the
calculation on, but if no date is provided then the current date is used.
iex(1)> Chronos.days_from(5)
{2013, 8, 26}
iex(2)> Chronos.weeks_from(3)
{2013, 9, 11}
"""
def days_ago(days, date \\ today())
def days_ago(days, date) when days >= 0 do
calculate_date_for_days(date, -days)
end
def days_ago(days, _) when days < 0 do
raise ArgumentError, message: "Number of days must be zero or greater"
end
def days_from(days, date \\ today())
def days_from(days, date) when days >= 0 do
calculate_date_for_days(date, days)
end
def days_from(_, _) do
raise ArgumentError, message: "Number of days must be zero or greater"
end
def weeks_ago(weeks, date \\ today())
def weeks_ago(weeks, date) when weeks >= 0 do
calculate_date_for_weeks(date, -weeks)
end
def weeks_ago(_, _) do
raise ArgumentError, message: "Number of weeks must be zero or greater"
end
def weeks_from(weeks, date \\ today())
def weeks_from(weeks, date) when weeks >= 0 do
calculate_date_for_weeks(date, weeks)
end
def weeks_from(_, _) do
raise ArgumentError, message: "Number of weeks must be zero or greater"
end
defp calculate_date_for_days(date, days) do
date
|> convert_date_to_days()
|> date_for_days(days)
end
defp calculate_date_for_weeks(date, weeks) do
date
|> convert_date_to_days()
|> date_for_weeks(weeks)
end
defp convert_date_to_days(date) do
date
|> validate()
|> days_for_date()
end
defp days_for_date(date), do: :calendar.date_to_gregorian_days(date)
defp date_for_days(days, offset) when is_integer(days) do
:calendar.gregorian_days_to_date(days + offset)
end
defp date_for_weeks(days, weeks) when is_integer(days) do
date_for_days(days, weeks * 7)
end
defp _extract_seg({ year, _, _ }, :year), do: year
defp _extract_seg({ _, month, _ }, :month), do: month
defp _extract_seg({ _, _, day }, :day), do: day
defp _extract_seg({ _date, {hour, _, _}}, :hour), do: hour
defp _extract_seg({ _date, {_, min, _}}, :min), do: min
defp _extract_seg({ _date, {_, _, sec}}, :sec), do: sec
defp _opts_date({ :ok, date }), do: date
@doc """
There is an option to supply a date. This is handy for testing.
defmodule YourModule do
use Chronos, date: {2012, 12, 21}
end
iex(1)> YourModule.today
{2012, 12, 21}
"""
defmacro __using__(opts \\ []) do
date = fn ->
cond do
opts[:date] ->
opts
|> Keyword.fetch(:date)
|> _opts_date
true ->
:erlang.date()
end
end
quote do
def today, do: unquote(date.())
def now, do: unquote(__MODULE__).now
def epoch_time(datetime), do: unquote(__MODULE__).epoch_time(datetime)
def from_epoch_time(timestamp), do: unquote(__MODULE__).from_epoch_time(timestamp)
def year(date), do: unquote(__MODULE__).year(date)
def month(date), do: unquote(__MODULE__).month(date)
def day(date), do: unquote(__MODULE__).day(date)
def hour(datetime), do: unquote(__MODULE__).hour(datetime)
def min(datetime), do: unquote(__MODULE__).min(datetime)
def sec(datetime), do: unquote(__MODULE__).sec(datetime)
def wday(date), do: unquote(__MODULE__).wday(date)
def sunday?(date), do: unquote(__MODULE__).sunday?(date)
def monday?(date), do: unquote(__MODULE__).monday?(date)
def tuesday?(date), do: unquote(__MODULE__).tuesday?(date)
def wednesday?(date), do: unquote(__MODULE__).wednesday?(date)
def thursday?(date), do: unquote(__MODULE__).thursday?(date)
def friday?(date), do: unquote(__MODULE__).friday?(date)
def saturday?(date), do: unquote(__MODULE__).saturday?(date)
def yday(date), do: unquote(__MODULE__).yday(date)
def yesterday(date \\ unquote(date.())) do
unquote(__MODULE__).yesterday(date)
end
def tomorrow(date \\ unquote(date.())) do
unquote(__MODULE__).tomorrow(date)
end
def days_ago(days, date \\ unquote(date.())) do
unquote(__MODULE__).days_ago(days, date)
end
def days_from(days, date \\ unquote(date.())) do
unquote(__MODULE__).days_from(days, date)
end
def weeks_ago(weeks, date \\ unquote(date.())) do
unquote(__MODULE__).weeks_ago(weeks, date)
end
def weeks_from(weeks, date \\ unquote(date.())) do
unquote(__MODULE__).weeks_from(weeks, date)
end
def beginning_of_week(date \\ unquote(date.()),start_day \\ 1) do
unquote(__MODULE__).beginning_of_week(date, start_day)
end
def end_of_week(date \\ unquote(date.()),end_day \\ 7) do
unquote(__MODULE__).end_of_week(date, end_day)
end
end
end
end
|
lib/chronos.ex
| 0.684791
| 0.652435
|
chronos.ex
|
starcoder
|
defmodule EctoBootMigration do
@moduledoc """
Helper module that can be used to easily ensure that the Ecto database was
migrated before the rest of the application was started.
## Rationale
There are many strategies for dealing with this issue,
e.g. see https://hexdocs.pm/distillery/guides/running_migrations.html
However, if you have any workers that rely on the DB schema and that are
launched upon boot by some methods, such as release post_start hooks, you can
easily run into a race condition. The application may crash as these workers
will not find the tables or columns they expect, and this will happen before
the post_start hook script sends its commands to the application process.
In stateless environments such as Docker it is sometimes simply more convenient
to perform migrations upon boot. This is exactly what this library does.
Currently it works only with PostgreSQL databases, but that should be easy to
extend.
## Usage
```elixir
defmodule MyApp do
use Application
def start(_type, _args) do
import Supervisor.Spec, warn: false
{:ok, _} = EctoBootMigration.migrate(:my_app)
children = [
supervisor(MyApp.Endpoint, []),
worker(MyApp.Repo, []),
]
Supervisor.start_link(children, [strategy: :one_for_one, name: MyApp.Supervisor])
end
end
```
## Credits
Inspired by https://github.com/bitwalker/distillery/blob/master/docs/Running%20Migrations.md
"""
@apps [
:crypto,
:ssl,
:postgrex,
:ecto
]
@doc """
Tries to run migrations.
Returns `true` if any migrations have happened.
Returns `false` if no migrations have happened.
Throws if an error occurred.
"""
@spec migrated?(any) :: boolean
def migrated?(app) do
case migrate(app) do
{:ok, :noop} ->
false
{:ok, {:migrated, _}} ->
true
{:error, reason} ->
throw(reason)
end
end
@doc """
Tries to run migrations.
Returns `{:ok, {:migrated, list_of_migration_ids}}` if any migrations have
happened.
Returns `{:ok, :noop}` if no migrations have happened.
Returns `{:error, reason}` if an error occurred.
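A minimal usage sketch (assuming an OTP app named `:my_app`):
```elixir
case EctoBootMigration.migrate(:my_app) do
{:ok, :noop} -> :ok
{:ok, {:migrated, ids}} -> IO.puts("ran migrations: " <> inspect(ids))
{:error, reason} -> raise "boot migration failed: " <> inspect(reason)
end
```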
"""
@spec migrate(any) ::
{:ok, :noop}
| {:ok, {:migrated, [pos_integer]}}
| {:error, any}
def migrate(app) do
log("Loading application #{inspect(app)}...")
if loaded?(app) do
start_dependencies()
repos = Application.get_env(app, :ecto_repos, [])
repos_pids = start_repos(repos)
migrations = run_migrations(repos)
stop_repos(repos_pids)
log("Done")
case migrations do
[] ->
{:ok, :noop}
migrations ->
{:ok, {:migrated, migrations}}
end
else
{:error, :not_loaded}
end
end
def loaded?(app) do
case Application.load(app) do
:ok ->
log("Loaded application #{inspect(app)}")
true
{:error, {:already_loaded, ^app}} ->
log("Application #{inspect(app)} is already loaded")
true
{:error, reason} ->
log("Failed to start the application: reason = #{inspect(reason)}")
false
end
end
@doc """
Start the Repo(s) for app, returns pids
"""
def start_repos(repos) do
log("Starting repos...")
repos_pids =
repos
|> Enum.reduce([], fn repo, acc ->
log("Starting repo: #{inspect(repo)}")
case repo.start_link(pool_size: 2) do
{:ok, pid} ->
log("Started repo: pid = #{inspect(pid)}")
[pid | acc]
{:error, {:already_started, pid}} ->
log("Repo was already started: pid = #{inspect(pid)}")
acc
{:error, reason} ->
log("Failed to start the repo: reason = #{inspect(reason)}")
acc
end
end)
log("Started repos, pids = #{inspect(repos_pids)}")
repos_pids
end
def run_migrations(repos) do
log("Running migrations")
migrations =
repos
|> Enum.reduce([], fn repo, acc ->
log("Running migration on repo #{inspect(repo)}")
result = Ecto.Migrator.run(repo, migrations_path(repo), :up, all: true)
log("Run migration on repo #{inspect(repo)}: result = #{inspect(result)}")
acc ++ result
end)
log("Run migrations: count = #{length(migrations)}")
migrations
end
@doc """
Start apps necessary for executing migrations
"""
def start_dependencies do
log("Starting dependencies...")
@apps
|> Enum.each(fn app ->
log("Starting dependency: application #{inspect(app)}")
Application.ensure_all_started(app)
end)
log("Started dependencies")
end
def stop_repos(repos_pids) do
log("Cleaning up...")
log("Stopping repos...")
repos_pids
|> Enum.map(fn repo_pid ->
# monitor each repo process, then send an exit message
log("Stopping repo #{inspect(repo_pid)}...")
ref = Process.monitor(repo_pid)
Process.exit(repo_pid, :normal)
{ref, repo_pid}
end)
|> Enum.each(fn {ref, repo_pid} ->
# wait for a DOWN message from each repo process
receive do
{:DOWN, ^ref, _, _, _} ->
log("repo #{inspect(repo_pid)} shutdown successfully")
after
5_000 ->
raise "timeout waiting for repo #{inspect(repo_pid)} to shutdown!"
end
end)
log("Stopped repos")
log("Cleaned up")
end
def log(msg), do: log(msg, debug?())
def log(msg, true), do: IO.puts("[EctoBootMigration] #{msg}")
def log(_msg, false) do
# noop
end
def debug? do
Application.get_env(:ecto_boot_migration, :debug, false)
end
defp priv_dir(app) do
"#{:code.priv_dir(app)}"
end
defp migrations_path(repo) do
priv_path_for(repo, "migrations")
end
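# e.g. for MyApp.Repo this resolves to "<priv dir of the app>/repo/migrations"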
defp priv_path_for(repo, filename) do
app = Keyword.get(repo.config, :otp_app)
repo_underscore = repo |> Module.split() |> List.last() |> Macro.underscore()
Path.join([priv_dir(app), repo_underscore, filename])
end
end
|
lib/ecto_boot_migration.ex
| 0.72331
| 0.692135
|
ecto_boot_migration.ex
|
starcoder
|
defmodule Expwd.Hashed do
@type t :: %__MODULE__{
alg: atom(),
hash: binary()
}
@enforce_keys [:alg, :hash]
defstruct [:alg, :hash]
@doc """
Securely and randomly generates a new application password
Returns a 2-element tuple containing:
- the plaintext password
- the hashed password encapsulated in the `%Expwd.Hashed{}` structure
## Example
```elixir
iex> {plaintext, hashedpwd} = Expwd.Hashed.gen()
{"3vDkDVeQFzH5HhE3s0yAO96CjwgzXOS2TJOOlenoERg",
%Expwd.Hashed{
alg: :sha256,
hash: <<125, 4, 175, 176, 235, 73, 77, 50, 180, 102, 178, 13, 255, 140, 175,
223, 181, 53, 249, 77, 121, 81, 78, 77, 194, 111, 26, 216, 46, 80, 94,
175>>
}}
```
"""
@spec gen() :: {String.t, t}
def gen() do
pwd = Base.encode64(:crypto.strong_rand_bytes(32), padding: false)
{
pwd,
new(pwd)
}
end
@doc """
Returns a `%Expwd.Hashed{}` structure from a cleartext password. The cleartext password is hashed.
## Example
```elixir
iex(2)> Expwd.Hashed.new("VDr7o6JbU24u4hFxqCcWun7u1R1fsD8IAWdtC0YtRRc")
%Expwd.Hashed{
alg: :sha256,
hash: <<124, 206, 231, 219, 155, 9, 125, 107, 29, 98, 59, 95, 162, 249, 196,
69, 195, 215, 178, 123, 204, 111, 18, 236, 13, 136, 117, 95, 252, 79, 180,
27>>
}
```
"""
@spec new(binary, Expwd.supported_algs) :: t
def new(password, alg \\ :sha256)
def new(password, alg) do
if alg not in Expwd.supported_hash_algorithms()
do
raise Expwd.UnsupportedHashAlgorithm, message: "Unsupported hash algorithm #{alg}"
end
%__MODULE__{
alg: alg,
hash: :crypto.hash(alg, password)
}
end
defmodule Portable do
@type t :: String.t()
@doc """
Returns a string representation of an `%Expwd.Hashed{}`
The portable string format can contains all characters of the base64 URL encoding and ":". It
starts with the string `"expwd:"`.
The portable representation does not contains the cleartext password and can therefore
safely be stored (in databases, plain files, configuration files...).
## Example
```elixir
iex> {pwd, hashed} = Expwd.Hashed.gen()
{"mkEMrKBnw/qKP9KOx66EEvPtb06vzvJdFzz7m0m2P8c",
%Expwd.Hashed{
alg: :sha256,
hash: <<43, 13, 74, 254, 1, 242, 163, 122, 21, 174, 244, 13, 155, 124, 183,
88, 139, 42, 121, 153, 0, 95, 172, 100, 34, 230, 255, 182, 250, 50, 239,
146>>
}}
iex> Expwd.Hashed.Portable.to_portable(hashed)
"expwd:sha256:Kw1K/gHyo3oVrvQNm3y3WIsqeZkAX6xkIub/tvoy75I"
```
"""
@spec to_portable(Expwd.Hashed.t) :: t
def to_portable(%Expwd.Hashed{alg: alg, hash: hash}) do
"expwd:" <> to_string(alg) <> ":" <> Base.encode64(hash, padding: false)
end
@doc """
Returns a `%Expwd.Hashed{}` from a portable representation generated by the `to_portable/1`
function
## Example
```elixir
iex> Expwd.Hashed.Portable.from_portable("expwd:sha256:Kw1K/gHyo3oVrvQNm3y3WIsqeZkAX6xkIub/tvoy75I")
%Expwd.Hashed{
alg: :sha256,
hash: <<43, 13, 74, 254, 1, 242, 163, 122, 21, 174, 244, 13, 155, 124, 183,
88, 139, 42, 121, 153, 0, 95, 172, 100, 34, 230, 255, 182, 250, 50, 239,
146>>
}
```
"""
@spec from_portable(t) :: Expwd.Hashed.t
def from_portable("expwd:" <> alg_and_digest) do
[alg_str, digest_b64] = String.split(alg_and_digest, ":")
alg = String.to_existing_atom(alg_str)
if alg not in Expwd.supported_hash_algorithms() do
raise Expwd.UnsupportedHashAlgorithm, message: "Unsupported hash algorithm " <> to_string(alg)
end
%Expwd.Hashed{
alg: alg,
hash: Base.decode64!(digest_b64, padding: false)
}
end
end
end
|
lib/expwd/hashed.ex
| 0.927108
| 0.793426
|
hashed.ex
|
starcoder
|
defmodule GenRMQ.Consumer.Telemetry do
@moduledoc """
GenRMQ emits [Telemetry][telemetry] events for consumers. It exposes several events for RabbitMQ connections and message
processing.
### Connection events
- `[:gen_rmq, :consumer, :connection, :start]` - Dispatched by a GenRMQ consumer when a connection to RabbitMQ is started.
- Measurement: `%{system_time: integer}`
- Metadata: `%{module: atom, attempt: integer, queue: String.t, exchange: String.t, routing_key: String.t}`
- `[:gen_rmq, :consumer, :connection, :stop]` - Dispatched by a GenRMQ consumer when a connection to RabbitMQ has been established. If an error
occurs when a connection is being established then the optional `:error` key will be present in the `metadata`.
- Measurement: `%{duration: native_time}`
- Metadata: `%{module: atom, attempt: integer, queue: String.t, exchange: String.t, routing_key: String.t, error: term()}`
- `[:gen_rmq, :consumer, :connection, :down]` - Dispatched by a GenRMQ consumer when a connection to RabbitMQ has been lost.
- Measurement: `%{system_time: integer}`
- Metadata: `%{module: atom, reason: atom}`
### Message events
- `[:gen_rmq, :consumer, :message, :ack]` - Dispatched by a GenRMQ consumer when a message has been acknowledged.
- Measurement: `%{system_time: integer}`
- Metadata: `%{message: String.t}`
- `[:gen_rmq, :consumer, :message, :reject]` - Dispatched by a GenRMQ consumer when a message has been rejected.
- Measurement: `%{system_time: integer}`
- Metadata: `%{message: String.t, requeue: boolean}`
- `[:gen_rmq, :consumer, :message, :start]` - Dispatched by a GenRMQ consumer when the processing of a message has begun.
- Measurement: `%{system_time: integer}`
- Metadata: `%{message: String.t, module: atom}`
- `[:gen_rmq, :consumer, :message, :stop]` - Dispatched by a GenRMQ consumer when the processing of a message has completed.
- Measurement: `%{duration: native_time}`
- Metadata: `%{message: String.t, module: atom}`
- `[:gen_rmq, :consumer, :message, :exception]` - Dispatched by a GenRMQ consumer when a message fails to be processed.
- Measurement: `%{duration: native_time}`
- Metadata: `%{module: atom, message: GenRMQ.Message.t, kind: atom, reason: term(), stacktrace: list()}`
[telemetry]: https://github.com/beam-telemetry/telemetry
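A handler can be attached to these events with `:telemetry.attach/4`. A minimal
sketch (the handler id and module name are illustrative):
```elixir
defmodule MyApp.GenRMQInstrumenter do
def setup do
:telemetry.attach(
"my-app-gen-rmq-message-stop",
[:gen_rmq, :consumer, :message, :stop],
&__MODULE__.handle_event/4,
nil
)
end

def handle_event(_event, %{duration: duration}, metadata, _config) do
ms = System.convert_time_unit(duration, :native, :millisecond)
IO.puts(inspect(metadata.module) <> " processed a message in " <> Integer.to_string(ms) <> "ms")
end
end
```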
"""
@doc false
def emit_message_ack_event(message) do
measurements = %{system_time: System.system_time()}
metadata = %{message: message}
:telemetry.execute([:gen_rmq, :consumer, :message, :ack], measurements, metadata)
end
@doc false
def emit_message_reject_event(message, requeue) do
measurements = %{system_time: System.system_time()}
metadata = %{message: message, requeue: requeue}
:telemetry.execute([:gen_rmq, :consumer, :message, :reject], measurements, metadata)
end
@doc false
def emit_message_start_event(message, module) do
measurements = %{system_time: System.system_time()}
metadata = %{message: message, module: module}
:telemetry.execute([:gen_rmq, :consumer, :message, :start], measurements, metadata)
end
@doc false
def emit_message_stop_event(start_time, message, module) do
stop_time = System.monotonic_time()
measurements = %{duration: stop_time - start_time}
metadata = %{message: message, module: module}
:telemetry.execute([:gen_rmq, :consumer, :message, :stop], measurements, metadata)
end
@doc false
def emit_message_exception_event(module, message, start_time, {reason, stacktrace}) do
emit_message_exception_event(module, message, start_time, :error, reason, stacktrace)
end
def emit_message_exception_event(module, message, start_time, :killed) do
emit_message_exception_event(module, message, start_time, :exit, :killed, nil)
end
def emit_message_exception_event(module, message, start_time, _) do
emit_message_exception_event(module, message, start_time, :error, nil, nil)
end
def emit_message_exception_event(module, message, start_time, kind, reason, stacktrace) do
stop_time = System.monotonic_time()
measurements = %{duration: stop_time - start_time}
metadata = %{
module: module,
message: message,
kind: kind,
reason: reason,
stacktrace: stacktrace
}
:telemetry.execute([:gen_rmq, :consumer, :message, :exception], measurements, metadata)
end
@doc false
def emit_connection_down_event(module, reason) do
measurements = %{system_time: System.system_time()}
metadata = %{module: module, reason: reason}
:telemetry.execute([:gen_rmq, :consumer, :connection, :down], measurements, metadata)
end
@doc false
def emit_connection_start_event(module, attempt, queue, exchange, routing_key) do
measurements = %{system_time: System.system_time()}
metadata = %{
module: module,
attempt: attempt,
queue: queue,
exchange: exchange,
routing_key: routing_key
}
:telemetry.execute([:gen_rmq, :consumer, :connection, :start], measurements, metadata)
end
@doc false
def emit_connection_stop_event(start_time, module, attempt, queue, exchange, routing_key) do
stop_time = System.monotonic_time()
measurements = %{duration: stop_time - start_time}
metadata = %{
module: module,
attempt: attempt,
queue: queue,
exchange: exchange,
routing_key: routing_key
}
:telemetry.execute([:gen_rmq, :consumer, :connection, :stop], measurements, metadata)
end
@doc false
def emit_connection_stop_event(start_time, module, attempt, queue, exchange, routing_key, error) do
stop_time = System.monotonic_time()
measurements = %{duration: stop_time - start_time}
metadata = %{
module: module,
attempt: attempt,
queue: queue,
exchange: exchange,
routing_key: routing_key,
error: error
}
:telemetry.execute([:gen_rmq, :consumer, :connection, :stop], measurements, metadata)
end
end
|
lib/gen_rmq/consumer/telemetry.ex
| 0.859987
| 0.46721
|
telemetry.ex
|
starcoder
|
defmodule ATECC508A.Transport do
@moduledoc """
ATECC508A transport behaviour
"""
@type t :: {module(), any()}
@callback init(args :: any()) :: {:ok, t()} | {:error, atom()}
@callback request(
id :: any(),
payload :: binary(),
timeout :: non_neg_integer(),
response_payload_len :: non_neg_integer()
) :: {:ok, binary()} | {:error, atom()}
@callback transaction(
id :: any(),
callback :: (request :: fun() -> {:ok, any()} | {:error, atom()})
) :: {:ok, any()} | {:error, atom()}
@callback detected?(arg :: any) :: boolean()
@callback info(id :: any()) :: map()
@doc """
Send a request to the ATECC508A and wait for a response
This is the raw request. The transport implementation takes care of adding
and removing CRCs.
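A sketch of a raw call (the payload, timeout, and response length shown are
assumptions for illustration, not a documented command sequence):
```ex
{:ok, transport} = ATECC508A.Transport.I2C.init()
# hypothetical 4-byte command expecting a 4-byte response within 5 ms
{:ok, _response} = ATECC508A.Transport.request(transport, <<0x30, 0x00, 0, 0>>, 5, 4)
```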
"""
@spec request(t(), binary(), non_neg_integer(), non_neg_integer()) ::
{:ok, binary()} | {:error, atom()}
def request({mod, arg}, payload, timeout, response_payload_len) do
mod.request(arg, payload, timeout, response_payload_len)
end
@doc """
Run a callback function inside a transaction that doesn't sleep
Use a transaction when multiple requests need to be sent without putting the
chip to sleep. For example, when a value needs to be stored in SRAM and then
acted on, since sleeping will clear the SRAM.
`callback` is a function that provides one argument, `request`, and expects a
return value of `{:ok, data}` or `{:error, reason}`. `request` is an anonymous
function whose args follow the public function `ATECC508A.Transport.request/4`,
except without the first arg (`t()`) since this is provided to `transaction`.
The success/error tuple returned by the callback function is returned
by `transaction`.
## Example
```ex
{:ok, transport} = ATECC508A.Transport.I2C.init()
{:ok, signature} =
ATECC508A.Transport.transaction(transport, fn request ->
# NONCE command (0x16)
{:ok, <<0>>} = request.(<<0x16, 0x43, 0, 0, signature_digest::binary>>, 29, 1)
# SIGN command (0x41)
request.(<<0x41, 0xA0, 0, 0>>, 115, 64)
end)
```
"""
@spec transaction(t(), (fun() -> {:ok, any()} | {:error, atom()})) ::
{:ok, any()} | {:error, atom()}
def transaction({mod, arg}, callback) do
mod.transaction(arg, callback)
end
@doc """
Check whether the ATECC508A is present
The transport implementation should do the minimum work to figure out whether
an ATECC508A is actually present. This is called by users who are unsure
whether the device has an ATECC508A and want to check before sending requests
to it.
"""
@spec detected?(t()) :: boolean()
def detected?({mod, arg}) do
mod.detected?(arg)
end
@doc """
Return information about this transport
This information is specific to this transport. No fields are required.
"""
@spec info(t()) :: map()
def info({mod, arg}) do
mod.info(arg)
end
end
|
lib/atecc508a/transport.ex
| 0.891448
| 0.402656
|
transport.ex
|
starcoder
|
defmodule Neotomex.PEG do
@moduledoc """
# Neotomex.PEG
Implements a PEG specification parser using the internal PEG
specification, and functions for parsing PEG grammars. There
are separate functions for parsing entire grammars or
only expressions.
Neotomex's expressions add onto <NAME>'s original PEG
grammar specification with:
- **Expression Pruning** allows for matching expressions
which aren't passed into the transform function. They're
indicated by bracketing an expression with angle brackets,
i.e. `'<' expression '>'`
- **Case Insensitive Suffix** allows the terminal expressions
contained within to match without paying attention to case. Use a `~`
character to do so,
e.g. `'wombat'~` will match "WomBat"
"""
# Specification of PEG, in PEG:
# Hierarchical syntax
# Grammar <- Spacing Definition+ EndOfFile
# Definition <- Identifier LEFTARROW Expression
# Expression <- Sequence (SLASH Sequence)*
# Sequence <- (('<' Prefix '>') / Prefix)*
# Prefix <- (AND / NOT)? Suffix
# Suffix <- Primary (QUESTION / STAR / PLUS)?
# Primary <- Identifier !LEFTARROW
# / OPEN Expression CLOSE
# / Literal / Class / DOT
# Lexical syntax
# Identifier <- IdentStart IdentCont* Spacing
# IdentStart <- [a-zA-Z_]
# IdentCont <- IdentStart / [0-9]
# Literal <- ['] (!['] Char)* ['] Spacing
# / ["] (!["] Char)* ["] Spacing
# Class <- '[' (!']' Range)* ']' Spacing
# Range <- Char '-' Char / Char
# Char <- '\\' [nrt'"\[\]\\]
# / '\\' [0-2][0-7][0-7]
# / '\\' [0-7][0-7]?
# / !'\\' .
# LEFTARROW <- '<-' Spacing
# SLASH <- '/' Spacing
# AND <- '&' Spacing
# NOT <- '!' Spacing
# QUESTION <- '?' Spacing
# STAR <- '*' Spacing
# PLUS <- '+' Spacing
# OPEN <- '(' Spacing
# CLOSE <- ')' Spacing
# OPENANGLE <- '<' Spacing
# CLOSEANGLE <- '>' Spacing
# DOT <- '.' Spacing
# Spacing <- (Space / Comment)*
# Comment <- '#' (!EndOfLine .)* EndOfLine
# Space <- ' ' / '\t' / EndOfLine
# EndOfLine <- '\r\n' / '\n' / '\r'
# EndOfFile <- !.
# Parse a PEG expression for the Neotoma grammar interface.
@doc """
Parse the input using the `peg_grammar`.
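A minimal sketch (the shape of the returned grammar is internal to
`Neotomex.Grammar`, so it is not shown here):
```elixir
{:ok, _grammar} = Neotomex.PEG.parse("digits <- [0-9]+")
```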
"""
@spec parse(binary()) :: {:ok, Neotomex.Grammar.grammar()}
def parse(input) do
unwrap_parse(grammar(), input)
end
@doc """
Match against the input using the `peg_grammar`.
This could be useful for basic validation of grammars.
"""
@spec match(binary()) :: {:ok, Neotomex.Grammar.match(), binary()}
def match(input) do
Neotomex.Grammar.match(grammar(), input)
end
@doc """
Parse the input as a PEG expression rather than a full PEG grammar.
"""
def parse_expression(input) do
unwrap_parse(expression_grammar(), input)
end
@doc """
PEG parser grammar defined in Neotomex internal PEG format
"""
@spec grammar :: Neotomex.Grammar.grammar()
def grammar do
Neotomex.Grammar.new(:grammar, grammar_definitions())
end
def expression_grammar do
Neotomex.Grammar.new(:expression, expression_definitions())
end
## Private functions
## Helper for simplifying the parse function return
defp unwrap_parse(grammar, input) do
case Neotomex.Grammar.parse(grammar, input) do
{:ok, grammar, ""} ->
{:ok, grammar}
otherwise ->
otherwise
end
end
# Definitions for parsing a PEG grammar
defp grammar_definitions do
grammar_definitions =
%{:grammar =>
{{:sequence, [{:nonterminal, :spacing},
{:one_or_more, {:nonterminal, :definition}},
{:nonterminal, :EOF}]},
{:transform,
fn [:spacing, [{root, _}] = definitions, :EOF] ->
Neotomex.Grammar.new(root, Enum.into(definitions, %{}))
[:spacing, [{root, _} | _] = definitions, :EOF] ->
Neotomex.Grammar.new(root, Enum.into(definitions, %{}))
end}},
:definition =>
{{:sequence, [{:nonterminal, :identifier},
{:nonterminal, :LEFTARROW},
{:nonterminal, :expression}]},
{:transform, fn [id, _, expr] -> {id, expr} end}}}
Map.merge(grammar_definitions, expression_definitions())
end
# Definitions for parsing a PEG expression
defp expression_definitions do
%{:expression =>
{{:sequence, [{:nonterminal, :sequence},
{:zero_or_more, {:sequence,
[{:nonterminal, :SLASH},
{:nonterminal, :sequence}]}}]},
{:transform,
fn [seq, []] -> seq
[seq, rest] -> {:priority, [seq | (for [:SLASH, p] <- rest, do: p)]}
end}},
:sequence =>
{{:zero_or_more, {:priority, [{:sequence,
[{:prune, {:nonterminal, :OPENANGLE}},
{:nonterminal, :prefix},
{:prune, {:nonterminal, :CLOSEANGLE}}]},
{:nonterminal, :prefix}]}},
{:transform,
# TODO - this is pretty ugly and could use some refactoring
fn [[sub_expr]] -> {:prune, sub_expr}
[sub_expr] -> sub_expr
sub_exprs when is_list(sub_exprs) ->
{:sequence, (for e <- sub_exprs do
(case e do [e] -> {:prune, e}
e -> e
end) end)}
end}},
:prefix =>
{{:sequence, [{:zero_or_one, {:priority, [{:nonterminal, :AND},
{:nonterminal, :NOT}]}},
{:nonterminal, :suffix}]},
{:transform,
fn [nil, suffix] -> suffix
[:NOT, suffix] -> {:not, suffix}
[:AND, suffix] -> {:and, suffix}
end}},
:suffix =>
{{:sequence, [{:nonterminal, :primary},
{:zero_or_one, {:priority, [{:nonterminal, :QUESTION},
{:nonterminal, :STAR},
{:nonterminal, :PLUS},
{:nonterminal, :TILDE}]}}]},
{:transform,
fn [primary, nil] -> primary
[primary, :QUESTION] -> {:zero_or_one, primary}
[primary, :STAR] -> {:zero_or_more, primary}
[primary, :PLUS] -> {:one_or_more, primary}
[primary, :TILDE] -> {:insensitive, primary}
end}},
:primary =>
{{:priority, [{:sequence, [{:nonterminal, :identifier},
{:not, {:nonterminal, :LEFTARROW}}]},
{:sequence, [{:nonterminal, :OPEN},
{:nonterminal, :expression},
{:nonterminal, :CLOSE}]},
{:nonterminal, :literal},
{:nonterminal, :class},
{:nonterminal, :DOT}]},
{:transform,
fn [id, _] -> {:nonterminal, id}
[:OPEN, expr, :CLOSE] -> expr
:DOT -> {:terminal, ~r/^./u}
x -> x
end}},
# Lexical syntax
:identifier =>
{{:sequence, [{:nonterminal, :ident_start},
{:zero_or_more, {:nonterminal, :ident_cont}},
{:nonterminal, :spacing}]},
{:transform,
fn [ident_start, ident_cont, :spacing] ->
Enum.join([ident_start | ident_cont]) |> String.to_atom
end}},
:ident_start => {:terminal, ~r/^[a-zA-Z_]/u},
:ident_cont => {:priority, [{:nonterminal, :ident_start},
{:terminal, ~r/^[0-9]/}]},
:literal =>
{{:priority, [{:sequence, [{:terminal, ?'},
{:zero_or_more,
{:sequence, [{:not, {:terminal, 39}},
{:nonterminal, :char}]}},
{:terminal, ?'},
{:nonterminal, :spacing}]},
{:sequence, [{:terminal, ?"},
{:zero_or_more,
{:sequence, [{:not, {:terminal, 34}},
{:nonterminal, :char}]}},
{:terminal, ?"},
{:nonterminal, :spacing}]}]},
{:transform,
fn [quot, chars, quot, :spacing] ->
{:terminal, Enum.join(for [nil, char] <- chars, do: char)}
end}},
:class =>
{{:sequence, [{:terminal, ?[},
{:zero_or_more, {:sequence, [{:not, {:terminal, 93}},
{:nonterminal, :range}]}},
{:terminal, ?]},
{:nonterminal, :spacing}]},
{:transform,
fn [?[, ranges, ?], :spacing] ->
{:ok, regex} = Enum.join(["^[" | (for [nil, r] <- ranges, do: r)]
++ ["]"])
|> Regex.compile()
{:terminal, regex}
end}},
:range =>
{{:priority, [{:sequence, [{:nonterminal, :char},
{:terminal, ?-},
{:nonterminal, :char}]},
{:nonterminal, :char}]},
{:transform,
fn [start, ?-, stop] -> Enum.join([start, "-", stop])
char -> char
end}},
# TODO: Fix single character match
:char =>
{{:priority, [{:sequence, [{:terminal, "\\"},
{:nonterminal, :escaped_char}]},
{:sequence, [{:not, {:terminal, "\\"}},
{:terminal, ~r/^./u}]}]},
{:transform,
fn [_, char] -> char
end
}},
:escaped_char =>
{{:priority, [{:terminal, ~r/^[nrts\[\]\\'"]/},
{:terminal, ~r/^x[0-9a-fA-F]{1,6}/}]},
{:transform,
fn "r" -> "\r"
"n" -> "\n"
"t" -> "\t"
# \s is the space escape; the character class above includes `s`
"s" -> "\s"
# single quotes appear in the character class above, so handle them too
"'" -> "'"
"[" -> "["
"]" -> "]"
"\\" -> "\\"
"\"" -> "\""
"x" <> escaped -> "\\x#{escaped}"
end
}},
:LEFTARROW => {{:sequence, [{:terminal, "<-"}, {:nonterminal, :spacing}]},
{:transform, fn _ -> :LEFTARROW end}},
:SLASH => {{:sequence, [{:terminal, 47}, {:nonterminal, :spacing}]},
{:transform, fn _ -> :SLASH end}},
:AND => {{:sequence, [{:terminal, ?&}, {:nonterminal, :spacing}]},
{:transform, fn _ -> :AND end}},
:NOT => {{:sequence, [{:terminal, ?!}, {:nonterminal, :spacing}]},
{:transform, fn _ -> :NOT end}},
:QUESTION => {{:sequence, [{:terminal, ??}, {:nonterminal, :spacing}]},
{:transform, fn _ -> :QUESTION end}},
:STAR => {{:sequence, [{:terminal, ?*}, {:nonterminal, :spacing}]},
{:transform, fn _ -> :STAR end}},
:PLUS => {{:sequence, [{:terminal, ?+}, {:nonterminal, :spacing}]},
{:transform, fn _ -> :PLUS end}},
:TILDE => {{:sequence, [{:terminal, ?~}, {:nonterminal, :spacing}]},
{:transform, fn _ -> :TILDE end}},
:OPEN => {{:sequence, [{:terminal, ?(}, {:nonterminal, :spacing}]},
{:transform, fn _ -> :OPEN end}},
:CLOSE => {{:sequence, [{:terminal, ?)}, {:nonterminal, :spacing}]},
{:transform, fn _ -> :CLOSE end}},
:OPENANGLE => {{:sequence, [{:terminal, ?<}, {:nonterminal, :spacing}]},
{:transform, fn _ -> :OPENANGLE end}},
:CLOSEANGLE => {{:sequence, [{:terminal, ?>}, {:nonterminal, :spacing}]},
{:transform, fn _ -> :CLOSEANGLE end}},
:DOT => {{:sequence, [{:terminal, ?.}, {:nonterminal, :spacing}]},
{:transform, fn _ -> :DOT end}},
:spacing => {{:zero_or_more, {:priority, [{:nonterminal, :space},
{:nonterminal, :comment}]}},
{:transform, fn _ -> :spacing end}},
:comment => {:sequence, [{:terminal, ?#},
{:zero_or_more,
{:sequence, [{:not, {:nonterminal, :EOL}},
{:terminal, ~r/./u}]}},
{:nonterminal, :EOL}]},
:space => {{:priority, [{:terminal, " "},
{:terminal, "\t"},
{:nonterminal, :EOL}]},
{:transform, fn _ -> :space end}},
:EOL => {{:priority, [{:terminal, "\r\n"},
{:terminal, "\n"},
{:terminal, "\r"}]},
{:transform, fn _ -> :EOL end}},
:EOF => {{:not, {:terminal, ~r/./u}},
{:transform, fn _ -> :EOF end}}
}
end
end
|
lib/neotomex/peg.ex
| 0.671686
| 0.54583
|
peg.ex
|
starcoder
|
defmodule Telegex.Marked.Rule do
@moduledoc false
# Node matching and parsing rules.
@type match_status :: :match | :nomatch
@type state :: Telegex.Marked.InlineState.t() | Telegex.Marked.BlockState.t()
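# A minimal sketch of defining a rule with this macro (the module name and
# node type are illustrative):
#
#     defmodule MyMarked.BoldRule do
#       use Telegex.Marked.Rule, mark: "*", type: :bold
#     end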
defmacro __using__(options) do
markup = options |> Keyword.get(:mark)
node_type = options |> Keyword.get(:type)
if markup do
if String.length(markup) > 1 do
implement_multi_markup(markup, node_type)
else
implement_single_markup(markup, node_type)
end
else
using()
end
end
defp using() do
quote do
@behaviour Telegex.Marked.Rule
alias Telegex.Marked.Node
alias Telegex.Marked.{State, InlineState, BlockState}
import Telegex.Marked.{Node, Rule}
end
end
defp implement_single_markup(markup, node_type) do
quote do
unquote(using())
@impl true
def match(state) do
%{line: %{src: src, len: len}, pos: pos} = state
prev_char = String.at(src, pos - 1)
next_char = String.at(src, pos + 1)
if ignore_begin?(unquote(markup), String.at(src, pos), prev_char, next_char) do
{:nomatch, state}
else
chars = String.graphemes(String.slice(src, pos + 1, len))
end_index =
chars
|> Enum.with_index()
|> Enum.find_index(fn {char, index} ->
char == unquote(markup) && !escapes_char?(Enum.at(chars, index - 1))
end)
|> calculate_end_index(pos)
if end_index do
state = %{state | pos: end_index}
state =
State.push_node(state, %Node{
type: unquote(node_type),
children: children_text(src, pos, end_index)
})
{:match, state}
else
{:nomatch, state}
end
end
end
end
end
defp implement_multi_markup(mark, type) do
quote do
unquote(using())
@mark unquote(mark)
@mark_length @mark |> String.length()
@impl true
def match(state) do
%{line: %{src: src, len: len}, pos: pos} = state
begin_at_src = String.slice(src, pos, len)
if String.starts_with?(begin_at_src, @mark) do
remainder_src =
String.slice(begin_at_src, @mark_length - 1, String.length(begin_at_src))
case remainder_src |> :binary.match(@mark) do
{begin_index, _} ->
end_index = begin_index |> calculate_end_index(pos, @mark_length)
if end_index != nil do
state = %{state | pos: end_index}
state =
State.push_node(state, %Node{
type: unquote(type),
children: children_text(src, pos, end_index, @mark_length)
})
{:match, state}
else
{:nomatch, state}
end
:nomatch ->
{:nomatch, state}
end
else
{:nomatch, state}
end
end
end
end
@callback match(state :: state()) :: {match_status(), state()}
@spec ignore_begin?(String.t(), String.t(), String.t(), String.t()) :: boolean()
def ignore_begin?(markup, pos_char, prev_char, next_char),
do:
pos_char != markup ||
escapes_char?(prev_char) ||
next_char == markup
@doc ~S"""
iex> <<92>> == "\\"
true
"""
@spec escapes_char?(String.t()) :: boolean()
def escapes_char?(<<92>>), do: true
def escapes_char?(_), do: false
def remove_index({elem, _index}), do: elem
def remove_index(nil), do: nil
def elem_or_nil(nil, _index), do: nil
def elem_or_nil(tuple, index), do: elem(tuple, index)
@spec calculate_end_index(integer() | nil, integer()) :: integer() | nil
def calculate_end_index(index, pos), do: calculate_end_index(index, pos, 1)
@spec calculate_end_index(nil, integer(), integer()) :: nil
def calculate_end_index(nil = _index, _pos, _mark_length), do: nil
@spec calculate_end_index(integer(), integer(), integer()) :: integer()
def calculate_end_index(index, pos, mark_length), do: index + mark_length + pos
@spec children_text(String.t(), integer(), integer()) :: String.t()
def children_text(src, pos, end_index), do: String.slice(src, pos + 1, end_index - pos - 1)
@spec children_text(String.t(), integer(), integer(), integer()) :: String.t()
def children_text(src, pos, end_index, mark_length),
do: String.slice(src, pos + mark_length, end_index - pos - mark_length - 1)
end
|
lib/telegex/marked/rule.ex
| 0.656988
| 0.463566
|
rule.ex
|
starcoder
|
defmodule Inky.RpiIO do
@moduledoc """
An `Inky.InkyIO` implementation intended for use with Raspberry Pis. It relies on
Circuits.GPIO and Circuits.SPI.
"""
@behaviour Inky.InkyIO
alias Inky.InkyIO
defmodule State do
@moduledoc false
@state_fields [
:gpio_mod,
:spi_mod,
:busy_pid,
:dc_pid,
:reset_pid,
:spi_pid
]
@enforce_keys @state_fields
defstruct @state_fields
end
@busy_pin 17
@cs0_pin 0
@dc_pin 22
@reset_pin 27
@default_pin_mappings %{
busy_pin: @busy_pin,
cs0_pin: @cs0_pin,
dc_pin: @dc_pin,
reset_pin: @reset_pin
}
@spi_speed_hz 488_000
@spi_command 0
@spi_data 1
@spi_chunk_bytes 4096
# API
@impl InkyIO
def init(opts \\ []) do
gpio = opts[:gpio_mod] || Inky.TestGPIO
spi = opts[:spi_mod] || Inky.TestSPI
pin_mappings = opts[:pin_mappings] || @default_pin_mappings
spi_address = "spidev0." <> to_string(pin_mappings[:cs0_pin])
{:ok, dc_pid} = gpio.open(pin_mappings[:dc_pin], :output)
{:ok, reset_pid} = gpio.open(pin_mappings[:reset_pin], :output)
{:ok, busy_pid} = gpio.open(pin_mappings[:busy_pin], :input)
{:ok, spi_pid} = spi.open(spi_address, speed_hz: @spi_speed_hz)
# Use binary pattern matching to pull out the ADC counts (low 10 bits)
# <<_::size(6), counts::size(10)>> = SPI.transfer(spi_pid, <<0x78, 0x00>>)
%State{
gpio_mod: gpio,
spi_mod: spi,
busy_pid: busy_pid,
dc_pid: dc_pid,
reset_pid: reset_pid,
spi_pid: spi_pid
}
end
@impl InkyIO
def handle_sleep(_state, duration_ms) do
:timer.sleep(duration_ms)
end
@impl InkyIO
def handle_read_busy(state), do: gpio_call(state, :read, [state.busy_pid])
@impl InkyIO
def handle_reset(state, value), do: :ok = gpio_call(state, :write, [state.reset_pid, value])
@impl InkyIO
def handle_command(state, command, data) do
write_command(state, command)
write_data(state, data)
end
@impl InkyIO
def handle_command(state, command) do
write_command(state, command)
end
# IO primitives
defp write_command(state, command) do
value = maybe_wrap_integer(command)
spi_write(state, @spi_command, value)
end
require Logger
defp write_data(state, data) do
value = maybe_wrap_integer(data)
spi_write(state, @spi_data, value)
end
defp spi_write(state, data_or_command, values) when is_list(values),
do: spi_write(state, data_or_command, :erlang.list_to_binary(values))
defp spi_write(state, data_or_command, value) when is_binary(value) do
:ok = gpio_call(state, :write, [state.dc_pid, data_or_command])
case spi_call(state, :transfer, [state.spi_pid, value]) do
{:ok, response} -> {:ok, response}
{:error, :transfer_failed} -> spi_call_chunked(state, value)
end
end
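# We assume a failed transfer means the payload exceeded the SPI driver's
# buffer size, so the write is retried in @spi_chunk_bytes-sized pieces.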
defp spi_call_chunked(state, value) do
size = byte_size(value)
parts = div(size - 1, @spi_chunk_bytes)
for x <- 0..parts do
offset = x * @spi_chunk_bytes
# NOTE: grab the smallest of a chunk or the remainder
length = min(@spi_chunk_bytes, size - offset)
{:ok, <<_::binary>>} =
spi_call(state, :transfer, [state.spi_pid, :binary.part(value, offset, length)])
end
end
# internals
defp maybe_wrap_integer(value), do: if(is_integer(value), do: <<value>>, else: value)
defp gpio_call(state, op, args), do: apply(state.gpio_mod, op, args)
defp spi_call(state, op, args), do: apply(state.spi_mod, op, args)
end
|
lib/hal/rpiio.ex
| 0.753648
| 0.540499
|
rpiio.ex
|
starcoder
|
defmodule GoogleFit.Dataset.Point do
@moduledoc """
This struct represents a datapoint in a dataset
"""
alias GoogleFit.Dataset.{Nutrition, ActivitySummary, NumberSummary, ValueFormatError}
alias GoogleFit.ActivityType
import GoogleFit.Util
@enforce_keys ~w[data_type_name start_time end_time modified_time origin_data_source_id unit value]a
defstruct @enforce_keys
@doc false
def decode(json_map = %{"dataTypeName" => data_type_name}) do
{value, unit} =
data_type_name
|> String.replace_prefix("com.google.", "")
|> decode_value(json_map["value"])
%__MODULE__{
data_type_name: data_type_name,
start_time: json_map["startTimeNanos"] |> from_nanos,
end_time: json_map["endTimeNanos"] |> from_nanos,
modified_time: json_map["modifiedTimeMillis"] |> from_millis,
origin_data_source_id: json_map["originDataSourceId"],
unit: unit,
value: value
}
end
@numeric_names ~w[calories.expended distance.delta calories.bmr hydration weight speed step_count.delta]
@numeric_units ~w[kCal meters kCal/day liters kg meters/second steps]a
@numeric_map @numeric_names |> Enum.zip(@numeric_units) |> Map.new
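# e.g. @numeric_map pairs "calories.expended" => :kCal, "distance.delta" => :meters, etc.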
def decode_value(dtn, [%{"fpVal" => val}]) when dtn in @numeric_names do
{val, Map.fetch!(@numeric_map, dtn)}
end
def decode_value(dtn, [%{"intVal" => val}]) when dtn in @numeric_names do
{val, Map.fetch!(@numeric_map, dtn)}
end
@decoder_names ~w[nutrition nutrition.summary activity.summary activity.segment weight.summary]
@decoder_modules [Nutrition, Nutrition, ActivitySummary, ActivityType,
NumberSummary]
@decoder_map @decoder_names |> Enum.zip(@decoder_modules) |> Map.new
@doc false
def decode_value(dtn, json_list) when dtn in @decoder_names do
module = Map.fetch!(@decoder_map, dtn)
{module.decode(json_list), module}
rescue
ValueFormatError -> decode_value("",json_list)
end
@unknown_units :unknown
def decode_value(_, json_map), do: {json_map, @unknown_units}
@doc false
def decoder_names(), do: @decoder_names
end
|
lib/google_fit/dataset/point.ex
| 0.782704
| 0.54577
|
point.ex
|
starcoder
|
defmodule ForgeSdk.Rpc.Stub do
@moduledoc """
Aggregate all RPCs
"""
defdelegate send_tx(chan, req, opts \\ []), to: ForgeAbi.ChainRpc.Stub
defdelegate get_tx(chan, opts \\ []), to: ForgeAbi.ChainRpc.Stub
defdelegate get_unconfirmed_txs(chan, req, opts \\ []), to: ForgeAbi.ChainRpc.Stub
defdelegate get_block(chan, opts \\ []), to: ForgeAbi.ChainRpc.Stub
defdelegate get_blocks(chan, req, opts \\ []), to: ForgeAbi.ChainRpc.Stub
defdelegate get_chain_info(chan, req, opts \\ []), to: ForgeAbi.ChainRpc.Stub
defdelegate get_node_info(chan, req, opts \\ []), to: ForgeAbi.ChainRpc.Stub
defdelegate search(chan, req, opts \\ []), to: ForgeAbi.ChainRpc.Stub
defdelegate get_net_info(chan, req, opts \\ []), to: ForgeAbi.ChainRpc.Stub
defdelegate get_validators_info(chan, req, opts \\ []), to: ForgeAbi.ChainRpc.Stub
defdelegate get_config(chan, req, opts \\ []), to: ForgeAbi.ChainRpc.Stub
# wallet rpc
defdelegate declare_node(chan, req, opts \\ []), to: ForgeAbi.WalletRpc.Stub
# state rpc
defdelegate get_account_state(chan, opts \\ []), to: ForgeAbi.StateRpc.Stub
defdelegate get_asset_state(chan, opts \\ []), to: ForgeAbi.StateRpc.Stub
defdelegate get_forge_state(chan, req, opts \\ []), to: ForgeAbi.StateRpc.Stub
defdelegate get_protocol_state(chan, opts \\ []), to: ForgeAbi.StateRpc.Stub
defdelegate get_stake_state(chan, opts \\ []), to: ForgeAbi.StateRpc.Stub
defdelegate get_swap_state(chan, opts \\ []), to: ForgeAbi.StateRpc.Stub
defdelegate get_delegate_state(chan, opts \\ []), to: ForgeAbi.StateRpc.Stub
# event rpc
defdelegate subscribe(chan, req, opts \\ []), to: ForgeAbi.EventRpc.Stub
defdelegate unsubscribe(chan, req, opts \\ []), to: ForgeAbi.EventRpc.Stub
# stats rpc
defdelegate get_forge_stats(chan, req, opts \\ []), to: ForgeAbi.StatsRpc.Stub
defdelegate list_transactions(chan, req, opts \\ []), to: ForgeAbi.StatsRpc.Stub
defdelegate list_assets(chan, req, opts \\ []), to: ForgeAbi.StatsRpc.Stub
defdelegate list_stakes(chan, req, opts \\ []), to: ForgeAbi.StatsRpc.Stub
defdelegate list_account(chan, req, opts \\ []), to: ForgeAbi.StatsRpc.Stub
defdelegate list_top_accounts(chan, req, opts \\ []), to: ForgeAbi.StatsRpc.Stub
defdelegate list_asset_transactions(chan, req, opts \\ []), to: ForgeAbi.StatsRpc.Stub
defdelegate list_blocks(chan, req, opts \\ []), to: ForgeAbi.StatsRpc.Stub
defdelegate list_swap(chan, req, opts \\ []), to: ForgeAbi.StatsRpc.Stub
defdelegate get_health_status(chan, req, opts \\ []), to: ForgeAbi.StatsRpc.Stub
end
|
lib/forge_sdk/rpc/stub.ex
| 0.617974
| 0.63696
|
stub.ex
|
starcoder
|
defmodule Hypex.Array do
@moduledoc """
This module provides a Hypex register implementation using an Erlang Array under
the hood.
Using an Array switches out the memory efficiency of the Bitstring implementation
for performance, operating at 10x the throughput of Bitstring on updates.
Even though this implementation uses higher amounts of memory, it's still pretty
low-cost and as such is the default register module for Hypex. Typically only
those working in memory-constrained environments should consider the Bitstring
register.
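A short usage sketch of the register API (the width of 4, giving 16 registers,
is arbitrary):
```elixir
registers = Hypex.Array.init(4)
registers = Hypex.Array.set_value(registers, 3, 4, 2)
2 = Hypex.Array.get_value(registers, 3, 4)
```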
"""
# define behaviour
@behaviour Hypex.Register
# define the Array typespec
@type array :: :array.array(number)
@doc """
Creates a new Array with a size of `2 ^ width` with all elements initialized to 0.
"""
@spec init(width :: number) :: array
def init(width) do
1
|> :erlang.bsl(width)
|> :array.new({ :default, 0 })
end
@doc """
Takes a list of bits and converts them to an Array.
The Array has its size fixed before being returned, just for some extra safety.
"""
@spec from_list([ bit :: number ]) :: array
def from_list(bits) do
bits
|> :array.from_list(0)
|> :array.fix
end
@doc """
Converts an Array register implementation to a list of bits.
We can just delegate to the internal Array implementation as it provides the
functionality we need built in.
"""
@spec to_list(array) :: [ bit :: number ]
defdelegate to_list(registers), to: :array, as: :to_list
@doc """
Returns a bit from the list of registers.
"""
@spec get_value(array, idx :: number, width :: number) :: result :: number
def get_value(registers, idx, _width) do
:array.get(idx, registers)
end
@doc """
Sets a bit inside the list of registers.
"""
@spec set_value(array, idx :: number, width :: number, value :: number) :: array
def set_value(registers, idx, _width, value) do
:array.set(idx, value, registers)
end
@doc """
Converts a list of registers into a provided accumulator.
Internally we pass everything to `:array.foldl/3`, as there's already a native
implementation for accumulation.
"""
@spec reduce(array, width :: number, accumulator :: any, (number, any -> any)) :: accumulator :: any
def reduce(registers, _width, acc, fun) do
:array.foldl(fn(_, int, acc) ->
fun.(int, acc)
end, acc, registers)
end
end
|
lib/hypex/array.ex
| 0.850794
| 0.816187
|
array.ex
|
starcoder
|
defmodule Sherbet.Service.Contact.Communication.Method do
@moduledoc """
Manages the interactions with communication methods.
Communication implementations will implement the given callbacks to handle the
specific communication method.
## Implementing a communication method
Communication should be implemented in a module conforming to
`#{String.slice(to_string(__MODULE__), 7..-1)}.Type`, where `Type` is the capitalized
communication method.
e.g. for a communication method that should be identified using the `:email` atom, the
implementation for that communication method should fall under
`#{String.slice(to_string(__MODULE__), 7..-1)}.Email`.
"""
@type uuid :: String.t
@doc """
Implement the behaviour for adding a new communication method and associating it with
the given identity.
If the contact is already in use, it will return an error.
If the operation was successful return `:ok`.
"""
@callback add(identity :: uuid, contact :: String.t) :: :ok | { :error, reason :: String.t }
@doc """
Implement the behaviour for removing a communication method associated with the
given identity.
If the operation was successful return `:ok`. Otherwise return the error.
"""
@callback remove(identity :: uuid, contact :: String.t) :: :ok | { :error, reason :: String.t }
@doc """
Implement the behaviour for setting a communication method associated with the
given identity to a new priority.
Only one communication method per identity may be set as `:primary`. If one already
exists, change it to secondary to allow for this new one to be made `:primary`.
If the operation was successful return `:ok`. Otherwise return the error.
"""
@callback set_priority(identity :: uuid, contact :: String.t, priority :: :secondary | :primary) :: :ok | { :error, reason :: String.t }
@doc """
Implement the behaviour for requesting an unverified communication method be removed.
This should generate the unique key that will be sent to the contact, that the
requester will require in order to finalise the removal.
If the communication method has been verified, then it should return an error.
If the operation was successful return `:ok`.
"""
@callback request_removal(contact :: String.t) :: :ok | { :error, reason :: String.t }
@doc """
Implement the behaviour for finalising a removal request of an unverified
communication method.
If the communication method has been verified, then it should return an error.
If the operation was successful return `:ok`.
"""
@callback finalise_removal(contact :: String.t, key :: String.t) :: :ok | { :error, reason :: String.t }
@doc """
Implement the behaviour for identifying if a communication method belonging to the given
identity has been verified.
If the operation was successful return whether it was verified or not (true if it was
verified, otherwise false). Otherwise return an error.
"""
@callback verified?(identity :: uuid, contact :: String.t) :: { :ok, verified :: boolean } | { :error, reason :: String.t }
@doc """
Implement the behaviour for requesting an unverified communication method be verified.
This should generate the unique key that will be sent to the contact, that the
requester will require in order to finalise the verification of that contact.
If the communication method has already been verified, then it should return an error.
If the operation was successful return `:ok`. Otherwise return an error.
"""
@callback request_verification(identity :: uuid, contact :: String.t) :: :ok | { :error, reason :: String.t }
@doc """
Implement the behaviour for finalising a verification request of an unverified
communication method.
If the communication method has already been verified, then it should return an error.
If the operation was successful return `:ok`. Otherwise return an error.
"""
@callback finalise_verification(identity :: uuid, contact :: String.t, key :: String.t) :: :ok | { :error, reason :: String.t }
@doc """
Implement the behaviour for retrieving the contacts of the communication method for
the given identity.
If the operation was successful return `{ :ok, contacts }`, where `contacts` is
the list of communication methods associated with the given identity and their
current verification status and priority. Otherwise return an error.
"""
@callback contacts(identity :: uuid) :: { :ok, contacts :: [{ :unverified | :verified, :secondary | :primary, String.t }] } | { :error, reason :: String.t }
@doc """
Implement the behaviour for retrieving the primary contact of the communication
method for the given identity.
If the operation was successful return `{ :ok, contact }`, where `contact` is
the primary communication method associated with the given identity. Otherwise return
an error.
"""
@callback primary_contact(identity :: uuid) :: { :ok, contact :: { :unverified | :verified, String.t } } | { :error, reason :: String.t }
@doc """
Implement the behaviour for retrieving the owning identity for the specific communication
method.
If the operation was successful return `{ :ok, identity }`, where `identity` is the UUID
reference that is associated with the given communication method. Otherwise return an
error.
"""
@callback owner(contact :: String.t) :: { :ok, identity :: uuid } | { :error, String.t }
@doc """
Associate a new contact with the given identity.
If the contact is already in use, it will return an error.
Returns `:ok` if the operation was successful, otherwise returns an error.
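A hypothetical call (the identity and address are illustrative, and `Method`
is assumed to be an alias for this module):
```elixir
:ok = Method.add(:email, identity, "someone@example.com")
```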
"""
@spec add(atom, uuid, String.t) :: :ok | { :error, String.t }
def add(type, identity, contact) do
atom_to_module(type).add(identity, contact)
end
@doc """
Remove the contact associated with the identity.
Returns `:ok` if the operation was successful, otherwise returns an error.
"""
@spec remove(atom, uuid, String.t) :: :ok | { :error, String.t }
def remove(type, identity, contact) do
atom_to_module(type).remove(identity, contact)
end
@doc """
Change a contact associated with the identity's priority.
Will turn any other primary contact for that identity into a secondary contact.
Returns `:ok` if the operation was successful, otherwise returns an error.
"""
@spec set_priority(atom, uuid, String.t, :secondary | :primary) :: :ok | { :error, String.t }
def set_priority(type, identity, contact, priority) do
atom_to_module(type).set_priority(identity, contact, priority)
end
@doc """
Request a contact be removed.
If the contact is verified, then it should return an error.
Returns `:ok` if the operation was successful, otherwise returns an error.
"""
@spec request_removal(atom, String.t) :: :ok | { :error, String.t }
def request_removal(type, contact) do
atom_to_module(type).request_removal(contact)
end
@doc """
Finalise the request that a contact be removed.
If the contact is verified, then it should return an error.
Returns `:ok` if the operation was successful, otherwise returns an error.
"""
@spec finalise_removal(atom, String.t, String.t) :: :ok | { :error, String.t }
def finalise_removal(type, contact, key) do
atom_to_module(type).finalise_removal(contact, key)
end
@doc """
Check if a contact belonging to the given identity has been verified.
Returns `{ :ok, verified }` if the operation was successful, where `verified`
is whether the contact was verified (`true`) or not (`false`). Otherwise returns
an error.
"""
@spec verified?(atom, uuid, String.t) :: { :ok, boolean } | { :error, String.t }
def verified?(type, identity, contact) do
atom_to_module(type).verified?(identity, contact)
end
@doc """
Request a contact be verified.
If the contact is already verified, then it should return an error.
Returns `:ok` if the operation was successful, otherwise returns an error.
"""
@spec request_verification(atom, uuid, String.t) :: :ok | { :error, String.t }
def request_verification(type, identity, contact) do
atom_to_module(type).request_verification(identity, contact)
end
@doc """
Finalise the verification request for a contact.
If the contact is already verified, then it should return an error.
Returns `:ok` if the operation was successful, otherwise returns an error.
"""
@spec finalise_verification(atom, uuid, String.t, String.t) :: :ok | { :error, String.t }
def finalise_verification(type, identity, contact, key) do
atom_to_module(type).finalise_verification(identity, contact, key)
end
@doc """
Get a list of contacts associated with the given identity.
Returns `{ :ok, contacts }` if the operation was successful, where `contacts` is
the list of communication methods associated with the given identity and their
current verification status and priority. Otherwise returns the reason of failure.
"""
@spec contacts(atom, uuid) :: { :ok, [{ :unverified | :verified, :secondary | :primary, String.t }] } | { :error, String.t }
def contacts(type, identity) do
atom_to_module(type).contacts(identity)
end
@doc """
Get the primary contact associated with the given identity.
Returns `{ :ok, contact }` if the operation was successful, where `contact` is
the primary communication method associated with the given identity and its
current verification status. Otherwise returns the reason of failure.
"""
@spec primary_contact(atom, uuid) :: { :ok, { :unverified | :verified, String.t } } | { :error, String.t }
def primary_contact(type, identity) do
atom_to_module(type).primary_contact(identity)
end
@doc """
Get the owning identity for the specific contact.
Returns `{ :ok, identity }` if the operation was successful. Otherwise returns
the reason of failure.
"""
@spec owner(atom, String.t) :: { :ok, uuid } | { :error, String.t }
def owner(type, contact) do
atom_to_module(type).owner(contact)
end
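# e.g. atom_to_module(:email) yields Sherbet.Service.Contact.Communication.Method.Email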
@spec atom_to_module(atom) :: atom
defp atom_to_module(name) do
String.to_atom(to_string(__MODULE__) <> "." <> format_as_module(to_string(name)))
end
@spec format_as_module(String.t) :: String.t
defp format_as_module(name) do
name
|> String.split(".")
|> Enum.map(fn module ->
String.split(module, "_") |> Enum.map(&String.capitalize(&1)) |> Enum.join
end)
|> Enum.join(".")
end
end
|
apps/sherbet_service/lib/sherbet.service/contact/communication/method.ex
| 0.917626
| 0.488954
|
method.ex
|
starcoder
|
defmodule FlowAssertions.Ecto do
@moduledoc """
This is a library of assertions for code that works with Ecto schemas or changesets. It is built on top of `FlowAssertions` and shares its two main emphases:
1. Making tests easier to scan by capturing frequently-used assertions in
functions that can be used in a pipeline.
This library will appeal to people who prefer this:
```elixir
VM.ServiceGap.accept_form(params, @institution)
|> ok_content
|> assert_valid
|> assert_changes(id: 1,
in_service_datestring: @iso_date_1,
out_of_service_datestring: @iso_date_2,
reason: "reason")
```
... to this:
```elixir
assert {:ok, changeset} = VM.ServiceGap.accept_form(params, @institution)
assert changeset.valid?
changes = changeset.changes
assert changes.id == 1
assert changes.in_service_datestring == @iso_date_1
assert changes.out_of_service_datestring == @iso_date_2
assert changes.reason == "reason"
```
The key point here is that all of the `assert_*` functions in this package
return their first argument to be used with later chained functions.
2. Error messages as helpful as those in the base ExUnit assertions:
<img src="https://raw.githubusercontent.com/marick/flow_assertions/main/pics/error2.png"/>
## Installation
Add `ecto_flow_assertions` to your list of dependencies in `mix.exs`:
```elixir
def deps do
[
{:ecto_flow_assertions, "~> 0.1", only: :test},
]
end
```
Your project should also have a dependency on Ecto version 3.x.
## Use
The easiest way is `use FlowAssertions.Ecto`, which imports everything else.
If you prefer to `alias` rather than `import`, note that all the
assertion modules end in `A`. That way, there's no conflict between
the module with changeset assertions (`FlowAssertions.Ecto.ChangesetA`
and the `Ecto.Changeset` module itself.
## Reading error output
`ExUnit` has very nice reporting for assertions where a left-hand side is compared to a right-hand side, as in:
```elixir
assert x == y
```
The error output shows the values of both `x` and `y`, using
color-coding to highlight differences.
`FlowAssertions.Ecto` uses that mechanism when appropriate. However, it
does more complicated comparisons, so the words `left` and `right`
aren't strictly accurate. So, suppose you're reading errors from code
like this:
```elixir
calculation
|> assert_something(expected)
|> assert_something_else(expected)
```
In the output, `left` will refer to some value extracted from
`calculation` and `right` will refer to a value extracted from
`expected` (most likely `expected` itself).
## Related code
* `FlowAssertions` is the base upon which `FlowAssertions.Ecto` is built.
* Although it was designed for integration testing, `PhoenixIntegration` also uses
flow-style macros. For example:
```elixir
test "details about form structure", %{conn: conn} do
  get_via_action(conn, :bulk_create_form)
  |> form_inputs(:bulk_animal)
  |> assert_fields(in_service_datestring: @today,
                   out_of_service_datestring: @never,
                   species_id: to_string(@bovine_id),
                   names: ~r/^\W*$/)
end
```
"""
defmacro __using__(_) do
quote do
import FlowAssertions.Ecto.ChangesetA
import FlowAssertions.Ecto.SchemaA
end
end
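  # A minimal sketch of pulling the assertions into a test module of your own:
  #
  #     defmodule MyApp.SchemaTest do
  #       use ExUnit.Case, async: true
  #       use FlowAssertions.Ecto
  #     end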
end
|
lib/ecto.ex
| 0.872741
| 0.925365
|
ecto.ex
|
starcoder
|
defmodule Snitch.Domain.Order.DefaultMachine do
@moduledoc """
The (default) Order state machine.
The state machine is described using the DSL provided by `BeepBop`.
Features:
* handle both cash-on-delivery and credit/debit card payments
## Customizing the state machine
There is no DSL or API to change the `DefaultMachine`; the developer must write
their own module, optionally making use of the DSL from `BeepBop`.
This allows the developer to change everything, from the names of the state to
the names of the event-callbacks.
## Writing a new State Machine
The state machine module must define the following functions:
_document this pls!_
### Tips
`BeepBop` is specifically designed to be used in defining state machines for
Snitch. You will find that the design and usage are inspired by
`Ecto.Changeset` and `ExUnit` setups.
The functions that it injects conform to some simple rules:
1. signature:
```
@spec the_event(BeepBop.Context.t) :: BeepBop.Context.t
```
2. The events consume and return contexts. BeepBop can manage simple DB
operations for you, like:
- accumulating DB updates in an `Ecto.Multi`, and running it only if the
whole event transition goes smoothly without any errors.
Essentially, this runs the event callback in a DB transaction.
- auto updating the `order`'s `:state` as the last step of the callback.
Make use of the helpers provided in `Snitch.Domain.Order.Transitions`! They
are well documented and can be composed really well.
### Additional information
The "states" of an `Order` are known only at compile-time. Hence other
modules/functions that perform some logic based on the state need to be
generated or configured at compile-time as well.
"""
# TODO: How to attach the additional info like ability, etc with the states?
# TODO: make the order state machine a behaviour to simplify things.
use Snitch.Domain
use BeepBop, ecto_repo: Repo
alias Snitch.Data.Schema.Order
alias Snitch.Domain.Order.Transitions
state_machine Order, :state, ~w(cart address payment delivery processing rts shipping
complete cancelled confirmed balance_due)a do
event(:add_addresses, %{from: [:cart], to: :address}, fn context ->
context
|> Transitions.associate_address()
|> Transitions.compute_shipments()
|> Transitions.persist_shipment()
end)
event(:payment_to_address, %{from: [:payment], to: :address}, fn context ->
context
|> Transitions.remove_shipment()
|> Transitions.remove_payment_record()
|> Transitions.associate_address()
|> Transitions.compute_shipments()
|> Transitions.persist_shipment()
end)
event(:delivery_to_address, %{from: [:delivery, :address], to: :address}, fn context ->
context
|> Transitions.remove_shipment()
|> Transitions.associate_address()
|> Transitions.compute_shipments()
|> Transitions.persist_shipment()
end)
event(:add_shipments, %{from: [:address], to: :delivery}, fn context ->
Transitions.persist_shipping_preferences(context)
end)
event(:add_payment, %{from: [:delivery], to: :payment}, fn context ->
Transitions.make_payment_record(context)
end)
event(:save_shipping_preferences, %{from: [:address], to: :delivery}, fn context ->
Transitions.persist_shipping_preferences(context)
end)
event(:confirm_purchase_payment, %{from: [:payment], to: :confirmed}, fn context ->
context
|> Transitions.confirm_order_payment_status()
|> Transitions.process_shipments()
|> Transitions.update_stock()
|> Transitions.send_email_confirmation()
end)
event(:confirm_cod_payment, %{from: [:payment], to: :confirmed}, fn context ->
context
|> Transitions.process_shipments()
|> Transitions.update_stock()
|> Transitions.send_email_confirmation()
end)
event(:complete_order, %{from: [:confirmed], to: :complete}, fn context ->
Transitions.check_order_completion(context)
end)
event(:captured, %{from: [:processing], to: :rts}, fn context ->
context
end)
event(
:payment_pending,
%{from: %{not: ~w(cart address payment cancelled)a}, to: :payment},
fn context ->
context
end
)
event(:ship, %{from: ~w[rts processing]a, to: :shipping}, fn context ->
context
end)
event(:recieved, %{from: [:shipping], to: :complete}, fn context ->
context
end)
event(:cancel, %{from: %{not: ~w(shipping complete cart)a}, to: :cancelled}, fn context ->
context
end)
end
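  # A minimal usage sketch. BeepBop injects one public function per event
  # above, each consuming and returning a `BeepBop.Context` (see the
  # moduledoc). The `order` value and context construction below are
  # assumptions for illustration, not APIs documented here:
  #
  #     context = BeepBop.Context.new(order)
  #     DefaultMachine.add_addresses(context)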
def persist(%Order{} = order, to_state) do
order
|> Order.partial_update_changeset(%{state: to_state})
|> Repo.update()
end
end
|
apps/snitch_core/lib/core/domain/order/default_machine.ex
| 0.804175
| 0.950457
|
default_machine.ex
|
starcoder
|
defmodule Plaid.LinkToken do
@moduledoc """
[Plaid link token API](https://plaid.com/docs/api/tokens/) calls and schema.
"""
alias Plaid.Castable
alias Plaid.LinkToken.{
DepositSwitch,
Metadata,
PaymentInitiation,
User
}
defmodule CreateResponse do
@moduledoc """
[Plaid API /link/token/create response schema.](https://plaid.com/docs/api/tokens/#linktokencreate)
"""
@behaviour Castable
@type t :: %__MODULE__{
link_token: String.t(),
expiration: String.t(),
request_id: String.t()
}
defstruct [:link_token, :expiration, :request_id]
@impl true
def cast(generic_map) do
%__MODULE__{
link_token: generic_map["link_token"],
expiration: generic_map["expiration"],
request_id: generic_map["request_id"]
}
end
end
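  # Casting sketch: `cast/1` maps a string-keyed response body onto the
  # struct (values below are placeholders):
  #
  #     CreateResponse.cast(%{"link_token" => "link-sandbox-abc", "request_id" => "xyz"})
  #     #=> %CreateResponse{link_token: "link-sandbox-abc", expiration: nil, request_id: "xyz"}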
@doc """
Creates a token for Plaid Link.
Does a `POST /link/token/create` call which creates a link token which is
required to initialize Plaid Link.
Params:
* `client_name` - The name of your application, as it should be displayed in Link.
* `language` - The language that Link should be displayed in.
* `country_codes` - Array of country codes to launch Link with.
* `user` - An object specifying information about the end user who will be linking their account.
* `products` - List of Plaid product(s) you wish to use.
* `webhook` - The destination URL to which any webhooks should be sent.
* `access_token` - The access_token associated with the Item to update.
* `link_customization_name` - The name of the Link customization from the Plaid Dashboard to be applied to Link.
* `redirect_uri` - A URI indicating the destination where a user should be forwarded after completing the Link flow.
* `android_package_name` - The name of your app's Android package. Required if initializing android Link.
* `account_filters` - Filter the accounts shown in Link.
* `payment_initiation` - For initializing Link for use with the Payment Initiation.
* `deposit_switch` - For initializing Link for use with the Deposit Switch.
## Examples
LinkToken.create(
%{
client_name: "<NAME>",
language: "en",
country_codes: ["US", "CA"],
user: %LinkToken.User{
client_user_id: "123-test-user",
legal_name: "Test User",
phone_number: "+19995550123",
phone_number_verified_time: "2020-01-01T00:00:00Z",
email_address: "<EMAIL>",
email_address_verified_time: "2020-01-01T00:00:00Z",
ssn: "444-33-2222",
date_of_birth: "1990-01-01"
},
products: ["auth", "transactions"],
webhook: "https://example.com/webhook",
access_token: "<KEY>",
link_customization_name: "vip-user",
redirect_uri: "https://example.com/redirect",
android_package_name: "com.service.user",
account_filters: %{
depository: %{
account_subtypes: ["401k", "529"]
}
},
payment_initiation: %LinkToken.PaymentInitiation{
payment_id: "payment-id-sandbox-123xxx"
},
deposit_switch: %LinkToken.DepositSwitch{
deposit_switch_id: "deposit-switch-id-sandbox-123xxx"
}
},
test_api_host: api_host,
client_id: "123",
secret: "abc"
)
{:ok, LinkToken.CreateResponse{}}
"""
@spec create(payload, Plaid.config()) :: {:ok, CreateResponse.t()} | {:error, Plaid.Error.t()}
when payload: %{
:client_name => String.t(),
:language => String.t(),
:country_codes => [String.t()],
:user => User.t(),
optional(:products) => [String.t()],
optional(:webhook) => String.t(),
optional(:access_token) => String.t(),
optional(:link_customization_name) => String.t(),
optional(:redirect_uri) => String.t(),
optional(:android_package_name) => String.t(),
optional(:account_filters) => map(),
optional(:payment_initiation) => PaymentInitiation.t(),
optional(:deposit_switch) => DepositSwitch.t()
}
def create(payload, config) do
Plaid.Client.call("/link/token/create", payload, CreateResponse, config)
end
defmodule GetResponse do
@moduledoc """
[Plaid API /link/token/get response schema.](https://plaid.com/docs/api/tokens/#linktokenget)
"""
@behaviour Castable
@type t :: %__MODULE__{
created_at: String.t() | nil,
expiration: String.t() | nil,
link_token: String.t() | nil,
metadata: Metadata.t(),
request_id: String.t()
}
defstruct [
:created_at,
:link_token,
:expiration,
:metadata,
:request_id
]
@impl true
def cast(generic_map) do
%__MODULE__{
created_at: generic_map["created_at"],
expiration: generic_map["expiration"],
link_token: generic_map["link_token"],
metadata: Castable.cast(Metadata, generic_map["metadata"]),
request_id: generic_map["request_id"]
}
end
end
@doc """
Get information about a previously created link token.
Does a `POST /link/token/get` call which returns details about a link token which
was previously created.
Params:
* `link_token` - A link_token from a previous invocation of /link/token/create.
## Examples
LinkToken.get("link-prod-123xxx", client_id: "123", secret: "abc")
{:ok, %Plaid.LinkToken.GetResponse{}}
"""
@spec get(String.t(), Plaid.config()) :: {:ok, GetResponse.t()} | {:error, Plaid.Error.t()}
def get(link_token, config) do
Plaid.Client.call("/link/token/get", %{link_token: link_token}, GetResponse, config)
end
end
|
lib/plaid/link_token.ex
| 0.86009
| 0.4231
|
link_token.ex
|
starcoder
|
defmodule Hawk.Client do
@moduledoc """
This module provides functions to create request headers and authenticate response.
## Examples
defmodule Myapp.Hawk do
def request_and_authenticate(uri, credentials) do
result = Hawk.Client.header(uri, :get, credentials)
case :httpc.request(:get, {to_charlist(uri), [{'authorization', to_charlist(result.header)}]}, [], []) do
{:error, reason} ->
{:error, reason}
{:ok, {_status_line, headers, _body}} ->
Hawk.Client.authenticate(headers, result)
end
end
end
"""
@typedoc false
@type headers() :: [{binary() | charlist(), binary() | charlist()}]
alias Hawk.{Crypto, Header, Request, Now}
@algorithms Crypto.algorithms()
@methods ~w(delete get patch post put)a
@doc """
Generate an Authorization header for a given request.
## Options
* `:ext` Application specific data sent via the ext attribute
* `:ts` A pre-calculated timestamp in seconds
* `:nonce` A pre-generated nonce
* `:localtime_offset_msec` Time offset to sync with server time (ignored if timestamp provided)
* `:payload` UTF-8 encoded string for body hash generation (ignored if hash provided)
* `:content_type` Payload content-type (ignored if hash provided)
* `:hash` Pre-calculated payload hash
* `:app` Oz application id
* `:dlg` Oz delegated-by application id
"""
@spec header(binary() | URI.t(), :delete | :get | :patch | :post | :put, map(), Hawk.opts()) :: %{artifacts: map(), credentials: map(), header: binary()}
def header(uri, method, credentials, options \\ %{})
def header(uri, method, credentials, options) when is_list(options), do: header(uri, method, credentials, Map.new(options))
def header(uri, method, %{algorithm: algorithm, id: _, key: _} = credentials, options) when is_binary(uri) and byte_size(uri) > 0 and algorithm in @algorithms and method in @methods do
uri
|> URI.parse()
|> header(method, credentials, options)
end
def header(%URI{} = uri, method, %{algorithm: algorithm, id: _id, key: _key} = credentials, %{hash: _hash} = options) when algorithm in @algorithms and method in @methods do
artifacts = create_artifacts(uri, method, options)
%{artifacts: artifacts, credentials: credentials, header: create_header(artifacts, credentials)}
end
def header(%URI{} = uri, method, %{algorithm: algorithm, id: _id, key: _key} = credentials, %{payload: payload} = options) when algorithm in @algorithms and method in @methods do
artifacts = uri |> create_artifacts(method, options) |> Map.put(:hash, Crypto.calculate_payload_hash(algorithm, "#{payload}", "#{options[:content_type]}"))
%{artifacts: artifacts, credentials: credentials, header: create_header(artifacts, credentials)}
end
def header(%URI{} = uri, method, %{algorithm: algorithm, id: _id, key: _key} = credentials, options) when algorithm in @algorithms and method in @methods do
artifacts = create_artifacts(uri, method, options)
%{artifacts: artifacts, credentials: credentials, header: create_header(artifacts, credentials)}
end
defp create_artifacts(%URI{host: host, port: port} = uri, method, options) do
%{ts: Now.sec(options), nonce: Kryptiles.random_string(6)}
|> Map.merge(options)
|> Map.merge(%{host: host, port: port, method: String.upcase("#{method}"), resource: Request.resource(uri)})
end
defp create_header(artifacts, %{id: id} = credentials) do
artifacts
|> Map.merge(%{id: id, mac: Crypto.calculate_mac("header", credentials, artifacts)})
|> header_string()
end
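  # header_string/3 walks the attributes in a fixed order (id, ts, nonce,
  # hash, ext, mac, app, dlg), appending `key="value"` for each key that is
  # present and skipping the rest; position 8 terminates the recursion.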
defp header_string(map, pos \\ 0, acc \\ "Hawk")
defp header_string(_rest, 8, acc), do: :erlang.iolist_to_binary(acc)
for {key, [pos, sep]} <- [id: [0, " "], ts: [1, ", "], nonce: [2, ", "], hash: [3, ", "], ext: [4, ", "], mac: [5, ", "], app: [6, ", "], dlg: [7, ", "]] do
defp header_string(%{unquote(key) => v} = rest, unquote(pos), acc) do
header_string(rest, unquote(pos) + 1, [acc | "#{unquote(sep)}#{unquote(key)}=\"#{v}\""])
end
end
defp header_string(rest, pos, acc), do: header_string(rest, pos + 1, acc)
@doc """
Authenticate response `headers`
## Options
* `:payload` optional payload received
* `:required` specifies if a Server-Authorization header is required. Defaults to `false`
"""
@spec authenticate(headers(), map(), Hawk.opts()) :: {:ok, map()} | {:error, {integer(), binary()}}
def authenticate(headers, result, options \\ %{})
def authenticate(headers, result, options) when is_list(options), do: authenticate(headers, result, Map.new(options))
def authenticate(headers, %{credentials: %{algorithm: algorithm, id: _id, key: _key} = credentials, artifacts: artifacts}, options) when algorithm in @algorithms and is_map(artifacts) and is_list(headers) do
headers
|> parse_headers()
|> validate_headers(credentials, artifacts, options)
|> case do
{:error, reason} -> {:error, reason}
{:ok, headers} -> {:ok, Map.drop(headers, ["content-type"])}
end
end
defp parse_headers(headers, header \\ %{})
defp parse_headers(_headers, {:error, reason}), do: {:error, reason}
defp parse_headers([], headers), do: headers
for header <- ['www-authenticate', "www-authenticate"] do
defp parse_headers([{unquote(header), value} | rest], headers) do
case Header.parse(value) do
{:ok, result} -> parse_headers(rest, Map.put(headers, "#{unquote(header)}", result))
{:error, _reason} -> parse_headers(rest, {:error, {500, "Invalid WWW-Authenticate header"}})
end
end
end
for header <- ['server-authorization', "server-authorization"] do
defp parse_headers([{unquote(header), value} | rest], headers) do
case Header.parse(value) do
{:ok, result} -> parse_headers(rest, Map.put(headers, "#{unquote(header)}", result))
{:error, _reason} -> parse_headers(rest, {:error, {500, "Invalid Server-Authorization header"}})
end
end
end
for header <- ['content-type', "content-type"] do
defp parse_headers([{unquote(header), value} | rest], headers) do
[header | _] = :string.split(value, ';')
headers = Map.put(headers, "content-type", header)
parse_headers(rest, headers)
end
end
defp parse_headers([_ | rest], headers), do: parse_headers(rest, headers)
defp validate_headers({:error, reason}, _credentials, _artifacts, _options), do: {:error, reason}
defp validate_headers(%{"server-authorization" => _} = headers, %{algorithm: algorithm} = credentials, artifacts, options) do
headers
|> validate_timestamp(credentials)
|> validate_mac(credentials, artifacts)
|> validate_hash(algorithm, options)
end
defp validate_headers(headers, %{algorithm: _algorithm} = credentials, _artifacts, _options) do
validate_timestamp(headers, credentials)
end
defp validate_timestamp(%{"www-authenticate" => %{ts: ts, tsm: tsm}} = headers, credentials) do
case tsm !== Crypto.calculate_ts_mac(ts, credentials) do
true -> {:error, {500, "Invalid server timestamp hash"}}
false -> {:ok, headers}
end
end
# defp validate_timestamp(%{"www-authenticate" => %{error: "Stale timestamp"}} = headers, _credentials) do
# InternalServerError.error("Invalid WWW-Authenticate header")
# end
# defp validate_timestamp(%{"www-authenticate" => %{error: error}} = headers, _credentials) do
# InternalServerError.error("Invalid WWW-Authenticate header")
# end
defp validate_timestamp(headers, _credentials), do: {:ok, headers}
defp validate_mac({:error, reason}, _credentials, _artifacts), do: {:error, reason}
defp validate_mac({:ok, %{"server-authorization" => %{ext: ext, hash: hash, mac: mac}}} = headers, credentials, artifacts) do
case mac !== Crypto.calculate_mac("response", credentials, %{artifacts | ext: ext, hash: hash}) do
true -> {:error, {500, "Bad response mac"}}
false -> headers
end
end
defp validate_mac(headers, _credentials, _artifacts), do: headers
defp validate_hash({:error, reason}, _algorithm, _options), do: {:error, reason}
defp validate_hash(headers, _algorithm, %{payload: ""}), do: headers
defp validate_hash({:ok, %{"server-authorization" => %{hash: hash}} = headers} = ok, algorithm, %{payload: payload}) do
case hash !== Crypto.calculate_payload_hash(algorithm, payload, headers["content-type"]) do
true -> {:error, {500, "Bad response payload mac"}}
false -> ok
end
end
defp validate_hash({:ok, %{"server-authorization" => _}}, _algorithm, %{payload: _payload}) do
{:error, {500, "Missing response hash attribute"}}
end
defp validate_hash(headers, _algorithm, _options), do: headers
@doc """
Generate a bewit value for a given URI.
## Options
* `:ext` Application specific data sent via the ext attribute
* `:localtime_offset_msec` Time offset to sync with server time
## Examples
iex> Hawk.Client.get_bewit("http://example.com/resource?a=b", %{algorithm: :sha256, id: "dh37fgj492je", key: "<KEY>"}, 3600, ext: "application-specific", localtime_offset_msec: 400)
%{
artifacts: %{
ext: "application-specific",
host: "example.com",
method: "GET",
nounce: "",
port: 80,
resource: "/resource?a=b",
ts: 1535315623
},
bewit: "ZGgzN2ZnajQ5MmplXDE1MzUzMTU2MjNcZE9laXcxL1Z4SjVSeVFKOXFJT0l1cFhVQ3VwTzZiMG5OeDBRMWROOXZVcz1cYXBwbGljYXRpb24tc3BlY2lmaWM"
}
"""
@spec get_bewit(binary() | URI.t(), map(), integer(), Hawk.opts()) :: %{artifacts: map(), bewit: binary()}
def get_bewit(uri, credentials, ttl, options \\ %{})
def get_bewit(uri, credentials, ttl, options) when is_list(options), do: get_bewit(uri, credentials, ttl, Map.new(options))
def get_bewit(uri, %{algorithm: algorithm, id: _, key: _} = credentials, ttl, options) when is_binary(uri) and byte_size(uri) > 0 and is_integer(ttl) and algorithm in @algorithms do
uri
|> URI.parse()
|> get_bewit(credentials, ttl, options)
end
def get_bewit(%URI{host: host, port: port} = uri, %{algorithm: algorithm, id: id, key: _} = credentials, ttl, options) when is_integer(ttl) and algorithm in @algorithms do
exp = options |> Now.sec() |> :math.floor() |> Kernel.+(ttl) |> Kernel.round()
artifacts = Map.merge(%{ts: exp, nounce: "", method: "GET", resource: Request.resource(uri), host: host, port: port}, Map.take(options, [:ext]))
mac = Crypto.calculate_mac("bewit", credentials, artifacts)
%{artifacts: artifacts, bewit: Base.url_encode64("#{id}\\#{exp}\\#{mac}\\#{options[:ext]}", padding: false)}
end
@doc """
Generate an authorization string for a UTF-8 encoded string for body hash generation
## Options
* `:timestamp` A pre-calculated timestamp in seconds
* `:nonce` A pre-generated nonce
* `:localtime_offset_msec` Time offset to sync with server time (ignored if timestamp provided)
## Examples
iex> Hawk.Client.message("example.com", 8000, "{\\"some\\":\\"payload\\"}", %{algorithm: :sha256, id: "dh37fgj492je", key: "<KEY>"}, hash: "osPwIDqS9cUeJnQRQEdq8upF/tGVVyo6KFgUiUoDoLs=", timestamp: 1531684204, nonce: "x0AIzk")
%{hash: "osPwIDqS9cUeJnQRQEdq8upF/tGVVyo6KFgUiUoDoLs=", id: "dh37fgj492je", mac: "Yb4eQ2MXJAc4MFvyouOOGhLKE9Ys/PqdYYub6gYwgrI=", nonce: "x0AIzk", ts: 1531684204}
"""
@spec message(binary(), 0..65535, binary(), map(), Hawk.opts()) :: %{hash: binary(), id: binary(), mac: binary(), host: binary(), port: 0..65535, nonce: binary(), ts: integer()}
def message(host, port, message, credentials, options \\ %{})
def message(host, port, message, credentials, options) when is_list(options), do: message(host, port, message, credentials, Map.new(options))
def message(host, port, message, %{algorithm: algorithm, id: id, key: _} = credentials, options) when is_binary(host) and byte_size(host) > 0 and is_binary(message) and port in 0..65535 and algorithm in @algorithms do
artifacts = %{ts: options[:timestamp] || Now.sec(options), nonce: options[:nonce] || Kryptiles.random_string(6), host: host, port: port, hash: Crypto.calculate_payload_hash(algorithm, message, "")}
Map.merge(artifacts, %{id: id, mac: Crypto.calculate_mac("message", credentials, artifacts)})
end
end
|
lib/hawk/client.ex
| 0.861261
| 0.41182
|
client.ex
|
starcoder
|
defmodule Csvto.Reader do
@type column_def :: nil | Csvto.Field.t
@type context :: %{path: String.t, columns: [column_def], column_count: integer, aggregate_column: column_def, schema: Csvto.Schema.t, fields: %{String.t => Csvto.Field.t}, aggregate_fields: %{String.t => Csvto.Field.t}, unspecified: [Csvto.Field.t], opts: Map.t}
@csv_errors [CSV.Parser.SyntaxError, CSV.Lexer.EncodingError, CSV.Decoder.RowLengthError, CSV.LineAggregator.CorruptStreamError]
@doc """
Read the CSV file at the given path and convert it to a stream of maps according to the given schema.
"""
def from(path, module, schema_name, opts \\ []) do
try do
do_from!(path, module, schema_name, opts) |> Enum.to_list
rescue
x in Csvto.Error ->
{:error, x.message}
x in @csv_errors ->
{:error, x.message}
end
end
def from!(path, module, schema_name, opts \\ []) do
try do
do_from!(path, module, schema_name, opts) |> Enum.to_list
rescue
x in @csv_errors ->
stacktrace = System.stacktrace
reraise Csvto.Error, [message: x.message], stacktrace
end
end
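  # A minimal calling sketch (the schema module and file path below are
  # hypothetical):
  #
  #     case Csvto.Reader.from("/tmp/products.csv", MyApp.ProductCsvto, :default) do
  #       {:error, message} -> {:error, message}
  #       rows -> Enum.each(rows, &IO.inspect/1)
  #     end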
defp do_from!(path, module, schema_name, opts) do
schema = validate_schema_name!(module, schema_name)
stream = build_stream!(path)
stream
|> CSV.decode!
|> add_index_and_context!(path, schema, opts)
|> convert_row
end
defp raise_error(message) do
raise Csvto.Error, message
end
defp validate_schema_name!(module, schema_name) do
case module.__csvto__(:schema, schema_name) do
nil -> raise_error("schema #{inspect schema_name} is undefined for #{inspect module}")
schema -> schema
end
end
defp build_stream!(path) do
case File.open(path, [:read, :utf8]) do
{:ok, file} ->
IO.stream(file, :line)
{:error, reason} ->
raise_error("cannot open file #{path} for #{inspect reason}")
end
end
defp add_index_and_context!(stream, path, schema, opts) do
stream
|> Stream.with_index()
|> Stream.transform(init_context!(path, schema, opts), &do_add_context!/2)
end
defp init_context!(path, schema, opts) do
context = %{path: path, schema: schema, columns: nil, column_count: 0, fields: nil, aggregate_column: nil, aggregate_fields: %{}, opts: Map.new(opts), unspecified: []}
case Keyword.get(opts, :headers) do
nil ->
case schema.index_mode do
:index ->
context
|> build_index_mode_context
:name ->
context
|> build_name_mode_context
end
headers when is_list(headers) ->
context
|> build_context_from_list_headers!(headers)
headers when is_map(headers) ->
context
|> build_context_from_map_headers!(headers)
headers ->
raise ArgumentError, "headers should either be an [atom] or a %{String.t => atom}, but got #{inspect headers}"
end
end
defp build_index_mode_context(context) do
schema = context[:schema]
{column_defs, {_, aggregate_column}} = Enum.flat_map_reduce(schema.fields, {-1, nil}, fn
%{field_type: :aggregate} = aggregate_field, {last_index, nil} ->
if aggregate_field.field_index - last_index <= 1 do
{[], {aggregate_field.field_index, aggregate_field}}
else
{List.duplicate(nil, aggregate_field.field_index - last_index - 1), {aggregate_field.field_index, aggregate_field}}
end
field, {last_index, nil} ->
if field.field_index - last_index <= 1 do
{[field], {field.field_index, nil}}
else
{List.duplicate(nil, field.field_index - last_index - 1) ++ [field], {field.field_index, nil}}
end
end)
%{context | columns: column_defs, column_count: Enum.count(column_defs), aggregate_column: aggregate_column}
end
defp build_name_mode_context(context) do
schema = context[:schema]
{fields, aggregate_fields} = Enum.reduce(schema.fields, {Map.new, Map.new}, fn
%{field_type: :aggregate} = field, {fields, aggregate_fields} ->
{fields, Map.put(aggregate_fields, field.field_name, field)}
field, {fields, aggregate_fields} ->
{Map.put(fields, field.field_name, field), aggregate_fields}
end)
%{context | fields: fields, aggregate_fields: aggregate_fields}
end
defp build_context_from_map_headers!(context, headers) do
schema = context[:schema]
fields_and_usage_by_name = schema.fields |> Enum.reduce(Map.new, &(Map.put(&2, &1.name, {&1, false})))
# Try to associate header with fields and checking for duplicate
{name_and_fields, fields_and_usage_by_name} = Enum.map_reduce(headers, fields_and_usage_by_name, fn
{field_name, name}, fields_and_usage_by_name ->
Map.get_and_update(fields_and_usage_by_name, name, fn
nil ->
raise ArgumentError, "cannot find field #{inspect name} on schema #{inspect schema.name}"
{%{field_type: :aggregate} = field, true} ->
{{field_name, field}, {field, true}}
{_field, true} ->
raise ArgumentError, "field #{inspect name} has been mapped more than once in given headers #{inspect headers}"
{field, false} ->
{{field_name, field}, {field, true}}
end)
end)
# Try to assign default field_names to remaining fields
{fields_and_usage_by_name, name_and_fields} = Enum.map_reduce(fields_and_usage_by_name, name_and_fields, fn
{name, {field, false}}, name_and_fields ->
if field.field_name do
{{name, {field, true}}, [{field.field_name, field} | name_and_fields]}
else
{{name, {field, false}}, name_and_fields}
end
field_and_usage, name_and_fields ->
{field_and_usage, name_and_fields}
end)
# Extract out unspecified fields and try to raise errors
unspecified = Enum.reduce(fields_and_usage_by_name, [], fn
{_, {field, false}}, acc ->
[field | acc]
_, acc ->
acc
end)
case extract_name_of_required(unspecified) do
[] ->
:ok
required_fields ->
required_fields_missing_error(required_fields)
end
{fields, aggregate_fields} = Enum.split_with(name_and_fields, fn
{_, %{field_type: :aggregate}} -> false
_ -> true
end)
%{context | fields: fields |> Enum.into(%{}), aggregate_fields: aggregate_fields |> Enum.into(%{}), unspecified: unspecified}
end
defp build_context_from_list_headers!(context, headers) do
schema = context[:schema]
fields_and_usage_by_name = Enum.reduce(schema.fields, Map.new, &(Map.put(&2, &1.name, {&1, false})))
{columns, fields_and_usage_by_name} = Enum.map_reduce(headers, fields_and_usage_by_name, fn
nil, fields_and_usage_by_name ->
{nil, fields_and_usage_by_name}
header, fields_and_usage_by_name ->
Map.get_and_update(fields_and_usage_by_name, header, fn
nil -> raise ArgumentError, "the specified header #{inspect header} cannot be found on schema #{inspect schema.name}"
{%{field_type: :aggregate} = field, true} ->
{field, {field, true}}
{_field, true} ->
raise ArgumentError, "non-aggregate field #{inspect header} has been defined more than once in the specified headers #{inspect headers}"
{field, false} ->
{field, {field, true}}
end)
end)
unspecified = fields_and_usage_by_name |> filter_out_unused
case extract_name_of_required(unspecified) do
[] ->
%{context | columns: columns, column_count: Enum.count(columns), unspecified: unspecified}
required_fields ->
required_fields_missing_error(required_fields)
end
end
defp required_fields_missing_error(required_fields) do
raise ArgumentError, "required fields #{Enum.join(required_fields, ",")} are not specified in the given header options"
end
defp do_add_context!({row, 0}, %{columns: nil, fields: fields, aggregate_fields: aggregate_fields, unspecified: unspecified_in_opts} = context) do
row = preprocess_row(row)
fields_and_usage = Enum.reduce(fields, Map.new, fn
{field_name, field}, map ->
Map.put(map, field_name, {field, false})
end)
{column_defs, fields_and_usage} = Enum.map_reduce(row, fields_and_usage, fn
column_name, fields_and_usage ->
Map.get_and_update(fields_and_usage, column_name, fn
nil ->
case find_by_prefix(aggregate_fields, column_name) do
nil ->
:pop
field ->
{field, {field, true}}
end
{%{field_type: :aggregate} = field, true} ->
{field, {field, true}}
{_field, true} ->
raise_error("duplicate non aggregate field #{column_name} found in file #{context[:path]}")
{field, false} ->
{field, {field, true}}
end)
end)
unspecified = fields_and_usage |> filter_out_unused
case extract_name_of_required(unspecified) do
[] ->
context = %{context | columns: column_defs, column_count: Enum.count(column_defs), unspecified: unspecified ++ unspecified_in_opts}
{[], context}
required_fields ->
raise_error("required fields #{Enum.join(required_fields, ",")} cannot be found in file #{context[:path]}")
end
end
defp do_add_context!({row, index}, context), do: {[{row, index, context}], context}
defp convert_row(stream) do
stream
|> Stream.map(&do_convert_row!/1)
end
def extract_name_of_required([]), do: []
def extract_name_of_required(fields) do
fields |> Enum.filter(&(&1.required?)) |> Enum.map(&(&1.name))
end
defp filter_out_unused(fields_and_usage) do
Enum.flat_map(fields_and_usage, fn
{_, {field, false}}->
[field]
_->
[]
end)
end
defp do_convert_row!({row, index, %{columns: columns, column_count: column_count, aggregate_column: aggregate_column, unspecified: unspecified} = context}) do
value_count = Enum.count(row)
{value_and_fields, unspecified_fields, extra_values} = cond do
value_count <= column_count ->
{specified, unspecified_fields} = Enum.split(columns, value_count)
{Enum.zip(row, specified), unspecified_fields, []}
true ->
{matched_values, unmatched_values} = Enum.split(row, column_count)
{Enum.zip(matched_values, columns), [], unmatched_values}
end
case extract_name_of_required(unspecified_fields) do
[] ->
:ok
required_fields ->
raise_error("required fields #{Enum.join(required_fields, ",")} is missing on file #{context[:path]}, line #{index + 1}")
end
result = value_and_fields |> Enum.with_index |> Enum.reduce(init_result(row, index, context), fn
{{_raw_value, nil}, _}, map ->
map
{{raw_value, field}, column_index}, map ->
with {:ok, value} <- do_cast_value(context, field, raw_value),
{:ok, value} <- do_validate_value(context[:schema].module, field, value) do
update_map_value(map, field, value)
else
{:error, reason} ->
raise_illegal_value_error(context, raw_value, index, column_index, reason)
end
end)
result = Enum.reduce(unspecified ++ unspecified_fields, result, &(Map.put(&2, &1.name, &1.default)))
if extra_values != [] && aggregate_column do
Map.put(result, aggregate_column.name, cast_aggregate_value!(context, index, aggregate_column, extra_values))
else
result
end
end
defp init_result(_row, index, context) do
if key = Map.get(context.opts, :line_number, false) do
key = case key do
true -> :__line__
key -> key
end
%{key => index + 1}
else
Map.new
end
end
defp update_map_value(map, %{field_type: :aggregate} = field, value) do
Map.update(map, field.name, [value], &(&1 ++ [value]))
end
defp update_map_value(map, field, value) do
Map.put(map, field.name, value)
end
defp do_cast_value(context, %{field_type: :aggregate} = field, raw_value) do
case field.type do
:array ->
{:ok, raw_value}
{:array, subtype} ->
do_cast_value(subtype, raw_value, default_value(context, subtype, nil, field.opts), field.opts)
end
end
defp do_cast_value(context, field, raw_value) do
do_cast_value(field.type, raw_value, default_value(context, field.type, field.default, field.opts), field.opts)
end
defp do_cast_value(type, raw_value, default, opts) do
raw_value = maybe_trim(raw_value, type, Map.get(opts, :keep, false))
if raw_value == "" do
{:ok, default}
else
case Csvto.Type.cast(type, raw_value, opts) do
:error ->
{:error, "cast to #{inspect type} error"}
{:ok, _} = ok -> ok
end
end
end
defp default_value(context, type, nil, opts) do
nilable = case Map.fetch(opts, :nilable) do
:error ->
Map.get(context.opts, :nilable, false)
{:ok, value} ->
value
end
if nilable do
nil
else
Csvto.Type.default(type)
end
end
defp default_value(_context, _type, default, _opts), do: default
@keepable_types ~w{binary string}a
defp maybe_trim(raw_value, keepable_type, true) when keepable_type in @keepable_types, do: raw_value
defp maybe_trim(raw_value, _type, _keep), do: String.trim(raw_value)
defp cast_aggregate_value!(context, index, aggregate_field, values) do
values |> Enum.with_index(index) |> Enum.map(fn
{raw_value, column_index} ->
case do_cast_value(context, aggregate_field, raw_value) do
{:ok, value} ->
value
{:error, reason} ->
raise_illegal_value_error(context, raw_value, index, column_index, reason)
end
end)
end
defp find_by_prefix(map, name) do
Enum.find_value(map, fn
{prefix, value} ->
if String.starts_with?(name, prefix) do
value
else
nil
end
end)
end
defp do_validate_value(_module, %{validator: nil}, value), do: {:ok, value}
defp do_validate_value(module, %{validator: method}, value) when is_atom(method) do
apply(module, method, [value]) |> process_validate_result(value)
end
defp do_validate_value(module, %{validator: {method, opts}}, value) when is_atom(method) do
apply(module, method, [value, opts]) |> process_validate_result(value)
end
defp raise_illegal_value_error(context, raw_value, index, column_index, reason) do
raise_error("illegal value #{inspect raw_value} in file #{context[:path]} at line #{index + 1}, column #{column_index + 1}: #{reason}")
end
defp preprocess_row(row), do: row |> Enum.map(&(String.trim(&1)))
defp process_validate_result({:ok, value}, _), do: {:ok, value}
defp process_validate_result(:ok, value), do: {:ok, value}
defp process_validate_result(true, value), do: {:ok, value}
defp process_validate_result({:error, reason}, _value), do: {:error, reason}
defp process_validate_result(false, value), do: {:error, "validation error for #{inspect value}"}
defp process_validate_result(nil, value), do: {:error, "validation error for #{inspect value}"}
defp process_validate_result(_truely, value), do: {:ok, value}
end
|
lib/csvto/reader.ex
| 0.720762
| 0.514339
|
reader.ex
|
starcoder
|
defmodule Ockam.Channel.XX do
@moduledoc """
Defines the XX Key Agreement protocol.
"""
alias Ockam.Message
alias Ockam.Router
alias Ockam.Vault
defstruct [:vault, :e, :s, :rs, :re, :ck, :k, :n, :h, :prologue]
@protocol_name "Noise_XX_25519_AESGCM_SHA256"
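  # The Noise XX handshake exchanges three messages:
  #   -> e
  #   <- e, ee, s, es
  #   -> s, se
  # (encode/decode_message_1..3 below implement each message in turn)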
defmacro zero_padded_protocol_name do
quote bind_quoted: binding() do
padding_size = (32 - byte_size(@protocol_name)) * 8
<<@protocol_name, 0::size(padding_size)>>
end
end
# @public_key_length 32
# @tag_length 16
# @encrypted_public_key_and_tag_length @public_key_length + @tag_length
def init(%{role: role, vault: vault, s: s} = data) do
prologue = Map.get(data, :prologue, "")
e =
Map.get_lazy(data, :e, fn ->
with {:ok, private_key} <- Vault.generate_secret(vault, type: :curve25519_private),
{:ok, public_key} <- Vault.get_public_key(vault, private_key) do
%{private: private_key, public: public_key}
end
end)
h = zero_padded_protocol_name()
state = %__MODULE__{vault: vault, e: e, s: s, h: h, prologue: prologue}
with {:ok, ck} <- Vault.import_secret(vault, zero_padded_protocol_name()),
{:ok, state} <- mix_hash(%{state | ck: ck}, prologue) do
data = Map.put(data, :key_establisher_state, state)
case role do
:initiator -> {:ok, {:key_establishment, :initiator, :awaiting_trigger}, data}
:responder -> {:ok, {:key_establishment, :responder, :awaiting_m1}, data}
end
end
end
def handle({:trigger, onward_route}, {:key_establishment, :initiator, :awaiting_trigger}, data) do
%{key_establisher_state: state} = data
{:ok, m1, state} = encode_message_1(state, "")
Router.route(%Message{payload: m1, onward_route: onward_route, return_route: [data.ciphertext_address]})
data = %{data | key_establisher_state: state}
{:next_state, {:key_establishment, :initiator, :awaiting_m2}, data}
end
def handle({:ciphertext, message}, {:key_establishment, :initiator, :awaiting_m2}, data) do
%Message{payload: m2, return_route: return_route} = message
%{key_establisher_state: state} = data
{:ok, "", state} = decode_message_2(state, m2)
{:ok, m3, state} = encode_message_3(state, "")
{:ok, [k1, k2]} = split(state)
:ok = Vault.set_secret_type(state.vault, k1, :aes256)
:ok = Vault.set_secret_type(state.vault, k2, :aes256)
data =
Map.put(data, :data_state, %{
vault: state.vault,
route_to_peer: return_route,
decrypt: {k1, 0},
encrypt: {k2, 0},
h: state.h
})
Router.route(%Message{payload: m3, onward_route: return_route, return_route: [data.ciphertext_address]})
{:next_state, :data, %{data | key_establisher_state: state}}
end
# responder states
def handle({:ciphertext, message}, {:key_establishment, :responder, :awaiting_m1}, data) do
%Message{payload: m1, return_route: return_route} = message
%{key_establisher_state: state} = data
{:ok, "", state} = decode_message_1(state, m1)
{:ok, m2, state} = encode_message_2(state, "")
Router.route(%Message{payload: m2, onward_route: return_route, return_route: [data.ciphertext_address]})
{:next_state, {:key_establishment, :responder, :awaiting_m3},
%{data | key_establisher_state: state}}
end
def handle({:ciphertext, message}, {:key_establishment, :responder, :awaiting_m3}, data) do
%Message{payload: m3, return_route: return_route} = message
%{key_establisher_state: state} = data
{:ok, "", state} = decode_message_3(state, m3)
{:ok, [k1, k2]} = split(state)
:ok = Vault.set_secret_type(state.vault, k1, :aes256)
:ok = Vault.set_secret_type(state.vault, k2, :aes256)
data =
Map.put(data, :data_state, %{
vault: state.vault,
route_to_peer: return_route,
encrypt: {k1, 0},
decrypt: {k2, 0},
h: state.h
})
{:next_state, :data, %{data | key_establisher_state: state}}
end
def encode_message_1(%__MODULE__{e: e} = state, payload) do
with {:ok, state} <- mix_hash(state, e.public),
{:ok, state} <- mix_hash(state, payload) do
{:ok, e.public <> payload, state}
end
end
def encode_message_2(%__MODULE__{e: e, s: s, re: re} = state, payload) do
with {:ok, state} <- mix_hash(state, e.public),
{:ok, shared_secret} <- dh(state, e, re),
{:ok, state} <- mix_key(state, shared_secret),
{:ok, state, encrypted_s_and_tag} <- encrypt_and_hash(state, s.public),
{:ok, shared_secret} <- dh(state, s, re),
{:ok, state} <- mix_key(state, shared_secret),
{:ok, state, encrypted_payload_and_tag} <- encrypt_and_hash(state, payload) do
{:ok, e.public <> encrypted_s_and_tag <> encrypted_payload_and_tag, state}
end
end
def encode_message_3(%__MODULE__{s: s, re: re} = state, payload) do
with {:ok, state, encrypted_s_and_tag} <- encrypt_and_hash(state, s.public),
{:ok, shared_secret} <- dh(state, s, re),
{:ok, state} <- mix_key(state, shared_secret),
{:ok, state, encrypted_payload_and_tag} <- encrypt_and_hash(state, payload) do
{:ok, encrypted_s_and_tag <> encrypted_payload_and_tag, state}
end
end
def decode_message_1(state, message) do
<<re::32-bytes, payload::binary>> = message
with {:ok, state} <- mix_hash(state, re),
{:ok, state} <- mix_hash(state, payload) do
{:ok, payload, %{state | re: re}}
end
end
def decode_message_2(%__MODULE__{e: e} = state, message) do
<<re::32-bytes, encrypted_rs_and_tag::48-bytes, encrypted_payload_and_tag::binary>> = message
with {:ok, state} <- mix_hash(state, re),
{:ok, shared_secret} <- dh(state, e, re),
{:ok, state} <- mix_key(state, shared_secret),
{:ok, state, rs} <- decrypt_and_hash(state, encrypted_rs_and_tag),
{:ok, shared_secret} <- dh(state, e, rs),
{:ok, state} <- mix_key(state, shared_secret),
{:ok, state, payload} <- decrypt_and_hash(state, encrypted_payload_and_tag) do
{:ok, payload, %{state | re: re, rs: rs}}
end
end
def decode_message_3(%__MODULE__{e: e} = state, message) do
<<encrypted_rs_and_tag::48-bytes, encrypted_payload_and_tag::binary>> = message
with {:ok, state, rs} <- decrypt_and_hash(state, encrypted_rs_and_tag),
{:ok, shared_secret} <- dh(state, e, rs),
{:ok, state} <- mix_key(state, shared_secret),
{:ok, state, payload} <- decrypt_and_hash(state, encrypted_payload_and_tag) do
{:ok, payload, %{state | rs: rs}}
end
end
def mix_hash(%__MODULE__{vault: vault, h: h} = state, data) do
case Vault.sha256(vault, h <> data) do
{:ok, h} -> {:ok, %{state | h: h}}
error -> {:error, error}
end
end
def mix_key(%__MODULE__{vault: vault, ck: ck} = state, input_key_material) do
with {:ok, [ck, k]} <- Vault.hkdf_sha256(vault, ck, input_key_material, 2),
:ok <- Vault.set_secret_type(vault, k, :aes256) do
{:ok, %{state | n: 0, k: k, ck: ck}}
end
end
def dh(%__MODULE__{vault: vault}, keypair, peer_public) do
Vault.ecdh(vault, keypair.private, peer_public)
end
def encrypt_and_hash(%__MODULE__{vault: vault, k: k, n: n, h: h} = state, plaintext) do
with {:ok, ciphertext_and_tag} <- Vault.encrypt(vault, k, n, h, plaintext),
{:ok, state} <- mix_hash(state, ciphertext_and_tag) do
{:ok, %{state | n: n + 1}, ciphertext_and_tag}
end
end
def decrypt_and_hash(%__MODULE__{vault: vault, k: k, n: n, h: h} = state, ciphertext_and_tag) do
with {:ok, plaintext} <- Vault.decrypt(vault, k, n, h, ciphertext_and_tag),
{:ok, state} <- mix_hash(state, ciphertext_and_tag) do
{:ok, %{state | n: n + 1}, plaintext}
end
end
def split(%__MODULE__{vault: vault, ck: ck}) do
Vault.hkdf_sha256(vault, ck, nil, 2)
end
end
|
implementations/elixir/lib/ockam/channel/xx.ex
| 0.776411
| 0.467818
|
xx.ex
|
starcoder
|
defmodule Dayron.Model do
@moduledoc """
Defines the functions to convert a module into a Dayron Model.
A Model provides a set of functionalities around mapping the external data
into local structures.
In order to convert an Elixir module into a Model, Dayron provides a
`Dayron.Model` mixin, that requires a `resource` option and a struct
defining the available fields.
## Example
defmodule User do
use Dayron.Model, resource: "users"
defstruct name: "", age: 0
end
The `resource` option value defines the complete API URL when requesting
this model. For the above example, API calls will be made to
http://YOUR_API_URL/users.
Given a module with Ecto.Schema already included, the `Dayron.Model` mixin
will include everything required for Dayron.Repo to get and send data to the
external REST API. The `schema` will be used to map external API response
data to local structs.
## Example
defmodule User do
use Ecto.Schema
use Dayron.Model
schema "users" do
field :name, :string
field :age, :integer, default: 0
end
end
In that case, the resource name is derived from the schema source name
("users" in the above example). To override the value, pass a `resource`
option when including the mixin.
## Example
defmodule User do
use Ecto.Schema
use Dayron.Model, resource: "people"
schema "users" do
field :name, :string
field :age, :integer, default: 0
end
end
If some pre-processing is required to convert the JSON data into the struct,
it's possible to override `__from_json__/2` in the module.
## Example
def __from_json__(data, _options) do
updated_data =
data
|> Map.get(:details)
|> Map.delete(:type)
struct(__MODULE__, updated_data)
end
"""
alias Dayron.Requestable
defmacro __using__(opts) do
quote bind_quoted: [opts: opts] do
@resource opts[:resource]
def __resource__ do
case @resource do
nil -> apply(__MODULE__, :__schema__, [:source])
resource -> resource
end
end
def __url_for__([{:id, id} | _]), do: "/#{__resource__}/#{id}"
def __url_for__(_), do: "/#{__resource__}"
def __from_json__(data, _opts), do: struct(__MODULE__, data)
def __from_json_list__(data, opts) when is_list(data) do
Enum.map(data, &__from_json__(&1, opts))
end
def __from_json_list__(data, _opts), do: struct(__MODULE__, data)
defoverridable [__url_for__: 1, __from_json__: 2, __from_json_list__: 2]
end
end
@doc """
A shortcut for Requestable.url_for/2
"""
def url_for(module, opts \\ []) do
Requestable.url_for(module, opts)
end
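  # A minimal sketch, using the User module from the moduledoc and the
  # default `__url_for__/1` implementation:
  #
  #     Dayron.Model.url_for(User, id: 42)
  #     #=> "/users/42"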
@doc """
A shortcut for Requestable.from_json/3
"""
def from_json(module, data, opts \\ []) do
Requestable.from_json(module, data, opts)
end
@doc """
A shortcut for Requestable.from_json_list/3
"""
def from_json_list(module, data, opts \\ []) do
Requestable.from_json_list(module, data, opts)
end
end
|
lib/dayron/model.ex
| 0.84481
| 0.667193
|
model.ex
|
starcoder
|
defmodule Raft.Server do
@moduledoc """
"""
alias Raft.Server.State
@type ref :: atom | {atom, atom} | pid
@doc """
Invoked by leader to replicate log entries (§5.3); also used
as heartbeat (§5.2).
## Arguments
- `ref` - a reference to a Raft server
- `term` - leader’s term
- `leader_id` - so follower can redirect clients
- `prev_log_index` - index of log entry immediately preceding
new ones
- `prev_log_term` - term of `prev_log_index` entry
- `entries` - log entries to store (empty for heartbeat; may
send more than one for efficiency)
- `leader_commit` - leader’s `commit_index`
## Result
- `{success, term}` - where:
- `success` is `:ok` if follower contained entry matching
`prev_log_index` and `prev_log_term`
- `term` is `current_term`, for leader to update itself
"""
@spec append_entries(ref, State.current_term, State.server_index, State.command_index, State.command, State.log, State.commit_index) :: {:ok, State.current_term} | {:error, nil}
def append_entries(ref, term, leader_id, prev_log_index, prev_log_term, entries, leader_commit) do
end
@doc """
Invoked by candidates to gather votes (§5.2).
## Arguments
- `ref` - a reference to a Raft server
- `term` - candidate’s term
- `candidate_id` - candidate requesting vote
- `last_log_index` - index of candidate’s last log entry (§5.4)
- `last_log_term` - term of candidate’s last log entry (§5.4)
## Result
- `{vote_granted, term}` - where:
- `vote_granted` - `:ok` means candidate received vote
- `term` is `current_term`, for candidate to update itself
"""
@spec request_vote(ref, State.current_term, State.server_index, State.command_index, State.command) :: {:ok, State.current_term} | {:error, nil}
def request_vote(ref, term, candidate_id, last_log_index, last_log_term) do
end
@doc """
Invoked by admin to add a server to the cluster configuration.
## Arguments
- `ref` - a reference to a Raft server
- `new_server` - address of server to add to configuration
## Result
- `{status, leader_hint}` - where:
- `status` - `:ok` if server was added successfully
- `leader_hint` - address of recent leader, if known
"""
@spec add_server(ref, ref | binary) :: {:ok, ref | binary}
def add_server(ref, new_server) do
end
@doc """
Invoked by admin to remove a server from the cluster
configuration.
## Arguments
- `ref` - a reference to a Raft server
- `old_server` - address of server to remove to configuration
## Result
- `{status, leader_hint}` - where:
- `status` - `:ok` if server was removed successfully
- `leader_hint` - address of recent leader, if known
"""
@spec remove_server(ref, ref | binary) :: {:ok, ref | binary}
def remove_server(ref, old_server) do
end
@doc """
Invoked by leader to send chunks of a snapshot to a follower.
Leaders always send chunks in order.
## Arguments
- `ref` - a reference to a Raft server
- `term` - leader's term
- `leader_id` - so follower can redirect clients
- `last_index` - the snapshot replaces all entries up through
and including this index
- `last_term` - term of `last_index`
- `last_config` - latest cluster configuration as of `last_index`
(include only with first chunk)
- `offset` - byte offset where chunk is positioned in the
snapshot file
- `data` - raw bytes of the snapshot chunk, starting at offset
- `done` - `true` if this is the last chunk
## Result
- `term` - `current_term`, for leader to update itself
"""
@spec install_snapshot(ref, State.current_term, State.server_index, State.command_index, State.command, term, non_neg_integer, list, boolean) :: State.current_term
def install_snapshot(ref, term, leader_id, last_index, last_term, last_config, offset, data, done) do
end
@doc """
Invoked by clients to modify the replicated state.
## Arguments
- `ref` - a reference to a Raft server
- `client_id` - client invoking request (§6.3)
- `sequence_num` - to eliminate duplicates (§6.4)
- `command` - request for state machine, may affect state
## Result
- `{status, response, leader_hint}` - where:
- `status` - `:ok` if state machine applied command
- `response` - state machine output, if successful
- `leader_hint` - address of recent leader, if known (§6.2)
"""
@spec client_request(ref, term, term, State.command) :: term
def client_request(ref, client_id, sequence_num, command) do
end
@doc """
Invoked by new clients to open a new session, used to eliminate
duplicate requests. (§6.3)
## Result
- `{status, client_id, leader_hint}` - where:
- `status` - `:ok` if state machine applied command
- `client_id` - unique identifier for client session
- `leader_hint` - address of recent leader, if known
"""
@spec register_client(ref) :: term
def register_client(ref) do
end
@doc """
Invoked by clients to query the replicated state (read-only
commands). (§6.4)
## Arguments
- `ref` - a reference to a Raft server
- `query` - request for state machine, read-only
## Result
- `{status, response, leader_hint}` - where:
- `status` - `:ok` if state machine applied command
- `response` - state machine output, if successful
- `leader_hint` - address of recent leader, if known (§6.2)
"""
@spec client_query(ref, term) :: term
def client_query(ref, query) do
end
end
|
lib/raft/server.ex
| 0.852291
| 0.673202
|
server.ex
|
starcoder
|
defmodule Grizzly.Command do
@moduledoc """
Command is a server managing the overall lifecycle of the execution of a command,
from start to completion or timeout.
When starting the execution of a command, the state of the network is checked to see if it is
in one of the allowed states for executing this particular command. The allowed states are listed
in the `pre_states` property of the command being started. If the property is absent, the default
allowed states are [:idle]. If the network is not in an allowed state, {:error, :network_busy} is returned.
If the started command has an `exec_state` property, the network state is set to its value for the duration
of the execution of the command. If there is none, the network state is unchanged.
If the started command has a `post_state` property, the network state is set to it after the command execution
completes or times out. If there is none, the network state is set to :idle.
If the started command has a `timeout` property, a timeout is set to its value. If the command does not complete
before the timeout expires, the command's execution is stopped and a {:timeout, <command module>} message is sent to
the process that started the execution of the command.
"""
use GenServer
alias Grizzly.{Packet, SeqNumber}
alias Grizzly.Network.State, as: NetworkState
alias Grizzly.Command.EncodeError
require Logger
@type t :: pid
@type handle_instruction ::
{:continue, state :: any}
| {:done, response :: any}
| {:retry, state :: any}
| {:send_message, message :: any, state :: any}
| {:queued, state :: any}
@callback init(args :: term) :: :ok | {:ok, command :: any}
@callback encode(command :: any) :: {:ok, binary} | {:error, EncodeError.t() | any()}
@callback handle_response(command :: any, Packet.t()) :: handle_instruction
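  # A minimal sketch of a module implementing these callbacks (the module
  # name, timeout, and byte payload below are hypothetical):
  #
  #     defmodule MyCommand do
  #       @behaviour Grizzly.Command
  #
  #       def init(_args), do: {:ok, %{timeout: 5_000}}
  #       def encode(_command), do: {:ok, <<0x01>>}
  #       def handle_response(_command, _packet), do: {:done, :ok}
  #     end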
defmodule State do
@moduledoc false
@type t :: %__MODULE__{
command_module: module(),
command: any,
timeout_ref: reference() | nil,
starter: pid
}
defstruct command_module: nil,
command: nil,
timeout_ref: nil,
starter: nil
end
@spec start(module, opts :: keyword) :: GenServer.on_start()
def start(module, opts) do
_ = Logger.debug("Starting command #{inspect(module)} with args #{inspect(opts)}")
command_args = Keyword.put_new(opts, :seq_number, SeqNumber.get_and_inc())
{:ok, command} = apply(module, :init, [command_args])
if not NetworkState.in_allowed_state?(Map.get(command, :pre_states)) do
_ =
Logger.warn(
"Command #{module} not starting in allowed network states #{
inspect(Map.get(command, :pre_states))
}"
)
{:error, :network_busy}
else
:ok = NetworkState.set(Map.get(command, :exec_state))
GenServer.start(
__MODULE__,
command_module: module,
command: command,
starter: self()
)
end
end
@spec encode(t) :: {:ok, binary} | {:error, EncodeError.t()}
def encode(command) do
GenServer.call(command, :encode)
end
@spec handle_response(t, %Packet{}) ::
{:finished, value :: any()}
| :continue
| :retry
| :queued
| {:send_message, message :: any()}
def handle_response(command, packet) do
GenServer.call(command, {:handle_response, packet}, 60_000 * 2)
end
@spec complete(t) :: :ok
def complete(command) do
GenServer.call(command, :complete)
end
@impl true
def init(
command_module: command_module,
command: command,
starter: starter
) do
timeout_ref = setup_timeout(Map.get(command, :timeout))
{
:ok,
%State{
command_module: command_module,
command: command,
timeout_ref: timeout_ref,
starter: starter
}
}
end
@impl true
def terminate(:normal, _state) do
:ok
end
def terminate(reason, %State{command: command}) do
_ =
Logger.warn(
"Command #{inspect(command)} terminated with #{inspect(reason)}. Resetting network state to idle"
)
NetworkState.set(:idle)
:ok
end
# Upon command completion, clear any timeout and
# set the network state to what the command specifies (defaults to :idle).
@impl true
def handle_call(:complete, _from, %State{command: command, timeout_ref: timeout_ref} = state) do
_ = clear_timeout(timeout_ref)
post_state = Map.get(command, :post_state, :idle)
NetworkState.set(post_state)
{:stop, :normal, :ok, %State{state | timeout_ref: nil}}
end
def handle_call(
:encode,
_,
%State{command_module: command_module, command: command} = state
) do
case apply(command_module, :encode, [command]) do
{:ok, binary} ->
{:reply, {:ok, binary}, state}
{:error, _} = error ->
{:stop, :normal, error, state}
end
end
def handle_call(
{:handle_response, %Packet{} = packet},
_from,
%State{command_module: command_module, command: command} = state
) do
case apply(command_module, :handle_response, [command, packet]) do
{:done, value} ->
{:reply, {:finished, value}, state}
{:send_message, message, new_command} ->
{:reply, {:send_message, message}, %{state | command: new_command}}
{:continue, new_command} ->
{:reply, :continue, %{state | command: new_command}}
{:retry, new_command} ->
{:reply, :retry, %{state | command: new_command}}
{:queued, new_command} ->
{:reply, :queued, %{state | command: new_command}}
end
end
@impl true
def handle_info(:timeout, %State{starter: starter, command_module: command_module} = state) do
send(starter, {:timeout, command_module})
{:stop, :normal, %State{state | timeout_ref: nil}}
end
defp setup_timeout(nil), do: nil
defp setup_timeout(timeout) do
Process.send_after(self(), :timeout, timeout)
end
defp clear_timeout(nil), do: :ok
defp clear_timeout(timeout_ref), do: Process.cancel_timer(timeout_ref)
end
|
lib/grizzly/command.ex
| 0.832747
| 0.457985
|
command.ex
|
starcoder
|
defmodule Graph.Pathfindings.BellmanFord do
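  @moduledoc """
  Single-source shortest paths via the Bellman-Ford algorithm.

  `call/2` relaxes every edge up to once per vertex, halting early when a
  pass changes nothing, then makes one more pass over the edges to detect a
  negative-weight cycle. It returns `nil` when such a cycle exists;
  otherwise it returns a map from each reachable vertex label to the
  shortest path (a list of vertex labels) from `root`.
  """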
@spec call(Graph.t(), Graph.vertex()) ::
%{
optional(Graph.vertex()) => [Graph.vertex()]
}
| nil
def call(%Graph{vertices: vs, edges: edges} = graph, root) do
predecessors = Map.new()
distances = %{root => 0}
{predecessors, distances} =
Enum.reduce_while(vs, {predecessors, distances}, fn _vertex, acc ->
Enum.reduce(edges, acc, fn {{u, v}, _}, {predecessors, distances} ->
weight = Graph.Utils.edge_weight(graph, u, v)
label_u = Map.get(vs, u)
label_v = Map.get(vs, v)
distance_u = Map.get(distances, label_u, :infinite)
distance_v = Map.get(distances, label_v, :infinite)
cond do
distance_u == :infinite ->
{predecessors, distances}
distance_v == :infinite or distance_u + weight < distance_v ->
{
Map.put(predecessors, label_v, label_u),
Map.put(distances, label_v, distance_u + weight)
}
          true ->
{predecessors, distances}
end
end)
|> case do
{^predecessors, ^distances} = result -> {:halt, result}
result -> {:cont, result}
end
end)
negative_cycle =
Enum.any?(edges, fn {{u, v}, _} ->
weight = Graph.Utils.edge_weight(graph, u, v)
label_u = Map.get(vs, u)
label_v = Map.get(vs, v)
distance_u = Map.get(distances, label_u, :infinite)
distance_v = Map.get(distances, label_v, :infinite)
cond do
distance_u == :infinite -> false
distance_v == :infinite -> false
distance_u + weight < distance_v -> true
        true -> false
end
end)
if negative_cycle do
nil
else
Enum.reduce(vs, Map.new(), fn {_id, label}, paths ->
if Map.get(distances, label, :infinite) == :infinite do
paths
else
Map.put(paths, label, build_path(label, predecessors))
end
end)
end
end
defp build_path(vertex, predecessors) do
do_build_path(vertex, predecessors, [vertex])
end
defp do_build_path(vertex, predecessors, path) do
case Map.get(predecessors, vertex) do
nil -> path
next -> do_build_path(next, predecessors, [next | path])
end
end
end
|
lib/graph/pathfindings/bellman_ford.ex
| 0.66628
| 0.566019
|
bellman_ford.ex
|
starcoder
|
defmodule Asteroid.Token.AccessToken do
import Asteroid.Utils
alias Asteroid.Context
alias Asteroid.Client
alias Asteroid.Crypto
alias Asteroid.Token
@moduledoc """
Access token structure
## Field naming
The `data` field holds the token data. The following field names are standard and are used
by Asteroid:
- `"exp"`: the expiration unix timestamp of the access token
- `"sub"`: the `t:Asteroid.Subject.id()` of the access token
- `"client_id"`: the `t:Asteroid.Client.id()` of the access token
- `"scope"`: a list of `OAuth2Utils.Scope.scope()` scopes granted to the refresh token
- `"device_id"`: the `t:Asteroid.Device.id()` of the access token
- `"status"`: a `String.t()` for the status of the token. A token that has been revoked is not
  necessarily still present in the token store (e.g. for stateful tokens it will probably be
  deleted). Optionally one of:
- `"active"`: active token
- `"revoked"`: revoked token
- `"__asteroid_oauth2_initial_flow"`: the initial `t:Asteroid.OAuth2.flow_str/0` that led to
the issuance of this token
- `"__asteroid_oidc_authenticated_session_id"`: the `t:Asteroid.OIDC.AuthenticatedSession.id/0`
, if any
- `"__asteroid_oidc_claims"`: the claims that were requested, if any
"""
@enforce_keys [:id, :serialization_format, :data]
defstruct [:id, :refresh_token_id, :serialization_format, :signing_key, :signing_alg, :data]
@type id :: binary()
@type t :: %__MODULE__{
id: __MODULE__.id(),
refresh_token_id: binary() | nil,
serialization_format: Asteroid.Token.serialization_format(),
signing_key: Asteroid.Crypto.Key.name() | nil,
signing_alg: Asteroid.Crypto.Key.jws_alg() | nil,
data: map()
}
@doc ~s"""
Creates a new access token
## Options
- `:id`: `String.t()` id, **mandatory**
- `:refresh_token_id`: the `t:Asteroid.Token.RefreshToken.id/0` of the refresh token associated
to this access token if any. Defaults to `nil`
- `:data`: a data `map()`
- `:serialization_format`: an `t:Asteroid.Token.serialization_format/0` atom, defaults to
`:opaque`
- `:signing_key`: an `Asteroid.Crypto.Key.name()` for the signing key
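  ## Example (illustrative values)

      AccessToken.new(
        id: "<access token id>",
        data: %{"exp" => 1_600_000_000, "sub" => "user_1"},
        serialization_format: :opaque
      )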
"""
@spec new(Keyword.t()) :: t()
def new(opts) do
%__MODULE__{
id: opts[:id] || raise("Missing access token id"),
refresh_token_id: opts[:refresh_token_id] || nil,
data: opts[:data] || %{},
serialization_format: opts[:serialization_format] || :opaque,
signing_key: opts[:signing_key]
}
end
@doc """
Generates a new access token
## Options
- `:refresh_token_id`: the `t:Asteroid.Token.RefreshToken.id/0` of the refresh token associated
to this access token if any. Defaults to `nil`
- `:serialization_format`: an `t:Asteroid.Token.serialization_format/0` atom, defaults to
`:opaque`
- `:signing_key`: an `Asteroid.Crypto.Key.name()` for the signing key
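  ## Example (illustrative; `"key_auto"` is a hypothetical key name)

      AccessToken.gen_new(serialization_format: :jws, signing_key: "key_auto", signing_alg: "RS256")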
"""
@spec gen_new(Keyword.t()) :: t()
def gen_new(opts \\ []) do
%__MODULE__{
id: secure_random_b64(20),
      refresh_token_id: opts[:refresh_token_id],
data: %{},
      serialization_format: opts[:serialization_format] || :opaque,
signing_key: opts[:signing_key],
signing_alg: opts[:signing_alg]
}
end
@doc """
  Gets an access token from the access token store
  Unlike `c:Asteroid.ObjectStore.AccessToken.get/2`, this function returns
`{:error, Exception.t()}` if the access token is not found in the token
store.
## Options
- `:check_active`: determines whether the validity of the access token should be checked.
Defaults to `true`. For validity checking details, see `active?/1`
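  ## Example (illustrative)

      case AccessToken.get("<access token id>") do
        {:ok, access_token} -> access_token
        {:error, exception} -> raise exception
      end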
"""
@spec get(id(), Keyword.t()) :: {:ok, t()} | {:error, Exception.t()}
def get(access_token_id, opts \\ [check_active: true]) do
at_store_module = astrenv(:object_store_access_token)[:module]
at_store_opts = astrenv(:object_store_access_token)[:opts] || []
case at_store_module.get(access_token_id, at_store_opts) do
{:ok, access_token} when not is_nil(access_token) ->
if opts[:check_active] != true or active?(access_token) do
{:ok, access_token}
else
{:error,
Token.InvalidTokenError.exception(
sort: "access token",
reason: "inactive token",
id: access_token_id
)}
end
{:ok, nil} ->
{:error,
Token.InvalidTokenError.exception(
sort: "access token",
reason: "not found in the token store",
id: access_token_id
)}
{:error, error} ->
{:error, error}
end
end
@doc """
Stores an access token
This function only stores access tokens that have an `:opaque` serialization format.
"""
@spec store(t(), Context.t()) :: {:ok, t()} | {:error, any()}
def store(access_token, ctx \\ %{})
def store(%__MODULE__{serialization_format: :opaque} = access_token, ctx) do
at_store_module = astrenv(:object_store_access_token)[:module]
at_store_opts = astrenv(:object_store_access_token)[:opts] || []
access_token = astrenv(:object_store_access_token_before_store_callback).(access_token, ctx)
case at_store_module.put(access_token, at_store_opts) do
:ok ->
{:ok, access_token}
{:error, _} = error ->
error
end
end
def store(access_token, _ctx) do
{:ok, access_token}
end
@doc """
Deletes an access token
"""
@spec delete(t() | id()) :: :ok | {:error, any()}
def delete(%__MODULE__{id: id}) do
delete(id)
end
def delete(access_token_id) do
at_store_module = astrenv(:object_store_access_token)[:module]
at_store_opts = astrenv(:object_store_access_token)[:opts] || []
at_store_module.delete(access_token_id, at_store_opts)
end
@doc """
  Puts a value into the `data` field of an access token
  If the value is `nil`, the access token is not changed and the field is not added.
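  ## Example

      access_token
      |> AccessToken.put_value("scope", ["read"])
      |> AccessToken.put_value("ignored", nil) # no-op: nil values are not added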
"""
@spec put_value(t(), any(), any()) :: t()
def put_value(access_token, _key, nil), do: access_token
def put_value(access_token, key, val) do
%{access_token | data: Map.put(access_token.data, key, val)}
end
@doc """
  Removes a value from the `data` field of an access token
If the value does not exist, does nothing.
"""
@spec delete_value(t(), any()) :: t()
def delete_value(access_token, key) do
%{access_token | data: Map.delete(access_token.data, key)}
end
@doc """
Serializes the access token, using its inner `t:Asteroid.Token.serialization_format/0`
information
Supports serialization to `:opaque` and `:jws` serialization formats.
  When serializing to the `:jws` format:
- if the signing algorithm was set, uses this algorithm
- otherwise uses the default signer of `JOSE.JWT.sign/2`
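  ## Example (illustrative)

      # An :opaque access token serializes to its id:
      AccessToken.gen_new() |> AccessToken.serialize()
      #=> the token's randomly generated id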
"""
@spec serialize(t()) :: String.t()
def serialize(%__MODULE__{id: id, serialization_format: :opaque}) do
id
end
def serialize(%__MODULE__{serialization_format: :jws} = access_token) do
jwt =
Enum.reduce(
access_token.data,
%{},
fn
{"__asteroid" <> _, _v}, acc ->
acc
{k, v}, acc ->
Map.put(acc, k, v)
end
)
{:ok, jwk} = Crypto.Key.get(access_token.signing_key)
if access_token.signing_alg do
jws = JOSE.JWS.from_map(%{"alg" => access_token.signing_alg})
JOSE.JWT.sign(jwk, jws, jwt)
|> JOSE.JWS.compact()
|> elem(1)
else
JOSE.JWT.sign(jwk, jwt)
|> JOSE.JWS.compact()
|> elem(1)
end
end
@doc """
Returns `true` if the token is active, `false` otherwise
The following data, *when set*, are used to determine that a token is active:
- `"nbf"`: must be lower than current time
- `"exp"`: must be higher than current time
- `"revoked"`: must be the boolean `false`
"""
@spec active?(t()) :: boolean()
def active?(access_token) do
(is_nil(access_token.data["nbf"]) or access_token.data["nbf"] < now()) and
(is_nil(access_token.data["exp"]) or access_token.data["exp"] > now()) and
(is_nil(access_token.data["status"]) or access_token.data["status"] != "revoked")
# FIXME: implement the following items from https://tools.ietf.org/html/rfc7662#section-4
# o If the token has been signed, the authorization server MUST
# validate the signature.
# o If the token can be used only at certain resource servers, the
# authorization server MUST determine whether or not the token can
# be used at the resource server making the introspection call.
end
@doc """
Returns the access token lifetime
## Processing rules
- If the client has the following field set to an integer value for the corresponding flow
returns that value:
- `"__asteroid_oauth2_flow_ropc_access_token_lifetime"`
- `"__asteroid_oauth2_flow_client_credentials_access_token_lifetime"`
- `"__asteroid_oauth2_flow_authorization_code_access_token_lifetime"`
- `"__asteroid_oauth2_flow_implicit_access_token_lifetime"`
- `"__asteroid_oauth2_flow_device_authorization_access_token_lifetime"`
- `"__asteroid_oidc_flow_authorization_code_access_token_lifetime"`
- `"__asteroid_oidc_flow_implicit_access_token_lifetime"`
- `"__asteroid_oidc_flow_hybrid_access_token_lifetime"`
- Otherwise, if the following configuration option is set to an integer for the corresponding
flow, returns its value:
- #{Asteroid.Config.link_to_option(:oauth2_flow_ropc_access_token_lifetime)}
- #{Asteroid.Config.link_to_option(:oauth2_flow_client_credentials_access_token_lifetime)}
- #{Asteroid.Config.link_to_option(:oauth2_flow_authorization_code_access_token_lifetime)}
- #{Asteroid.Config.link_to_option(:oauth2_flow_implicit_access_token_lifetime)}
- #{Asteroid.Config.link_to_option(:oauth2_flow_device_authorization_access_token_lifetime)}
- #{Asteroid.Config.link_to_option(:oidc_flow_authorization_code_access_token_lifetime)}
- #{Asteroid.Config.link_to_option(:oidc_flow_implicit_access_token_lifetime)}
- #{Asteroid.Config.link_to_option(:oidc_flow_hybrid_access_token_lifetime)}
- Otherwise returns
#{Asteroid.Config.link_to_option(:oauth2_access_token_lifetime)}, or `0` if not set
In any case, the returned value is capped by the scope configuration.
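  ## Example (illustrative)

      # ROPC flow with no per-client attribute set and
      # `oauth2_flow_ropc_access_token_lifetime: 3600` configured:
      lifetime(%{flow: :ropc, granted_scopes: granted_scopes, client: client})
      #=> 3600, unless the scope configuration caps it lower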
"""
@spec lifetime(Context.t()) :: non_neg_integer()
def lifetime(%{flow: flow, granted_scopes: granted_scopes} = ctx) do
scope_config = Asteroid.OAuth2.Scope.configuration_for_flow(flow)
case Asteroid.OAuth2.Scope.max_access_token_lifetime(granted_scopes, scope_config) do
capped_lifetime when is_integer(capped_lifetime) ->
min(lifetime_for_client(ctx), capped_lifetime)
nil ->
lifetime_for_client(ctx)
end
end
# no scopes
def lifetime(ctx) do
lifetime_for_client(ctx)
end
@spec lifetime_for_client(Context.t()) :: non_neg_integer()
def lifetime_for_client(%{flow: flow, client: client}) do
attr =
case flow do
:ropc ->
"__asteroid_oauth2_flow_ropc_access_token_lifetime"
:client_credentials ->
"__asteroid_oauth2_flow_client_credentials_access_token_lifetime"
:authorization_code ->
"__asteroid_oauth2_flow_authorization_code_access_token_lifetime"
:implicit ->
"__asteroid_oauth2_flow_implicit_access_token_lifetime"
:device_authorization ->
"__asteroid_oauth2_flow_device_authorization_access_token_lifetime"
:oidc_authorization_code ->
"__asteroid_oidc_flow_authorization_code_access_token_lifetime"
:oidc_implicit ->
"__asteroid_oidc_flow_implicit_access_token_lifetime"
:oidc_hybrid ->
"__asteroid_oidc_flow_hybrid_access_token_lifetime"
end
client = Client.fetch_attributes(client, [attr])
case client.attrs[attr] do
lifetime when is_integer(lifetime) ->
lifetime
_ ->
conf_opt =
case flow do
:ropc ->
:oauth2_flow_ropc_access_token_lifetime
:client_credentials ->
:oauth2_flow_client_credentials_access_token_lifetime
:authorization_code ->
:oauth2_flow_authorization_code_access_token_lifetime
:implicit ->
:oauth2_flow_implicit_access_token_lifetime
:device_authorization ->
:oauth2_flow_device_authorization_access_token_lifetime
:oidc_authorization_code ->
:oidc_flow_authorization_code_access_token_lifetime
:oidc_implicit ->
:oidc_flow_implicit_access_token_lifetime
:oidc_hybrid ->
:oidc_flow_hybrid_access_token_lifetime
end
astrenv(conf_opt, astrenv(:oauth2_access_token_lifetime, 0))
end
end
@doc """
Returns the serialization format for an access token
  The serialization format defaults to `:opaque`. The
  following rules apply (<FLOW> is to be replaced by a `t:Asteroid.OAuth2.flow_str/0`):
- if the `__asteroid_oauth2_flow_<FLOW>_access_token_serialization_format` is set, returns
this value
- otherwise, if the `:oauth2_flow_<FLOW>_access_token_serialization_format` is set, returns
this value
- otherwise, returns the value of the
#{Asteroid.Config.link_to_option(:oauth2_access_token_serialization_format)} configuration
option
- otherwise, returns `:opaque`
"""
@spec serialization_format(Context.t()) :: Asteroid.Token.serialization_format()
def serialization_format(%{flow: flow, client: client}) do
attr = "__asteroid_oauth2_flow_#{Atom.to_string(flow)}_access_token_serialization_format"
case flow do
:ropc ->
"__asteroid_oauth2_flow_ropc_access_token_serialization_format"
:client_credentials ->
"__asteroid_oauth2_flow_client_credentials_access_token_serialization_format"
:authorization_code ->
"__asteroid_oauth2_flow_authorization_code_access_token_serialization_format"
:implicit ->
"__asteroid_oauth2_flow_implicit_access_token_serialization_format"
:device_authorization ->
"__asteroid_oauth2_flow_device_authorization_access_token_serialization_format"
:oidc_authorization_code ->
"__asteroid_oidc_flow_authorization_code_access_token_serialization_format"
:oidc_implicit ->
"__asteroid_oidc_flow_implicit_access_token_serialization_format"
:oidc_hybrid ->
"__asteroid_oidc_flow_hybrid_access_token_serialization_format"
end
client = Client.fetch_attributes(client, [attr])
if client.attrs[attr] == "jws" do
:jws
else
conf_opt =
case flow do
:ropc ->
:oauth2_flow_ropc_access_token_serialization_format
:client_credentials ->
:oauth2_flow_client_credentials_access_token_serialization_format
:authorization_code ->
:oauth2_flow_authorization_code_access_token_serialization_format
:implicit ->
:oauth2_flow_implicit_access_token_serialization_format
:device_authorization ->
:oauth2_flow_device_authorization_access_token_serialization_format
:oidc_authorization_code ->
:oidc_flow_authorization_code_access_token_serialization_format
:oidc_implicit ->
:oidc_flow_implicit_access_token_serialization_format
:oidc_hybrid ->
:oidc_flow_hybrid_access_token_serialization_format
end
astrenv(conf_opt, astrenv(:oauth2_access_token_serialization_format, :opaque))
end
end
@doc """
Returns the signing key name for an access token
  The following rules apply (<FLOW> is to be replaced by a `t:Asteroid.OAuth2.flow_str/0`):
- if the `__asteroid_oauth2_flow_<FLOW>_access_token_signing_key` is set, returns
this value
- otherwise, if the `:oauth2_flow_<FLOW>_access_token_signing_key` is set, returns
this value
- otherwise, returns the value of the
#{Asteroid.Config.link_to_option(:oauth2_access_token_signing_key)} configuration
option
- otherwise, returns `nil`
"""
@spec signing_key(Context.t()) :: Asteroid.Crypto.Key.name()
def signing_key(%{flow: flow, client: client}) do
attr =
case flow do
:ropc ->
"__asteroid_oauth2_flow_ropc_access_token_signing_key"
:client_credentials ->
"__asteroid_oauth2_flow_client_credentials_access_token_signing_key"
:authorization_code ->
"__asteroid_oauth2_flow_authorization_code_access_token_signing_key"
:implicit ->
"__asteroid_oauth2_flow_implicit_access_token_signing_key"
:device_authorization ->
"__asteroid_oauth2_flow_device_authorization_access_token_signing_key"
:oidc_authorization_code ->
"__asteroid_oidc_flow_authorization_code_access_token_signing_key"
:oidc_implicit ->
"__asteroid_oidc_flow_implicit_access_token_signing_key"
:oidc_hybrid ->
"__asteroid_oidc_flow_hybrid_access_token_signing_key"
end
client = Client.fetch_attributes(client, [attr])
if client.attrs[attr] != nil do
client.attrs[attr]
else
conf_opt =
case flow do
:ropc ->
:oauth2_flow_ropc_access_token_signing_key
:client_credentials ->
:oauth2_flow_client_credentials_access_token_signing_key
:authorization_code ->
:oauth2_flow_authorization_code_access_token_signing_key
:implicit ->
:oauth2_flow_implicit_access_token_signing_key
:device_authorization ->
:oauth2_flow_device_authorization_access_token_signing_key
:oidc_authorization_code ->
:oidc_flow_authorization_code_access_token_signing_key
:oidc_implicit ->
:oidc_flow_implicit_access_token_signing_key
:oidc_hybrid ->
:oidc_flow_hybrid_access_token_signing_key
end
astrenv(conf_opt, astrenv(:oauth2_access_token_signing_key))
end
end
@doc """
  Returns the signing algorithm for an access token
  The following rules apply (<FLOW> is to be replaced by a `t:Asteroid.OAuth2.flow_str/0`):
- if the `__asteroid_oauth2_flow_<FLOW>_access_token_signing_alg` is set, returns
this value
- otherwise, if the `:oauth2_flow_<FLOW>_access_token_signing_alg` is set, returns
this value
- otherwise, returns the value of the
#{Asteroid.Config.link_to_option(:oauth2_access_token_signing_alg)} configuration
option
- otherwise, returns `nil`
"""
@spec signing_alg(Context.t()) :: Asteroid.Crypto.Key.jws_alg()
def signing_alg(%{flow: flow, client: client}) do
attr =
case flow do
:ropc ->
"__asteroid_oauth2_flow_ropc_access_token_signing_alg"
:client_credentials ->
"__asteroid_oauth2_flow_client_credentials_access_token_signing_alg"
:authorization_code ->
"__asteroid_oauth2_flow_authorization_code_access_token_signing_alg"
:implicit ->
"__asteroid_oauth2_flow_implicit_access_token_signing_alg"
:device_authorization ->
"__asteroid_oauth2_flow_device_authorization_access_token_signing_alg"
:oidc_authorization_code ->
"__asteroid_oidc_flow_authorization_code_access_token_signing_alg"
:oidc_implicit ->
"__asteroid_oidc_flow_implicit_access_token_signing_alg"
:oidc_hybrid ->
"__asteroid_oidc_flow_hybrid_access_token_signing_alg"
end
client = Client.fetch_attributes(client, [attr])
if client.attrs[attr] != nil do
client.attrs[attr]
else
conf_opt =
case flow do
:ropc ->
:oauth2_flow_ropc_access_token_signing_alg
:client_credentials ->
:oauth2_flow_client_credentials_access_token_signing_alg
:authorization_code ->
:oauth2_flow_authorization_code_access_token_signing_alg
:implicit ->
:oauth2_flow_implicit_access_token_signing_alg
:device_authorization ->
:oauth2_flow_device_authorization_access_token_signing_alg
:oidc_authorization_code ->
:oidc_flow_authorization_code_access_token_signing_alg
:oidc_implicit ->
:oidc_flow_implicit_access_token_signing_alg
:oidc_hybrid ->
:oidc_flow_hybrid_access_token_signing_alg
end
astrenv(conf_opt, astrenv(:oauth2_access_token_signing_alg))
end
end
end
|
lib/asteroid/token/access_token.ex
| 0.913235
| 0.48499
|
access_token.ex
|
starcoder
|
defmodule Retrieval do
alias Retrieval.Trie
alias Retrieval.PatternParser
@moduledoc """
Provides an interface for creating and collecting data from the trie data structure.
"""
@doc """
Returns a new trie. Providing no arguments creates an empty trie. Optionally a binary or
list of binaries can be passed to `new/1`.
## Examples
Retrieval.new
%Retrieval.Trie{...}
Retrieval.new("apple")
%Retrieval.Trie{...}
Retrieval.new(~w/apple apply ape ample/)
%Retrieval.Trie{...}
"""
def new, do: %Trie{}
def new(binaries) when is_list(binaries) do
insert(%Trie{}, binaries)
end
def new(binary) when is_binary(binary) do
insert(%Trie{}, binary)
end
@doc """
Inserts a binary or list of binaries into an existing trie.
## Examples
Retrieval.new |> Retrieval.insert("apple")
%Retrieval.Trie{...}
Retrieval.new(~w/apple apply ape ample/) |> Retrieval.insert(~w/zebra corgi/)
%Retrieval.Trie{...}
"""
def insert(%Trie{trie: trie}, binaries) when is_list(binaries) do
%Trie{trie: Enum.reduce(binaries, trie, &_insert(&2, &1))}
end
def insert(%Trie{trie: trie}, binary) when is_binary(binary) do
%Trie{trie: _insert(trie, binary)}
end
defp _insert(trie, <<next, rest :: binary>>) do
case Map.has_key?(trie, next) do
true -> Map.put(trie, next, _insert(trie[next], rest))
false -> Map.put(trie, next, _insert(%{}, rest))
end
end
defp _insert(trie, <<>>) do
Map.put(trie, :mark, :mark)
end
@doc """
Returns whether or not a trie contains a given binary key.
## Examples
Retrieval.new(~w/apple apply ape ample/) |> Retrieval.contains?("apple")
true
Retrieval.new(~w/apple apply ape ample/) |> Retrieval.contains?("zebra")
false
"""
def contains?(%Trie{trie: trie}, binary) when is_binary(binary) do
_contains?(trie, binary)
end
defp _contains?(trie, <<next, rest :: binary>>) do
case Map.has_key?(trie, next) do
true -> _contains?(trie[next], rest)
false -> false
end
end
defp _contains?(%{mark: :mark}, <<>>) do
true
end
defp _contains?(_trie, <<>>) do
false
end
@doc """
Collects all binaries that begin with a given prefix.
## Examples
Retrieval.new(~w/apple apply ape ample/) |> Retrieval.prefix("ap")
["apple", "apply", "ape"]
Retrieval.new(~w/apple apply ape ample/) |> Retrieval.prefix("z")
[]
"""
def prefix(%Trie{trie: trie}, binary) when is_binary(binary) do
_prefix(trie, binary, binary)
end
defp _prefix(trie, <<next, rest :: binary>>, acc) do
case Map.has_key?(trie, next) do
true -> _prefix(trie[next], rest, acc)
false -> []
end
end
# An interesting discovery I made here is that treating the accumulator as a binary is actually quicker
# than converting the prefix to a char list, prepending to it, reversing when a word is found, and converting
# to a binary.
defp _prefix(trie, <<>>, acc) do
Enum.flat_map(trie, fn
{:mark, :mark} -> [acc]
{ch, sub_trie} -> _prefix(sub_trie, <<>>, acc <> <<ch>>)
end)
end
@doc """
  Collects all binaries that match a given pattern. Returns either a list of matches
or an error in the form `{:error, reason}`.
## Patterns
`*` - Wildcard, matches any character.
`[...]` - Inclusion group, matches any character between brackets.
`[^...]` - Exclusion group, matches any character not between brackets.
`{...}` - Capture group, must be named and can be combined with an
inclusion or exclusion group, otherwise treated as a wildcard.
All future instances of same name captures are swapped with
the value of the initial capture.
## Examples
Retrieval.new(~w/apple apply ape ample/) |> Retrieval.pattern("a{1}{1}**")
["apple", "apply"]
Retrieval.new(~w/apple apply ape ample/) |> Retrieval.pattern("*{1[^p]}{1}**")
[]
Retrieval.new(~w/apple apply zebra house/) |> Retrieval.pattern("[hz]****")
["house", "zebra"]
Retrieval.new(~w/apple apply zebra house/) |> Retrieval.pattern("[hz]***[^ea]")
[]
Retrieval.new(~w/apple apply zebra house/) |> Retrieval.pattern("[hz]***[^ea")
{:error, "Dangling group (exclusion) starting at column 8, expecting ]"}
"""
def pattern(%Trie{trie: trie}, pattern) when is_binary(pattern) do
_pattern(trie, %{}, pattern, <<>>, :parse)
end
defp _pattern(trie, capture_map, pattern, acc, :parse) do
case PatternParser.parse(pattern) do
{:error, message} -> {:error, message}
parsed_pattern -> _pattern(trie, capture_map, parsed_pattern, acc)
end
end
defp _pattern(trie, capture_map, [{:character, ch}|rest], acc) do
case Map.has_key?(trie, ch) do
true -> _pattern(trie[ch], capture_map, rest, acc <> <<ch>>)
false -> []
end
end
defp _pattern(trie, capture_map, [:wildcard|rest], acc) do
Enum.flat_map(trie, fn
{:mark, :mark} -> []
{ch, sub_trie} -> _pattern(sub_trie, capture_map, rest, acc <> <<ch>>)
end)
end
defp _pattern(trie, capture_map, [{:exclusion, exclusions}|rest], acc) do
pruned_trie = Enum.filter(trie, fn({k, _v}) -> !(Map.has_key?(exclusions, k)) end)
Enum.flat_map(pruned_trie, fn
{:mark, :mark} -> []
{ch, sub_trie} -> _pattern(sub_trie, capture_map, rest, acc <> <<ch>>)
end)
end
defp _pattern(trie, capture_map, [{:inclusion, inclusions}|rest], acc) do
pruned_trie = Enum.filter(trie, fn({k, _v}) -> Map.has_key?(inclusions, k) end)
Enum.flat_map(pruned_trie, fn
{:mark, :mark} -> []
{ch, sub_trie} -> _pattern(sub_trie, capture_map, rest, acc <> <<ch>>)
end)
end
defp _pattern(trie, capture_map, [{:capture, name}|rest], acc) do
case Map.has_key?(capture_map, name) do
true ->
match = capture_map[name]
case Map.has_key?(trie, match) do
true -> _pattern(trie[match], capture_map, rest, acc <> <<match>>)
false -> []
end
false ->
Enum.flat_map(trie, fn
{:mark, :mark} -> []
{ch, sub_trie} ->
capture_map = Map.put(capture_map, name, ch)
_pattern(sub_trie, capture_map, rest, acc <> <<ch>>)
end)
end
end
defp _pattern(trie, capture_map, [{:capture, name, :exclusion, exclusions}|rest], acc) do
case Map.has_key?(capture_map, name) do
true ->
match = capture_map[name]
case Map.has_key?(trie, match) do
true -> _pattern(trie[match], capture_map, rest, acc <> <<match>>)
false -> []
end
false ->
pruned_trie = Enum.filter(trie, fn({k, _v}) -> !(Map.has_key?(exclusions, k)) end)
Enum.flat_map(pruned_trie, fn
{:mark, :mark} -> []
{ch, sub_trie} ->
capture_map = Map.put(capture_map, name, ch)
_pattern(sub_trie, capture_map, rest, acc <> <<ch>>)
end)
end
end
defp _pattern(trie, capture_map, [{:capture, name, :inclusion, inclusions}|rest], acc) do
case Map.has_key?(capture_map, name) do
true ->
match = capture_map[name]
case Map.has_key?(trie, match) do
true -> _pattern(trie[match], capture_map, rest, acc <> <<match>>)
false -> []
end
false ->
pruned_trie = Enum.filter(trie, fn({k, _v}) -> Map.has_key?(inclusions, k) end)
Enum.flat_map(pruned_trie, fn
{:mark, :mark} -> []
{ch, sub_trie} ->
capture_map = Map.put(capture_map, name, ch)
_pattern(sub_trie, capture_map, rest, acc <> <<ch>>)
end)
end
end
defp _pattern(trie, _capture_map, [], acc) do
case Map.has_key?(trie, :mark) do
true -> [acc]
false -> []
end
end
end
|
lib/retrieval.ex
| 0.934035
| 0.699396
|
retrieval.ex
|
starcoder
|
defmodule AWS.Rekognition do
@moduledoc """
This is the Amazon Rekognition API reference.
"""
@doc """
Compares a face in the *source* input image with each of the 100 largest faces
detected in the *target* input image.
If the source image contains multiple faces, the service detects the largest
face and compares it with each face detected in the target image.
You pass the input and target images either as base64-encoded image bytes or as
references to images in an Amazon S3 bucket. If you use the AWS CLI to call
Amazon Rekognition operations, passing image bytes isn't supported. The image
must be formatted as a PNG or JPEG file.
In response, the operation returns an array of face matches ordered by
similarity score in descending order. For each face match, the response provides
  a bounding box of the face, facial landmarks, pose details (pitch, roll, and
yaw), quality (brightness and sharpness), and confidence value (indicating the
level of confidence that the bounding box contains a face). The response also
provides a similarity score, which indicates how closely the faces match.
By default, only faces with a similarity score of greater than or equal to 80%
are returned in the response. You can change this value by specifying the
`SimilarityThreshold` parameter.
`CompareFaces` also returns an array of faces that don't match the source image.
For each face, it returns a bounding box, confidence value, landmarks, pose
details, and quality. The response also returns information about the face in
the source image, including the bounding box of the face and confidence value.
The `QualityFilter` input parameter allows you to filter out detected faces that
don’t meet a required quality bar. The quality bar is based on a variety of
common use cases. Use `QualityFilter` to set the quality bar by specifying
`LOW`, `MEDIUM`, or `HIGH`. If you do not want to filter detected faces, specify
`NONE`. The default value is `NONE`.
If the image doesn't contain Exif metadata, `CompareFaces` returns orientation
information for the source and target images. Use these values to display the
images with the correct image orientation.
If no faces are detected in the source or target images, `CompareFaces` returns
an `InvalidParameterException` error.
This is a stateless API operation. That is, data returned by this operation
doesn't persist.
For an example, see Comparing Faces in Images in the Amazon Rekognition
Developer Guide.
This operation requires permissions to perform the `rekognition:CompareFaces`
action.
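  ## Example (illustrative; bucket and object names are placeholders)

      AWS.Rekognition.compare_faces(client, %{
        "SourceImage" => %{"S3Object" => %{"Bucket" => "my-bucket", "Name" => "source.jpg"}},
        "TargetImage" => %{"S3Object" => %{"Bucket" => "my-bucket", "Name" => "target.jpg"}},
        "SimilarityThreshold" => 90
      })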
"""
def compare_faces(client, input, options \\ []) do
request(client, "CompareFaces", input, options)
end
@doc """
Creates a collection in an AWS Region.
You can add faces to the collection using the `IndexFaces` operation.
For example, you might create collections, one for each of your application
users. A user can then index faces using the `IndexFaces` operation and persist
results in a specific collection. Then, a user can search the collection for
faces in the user-specific container.
When you create a collection, it is associated with the latest version of the
face model version.
Collection names are case-sensitive.
This operation requires permissions to perform the
`rekognition:CreateCollection` action.
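  ## Example (illustrative)

      AWS.Rekognition.create_collection(client, %{"CollectionId" => "my-collection"})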
"""
def create_collection(client, input, options \\ []) do
request(client, "CreateCollection", input, options)
end
@doc """
Creates a new Amazon Rekognition Custom Labels project.
A project is a logical grouping of resources (images, Labels, models) and
operations (training, evaluation and detection).
This operation requires permissions to perform the `rekognition:CreateProject`
action.
"""
def create_project(client, input, options \\ []) do
request(client, "CreateProject", input, options)
end
@doc """
Creates a new version of a model and begins training.
Models are managed as part of an Amazon Rekognition Custom Labels project. You
can specify one training dataset and one testing dataset. The response from
`CreateProjectVersion` is an Amazon Resource Name (ARN) for the version of the
model.
Training takes a while to complete. You can get the current status by calling
`DescribeProjectVersions`.
Once training has successfully completed, call `DescribeProjectVersions` to get
the training results and evaluate the model.
After evaluating the model, you start the model by calling
`StartProjectVersion`.
This operation requires permissions to perform the
`rekognition:CreateProjectVersion` action.
"""
def create_project_version(client, input, options \\ []) do
request(client, "CreateProjectVersion", input, options)
end
@doc """
Creates an Amazon Rekognition stream processor that you can use to detect and
recognize faces in a streaming video.
Amazon Rekognition Video is a consumer of live video from Amazon Kinesis Video
Streams. Amazon Rekognition Video sends analysis results to Amazon Kinesis Data
Streams.
You provide as input a Kinesis video stream (`Input`) and a Kinesis data stream
(`Output`) stream. You also specify the face recognition criteria in `Settings`.
For example, the collection containing faces that you want to recognize. Use
`Name` to assign an identifier for the stream processor. You use `Name` to
manage the stream processor. For example, you can start processing the source
video by calling `StartStreamProcessor` with the `Name` field.
After you have finished analyzing a streaming video, use `StopStreamProcessor`
to stop processing. You can delete the stream processor by calling
`DeleteStreamProcessor`.
"""
def create_stream_processor(client, input, options \\ []) do
request(client, "CreateStreamProcessor", input, options)
end
@doc """
Deletes the specified collection.
Note that this operation removes all faces in the collection. For an example,
see `delete-collection-procedure`.
This operation requires permissions to perform the
`rekognition:DeleteCollection` action.
"""
def delete_collection(client, input, options \\ []) do
request(client, "DeleteCollection", input, options)
end
@doc """
Deletes faces from a collection.
You specify a collection ID and an array of face IDs to remove from the
collection.
This operation requires permissions to perform the `rekognition:DeleteFaces`
action.
"""
def delete_faces(client, input, options \\ []) do
request(client, "DeleteFaces", input, options)
end
@doc """
Deletes an Amazon Rekognition Custom Labels project.
To delete a project you must first delete all models associated with the
project. To delete a model, see `DeleteProjectVersion`.
This operation requires permissions to perform the `rekognition:DeleteProject`
action.
"""
def delete_project(client, input, options \\ []) do
request(client, "DeleteProject", input, options)
end
@doc """
Deletes an Amazon Rekognition Custom Labels model.
You can't delete a model if it is running or if it is training. To check the
status of a model, use the `Status` field returned from
`DescribeProjectVersions`. To stop a running model call `StopProjectVersion`. If
the model is training, wait until it finishes.
This operation requires permissions to perform the
`rekognition:DeleteProjectVersion` action.
"""
def delete_project_version(client, input, options \\ []) do
request(client, "DeleteProjectVersion", input, options)
end
@doc """
Deletes the stream processor identified by `Name`.
You assign the value for `Name` when you create the stream processor with
`CreateStreamProcessor`. You might not be able to use the same name for a stream
processor for a few seconds after calling `DeleteStreamProcessor`.
"""
def delete_stream_processor(client, input, options \\ []) do
request(client, "DeleteStreamProcessor", input, options)
end
@doc """
Describes the specified collection.
You can use `DescribeCollection` to get information, such as the number of faces
indexed into a collection and the version of the model used by the collection
for face detection.
For more information, see Describing a Collection in the Amazon Rekognition
Developer Guide.
"""
def describe_collection(client, input, options \\ []) do
request(client, "DescribeCollection", input, options)
end
@doc """
Lists and describes the models in an Amazon Rekognition Custom Labels project.
You can specify up to 10 model versions in `ProjectVersionArns`. If you don't
specify a value, descriptions for all models are returned.
This operation requires permissions to perform the
`rekognition:DescribeProjectVersions` action.
"""
def describe_project_versions(client, input, options \\ []) do
request(client, "DescribeProjectVersions", input, options)
end
@doc """
Lists and gets information about your Amazon Rekognition Custom Labels projects.
This operation requires permissions to perform the
`rekognition:DescribeProjects` action.
"""
def describe_projects(client, input, options \\ []) do
request(client, "DescribeProjects", input, options)
end
@doc """
Provides information about a stream processor created by
`CreateStreamProcessor`.
You can get information about the input and output streams, the input parameters
for the face recognition being performed, and the current status of the stream
processor.
"""
def describe_stream_processor(client, input, options \\ []) do
request(client, "DescribeStreamProcessor", input, options)
end
@doc """
Detects custom labels in a supplied image by using an Amazon Rekognition Custom
Labels model.
  You specify which version of a model to use with the
  `ProjectVersionArn` input parameter.
You pass the input image as base64-encoded image bytes or as a reference to an
image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition
operations, passing image bytes is not supported. The image must be either a PNG
or JPEG formatted file.
For each object that the model version detects on an image, the API returns a
(`CustomLabel`) object in an array (`CustomLabels`). Each `CustomLabel` object
provides the label name (`Name`), the level of confidence that the image
contains the object (`Confidence`), and object location information, if it
exists, for the label on the image (`Geometry`).
  During training, the model calculates a threshold value that determines whether a
  prediction for a label is true. By default, `DetectCustomLabels` doesn't return
labels whose confidence value is below the model's calculated threshold value.
To filter labels that are returned, specify a value for `MinConfidence` that is
higher than the model's calculated threshold. You can get the model's calculated
threshold from the model's training results shown in the Amazon Rekognition
Custom Labels console. To get all labels, regardless of confidence, specify a
`MinConfidence` value of 0.
You can also add the `MaxResults` parameter to limit the number of labels
returned.
This is a stateless API operation. That is, the operation does not persist any
data.
This operation requires permissions to perform the
`rekognition:DetectCustomLabels` action.
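  ## Example (illustrative; the ARN, bucket, and object name are placeholders)

      AWS.Rekognition.detect_custom_labels(client, %{
        "ProjectVersionArn" => "arn:aws:rekognition:us-east-1:111122223333:project/my-project/version/my-version/1",
        "Image" => %{"S3Object" => %{"Bucket" => "my-bucket", "Name" => "part.jpg"}},
        "MinConfidence" => 70
      })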
"""
def detect_custom_labels(client, input, options \\ []) do
request(client, "DetectCustomLabels", input, options)
end
@doc """
Detects faces within an image that is provided as input.
`DetectFaces` detects the 100 largest faces in the image. For each face
detected, the operation returns face details. These details include a bounding
box of the face, a confidence value (that the bounding box contains a face), and
a fixed set of attributes such as facial landmarks (for example, coordinates of
eye and mouth), presence of beard, sunglasses, and so on.
The face-detection algorithm is most effective on frontal faces. For non-frontal
or obscured faces, the algorithm might not detect the faces or might detect
faces with lower confidence.
You pass the input image either as base64-encoded image bytes or as a reference
to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
Rekognition operations, passing image bytes is not supported. The image must be
either a PNG or JPEG formatted file.
This is a stateless API operation. That is, the operation does not persist any
data.
This operation requires permissions to perform the `rekognition:DetectFaces`
action.
"""
def detect_faces(client, input, options \\ []) do
request(client, "DetectFaces", input, options)
end
@doc """
Detects instances of real-world entities within an image (JPEG or PNG) provided
as input.
This includes objects like flower, tree, and table; events like wedding,
graduation, and birthday party; and concepts like landscape, evening, and
nature.
For an example, see Analyzing Images Stored in an Amazon S3 Bucket in the Amazon
Rekognition Developer Guide.
`DetectLabels` does not support the detection of activities. However, activity
detection is supported for label detection in videos. For more information, see
StartLabelDetection in the Amazon Rekognition Developer Guide.
You pass the input image as base64-encoded image bytes or as a reference to an
image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition
operations, passing image bytes is not supported. The image must be either a PNG
or JPEG formatted file.
For each object, scene, and concept the API returns one or more labels. Each
label provides the object name, and the level of confidence that the image
contains the object. For example, suppose the input image has a lighthouse, the
sea, and a rock. The response includes all three labels, one for each object.
`{Name: lighthouse, Confidence: 98.4629}`
`{Name: rock,Confidence: 79.2097}`
` {Name: sea,Confidence: 75.061}`
In the preceding example, the operation returns one label for each of the three
objects. The operation can also return multiple labels for the same object in
the image. For example, if the input image shows a flower (for example, a
tulip), the operation might return the following three labels.
`{Name: flower,Confidence: 99.0562}`
`{Name: plant,Confidence: 99.0562}`
`{Name: tulip,Confidence: 99.0562}`
In this example, the detection algorithm more precisely identifies the flower as
a tulip.
In response, the API returns an array of labels. In addition, the response also
includes the orientation correction. Optionally, you can specify `MinConfidence`
to control the confidence threshold for the labels returned. The default is 55%.
You can also add the `MaxLabels` parameter to limit the number of labels
returned.
If the object detected is a person, the operation doesn't provide the same
facial details that the `DetectFaces` operation provides.
`DetectLabels` returns bounding boxes for instances of common object labels in
an array of `Instance` objects. An `Instance` object contains a `BoundingBox`
object, for the location of the label on the image. It also includes the
confidence by which the bounding box was detected.
`DetectLabels` also returns a hierarchical taxonomy of detected labels. For
example, a detected car might be assigned the label *car*. The label *car* has
two parent labels: *Vehicle* (its parent) and *Transportation* (its
grandparent). The response returns the entire list of ancestors for a label.
Each ancestor is a unique label in the response. In the previous example, *Car*,
*Vehicle*, and *Transportation* are returned as unique labels in the response.
This is a stateless API operation. That is, the operation does not persist any
data.
This operation requires permissions to perform the `rekognition:DetectLabels`
action.
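  ## Example (illustrative; bucket and object names are placeholders)

      AWS.Rekognition.detect_labels(client, %{
        "Image" => %{"S3Object" => %{"Bucket" => "my-bucket", "Name" => "photo.jpg"}},
        "MaxLabels" => 10,
        "MinConfidence" => 75
      })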
"""
def detect_labels(client, input, options \\ []) do
request(client, "DetectLabels", input, options)
end
@doc """
Detects unsafe content in a specified JPEG or PNG format image.
Use `DetectModerationLabels` to moderate images depending on your requirements.
For example, you might want to filter images that contain nudity, but not images
containing suggestive content.
To filter images, use the labels returned by `DetectModerationLabels` to
determine which types of content are appropriate.
For information about moderation labels, see Detecting Unsafe Content in the
Amazon Rekognition Developer Guide.
You pass the input image either as base64-encoded image bytes or as a reference
to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
Rekognition operations, passing image bytes is not supported. The image must be
either a PNG or JPEG formatted file.
"""
def detect_moderation_labels(client, input, options \\ []) do
request(client, "DetectModerationLabels", input, options)
end
@doc """
Detects text in the input image and converts it into machine-readable text.
Pass the input image as base64-encoded image bytes or as a reference to an image
in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition
operations, you must pass it as a reference to an image in an Amazon S3 bucket.
For the AWS CLI, passing image bytes is not supported. The image must be either
a .png or .jpeg formatted file.
The `DetectText` operation returns text in an array of `TextDetection` elements,
`TextDetections`. Each `TextDetection` element provides information about a
single word or line of text that was detected in the image.
A word is one or more ISO basic latin script characters that are not separated
by spaces. `DetectText` can detect up to 50 words in an image.
A line is a string of equally spaced words. A line isn't necessarily a complete
sentence. For example, a driver's license number is detected as a line. A line
ends when there is no aligned text after it. Also, a line ends when there is a
large gap between words, relative to the length of the words. This means,
depending on the gap between words, Amazon Rekognition may detect multiple lines
in text aligned in the same direction. Periods don't represent the end of a
line. If a sentence spans multiple lines, the `DetectText` operation returns
multiple lines.
To determine whether a `TextDetection` element is a line of text or a word, use
the `TextDetection` object `Type` field.
To be detected, text must be within +/- 90 degrees orientation of the horizontal
axis.
For more information, see DetectText in the Amazon Rekognition Developer Guide.
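  ## Example (illustrative; bucket and object names are placeholders)

      AWS.Rekognition.detect_text(client, %{
        "Image" => %{"S3Object" => %{"Bucket" => "my-bucket", "Name" => "sign.jpg"}}
      })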
"""
def detect_text(client, input, options \\ []) do
request(client, "DetectText", input, options)
end
@doc """
Gets the name and additional information about a celebrity based on his or her
Amazon Rekognition ID.
The additional information is returned as an array of URLs. If there is no
additional information about the celebrity, this list is empty.
For more information, see Recognizing Celebrities in an Image in the Amazon
Rekognition Developer Guide.
This operation requires permissions to perform the
`rekognition:GetCelebrityInfo` action.
"""
def get_celebrity_info(client, input, options \\ []) do
request(client, "GetCelebrityInfo", input, options)
end
@doc """
  Gets the celebrity recognition results for an Amazon Rekognition Video analysis
started by `StartCelebrityRecognition`.
Celebrity recognition in a video is an asynchronous operation. Analysis is
started by a call to `StartCelebrityRecognition` which returns a job identifier
(`JobId`). When the celebrity recognition operation finishes, Amazon Rekognition
Video publishes a completion status to the Amazon Simple Notification Service
topic registered in the initial call to `StartCelebrityRecognition`. To get the
results of the celebrity recognition analysis, first check that the status value
published to the Amazon SNS topic is `SUCCEEDED`. If so, call
  `GetCelebrityRecognition` and pass the job identifier (`JobId`) from the initial
  call to `StartCelebrityRecognition`.
For more information, see Working With Stored Videos in the Amazon Rekognition
Developer Guide.
`GetCelebrityRecognition` returns detected celebrities and the time(s) they are
detected in an array (`Celebrities`) of `CelebrityRecognition` objects. Each
`CelebrityRecognition` contains information about the celebrity in a
`CelebrityDetail` object and the time, `Timestamp`, the celebrity was detected.
`GetCelebrityRecognition` only returns the default facial attributes
(`BoundingBox`, `Confidence`, `Landmarks`, `Pose`, and `Quality`). The other
facial attributes listed in the `Face` object of the following response syntax
are not returned. For more information, see FaceDetail in the Amazon Rekognition
Developer Guide.
By default, the `Celebrities` array is sorted by time (milliseconds from the
start of the video). You can also sort the array by celebrity by specifying the
value `ID` in the `SortBy` input parameter.
  The `CelebrityDetail` object includes the celebrity identifier and additional
  information URLs. If you don't store the additional information URLs, you can
  get them later by calling `GetCelebrityInfo` with the celebrity identifier.
No information is returned for faces not recognized as celebrities.
  Use the `MaxResults` parameter to limit the number of celebrities returned. If there are
more results than specified in `MaxResults`, the value of `NextToken` in the
operation response contains a pagination token for getting the next set of
  results. To get the next page of results, call `GetCelebrityRecognition` and
populate the `NextToken` request parameter with the token value returned from
the previous call to `GetCelebrityRecognition`.
"""
def get_celebrity_recognition(client, input, options \\ []) do
request(client, "GetCelebrityRecognition", input, options)
end
@doc """
  Gets the unsafe content analysis results for an Amazon Rekognition Video analysis
started by `StartContentModeration`.
Unsafe content analysis of a video is an asynchronous operation. You start
analysis by calling `StartContentModeration` which returns a job identifier
(`JobId`). When analysis finishes, Amazon Rekognition Video publishes a
completion status to the Amazon Simple Notification Service topic registered in
the initial call to `StartContentModeration`. To get the results of the unsafe
content analysis, first check that the status value published to the Amazon SNS
topic is `SUCCEEDED`. If so, call `GetContentModeration` and pass the job
identifier (`JobId`) from the initial call to `StartContentModeration`.
For more information, see Working with Stored Videos in the Amazon Rekognition
  Developer Guide.
`GetContentModeration` returns detected unsafe content labels, and the time they
are detected, in an array, `ModerationLabels`, of `ContentModerationDetection`
objects.
By default, the moderated labels are returned sorted by time, in milliseconds
from the start of the video. You can also sort them by moderated label by
specifying `NAME` for the `SortBy` input parameter.
Since video analysis can return a large number of results, use the `MaxResults`
parameter to limit the number of labels returned in a single call to
`GetContentModeration`. If there are more results than specified in
`MaxResults`, the value of `NextToken` in the operation response contains a
pagination token for getting the next set of results. To get the next page of
results, call `GetContentModeration` and populate the `NextToken` request
parameter with the value of `NextToken` returned from the previous call to
`GetContentModeration`.
For more information, see Detecting Unsafe Content in the Amazon Rekognition
Developer Guide.
"""
def get_content_moderation(client, input, options \\ []) do
request(client, "GetContentModeration", input, options)
end
@doc """
  Gets face detection results for an Amazon Rekognition Video analysis started by
`StartFaceDetection`.
Face detection with Amazon Rekognition Video is an asynchronous operation. You
start face detection by calling `StartFaceDetection` which returns a job
identifier (`JobId`). When the face detection operation finishes, Amazon
Rekognition Video publishes a completion status to the Amazon Simple
Notification Service topic registered in the initial call to
`StartFaceDetection`. To get the results of the face detection operation, first
check that the status value published to the Amazon SNS topic is `SUCCEEDED`. If
so, call `GetFaceDetection` and pass the job identifier (`JobId`) from the
initial call to `StartFaceDetection`.
`GetFaceDetection` returns an array of detected faces (`Faces`) sorted by the
time the faces were detected.
  Use the `MaxResults` parameter to limit the number of faces returned. If there are
more results than specified in `MaxResults`, the value of `NextToken` in the
operation response contains a pagination token for getting the next set of
results. To get the next page of results, call `GetFaceDetection` and populate
the `NextToken` request parameter with the token value returned from the
previous call to `GetFaceDetection`.
"""
def get_face_detection(client, input, options \\ []) do
request(client, "GetFaceDetection", input, options)
end
@doc """
Gets the face search results for Amazon Rekognition Video face search started by
`StartFaceSearch`.
The search returns faces in a collection that match the faces of persons
detected in a video. It also includes the time(s) that faces are matched in the
video.
Face search in a video is an asynchronous operation. You start face search by
calling to `StartFaceSearch` which returns a job identifier (`JobId`). When the
search operation finishes, Amazon Rekognition Video publishes a completion
status to the Amazon Simple Notification Service topic registered in the initial
call to `StartFaceSearch`. To get the search results, first check that the
status value published to the Amazon SNS topic is `SUCCEEDED`. If so, call
`GetFaceSearch` and pass the job identifier (`JobId`) from the initial call to
`StartFaceSearch`.
For more information, see Searching Faces in a Collection in the Amazon
Rekognition Developer Guide.
  The search results are returned in an array, `Persons`, of `PersonMatch` objects.
  Each `PersonMatch` element contains details about the matching faces in the input
  collection, person information (facial attributes, bounding boxes, and person
  identifier) for the matched person, and the time the person was matched in the
video.
`GetFaceSearch` only returns the default facial attributes (`BoundingBox`,
`Confidence`, `Landmarks`, `Pose`, and `Quality`). The other facial attributes
listed in the `Face` object of the following response syntax are not returned.
For more information, see FaceDetail in the Amazon Rekognition Developer Guide.
  By default, the `Persons` array is sorted by the time, in milliseconds from the
  start of the video, that persons are matched. You can also sort by person by
  specifying `INDEX` for the `SortBy` input parameter.
"""
def get_face_search(client, input, options \\ []) do
request(client, "GetFaceSearch", input, options)
end
@doc """
  Gets the label detection results of an Amazon Rekognition Video analysis started
by `StartLabelDetection`.
The label detection operation is started by a call to `StartLabelDetection`
which returns a job identifier (`JobId`). When the label detection operation
finishes, Amazon Rekognition publishes a completion status to the Amazon Simple
Notification Service topic registered in the initial call to
  `StartLabelDetection`. To get the results of the label detection operation,
first check that the status value published to the Amazon SNS topic is
`SUCCEEDED`. If so, call `GetLabelDetection` and pass the job identifier
(`JobId`) from the initial call to `StartLabelDetection`.
`GetLabelDetection` returns an array of detected labels (`Labels`) sorted by the
time the labels were detected. You can also sort by the label name by specifying
`NAME` for the `SortBy` input parameter.
The labels returned include the label name, the percentage confidence in the
accuracy of the detected label, and the time the label was detected in the
video.
The returned labels also include bounding box information for common objects, a
hierarchical taxonomy of detected labels, and the version of the label model
used for detection.
  Use the `MaxResults` parameter to limit the number of labels returned. If there are
more results than specified in `MaxResults`, the value of `NextToken` in the
operation response contains a pagination token for getting the next set of
  results. To get the next page of results, call `GetLabelDetection` and populate
the `NextToken` request parameter with the token value returned from the
previous call to `GetLabelDetection`.
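  ## Example (illustrative; `job_id` comes from a prior `StartLabelDetection` call)

      AWS.Rekognition.get_label_detection(client, %{
        "JobId" => job_id,
        "MaxResults" => 100,
        "SortBy" => "TIMESTAMP"
      })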
"""
def get_label_detection(client, input, options \\ []) do
request(client, "GetLabelDetection", input, options)
end
@doc """
  Gets the path tracking results of an Amazon Rekognition Video analysis started by
`StartPersonTracking`.
The person path tracking operation is started by a call to `StartPersonTracking`
which returns a job identifier (`JobId`). When the operation finishes, Amazon
Rekognition Video publishes a completion status to the Amazon Simple
Notification Service topic registered in the initial call to
`StartPersonTracking`.
To get the results of the person path tracking operation, first check that the
status value published to the Amazon SNS topic is `SUCCEEDED`. If so, call
`GetPersonTracking` and pass the job identifier (`JobId`) from the initial call
to `StartPersonTracking`.
`GetPersonTracking` returns an array, `Persons`, of tracked persons and the
time(s) their paths were tracked in the video.
`GetPersonTracking` only returns the default facial attributes (`BoundingBox`,
`Confidence`, `Landmarks`, `Pose`, and `Quality`). The other facial attributes
listed in the `Face` object of the following response syntax are not returned.
For more information, see FaceDetail in the Amazon Rekognition Developer Guide.
By default, the array is sorted by the time(s) a person's path is tracked in the
video. You can sort by tracked persons by specifying `INDEX` for the `SortBy`
input parameter.
Use the `MaxResults` parameter to limit the number of items returned. If there
are more results than specified in `MaxResults`, the value of `NextToken` in the
operation response contains a pagination token for getting the next set of
results. To get the next page of results, call `GetPersonTracking` and populate
the `NextToken` request parameter with the token value returned from the
previous call to `GetPersonTracking`.
"""
def get_person_tracking(client, input, options \\ []) do
request(client, "GetPersonTracking", input, options)
end
@doc """
  Gets the segment detection results of an Amazon Rekognition Video analysis
started by `StartSegmentDetection`.
Segment detection with Amazon Rekognition Video is an asynchronous operation.
You start segment detection by calling `StartSegmentDetection` which returns a
job identifier (`JobId`). When the segment detection operation finishes, Amazon
Rekognition publishes a completion status to the Amazon Simple Notification
Service topic registered in the initial call to `StartSegmentDetection`. To get
the results of the segment detection operation, first check that the status
value published to the Amazon SNS topic is `SUCCEEDED`. If so, call
`GetSegmentDetection` and pass the job identifier (`JobId`) from the initial
call to `StartSegmentDetection`.
`GetSegmentDetection` returns detected segments in an array (`Segments`) of
`SegmentDetection` objects. `Segments` is sorted by the segment types specified
in the `SegmentTypes` input parameter of `StartSegmentDetection`. Each element
of the array includes the detected segment, the percentage confidence in the
accuracy of the detected segment, the type of the segment, and the frame in which
the segment was detected.
Use `SelectedSegmentTypes` to find out the type of segment detection requested
in the call to `StartSegmentDetection`.
Use the `MaxResults` parameter to limit the number of segment detections
returned. If there are more results than specified in `MaxResults`, the value of
`NextToken` in the operation response contains a pagination token for getting
the next set of results. To get the next page of results, call
`GetSegmentDetection` and populate the `NextToken` request parameter with the
token value returned from the previous call to `GetSegmentDetection`.
For more information, see Detecting Video Segments in Stored Video in the Amazon
Rekognition Developer Guide.
"""
def get_segment_detection(client, input, options \\ []) do
request(client, "GetSegmentDetection", input, options)
end
@doc """
Gets the text detection results of an Amazon Rekognition Video analysis started
by `StartTextDetection`.
Text detection with Amazon Rekognition Video is an asynchronous operation. You
start text detection by calling `StartTextDetection` which returns a job
identifier (`JobId`). When the text detection operation finishes, Amazon
Rekognition publishes a completion status to the Amazon Simple Notification
Service topic registered in the initial call to `StartTextDetection`. To get the
results of the text detection operation, first check that the status value
published to the Amazon SNS topic is `SUCCEEDED`. If so, call `GetTextDetection`
and pass the job identifier (`JobId`) from the initial call to
`StartTextDetection`.
`GetTextDetection` returns an array of detected text (`TextDetections`) sorted
by the time the text was detected, up to 50 words per frame of video.
Each element of the array includes the detected text, the percentage confidence
in the accuracy of the detected text, the time the text was detected, bounding
box information for where the text was located, and unique identifiers for words
and their lines.
Use the `MaxResults` parameter to limit the number of text detections returned. If
there are more results than specified in `MaxResults`, the value of `NextToken`
in the operation response contains a pagination token for getting the next set
of results. To get the next page of results, call `GetTextDetection` and
populate the `NextToken` request parameter with the token value returned from
the previous call to `GetTextDetection`.
"""
def get_text_detection(client, input, options \\ []) do
request(client, "GetTextDetection", input, options)
end
@doc """
Detects faces in the input image and adds them to the specified collection.
Amazon Rekognition doesn't save the actual faces that are detected. Instead, the
underlying detection algorithm first detects the faces in the input image. For
each face, the algorithm extracts facial features into a feature vector, and
stores it in the backend database. Amazon Rekognition uses feature vectors when
it performs face match and search operations using the `SearchFaces` and
`SearchFacesByImage` operations.
For more information, see Adding Faces to a Collection in the Amazon Rekognition
Developer Guide.
To get the number of faces in a collection, call `DescribeCollection`.
If you're using version 1.0 of the face detection model, `IndexFaces` indexes
the 15 largest faces in the input image. Later versions of the face detection
model index the 100 largest faces in the input image.
If you're using version 4 or later of the face model, image orientation
information is not returned in the `OrientationCorrection` field.
To determine which version of the model you're using, call `DescribeCollection`
and supply the collection ID. You can also get the model version from the value
of `FaceModelVersion` in the response from `IndexFaces`.
For more information, see Model Versioning in the Amazon Rekognition Developer
Guide.
If you provide the optional `ExternalImageId` for the input image you provided,
Amazon Rekognition associates this ID with all faces that it detects. When you
call the `ListFaces` operation, the response returns the external ID. You can
use this external image ID to create a client-side index to associate the faces
with each image. You can then use the index to find all faces in an image.
You can specify the maximum number of faces to index with the `MaxFaces` input
parameter. This is useful when you want to index the largest faces in an image
and don't want to index smaller faces, such as those belonging to people
standing in the background.
The `QualityFilter` input parameter allows you to filter out detected faces that
don’t meet a required quality bar. The quality bar is based on a variety of
common use cases. By default, `IndexFaces` chooses the quality bar that's used
to filter faces. You can also explicitly choose the quality bar. Use
`QualityFilter` to set the quality bar by specifying `LOW`, `MEDIUM`, or
`HIGH`. If you do not want to filter detected faces, specify `NONE`.
To use quality filtering, you need a collection associated with version 3 of the
face model or higher. To get the version of the face model associated with a
collection, call `DescribeCollection`.
Information about faces detected in an image, but not indexed, is returned in an
array of `UnindexedFace` objects, `UnindexedFaces`. Faces aren't indexed for
reasons such as:
* The number of faces detected exceeds the value of the `MaxFaces`
request parameter.
* The face is too small compared to the image dimensions.
* The face is too blurry.
* The image is too dark.
* The face has an extreme pose.
* The face doesn’t have enough detail to be suitable for face
search.
In response, the `IndexFaces` operation returns an array of metadata for all
detected faces, `FaceRecords`. This includes:
* The bounding box, `BoundingBox`, of the detected face.
* A confidence value, `Confidence`, which indicates the confidence
that the bounding box contains a face.
* A face ID, `FaceId`, assigned by the service for each face that's
detected and stored.
* An image ID, `ImageId`, assigned by the service for the input
image.
If you request all facial attributes (by using the `detectionAttributes`
parameter), Amazon Rekognition returns detailed facial attributes, such as
facial landmarks (for example, location of eye and mouth) and other facial
attributes. If you provide the same image, specify the same collection, and use
the same external ID in the `IndexFaces` operation, Amazon Rekognition doesn't
save duplicate face metadata.
The input image is passed either as base64-encoded image bytes, or as a
reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call
Amazon Rekognition operations, passing image bytes isn't supported. The image
must be formatted as a PNG or JPEG file.
This operation requires permissions to perform the `rekognition:IndexFaces`
action.
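## Example

A minimal sketch, added for illustration (not part of the generated AWS
docs); the collection ID, external image ID, bucket, and object key are
placeholders:

    {:ok, %{"FaceRecords" => face_records}, _response} =
      index_faces(client, %{
        "CollectionId" => "my-collection",
        "ExternalImageId" => "photo-123",
        "MaxFaces" => 10,
        "QualityFilter" => "HIGH",
        "Image" => %{
          "S3Object" => %{"Bucket" => "my-bucket", "Name" => "photo.jpg"}
        }
      })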
"""
def index_faces(client, input, options \\ []) do
request(client, "IndexFaces", input, options)
end
@doc """
Returns a list of collection IDs in your account.
If the result is truncated, the response also provides a `NextToken` that you
can use in the subsequent request to fetch the next set of collection IDs.
For an example, see Listing Collections in the Amazon Rekognition Developer
Guide.
This operation requires permissions to perform the `rekognition:ListCollections`
action.
"""
def list_collections(client, input, options \\ []) do
request(client, "ListCollections", input, options)
end
@doc """
Returns metadata for faces in the specified collection.
This metadata includes information such as the bounding box coordinates, the
confidence (that the bounding box contains a face), and face ID. For an example,
see Listing Faces in a Collection in the Amazon Rekognition Developer Guide.
This operation requires permissions to perform the `rekognition:ListFaces`
action.
"""
def list_faces(client, input, options \\ []) do
request(client, "ListFaces", input, options)
end
@doc """
Gets a list of stream processors that you have created with
`CreateStreamProcessor`.
"""
def list_stream_processors(client, input, options \\ []) do
request(client, "ListStreamProcessors", input, options)
end
@doc """
Returns an array of celebrities recognized in the input image.
For more information, see Recognizing Celebrities in the Amazon Rekognition
Developer Guide.
`RecognizeCelebrities` returns the 64 largest faces in the image. It lists
recognized celebrities in the `CelebrityFaces` array and unrecognized faces in
the `UnrecognizedFaces` array. `RecognizeCelebrities` doesn't return celebrities
whose faces aren't among the largest 64 faces in the image.
For each celebrity recognized, `RecognizeCelebrities` returns a `Celebrity`
object. The `Celebrity` object contains the celebrity name, ID, URL links to
additional information, match confidence, and a `ComparedFace` object that you
can use to locate the celebrity's face on the image.
Amazon Rekognition doesn't retain information about which images a celebrity has
been recognized in. Your application must store this information and use the
`Celebrity` ID property as a unique identifier for the celebrity. If you don't
store the celebrity name or additional information URLs returned by
`RecognizeCelebrities`, you will need the ID to identify the celebrity in a call
to the `GetCelebrityInfo` operation.
You pass the input image either as base64-encoded image bytes or as a reference
to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
Rekognition operations, passing image bytes is not supported. The image must be
either a PNG or JPEG formatted file.
For an example, see Recognizing Celebrities in an Image in the Amazon
Rekognition Developer Guide.
This operation requires permissions to perform the
`rekognition:RecognizeCelebrities` operation.
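## Example

A minimal sketch, added for illustration (not part of the generated AWS
docs); the bucket and object key are placeholders:

    {:ok, body, _response} =
      recognize_celebrities(client, %{
        "Image" => %{
          "S3Object" => %{"Bucket" => "my-bucket", "Name" => "red-carpet.jpg"}
        }
      })

    names = Enum.map(body["CelebrityFaces"], & &1["Name"])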
"""
def recognize_celebrities(client, input, options \\ []) do
request(client, "RecognizeCelebrities", input, options)
end
@doc """
For a given input face ID, searches for matching faces in the collection the
face belongs to.
You get a face ID when you add a face to the collection using the `IndexFaces`
operation. The operation compares the features of the input face with faces in
the specified collection.
You can also search faces without indexing faces by using the
`SearchFacesByImage` operation.
The operation response returns an array of faces that match, ordered by
similarity score with the highest similarity first. More specifically, it is an
array of metadata for each face match that is found. Along with the metadata,
the response also includes a `confidence` value for each face match, indicating
the confidence that the specific face matches the input face.
For an example, see Searching for a Face Using Its Face ID in the Amazon
Rekognition Developer Guide.
This operation requires permissions to perform the `rekognition:SearchFaces`
action.
"""
def search_faces(client, input, options \\ []) do
request(client, "SearchFaces", input, options)
end
@doc """
For a given input image, first detects the largest face in the image, and then
searches the specified collection for matching faces.
The operation compares the features of the input face with faces in the
specified collection.
To search for all faces in an input image, you might first call the `IndexFaces`
operation, and then use the face IDs returned in subsequent calls to the
`SearchFaces` operation.
You can also call the `DetectFaces` operation and use the bounding boxes in the
response to make face crops, which then you can pass in to the
`SearchFacesByImage` operation.
You pass the input image either as base64-encoded image bytes or as a reference
to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
Rekognition operations, passing image bytes is not supported. The image must be
either a PNG or JPEG formatted file.
The response returns an array of faces that match, ordered by similarity score
with the highest similarity first. More specifically, it is an array of metadata
for each face match found. Along with the metadata, the response also includes a
`similarity` indicating how similar the face is to the input face. In the
response, the operation also returns the bounding box (and a confidence level
that the bounding box contains a face) of the face that Amazon Rekognition used
for the input image.
For an example, see Searching for a Face Using an Image in the Amazon
Developer Guide.
The `QualityFilter` input parameter allows you to filter out detected faces that
don’t meet a required quality bar. The quality bar is based on a variety of
common use cases. Use `QualityFilter` to set the quality bar for filtering by
specifying `LOW`, `MEDIUM`, or `HIGH`. If you do not want to filter detected
faces, specify `NONE`. The default value is `NONE`.
To use quality filtering, you need a collection associated with version 3 of the
face model or higher. To get the version of the face model associated with a
collection, call `DescribeCollection`.
This operation requires permissions to perform the
`rekognition:SearchFacesByImage` action.
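## Example

A minimal sketch, added for illustration (not part of the generated AWS
docs); the collection ID, bucket, and object key are placeholders:

    {:ok, %{"FaceMatches" => matches}, _response} =
      search_faces_by_image(client, %{
        "CollectionId" => "my-collection",
        "FaceMatchThreshold" => 90,
        "QualityFilter" => "HIGH",
        "Image" => %{
          "S3Object" => %{"Bucket" => "my-bucket", "Name" => "query.jpg"}
        }
      })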
"""
def search_faces_by_image(client, input, options \\ []) do
request(client, "SearchFacesByImage", input, options)
end
@doc """
Starts asynchronous recognition of celebrities in a stored video.
Amazon Rekognition Video can detect celebrities in a video. The video must be
stored in an Amazon S3 bucket. Use `Video` to specify the bucket name and the
filename of the
video. `StartCelebrityRecognition` returns a job identifier (`JobId`) which you
use to get the results of the analysis. When celebrity recognition analysis is
finished, Amazon Rekognition Video publishes a completion status to the Amazon
Simple Notification Service topic that you specify in `NotificationChannel`. To
get the results of the celebrity recognition analysis, first check that the
status value published to the Amazon SNS topic is `SUCCEEDED`. If so, call
`GetCelebrityRecognition` and pass the job identifier (`JobId`) from the initial
call to `StartCelebrityRecognition`.
For more information, see Recognizing Celebrities in the Amazon Rekognition
Developer Guide.
"""
def start_celebrity_recognition(client, input, options \\ []) do
request(client, "StartCelebrityRecognition", input, options)
end
@doc """
Starts asynchronous detection of unsafe content in a stored video.
Amazon Rekognition Video can moderate content in a video stored in an Amazon S3
bucket. Use `Video` to specify the bucket name and the filename of the video.
`StartContentModeration` returns a job identifier (`JobId`) which you use to get
the results of the analysis. When unsafe content analysis is finished, Amazon
Rekognition Video publishes a completion status to the Amazon Simple
Notification Service topic that you specify in `NotificationChannel`.
To get the results of the unsafe content analysis, first check that the status
value published to the Amazon SNS topic is `SUCCEEDED`. If so, call
`GetContentModeration` and pass the job identifier (`JobId`) from the initial
call to `StartContentModeration`.
For more information, see Detecting Unsafe Content in the Amazon Rekognition
Developer Guide.
"""
def start_content_moderation(client, input, options \\ []) do
request(client, "StartContentModeration", input, options)
end
@doc """
Starts asynchronous detection of faces in a stored video.
Amazon Rekognition Video can detect faces in a video stored in an Amazon S3
bucket. Use `Video` to specify the bucket name and the filename of the video.
`StartFaceDetection` returns a job identifier (`JobId`) that you use to get the
results of the operation. When face detection is finished, Amazon Rekognition
Video publishes a completion status to the Amazon Simple Notification Service
topic that you specify in `NotificationChannel`. To get the results of the face
detection operation, first check that the status value published to the Amazon
SNS topic is `SUCCEEDED`. If so, call `GetFaceDetection` and pass the job
identifier (`JobId`) from the initial call to `StartFaceDetection`.
For more information, see Detecting Faces in a Stored Video in the Amazon
Rekognition Developer Guide.
"""
def start_face_detection(client, input, options \\ []) do
request(client, "StartFaceDetection", input, options)
end
@doc """
Starts the asynchronous search for faces in a collection that match the faces of
persons detected in a stored video.
The video must be stored in an Amazon S3 bucket. Use `Video` to specify the
bucket name and the filename of the video. `StartFaceSearch` returns a job
identifier (`JobId`) which you use to get the search results once the search has
completed. When searching is finished, Amazon Rekognition Video publishes a
completion status to the Amazon Simple Notification Service topic that you
specify in `NotificationChannel`. To get the search results, first check that
the status value published to the Amazon SNS topic is `SUCCEEDED`. If so, call
`GetFaceSearch` and pass the job identifier (`JobId`) from the initial call to
`StartFaceSearch`. For more information, see `procedure-person-search-videos`.
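## Example

A minimal sketch, added for illustration (not part of the generated AWS
docs); the bucket, object key, and collection ID are placeholders:

    {:ok, %{"JobId" => job_id}, _response} =
      start_face_search(client, %{
        "Video" => %{
          "S3Object" => %{"Bucket" => "my-bucket", "Name" => "video.mp4"}
        },
        "CollectionId" => "my-collection",
        "FaceMatchThreshold" => 90
      })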
"""
def start_face_search(client, input, options \\ []) do
request(client, "StartFaceSearch", input, options)
end
@doc """
Starts asynchronous detection of labels in a stored video.
Amazon Rekognition Video can detect labels in a video. Labels are instances of
real-world entities. This includes objects like flower, tree, and table; events
like wedding, graduation, and birthday party; concepts like landscape, evening,
and nature; and activities like a person getting out of a car or a person
skiing.
The video must be stored in an Amazon S3 bucket. Use `Video` to specify the
bucket name and the filename of the video. `StartLabelDetection` returns a job
identifier (`JobId`) which you use to get the results of the operation. When
label detection is finished, Amazon Rekognition Video publishes a completion
status to the Amazon Simple Notification Service topic that you specify in
`NotificationChannel`.
To get the results of the label detection operation, first check that the status
value published to the Amazon SNS topic is `SUCCEEDED`. If so, call
`GetLabelDetection` and pass the job identifier (`JobId`) from the initial call
to `StartLabelDetection`.
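## Example

A minimal sketch, added for illustration (not part of the generated AWS
docs); the bucket, object key, SNS topic ARN, and IAM role ARN are
placeholders:

    {:ok, %{"JobId" => job_id}, _response} =
      start_label_detection(client, %{
        "Video" => %{
          "S3Object" => %{"Bucket" => "my-bucket", "Name" => "video.mp4"}
        },
        "MinConfidence" => 80,
        "NotificationChannel" => %{
          "SNSTopicArn" => "arn:aws:sns:us-east-1:111122223333:rekognition",
          "RoleArn" => "arn:aws:iam::111122223333:role/rekognition-sns"
        }
      })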
"""
def start_label_detection(client, input, options \\ []) do
request(client, "StartLabelDetection", input, options)
end
@doc """
Starts the asynchronous tracking of a person's path in a stored video.
Amazon Rekognition Video can track the path of people in a video stored in an
Amazon S3 bucket. Use `Video` to specify the bucket name and the filename of the
video. `StartPersonTracking` returns a job identifier (`JobId`) which you use to
get the results of the operation. When person tracking is finished, Amazon
Rekognition publishes a completion status to the Amazon Simple Notification
Service topic that you specify in `NotificationChannel`.
To get the results of the person detection operation, first check that the
status value published to the Amazon SNS topic is `SUCCEEDED`. If so, call
`GetPersonTracking` and pass the job identifier (`JobId`) from the initial call
to `StartPersonTracking`.
"""
def start_person_tracking(client, input, options \\ []) do
request(client, "StartPersonTracking", input, options)
end
@doc """
Starts the running of a model version.
Starting a model takes a while to complete. To check the current state of the
model, use `DescribeProjectVersions`.
Once the model is running, you can detect custom labels in new images by calling
`DetectCustomLabels`.
You are charged for the amount of time that the model is running. To stop a
running model, call `StopProjectVersion`.
This operation requires permissions to perform the
`rekognition:StartProjectVersion` action.
"""
def start_project_version(client, input, options \\ []) do
request(client, "StartProjectVersion", input, options)
end
@doc """
Starts asynchronous detection of segments in a stored video.
Amazon Rekognition Video can detect segments in a video stored in an Amazon S3
bucket. Use `Video` to specify the bucket name and the filename of the video.
`StartSegmentDetection` returns a job identifier (`JobId`) which you use to get
the results of the operation. When segment detection is finished, Amazon
Rekognition Video publishes a completion status to the Amazon Simple
Notification Service topic that you specify in `NotificationChannel`.
You can use the `Filters` (`StartSegmentDetectionFilters`) input parameter to
specify the minimum detection confidence returned in the response. Within
`Filters`, use `ShotFilter` (`StartShotDetectionFilter`) to filter detected
shots. Use `TechnicalCueFilter` (`StartTechnicalCueDetectionFilter`) to filter
technical cues.
To get the results of the segment detection operation, first check that the
status value published to the Amazon SNS topic is `SUCCEEDED`. If so, call
`GetSegmentDetection` and pass the job identifier (`JobId`) from the initial
call to `StartSegmentDetection`.
For more information, see Detecting Video Segments in Stored Video in the Amazon
Rekognition Developer Guide.
"""
def start_segment_detection(client, input, options \\ []) do
request(client, "StartSegmentDetection", input, options)
end
@doc """
Starts processing a stream processor.
You create a stream processor by calling `CreateStreamProcessor`. To tell
`StartStreamProcessor` which stream processor to start, use the value of the
`Name` field specified in the call to `CreateStreamProcessor`.
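## Example

A minimal sketch, added for illustration (not part of the generated AWS
docs); the processor name is a placeholder for the `Name` given to
`CreateStreamProcessor`:

    {:ok, _body, _response} =
      start_stream_processor(client, %{"Name" => "my-stream-processor"})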
"""
def start_stream_processor(client, input, options \\ []) do
request(client, "StartStreamProcessor", input, options)
end
@doc """
Starts asynchronous detection of text in a stored video.
Amazon Rekognition Video can detect text in a video stored in an Amazon S3
bucket. Use `Video` to specify the bucket name and the filename of the video.
`StartTextDetection` returns a job identifier (`JobId`) which you use to get the
results of the operation. When text detection is finished, Amazon Rekognition
Video publishes a completion status to the Amazon Simple Notification Service
topic that you specify in `NotificationChannel`.
To get the results of the text detection operation, first check that the status
value published to the Amazon SNS topic is `SUCCEEDED`. If so, call
`GetTextDetection` and pass the job identifier (`JobId`) from the initial call
to `StartTextDetection`.
"""
def start_text_detection(client, input, options \\ []) do
request(client, "StartTextDetection", input, options)
end
@doc """
Stops a running model.
The operation might take a while to complete. To check the current status, call
`DescribeProjectVersions`.
"""
def stop_project_version(client, input, options \\ []) do
request(client, "StopProjectVersion", input, options)
end
@doc """
Stops a running stream processor that was created by `CreateStreamProcessor`.
"""
def stop_stream_processor(client, input, options \\ []) do
request(client, "StopStreamProcessor", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
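# Every action in this module goes through this single JSON endpoint: the
# action name travels in the `X-Amz-Target` header and the request body is
# signed with AWS Signature Version 4 before being POSTed.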
client = %{client | service: "rekognition"}
host = build_host("rekognition", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "RekognitionService.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
# lib/aws/generated/rekognition.ex
defmodule Plymio.Codi.Pattern.Bang do
@moduledoc ~S"""
The *bang* patterns builds bang functions
(e.g. `myfun!(arg)`) using existing base functions (e.g. `myfun(arg)`).
When the base function returns `{:ok, value}`, the bang
function returns `value`.
If the base function returns `{:error, error}`, the `error` is raised.
Bang functions can optionally be built with a `@doc`, `@since`,
and/or `@spec`.
See `Plymio.Codi` for an overview and documentation terms.
Note that if the base function is in another module, the base mfa
`{module, function, arity}` is validated, i.e. the `function` must
exist in the `module` with the given `arity`.
If `:fun_doc` is not in the pattern opts, a default of `:bang` is
used. (It can be disabled by explicitly setting `:fun_doc` to
`nil`.)
## Pattern: *bang*
Valid keys in the *cpo* are:
| Key | Aliases |
| :--- | :--- |
| `:bang_module` | *:module, :fun_mod, :bang_module, :function_module* |
| `:bang_name` | *:name, :fun_name, :function_name* |
| `:bang_args` | *:args, :fun_args, :function_args* |
| `:bang_arity` | *:arity, :fun_arity, :function_arity* |
| `:bang_doc` | *:doc, :fun_doc, :function_doc* |
| `:spec_args` | |
| `:spec_result` |*:result, :fun_result, :function_result* |
| `:since` | |
## Examples
Here is the common case of a bang function for a function in the
same module. Note the automatically generated `:bang`-format `@doc`
and explicitly specified `@since`:
iex> {:ok, {forms, _}} = [
...> bang: [as: :fun_tre, arity: 3, since: "1.7.9"]
...> ] |> produce_codi
...> forms |> harnais_helper_format_forms!
["@doc \"Bang function for `fun_tre/3`\"",
"@since \"1.7.9\"",
"def(fun_tre!(var1, var2, var3)) do",
" case(fun_tre(var1, var2, var3)) do",
" {:ok, value} ->",
" value",
"",
" {:error, error} ->",
" raise(error)",
" end",
"end"]
Here the other function is in a different module (`ModuleA`):
iex> {:ok, {forms, _}} = [
...> bang: [as: :fun_tre, arity: 3, to: ModuleA, since: "1.7.9"]
...> ] |> produce_codi
...> forms |> harnais_helper_format_forms!
["@doc \"Bang function for `ModuleA.fun_tre/3`\"",
"@since \"1.7.9\"",
"def(fun_tre!(var1, var2, var3)) do",
" case(ModuleA.fun_tre(var1, var2, var3)) do",
" {:ok, value} ->",
" value",
"",
" {:error, error} ->",
" raise(error)",
" end",
"end"]
The `:fun_args` can be supplied to improve the definition. Note the `:fun_doc` is set to `false`.
iex> {:ok, {forms, _}} = [
...> bang: [as: :fun_tre, args: [:x, :y, :z], to: ModuleA, fun_doc: false]
...> ] |> produce_codi
...> forms |> harnais_helper_format_forms!
["@doc false", "def(fun_tre!(x, y, z)) do",
" case(ModuleA.fun_tre(x, y, z)) do",
" {:ok, value} ->",
" value",
"",
" {:error, error} ->",
" raise(error)",
" end",
"end"]
Similarly, if the *cpo* contains a `:spec_result` key, a `@spec` will
be generated. The second example has an explicit `:spec_args`:
iex> {:ok, {forms, _}} = [
...> bang: [as: :fun_tre, args: [:x, :y, :z], module: ModuleA, result: :tuple]
...> ] |> produce_codi
...> forms |> harnais_helper_format_forms!
["@doc \"Bang function for `ModuleA.fun_tre/3`\"",
"@spec fun_tre!(any, any, any) :: tuple",
"def(fun_tre!(x, y, z)) do",
" case(ModuleA.fun_tre(x, y, z)) do",
" {:ok, value} ->",
" value",
"",
" {:error, error} ->",
" raise(error)",
" end",
"end"]
iex> {:ok, {forms, _}} = [
...> bang: [as: :fun_tre, args: [:x, :y, :z], module: ModuleA,
...> spec_args: [:integer, :binary, :atom], result: :tuple]
...> ] |> produce_codi
...> forms |> harnais_helper_format_forms!
["@doc \"Bang function for `ModuleA.fun_tre/3`\"",
"@spec fun_tre!(integer, binary, atom) :: tuple",
"def(fun_tre!(x, y, z)) do",
" case(ModuleA.fun_tre(x, y, z)) do",
" {:ok, value} ->",
" value",
"",
" {:error, error} ->",
" raise(error)",
" end",
"end"]
## Pattern: *bang_module*
The *bang_module* pattern builds a bang function for one or more
functions in a module. As with `:bang`, a `@doc` or `@since` can be generated at
the same time.
Valid keys in the *cpo* are:
| Key | Aliases |
| :--- | :--- |
| `:bang_module` | *:to, :module, :fun_mod, :fun_module, :function_module* |
| `:bang_doc` | *:doc, :fun_doc, :function_doc* |
| `:take` | |
| `:drop` | |
| `:filter` | |
| `:reject` | |
| `:since` | |
## Examples
Here a bang function will be generated for all the functions in the module.
iex> {:ok, {forms, _}} = [
...> bang_module: [module: ModuleA],
...> ] |> produce_codi
...> forms |> harnais_helper_format_forms!
["@doc \"Bang function for `ModuleA.fun_due/2`\"",
"def(fun_due!(var1, var2)) do",
" case(ModuleA.fun_due(var1, var2)) do",
" {:ok, value} ->",
" value",
"",
" {:error, error} ->",
" raise(error)",
" end",
"end",
"@doc \"Bang function for `ModuleA.fun_one/1`\"",
"def(fun_one!(var1)) do", " case(ModuleA.fun_one(var1)) do",
" {:ok, value} ->",
" value",
"",
" {:error, error} ->",
" raise(error)",
" end",
"end",
"@doc \"Bang function for `ModuleA.fun_tre/3`\"",
"def(fun_tre!(var1, var2, var3)) do",
" case(ModuleA.fun_tre(var1, var2, var3)) do",
" {:ok, value} ->",
" value",
"",
" {:error, error} ->",
" raise(error)",
" end",
"end"]
In the same way as `:delegate_module`, the functions can be selected
using e.g. `:take`. Here `:since` is also given:
iex> {:ok, {forms, _}} = [
...> bang_module: [module: ModuleA, take: :fun_due, since: "1.7.9"],
...> ] |> produce_codi
...> forms |> harnais_helper_format_forms!
["@doc \"Bang function for `ModuleA.fun_due/2`\"",
"@since \"1.7.9\"",
"def(fun_due!(var1, var2)) do",
" case(ModuleA.fun_due(var1, var2)) do",
" {:ok, value} ->",
" value",
"",
" {:error, error} ->",
" raise(error)",
" end",
"end"]
"""
alias Plymio.Codi, as: CODI
use Plymio.Fontais.Attribute
use Plymio.Codi.Attribute
import Plymio.Fontais.Guard,
only: [
is_value_unset_or_nil: 1
]
import Plymio.Fontais.Option,
only: [
opts_canonical_keys: 2,
opts_take_canonical_keys: 2,
opts_create_aliases_dict: 1
]
import Plymio.Codi.Utility,
only: [
cpo_resolve_bang_module: 1,
cpo_resolve_bang_name: 1,
cpo_resolve_bang_args: 1,
cpo_resolve_bang_doc: 1,
cpo_resolve_fun_name: 1
]
import Plymio.Codi.Utility.Module,
only: [
reduce_module_fva: 2,
state_validate_mfa: 2,
state_resolve_module_fva: 2
]
import Plymio.Funcio.Enum.Map.Collate,
only: [
map_collate0_enum: 2
]
import Plymio.Codi.CPO
@pattern_bang_kvs_alias [
@plymio_codi_key_alias_pattern,
@plymio_codi_key_alias_status,
@plymio_codi_key_alias_form,
@plymio_codi_key_alias_since,
@plymio_codi_key_alias_bang_module,
@plymio_codi_key_alias_bang_name,
@plymio_codi_key_alias_bang_doc,
@plymio_codi_key_alias_bang_args,
@plymio_codi_key_alias_bang_arity,
@plymio_codi_key_alias_fun_name,
# limited aliases
{@plymio_codi_key_typespec_spec_args, [:spec_args]},
@plymio_codi_key_alias_typespec_spec_result,
@plymio_codi_key_alias_forms_edit
]
@pattern_bang_dict_alias @pattern_bang_kvs_alias
|> opts_create_aliases_dict
@doc false
def cpo_pattern_bang_normalise(opts, dict \\ nil) do
opts |> opts_take_canonical_keys(dict || @pattern_bang_dict_alias)
end
@pattern_bang_module_kvs_alias [
@plymio_codi_key_alias_pattern,
@plymio_codi_key_alias_status,
@plymio_codi_key_alias_form,
@plymio_codi_key_alias_since,
@plymio_codi_key_alias_bang_module,
@plymio_codi_key_alias_bang_name,
@plymio_codi_key_alias_bang_doc,
{@plymio_codi_key_take, nil},
{@plymio_codi_key_drop, nil},
{@plymio_codi_key_filter, nil},
{@plymio_codi_key_reject, nil},
@plymio_codi_key_alias_forms_edit
]
@pattern_bang_module_dict_alias @pattern_bang_module_kvs_alias
|> opts_create_aliases_dict
@doc false
def cpo_pattern_bang_module_normalise(opts, dict \\ nil) do
opts |> opts_canonical_keys(dict || @pattern_bang_module_dict_alias)
end
@doc false
def express_pattern(%CODI{} = state, pattern, cpo)
when pattern == @plymio_codi_pattern_bang do
with {:ok, cpo} <- cpo |> cpo_pattern_bang_normalise,
{:ok, bang_module} <- cpo |> cpo_resolve_bang_module,
{:ok, bang_name} <- cpo |> cpo_resolve_bang_name,
{:ok, bang_args} <- cpo |> cpo_resolve_bang_args,
{:ok, cpo} <- cpo |> cpo_maybe_put_bang_doc(@plymio_codi_doc_type_bang),
{:ok, bang_doc} <- cpo |> cpo_resolve_bang_doc,
{:ok, cpo} <- cpo |> cpo_maybe_put_fun_name("#{bang_name}!" |> String.to_atom()),
{:ok, real_name} <- cpo |> cpo_resolve_fun_name,
{:ok, {_, %CODI{} = state}} <-
state |> state_validate_mfa({bang_module, bang_name, length(bang_args)}),
# base dependent cpo
{:ok, depend_cpo} <- cpo |> cpo_mark_status_active,
{:ok, depend_cpo} <- depend_cpo |> cpo_put_fun_module(bang_module),
{:ok, depend_cpo} <- depend_cpo |> cpo_put_fun_doc(bang_doc),
# delete the fun_args to stop confusion over type args; fun_arity will be used if needed
{:ok, depend_cpo} <- depend_cpo |> cpo_drop_fun_args,
# the dependent doc cpo
{:ok, depend_doc_cpo} <- depend_cpo |> cpo_put_pattern(@plymio_codi_pattern_doc),
{:ok, depend_doc_cpo} <- depend_doc_cpo |> cpo_put_fun_arity(length(bang_args)),
{:ok, depend_doc_cpo} <- depend_doc_cpo |> cpo_put_fun_name(bang_name),
# the dependent since cpo
{:ok, depend_since_cpo} <- depend_cpo |> cpo_put_pattern(@plymio_codi_pattern_since),
# the dependent type cpo
{:ok, depend_type_cpo} <-
depend_cpo |> cpo_put_pattern(@plymio_codi_pattern_typespec_spec),
{:ok, depend_type_cpo} <-
depend_type_cpo
|> cpo_maybe_add_typespec_spec_opts([
{@plymio_codi_key_typespec_spec_arity, length(bang_args)}
]),
{:ok, depend_type_cpo} <- depend_type_cpo |> cpo_put_fun_name(real_name) do
pattern_form =
bang_module
|> case do
# local function
x when is_value_unset_or_nil(x) ->
quote do
def unquote(real_name)(unquote_splicing(bang_args)) do
case unquote(bang_name)(unquote_splicing(bang_args)) do
{:ok, value} -> value
{:error, error} -> raise error
end
end
end
# explicit module
_ ->
quote do
def unquote(real_name)(unquote_splicing(bang_args)) do
case unquote(bang_module).unquote(bang_name)(unquote_splicing(bang_args)) do
{:ok, value} -> value
{:error, error} -> raise error
end
end
end
end
depend_patterns = [
depend_doc_cpo,
depend_since_cpo,
depend_type_cpo
]
with {:ok, %CODI{} = depend_state} <- state |> CODI.update_snippets(depend_patterns),
{:ok, {depend_product, %CODI{}}} <-
depend_state |> Plymio.Codi.Stage.Normalise.normalise_snippets(),
{:ok, depend_cpos} <- depend_product |> cpo_fetch_patterns,
{:ok, cpo} <- cpo |> cpo_done_with_edited_form(pattern_form) do
cpos = depend_cpos ++ [cpo]
{:ok, {cpos, state}}
else
{:error, %{__exception__: true}} = result -> result
end
else
{:error, %{__exception__: true}} = result -> result
end
end
def express_pattern(%CODI{} = state, pattern, opts)
when pattern == @plymio_codi_pattern_bang_module do
with {:ok, opts} <- opts |> cpo_pattern_bang_module_normalise,
{:ok, bang_module} <- opts |> cpo_resolve_bang_module,
{:ok, {bang_fva, %CODI{} = state}} <- state |> state_resolve_module_fva(bang_module),
{:ok, bang_fva} <- bang_fva |> reduce_module_fva(opts),
{:ok, bang_cpo} <- opts |> cpo_pattern_bang_normalise,
{:ok, bang_cpo} <- bang_cpo |> cpo_mark_status_active,
{:ok, bang_cpo} <- bang_cpo |> cpo_put_pattern(@plymio_codi_pattern_bang) do
bang_fva
|> map_collate0_enum(fn {name, arity} ->
with {:ok, cpo} <- bang_cpo |> cpo_put_bang_name(name),
{:ok, _cpo} = result <- cpo |> cpo_put_bang_arity(arity) do
result
else
{:error, %{__exception__: true}} = result -> result
end
end)
|> case do
{:error, %{__struct__: _}} = result -> result
{:ok, cpos} -> {:ok, {cpos, state}}
end
else
{:error, %{__exception__: true}} = result -> result
end
end
end
# lib/codi/pattern/bang/bang.ex
defmodule Alchemy.Message do
import Alchemy.Structs
alias Alchemy.{User, Attachment, Embed, Reaction, Reaction.Emoji}
@moduledoc """
This module contains the types and functions related to messages in discord.
"""
@type snowflake :: String.t()
@typedoc """
Represents an iso8601 timestamp.
"""
@type timestamp :: String.t()
@typedoc """
Represents a message in a channel.
- `id`
The id of this message.
- `author`
The user who sent this message. This field will be very incomplete
if the message originated from a webhook.
- `content`
The content of the message.
- `timestamp`
The timestamp of the message.
- `edited_timestamp`
The timestamp of when this message was edited, if it ever was.
- `tts`
Whether this was a tts message.
- `mention_everyone`
Whether this message mentioned everyone.
- `mentions`
A list of users this message mentioned.
- `mention_roles`
A list of role ids this message mentioned.
- `attachments`
A list of attachments to the message.
- `embeds`
A list of embeds attached to this message.
- `reactions`
A list of reactions to this message.
- `nonce`
Used for validating a message was sent.
- `pinned`
Whether this message is pinned.
- `webhook_id`
The id of the webhook that sent this message, if it was sent by a webhook.
"""
@type t :: %__MODULE__{
id: snowflake,
channel_id: snowflake,
author: User.t(),
content: String.t(),
timestamp: timestamp,
edited_timestamp: String.t() | nil,
tts: boolean,
mention_everyone: boolean,
mentions: [User.t()],
mention_roles: [snowflake],
attachments: [Attachment.t()],
embeds: [Embed.t()],
reactions: [Reaction.t()],
nonce: snowflake,
pinned: boolean,
webhook_id: String.t() | nil
}
defstruct [
:id,
:channel_id,
:author,
:content,
:timestamp,
:edited_timestamp,
:tts,
:mention_everyone,
:mentions,
:mention_roles,
:attachments,
:embeds,
:reactions,
:nonce,
:pinned,
:webhook_id
]
@typedoc """
Represents a reaction to a message.
- `count`
Times this specific emoji reaction has been used.
- `me`
Whether this client reacted to the message.
- `emoji`
Information about the emoji used.
"""
@type reaction :: %Reaction{
count: integer,
me: boolean,
emoji: Emoji.t()
}
@typedoc """
Represents an emoji used to react to a message.
- `id`
The id of this emoji. `nil` if this isn't a custom emoji.
- `name`
The name of this emoji.
"""
@type emoji :: %Emoji{
id: String.t() | nil,
name: String.t()
}
@doc false
def from_map(map) do
map
|> field?("author", User)
|> field_map?("mentions", &map_struct(&1, User))
|> field_map?("attachments", &map_struct(&1, Attachment))
|> field_map?("embeds", &Enum.map(&1, fn x -> Embed.from_map(x) end))
|> field_map?("reactions", &map_struct(&1, Reaction))
|> to_struct(__MODULE__)
end
defmacrop matcher(str) do
quote do
fn
unquote(str) <> r -> r
_ -> nil
end
end
end
@type mention_type :: :roles | :nicknames | :channels | :users
@doc """
Finds a list of mentions in a string.
4 types of mentions exist:
- `:roles`
A mention of a specific role.
- `:nicknames`
A mention of a user by nickname.
- `:users`
A mention of a user by name or nickname.
- `:channels`
A mention of a channel.
@spec find_mentions(String.t(), mention_type) :: [snowflake]
def find_mentions(content, type) do
matcher =
case type do
:roles ->
matcher("@&")
:nicknames ->
matcher("@!")
:channels ->
matcher("#")
:users ->
fn
"@!" <> r -> r
"@" <> r -> r
_ -> nil
end
end
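# Capture every <...> mention token in the content, strip the
# type-specific prefix with the matcher, and drop tokens that don't
# match the requested mention type.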
Regex.scan(~r/<([#@]?[!&]?[0-9]+)>/, content, capture: :all_but_first)
|> Stream.concat()
|> Stream.map(matcher)
|> Enum.filter(&(&1 != nil))
end
end
# lib/Structs/Messages/message.ex
defmodule Brex.Result.Helpers do
@moduledoc """
Tools for modifying the reason in `error` tuples.
"""
import Brex.Result.Base
alias Brex.Result.Base
require Logger
@type s(x) :: Base.s(x)
@type t(x) :: Base.t(x)
@type p() :: Base.p()
@type s() :: Base.s()
@type t() :: Base.t()
@doc """
Wraps a naked `:error` atom in a tuple with the given reason.
Leaves success values and `error` tuples unchanged.
## Examples:
iex> :error
...> |> normalize_error(:parsing_failure)
{:error, :parsing_failure}
iex> {:ok, 2}
...> |> normalize_error()
{:ok, 2}
"""
@doc since: "0.3.0"
@spec normalize_error(any, any) :: t()
def normalize_error(x, reason \\ :normalized) do
case x do
:error -> {:error, reason}
{:error, _r} -> x
{:ok, _val} -> x
:ok -> :ok
end
end
@doc """
Lifts a value into a success tuple unless:
1) the value matches the second argument
2) when applied to the value, the second argument function returns `true`
In those cases an `error` tuple is returned with either
1) the third argument as the reason
2) the third argument function applied to the value, as the reason
`lift/3` is lazy; this means the third argument will only be evaluated when necessary.
## Examples:
iex> nil
...> |> lift(nil, :not_found)
{:error, :not_found}
iex> 2
...> |> lift(nil, :not_found)
{:ok, 2}
iex> "test"
...> |> lift(&(&1 == "test"), fn x -> {:oops, x <> "ed"} end)
{:error, {:oops, "tested"}}
## Typespec:
lift(a | b, b | (a | b -> boolean), c | (a | b -> c)) :: s(a) when a: var, b: var, c: var
"""
@doc updated: "0.2.0"
@doc since: "0.1.0"
defmacro lift(val, p, f) do
quote do
p = unquote(p)
val = unquote(val)
# check if the value satisfies the predicate or matches the second argument.
match = if is_function(p), do: p.(val), else: val == p
if match do
f = unquote(f)
# lift to error tuple on match
{:error, if(is_function(f), do: f.(val), else: f)}
else
# ok tuple otherwise
{:ok, val}
end
end
end
@doc """
Applies the function to the reason in an `error` tuple.
Leaves success unchanged.
## Example:
iex> account_name = "test"
...> {:error, :not_found}
...> |> map_error(fn r -> {r, account_name} end)
{:error, {:not_found, "test"}}
"""
@doc since: "0.1.0"
@spec map_error(t(a), (any -> any)) :: t(a) when a: var
def map_error({:error, r}, f), do: error(f.(r))
def map_error({:ok, _val} = ma, _), do: ma
def map_error(:ok, _), do: :ok
@doc """
Replaces the reason in an `error` tuple.
Leaves success unchanged.
Lazy. Only evaluates the second argument if necessary.
## Example:
iex> account_name = "test"
...> {:error, :not_found}
...> |> mask_error({:nonexistent_account, account_name})
{:error, {:nonexistent_account, "test"}}
## Typespec:
mask_error(t(a), any) :: t(a) when a: var
"""
@doc updated: "0.2.0"
@doc since: "0.1.0"
defmacro mask_error(ma, term) do
quote do
case unquote(ma) do
{:error, _} -> {:error, unquote(term)}
{:ok, val} -> {:ok, val}
:ok -> :ok
end
end
end
@spec reason_to_string(any) :: String.t()
defp reason_to_string(value) when is_atom(value), do: Atom.to_string(value)
defp reason_to_string(value) when is_binary(value), do: value
defp reason_to_string(value), do: inspect(value)
@doc """
Logs on `error`, does nothing on success.
## Example:
{:error, :not_found}
|> log_error("There was an error", level: :info, metadata: "some meta")
"""
@doc updated: "0.4.1"
@doc since: "0.1.0"
# TODO: refine the type of second argument
@spec log_error(t(a), String.t() | (any -> any), Keyword.t()) :: t(a) when a: var
def log_error(ma, chardata_or_fun, opts \\ [])
def log_error({:error, r} = ma, chardata_or_fun, opts) when is_binary(chardata_or_fun) do
# default to :error level
{level, metadata} = Keyword.pop(opts, :level, :error)
log_fn =
case level do
:debug -> &Logger.debug/2
:info -> &Logger.info/2
:warn -> &Logger.warn/2
:error -> &Logger.error/2
end
log_fn.(chardata_or_fun, [reason: reason_to_string(r)] ++ metadata)
ma
end
def log_error({:ok, _val} = ma, _, _), do: ma
def log_error(:ok, _, _), do: :ok
@doc """
Converts a matching error to `:ok`.
An error matches if the reason is equal to the supplied atom or the reason passes the predicate.
Leaves success and other errors unchanged.
## Examples:
iex> {:error, :already_completed}
...> |> convert_error(:already_completed)
:ok
iex> {:error, :already_completed}
...> |> convert_error(fn r -> r == :already_completed end)
:ok
"""
@doc updated: "0.2.0"
@doc since: "0.1.0"
@spec convert_error(t(a), (any -> boolean) | any) :: t(a) when a: var
def convert_error({:error, r} = ma, p) when is_function(p) do
if p.(r), do: :ok, else: ma
end
def convert_error({:error, r} = ma, term) do
if r == term, do: :ok, else: ma
end
def convert_error({:ok, _val} = ma, _p), do: ma
def convert_error(:ok, _p), do: :ok
@doc """
Converts a matching error to a success with the given value or function.
An error matches if the reason is equal to the supplied atom or the reason passes the predicate.
Leaves success and other errors unchanged.
Lazy. Only evaluates the second argument if necessary.
## Examples:
iex> {:error, :already_completed}
...> |> convert_error(:already_completed, "submitted")
{:ok, "submitted"}
iex> {:error, "test"}
...> |> convert_error(&(&1 == "test"), fn r -> {:ok, r <> "ed"} end)
{:ok, "tested"}
## Typespec:
convert_error(t(), (any -> boolean) | any, b | (any -> t(b))) :: t(b) when b: var
"""
@doc updated: "0.2.0"
@doc since: "0.1.0"
defmacro convert_error(ma, p, f) do
quote do
ma = unquote(ma)
p = unquote(p)
case ma do
{:error, r} ->
match = if is_function(p), do: p.(r), else: r == p
if match do
f = unquote(f)
# convert to ok tuple with new value.
if is_function(f), do: f.(r), else: {:ok, f}
else
ma
end
{:ok, _v} ->
ma
:ok ->
ma
end
end
end
end
# lib/result/helpers.ex
defmodule Behaviour do
@moduledoc """
Utilities for defining behaviour interfaces.
Behaviours can be referenced by other modules
to ensure they implement required callbacks.
For example, you can specify the `URI.Parser`
behaviour as follows:
defmodule URI.Parser do
use Behaviour
@doc "Parses the given URL"
defcallback parse(uri_info :: URI.t) :: URI.t
@doc "Defines a default port"
defcallback default_port() :: integer
end
And then a module may use it as:
defmodule URI.HTTP do
@behaviour URI.Parser
def default_port(), do: 80
def parse(info), do: info
end
If the behaviour changes or `URI.HTTP` does
not implement one of the callbacks, a warning
will be raised.
## Implementation
Since Erlang R15, behaviours must be defined via
`@callback` attributes. `defcallback` is a simple
mechanism that defines the `@callback` attribute
according to the given type specification. `defcallback` allows
documentation to be created for the callback and defines
a custom function signature.
The callbacks and their documentation can be retrieved
via the `__behaviour__` callback function.
"""
@doc """
Defines a function callback according to the given type specification.
"""
defmacro defcallback(spec) do
do_defcallback(split_spec(spec, quote(do: term)), __CALLER__)
end
@doc """
Defines a macro callback according to the given type specification.
"""
defmacro defmacrocallback(spec) do
do_defmacrocallback(split_spec(spec, quote(do: Macro.t)), __CALLER__)
end
defp split_spec({:when, _, [{:::, _, [spec, return]}, guard]}, _default) do
{spec, return, guard}
end
defp split_spec({:when, _, [spec, guard]}, default) do
{spec, default, guard}
end
defp split_spec({:::, _, [spec, return]}, _default) do
{spec, return, []}
end
defp split_spec(spec, default) do
{spec, default, []}
end
defp do_defcallback({spec, return, guards}, caller) do
case Macro.decompose_call(spec) do
{name, args} ->
do_callback(:def, name, args, name, length(args), args, return, guards, caller)
_ ->
raise ArgumentError, "invalid syntax in defcallback #{Macro.to_string(spec)}"
end
end
defp do_defmacrocallback({spec, return, guards}, caller) do
case Macro.decompose_call(spec) do
{name, args} ->
do_callback(:defmacro, :"MACRO-#{name}", [quote(do: env :: Macro.Env.t)|args],
name, length(args), args, return, guards, caller)
_ ->
raise ArgumentError, "invalid syntax in defmacrocallback #{Macro.to_string(spec)}"
end
end
defp do_callback(kind, name, args, docs_name, docs_arity, _docs_args, return, guards, caller) do
:lists.foreach fn
{:::, _, [left, right]} ->
ensure_not_default(left)
ensure_not_default(right)
left
other ->
ensure_not_default(other)
other
end, args
quote do
@callback unquote(name)(unquote_splicing(args)) :: unquote(return) when unquote(guards)
Behaviour.store_docs(__MODULE__, unquote(caller.line), unquote(kind),
unquote(docs_name), unquote(docs_arity))
end
end
defp ensure_not_default({:\\, _, [_, _]}) do
raise ArgumentError, "default arguments \\\\ not supported in defcallback/defmacrocallback"
end
defp ensure_not_default(_), do: :ok
@doc false
def store_docs(module, line, kind, name, arity) do
doc = Module.get_attribute module, :doc
Module.delete_attribute module, :doc
Module.put_attribute module, :behaviour_docs, {{name, arity}, line, kind, doc}
end
@doc false
defmacro __using__(_) do
quote do
Module.register_attribute(__MODULE__, :behaviour_docs, accumulate: true)
@before_compile unquote(__MODULE__)
import unquote(__MODULE__)
end
end
@doc false
defmacro __before_compile__(env) do
docs = if Keyword.get(Code.compiler_options, :docs) do
:lists.reverse Module.get_attribute(env.module, :behaviour_docs)
end
quote do
@doc false
def __behaviour__(:callbacks) do
__MODULE__.behaviour_info(:callbacks)
end
def __behaviour__(:docs) do
unquote(Macro.escape(docs))
end
end
end
end
# lib/elixir/lib/behaviour.ex
defmodule Plaid.Transactions do
@moduledoc """
Functions for Plaid `transactions` endpoint.
"""
import Plaid, only: [make_request_with_cred: 4, validate_cred: 1]
alias Plaid.Utils
@derive Jason.Encoder
defstruct accounts: [], item: nil, total_transactions: nil, transactions: [], request_id: nil
@type t :: %__MODULE__{
accounts: [Plaid.Accounts.Account.t()],
item: Plaid.Item.t(),
total_transactions: integer,
transactions: [Plaid.Transactions.Transaction.t()],
request_id: String.t()
}
@type params :: %{required(atom) => String.t() | map}
@type config :: %{required(atom) => String.t()}
@endpoint :transactions
defmodule Transaction do
@moduledoc """
Plaid Transaction data structure.
"""
@derive Jason.Encoder
defstruct account_id: nil,
account_owner: nil,
amount: nil,
iso_currency_code: nil,
unofficial_currency_code: nil,
category: nil,
category_id: nil,
date: nil,
location: nil,
name: nil,
payment_meta: nil,
pending: false,
pending_transaction_id: nil,
transaction_id: nil,
transaction_type: nil,
merchant_name: nil,
authorized_date: nil,
payment_channel: nil,
transaction_code: nil
@type t :: %__MODULE__{
account_id: String.t(),
account_owner: String.t(),
amount: float,
iso_currency_code: String.t(),
unofficial_currency_code: String.t(),
category: [String.t()],
category_id: String.t(),
date: String.t(),
location: Plaid.Transactions.Transaction.Location.t(),
name: String.t(),
payment_meta: Plaid.Transactions.Transaction.PaymentMeta.t(),
pending: boolean(),
pending_transaction_id: String.t(),
transaction_id: String.t(),
transaction_type: String.t(),
merchant_name: String.t(),
authorized_date: String.t(),
payment_channel: String.t(),
transaction_code: String.t()
}
defmodule Location do
@moduledoc """
Plaid Transaction Location data structure.
"""
@derive Jason.Encoder
defstruct address: nil,
city: nil,
# Deprecated, use :region instead.
state: nil,
# Deprecated, use :postal_code instead.
zip: nil,
region: nil,
postal_code: nil,
country: nil,
lat: nil,
lon: nil,
store_number: nil
@type t :: %__MODULE__{
address: String.t(),
city: String.t(),
state: String.t(),
zip: String.t(),
region: String.t(),
postal_code: String.t(),
country: String.t(),
lat: float,
lon: float,
store_number: integer
}
end
defmodule PaymentMeta do
@moduledoc """
Plaid Transaction Payment Metadata data structure.
"""
@derive Jason.Encoder
defstruct by_order_of: nil,
payee: nil,
payer: nil,
payment_method: nil,
payment_processor: nil,
ppd_id: nil,
reason: nil,
reference_number: nil
@type t :: %__MODULE__{
by_order_of: String.t(),
payee: String.t(),
payer: String.t(),
payment_method: String.t(),
payment_processor: String.t(),
ppd_id: String.t(),
reason: String.t(),
reference_number: String.t()
}
end
end
@doc """
Gets transactions data associated with an Item.
Parameters
```
%{
access_token: "access-env-identifier",
start_date: "2017-01-01",
end_date: "2017-03-31",
options: %{
count: 20,
offset: 0
}
}
```
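Usage (a sketch for illustration; the access token and any configured
credentials are placeholders):
```
{:ok, %Plaid.Transactions{transactions: transactions}} =
  Plaid.Transactions.get(%{
    access_token: "access-env-identifier",
    start_date: "2017-01-01",
    end_date: "2017-03-31"
  })
```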
"""
@spec get(params, config | nil) :: {:ok, Plaid.Transactions.t()} | {:error, Plaid.Error.t()}
def get(params, config \\ %{}) do
config = validate_cred(config)
endpoint = "#{@endpoint}/get"
make_request_with_cred(:post, endpoint, config, params)
|> Utils.handle_resp(@endpoint)
end
end
# lib/plaid/transactions.ex
defmodule Steve.Storage.Ecto do
@moduledoc """
A storage adapter that uses Ecto to interact with a database.
### Configuration
The application must be configured as shown below.
```elixir
config :steve,
[
storage: Steve.Storage.Ecto,
repository: Example.Repo
]
```
Where `Example.Repo` should be a valid `Ecto.Repo` instance.
### Migrations
One must also define the following migration within their application.
```elixir
defmodule Example.Repo.Migrations.Steve do
use Ecto.Migration
defdelegate change, to: Steve.Storage.Ecto.Migration
end
```
"""
use Steve.Storage
import Ecto.Query
alias Steve.Storage.Ecto.Schema
def enqueue(%Job{id: id, queue: queue} = job, %DateTime{} = time) do
params = %{
uuid: id,
queue: "#{queue}",
content: encode(job),
execute_at: time
}
object = Schema.changeset(params)
case insert(object) do
{:ok, _struct} -> :ok
other -> other
end
end
def dequeue(queue, count) do
handler = fn ->
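# Inside a single transaction: select up to `count` due jobs for this
# queue with a row lock (FOR UPDATE), mark them as running on this node,
# bump their retry counters, and return the decoded jobs.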
select = from(
job in Schema,
lock: "FOR UPDATE",
where: job.execute_at <= ^Time.now(),
where: job.status == ^:queued,
where: job.queue == ^"#{queue}",
order_by: [asc: job.retry],
order_by: job.inserted_at,
limit: ^count
)
list = all(select)
uuid = Enum.map(list, & &1.id)
changes = [
set: [status: :running, node: Config.node()],
inc: [retry: 1]
]
update = from(
job in Schema,
where: job.id in ^uuid
)
update_all(update, changes)
Enum.map(list, &decode/1)
end
{:ok, list} = transaction(handler)
list
end
def ack!(%Job{id: id}) do
query = from(
job in Schema,
where: job.status == ^:running,
where: job.uuid == ^id
)
delete_all(query)
:ok
end
def retry!(%Job{id: id, queue: queue, worker: worker, max_retries: maximum, expiry_days: expiry}) do
stored = get_by!(status: :running, uuid: id)
changes = case stored do
%{retry: count} when count < maximum ->
offset = worker.backoff(count)
future = Time.offset_now(offset)
[status: :queued, execute_at: future]
_other ->
expiry = expiry * (24 * 60 * 60)
future = Time.offset_now(expiry)
[status: :failed, expiry_at: future]
end
object = Schema.changeset(stored, changes)
case update(object) do
{:ok, %{status: :failed}} ->
expired(queue)
{:ok, _struct} ->
:ok
end
end
def recover!(queue) do
query = from(
job in Schema,
where: job.node == ^Config.node(),
where: job.status == ^:running,
where: job.queue == ^"#{queue}"
)
changes = [
set: [status: :queued, node: nil],
inc: [retry: -1]
]
update_all(query, changes)
:ok
end
defp expired(queue) do
query = from(
job in Schema,
where: job.expiry_at <= ^Time.now(),
where: job.status == ^:failed,
where: job.queue == ^"#{queue}"
)
delete_all(query)
:ok
end
defp encode(term) do
:erlang.term_to_binary(term)
end
defp decode(%Schema{content: binary}) do
:erlang.binary_to_term(binary)
end
defp repository do
Config.get!(:repository)
end
defp transaction(handler) do
repository().transaction(handler)
end
defp get_by!(clauses) do
repository().get_by!(Schema, clauses)
end
defp insert(changeset) do
repository().insert(changeset)
end
defp update(changeset) do
repository().update(changeset)
end
defp all(query) do
repository().all(query)
end
defp update_all(query, changes) do
repository().update_all(query, changes)
end
defp delete_all(query) do
repository().delete_all(query)
end
end
# lib/steve/storage/ecto.ex
defmodule JPMarc.Record do
@moduledoc """
Tools for working with JPMARC Record
"""
import XmlBuilder
alias JPMarc.Leader
alias JPMarc.ControlField
alias JPMarc.DataField
alias JPMarc.SubField
@rs "\x1d" # Record separator
@fs "\x1e" # Field separator
@ss "\x1f" # Subfield separator
@leader_length 24
@valid_control_fields ~W(001 003 005 007 008) # Valid control field tags
@typedoc """
Type that represents `JPMarc.Record` struct
This is constructed with `:leader` as `JPMarc.Leader.t` and `:fields` as a list of `JPMarc.ControlField.t` or `JPMarc.DataField.t`
"""
@type t :: %__MODULE__{leader: Leader.t, fields: [ ControlField.t | DataField.t ]}
@typedoc """
Type for field code
"""
@type code_t :: :all | String.t | [String.t]
@typedoc """
Type for indicators
"""
@type ind_t :: String.t | nil
@derive [Poison.Encoder]
defstruct leader: nil, fields: []
@doc """
Return `true` if `tag` is a valid tag number as ControlField, otherwise `false`
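
For example, `"001"` is among the valid control field tags while `"245"` is not:

    iex> JPMarc.Record.control_field?("001")
    true
    iex> JPMarc.Record.control_field?("245")
    false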
"""
@spec control_field?(String.t)::boolean
def control_field?(tag), do: Enum.member?(@valid_control_fields, tag)
@doc """
Returns a list of the fields in `record` matching `tag`, `ind1` and `ind2`, or `[]` when none match
"""
@spec fields(t, String.t, ind_t, ind_t)::[ControlField.t | DataField.t]
def fields(record, tag, ind1 \\ nil, ind2 \\ nil) do
if control_field?(tag) do
record.fields |> Enum.filter(&(&1.tag == tag))
else
case {tag, ind1, ind2} do
{tag, nil, nil} ->
record.fields |> Enum.filter(&(&1.tag == tag))
{tag, ind1, nil} ->
record.fields |> Enum.filter(&(&1.tag == tag && &1.ind1 == ind1))
{tag, ind1, ind2} ->
record.fields |> Enum.filter(&(&1.tag == tag && &1.ind1 == ind1 && &1.ind2 == ind2))
end
end
end
@doc """
Returns the first field in `record` matching `tag`, `ind1` and `ind2`, or `nil` when none matches
"""
@spec field(t, String.t, ind_t, ind_t)::(ControlField.t | DataField.t | nil)
def field(record, tag, ind1 \\ nil, ind2 \\ nil), do: fields(record, tag, ind1, ind2) |> Enum.at(0)
@doc """
Returns a list of subfield lists (one per field matching `tag`, `ind1` and `ind2` in `record`, filtered by `code`), or `[]` when none match.
`code` is `:all`, a single code as a String, or a list of codes; the default is `:all`.
"""
@spec subfields(t, String.t, code_t, ind_t, ind_t)::[[SubField.t]]
def subfields(record, tag, code \\ :all, ind1 \\ nil, ind2 \\ nil) do
  fields = fields(record, tag, ind1, ind2)
  cond do
    Enum.empty?(fields) ->
      []
    code == :all ->
      fields |> Enum.map(&(&1.subfields))
    is_list(code) ->
      fields |> Enum.map(fn df -> df.subfields |> Enum.filter(&Enum.member?(code, &1.code)) end)
    is_binary(code) ->
      fields |> Enum.map(fn df -> df.subfields |> Enum.filter(&(&1.code == code)) end)
    true ->
      []
  end
end
@doc """
Returns the subfield list of the first field matching `tag`, `ind1`, `ind2` and `code` in `record`, or `nil` when none matches.
`code` is `:all`, a single code as a String, or a list of codes; the default is `:all`.
"""
@spec subfield(t, String.t, code_t, ind_t, ind_t)::([SubField.t] | nil)
def subfield(record, tag, code \\ :all, ind1 \\ nil, ind2 \\ nil), do: subfields(record, tag, code, ind1, ind2) |> Enum.at(0)
@doc """
Returns a list of field values for `tag`, `ind1`, `ind2` and `code` in `record`, or `[]` when none match. For a control field this is its single value; otherwise it delegates to `subfield_values/6`.
`code` is `:all`, a single code as a String, or a list of codes; the default is `:all`.
"""
@spec field_values(t, String.t, code_t, ind_t, ind_t, String.t)::[String.t]
def field_values(record, tag, code \\ :all, ind1 \\ nil, ind2 \\ nil, joiner \\ " ") do
if control_field?(tag) do
if cf = field(record, tag), do: [cf.value], else: []
else
subfield_values(record, tag, code, ind1, ind2, joiner)
end
end
@doc """
Returns the first field value for `tag`, `ind1`, `ind2` and `code` in `record`, or `nil` when none matches.
`code` is `:all`, a single code as a String, or a list of codes; the default is `:all`.
"""
@spec field_value(t, String.t, code_t, ind_t, ind_t, String.t)::(String.t | nil)
def field_value(record, tag, code \\ :all, ind1 \\ nil, ind2 \\ nil, joiner \\ " "), do: field_values(record, tag, code, ind1, ind2, joiner) |> Enum.at(0)
@doc """
Returns a list of subfield values (the values of each matching field's subfields joined with `joiner`), or `[]` when none match.
`code` is `:all`, a single code as a String, or a list of codes; the default is `:all`.
"""
@spec subfield_values(t, String.t, code_t, ind_t, ind_t, String.t)::[String.t]
def subfield_values(record, tag, code \\ :all, ind1 \\ nil, ind2 \\ nil, joiner \\ " ") do
subfields(record, tag, code, ind1, ind2) |> Enum.map(fn(sf) -> Enum.map(sf, &("#{&1.value}")) |> Enum.join(joiner) end)
end
@doc """
Returns the first subfield value for `tag`, `ind1`, `ind2` and `code` in `record`, or `nil` when none matches.
`code` is `:all`, a single code as a String, or a list of codes; the default is `:all`.
"""
@spec subfield_value(t, String.t, code_t, ind_t, ind_t, String.t)::(String.t | nil)
def subfield_value(record, tag, code \\ :all, ind1 \\ nil, ind2 \\ nil, joiner \\ " "), do: subfield_values(record, tag, code, ind1, ind2, joiner) |> Enum.at(0)
@doc """
Decodes a raw MARC binary and returns a `JPMarc.Record` struct
"""
@spec from_marc(String.t)::t
def from_marc(marc) do
<<leader::bytes-size(@leader_length), rest::binary>> = marc
leader = Leader.decode(leader)
length_of_dirs = leader.base - @leader_length - 1 # -1 for @fs
<<dir_block::bytes-size(length_of_dirs), @fs, data::binary>> = rest
directories = get_directories(dir_block)
fields = for {tag, length, position} <- directories do
tag_data = binary_part(data, position, length)
parse_tag_data(tag, tag_data)
end
%__MODULE__{leader: leader, fields: fields}
end
@doc """
Returns the raw MARC representation of the record
"""
@spec to_marc(t)::String.t
def to_marc(record) do
sorted = reset(record)
{directories, data} = make_directories_data(sorted.fields)
Leader.to_marc(sorted.leader) <> directories <> @fs <> data <> @rs
end
@doc """
Returns the MARCXML representation of the record as an element tuple
"""
@spec to_xml(t)::tuple
def to_xml(record) do
sorted = reset(record)
xml = sorted.fields |> Enum.map(fn(f) ->
if Enum.member?(@valid_control_fields, f.tag),
do: ControlField.to_xml(f),
else: DataField.to_xml(f)
end)
fields_xml = [Leader.to_xml(sorted.leader)] ++ xml
element(:record, nil, fields_xml)
end
@doc """
Returns the plain-text representation of the record
"""
@spec to_text(t)::String.t
def to_text(record) do
sorted = reset(record)
text = sorted.fields |> Enum.map(fn(f) ->
if Enum.member?(@valid_control_fields, f.tag),
do: ControlField.to_text(f),
else: DataField.to_text(f)
end)
([Leader.to_text(sorted.leader)] ++ text) |> Enum.join("\n")
end
@doc"""
Return a json representing of the record
"""
@spec to_json(t)::String.t
def to_json(record) do
reset(record) |> Poison.encode!()
end
@doc"""
Construct a record from json formatted in the marc-in-json schema
"""
@spec from_json(String.t)::t
def from_json(json) do
jmap = Poison.Parser.parse!(json)
leader = Map.get(jmap, "leader") |> Leader.decode()
fields = Map.get(jmap, "fields") |> Enum.map(&Map.to_list/1) |> Enum.map(fn ([field]) ->
case field do
{tag, %{"ind1" => ind1, "ind2" => ind2, "subfields" => sflds}} ->
subfields = sflds
|> Enum.map(&Map.to_list/1)
|> Enum.map(fn ([{code, value}]) -> %SubField{code: code, value: value} end)
%DataField{tag: tag, ind1: ind1, ind2: ind2, subfields: subfields}
{tag, value} ->
%JPMarc.ControlField{tag: tag, value: value}
end
end)
%JPMarc.Record{leader: leader, fields: fields}
end
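# Recomputes the leader's record length and base address of data so they
# match the serialized form of the (sorted) record.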
def reset(record) do
sorted = sort(record)
{directories, data} = make_directories_data(sorted.fields)
marc = Leader.to_marc(sorted.leader) <> directories <> @fs <> data <> @rs
new_leader = %Leader{sorted.leader | length: byte_size(marc), base: (@leader_length + 1 + byte_size(directories))}
%__MODULE__{sorted | leader: new_leader}
end
@doc """
Sorts the record's fields: control fields by tag, data fields by tag and indicators, with tag 880 fields ordered by their subfield $6 and appended last
"""
@spec sort(t)::t
def sort(record) do
{control_fields, data_fields} = Enum.split_with(record.fields, &(&1.__struct__ == ControlField))
sorted_control_fields = control_fields |> Enum.sort(&(&1.tag <= &2.tag))
{t880, rest} = Enum.split_with(data_fields, &(&1.tag == "880"))
sorted_data_fields =
  (rest |> Enum.sort(&(&1.tag <> &1.ind1 <> &1.ind2 <= &2.tag <> &2.ind1 <> &2.ind2))) ++
    (t880 |> Enum.sort(&(DataField.subfield_value(&1, "6") <= DataField.subfield_value(&2, "6"))))
%__MODULE__{record | fields: sorted_control_fields ++ sorted_data_fields}
end
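# Each MARC directory entry is 12 bytes: a 3-byte tag, a 4-byte field
# length and a 5-byte starting position within the data block.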
defp get_directories(block), do: _get_directories(block, [])
defp _get_directories("", acc) do
Enum.reverse acc
end
defp _get_directories(<<tag::bytes-size(3), length::bytes-size(4), position::bytes-size(5), rest::binary>>, acc) do
acc = [{tag, String.to_integer(length), String.to_integer(position)} | acc]
_get_directories(rest, acc)
end
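# A data field body starts with two indicator bytes followed by the
# subfield separator; anything else is a control field value.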
defp parse_tag_data(tag, <<ind1::bytes-size(1), ind2::bytes-size(1), @ss, rest::binary>>) do
subfields = SubField.decode(rest)
%DataField{tag: tag, ind1: ind1, ind2: ind2, subfields: subfields}
end
defp parse_tag_data(tag, data) do
%ControlField{tag: tag, value: String.trim_trailing(data, @fs)}
end
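# Walks the fields once, accumulating directory entries and serialized
# field data while tracking the running byte offset.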
defp make_directories_data(fields), do: _make_directories_data(fields, {[], []}, 0)
defp _make_directories_data([], {dir, data}, _pos),
do: {
dir |> Enum.reverse |> Enum.join,
data |> Enum.reverse |> Enum.join
}
defp _make_directories_data([head|tail], {dir, data}, pos) do
marc = case head.__struct__ do
ControlField -> ControlField.to_marc(head)
DataField -> DataField.to_marc(head)
end
length = byte_size(marc)
length_str = length |> Integer.to_string |> String.pad_leading(4, "0")
pos_str = pos |> Integer.to_string |> String.pad_leading(5, "0")
_make_directories_data(tail, {[head.tag <> length_str <> pos_str | dir], [marc | data]}, pos + length)
end
defimpl Inspect do
def inspect(%JPMarc.Record{leader: leader, fields: fields}, _opts) do
"#{leader}\n#{Enum.join(fields, "\n")}"
end
end
defimpl String.Chars, for: JPMarc.Record do
def to_string(%JPMarc.Record{leader: leader, fields: fields}) do
"#{leader}, #{Enum.join(fields, "\n")}"
end
end
end
|
lib/jpmarc/record.ex
| 0.836955
| 0.483953
|
record.ex
|
starcoder
|