code
stringlengths 114
1.05M
| path
stringlengths 3
312
| quality_prob
float64 0.5
0.99
| learning_prob
float64 0.2
1
| filename
stringlengths 3
168
| kind
stringclasses 1
value |
|---|---|---|---|---|---|
defmodule SSHSubsystemFwup do
  @moduledoc """
  SSH subsystem for upgrading Nerves devices

  This module provides an SSH subsystem for Erlang's `ssh` application, which
  makes it possible to push firmware updates to Nerves devices with plain old
  `ssh`:

  ```shell
  cat $firmware | ssh -s $ip_address fwup
  ```

  Here `$ip_address` is the IP address of your Nerves device. Depending on how
  Erlang's `ssh` application is configured, more parameters may be needed
  (username, port, identities, etc.).

  See [`nerves_ssh`](https://github.com/nerves-project/nerves_ssh/) for an easy
  way to set this up. If you'd rather not use `nerves_ssh`, add the return
  value of `SSHSubsystemFwup.subsystem_spec/1` to your `:ssh.daemon` call:

  ```elixir
  devpath = Nerves.Runtime.KV.get("nerves_fw_devpath")
  :ssh.daemon([
    {:subsystems, [SSHSubsystemFwup.subsystem_spec(devpath: devpath)]}
  ])
  ```

  See `SSHSubsystemFwup.subsystem_spec/1` for options. You will almost always
  need to pass the path to the device that should be updated since that is
  device-specific.
  """

  @typedoc """
  Options:

  * `:devpath` - path for fwup to upgrade (Required)
  * `:fwup_path` - path to the fwup firmware update utility
  * `:fwup_extra_options` - additional options to pass to fwup like for setting
    public keys
  * `:success_callback` - an MFA to call when a firmware update completes
    successfully. Defaults to `{Nerves.Runtime, :reboot, []}`.
  * `:task` - the task to run in the firmware update. Defaults to `"upgrade"`
  * `:subsystem` - the ssh subsystem name. Defaults to 'fwup'
  """
  @type options :: [
          devpath: Path.t(),
          fwup_path: Path.t(),
          fwup_extra_options: [String.t()],
          task: String.t(),
          success_callback: mfa(),
          subsystem: charlist() | String.t()
        ]

  @doc """
  Build a subsystem spec for use with `ssh:daemon/[1,2,3]`
  """
  @spec subsystem_spec(options()) :: :ssh.subsystem_spec()
  def subsystem_spec(options \\ []) do
    # The ssh application expects the subsystem name as a charlist, so
    # normalize whatever the caller passed (string or charlist).
    name =
      options
      |> Keyword.get(:subsystem, 'fwup')
      |> to_charlist()

    {name, {SSHSubsystemFwup.Handler, options}}
  end
end
|
lib/ssh_subsystem_fwup.ex
| 0.832611
| 0.658949
|
ssh_subsystem_fwup.ex
|
starcoder
|
defmodule AWS.CloudSearch do
  @moduledoc """
  Amazon CloudSearch Configuration Service

  You use the Amazon CloudSearch configuration service to create, configure,
  and manage search domains. Configuration service requests are submitted
  using the AWS Query protocol. AWS Query requests are HTTP or HTTPS requests
  submitted via HTTP GET or POST with a query parameter named Action.

  The endpoint for configuration service requests is region-specific:
  cloudsearch.*region*.amazonaws.com. For example,
  cloudsearch.us-east-1.amazonaws.com. For a current list of supported
  regions and endpoints, see
  [Regions and Endpoints](http://docs.aws.amazon.com/general/latest/gr/rande.html#cloudsearch_region).
  """

  @doc """
  Indexes the search suggestions. For more information, see
  [Configuring Suggesters](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-suggestions.html#configuring-suggesters)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def build_suggesters(client, input, options \\ []) do
    request(client, "BuildSuggesters", input, options)
  end

  @doc """
  Creates a new search domain. For more information, see
  [Creating a Search Domain](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/creating-domains.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def create_domain(client, input, options \\ []) do
    request(client, "CreateDomain", input, options)
  end

  @doc """
  Configures an analysis scheme that can be applied to a `text` or
  `text-array` field to define language-specific text processing options. For
  more information, see
  [Configuring Analysis Schemes](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-analysis-schemes.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def define_analysis_scheme(client, input, options \\ []) do
    request(client, "DefineAnalysisScheme", input, options)
  end

  @doc """
  Configures an `Expression` for the search domain. Used to create new
  expressions and modify existing ones. If the expression exists, the new
  configuration replaces the old one. For more information, see
  [Configuring Expressions](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-expressions.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def define_expression(client, input, options \\ []) do
    request(client, "DefineExpression", input, options)
  end

  @doc """
  Configures an `IndexField` for the search domain. Used to create new
  fields and modify existing ones. You must specify the name of the domain
  you are configuring and an index field configuration. The index field
  configuration specifies a unique name, the index field type, and the
  options you want to configure for the field. The options you can specify
  depend on the `IndexFieldType`. If the field exists, the new
  configuration replaces the old one. For more information, see
  [Configuring Index Fields](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-index-fields.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def define_index_field(client, input, options \\ []) do
    request(client, "DefineIndexField", input, options)
  end

  @doc """
  Configures a suggester for a domain. A suggester enables you to display
  possible matches before users finish typing their queries. When you
  configure a suggester, you must specify the name of the text field you want
  to search for possible matches and a unique name for the suggester. For
  more information, see
  [Getting Search Suggestions](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-suggestions.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def define_suggester(client, input, options \\ []) do
    request(client, "DefineSuggester", input, options)
  end

  @doc """
  Deletes an analysis scheme. For more information, see
  [Configuring Analysis Schemes](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-analysis-schemes.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def delete_analysis_scheme(client, input, options \\ []) do
    request(client, "DeleteAnalysisScheme", input, options)
  end

  @doc """
  Permanently deletes a search domain and all of its data. Once a domain has
  been deleted, it cannot be recovered. For more information, see
  [Deleting a Search Domain](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/deleting-domains.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def delete_domain(client, input, options \\ []) do
    request(client, "DeleteDomain", input, options)
  end

  @doc """
  Removes an `Expression` from the search domain. For more information, see
  [Configuring Expressions](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-expressions.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def delete_expression(client, input, options \\ []) do
    request(client, "DeleteExpression", input, options)
  end

  @doc """
  Removes an `IndexField` from the search domain. For more information, see
  [Configuring Index Fields](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-index-fields.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def delete_index_field(client, input, options \\ []) do
    request(client, "DeleteIndexField", input, options)
  end

  @doc """
  Deletes a suggester. For more information, see
  [Getting Search Suggestions](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-suggestions.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def delete_suggester(client, input, options \\ []) do
    request(client, "DeleteSuggester", input, options)
  end

  @doc """
  Gets the analysis schemes configured for a domain. An analysis scheme
  defines language-specific text processing options for a `text` field. Can
  be limited to specific analysis schemes by name. By default, shows all
  analysis schemes and includes any pending changes to the configuration. Set
  the `Deployed` option to `true` to show the active configuration and
  exclude pending changes. For more information, see
  [Configuring Analysis Schemes](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-analysis-schemes.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def describe_analysis_schemes(client, input, options \\ []) do
    request(client, "DescribeAnalysisSchemes", input, options)
  end

  @doc """
  Gets the availability options configured for a domain. By default, shows
  the configuration with any pending changes. Set the `Deployed` option to
  `true` to show the active configuration and exclude pending changes. For
  more information, see
  [Configuring Availability Options](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-availability-options.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def describe_availability_options(client, input, options \\ []) do
    request(client, "DescribeAvailabilityOptions", input, options)
  end

  @doc """
  Returns the domain's endpoint options, specifically whether all requests to
  the domain must arrive over HTTPS. For more information, see
  [Configuring Domain Endpoint Options](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-domain-endpoint-options.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def describe_domain_endpoint_options(client, input, options \\ []) do
    request(client, "DescribeDomainEndpointOptions", input, options)
  end

  @doc """
  Gets information about the search domains owned by this account. Can be
  limited to specific domains. Shows all domains by default. To get the
  number of searchable documents in a domain, use the console or submit a
  `matchall` request to your domain's search endpoint:
  `q=matchall&q.parser=structured&size=0`. For more information, see
  [Getting Information about a Search Domain](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-domain-info.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def describe_domains(client, input, options \\ []) do
    request(client, "DescribeDomains", input, options)
  end

  @doc """
  Gets the expressions configured for the search domain. Can be limited to
  specific expressions by name. By default, shows all expressions and
  includes any pending changes to the configuration. Set the `Deployed`
  option to `true` to show the active configuration and exclude pending
  changes. For more information, see
  [Configuring Expressions](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-expressions.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def describe_expressions(client, input, options \\ []) do
    request(client, "DescribeExpressions", input, options)
  end

  @doc """
  Gets information about the index fields configured for the search domain.
  Can be limited to specific fields by name. By default, shows all fields and
  includes any pending changes to the configuration. Set the `Deployed`
  option to `true` to show the active configuration and exclude pending
  changes. For more information, see
  [Getting Domain Information](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-domain-info.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def describe_index_fields(client, input, options \\ []) do
    request(client, "DescribeIndexFields", input, options)
  end

  @doc """
  Gets the scaling parameters configured for a domain. A domain's scaling
  parameters specify the desired search instance type and replication count.
  For more information, see
  [Configuring Scaling Options](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-scaling-options.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def describe_scaling_parameters(client, input, options \\ []) do
    request(client, "DescribeScalingParameters", input, options)
  end

  @doc """
  Gets information about the access policies that control access to the
  domain's document and search endpoints. By default, shows the configuration
  with any pending changes. Set the `Deployed` option to `true` to show the
  active configuration and exclude pending changes. For more information, see
  [Configuring Access for a Search Domain](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-access.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def describe_service_access_policies(client, input, options \\ []) do
    request(client, "DescribeServiceAccessPolicies", input, options)
  end

  @doc """
  Gets the suggesters configured for a domain. A suggester enables you to
  display possible matches before users finish typing their queries. Can be
  limited to specific suggesters by name. By default, shows all suggesters
  and includes any pending changes to the configuration. Set the `Deployed`
  option to `true` to show the active configuration and exclude pending
  changes. For more information, see
  [Getting Search Suggestions](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-suggestions.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def describe_suggesters(client, input, options \\ []) do
    request(client, "DescribeSuggesters", input, options)
  end

  @doc """
  Tells the search domain to start indexing its documents using the latest
  indexing options. This operation must be invoked to activate options whose
  `OptionStatus` is `RequiresIndexDocuments`.
  """
  def index_documents(client, input, options \\ []) do
    request(client, "IndexDocuments", input, options)
  end

  @doc """
  Lists all search domains owned by an account.
  """
  def list_domain_names(client, input, options \\ []) do
    request(client, "ListDomainNames", input, options)
  end

  @doc """
  Configures the availability options for a domain. Enabling the Multi-AZ
  option expands an Amazon CloudSearch domain to an additional Availability
  Zone in the same Region to increase fault tolerance in the event of a
  service disruption. Changes to the Multi-AZ option can take about half an
  hour to become active. For more information, see
  [Configuring Availability Options](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-availability-options.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def update_availability_options(client, input, options \\ []) do
    request(client, "UpdateAvailabilityOptions", input, options)
  end

  @doc """
  Updates the domain's endpoint options, specifically whether all requests to
  the domain must arrive over HTTPS. For more information, see
  [Configuring Domain Endpoint Options](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-domain-endpoint-options.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def update_domain_endpoint_options(client, input, options \\ []) do
    request(client, "UpdateDomainEndpointOptions", input, options)
  end

  @doc """
  Configures scaling parameters for a domain. A domain's scaling parameters
  specify the desired search instance type and replication count. Amazon
  CloudSearch will still automatically scale your domain based on the volume
  of data and traffic, but not below the desired instance type and
  replication count. If the Multi-AZ option is enabled, these values control
  the resources used per Availability Zone. For more information, see
  [Configuring Scaling Options](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-scaling-options.html)
  in the *Amazon CloudSearch Developer Guide*.
  """
  def update_scaling_parameters(client, input, options \\ []) do
    request(client, "UpdateScalingParameters", input, options)
  end

  @doc """
  Configures the access rules that control access to the domain's document
  and search endpoints. For more information, see
  [Configuring Access for an Amazon CloudSearch Domain](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-access.html).
  """
  def update_service_access_policies(client, input, options \\ []) do
    request(client, "UpdateServiceAccessPolicies", input, options)
  end

  @spec request(AWS.Client.t(), binary(), map(), list()) ::
          {:ok, map() | nil, map()}
          | {:error, term()}
  # Signs and submits one Configuration Service action as a form-encoded
  # AWS Query POST request.
  defp request(client, action, input, options) do
    client = %{client | service: "cloudsearch"}
    host = build_host("cloudsearch", client)
    url = build_url(host, client)
    headers = [
      {"Host", host},
      {"Content-Type", "application/x-www-form-urlencoded"}
    ]
    # Every Query-protocol request must carry the Action name and API version.
    input = Map.merge(input, %{"Action" => action, "Version" => "2013-01-01"})
    payload = encode!(client, input)
    headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
    post(client, url, payload, headers, options)
  end

  # Sends the signed request and normalizes the response: a 200 with a
  # non-empty body is decoded (empty body -> nil), any other status is
  # wrapped in {:error, {:unexpected_response, _}}.
  defp post(client, url, payload, headers, options) do
    case AWS.Client.request(client, :post, url, payload, headers, options) do
      {:ok, %{status_code: 200, body: body} = response} ->
        body = if body != "", do: decode!(client, body)
        {:ok, body, response}
      {:ok, response} ->
        {:error, {:unexpected_response, response}}
      error = {:error, _reason} -> error
    end
  end

  # "local" region with an explicit endpoint: use the endpoint as-is.
  defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
    endpoint
  end
  # "local" region without an endpoint: talk to localhost (dev/test setups).
  defp build_host(_endpoint_prefix, %{region: "local"}) do
    "localhost"
  end
  # Normal case: region-specific service host, e.g. cloudsearch.us-east-1.amazonaws.com.
  defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
    "#{endpoint_prefix}.#{region}.#{endpoint}"
  end

  defp build_url(host, %{:proto => proto, :port => port}) do
    "#{proto}://#{host}:#{port}/"
  end

  # Requests are encoded with the AWS Query (form) codec ...
  defp encode!(client, payload) do
    AWS.Client.encode!(client, payload, :query)
  end

  # ... and responses are decoded from XML.
  defp decode!(client, payload) do
    AWS.Client.decode!(client, payload, :xml)
  end
end
|
lib/aws/generated/cloud_search.ex
| 0.852874
| 0.437223
|
cloud_search.ex
|
starcoder
|
defmodule Certbot.Certificate do
  @moduledoc """
  The module provides utility functions to deal with the serial and validity
  timestamps of a certificate as well as being a struct to store certificate/keys
  """

  @type t :: %__MODULE__{
          cert: binary(),
          key: {atom(), binary()}
        }
  defstruct [:cert, :key]

  # Private-key algorithms accepted by build/2.
  @algo [:RSAPrivateKey, :ECPrivateKey]

  @spec build(binary, {:ECPrivateKey, binary} | {:RSAPrivateKey, binary}) ::
          Certbot.Certificate.t()
  @doc """
  Build struct to store a certificate in DER format and its private key.

  For example, to build a certificate struct from PEM files:

  ```elixir
  cert_file = File.read!("priv/cert/selfsigned.pem")
  key_file = File.read!("priv/cert/selfsigned_key.pem")
  [certificate] = :public_key.pem_decode(cert_file)
  cert = :public_key.pem_entry_decode(certificate)
  cert = :public_key.der_encode(:Certificate, cert)
  [key] = :public_key.pem_decode(key_file)
  key = :public_key.pem_entry_decode(key)
  der_key = :public_key.der_encode(:RSAPrivateKey, key)
  Certbot.Certificate.build(cert, {:RSAPrivateKey, der_key})
  ```
  """
  def build(cert, {algo, der_key})
      when algo in @algo and is_binary(cert) and is_binary(der_key) do
    %__MODULE__{
      cert: cert,
      key: {algo, der_key}
    }
  end

  @spec serial(Certbot.Certificate.t()) :: non_neg_integer
  @doc """
  Get integer serial number of the certificate

  ```
  iex> Certbot.Certificate.serial(build_certificate())
  18163034872729040431
  ```
  """
  def serial(%Certbot.Certificate{cert: cert}) do
    cert |> X509.Certificate.from_der!() |> X509.Certificate.serial()
  end

  @doc """
  Get hexadecimal serial number of the certificate

  This is what is visible when inspecting a certificate in the browser

  ## Example

  ```
  iex> Certbot.Certificate.hex_serial(build_certificate())
  "FC100FFC200BF62F"
  ```
  """
  @spec hex_serial(Certbot.Certificate.t()) :: String.t()
  def hex_serial(%Certbot.Certificate{} = cert) do
    cert |> serial() |> Integer.to_string(16)
  end

  @doc """
  Get DateTime of the date the certificate was given out

  ## Example

  ```
  iex> Certbot.Certificate.valid_from(build_certificate())
  ~U[2019-07-09 00:00:00Z]
  ```
  """
  @spec valid_from(Certbot.Certificate.t()) :: DateTime.t()
  def valid_from(%Certbot.Certificate{cert: cert}) do
    validity(:from, cert) |> to_string |> to_datetime
  end

  @doc """
  Get DateTime of the date the certificate will be valid until

  ## Example

  ```
  iex> Certbot.Certificate.valid_until(build_certificate())
  ~U[2020-07-09 00:00:00Z]
  ```
  """
  @spec valid_until(Certbot.Certificate.t()) :: DateTime.t()
  def valid_until(%Certbot.Certificate{cert: cert}) do
    validity(:until, cert) |> to_string |> to_datetime
  end

  # Parses an ASN.1 UTCTime string such as "190710122644Z" (two-digit year,
  # no century) into a UTC DateTime.
  defp to_datetime(
         <<year::binary-size(2)>> <>
           <<month::binary-size(2)>> <>
           <<day::binary-size(2)>> <>
           <<hour::binary-size(2)>> <>
           <<minute::binary-size(2)>> <> <<second::binary-size(2)>> <> _rest
       ) do
    # UTCTime carries no century. Per RFC 5280 section 4.1.2.5.1, two-digit
    # years >= 50 are interpreted as 19YY and years <= 49 as 20YY. The
    # previous implementation unconditionally added 2000, which mis-dated any
    # certificate with a 20th-century timestamp.
    yy = String.to_integer(year)
    year = if yy >= 50, do: 1900 + yy, else: 2000 + yy

    {{year, String.to_integer(month), String.to_integer(day)},
     {String.to_integer(hour), String.to_integer(minute), String.to_integer(second)}}
    |> NaiveDateTime.from_erl!()
    |> DateTime.from_naive!("Etc/UTC")
  end

  # Extracts the notBefore timestamp from the certificate's Validity record.
  defp validity(:from, cert) do
    {:Validity, {:utcTime, from}, _} = validity(cert)
    from
  end

  # Extracts the notAfter timestamp from the certificate's Validity record.
  defp validity(:until, cert) do
    {:Validity, _, {:utcTime, until}} = validity(cert)
    until
  end

  defp validity(cert) do
    cert |> X509.Certificate.from_der!() |> X509.Certificate.validity()
  end
end
|
lib/certbot/certificate.ex
| 0.877556
| 0.865508
|
certificate.ex
|
starcoder
|
defmodule Yodlee.Account do
  @moduledoc """
  Functions for `accounts` endpoint.
  """
  import Yodlee
  alias Yodlee.Utils

  defstruct id: nil,
            account_name: nil,
            account_number: nil,
            amount_due: nil,
            interest_rate_type: nil,
            balance: nil,
            due_date: nil,
            interest_rate: nil,
            last_payment_amount: nil,
            last_payment_date: nil,
            last_updated: nil,
            maturity_date: nil,
            minimum_amount_due: nil,
            account_status: nil,
            account_type: nil,
            original_loan_amount: nil,
            provider_id: nil,
            principal_balance: nil,
            recurring_payment: nil,
            term: nil,
            origination_date: nil,
            created_date: nil,
            frequency: nil,
            refreshinfo: nil,
            provider_account_id: nil

  @type t :: %__MODULE__{id: integer,
                         account_name: String.t,
                         account_number: String.t,
                         amount_due: Yodlee.Money.t,
                         interest_rate_type: String.t,
                         balance: Yodlee.Money.t,
                         due_date: String.t,
                         interest_rate: float,
                         last_payment_amount: Yodlee.Money.t,
                         last_payment_date: String.t,
                         last_updated: String.t,
                         maturity_date: String.t,
                         minimum_amount_due: Yodlee.Money.t,
                         account_status: String.t,
                         account_type: String.t,
                         original_loan_amount: Yodlee.Money.t,
                         provider_id: String.t,
                         principal_balance: Yodlee.Money.t,
                         recurring_payment: Yodlee.Money.t,
                         term: String.t,
                         origination_date: String.t,
                         created_date: String.t,
                         frequency: String.t,
                         refreshinfo: Yodlee.Refreshinfo.t,
                         provider_account_id: integer
                        }

  @type user_session :: String.t
  @type error :: Yodlee.Error.t | HTTPoison.Error.t

  @endpoint "accounts"

  @doc """
  Gets all accounts associated with User session.

  ```
  params = %{
    providerAccountId: 10502782
  }
  ```
  """
  @spec search(user_session, map) :: {:ok, [Yodlee.Account.t]} | {:error, error}
  def search(session, params) do
    :get
    |> make_request_in_session(endpoint_for(params), session)
    |> Utils.handle_resp(:account)
  end

  @doc """
  List all accounts associated with User session.
  """
  @spec list(user_session) :: {:ok, [Yodlee.Account.t]} | {:error, error}
  def list(session), do: search(session, %{})

  @doc """
  Gets account by id and container.
  """
  @spec get(user_session, String.t, String.t | integer) :: {:ok, [Yodlee.Account.t]} | {:error, error}
  def get(session, container, id) do
    :get
    |> make_request_in_session("#{@endpoint}/#{id}?container=#{container}", session)
    |> Utils.handle_resp(:account)
  end

  # Appends the query string only when search parameters were supplied.
  defp endpoint_for(params) when map_size(params) == 0, do: @endpoint
  defp endpoint_for(params), do: "#{@endpoint}?" <> Utils.encode_params(params)
end
|
lib/yodlee/account.ex
| 0.77569
| 0.426949
|
account.ex
|
starcoder
|
defmodule Sise do
  # SPDX-License-Identifier: Apache-2.0
  @moduledoc """
  Sise is a library that implements the **si**mple **se**rvice
  discovery protocol (SSDP).

  Sise listens on the network for announcements of available UPnP devices
  and services, and additionally sends out search requests from time to
  time. All discovered devices are stored by Sise.

  A client of this library can either fetch the discoveries or subscribe for
  notifications (on subscription the listener will be called with the already
  discovered devices/services).

  Sise implements the Application behaviour and thus will start itself
  with detecting devices and service.
  """

  @typedoc """
  SSDP's "notification type" (aka device or service others might need)

  This type is used as a parameter to select the 'notification type' the
  caller is interested in. If the atom `:all` is given the client will get
  _all_ of the discoveries, whereas a string is interpreted as the exact
  notification type as given by the peer.

  Examples for UPnP notification types are `"upnp:rootdevice"` or
  `"urn:schemas-upnp-org:service:SwitchPower:1"`.
  """
  @type nt :: :all | String.t()

  @doc """
  Get discovered device and service information

  Call this function to get information about discovered devices and services.
  """
  @spec get(nt()) :: [Sise.Discovery.t()]
  defdelegate get(notification_type), to: Sise.Cache.DeviceDb

  @doc """
  Subscribe to receive notifications about discoveries

  When calling this function, the caller will
  - receive a notification message of type `:ssdp_add` for every already known
    discovery
  - receive a message for every change in the future

  It is possible to subscribe to multiple different notification types.
  The possible notification messages to be received are these:

  ```
  {:ssdp_add, Sise.Discovery.t()}
  ```
  This message informs that a new device or service has been discovered. It
  will contain a struct with all available information on the discovery.

  ```
  {:ssdp_update, Sise.Discovery.t(), [{atom(), String.t(), String.t()}]}
  ```
  Informs the listener that the available information about a known
  device/service has changed. The message will carry the new version of
  the Discovery struct. It will also carry a list with the differences
  between the old and the new device/service information.

  ```
  {:ssdp_delete, Sise.Discovery.t()}
  ```
  Informs that a device/service is no longer available. The message will
  carry the last known version of the Discovery struct.

  See also `Sise.Discovery.diff/2`.
  """
  @spec subscribe(nt()) :: nil
  defdelegate subscribe(notification_type), to: Sise.Cache.DeviceDb

  @doc """
  Unsubscribe from notifications about discoveries.

  Note that the notification mechanism does a very simple matching for
  notification types. If you subscribe to multiple concrete services/devices
  and then `unsubscribe(:all)`, they will all be removed.
  However if you subscribe to `:all` services/devices and then unsubscribe from
  a concrete one you will still get all notifications.
  """
  @spec unsubscribe(nt()) :: nil
  defdelegate unsubscribe(notification_type), to: Sise.Cache.DeviceDb
end
|
lib/sise/sise.ex
| 0.842896
| 0.757077
|
sise.ex
|
starcoder
|
defmodule Tds.Date do
  @moduledoc """
  Struct for MSSQL date. https://msdn.microsoft.com/en-us/library/bb630352.aspx
  ## Fields
  * `year`
  * `month`
  * `day`
  """
  @type t :: %__MODULE__{year: 1..9999, month: 1..12, day: 1..31}

  # MSSQL's date default is 1900-01-01.
  defstruct year: 1900, month: 1, day: 1
end
defmodule Tds.Time do
  @moduledoc """
  Struct for MSSQL time. https://msdn.microsoft.com/en-us/library/bb677243.aspx
  ## Fields
  * `hour`
  * `min`
  * `sec`
  * `fsec`
  """
  @type t :: %__MODULE__{hour: 0..23, min: 0..59, sec: 0..59, fsec: 0..9_999_999}

  # All components default to midnight (00:00:00.0).
  defstruct hour: 0, min: 0, sec: 0, fsec: 0
end
defmodule Tds.DateTime do
  @moduledoc """
  Struct for MSSQL DateTime. https://msdn.microsoft.com/en-us/library/ms187819.aspx
  ## Fields
  * `year`
  * `month`
  * `day`
  * `hour`
  * `min`
  * `sec`
  * `fsec` - fractional seconds (0..999)
  """
  @type t :: %__MODULE__{year: 1753..9999, month: 1..12, day: 1..31,
  hour: 0..23, min: 0..59, sec: 0..59, fsec: 0..999}
  defstruct [
    year: 1900,
    month: 1,
    day: 1,
    hour: 0,
    min: 0,
    sec: 0,
    fsec: 0
  ]
end
defmodule Tds.DateTime2 do
  @moduledoc """
  Struct for MSSQL DateTime2. https://msdn.microsoft.com/en-us/library/bb677335.aspx
  ## Fields
  * `year`
  * `month`
  * `day`
  * `hour`
  * `min`
  * `sec`
  * `fsec` - fractional seconds (0..9_999_999)
  """
  @type t :: %__MODULE__{year: 1..9999, month: 1..12, day: 1..31,
  hour: 0..23, min: 0..59, sec: 0..59, fsec: 0..9_999_999}
  defstruct [
    year: 1900,
    month: 1,
    day: 1,
    hour: 0,
    min: 0,
    sec: 0,
    fsec: 0 # fractional seconds
  ]
end
defmodule Tds.DateTimeOffset do
  @moduledoc """
  Struct for MSSQL DateTimeOffset.
  https://msdn.microsoft.com/en-us/library/bb630289.aspx
  ## Fields
  * `year`
  * `month`
  * `day`
  * `hour`
  * `min`
  * `sec`
  * `fsec` - fractional seconds (0..9_999_999)
  * `offset_hour` - timezone offset hours (-14..14)
  * `offset_min` - timezone offset minutes (0..59)
  """
  @type t :: %__MODULE__{year: 1..9999, month: 1..12, day: 1..31,
  hour: 0..23, min: 0..59, sec: 0..59, fsec: 0..9_999_999,
  offset_hour: -14..14, offset_min: 0..59 }
  defstruct [
    year: 1900,
    month: 1,
    day: 1,
    hour: 0,
    min: 0,
    sec: 0,
    fsec: 0, # fractional seconds
    offset_hour: 0,
    offset_min: 0
  ]
end
defmodule Tds.SmallDateTime do
  @moduledoc """
  Struct for MSSQL SmallDateTime.
  https://msdn.microsoft.com/en-us/library/ms182418.aspx
  ## Fields
  * `year`
  * `month`
  * `day`
  * `hour`
  * `min`
  * `sec`
  """
  # Fixes from the previous version:
  # - `day` was typed `1..12` (a copy of the month range); days of the month
  #   run 1..31, matching the sibling Tds.Date/Tds.DateTime types.
  # - the trailing comma after `sec: 0` in `defstruct` was removed — Elixir
  #   does not permit trailing commas in lists, so the module failed to compile.
  @type t :: %__MODULE__{year: 1900..2079, month: 1..12, day: 1..31,
                         hour: 0..23, min: 0..59, sec: 0..59}
  defstruct [
    year: 1900,
    month: 1,
    day: 1,
    hour: 0,
    min: 0,
    sec: 0
  ]
end
|
lib/tds/date_time.ex
| 0.877214
| 0.567068
|
date_time.ex
|
starcoder
|
defmodule BeerSong do
  @doc ~S"""
  Get a single verse of the beer song
  ## Examples
      iex> BeerSong.verse(99)
      "99 bottles of beer on the wall, 99 bottles of beer.\nTake one down and pass it around, 98 bottles of beer on the wall.\n"
      iex> BeerSong.verse(1)
      "1 bottle of beer on the wall, 1 bottle of beer.\nTake it down and pass it around, no more bottles of beer on the wall.\n"
      iex> BeerSong.verse(0)
      "No more bottles of beer on the wall, no more bottles of beer.\nGo to the store and buy some more, 99 bottles of beer on the wall.\n"
  """
  @spec verse(integer) :: String.t()
  def verse(count) do
    {current, action} = bottle_phrases(count)
    # The second phrase of the pair is unused for the "remaining" line.
    {remaining, _action} = bottle_phrases(count - 1)

    """
    #{String.capitalize(current)} of beer on the wall, #{current} of beer.
    #{action}, #{remaining} of beer on the wall.
    """
  end

  @doc ~S"""
  Get the entire beer song for a given range of numbers of bottles.
  ## Example
      iex> BeerSong.lyrics(0..0)
      "No more bottles of beer on the wall, no more bottles of beer.\nGo to the store and buy some more, 99 bottles of beer on the wall.\n"
  """
  @spec lyrics(Range.t()) :: String.t()
  def lyrics(range \\ 99..0), do: Enum.map_join(range, "\n", &verse/1)

  # One clause per case instead of a cond; returns {shelf phrase, action phrase}.
  defp bottle_phrases(n) when n > 1, do: {"#{n} bottles", "Take one down and pass it around"}
  defp bottle_phrases(1), do: {"1 bottle", "Take it down and pass it around"}
  defp bottle_phrases(0), do: {"no more bottles", "Go to the store and buy some more"}
  # Only reached as the "remaining" phrase of verse(0); the song wraps to 99.
  defp bottle_phrases(_negative), do: {"99 bottles", ""}
end
|
beer-song/lib/beer_song.ex
| 0.654674
| 0.512083
|
beer_song.ex
|
starcoder
|
defmodule OMG.State do
  @moduledoc """
  A GenServer serving the ledger, for functional core and more info see `OMG.State.Core`.
  Keeps the state of the ledger, mainly the spendable UTXO set that can be employed in both `OMG.ChildChain` and
  `OMG.Watcher`.
  Maintains the state of the UTXO set by:
  - recognizing deposits
  - executing child chain transactions
  - recognizing exits
  Assumes that all stateless transaction validation is done outside of `exec/2`, so it accepts `OMG.State.Transaction.Recovered`
  """
  alias OMG.Block
  alias OMG.DB
  alias OMG.Fees
  alias OMG.State.Core
  alias OMG.State.Transaction
  alias OMG.State.Transaction.Validator
  alias OMG.State.UtxoSet
  alias OMG.Utxo

  use GenServer
  use OMG.Utils.LoggerExt

  require Utxo

  @type exec_error :: Validator.can_process_tx_error()

  ### Client

  @doc """
  Starts the `GenServer` maintaining the ledger
  """
  def start_link(opts) do
    GenServer.start_link(__MODULE__, opts, name: __MODULE__)
  end

  @doc """
  Executes a single, statelessly validated child chain transaction. May take information on the fees required, in case
  fees are charged.
  Checks statefull validity and executes a transaction on `OMG.State` when successful. Otherwise, returns an error and has no effect on
  `OMG.State` and the ledger
  """
  @spec exec(tx :: Transaction.Recovered.t(), fees :: Fees.optional_fee_t()) ::
          {:ok, {Transaction.tx_hash(), pos_integer, non_neg_integer}}
          | {:error, exec_error()}
  def exec(tx, input_fees) do
    GenServer.call(__MODULE__, {:exec, tx, input_fees})
  end

  @doc """
  Intended for the `OMG.Watcher`. "Closes" a block, acknowledging that all transactions have been executed, and the next
  `exec/2` will belong to the next block.
  Depends on the caller to do persistence.
  Synchronous
  """
  @spec close_block() :: {:ok, list(Core.db_update())}
  def close_block() do
    GenServer.call(__MODULE__, :close_block)
  end

  @doc """
  Intended for the `OMG.ChildChain`. Forms a new block and persist it. Broadcasts the block to the internal event bus
  to be used in other processes.
  Asynchronous
  """
  @spec form_block() :: :ok
  def form_block() do
    GenServer.cast(__MODULE__, :form_block)
  end

  @doc """
  Recognizes a list of deposits based on Ethereum events.
  Depends on the caller to do persistence.
  """
  @spec deposit(deposits :: [Core.deposit()]) :: {:ok, list(Core.db_update())}
  # empty list clause to not block state for a no-op
  def deposit([]), do: {:ok, []}

  def deposit(deposits) do
    GenServer.call(__MODULE__, {:deposit, deposits})
  end

  @doc """
  Recognizes a list of exits based on various triggers. Returns exit validities which indicate which of the UTXO positions
  actually pointed to UTXOs in the UTXO set of the ledger.
  For a list of things that can be triggers see `OMG.State.Core.extract_exiting_utxo_positions/2`.
  Depends on the caller to do persistence.
  """
  @spec exit_utxos(exiting_utxo_triggers :: Core.exiting_utxo_triggers_t()) ::
          {:ok, list(Core.db_update()), Core.validities_t()}
  # empty list clause to not block state for a no-op
  def exit_utxos([]), do: {:ok, [], {[], []}}

  def exit_utxos(exiting_utxo_triggers) do
    GenServer.call(__MODULE__, {:exit_utxos, exiting_utxo_triggers})
  end

  @doc """
  Provides a peek into the UTXO set to check if particular output exist (have not been spent)
  """
  @spec utxo_exists?(Utxo.Position.t()) :: boolean()
  def utxo_exists?(utxo) do
    GenServer.call(__MODULE__, {:utxo_exists, utxo})
  end

  @doc """
  Returns the current `blknum` and whether at the beginning of a block.
  The beginning of the block is `true/false` depending on whether there have been no transactions executed yet for
  the current child chain block
  """
  @spec get_status() :: {non_neg_integer(), boolean()}
  def get_status() do
    GenServer.call(__MODULE__, :get_status)
  end

  ### Server
  #
  # NOTE(review): the callback clauses below used to carry `@doc` attributes.
  # Since `handle_call/3` is a single multi-clause function, setting `@doc` on
  # each clause triggers "redefining @doc attribute" compiler warnings, so the
  # per-clause documentation now lives in plain comments.

  # Initializes the state. The UTXO set is NOT loaded here; UTXOs are fetched
  # lazily from `OMG.DB` as they are referenced (see `fetch_utxos_from_db/2`).
  @impl GenServer
  def init(opts) do
    {:ok, child_top_block_number} = DB.get_single_value(:child_top_block_number)
    child_block_interval = Keyword.fetch!(opts, :child_block_interval)
    fee_claimer_address = Keyword.fetch!(opts, :fee_claimer_address)
    metrics_collection_interval = Keyword.fetch!(opts, :metrics_collection_interval)

    # Assert success and keep the state; the old code used an awkward
    # `{:ok, _data} = result = ...` double-bind. Also drops a stray `}` that
    # used to be printed at the end of the log line.
    {:ok, state} = Core.extract_initial_state(child_top_block_number, child_block_interval, fee_claimer_address)

    _ = Logger.info("Started #{inspect(__MODULE__)}, height: #{child_top_block_number}")
    {:ok, _} = :timer.send_interval(metrics_collection_interval, self(), :send_metrics)
    {:ok, state}
  end

  # Periodic tick installed in `init/1`: pushes the current state as telemetry.
  @impl GenServer
  def handle_info(:send_metrics, state) do
    :ok = :telemetry.execute([:process, __MODULE__], %{}, state)
    {:noreply, state}
  end

  # see `exec/2`
  @impl GenServer
  def handle_call({:exec, tx, fees}, _from, state) do
    # Pull only the UTXOs this transaction's inputs reference into the
    # in-memory set before executing.
    db_utxos =
      tx
      |> Transaction.get_inputs()
      |> fetch_utxos_from_db(state)

    state
    |> Core.with_utxos(db_utxos)
    |> Core.exec(tx, fees)
    |> case do
      {:ok, tx_result, new_state} ->
        {:reply, {:ok, tx_result}, new_state}

      {tx_result, new_state} ->
        {:reply, tx_result, new_state}
    end
  end

  # see `deposit/1`
  def handle_call({:deposit, deposits}, _from, state) do
    {:ok, db_updates, new_state} = Core.deposit(deposits, state)
    {:reply, {:ok, db_updates}, new_state}
  end

  # see `exit_utxos/1`
  # Flow:
  #   - translates the triggers to UTXO positions digestible by the UTXO set
  #   - exits the UTXOs from the ledger if they exist, reports invalidity wherever they don't
  #   - returns the `db_updates` to be applied by the caller
  def handle_call({:exit_utxos, exiting_utxo_triggers}, _from, state) do
    exiting_utxos = Core.extract_exiting_utxo_positions(exiting_utxo_triggers, state)

    db_utxos = fetch_utxos_from_db(exiting_utxos, state)
    state = Core.with_utxos(state, db_utxos)

    {:ok, {db_updates, validities}, new_state} = Core.exit_utxos(exiting_utxos, state)
    {:reply, {:ok, db_updates, validities}, new_state}
  end

  # see `utxo_exists?/1`
  def handle_call({:utxo_exists, utxo_pos}, _from, state) do
    db_utxos = fetch_utxos_from_db([utxo_pos], state)
    new_state = Core.with_utxos(state, db_utxos)

    {:reply, Core.utxo_exists?(utxo_pos, new_state), new_state}
  end

  # see `get_status/0`
  def handle_call(:get_status, _from, state) do
    {:reply, Core.get_status(state), state}
  end

  # see `close_block/0`
  # Works exactly like `handle_cast(:form_block)` but:
  #   - is synchronous
  #   - relies on the caller to handle persistence, instead of handling itself
  # Someday, one might want to skip some of computations done (like calculating the root hash, which is scrapped)
  def handle_call(:close_block, _from, state) do
    {:ok, {block, db_updates}, new_state} = Core.form_block(state)

    :ok = publish_block_to_event_bus(block)
    {:reply, {:ok, db_updates}, new_state}
  end

  # see `form_block/0`
  # Flow:
  #   - generates fee-transactions based on the fees paid in the block
  #   - wraps up accumulated transactions submissions and fee transactions into a block
  #   - triggers db update
  #   - pushes the new block to subscribers of `"blocks"` internal event bus topic
  @impl GenServer
  def handle_cast(:form_block, state) do
    _ = Logger.debug("Forming new block...")
    state = Core.claim_fees(state)
    {:ok, {%Block{number: blknum} = block, db_updates}, new_state} = Core.form_block(state)
    _ = Logger.debug("Formed new block ##{blknum}")

    # persistence is required to be here, since propagating the block onwards requires restartability including the
    # new block
    :ok = DB.multi_update(db_updates)
    :ok = publish_block_to_event_bus(block)
    {:noreply, new_state}
  end

  # Broadcasts a freshly formed block on the internal event bus.
  defp publish_block_to_event_bus(block) do
    {:child_chain, "blocks"}
    |> OMG.Bus.Event.new(:enqueue_block, block)
    |> OMG.Bus.direct_local_broadcast()
  end

  # Loads from the DB only those positions the in-memory state has not already
  # processed, and wraps them into a `UtxoSet`.
  @spec fetch_utxos_from_db(list(OMG.Utxo.Position.t()), Core.t()) :: UtxoSet.t()
  defp fetch_utxos_from_db(utxo_pos_list, state) do
    utxo_pos_list
    |> Stream.reject(&Core.utxo_processed?(&1, state))
    |> Enum.map(&utxo_from_db/1)
    |> UtxoSet.init()
  end

  defp utxo_from_db(input_pointer) do
    # `DB` query can return `:not_found` which is filtered out by following `is_input_pointer?`
    with {:ok, utxo_kv} <- DB.utxo(Utxo.Position.to_input_db_key(input_pointer)),
         do: utxo_kv
  end
end
|
apps/omg/lib/omg/state.ex
| 0.867092
| 0.532
|
state.ex
|
starcoder
|
defmodule Validation.Rules.Tld do
@moduledoc false
# List extracted from https://data.iana.org/TLD/tlds-alpha-by-domain.txt
# Version 2019091200, Last Updated Thu Sep 12 07:07:02 2019 UTC
@tld_data [
"AAA",
"AARP",
"ABARTH",
"ABB",
"ABBOTT",
"ABBVIE",
"ABC",
"ABLE",
"ABOGADO",
"ABUDHABI",
"AC",
"ACADEMY",
"ACCENTURE",
"ACCOUNTANT",
"ACCOUNTANTS",
"ACO",
"ACTOR",
"AD",
"ADAC",
"ADS",
"ADULT",
"AE",
"AEG",
"AERO",
"AETNA",
"AF",
"AFAMILYCOMPANY",
"AFL",
"AFRICA",
"AG",
"AGAKHAN",
"AGENCY",
"AI",
"AIG",
"AIGO",
"AIRBUS",
"AIRFORCE",
"AIRTEL",
"AKDN",
"AL",
"ALFAROMEO",
"ALIBABA",
"ALIPAY",
"ALLFINANZ",
"ALLSTATE",
"ALLY",
"ALSACE",
"ALSTOM",
"AM",
"AMERICANEXPRESS",
"AMERICANFAMILY",
"AMEX",
"AMFAM",
"AMICA",
"AMSTERDAM",
"ANALYTICS",
"ANDROID",
"ANQUAN",
"ANZ",
"AO",
"AOL",
"APARTMENTS",
"APP",
"APPLE",
"AQ",
"AQUARELLE",
"AR",
"ARAB",
"ARAMCO",
"ARCHI",
"ARMY",
"ARPA",
"ART",
"ARTE",
"AS",
"ASDA",
"ASIA",
"ASSOCIATES",
"AT",
"ATHLETA",
"ATTORNEY",
"AU",
"AUCTION",
"AUDI",
"AUDIBLE",
"AUDIO",
"AUSPOST",
"AUTHOR",
"AUTO",
"AUTOS",
"AVIANCA",
"AW",
"AWS",
"AX",
"AXA",
"AZ",
"AZURE",
"BA",
"BABY",
"BAIDU",
"BANAMEX",
"BANANAREPUBLIC",
"BAND",
"BANK",
"BAR",
"BARCELONA",
"BARCLAYCARD",
"BARCLAYS",
"BAREFOOT",
"BARGAINS",
"BASEBALL",
"BASKETBALL",
"BAUHAUS",
"BAYERN",
"BB",
"BBC",
"BBT",
"BBVA",
"BCG",
"BCN",
"BD",
"BE",
"BEATS",
"BEAUTY",
"BEER",
"BENTLEY",
"BERLIN",
"BEST",
"BESTBUY",
"BET",
"BF",
"BG",
"BH",
"BHARTI",
"BI",
"BIBLE",
"BID",
"BIKE",
"BING",
"BINGO",
"BIO",
"BIZ",
"BJ",
"BLACK",
"BLACKFRIDAY",
"BLOCKBUSTER",
"BLOG",
"BLOOMBERG",
"BLUE",
"BM",
"BMS",
"BMW",
"BN",
"BNPPARIBAS",
"BO",
"BOATS",
"BOEHRINGER",
"BOFA",
"BOM",
"BOND",
"BOO",
"BOOK",
"BOOKING",
"BOSCH",
"BOSTIK",
"BOSTON",
"BOT",
"BOUTIQUE",
"BOX",
"BR",
"BRADESCO",
"BRIDGESTONE",
"BROADWAY",
"BROKER",
"BROTHER",
"BRUSSELS",
"BS",
"BT",
"BUDAPEST",
"BUGATTI",
"BUILD",
"BUILDERS",
"BUSINESS",
"BUY",
"BUZZ",
"BV",
"BW",
"BY",
"BZ",
"BZH",
"CA",
"CAB",
"CAFE",
"CAL",
"CALL",
"CALVINKLEIN",
"CAM",
"CAMERA",
"CAMP",
"CANCERRESEARCH",
"CANON",
"CAPETOWN",
"CAPITAL",
"CAPITALONE",
"CAR",
"CARAVAN",
"CARDS",
"CARE",
"CAREER",
"CAREERS",
"CARS",
"CARTIER",
"CASA",
"CASE",
"CASEIH",
"CASH",
"CASINO",
"CAT",
"CATERING",
"CATHOLIC",
"CBA",
"CBN",
"CBRE",
"CBS",
"CC",
"CD",
"CEB",
"CENTER",
"CEO",
"CERN",
"CF",
"CFA",
"CFD",
"CG",
"CH",
"CHANEL",
"CHANNEL",
"CHARITY",
"CHASE",
"CHAT",
"CHEAP",
"CHINTAI",
"CHRISTMAS",
"CHROME",
"CHRYSLER",
"CHURCH",
"CI",
"CIPRIANI",
"CIRCLE",
"CISCO",
"CITADEL",
"CITI",
"CITIC",
"CITY",
"CITYEATS",
"CK",
"CL",
"CLAIMS",
"CLEANING",
"CLICK",
"CLINIC",
"CLINIQUE",
"CLOTHING",
"CLOUD",
"CLUB",
"CLUBMED",
"CM",
"CN",
"CO",
"COACH",
"CODES",
"COFFEE",
"COLLEGE",
"COLOGNE",
"COM",
"COMCAST",
"COMMBANK",
"COMMUNITY",
"COMPANY",
"COMPARE",
"COMPUTER",
"COMSEC",
"CONDOS",
"CONSTRUCTION",
"CONSULTING",
"CONTACT",
"CONTRACTORS",
"COOKING",
"COOKINGCHANNEL",
"COOL",
"COOP",
"CORSICA",
"COUNTRY",
"COUPON",
"COUPONS",
"COURSES",
"CR",
"CREDIT",
"CREDITCARD",
"CREDITUNION",
"CRICKET",
"CROWN",
"CRS",
"CRUISE",
"CRUISES",
"CSC",
"CU",
"CUISINELLA",
"CV",
"CW",
"CX",
"CY",
"CYMRU",
"CYOU",
"CZ",
"DABUR",
"DAD",
"DANCE",
"DATA",
"DATE",
"DATING",
"DATSUN",
"DAY",
"DCLK",
"DDS",
"DE",
"DEAL",
"DEALER",
"DEALS",
"DEGREE",
"DELIVERY",
"DELL",
"DELOITTE",
"DELTA",
"DEMOCRAT",
"DENTAL",
"DENTIST",
"DESI",
"DESIGN",
"DEV",
"DHL",
"DIAMONDS",
"DIET",
"DIGITAL",
"DIRECT",
"DIRECTORY",
"DISCOUNT",
"DISCOVER",
"DISH",
"DIY",
"DJ",
"DK",
"DM",
"DNP",
"DO",
"DOCS",
"DOCTOR",
"DODGE",
"DOG",
"DOMAINS",
"DOT",
"DOWNLOAD",
"DRIVE",
"DTV",
"DUBAI",
"DUCK",
"DUNLOP",
"DUPONT",
"DURBAN",
"DVAG",
"DVR",
"DZ",
"EARTH",
"EAT",
"EC",
"ECO",
"EDEKA",
"EDU",
"EDUCATION",
"EE",
"EG",
"EMAIL",
"EMERCK",
"ENERGY",
"ENGINEER",
"ENGINEERING",
"ENTERPRISES",
"EPSON",
"EQUIPMENT",
"ER",
"ERICSSON",
"ERNI",
"ES",
"ESQ",
"ESTATE",
"ESURANCE",
"ET",
"ETISALAT",
"EU",
"EUROVISION",
"EUS",
"EVENTS",
"EVERBANK",
"EXCHANGE",
"EXPERT",
"EXPOSED",
"EXPRESS",
"EXTRASPACE",
"FAGE",
"FAIL",
"FAIRWINDS",
"FAITH",
"FAMILY",
"FAN",
"FANS",
"FARM",
"FARMERS",
"FASHION",
"FAST",
"FEDEX",
"FEEDBACK",
"FERRARI",
"FERRERO",
"FI",
"FIAT",
"FIDELITY",
"FIDO",
"FILM",
"FINAL",
"FINANCE",
"FINANCIAL",
"FIRE",
"FIRESTONE",
"FIRMDALE",
"FISH",
"FISHING",
"FIT",
"FITNESS",
"FJ",
"FK",
"FLICKR",
"FLIGHTS",
"FLIR",
"FLORIST",
"FLOWERS",
"FLY",
"FM",
"FO",
"FOO",
"FOOD",
"FOODNETWORK",
"FOOTBALL",
"FORD",
"FOREX",
"FORSALE",
"FORUM",
"FOUNDATION",
"FOX",
"FR",
"FREE",
"FRESENIUS",
"FRL",
"FROGANS",
"FRONTDOOR",
"FRONTIER",
"FTR",
"FUJITSU",
"FUJIXEROX",
"FUN",
"FUND",
"FURNITURE",
"FUTBOL",
"FYI",
"GA",
"GAL",
"GALLERY",
"GALLO",
"GALLUP",
"GAME",
"GAMES",
"GAP",
"GARDEN",
"GAY",
"GB",
"GBIZ",
"GD",
"GDN",
"GE",
"GEA",
"GENT",
"GENTING",
"GEORGE",
"GF",
"GG",
"GGEE",
"GH",
"GI",
"GIFT",
"GIFTS",
"GIVES",
"GIVING",
"GL",
"GLADE",
"GLASS",
"GLE",
"GLOBAL",
"GLOBO",
"GM",
"GMAIL",
"GMBH",
"GMO",
"GMX",
"GN",
"GODADDY",
"GOLD",
"GOLDPOINT",
"GOLF",
"GOO",
"GOODYEAR",
"GOOG",
"GOOGLE",
"GOP",
"GOT",
"GOV",
"GP",
"GQ",
"GR",
"GRAINGER",
"GRAPHICS",
"GRATIS",
"GREEN",
"GRIPE",
"GROCERY",
"GROUP",
"GS",
"GT",
"GU",
"GUARDIAN",
"GUCCI",
"GUGE",
"GUIDE",
"GUITARS",
"GURU",
"GW",
"GY",
"HAIR",
"HAMBURG",
"HANGOUT",
"HAUS",
"HBO",
"HDFC",
"HDFCBANK",
"HEALTH",
"HEALTHCARE",
"HELP",
"HELSINKI",
"HERE",
"HERMES",
"HGTV",
"HIPHOP",
"HISAMITSU",
"HITACHI",
"HIV",
"HK",
"HKT",
"HM",
"HN",
"HOCKEY",
"HOLDINGS",
"HOLIDAY",
"HOMEDEPOT",
"HOMEGOODS",
"HOMES",
"HOMESENSE",
"HONDA",
"HORSE",
"HOSPITAL",
"HOST",
"HOSTING",
"HOT",
"HOTELES",
"HOTELS",
"HOTMAIL",
"HOUSE",
"HOW",
"HR",
"HSBC",
"HT",
"HU",
"HUGHES",
"HYATT",
"HYUNDAI",
"IBM",
"ICBC",
"ICE",
"ICU",
"ID",
"IE",
"IEEE",
"IFM",
"IKANO",
"IL",
"IM",
"IMAMAT",
"IMDB",
"IMMO",
"IMMOBILIEN",
"IN",
"INC",
"INDUSTRIES",
"INFINITI",
"INFO",
"ING",
"INK",
"INSTITUTE",
"INSURANCE",
"INSURE",
"INT",
"INTEL",
"INTERNATIONAL",
"INTUIT",
"INVESTMENTS",
"IO",
"IPIRANGA",
"IQ",
"IR",
"IRISH",
"IS",
"ISMAILI",
"IST",
"ISTANBUL",
"IT",
"ITAU",
"ITV",
"IVECO",
"JAGUAR",
"JAVA",
"JCB",
"JCP",
"JE",
"JEEP",
"JETZT",
"JEWELRY",
"JIO",
"JLL",
"JM",
"JMP",
"JNJ",
"JO",
"JOBS",
"JOBURG",
"JOT",
"JOY",
"JP",
"JPMORGAN",
"JPRS",
"JUEGOS",
"JUNIPER",
"KAUFEN",
"KDDI",
"KE",
"KERRYHOTELS",
"KERRYLOGISTICS",
"KERRYPROPERTIES",
"KFH",
"KG",
"KH",
"KI",
"KIA",
"KIM",
"KINDER",
"KINDLE",
"KITCHEN",
"KIWI",
"KM",
"KN",
"KOELN",
"KOMATSU",
"KOSHER",
"KP",
"KPMG",
"KPN",
"KR",
"KRD",
"KRED",
"KUOKGROUP",
"KW",
"KY",
"KYOTO",
"KZ",
"LA",
"LACAIXA",
"LADBROKES",
"LAMBORGHINI",
"LAMER",
"LANCASTER",
"LANCIA",
"LANCOME",
"LAND",
"LANDROVER",
"LANXESS",
"LASALLE",
"LAT",
"LATINO",
"LATROBE",
"LAW",
"LAWYER",
"LB",
"LC",
"LDS",
"LEASE",
"LECLERC",
"LEFRAK",
"LEGAL",
"LEGO",
"LEXUS",
"LGBT",
"LI",
"LIAISON",
"LIDL",
"LIFE",
"LIFEINSURANCE",
"LIFESTYLE",
"LIGHTING",
"LIKE",
"LILLY",
"LIMITED",
"LIMO",
"LINCOLN",
"LINDE",
"LINK",
"LIPSY",
"LIVE",
"LIVING",
"LIXIL",
"LK",
"LLC",
"LOAN",
"LOANS",
"LOCKER",
"LOCUS",
"LOFT",
"LOL",
"LONDON",
"LOTTE",
"LOTTO",
"LOVE",
"LPL",
"LPLFINANCIAL",
"LR",
"LS",
"LT",
"LTD",
"LTDA",
"LU",
"LUNDBECK",
"LUPIN",
"LUXE",
"LUXURY",
"LV",
"LY",
"MA",
"MACYS",
"MADRID",
"MAIF",
"MAISON",
"MAKEUP",
"MAN",
"MANAGEMENT",
"MANGO",
"MAP",
"MARKET",
"MARKETING",
"MARKETS",
"MARRIOTT",
"MARSHALLS",
"MASERATI",
"MATTEL",
"MBA",
"MC",
"MCKINSEY",
"MD",
"ME",
"MED",
"MEDIA",
"MEET",
"MELBOURNE",
"MEME",
"MEMORIAL",
"MEN",
"MENU",
"MERCKMSD",
"METLIFE",
"MG",
"MH",
"MIAMI",
"MICROSOFT",
"MIL",
"MINI",
"MINT",
"MIT",
"MITSUBISHI",
"MK",
"ML",
"MLB",
"MLS",
"MM",
"MMA",
"MN",
"MO",
"MOBI",
"MOBILE",
"MODA",
"MOE",
"MOI",
"MOM",
"MONASH",
"MONEY",
"MONSTER",
"MOPAR",
"MORMON",
"MORTGAGE",
"MOSCOW",
"MOTO",
"MOTORCYCLES",
"MOV",
"MOVIE",
"MOVISTAR",
"MP",
"MQ",
"MR",
"MS",
"MSD",
"MT",
"MTN",
"MTR",
"MU",
"MUSEUM",
"MUTUAL",
"MV",
"MW",
"MX",
"MY",
"MZ",
"NA",
"NAB",
"NADEX",
"NAGOYA",
"NAME",
"NATIONWIDE",
"NATURA",
"NAVY",
"NBA",
"NC",
"NE",
"NEC",
"NET",
"NETBANK",
"NETFLIX",
"NETWORK",
"NEUSTAR",
"NEW",
"NEWHOLLAND",
"NEWS",
"NEXT",
"NEXTDIRECT",
"NEXUS",
"NF",
"NFL",
"NG",
"NGO",
"NHK",
"NI",
"NICO",
"NIKE",
"NIKON",
"NINJA",
"NISSAN",
"NISSAY",
"NL",
"NO",
"NOKIA",
"NORTHWESTERNMUTUAL",
"NORTON",
"NOW",
"NOWRUZ",
"NOWTV",
"NP",
"NR",
"NRA",
"NRW",
"NTT",
"NU",
"NYC",
"NZ",
"OBI",
"OBSERVER",
"OFF",
"OFFICE",
"OKINAWA",
"OLAYAN",
"OLAYANGROUP",
"OLDNAVY",
"OLLO",
"OM",
"OMEGA",
"ONE",
"ONG",
"ONL",
"ONLINE",
"ONYOURSIDE",
"OOO",
"OPEN",
"ORACLE",
"ORANGE",
"ORG",
"ORGANIC",
"ORIGINS",
"OSAKA",
"OTSUKA",
"OTT",
"OVH",
"PA",
"PAGE",
"PANASONIC",
"PARIS",
"PARS",
"PARTNERS",
"PARTS",
"PARTY",
"PASSAGENS",
"PAY",
"PCCW",
"PE",
"PET",
"PF",
"PFIZER",
"PG",
"PH",
"PHARMACY",
"PHD",
"PHILIPS",
"PHONE",
"PHOTO",
"PHOTOGRAPHY",
"PHOTOS",
"PHYSIO",
"PIAGET",
"PICS",
"PICTET",
"PICTURES",
"PID",
"PIN",
"PING",
"PINK",
"PIONEER",
"PIZZA",
"PK",
"PL",
"PLACE",
"PLAY",
"PLAYSTATION",
"PLUMBING",
"PLUS",
"PM",
"PN",
"PNC",
"POHL",
"POKER",
"POLITIE",
"PORN",
"POST",
"PR",
"PRAMERICA",
"PRAXI",
"PRESS",
"PRIME",
"PRO",
"PROD",
"PRODUCTIONS",
"PROF",
"PROGRESSIVE",
"PROMO",
"PROPERTIES",
"PROPERTY",
"PROTECTION",
"PRU",
"PRUDENTIAL",
"PS",
"PT",
"PUB",
"PW",
"PWC",
"PY",
"QA",
"QPON",
"QUEBEC",
"QUEST",
"QVC",
"RACING",
"RADIO",
"RAID",
"RE",
"READ",
"REALESTATE",
"REALTOR",
"REALTY",
"RECIPES",
"RED",
"REDSTONE",
"REDUMBRELLA",
"REHAB",
"REISE",
"REISEN",
"REIT",
"RELIANCE",
"REN",
"RENT",
"RENTALS",
"REPAIR",
"REPORT",
"REPUBLICAN",
"REST",
"RESTAURANT",
"REVIEW",
"REVIEWS",
"REXROTH",
"RICH",
"RICHARDLI",
"RICOH",
"RIGHTATHOME",
"RIL",
"RIO",
"RIP",
"RMIT",
"RO",
"ROCHER",
"ROCKS",
"RODEO",
"ROGERS",
"ROOM",
"RS",
"RSVP",
"RU",
"RUGBY",
"RUHR",
"RUN",
"RW",
"RWE",
"RYUKYU",
"SA",
"SAARLAND",
"SAFE",
"SAFETY",
"SAKURA",
"SALE",
"SALON",
"SAMSCLUB",
"SAMSUNG",
"SANDVIK",
"SANDVIKCOROMANT",
"SANOFI",
"SAP",
"SARL",
"SAS",
"SAVE",
"SAXO",
"SB",
"SBI",
"SBS",
"SC",
"SCA",
"SCB",
"SCHAEFFLER",
"SCHMIDT",
"SCHOLARSHIPS",
"SCHOOL",
"SCHULE",
"SCHWARZ",
"SCIENCE",
"SCJOHNSON",
"SCOR",
"SCOT",
"SD",
"SE",
"SEARCH",
"SEAT",
"SECURE",
"SECURITY",
"SEEK",
"SELECT",
"SENER",
"SERVICES",
"SES",
"SEVEN",
"SEW",
"SEX",
"SEXY",
"SFR",
"SG",
"SH",
"SHANGRILA",
"SHARP",
"SHAW",
"SHELL",
"SHIA",
"SHIKSHA",
"SHOES",
"SHOP",
"SHOPPING",
"SHOUJI",
"SHOW",
"SHOWTIME",
"SHRIRAM",
"SI",
"SILK",
"SINA",
"SINGLES",
"SITE",
"SJ",
"SK",
"SKI",
"SKIN",
"SKY",
"SKYPE",
"SL",
"SLING",
"SM",
"SMART",
"SMILE",
"SN",
"SNCF",
"SO",
"SOCCER",
"SOCIAL",
"SOFTBANK",
"SOFTWARE",
"SOHU",
"SOLAR",
"SOLUTIONS",
"SONG",
"SONY",
"SOY",
"SPACE",
"SPORT",
"SPOT",
"SPREADBETTING",
"SR",
"SRL",
"SRT",
"SS",
"ST",
"STADA",
"STAPLES",
"STAR",
"STATEBANK",
"STATEFARM",
"STC",
"STCGROUP",
"STOCKHOLM",
"STORAGE",
"STORE",
"STREAM",
"STUDIO",
"STUDY",
"STYLE",
"SU",
"SUCKS",
"SUPPLIES",
"SUPPLY",
"SUPPORT",
"SURF",
"SURGERY",
"SUZUKI",
"SV",
"SWATCH",
"SWIFTCOVER",
"SWISS",
"SX",
"SY",
"SYDNEY",
"SYMANTEC",
"SYSTEMS",
"SZ",
"TAB",
"TAIPEI",
"TALK",
"TAOBAO",
"TARGET",
"TATAMOTORS",
"TATAR",
"TATTOO",
"TAX",
"TAXI",
"TC",
"TCI",
"TD",
"TDK",
"TEAM",
"TECH",
"TECHNOLOGY",
"TEL",
"TELEFONICA",
"TEMASEK",
"TENNIS",
"TEVA",
"TF",
"TG",
"TH",
"THD",
"THEATER",
"THEATRE",
"TIAA",
"TICKETS",
"TIENDA",
"TIFFANY",
"TIPS",
"TIRES",
"TIROL",
"TJ",
"TJMAXX",
"TJX",
"TK",
"TKMAXX",
"TL",
"TM",
"TMALL",
"TN",
"TO",
"TODAY",
"TOKYO",
"TOOLS",
"TOP",
"TORAY",
"TOSHIBA",
"TOTAL",
"TOURS",
"TOWN",
"TOYOTA",
"TOYS",
"TR",
"TRADE",
"TRADING",
"TRAINING",
"TRAVEL",
"TRAVELCHANNEL",
"TRAVELERS",
"TRAVELERSINSURANCE",
"TRUST",
"TRV",
"TT",
"TUBE",
"TUI",
"TUNES",
"TUSHU",
"TV",
"TVS",
"TW",
"TZ",
"UA",
"UBANK",
"UBS",
"UCONNECT",
"UG",
"UK",
"UNICOM",
"UNIVERSITY",
"UNO",
"UOL",
"UPS",
"US",
"UY",
"UZ",
"VA",
"VACATIONS",
"VANA",
"VANGUARD",
"VC",
"VE",
"VEGAS",
"VENTURES",
"VERISIGN",
"VERSICHERUNG",
"VET",
"VG",
"VI",
"VIAJES",
"VIDEO",
"VIG",
"VIKING",
"VILLAS",
"VIN",
"VIP",
"VIRGIN",
"VISA",
"VISION",
"VISTAPRINT",
"VIVA",
"VIVO",
"VLAANDEREN",
"VN",
"VODKA",
"VOLKSWAGEN",
"VOLVO",
"VOTE",
"VOTING",
"VOTO",
"VOYAGE",
"VU",
"VUELOS",
"WALES",
"WALMART",
"WALTER",
"WANG",
"WANGGOU",
"WARMAN",
"WATCH",
"WATCHES",
"WEATHER",
"WEATHERCHANNEL",
"WEBCAM",
"WEBER",
"WEBSITE",
"WED",
"WEDDING",
"WEIBO",
"WEIR",
"WF",
"WHOSWHO",
"WIEN",
"WIKI",
"WILLIAMHILL",
"WIN",
"WINDOWS",
"WINE",
"WINNERS",
"WME",
"WOLTERSKLUWER",
"WOODSIDE",
"WORK",
"WORKS",
"WORLD",
"WOW",
"WS",
"WTC",
"WTF",
"XBOX",
"XEROX",
"XFINITY",
"XIHUAN",
"XIN",
"XN--11B4C3D",
"XN--1CK2E1B",
"XN--1QQW23A",
"XN--2SCRJ9C",
"XN--30RR7Y",
"XN--3BST00M",
"XN--3DS443G",
"XN--3E0B707E",
"XN--3HCRJ9C",
"XN--3OQ18VL8PN36A",
"XN--3PXU8K",
"XN--42C2D9A",
"XN--45BR5CYL",
"XN--45BRJ9C",
"XN--45Q11C",
"XN--4GBRIM",
"XN--54B7FTA0CC",
"XN--55QW42G",
"XN--55QX5D",
"XN--5SU34J936BGSG",
"XN--5TZM5G",
"XN--6FRZ82G",
"XN--6QQ986B3XL",
"XN--80ADXHKS",
"XN--80AO21A",
"XN--80AQECDR1A",
"XN--80ASEHDB",
"XN--80ASWG",
"XN--8Y0A063A",
"XN--90A3AC",
"XN--90AE",
"XN--90AIS",
"XN--9DBQ2A",
"XN--9ET52U",
"XN--9KRT00A",
"XN--B4W605FERD",
"XN--BCK1B9A5DRE4C",
"XN--C1AVG",
"XN--C2BR7G",
"XN--CCK2B3B",
"XN--CG4BKI",
"XN--CLCHC0EA0B2G2A9GCD",
"XN--CZR694B",
"XN--CZRS0T",
"XN--CZRU2D",
"XN--D1ACJ3B",
"XN--D1ALF",
"XN--E1A4C",
"XN--ECKVDTC9D",
"XN--EFVY88H",
"XN--ESTV75G",
"XN--FCT429K",
"XN--FHBEI",
"XN--FIQ228C5HS",
"XN--FIQ64B",
"XN--FIQS8S",
"XN--FIQZ9S",
"XN--FJQ720A",
"XN--FLW351E",
"XN--FPCRJ9C3D",
"XN--FZC2C9E2C",
"XN--FZYS8D69UVGM",
"XN--G2XX48C",
"XN--GCKR3F0F",
"XN--GECRJ9C",
"XN--GK3AT1E",
"XN--H2BREG3EVE",
"XN--H2BRJ9C",
"XN--H2BRJ9C8C",
"XN--HXT814E",
"XN--I1B6B1A6A2E",
"XN--IMR513N",
"XN--IO0A7I",
"XN--J1AEF",
"XN--J1AMH",
"XN--J6W193G",
"XN--JLQ61U9W7B",
"XN--JVR189M",
"XN--KCRX77D1X4A",
"XN--KPRW13D",
"XN--KPRY57D",
"XN--KPU716F",
"XN--KPUT3I",
"XN--L1ACC",
"XN--LGBBAT1AD8J",
"XN--MGB9AWBF",
"XN--MGBA3A3EJT",
"XN--MGBA3A4F16A",
"XN--MGBA7C0BBN0A",
"XN--MGBAAKC7DVF",
"XN--MGBAAM7A8H",
"XN--MGBAB2BD",
"XN--MGBAH1A3HJKRD",
"XN--MGBAI9AZGQP6J",
"XN--MGBAYH7GPA",
"XN--MGBBH1A",
"XN--MGBBH1A71E",
"XN--MGBC0A9AZCG",
"XN--MGBCA7DZDO",
"XN--MGBERP4A5D4AR",
"XN--MGBGU82A",
"XN--MGBI4ECEXP",
"XN--MGBPL2FH",
"XN--MGBT3DHD",
"XN--MGBTX2B",
"XN--MGBX4CD0AB",
"XN--MIX891F",
"XN--MK1BU44C",
"XN--MXTQ1M",
"XN--NGBC5AZD",
"XN--NGBE9E0A",
"XN--NGBRX",
"XN--NODE",
"XN--NQV7F",
"XN--NQV7FS00EMA",
"XN--NYQY26A",
"XN--O3CW4H",
"XN--OGBPF8FL",
"XN--OTU796D",
"XN--P1ACF",
"XN--P1AI",
"XN--PBT977C",
"XN--PGBS0DH",
"XN--PSSY2U",
"XN--Q9JYB4C",
"XN--QCKA1PMC",
"XN--QXA6A",
"XN--QXAM",
"XN--RHQV96G",
"XN--ROVU88B",
"XN--RVC1E0AM3E",
"XN--S9BRJ9C",
"XN--SES554G",
"XN--T60B56A",
"XN--TCKWE",
"XN--TIQ49XQYJ",
"XN--UNUP4Y",
"XN--VERMGENSBERATER-CTB",
"XN--VERMGENSBERATUNG-PWB",
"XN--VHQUV",
"XN--VUQ861B",
"XN--W4R85EL8FHU5DNRA",
"XN--W4RS40L",
"XN--WGBH1C",
"XN--WGBL6A",
"XN--XHQ521B",
"XN--XKC2AL3HYE2A",
"XN--XKC2DL3A5EE0H",
"XN--Y9A3AQ",
"XN--YFRO4I67O",
"XN--YGBI2AMMX",
"XN--ZFR164B",
"XXX",
"XYZ",
"YACHTS",
"YAHOO",
"YAMAXUN",
"YANDEX",
"YE",
"YODOBASHI",
"YOGA",
"YOKOHAMA",
"YOU",
"YOUTUBE",
"YT",
"YUN",
"ZA",
"ZAPPOS",
"ZARA",
"ZERO",
"ZIP",
"ZM",
"ZONE",
"ZUERICH",
"ZW"
]
@doc false
# Case-insensitive membership check against the IANA TLD table above.
@spec validate?(String.t()) :: boolean
def validate?(input) when is_binary(input) do
  String.upcase(input) in @tld_data
end
end
|
lib/validation/rules/tld.ex
| 0.512205
| 0.494873
|
tld.ex
|
starcoder
|
defmodule Serum.HeaderParser do
  @moduledoc """
  This module takes care of parsing headers of page (or post) source files.
  Header is where all page or post metadata goes into, and has the following
  format:
  ```
  ---
  key: value
  ...
  ---
  ```
  where `---` in the first and last line delimits the beginning and the end of
  the header area, and between these two lines are one or more key-value pair
  delimited by a colon, where key is the name of a metadata and value is the
  actual value of a metadata.
  """

  # Timex format strings for the two accepted `:datetime` input shapes.
  @date_format1 "{YYYY}-{0M}-{0D} {h24}:{m}:{s}"
  @date_format2 "{YYYY}-{0M}-{0D}"

  @type options :: [{atom, value_type}]
  @type value_type :: :string | :integer | :datetime | {:list, value_type}
  @type value :: binary | integer | [binary] | [integer]
  @type parse_result :: {:ok, {map(), binary()}} | {:invalid, binary()}
  @typep extract_ok :: {:ok, [binary], binary}
  @typep extract_err :: {:error, binary}

  @doc """
  Reads lines from a binary `data` and extracts the header into a map.
  `options` is a keyword list which specifies the name and type of metadata the
  header parser expects. So the typical `options` should look like this:
      [key1: type1, key2: type2, ...]
  See "Types" section for avilable value types.
  `required` argument is a list of required keys (in atom). If the header parser
  cannot find required keys in the header area, it returns an error.
  ## Types
  Currently the HeaderParser supports following types:
  * `:string` - A line of string. It can contain spaces.
  * `:integer` - A decimal integer.
  * `:datetime` - Date and time. Must be specified in the format of
    `YYYY-MM-DD hh:mm:ss`. This data will be interpreted as a local time.
  * `{:list, <type>}` - A list of multiple values separated by commas. Every
    value must have the same type, either `:string`, `:integer`, or `:datetime`.
    You cannot make a list of lists.
  """
  @spec parse_header(binary(), options(), [atom()]) :: parse_result()
  def parse_header(data, options, required \\ []) do
    case extract_header(data, [], false) do
      {:ok, header_lines, rest_data} ->
        # The caller's option keys, stringified for comparison against raw lines.
        key_strings = options |> Keyword.keys() |> Enum.map(&Atom.to_string/1)

        # Keep only key-value pairs whose key was declared by the caller;
        # unknown header keys are silently dropped.
        kv_list =
          header_lines
          |> Enum.map(&split_kv/1)
          |> Enum.filter(fn {k, _} -> k in key_strings end)

        # Happy path: no missing required keys, all values convert cleanly.
        # Any non-matching result falls through to `else` and is normalized
        # into an `{:invalid, message}` tuple by `handle_error/1`.
        with [] <- find_missing(kv_list, required),
             {:ok, new_kv} <- transform_values(kv_list, options, []) do
          {:ok, {Map.new(new_kv), rest_data}}
        else
          error -> handle_error(error)
        end

      error ->
        handle_error(error)
    end
  end

  # Normalizes the two failure shapes produced above — a non-empty list of
  # missing keys, or an `{:error, reason}` tuple — into `{:invalid, message}`.
  @spec handle_error(term) :: {:invalid, binary()}
  defp handle_error(term)

  # Exactly one required key was missing.
  defp handle_error([missing]) do
    {:invalid, "`#{missing}` is required, but it's missing"}
  end

  # Two or more required keys were missing. `find_missing` accumulates in
  # reverse order, hence the `Enum.reverse/1` before joining.
  defp handle_error([_ | _] = missing) do
    repr = missing |> Enum.map(&"`#{&1}`") |> Enum.reverse() |> Enum.join(", ")
    {:invalid, "#{repr} are required, but they are missing"}
  end

  defp handle_error({:error, error}) do
    {:invalid, "header parse error: #{error}"}
  end

  # Line-by-line scan for the header area. The boolean flag tracks whether the
  # opening `---` has been seen yet (`false` = still searching, `true` = inside
  # the header, accumulating lines into `acc` in reverse order).
  @spec extract_header(binary, [binary], boolean) :: extract_ok | extract_err
  defp extract_header(data, acc, open?)

  # Searching for the opening `---`; lines before it are discarded.
  defp extract_header(data, acc, false) do
    case String.split(data, ~r/\r?\n/, parts: 2) do
      ["---", rest] ->
        extract_header(rest, acc, true)

      [line, rest] when is_binary(line) ->
        extract_header(rest, acc, false)

      [_] ->
        # Reached the end of input without ever seeing an opening `---`.
        {:error, "header not found"}
    end
  end

  # Inside the header; accumulate lines until the closing `---`.
  defp extract_header(data, acc, true) do
    case String.split(data, ~r/\r?\n/, parts: 2) do
      ["---", rest] ->
        {:ok, acc, rest}

      [line, rest] when is_binary(line) ->
        extract_header(rest, [line | acc], true)

      [_] ->
        # Input ended while still inside the header (no closing `---`).
        {:error, "encountered unexpected end of file"}
    end
  end

  # Splits a header line at the first colon. A line without a colon yields an
  # empty value; the value is trimmed later in `transform_values/3`.
  @spec split_kv(binary) :: {binary, binary}
  defp split_kv(line) do
    case String.split(line, ":", parts: 2) do
      [x] -> {String.trim(x), ""}
      [k, v] -> {k, v}
    end
  end

  # Returns the required keys (atoms) that have no corresponding entry in the
  # parsed key-value list. Result order is the reverse of `required`.
  @spec find_missing([{binary, binary}], [atom]) :: [atom]
  defp find_missing(kvlist, required) do
    keys = Enum.map(kvlist, fn {k, _} -> k end)
    do_find_missing(keys, required)
  end

  @spec do_find_missing([binary], [atom], [atom]) :: [atom]
  defp do_find_missing(keys, required, acc \\ [])

  defp do_find_missing(_keys, [], acc) do
    acc
  end

  defp do_find_missing(keys, [h | t], acc) do
    if Atom.to_string(h) in keys do
      do_find_missing(keys, t, acc)
    else
      do_find_missing(keys, t, [h | acc])
    end
  end

  # Converts each raw string value to its declared type, accumulating results;
  # stops at the first conversion error.
  @spec transform_values([{binary, binary}], keyword(atom), keyword(value)) ::
          {:error, binary} | {:ok, keyword(value)}
  defp transform_values([], _options, acc) do
    {:ok, acc}
  end

  defp transform_values([{k, v} | rest], options, acc) do
    # Safe: `k` was filtered against strings derived from the caller's option
    # atoms, so the atom is guaranteed to already exist.
    atom_k = String.to_existing_atom(k)

    case transform_value(k, String.trim(v), options[atom_k]) do
      {:error, _} = error -> error
      value -> transform_values(rest, options, [{atom_k, value} | acc])
    end
  end

  # Converts a single trimmed value string according to its declared type.
  # Returns the converted value, or `{:error, message}` on failure.
  @spec transform_value(binary, binary, value_type) :: value | {:error, binary}
  defp transform_value(_key, valstr, :string) do
    valstr
  end

  defp transform_value(key, valstr, :integer) do
    # `{value, ""}` ensures the whole string was consumed (rejects "12abc").
    case Integer.parse(valstr) do
      {value, ""} -> value
      _ -> {:error, "`#{key}`: invalid integer"}
    end
  end

  defp transform_value(key, valstr, :datetime) do
    # Try the full date-time format first, then fall back to date-only;
    # either result is interpreted as a local time.
    case Timex.parse(valstr, @date_format1) do
      {:ok, dt} ->
        dt |> Timex.to_erl() |> Timex.to_datetime(:local)

      {:error, _msg} ->
        case Timex.parse(valstr, @date_format2) do
          {:ok, dt} ->
            dt |> Timex.to_erl() |> Timex.to_datetime(:local)

          {:error, msg} ->
            {:error, "`#{key}`: " <> msg}
        end
    end
  end

  # Nested list types are explicitly rejected.
  defp transform_value(key, _valstr, {:list, {:list, _type}}) do
    {:error, "`#{key}`: \"list of lists\" type is not supported"}
  end

  defp transform_value(key, valstr, {:list, type}) when is_atom(type) do
    # Lazily split, trim, drop empties, and convert each element; the stream
    # is only materialized once, below.
    list =
      valstr
      |> String.split(",")
      |> Stream.map(&String.trim/1)
      |> Stream.reject(&(&1 == ""))
      |> Stream.map(&transform_value(key, &1, type))

    # If any element failed to convert, surface the first error; otherwise
    # realize the stream into a plain list.
    case Enum.filter(list, &error?/1) do
      [] -> Enum.to_list(list)
      [{:error, _} = error | _] -> error
    end
  end

  # Catch-all for undeclared or unsupported type specifications.
  defp transform_value(key, _valstr, _type) do
    {:error, "`#{key}`: invalid value type"}
  end

  # Predicate: is this term an `{:error, _}` tuple?
  @spec error?(term) :: boolean
  defp error?({:error, _}), do: true
  defp error?(_), do: false
end
|
lib/serum/header_parser.ex
| 0.846101
| 0.849035
|
header_parser.ex
|
starcoder
|
defmodule Cassandrax.Keyspace do
@moduledoc """
Defines a Keyspace.
A keyspace acts as a repository, wrapping an underlying keyspace in CassandraDB.
## Setup
test_conn_attrs = [
nodes: ["127.0.0.1:9043"],
username: "cassandra",
password: "<PASSWORD>"
]
child = Cassandrax.Supervisor.child_spec(Cassandrax.MyConn, test_conn_attrs)
Cassandrax.start_link([child])
Defining a new keyspace module.
```
defmodule MyKeyspace do
use Cassandrax.Keyspace, cluster: Cassandrax.MyConn, name: "my_keyspace"
end
```
Creating a keyspace.
```
statement = \"""
CREATE KEYSPACE IF NOT EXISTS my_keyspace
WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1}
\"""
Cassandrax.cql(Cassandrax.MyConn, statement)
```
Creating a table in the Keyspace.
```
statement = [
"CREATE TABLE IF NOT EXISTS ",
"my_keyspace.user(",
"id integer, ",
"user_name text, ",
"svalue set<text>, ",
"PRIMARY KEY (id))"
]
{:ok, _result} = Cassandrax.cql(Cassandrax.MyConn, statement)
```
"""
@doc false
# Injects the keyspace API into any module that calls `use Cassandrax.Keyspace`.
#
# `opts` must contain `:name` (the Cassandra keyspace name) and `:cluster`
# (the cluster module); `:pool` optionally overrides the connection pool and
# defaults to the cluster itself. All functions defined inside the `quote`
# implement the behaviour callbacks declared below in this module.
defmacro __using__(opts) do
  quote bind_quoted: [opts: opts] do
    @behaviour Cassandrax.Keyspace

    # Compile-time configuration; `:name` and `:cluster` are mandatory.
    @keyspace_name Keyword.fetch!(opts, :name)
    @cluster Keyword.fetch!(opts, :cluster)
    @conn_pool Keyword.get(opts, :pool, @cluster)

    # Reads the runtime configuration of the backing cluster.
    def config do
      {:ok, config} = Cassandrax.Supervisor.runtime_config(@cluster, [])
      config
    end

    # Default per-operation options (eg. consistency) read from the config.
    def __default_options__(:write), do: __MODULE__.config() |> Keyword.get(:write_options)
    def __default_options__(:read), do: __MODULE__.config() |> Keyword.get(:read_options)

    def __keyspace__, do: @keyspace_name
    def __conn__, do: @conn_pool

    ## Keyspace gains its own pool if option `conn_pool` was given
    if @conn_pool == __MODULE__ do
      def child_spec(_), do: Cassandrax.Supervisor.child_spec(__MODULE__, config())
    end

    ## Schemas

    def insert(struct, opts \\ []),
      do: Cassandrax.Keyspace.Schema.insert(__MODULE__, struct, opts)

    def update(struct, opts \\ []),
      do: Cassandrax.Keyspace.Schema.update(__MODULE__, struct, opts)

    def delete(struct, opts \\ []),
      do: Cassandrax.Keyspace.Schema.delete(__MODULE__, struct, opts)

    def insert!(struct, opts \\ []),
      do: Cassandrax.Keyspace.Schema.insert!(__MODULE__, struct, opts)

    def update!(struct, opts \\ []),
      do: Cassandrax.Keyspace.Schema.update!(__MODULE__, struct, opts)

    def delete!(struct, opts \\ []),
      do: Cassandrax.Keyspace.Schema.delete!(__MODULE__, struct, opts)

    ## Queryable

    def all(queryable, opts \\ []),
      do: Cassandrax.Keyspace.Queryable.all(__MODULE__, queryable, opts)

    def get(queryable, primary_key, opts \\ []) do
      Cassandrax.Keyspace.Queryable.get(__MODULE__, queryable, primary_key, opts)
    end

    def one(queryable, opts \\ []),
      do: Cassandrax.Keyspace.Queryable.one(__MODULE__, queryable, opts)

    ## Run Plain CQL Statements

    def cql(statement, values \\ [], opts \\ []),
      do: Cassandrax.cql(@conn_pool, statement, values, opts)

    ## Batch

    def batch(opts \\ [], fun) do
      Cassandrax.Keyspace.Batch.run(__MODULE__, fun, opts)
    end

    def batch_insert(%Cassandrax.Keyspace.Batch{} = batch, struct),
      do: Cassandrax.Keyspace.Schema.batch_insert(__MODULE__, batch, struct)

    def batch_update(%Cassandrax.Keyspace.Batch{} = batch, struct),
      do: Cassandrax.Keyspace.Schema.batch_update(__MODULE__, batch, struct)

    def batch_delete(%Cassandrax.Keyspace.Batch{} = batch, struct),
      do: Cassandrax.Keyspace.Schema.batch_delete(__MODULE__, batch, struct)
  end
end
## User callbacks

@optional_callbacks init: 2

@doc """
A callback executed when the keyspace starts or when configuration is read.

The first argument is the context the callback is being invoked in. If it
is called because the Keyspace supervisor is starting, it will be `:supervisor`.
It will be `:runtime` if it is called for reading configuration without
actually starting a process.

The second argument is the keyspace configuration as stored in the
application environment. It must return `{:ok, keyword}` with the updated
list of configuration or `:ignore` (only in the `:supervisor` case).
"""
@callback init(context :: :supervisor | :runtime, config :: Keyword.t()) ::
            {:ok, Keyword.t()} | :ignore

@doc """
Accesses the consistency level that manages availability versus data accuracy.
Consistency level is configured for per individual read or write operation.
Pass `:read` or `:write` to access the consistency level (eg. `[consistency: :one]`).

## Example
```
[consistency: :one] = MyKeyspace.__default_options__(:read)
```
"""
@callback __default_options__(atom :: :read | :write) :: list | nil

@doc """
Accesses the name of the Keyspace.

## Example
```
"my_keyspace" = MyKeyspace.__keyspace__()
```
"""
@callback __keyspace__ :: String.t()

@doc """
Accesses the cluster that was setup in the runtime configuration.

## Example
```
Cassandrax.MyConn = MyKeyspace.__conn__()
```
"""
@callback __conn__ :: Cassandrax.Connection

@doc """
Inserts a struct defined in `Cassandrax.Schema` or a changeset.
If a struct is given, the struct is converted into a changeset with all non-nil fields.

## Example
```
{:ok, user} = MyKeyspace.insert(%User{id: 1, user_name: "bob"})
```
"""
@callback insert(
            struct_or_changeset :: Ecto.Changeset.t() | Cassandrax.Schema,
            opts :: Keyword.t()
          ) :: {:ok, Cassandrax.Schema.t()} | {:error, any()}

@doc """
Same as `insert/2` but returns the struct or raises if the changeset is invalid.
"""
@callback insert!(
            struct_or_changeset :: Ecto.Changeset.t() | Cassandrax.Schema,
            opts :: Keyword.t()
          ) :: Cassandrax.Schema.t()

@doc """
Updates a changeset using its primary key.
Requires a changeset as it is the only way to track changes.
If the struct has no primary key, Xandra.Error will be raised.

In CassandraDB, UPDATE is also an upsert. If the struct cannot be found, a new entry will be created.

It returns `{:ok, struct}` if the struct has been successfully updated or `{:error, message}`
if there was a validation or a known constraint error.

## Example
```
user = MyKeyspace.get(User, 1)
changeset = Ecto.Changeset.change(user, user_name: "tom")
MyKeyspace.update(changeset)
```
"""
@callback update(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) ::
            {:ok, Cassandrax.Schema.t()} | {:error, any()}

@doc """
Same as `update/2` but returns the struct or raises if the changeset is invalid.
"""
@callback update!(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) :: Cassandrax.Schema.t()

@doc """
Deletes a struct using its primary key.
If the struct has no primary key, Xandra.Error will be raised.
If the struct has been removed from db prior to call, it will still return `{:ok, Cassandrax.Schema.t()}`

It returns `{:ok, struct}` if the struct has been successfully deleted or `{:error, message}`
if there was a validation or a known constraint error.

## Example
```
MyKeyspace.delete(%User(id: 1, user_name: "bob"))
```
"""
@callback delete(
            struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(),
            opts :: Keyword.t()
          ) :: {:ok, Cassandrax.Schema.t()} | {:error, any()}

@doc """
Same as `delete/2` but returns the struct or raises if the changeset is invalid.
"""
@callback delete!(
            struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(),
            opts :: Keyword.t()
          ) :: Cassandrax.Schema.t()

@doc """
Fetches all entries from the data store that matches the given query.
May raise Xandra.Error if query validation fails.

## Example
```
query = where(User, id: 1)
MyKeyspace.all(query)
```
"""
@callback all(queryable :: Cassandrax.Queryable.t(), opts :: Keyword.t()) :: [
            Cassandrax.Schema.t()
          ]

@doc """
Fetches a single struct from the data store where the primary key matches
the given id. Returns `nil` if no result was found.

## Example
```
MyKeyspace.get(User, 2)
```
"""
@callback get(queryable :: Cassandrax.Queryable.t(), id :: term(), opts :: Keyword.t()) ::
            Cassandrax.Schema.t() | nil

@doc """
Fetches a single record from the query.
Returns `nil` if no records were found. May raise Cassandrax.MultipleResultsError,
if query returns more than one entry.

## Example
```
query = where(User, id: 1)
MyKeyspace.one(query)
```
"""
@callback one(queryable :: Cassandrax.Queryable.t(), opts :: Keyword.t()) ::
            Cassandrax.Schema.t() | nil

@doc """
Runs plain CQL Statements.

Returns `{:ok, map}` if the CQL is successfully run or `{:error, message}`
if there was a validation or a known constraint error.

## Example
```
statement = \"""
SELECT * my_keyspace.user
\"""

Cassandrax.cql(MyConn, statement)
```
"""
@callback cql(statement :: String.t() | list, values :: list, opts :: Keyword.t()) ::
            {:ok, map} | {:error, map}

@doc """
Runs batch queries.

Can be used to group and execute queries as Cassandra `BATCH` query.

## Options

`:logged` is the default behavior in Cassandrax. Logged batch acts like a lightweight
transaction around a batch operation. It enforces atomicity, and fails the batch if any of the queries fail.
Cassandra doesn't enforce any other transactional properties at batch level.

`:unlogged` consider it when there are multiple inserts and updates for the same partition key.
Unlogged batching will give a warning if too many operations or too many partitions are involved.

Read the CassandraDB documents for more information logged and unlogged batch operations.

## Example
```
user = MyKeyspace.get(User, id: 1)
changeset = Ecto.Changeset.change(user, user_name: "trent")

MyKeyspace.batch(fn batch ->
  batch
  |> MyKeyspace.batch_insert(%User{id: 3, user_name: "eve"})
  |> MyKeyspace.batch_insert(%User{id: 4, user_name: "mallory"})
  |> MyKeyspace.batch_update(changeset)
  |> MyKeyspace.batch_delete(user)
end)
```
"""
@callback batch(opts :: Keyword.t(), fun()) :: :ok | {:error, any()}

@doc """
Adds an `INSERT` query to the given batch.
"""
@callback batch_insert(batch :: Cassandrax.Keyspace.Batch.t(), Cassandrax.Schema.t()) :: :ok

@doc """
Adds an `UPDATE` query to the given batch.
"""
@callback batch_update(batch :: Cassandrax.Keyspace.Batch.t(), Cassandrax.Schema.t()) :: :ok

@doc """
Adds a `DELETE` query to the given batch.
"""
@callback batch_delete(batch :: Cassandrax.Keyspace.Batch.t(), Cassandrax.Schema.t()) :: :ok
end
|
lib/cassandrax/keyspace.ex
| 0.903884
| 0.77389
|
keyspace.ex
|
starcoder
|
defmodule EventSerializer.SchemaRegistryCache do
  @moduledoc """
  This service is responsible for fetching the schemas from the Schema Registry
  and caching their names and ids.

  The result is kept in this process's state so it can be reused by the
  EventSerializer.Publisher.

  We start the server using the start_link/0 function:

      EventSerializer.SchemaRegistryCache.start_link()

  Then we can fetch the id of a schema using the fetch/1 function:

      EventSerializer.SchemaRegistryCache.fetch("topic-key")
  """

  defmodule State do
    @moduledoc """
    This struct will represent the state of this GenServer.
    """
    # :id   - the schema id registered in the Schema Registry
    # :name - the schema subject name, eg. "topic-key" / "topic-value"
    defstruct [:id, :name]
  end

  use GenServer
  require Logger

  alias EventSerializer.Config

  @name __MODULE__

  def start_link, do: GenServer.start_link(@name, [], name: @name)

  @doc """
  This function starts the server and performs the caching.
  """
  # The :cache cast enqueued here lands in the mailbox before start_link/0
  # returns, so it is handled before any later fetch/1 call.
  def init(state) do
    cache()
    {:ok, state}
  end

  def cache, do: GenServer.cast(@name, :cache)

  @doc """
  This function returns the schema id for a given schema_name.

  On application boot the key and value schema ids are saved in this GenServer's
  state, so here we can quickly retrieve them.

  ## Example

      iex(1)> SchemaRegistryCache.fetch("a_known_matching_schema_key")
      2
      iex(2)> SchemaRegistryCache.fetch("a_unknown_schema_key")
      nil
  """
  def fetch(schema_name), do: GenServer.call(@name, {:fetch, schema_name})

  # Replaces the whole state with the freshly fetched list of State structs.
  def handle_cast(:cache, _state) do
    schemas = fetch_schemas()
    new_state = Enum.map(schemas, fn schema -> struct(State, schema) end)

    {:noreply, new_state}
  end

  def handle_cast(:cache, _state) do
    schemas = fetch_schemas()
    new_state = Enum.map(schemas, fn schema -> struct(State, schema) end)

    {:noreply, new_state}
  end

  def handle_call({:fetch, schema_name}, _from, state) do
    schema = Enum.find(state, fn subject -> subject.name == schema_name end)

    {:reply, extract_id(schema), state}
  end

  defp extract_id(schema) when is_nil(schema), do: nil
  defp extract_id(schema), do: schema.id

  @doc """
  This function fetches the schema ids from the Schema Registry.

  The :avlizer_confluent is used to fetch and cache the schema body.
  make_encoder function is responsible to do that. More information in the docs:
  https://github.com/klarna/avlizer/blob/master/src/avlizer_confluent.erl#L97

  The return of this function is a list of maps containing the schema name and id,
  which will be cached for future requests.

  ## Example

      [
        %{id: 13, name: "topic-value"},
        %{id: 12, name: "topic-key"}
      ]
  """
  def fetch_schemas do
    topics()
    |> Enum.flat_map(&fetch_schema/1)
  end

  def fetch_id(name), do: name |> schema_registry_adapter().schema_id_for()

  def key_schema_name(topic), do: topic <> "-key"
  def value_schema_name(topic), do: topic <> "-value"

  defp fetch_schema(topic) do
    schema_name_id = topic |> key_schema_name() |> fetch_id() |> make_encoder()
    schema_value_id = topic |> value_schema_name() |> fetch_id() |> make_encoder()

    format_response(topic, schema_name_id, schema_value_id)
  end

  # If either the key or the value schema id could not be resolved, the
  # whole topic is skipped.
  defp format_response(_topic, _schema_name_id, nil), do: []
  defp format_response(_topic, nil, _schema_value_id), do: []

  defp format_response(topic, schema_name_id, schema_value_id) do
    [
      %{id: schema_name_id, name: key_schema_name(topic)},
      %{id: schema_value_id, name: value_schema_name(topic)}
    ]
  end

  defp make_encoder({:error, _reason}), do: nil

  # The encoder is built purely for its side effect (:avlizer_confluent
  # caches the schema body); the schema id itself is passed through.
  defp make_encoder(value) do
    value |> avlizer_confluent().make_encoder()
    value
  end

  # Config from env
  defp topics, do: Config.topic_names()
  defp avlizer_confluent, do: Config.avlizer_confluent()
  defp schema_registry_adapter, do: Config.schema_registry_adapter()
end
|
lib/event_serializer/schema_registry_cache.ex
| 0.776199
| 0.521167
|
schema_registry_cache.ex
|
starcoder
|
defmodule HXL.Eval do
  @moduledoc false

  alias HXL.Ast.Body

  defstruct [:functions, :key_encoder, document: %{}, symbol_table: %{}]

  @type t :: %__MODULE__{
          document: map(),
          functions: map(),
          symbol_table: map(),
          key_encoder: (binary -> term())
        }

  @doc """
  Evaluates the given Ast by recursively walking the tree, returning a
  `t:t/0` whose `:document` holds the fully evaluated result.

  Undefined syntax elements such as unknown variables or functions cause an
  error to be raised during evaluation.

  ## Examples

      hcl = ~S(a = trim(" a "))
      {:ok, %HXL.Ast.Body{} = body} = HXL.decode_as_ast(hcl)
      %HXL.Eval{document: %{"a" => "a"}} = HXL.Eval.eval(body, functions: %{"trim" => &String.trim/1})

      hcl = "a = b"
      {:ok, %HXL.Ast.Body{} = body} = HXL.decode_as_ast(hcl)
      %HXL.Eval{document: %{"a" => 1}} = HXL.Eval.eval(body, variables: %{"b" => 1})
  """
  @spec eval(term(), Keyword.t()) :: t()
  def eval(hcl, opts \\ []) do
    evaluator = Keyword.get(opts, :evaluator, HXL.Evaluator.Base)

    context = %__MODULE__{
      key_encoder: opts |> Keyword.get(:keys, :strings) |> key_encoder(),
      functions: Keyword.get(opts, :functions, %{}),
      symbol_table: Keyword.get(opts, :variables, %{})
    }

    do_eval(hcl, evaluator, context)
  end

  # Folds every top-level statement of the body into the document, letting
  # the evaluator decide whether a statement yields a key/value pair, a map
  # to merge, or nothing at all.
  defp do_eval(%Body{statements: statements}, evaluator, initial_ctx) do
    Enum.reduce(statements, initial_ctx, fn statement, ctx ->
      case evaluator.eval(statement, ctx) do
        {{key, value}, ctx} ->
          %{ctx | document: Map.put(ctx.document, initial_ctx.key_encoder.(key), value)}

        {map, ctx} when is_map(map) ->
          %{ctx | document: Map.merge(ctx.document, map)}

        {:ignore, ctx} ->
          ctx
      end
    end)
  end

  # Translates the :keys option into the function used to encode document keys.
  defp key_encoder(option) do
    case option do
      :strings ->
        &Function.identity/1

      :atoms ->
        &String.to_atom/1

      :atoms! ->
        &String.to_existing_atom/1

      fun when is_function(fun, 1) ->
        fun

      arg ->
        raise(
          ArgumentError,
          "Invalid :keys option '#{inspect(arg)}', valid options :strings, :atoms, :atoms!, (binary -> term)"
        )
    end
  end
end
|
lib/hxl/eval.ex
| 0.73659
| 0.510313
|
eval.ex
|
starcoder
|
defmodule ValidatorsRo.CNP do
  @moduledoc """
  See `ValidatorsRo`
  """

  defmacro __using__(_opts) do
    quote location: :keep do
      import ValidatorsRo.Utils, only: [control_sum: 2]
      require Integer

      # Control-sum test key, least-significant digit first.
      @cnp_test_key 279146358279
      |> Integer.digits
      |> Enum.reverse

      @cnp_regexp ~r"^\d{13}$"

      @cnp_century_map %{ # Maps first number of CNP to century of birthdate
        "1" => "19",
        "2" => "19",
        "3" => "18",
        "4" => "18",
        "5" => "20",
        "6" => "20"
      }

      @cnp_county_map %{
        "01" => "Alba",
        "02" => "Arad",
        "03" => "Argeș",
        "04" => "Bacău",
        "05" => "Bihor",
        "06" => "Bistrița-Năsăud",
        "07" => "Botoșani",
        "08" => "Brașov",
        "09" => "Brăila",
        "10" => "Buzău",
        "11" => "Caraș-Severin",
        "12" => "Cluj",
        "13" => "Constanța",
        "14" => "Covasna",
        "15" => "Dâmbovița",
        "16" => "Dolj",
        "17" => "Galați",
        "18" => "Gorj",
        "19" => "Harghita",
        "20" => "Hunedoara",
        "21" => "Ialomița",
        "22" => "Iași",
        "23" => "Ilfov",
        "24" => "Maramureș",
        "25" => "Mehedinți",
        "26" => "Mureș",
        "27" => "Neamț",
        "28" => "Olt",
        "29" => "Prahova",
        "30" => "<NAME>",
        "31" => "Sălaj",
        "32" => "Sibiu",
        "33" => "Suceava",
        "34" => "Teleorman",
        "35" => "Timiș",
        "36" => "Tulcea",
        "37" => "Vaslui",
        "38" => "Vâlcea",
        "39" => "Vrancea",
        "40" => "București",
        "41" => "București Sectorul 1",
        "42" => "București Sectorul 2",
        "43" => "București Sectorul 3",
        "44" => "București Sectorul 4",
        "45" => "București Sectorul 5",
        "46" => "București Sectorul 6",
        "51" => "Călărași",
        "52" => "Giurgiu"
      }

      @cnp_sex_map %{
        :odd => "m",
        :even => "f"
      }

      @doc """
      Provides validation of Romanian CNPs (equivalent of SSNs)
      https://ro.wikipedia.org/wiki/Cod_numeric_personal
      """
      @spec valid_cnp?(String.t) :: boolean
      def valid_cnp?(cnp) do
        cnp_well_formed?(cnp) && cnp_valid_control_sum?(cnp)
      end

      @doc """
      Parses a CNP into a map of parts, with the following keys:
      * `:valid` (boolean)
      * `:sex`, a string of either "m" or "f"
      * `:date_of_birth`, as a string representation in ISO8601 format (YYYY-MM-DD)
      * `:county of birth` - a string representing the Romanian name of
        the county of birth. `nil` if `:foreign_resident` is true
      * `:per_county_index` - a numeric string between 001 - 999, see Wikipedia entry for details
      * `:control` - a single digit control
      * `:foreign_resident` (boolean), indicating person is a foreign national
      For invalid CNPs, no parsing is attempted and only `:valid` is returned.
      """
      @spec parse_cnp(String.t) :: map
      def parse_cnp(cnp) when is_bitstring(cnp) do
        parsed =
          if valid = valid_cnp?(cnp) do
            # The 13 digits decompose as S YY MM DD JJ NNN C.
            <<sex_code::bytes-size(1)>> <>
              <<dob_year::bytes-size(2)>> <>
              <<dob_month::bytes-size(2)>> <>
              <<dob_day::bytes-size(2)>> <>
              <<county_of_birth_code::bytes-size(2)>> <>
              <<county_index::bytes-size(3)>> <>
              <<control::bytes-size(1)>> = cnp

            sex_code = sex_code |> String.to_integer()
            sex = if Integer.is_odd(sex_code) do @cnp_sex_map.odd else @cnp_sex_map.even end
            foreign_resident = sex_code in [7, 8]

            # NOTE(review): a valid CNP starting with 0 or 9 would raise a
            # CaseClauseError here, since only 1-8 are covered — confirm
            # whether those first digits should be rejected upstream.
            date_of_birth =
              case sex_code do
                n when n in [1, 2, 7, 8] -> "19#{dob_year}-#{dob_month}-#{dob_day}"
                n when n in [3, 4] -> "18#{dob_year}-#{dob_month}-#{dob_day}"
                n when n in [5, 6] -> "20#{dob_year}-#{dob_month}-#{dob_day}"
              end

            %{
              sex: sex,
              date_of_birth: date_of_birth,
              county_of_birth_code: county_of_birth_code,
              county_of_birth: @cnp_county_map[county_of_birth_code],
              county_index: county_index,
              control: control,
              foreign_resident: foreign_resident
            }
          else
            nil
          end

        %{parsed: parsed, valid: valid}
      end

      defp cnp_well_formed?(cnp) do
        Regex.match?(@cnp_regexp, cnp) && valid_birthdate?(cnp)
      end

      defp cnp_valid_control_sum?(cnp) do
        {control, sum} = control_sum(cnp, @cnp_test_key)
        # Per the CNP algorithm a remainder of 10 maps to a control digit of 1.
        case rem(sum, 11) do
          10 -> control === 1
          rest -> control === rest
        end
      end

      # Validates the birthdate encoded in digits 2..7 of the CNP.
      #
      # For first digits 1-6 the century is known from @cnp_century_map.
      # For foreign nationals the first number of the CNP doesn't map to a
      # birthday century, so we try all recent centuries.
      #
      # Fixes two defects of the previous implementation: the century was
      # concatenated to the year digits *before* checking for :guess (which
      # raised ArgumentError for every foreign-resident CNP), and the guess
      # branch concatenated the month digits instead of the year digits.
      defp valid_birthdate?(cnp) do
        century = Map.get(@cnp_century_map, String.at(cnp, 0), :guess)
        year_digits = cnp |> String.slice(1, 2)
        month = cnp |> String.slice(3, 2)
        day = cnp |> String.slice(5, 2)

        case century do
          :guess ->
            @cnp_century_map
            |> Map.values
            |> Enum.uniq
            |> Enum.any?(&(valid_birthdate?(&1 <> year_digits, month, day)))
          _ -> valid_birthdate?(century <> year_digits, month, day)
        end
      end

      # A birthdate is valid when the assembled ISO8601 date parses.
      defp valid_birthdate?(year, month, day) do
        case Date.from_iso8601("#{year}-#{month}-#{day}") do
          {:ok, _} -> true
          {:error, _} -> false
        end
      end
    end
  end
end
|
lib/cnp/cnp.ex
| 0.60964
| 0.425963
|
cnp.ex
|
starcoder
|
defmodule Bacen.CCS.ACCS001 do
  @moduledoc """
  The ACCS001 message.

  This message is responsible to register or deregister
  persons from CCS system.

  It has the following XML example:

  ```xml
  <CCSArqAtlzDiaria>
    <Repet_ACCS001_Pessoa>
      <Grupo_ACCS001_Pessoa>
        <TpOpCCS>I</TpOpCCS>
        <QualifdrOpCCS>N</QualifdrOpCCS>
        <TpPessoa>F</TpPessoa>
        <CNPJ_CPFPessoa>12345678901</CNPJ_CPFPessoa>
        <DtIni>2002-01-01</DtIni>
        <DtFim>2002-01-03</DtFim>
      </Grupo_ACCS001_Pessoa>
      <Grupo_ACCS001_Pessoa>
        <TpOpCCS>I</TpOpCCS>
        <QualifdrOpCCS>N</QualifdrOpCCS>
        <TpPessoa>F</TpPessoa>
        <CNPJ_CPFPessoa>98765432102</CNPJ_CPFPessoa>
        <DtIni>2002-02-01</DtIni>
      </Grupo_ACCS001_Pessoa>
    </Repet_ACCS001_Pessoa>
    <QtdOpCCS>2</QtdOpCCS>
    <DtMovto>2004-10-10</DtMovto>
  </CCSArqAtlzDiaria>
  ```
  """
  use Ecto.Schema

  import Brcpfcnpj.Changeset
  import Ecto.Changeset

  @typedoc """
  The ACCS001 type
  """
  @type t :: %__MODULE__{}

  # Castable fields per embed, plus the XML element orderings served by
  # sequence/1 (the source names mirror the Bacen XML layout above).
  @daily_update_fields ~w(quantity movement_date)a
  @daily_update_fields_source_sequence ~w(Repet_ACCS001_Pessoa QtdOpCCS DtMovto)a
  @persons_fields ~w(cnpj)a
  @persons_fields_source_sequence ~w(CNPJBasePart Grupo_ACCS001_Pessoa)a
  @person_fields ~w(
    operation_type operation_qualifier type
    cpf_cnpj start_date end_date
  )a
  @person_required_fields ~w(
    operation_type operation_qualifier type
    cpf_cnpj start_date
  )a
  @person_fields_source_sequence ~w(TpOpCCS QualifdrOpCCS TpPessoa CNPJ_CPFPessoa DtIni DtFim)a

  # Allowed single-letter codes for person operations and person type
  # (F = natural person, J = legal entity).
  @allowed_operation_types ~w(E A I)
  @allowed_operation_qualifiers ~w(N P C L H E)
  @allowed_person_types ~w(F J)

  @primary_key false
  embedded_schema do
    embeds_one :daily_update, DailyUpdate, source: :CCSArqAtlzDiaria, primary_key: false do
      embeds_one :persons, Persons, source: :Repet_ACCS001_Pessoa, primary_key: false do
        embeds_many :person, Person, source: :Grupo_ACCS001_Pessoa, primary_key: false do
          field :operation_type, :string, source: :TpOpCCS
          field :operation_qualifier, :string, source: :QualifdrOpCCS
          field :type, :string, source: :TpPessoa
          field :cpf_cnpj, :string, source: :CNPJ_CPFPessoa
          field :start_date, :date, source: :DtIni
          field :end_date, :date, source: :DtFim
        end

        field :cnpj, :string, source: :CNPJBasePart
      end

      field :quantity, :integer, default: 0, source: :QtdOpCCS
      field :movement_date, :date, source: :DtMovto
    end
  end

  @doc """
  Creates a new ACCS001 message from given attributes.
  """
  @spec new(map()) :: {:ok, t()} | {:error, Ecto.Changeset.t()}
  def new(attrs) when is_map(attrs) do
    attrs
    |> changeset()
    |> apply_action(:insert)
  end

  @doc false
  def changeset(accs001 \\ %__MODULE__{}, attrs) when is_map(attrs) do
    accs001
    |> cast(attrs, [])
    |> cast_embed(:daily_update, with: &daily_update_changeset/2, required: true)
  end

  @doc false
  def daily_update_changeset(daily_update, attrs) when is_map(attrs) do
    daily_update
    |> cast(attrs, @daily_update_fields)
    |> validate_required(@daily_update_fields)
    |> validate_number(:quantity, greater_than_or_equal_to: 0)
    |> cast_embed(:persons, with: &persons_changeset/2)
    |> validate_by_quantity()
    |> validate_quantity_digit()
  end

  # The persons embed becomes mandatory once any operation is declared.
  defp validate_by_quantity(changeset) do
    quantity = get_field(changeset, :quantity, 0)

    if quantity > 0 do
      cast_embed(changeset, :persons, with: &persons_changeset/2, required: true)
    else
      changeset
    end
  end

  # The Bacen layout caps QtdOpCCS at 9 digits.
  defp validate_quantity_digit(changeset) do
    quantity =
      changeset
      |> get_field(:quantity, 0)
      |> to_string()

    if String.length(quantity) > 9 do
      add_error(changeset, :quantity, "number should be minor than 9 digits")
    else
      changeset
    end
  end

  @doc false
  def persons_changeset(persons, attrs) when is_map(attrs) do
    persons
    |> cast(attrs, @persons_fields)
    |> validate_required(@persons_fields)
    |> validate_length(:cnpj, is: 8)
    |> validate_format(:cnpj, ~r/[0-9]{8}/)
    |> cast_embed(:person, with: &person_changeset/2, required: true)
  end

  @doc false
  def person_changeset(person, attrs) when is_map(attrs) do
    person
    |> cast(attrs, @person_fields)
    |> validate_required(@person_required_fields)
    |> validate_inclusion(:operation_type, @allowed_operation_types)
    |> validate_inclusion(:operation_qualifier, @allowed_operation_qualifiers)
    |> validate_inclusion(:type, @allowed_person_types)
    |> validate_length(:operation_type, is: 1)
    |> validate_length(:operation_qualifier, is: 1)
    |> validate_length(:type, is: 1)
    |> validate_by_operation_type()
    |> validate_by_type()
  end

  # Operation type "A" additionally requires an end date.
  defp validate_by_operation_type(changeset) do
    case get_field(changeset, :operation_type) do
      "A" -> validate_required(changeset, [:end_date])
      _ -> changeset
    end
  end

  # Natural persons (F) carry a CPF, legal entities (J) carry a CNPJ.
  defp validate_by_type(changeset) do
    case get_field(changeset, :type) do
      "F" -> validate_cpf(changeset, :cpf_cnpj, message: "invalid CPF format")
      "J" -> validate_cnpj(changeset, :cpf_cnpj, message: "invalid CNPJ format")
      _ -> changeset
    end
  end

  @doc """
  Returns the field sequence for given root xml element

  ## Examples

      iex> Bacen.CCS.ACCS001.sequence(:CCSArqAtlzDiaria)
      [:Repet_ACCS001_Pessoa, :QtdOpCCS, :DtMovto]

      iex> Bacen.CCS.ACCS001.sequence(:Repet_ACCS001_Pessoa)
      [:CNPJBasePart, :Grupo_ACCS001_Pessoa]

      iex> Bacen.CCS.ACCS001.sequence(:Grupo_ACCS001_Pessoa)
      [:TpOpCCS, :QualifdrOpCCS, :TpPessoa, :CNPJ_CPFPessoa, :DtIni, :DtFim]

  """
  @spec sequence(:CCSArqAtlzDiaria | :Repet_ACCS001_Pessoa | :Grupo_ACCS001_Pessoa) ::
          list(atom())
  def sequence(element)
  def sequence(:CCSArqAtlzDiaria), do: @daily_update_fields_source_sequence
  def sequence(:Repet_ACCS001_Pessoa), do: @persons_fields_source_sequence
  def sequence(:Grupo_ACCS001_Pessoa), do: @person_fields_source_sequence
end
|
lib/bacen/ccs/accs001.ex
| 0.72952
| 0.645092
|
accs001.ex
|
starcoder
|
defmodule BSV.Block do
  @moduledoc """
  Module for the construction, parsing and serialization of Bitcoin block headers.
  """
  use Bitwise
  alias BSV.Crypto.Hash
  alias BSV.Util
  alias BSV.Util.VarBin
  alias BSV.Transaction

  @enforce_keys [:version, :previous_block, :merkle_root, :timestamp, :bits, :nonce]

  @typedoc "A Bitcoin block."
  defstruct [
    :hash,
    :version,
    :previous_block,
    :merkle_root,
    :timestamp,
    :bits,
    :nonce,
    :transactions
  ]

  @type t :: %__MODULE__{
          hash: binary() | nil,
          version: non_neg_integer(),
          previous_block: binary(),
          merkle_root: binary(),
          timestamp: DateTime.t(),
          bits: binary(),
          nonce: binary(),
          transactions: [Transaction.t()] | nil
        }

  @doc """
  Parse the given binary into a block. Returns a tuple containing the
  parsed block and the remaining binary data.

  ## Arguments

  * `include_transactions` - will attempt to parse transactions that follow the block header in the
    binary. Disabled by default.

  ## Options

  The accepted options are:

  * `:encoding` - Optionally decode the binary with either the `:base64` or `:hex` encoding scheme.
  * `:input_filter`, `:output_filter`, `:transaction_filter` - forwarded to
    `BSV.Transaction.parse/2` when `include_transactions` is true.

  ## Examples

      iex> raw = "010000006FE28C0AB6F1B372C1A6A246AE63F74F931E8365E15A089C68D6190000000000982051FD1E4BA744BBBE680E1FEE14677BA1A3C3540BF7B1CDB606E857233E0E61BC6649FFFF001D01E36299"
      iex> {block, ""} = raw |> Base.decode16!() |> BSV.Block.parse()
      iex> Base.encode16(block.hash)
      "00000000839A8E6886AB5951D76F411475428AFC90947EE320161BBF18EB6048"

      iex> {block, ""} = raw |> BSV.Block.parse(false, encoding: :hex)
      iex> Base.encode16(block.hash)
      "00000000839A8E6886AB5951D76F411475428AFC90947EE320161BBF18EB6048"
  """
  @spec parse(binary | IO.device(), boolean, keyword) :: {__MODULE__.t(), binary}
  def parse(data, include_transactions \\ false, options \\ []) do
    encoding = Keyword.get(options, :encoding)
    input_filter = Keyword.get(options, :input_filter)
    output_filter = Keyword.get(options, :output_filter)
    transaction_filter = Keyword.get(options, :transaction_filter)

    # A serialized block header is exactly 80 bytes.
    {block_bytes, rest} = data |> Util.decode(encoding) |> VarBin.read_bytes(80)

    # Header layout: version (4, LE) | prev block (32) | merkle root (32) |
    # timestamp (4, LE) | bits (4) | nonce (4).
    <<version::little-size(32), previous_block::binary-size(32), merkle_root::binary-size(32),
      timestamp::little-size(32), bits::binary-size(4), nonce::binary-size(4)>> = block_bytes

    {transactions, rest} =
      if include_transactions do
        rest
        |> VarBin.parse_items(
          &Transaction.parse(&1,
            input_filter: input_filter,
            output_filter: output_filter,
            transaction_filter: transaction_filter
          )
        )
      else
        {nil, rest}
      end

    {%__MODULE__{
       # Double SHA-256 of the header, byte-reversed into display order.
       hash: block_bytes |> Hash.sha256_sha256() |> Util.reverse_bin(),
       version: version,
       previous_block: previous_block |> Util.reverse_bin(),
       merkle_root: merkle_root,
       timestamp: DateTime.from_unix!(timestamp),
       bits: bits,
       nonce: nonce,
       transactions: transactions
     }, rest}
  end

  @doc """
  Serialises the given block into a binary.

  ## Arguments

  * `include_transactions` - will add transactions into the serialized binary. Disabled by default.

  ## Options

  The accepted options are:

  * `:encoding` - Optionally encode the returned binary with either the `:base64` or `:hex` encoding scheme.

  ## Examples

      BSV.Block.serialize(input)
      <<binary>>
  """
  @spec serialize(__MODULE__.t(), boolean, keyword) :: binary
  def serialize(block, include_transactions \\ false, options \\ [])

  def serialize(%__MODULE__{} = block, false, options) do
    encoding = Keyword.get(options, :encoding)
    timestamp = DateTime.to_unix(block.timestamp)

    (<<block.version::little-size(32)>> <>
       (block.previous_block |> Util.reverse_bin()) <>
       block.merkle_root <>
       <<timestamp::little-size(32)>> <>
       block.bits <>
       block.nonce)
    |> Util.encode(encoding)
  end

  def serialize(%__MODULE__{transactions: transactions} = block, true, options)
      when is_list(transactions) do
    encoding = Keyword.get(options, :encoding)

    (serialize(block, false) <>
       (transactions |> VarBin.serialize_items(&Transaction.serialize/1)))
    |> Util.encode(encoding)
  end

  @doc """
  Gets the block id (hash in the hex form).

  Examples

      iex> raw = "010000006FE28C0AB6F1B372C1A6A246AE63F74F931E8365E15A089C68D6190000000000982051FD1E4BA744BBBE680E1FEE14677BA1A3C3540BF7B1CDB606E857233E0E61BC6649FFFF001D01E36299"
      iex> {block, ""} = BSV.Block.parse(raw, false, encoding: :hex)
      iex> BSV.Block.id(block)
      "00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048"
  """
  @spec id(__MODULE__.t()) :: String.t()
  def id(block) do
    case block.hash do
      nil ->
        # No cached hash: recompute from the serialized header.
        block
        |> serialize()
        |> Hash.sha256_sha256()
        |> Util.reverse_bin()
        |> Util.encode(:hex)

      _ ->
        Util.encode(block.hash, :hex)
    end
  end

  @doc """
  Gets the block hash.

  ## Examples

      iex> raw = "010000006FE28C0AB6F1B372C1A6A246AE63F74F931E8365E15A089C68D6190000000000982051FD1E4BA744BBBE680E1FEE14677BA1A3C3540BF7B1CDB606E857233E0E61BC6649FFFF001D01E36299"
      iex> {block, ""} = BSV.Block.parse(raw, false, encoding: :hex)
      iex> BSV.Block.hash(block)
      <<0, 0, 0, 0, 131, 154, 142, 104, 134, 171, 89, 81, 215, 111, 65, 20, 117, 66, 138, 252, 144, 148, 126, 227, 32, 22, 27, 191, 24, 235, 96, 72>>
  """
  # NOTE(review): the parameter actually receives a %BSV.Block{}; the name
  # `transaction` looks like a copy-paste from Transaction.hash/1 — confirm.
  @spec hash(__MODULE__.t()) :: binary()
  def hash(transaction) do
    case transaction.hash do
      nil ->
        transaction
        |> serialize()
        |> Hash.sha256_sha256()
        |> Util.reverse_bin()

      _ ->
        transaction.hash
    end
  end

  @doc """
  Gets the previous block id.

  Examples

      iex> raw = "010000006FE28C0AB6F1B372C1A6A246AE63F74F931E8365E15A089C68D6190000000000982051FD1E4BA744BBBE680E1FEE14677BA1A3C3540BF7B1CDB606E857233E0E61BC6649FFFF001D01E36299"
      iex> {block, ""} = BSV.Block.parse(raw, false, encoding: :hex)
      iex> BSV.Block.previous_id(block)
      "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"
  """
  @spec previous_id(__MODULE__.t()) :: String.t()
  def previous_id(block) do
    block.previous_block |> Util.encode(:hex)
  end
end
|
lib/bsv/block.ex
| 0.91727
| 0.55447
|
block.ex
|
starcoder
|
defmodule ZeroMQ.FrameSplitter do
  @moduledoc """
  A GenServer which when fed a stream of binaries
  will split out ZeroMQ frames and return the binary
  blob (without parsing into a Command or Message).
  """
  use GenServer

  @doc """
  Starts the splitter.
  """
  def start_link do
    GenServer.start_link(__MODULE__, :ok, [])
  end

  @doc """
  Appends the provided binary blob to the current stream.
  Returns `{:ok, count_of_full_frames_ready}`.
  """
  def add_binary(splitter, blob) do
    GenServer.call(splitter, {:add_binary, blob})
  end

  @doc """
  Returns the (possibly empty) list of complete frame bodies and flags
  as `{:ok, [{flags, frame_body}, ..]}`.
  """
  def fetch(splitter) do
    GenServer.call(splitter, :fetch)
  end

  @doc """
  Initial state: no header parsed yet (nil size and flags), an empty
  stream in progress and an empty queue of completed frame bodies.
  """
  def init(:ok) do
    {:ok, {nil, nil, <<>>, :queue.new}}
  end

  def handle_call({:add_binary, blob}, _from, {size, flags, stream, bodies}) do
    {flags, size, remaining, bodies} = extract_frame_body(flags, size, stream <> blob, bodies)

    {:reply, {:ok, :queue.len(bodies)}, {size, flags, remaining, bodies}}
  end

  def handle_call(:fetch, _from, {size, flags, stream, bodies}) do
    {:reply, {:ok, :queue.to_list(bodies)}, {size, flags, stream, :queue.new}}
  end

  # Recursively peels complete frames off the front of `stream`, enqueueing
  # each `{flags, body}` pair. Returns `{flags, size, stream, bodies}` with
  # any partially received header or body carried over to the next call.
  defp extract_frame_body(flags, size, stream, bodies) do
    header =
      if is_nil(size) or is_nil(flags) do
        ZeroMQ.Frame.extract_flags_and_size(stream)
      else
        {flags, size, stream}
      end

    case header do
      # Not enough bytes for a complete header yet; start fresh next time.
      :error ->
        {nil, nil, stream, bodies}

      # A whole frame body is available: slice it off and keep going.
      {flags, size, rest} when byte_size(rest) >= size ->
        <<body::binary-size(size), remainder::binary>> = rest
        extract_frame_body(nil, nil, remainder, :queue.in({flags, body}, bodies))

      # Header known but the body is still incomplete.
      {flags, size, rest} ->
        {flags, size, rest, bodies}
    end
  end
end
|
lib/util/frame_splitter.ex
| 0.845113
| 0.542803
|
frame_splitter.ex
|
starcoder
|
defmodule Grizzly.ZWave.Commands.MultiChannelEndpointFindReport do
  @moduledoc """
  This command is used to advertise End Points that implement a given combination of Generic and
  Specific Device Classes.

  Params:

  * `:reports_to_follow` - the number of reports to follow (required)
  * `:generic_device_class` - a generic device class (required)
  * `:specific_device_class` - a specific device class (required)
  * `:end_points` - the list of End Point identifier(s) that matches the advertised Generic and
    Specific Device Class values. (required)
  """

  @behaviour Grizzly.ZWave.Command

  alias Grizzly.ZWave.{Command, DecodeError, DeviceClasses}
  alias Grizzly.ZWave.CommandClasses.MultiChannel

  @type end_point :: MultiChannel.end_point()

  @type param ::
          {:reports_to_follow, byte}
          | {:generic_device_class, DeviceClasses.generic_device_class()}
          | {:specific_device_class, DeviceClasses.specific_device_class()}
          | {:end_points, [end_point]}

  @impl true
  @spec new([param()]) :: {:ok, Command.t()}
  def new(params) do
    command = %Command{
      name: :multi_channel_endpoint_find_report,
      command_byte: 0x0C,
      command_class: MultiChannel,
      params: params,
      impl: __MODULE__
    }

    {:ok, command}
  end

  # Wire format: <<reports_to_follow, generic class byte, specific class
  # byte>> followed by one byte per End Point identifier.
  @impl true
  @spec encode_params(Command.t()) :: binary()
  def encode_params(command) do
    reports_to_follow = Command.param!(command, :reports_to_follow)
    generic_device_class = Command.param!(command, :generic_device_class)
    generic_device_class_byte = DeviceClasses.generic_device_class_to_byte(generic_device_class)

    # The specific class byte depends on the generic class it belongs to.
    specific_device_class_byte =
      DeviceClasses.specific_device_class_to_byte(
        generic_device_class,
        Command.param!(command, :specific_device_class)
      )

    end_points = Command.param!(command, :end_points)

    <<reports_to_follow, generic_device_class_byte, specific_device_class_byte>> <>
      encode_end_points(end_points)
  end

  @impl true
  @spec decode_params(binary()) :: {:ok, [param()]} | {:error, DecodeError.t()}
  def decode_params(
        <<reports_to_follow, generic_device_class_byte, specific_device_class_byte,
          end_points_binary::binary>>
      ) do
    end_points = decode_end_points(end_points_binary)

    with {:ok, generic_device_class} <-
           MultiChannel.decode_generic_device_class(generic_device_class_byte),
         {:ok, specific_device_class} <-
           MultiChannel.decode_specific_device_class(
             generic_device_class,
             specific_device_class_byte
           ) do
      {:ok,
       [
         reports_to_follow: reports_to_follow,
         generic_device_class: generic_device_class,
         specific_device_class: specific_device_class,
         end_points: end_points
       ]}
    else
      {:error, %DecodeError{}} = error ->
        error
    end
  end

  # Each End Point identifier occupies the low 7 bits of a byte; the top
  # bit is reserved and written as 0.
  defp encode_end_points(end_points) do
    for end_point <- end_points, into: <<>>, do: <<0x00::size(1), end_point::size(7)>>
  end

  defp decode_end_points(binary) do
    for <<_reserved::size(1), end_point::size(7) <- binary>>, do: end_point
  end
end
|
lib/grizzly/zwave/commands/multi_channel_endpoint_find_report.ex
| 0.905348
| 0.436202
|
multi_channel_endpoint_find_report.ex
|
starcoder
|
defmodule Solana.SPL.AssociatedToken do
  @moduledoc """
  Functions for interacting with the [Associated Token Account
  Program](https://spl.solana.com/associated-token-account).

  An associated token account's address is derived from a user's main system
  account and the token mint, which means each user can only have one
  associated token account per token.
  """
  alias Solana.{SPL.Token, Key, Instruction, Account, SystemProgram}
  import Solana.Helpers

  @doc """
  The Associated Token Account's Program ID
  """
  def id(), do: Solana.pubkey!("<KEY>")

  @doc """
  Finds the token account address associated with a given owner and mint.
  This address will be unique to the mint/owner combination.
  """
  @spec find_address(mint :: Solana.key(), owner :: Solana.key()) :: {:ok, Solana.key()} | :error
  def find_address(mint, owner) do
    # Only on-curve keys can own an associated token account.
    if Ed25519.on_curve?(owner) do
      case Key.find_address([owner, Token.id(), mint], id()) do
        {:ok, address, _seed} -> {:ok, address}
        _ -> :error
      end
    else
      :error
    end
  end

  @create_account_schema [
    payer: [
      type: {:custom, Solana.Key, :check, []},
      required: true,
      doc: "The account which will pay for the `new` account's creation"
    ],
    owner: [
      type: {:custom, Solana.Key, :check, []},
      required: true,
      doc: "The account which will own the `new` account"
    ],
    new: [
      type: {:custom, Solana.Key, :check, []},
      required: true,
      doc: "Public key of the associated token account to create"
    ],
    mint: [
      type: {:custom, Solana.Key, :check, []},
      required: true,
      doc: "The mint of the `new` account"
    ]
  ]
  @doc """
  Creates an associated token account.
  This will be owned by the `owner` regardless of who actually creates it.
  ## Options
  #{NimbleOptions.docs(@create_account_schema)}
  """
  def create_account(opts) do
    # `with` returns the validation error unchanged when opts are invalid.
    with {:ok, params} <- validate(opts, @create_account_schema) do
      %Instruction{
        program: id(),
        accounts: [
          %Account{key: params.payer, writable?: true, signer?: true},
          %Account{key: params.new, writable?: true},
          %Account{key: params.owner},
          %Account{key: params.mint},
          %Account{key: SystemProgram.id()},
          %Account{key: Token.id()},
          %Account{key: Solana.rent()}
        ],
        data: Instruction.encode_data([0])
      }
    end
  end
end
|
lib/solana/spl/associated_token.ex
| 0.837836
| 0.475544
|
associated_token.ex
|
starcoder
|
defmodule Mix.Deps.Retriever do
# NOTE(review): this module targets a pre-1.0 Elixir. `Mix.Dep[...]` record
# syntax, `//` default arguments, `is_regex/1` and the `%s(...)` sigil are
# legacy constructs that will not compile on modern Elixir releases.
@moduledoc false
@doc """
Gets all direct children of the current `Mix.Project`
as a `Mix.Dep` record. Umbrella project dependencies
are included as children.
"""
def children do
to_deps(Mix.project[:deps]) ++ Mix.Deps.Umbrella.children
end
@doc """
Gets all children of a given dependency using
the base project configuration.
"""
def children(dep, config) do
cond do
# A fetched Mix dependency: evaluate its mixfile to list its own deps
# (plus any umbrella children it declares).
Mix.Deps.available?(dep) and mixfile?(dep) ->
Mix.Deps.in_dependency(dep, config, fn _ ->
to_deps(Mix.project[:deps]) ++ Mix.Deps.Umbrella.children
end)
# A fetched rebar dependency: read children out of its rebar config.
Mix.Deps.available?(dep) and rebarconfig?(dep) ->
Mix.Deps.in_dependency(dep, config, fn _ -> rebar_children end)
# Unfetched deps (or unrecognized managers) expose no children.
true ->
[]
end
end
@doc """
Updates the status of a dependency.
"""
# Re-runs retrieval for an already-built dep record, reusing its own scm,
# origin file and manager.
def update(Mix.Dep[scm: scm, app: app, requirement: req, opts: opts,
manager: manager, from: from]) do
update({ app, req, opts }, [scm], from, manager)
end
@doc """
Converts the given list of raw deps to dependencies.
"""
def to_deps(deps) do
scms = Mix.SCM.available
from = current_source(:mix)
Enum.map(deps || [], &update(&1, scms, from))
end
## Helpers
# Recurses through rebar config directories and converts each declared dep,
# tagging them with the :rebar manager.
defp rebar_children do
scms = Mix.SCM.available
from = current_source(:rebar)
Mix.Rebar.recur(".", fn config ->
Mix.Rebar.deps(config) |> Enum.map(&update(&1, scms, from, :rebar))
end) |> Enum.concat
end
# Builds a dep record from a raw dep tuple, then (when the source is
# fetched on disk) detects its build manager and validates its .app file.
defp update(tuple, scms, from, manager // nil) do
dep = with_scm_and_app(tuple, scms).from(from)
# Regex requirements are only tolerated when declared in a rebar.config.
if match?({ _, req, _ } when is_regex(req), tuple) and
not String.ends_with?(from, "rebar.config") do
invalid_dep_format(tuple)
end
if Mix.Deps.available?(dep) do
validate_app(cond do
# If the manager was already set to rebar, let's use it
manager == :rebar ->
rebar_dep(dep)
mixfile?(dep) ->
Mix.Deps.in_dependency(dep, fn project ->
mix_dep(dep, project)
end)
rebarconfig?(dep) or rebarexec?(dep) ->
rebar_dep(dep)
makefile?(dep) ->
make_dep(dep)
true ->
dep
end)
else
dep
end
end
# Absolute path of the file that declares deps for the given manager;
# used as the `from` origin in error messages.
defp current_source(manager) do
case manager do
:mix -> "mix.exs"
:rebar -> "rebar.config"
end |> Path.absname
end
# Tags a manager-less dep as :mix and records where its compiled .app file
# should live (umbrella projects opt out with `false`).
defp mix_dep(Mix.Dep[manager: nil, opts: opts, app: app] = dep, project) do
default =
if Mix.Project.umbrella? do
false
else
Path.join(Mix.project[:compile_path], "#{app}.app")
end
opts = Keyword.put_new(opts, :app, default)
dep.manager(:mix).source(project).opts(opts)
end
# Manager already decided: leave the dep untouched.
defp mix_dep(dep, _project), do: dep
# Tags a manager-less dep as :rebar and attaches its loaded rebar config.
defp rebar_dep(Mix.Dep[manager: nil, opts: opts] = dep) do
config = Mix.Rebar.load_config(opts[:dest])
dep.manager(:rebar).source(config)
end
defp rebar_dep(dep), do: dep
# Tags a manager-less dep as :make (built via a plain Makefile).
defp make_dep(Mix.Dep[manager: nil] = dep) do
dep.manager(:make)
end
defp make_dep(dep), do: dep
# Normalizes the two-element form { app, opts } to the canonical
# { app, requirement, opts } triple.
defp with_scm_and_app({ app, opts }, scms) when is_atom(app) and is_list(opts) do
with_scm_and_app({ app, nil, opts }, scms)
end
# Finds the first SCM that accepts the dep's options and builds the
# Mix.Dep record; raises when no SCM matches.
defp with_scm_and_app({ app, req, opts }, scms) when is_atom(app) and
(is_binary(req) or is_regex(req) or req == nil) and is_list(opts) do
path = Path.join(Mix.project[:deps_path], app)
opts = Keyword.put(opts, :dest, path)
{ scm, opts } = Enum.find_value scms, { nil, [] }, fn(scm) ->
(new = scm.accepts_options(app, opts)) && { scm, new }
end
if scm do
Mix.Dep[
scm: scm,
app: app,
requirement: req,
status: scm_status(scm, opts),
opts: opts
]
else
raise Mix.Error, message: "#{inspect Mix.Project.get} did not specify a supported scm " <>
"for app #{inspect app}, expected one of :git, :path or :in_umbrella"
end
end
# Anything else is a malformed dep declaration.
defp with_scm_and_app(other, _scms) do
invalid_dep_format(other)
end
# { :ok, nil } when the source is checked out on disk, :unavailable with
# the destination path otherwise.
defp scm_status(scm, opts) do
if scm.checked_out? opts do
{ :ok, nil }
else
{ :unavailable, opts[:dest] }
end
end
# Checks the dep's compiled .app file (unless opts[:app] == false opts out)
# and records the resulting status on the dep record.
defp validate_app(Mix.Dep[opts: opts, requirement: req, app: app] = dep) do
opts_app = opts[:app]
if opts_app == false do
dep
else
path = if is_binary(opts_app), do: opts_app, else: "ebin/#{app}.app"
path = Path.expand(path, opts[:dest])
dep.status app_status(path, app, req)
end
end
# Parses the .app resource file and compares its :vsn against the
# requirement, mapping each failure mode to a status tuple.
defp app_status(app_path, app, req) do
case :file.consult(app_path) do
{ :ok, [{ :application, ^app, config }] } ->
case List.keyfind(config, :vsn, 0) do
{ :vsn, actual } when is_list(actual) ->
actual = iolist_to_binary(actual)
if vsn_match?(req, actual) do
{ :ok, actual }
else
{ :nomatchvsn, actual }
end
{ :vsn, actual } ->
{ :invalidvsn, actual }
nil ->
{ :invalidvsn, nil }
end
{ :ok, _ } -> { :invalidapp, app_path }
{ :error, _ } -> { :noappfile, app_path }
end
end
# nil matches anything; regexes match textually; strings use Version.
defp vsn_match?(nil, _actual), do: true
defp vsn_match?(req, actual) when is_regex(req), do: actual =~ req
defp vsn_match?(req, actual) when is_binary(req) do
Version.match?(actual, req)
end
# Manager detection: probe the fetched source for well-known build files.
defp mixfile?(dep) do
File.regular?(Path.join(dep.opts[:dest], "mix.exs"))
end
defp rebarexec?(dep) do
File.regular?(Path.join(dep.opts[:dest], "rebar"))
end
defp rebarconfig?(dep) do
Enum.any?(["rebar.config", "rebar.config.script"], fn file ->
File.regular?(Path.join(dep.opts[:dest], file))
end)
end
defp makefile?(dep) do
File.regular? Path.join(dep.opts[:dest], "Makefile")
end
defp invalid_dep_format(dep) do
raise Mix.Error, message: %s(Dependency specified in the wrong format: #{inspect dep}, ) <>
%s(expected { app :: atom, opts :: Keyword.t } | { app :: atom, requirement :: String.t, opts :: Keyword.t })
end
end
|
lib/mix/lib/mix/deps/retriever.ex
| 0.703651
| 0.459925
|
retriever.ex
|
starcoder
|
defmodule Bencode do
  @moduledoc ~S"""
  A bencode implementation specified by the BEP 03.

  ## Examples

      iex> Bencode.decode("d4:samplekeyi50ee")
      {:error, :string, "keyi50ee"}
      iex> Bencode.encode(%{"foobar" => 10, "baz" => [], "bar" => %{}})
      "d3:barde3:bazle6:foobari10ee"
  """

  @doc """
  Encode Elixir data structures into a bencoded string.

  Supports integers, binaries, lists and maps. Dictionary entries are
  emitted in sorted key order as required by BEP 03.
  """
  def encode(data) do
    encode_value(data)
  end

  # Concatenates the encoding of every list element.
  defp reduce_list(list) do
    Enum.reduce(list, "", fn element, acc -> acc <> encode_value(element) end)
  end

  # Concatenates `<key><value>` pairs. BEP 03 requires dictionary keys to
  # appear sorted, so sort the entries first — maps with more than 32 keys
  # iterate in undefined order otherwise.
  defp reduce_dict(dict) do
    dict
    |> Enum.sort()
    |> Enum.reduce("", fn {key, value}, acc ->
      acc <> encode_value(key) <> encode_value(value)
    end)
  end

  defp encode_value(data) when is_integer(data), do: "i#{data}e"
  defp encode_value(data) when is_binary(data), do: "#{byte_size(data)}:#{data}"
  defp encode_value(data) when is_list(data), do: "l" <> reduce_list(data) <> "e"
  defp encode_value(data) when is_map(data), do: "d" <> reduce_dict(data) <> "e"

  @doc """
  Decodes a bencoded string into Elixir data structures.

  Returns `{:ok, value}` on success otherwise `{:error, reason, where}`.
  Reason values might be:

    * `:string`
    * `:number`
    * `:list`
    * `:dict`

  The `where` return contains the part of the input where it stopped parsing
  the bencoded string.
  """
  def decode(data) when is_binary(data) do
    case decode_value(data) do
      {:ok, value, _tail} -> {:ok, value}
      {:error, what, tail} -> {:error, what, tail}
    end
  end

  # Dispatch on the leading type marker.
  defp decode_value(<<?i, tail::binary>>), do: decode_number(tail)
  defp decode_value(<<?l, tail::binary>>), do: decode_list(tail)
  defp decode_value(<<?d, tail::binary>>), do: decode_dict(tail)
  defp decode_value(data), do: decode_string(data)

  # Strings: `<length>:<bytes>` — accumulate length digits, then slice.
  defp decode_string(data), do: decode_string(data, "")

  # Guard on a non-empty accumulator so a leading `:` (no digits) falls
  # through to the error clause instead of crashing Integer.parse/1.
  defp decode_string(<<?:, tail::binary>>, acc) when acc != "" do
    {length, ""} = Integer.parse(acc)

    if byte_size(tail) < length do
      # The declared length runs past the end of the input.
      {:error, :string, tail}
    else
      {:ok, binary_part(tail, 0, length), binary_part(tail, length, byte_size(tail) - length)}
    end
  end

  defp decode_string(<<digit, tail::binary>>, acc) when digit in ?0..?9 do
    decode_string(tail, acc <> <<digit>>)
  end

  defp decode_string(tail, _acc), do: {:error, :string, tail}

  # Numbers: `i<digits>e`, with an optional leading minus sign (BEP 03
  # allows negative integers, e.g. "i-3e").
  defp decode_number(<<data::binary>>), do: decode_number(data, "")

  defp decode_number(<<?e, tail::binary>>, acc) do
    # "ie" or "i-e" have nothing to parse — report an error rather than crash.
    case Integer.parse(acc) do
      {number, ""} -> {:ok, number, tail}
      _ -> {:error, :number, tail}
    end
  end

  # A minus sign is only valid as the very first character.
  defp decode_number(<<?-, tail::binary>>, ""), do: decode_number(tail, "-")

  defp decode_number(<<digit, tail::binary>>, acc) when digit in ?0..?9 do
    decode_number(tail, acc <> <<digit>>)
  end

  defp decode_number(<<data::binary>>, _acc), do: {:error, :number, data}

  # Lists: `l<items>e` — elements accumulate in reverse and are flipped once.
  defp decode_list(<<data::binary>>), do: decode_list([], data)

  defp decode_list(list, <<?e, tail::binary>>), do: {:ok, Enum.reverse(list), tail}
  # The empty-input clause must precede the catch-all binary clause,
  # otherwise it is unreachable (`<<data::binary>>` also matches "").
  defp decode_list(_list, ""), do: {:error, :list, ""}

  defp decode_list(list, <<data::binary>>) do
    with {:ok, value, tail} <- decode_value(data),
         do: decode_list([value | list], tail)
  end

  # Dictionaries: `d<key><value>...e` — keys must be bencoded strings.
  defp decode_dict(<<data::binary>>), do: decode_dict(%{}, data)

  defp decode_dict(dict, <<?e, tail::binary>>), do: {:ok, dict, tail}
  # Same ordering fix as decode_list/2: "" must precede the catch-all.
  defp decode_dict(_dict, ""), do: {:error, :dict, ""}

  defp decode_dict(dict, <<data::binary>>) do
    with {:ok, key, tail} <- decode_string(data),
         {:ok, value, tail} <- decode_value(tail),
         do: decode_dict(Map.put(dict, key, value), tail)
  end
end
|
lib/bencode.ex
| 0.834879
| 0.412323
|
bencode.ex
|
starcoder
|
defmodule OpenTelemetry.Tracer do
@moduledoc """
This module contains macros for Tracer operations around the lifecycle of the Spans within a Trace.
The Tracer is able to start a new Span as a child of the active Span of the current process, set
a different Span to be the current Span by passing the Span's context, end a Span or run a code
block within the context of a newly started span that is ended when the code block completes.
The macros use the Tracer registered to the Application the module using the macro is included in,
assuming `OpenTelemetry.register_application_tracer/1` has been called for the Application. If
not then the default Tracer is used.
require OpenTelemetry.Tracer
OpenTelemetry.Tracer.with_span \"span-1\" do
... do something ...
end
"""
# NOTE: these are macros rather than functions so that `__MODULE__` inside
# each quoted body expands to the *calling* module — every call site looks
# up the tracer registered for its own application via
# `:opentelemetry.get_tracer/1`.
@type start_opts() :: %{optional(:parent) => OpenTelemetry.span() | OpenTelemetry.span_ctx(),
optional(:attributes) => OpenTelemetry.attributes(),
optional(:sampler) => :ot_sampler.sampler(),
optional(:links) => OpenTelemetry.links(),
optional(:is_recording) => boolean(),
optional(:start_time) => :opentelemetry.timestamp(),
optional(:kind) => OpenTelemetry.span_kind()}
@doc """
Starts a new span and makes it the current active span of the current process.
The current active Span is used as the parent of the created Span unless a `parent` is given in the
`t:start_opts/0` argument or there is no active Span. If there is neither a current Span or a
`parent` option given then the Tracer checks for an extracted SpanContext to use as the parent. If
there is also no extracted context then the created Span is a root Span.
"""
# `bind_quoted` evaluates `name`/`opts` exactly once before injecting them
# into the quoted body.
defmacro start_span(name, opts \\ quote(do: %{})) do
quote bind_quoted: [name: name, start_opts: opts] do
:ot_tracer.start_span(:opentelemetry.get_tracer(__MODULE__), name, start_opts)
end
end
@doc """
Starts a new span but does not make it the current active span of the current process.
This is particularly useful when creating a child Span that is for a new process. Before spawning
the new process start an inactive Span, which uses the current context as the parent, then
pass this new SpanContext as an argument to the spawned function and in that function use
`set_span/1`.
The current active Span is used as the parent of the created Span unless a `parent` is given in the
`t:start_opts/0` argument or there is no active Span. If there is neither a current Span or a
`parent` option given then the Tracer checks for an extracted SpanContext to use as the parent. If
there is also no extracted context then the created Span is a root Span.
"""
defmacro start_inactive_span(name, opts \\ quote(do: %{})) do
quote bind_quoted: [name: name, start_opts: opts] do
:ot_tracer.start_inactive_span(:opentelemetry.get_tracer(__MODULE__), name, start_opts)
end
end
@doc """
Takes a `t:OpenTelemetry.span_ctx/0` and the Tracer sets it to the currently active Span.
"""
defmacro set_span(span_ctx) do
quote bind_quoted: [span_ctx: span_ctx] do
:ot_tracer.set_span(:opentelemetry.get_tracer(__MODULE__), span_ctx)
end
end
@doc """
End the Span. Sets the end timestamp for the currently active Span. This has no effect on any
child Spans that may exist of this Span.
The default Tracer in the OpenTelemetry Erlang/Elixir SDK will then set the parent, if there
is a local parent of the current Span, to the current active Span.
"""
defmacro end_span() do
quote do
:ot_tracer.end_span(:opentelemetry.get_tracer(__MODULE__))
end
end
@doc """
Creates a new span which is ended automatically when the `block` completes.
See `start_span/2` and `end_span/0`.
"""
# The user's block is wrapped in a one-argument anonymous function;
# `:ot_tracer.with_span/4` runs it and ends the span when it returns.
defmacro with_span(name, start_opts \\ quote(do: %{}), do: block) do
quote do
:ot_tracer.with_span(:opentelemetry.get_tracer(__MODULE__),
unquote(name),
unquote(start_opts),
fn _ -> unquote(block) end)
end
end
@doc """
Returns the currently active `t:OpenTelemetry.tracer_ctx/0`.
"""
defmacro current_ctx() do
quote do
:ot_tracer.current_ctx(:opentelemetry.get_tracer(__MODULE__))
end
end
@doc """
Returns the currently active `t:OpenTelemetry.span_ctx/0`.
"""
defmacro current_span_ctx() do
quote do
:ot_tracer.current_span_ctx(:opentelemetry.get_tracer(__MODULE__))
end
end
end
|
lib/open_telemetry/tracer.ex
| 0.810066
| 0.543106
|
tracer.ex
|
starcoder
|
defmodule FirestormData.Category do
  @moduledoc """
  A `Category` is a grouping of related `Thread`s. Categories are modeled as a
  Forest of Trees.
  """

  defmodule TitleSlug do
    @moduledoc """
    A configuration for turning category titles into slugs.

    Generated slugs are de-duplicated by appending an increasing numeric
    suffix (`slug`, `slug-1`, `slug-2`, ...) until an unused one is found.
    """
    use EctoAutoslugField.Slug, from: :title, to: :slug

    alias FirestormData.{Repo, Category}
    import Ecto.{Query}

    # Builds the base slug from the source fields, then disambiguates it
    # against slugs already stored in the database.
    def build_slug(sources) do
      base_slug = super(sources)
      get_unused_slug(base_slug, 0)
    end

    # Recursively probes `base_slug`, `base_slug-1`, ... for a free slug.
    def get_unused_slug(base_slug, number) do
      slug = get_slug(base_slug, number)

      if slug_used?(slug) do
        get_unused_slug(base_slug, number + 1)
      else
        slug
      end
    end

    # Returns true when a category already owns `slug`.
    # Fixed to return a boolean: `?`-named predicates should not return
    # `Category.t | nil` (the previous truthy/falsy contract still holds).
    def slug_used?(slug) do
      existing =
        Category
        |> where(slug: ^slug)
        |> Repo.one()

      not is_nil(existing)
    end

    def get_slug(base_slug, 0), do: base_slug

    def get_slug(base_slug, number) do
      "#{base_slug}-#{number}"
    end
  end

  use Ecto.Schema
  import Ecto.Changeset
  alias FirestormData.{Repo, Thread, View, Follow, Tagging, Tag}
  use Arbor.Tree

  schema "categories" do
    field :title, :string
    field :slug, TitleSlug.Type
    field :children, :any, virtual: true
    field :ancestors, :any, virtual: true
    belongs_to :parent, __MODULE__
    has_many :threads, Thread
    has_many :views, {"categories_views", View}, foreign_key: :assoc_id
    has_many :follows, {"categories_follows", Follow}, foreign_key: :assoc_id
    has_many :taggings, {"categories_taggings", Tagging}, foreign_key: :assoc_id
    many_to_many :tags, Tag, join_through: "categories_taggings", join_keys: [assoc_id: :id, tag_id: :id]
    timestamps()
  end

  @required_fields ~w(title)a
  @optional_fields ~w(parent_id slug)a

  @doc """
  Builds a changeset for a category.

  Requires `:title`; `:parent_id` and `:slug` are optional. A slug is
  generated from the title when absent.
  """
  # NOTE(review): the `:empty` default is the Ecto 1.x convention; newer
  # Ecto expects `%{}` here — confirm the pinned Ecto version before
  # changing it.
  def changeset(record, params \\ :empty) do
    record
    |> cast(params, @required_fields ++ @optional_fields)
    |> validate_required(@required_fields)
    |> TitleSlug.maybe_generate_slug()
    |> TitleSlug.unique_constraint()
  end

  @doc """
  Returns all categories.
  """
  def categories do
    Repo.all(__MODULE__)
  end

  @doc """
  Returns a deterministic number in 0..359 derived from the category's slug
  (e.g. usable as a display hue).
  """
  def color(category) do
    hash_number(category)
  end

  # SHA-1 digest of the slug; the digest bytes feed `hash_number/1`.
  def hash(category) do
    :crypto.hash(:sha, category.slug)
  end

  # The digest as a list of bytes.
  def hashlist(category) do
    for <<num <- hash(category)>>, do: num
  end

  # Sum of digest bytes reduced modulo 360.
  def hash_number(category) do
    category
    |> hashlist()
    |> Enum.sum()
    |> rem(360)
  end
end
|
apps/firestorm_data/lib/firestorm_data/schema/category.ex
| 0.782413
| 0.437523
|
category.ex
|
starcoder
|
defmodule Xipper do
@moduledoc """
An Elixir implementation of [Huet's Zipper](https://www.st.cs.uni-saarland.de/edu/seminare/2005/advanced-fp/docs/huet-zipper.pdf), with gratitude to <NAME>'s
[Clojure implementation](https://clojure.github.io/clojure/clojure.zip-api.html).
Zippers provide an elegant solution for traversing a tree-like data structure,
while maintaining enough state data to reconstruct the entire tree from any
of its child nodes.
All that is required to create a zipper for a data structure is the data structure
itself and a set of functions that define behaviours around nodes of the data structure.
See `Xipper.new/4` for details.
For the sake of brevity, the documentation for this module's functions will
assume the following code has been run before each example:
iex> zipper = Xipper.new(
...> [1, 2, [3, 4], 5],
...> &is_list/1,
...> fn node -> node end,
...> fn _node, children -> children end
...> )
iex> zipper.focus
[1, 2, [3, 4], 5]
Again, see `new/4` for an explanation of the functions passed as arguments here.
"""
defstruct [
focus: nil,
left: [],
right: [],
parents: [],
is_end: false,
functions: [
is_branch: nil,
children: nil,
make_node: nil
]
]
@type is_branch_function :: (any -> boolean)
@type children_function :: (any -> [any])
@type make_node_function :: (any, any, any -> any)
@type functions :: [
is_branch: __MODULE__.is_branch_function,
children: __MODULE__.children_function,
make_node: __MODULE__.make_node_function
]
@type parent :: [
focus: any,
left: any,
right: any
]
@type t :: %__MODULE__{
focus: any,
left: [any],
right: [any],
parents: [__MODULE__.parent],
is_end: boolean,
functions: __MODULE__.functions
}
@type error :: {:error, atom}
@type maybe_zipper :: __MODULE__.t | __MODULE__.error
@doc """
Creates a new zipper.
Creating a zipper requires four arguments. The first argument is the data
structure to be traversed by the zipper, and the final three arguments are
functions. In order, these functions are:
1. a function that takes a node from the data structure and returns true if it
is a branch node (that is, it has children or can have children), and false
otherwise
1. a function that takes a node from the data structure and returns its children
if it is a branch node
1. a function that takes a node and a list of children and returns a new node
with those children
As an example, the following code returns a zipper for a nested list.
## Example
iex> zipper = Xipper.new(
...> [1, 2, [3, 4], 5],
...> &is_list/1,
...> fn node -> node end,
...> fn _node, children -> children end
...> )
iex> zipper.focus
[1, 2, [3, 4], 5]
The given arguments are
1. the root list
1. a function for defining a branch node -- in this case whether a node is a list
1. a function for returing a node's children -- since a branch node is simply a list, this returns the node itself
1. a function for creating a new node -- since a branch is just a list of its children, this function returns the list of children as the new node
"""
@spec new(any, __MODULE__.is_branch_function, __MODULE__.children_function, __MODULE__.make_node_function) :: __MODULE__.t
def new(root, is_branch_fn, children_fn, make_node_fn) do
  # Bundle the user-supplied behaviour functions once, then build the zipper.
  behaviour_fns = [
    is_branch: is_branch_fn,
    children: children_fn,
    make_node: make_node_fn
  ]

  %__MODULE__{focus: root, functions: behaviour_fns}
end
@doc """
Returns the current focus of the zipper.
## Example
iex> Xipper.focus(zipper)
[1, 2, [3, 4], 5]
iex> zipper |> Xipper.down |> Xipper.focus
1
"""
@spec focus(__MODULE__.t) :: any
def focus(%{focus: current_focus}), do: current_focus
@doc """
Returns true if the current focus of the zipper is a branch node, false
otherwise.
## Example
iex> Xipper.is_branch(zipper)
true
iex> zipper |> Xipper.down |> Xipper.is_branch
false
"""
@spec is_branch(__MODULE__.t) :: boolean
def is_branch(%__MODULE__{focus: focus, functions: functions}) do
  # Delegates to the user-supplied branch predicate.
  functions[:is_branch].(focus)
end
@doc """
Returns a node's children if called on a branch node, an error tuple
otherwise.
## Example
iex> Xipper.children(zipper)
[1, 2, [3, 4], 5]
iex> zipper |> Xipper.down |> Xipper.children
{:error, :children_of_leaf}
"""
@spec children(__MODULE__.t) :: __MODULE__.maybe_zipper
def children(%__MODULE__{focus: focus} = zipper) do
  if is_branch(zipper) do
    zipper.functions[:children].(focus)
  else
    {:error, :children_of_leaf}
  end
end
@doc """
Takes a zipper, a node, and a list of child nodes and returns a new node
constructed from the node and children via the user-defined `make_node` function
passed in to `Xipper.new/4`.
In the case of our example zipper, since a list node's children are simply the
list itself, in this context this function will return the list of children passed
as the third argument.
## Example
iex> Xipper.make_node(zipper, [1,2,3], [4,5,6])
[4, 5, 6]
"""
@spec make_node(__MODULE__.t, any, [any]) :: any
def make_node(zipper, node, children) do
  builder = zipper.functions[:make_node]
  builder.(node, children)
end
@doc """
Shifts the zipper's focus down to the leftmost child node of the current focus.
This function returns an error tuple if the current focus is a leaf node, or a
branch node with no children.
## Example
iex> zipper = Xipper.down(zipper)
iex> Xipper.focus(zipper)
1
iex> Xipper.down(zipper)
{:error, :down_from_leaf}
"""
@spec down(__MODULE__.t) :: __MODULE__.maybe_zipper
def down(%__MODULE__{} = zipper) do
  if is_branch(zipper) do
    descend(zipper, children(zipper))
  else
    {:error, :down_from_leaf}
  end
end

# Shift focus onto the first child; the current frame is pushed onto
# `parents` so `up/1` can rebuild the original node later.
defp descend(_zipper, []), do: {:error, :down_from_empty_branch}

defp descend(zipper, [first_child | siblings]) do
  %__MODULE__{
    zipper
    | focus: first_child,
      left: [],
      right: siblings,
      parents: [generate_parent_element(zipper) | zipper.parents]
  }
end

# Keyword order matches what `up/1` destructures: focus, left, right.
defp generate_parent_element(%__MODULE__{focus: focus, left: left, right: right}) do
  [focus: focus, left: left, right: right]
end
@doc """
Shifts the zipper's focus to the sibling node directly to the right of the current focus.
This function returns an error tuple if the current focus is the rightmost
of its siblings.
## Example
iex> zipper = Xipper.down(zipper)
iex> zipper |> Xipper.right |> Xipper.focus
2
iex> zipper = Xipper.rightmost(zipper)
iex> Xipper.focus(zipper)
5
iex> Xipper.right(zipper)
{:error, :right_of_rightmost}
"""
@spec right(__MODULE__.t) :: __MODULE__.maybe_zipper
def right(%__MODULE__{right: []}), do: {:error, :right_of_rightmost}

def right(%__MODULE__{focus: focus, left: left, right: [next_focus | remaining]} = zipper) do
  # The old focus becomes the nearest left sibling of the new focus.
  %__MODULE__{zipper | focus: next_focus, right: remaining, left: [focus | left]}
end
@doc """
Shifts the zipper's focus to the sibling node directly to the left of the current focus.
This function returns an error tuple if the current focus is the leftmost
of its siblings.
## Example
iex> zipper = Xipper.down(zipper)
iex> zipper |> Xipper.left
{:error, :left_of_leftmost}
iex> zipper = zipper |> Xipper.rightmost |> Xipper.left
iex> Xipper.focus(zipper)
[3, 4]
"""
@spec left(__MODULE__.t) :: __MODULE__.maybe_zipper
def left(%__MODULE__{left: []}), do: {:error, :left_of_leftmost}

def left(%__MODULE__{focus: focus, left: [prev_focus | remaining], right: right} = zipper) do
  # The old focus becomes the nearest right sibling of the new focus.
  %__MODULE__{zipper | focus: prev_focus, left: remaining, right: [focus | right]}
end
@doc """
Shifts the zipper's focus to the current focus's parent.
This function returns an error if the current focus is the root of the zipper.
## Example
iex> zipper = Xipper.down(zipper)
iex> Xipper.focus(zipper)
1
iex> zipper = Xipper.up(zipper)
iex> Xipper.focus(zipper)
[1, 2, [3, 4], 5]
iex> Xipper.up(zipper)
{:error, :up_from_root}
"""
@spec up(__MODULE__.t) :: __MODULE__.maybe_zipper
def up(%__MODULE__{parents: []}), do: {:error, :up_from_root}

def up(%__MODULE__{parents: [parent | grandparents]} = zipper) do
  [focus: parent_focus, left: parent_left, right: parent_right] = parent

  # Reassemble the parent's children from the focus and both sibling stacks
  # (`left` is stored reversed).
  reassembled = Enum.reverse(zipper.left, [zipper.focus | zipper.right])

  %__MODULE__{
    zipper
    | focus: make_node(zipper, parent_focus, reassembled),
      left: parent_left,
      right: parent_right,
      parents: grandparents
  }
end
@doc """
Returns all right-hand siblings of a node.
## Example
iex> zipper |> Xipper.down |> Xipper.rights
[2, [3, 4], 5]
"""
@spec rights(__MODULE__.t) :: [any]
def rights(%__MODULE__{} = zipper), do: zipper.right
@doc """
Returns all left-hand siblings of a node.
## Example
iex> zipper |> Xipper.down |> Xipper.lefts
[]
iex> zipper |> Xipper.down |> Xipper.rightmost |> Xipper.lefts
[1, 2, [3, 4]]
"""
@spec lefts(__MODULE__.t) :: [any]
# `left` is stored innermost-first, so reverse it for presentation order.
def lefts(%__MODULE__{} = zipper), do: Enum.reverse(zipper.left)
@doc """
Traverses a zipper upwards to the root of the zipper.
This function has no effect if the current focus is already the root.
## Example
iex> zipper = Xipper.root(zipper)
iex> Xipper.focus(zipper)
[1, 2, [3, 4], 5]
iex> zipper = zipper |> Xipper.down |> Xipper.right |> Xipper.right |> Xipper.down
iex> Xipper.focus(zipper)
3
iex> zipper = Xipper.root(zipper)
iex> Xipper.focus(zipper)
[1, 2, [3, 4], 5]
"""
@spec root(__MODULE__.t) :: __MODULE__.t
def root(%__MODULE__{} = zipper) do
  case up(zipper) do
    {:error, :up_from_root} -> zipper
    parent_zipper -> root(parent_zipper)
  end
end
@doc """
Moves focus to the rightmost sibling of the current focus.
iex> zipper = zipper |> Xipper.down |> Xipper.rightmost
iex> Xipper.focus(zipper)
5
"""
@spec rightmost(__MODULE__.t) :: __MODULE__.t
def rightmost(%__MODULE__{} = zipper) do
  case right(zipper) do
    {:error, :right_of_rightmost} -> zipper
    shifted -> rightmost(shifted)
  end
end
@doc """
Moves focus to the leftmost sibling of the current focus.
iex> zipper = zipper |> Xipper.down |> Xipper.right |> Xipper.right
iex> Xipper.focus(zipper)
[3 ,4]
iex> zipper = Xipper.leftmost(zipper)
iex> Xipper.focus(zipper)
1
"""
@spec leftmost(__MODULE__.t) :: __MODULE__.t
def leftmost(%__MODULE__{} = zipper) do
  case left(zipper) do
    {:error, :left_of_leftmost} -> zipper
    shifted -> leftmost(shifted)
  end
end
@doc """
Moves to the next node in a depth-first walk through the zipper.
`next/1` will attempt to move `down/1`, then `right/1`, and then seek back up
the zipper until it finds a right-hand sibling to move to. Once it reaches the
end of the walk it will return the root of the zipper indefinitely.
## Example
iex> Xipper.focus(zipper)
[1, 2, [3, 4], 5]
iex> zipper = Xipper.next(zipper)
iex> Xipper.focus(zipper)
1
iex> zipper = Xipper.next(zipper)
iex> Xipper.focus(zipper)
2
iex> zipper = Xipper.next(zipper)
iex> Xipper.focus(zipper)
[3, 4]
iex> zipper = Xipper.next(zipper)
iex> Xipper.focus(zipper)
3
iex> zipper = Xipper.next(zipper)
iex> Xipper.focus(zipper)
4
iex> zipper = Xipper.next(zipper)
iex> Xipper.focus(zipper)
5
iex> zipper = Xipper.next(zipper)
iex> Xipper.focus(zipper)
[1, 2, [3, 4], 5]
"""
@spec next(__MODULE__.t) :: __MODULE__.t
def next(zipper) do
  if is_end(zipper) do
    zipper
  else
    # Prefer descending, then the right sibling, then climbing.
    with {:error, _} <- down(zipper),
         {:error, _} <- right(zipper) do
      _next(zipper)
    else
      %__MODULE__{} = moved -> moved
    end
  end
end

# Seek upward for the nearest ancestor with an unvisited right sibling;
# reaching the root marks the walk as finished (`is_end: true`).
defp _next(zipper) do
  case up(zipper) do
    {:error, _} ->
      %{zipper | is_end: true}

    parent ->
      case right(parent) do
        {:error, _} -> _next(parent)
        sibling -> sibling
      end
  end
end
@doc """
Moves to the previous node in a depth-first walk through the zipper.
`prev/1` will attempt to move `left/1`, then recuse down through its left siblings
children, then `up/1`, until it reaches the root of the zipper. Calling
`prev/1` on a zipper that has reached the end of its depth-first walk
(for which `is_end/1` returns true), will return the same zipper indefinitely.
## Example
iex> Xipper.focus(zipper)
[1, 2, [3, 4], 5]
iex> zipper = zipper |> Xipper.down |> Xipper.rightmost
iex> Xipper.focus(zipper)
5
iex> zipper = Xipper.prev(zipper)
iex> Xipper.focus(zipper)
4
iex> zipper = Xipper.prev(zipper)
iex> Xipper.focus(zipper)
3
iex> zipper = Xipper.prev(zipper)
iex> Xipper.focus(zipper)
[3, 4]
iex> zipper = Xipper.prev(zipper)
iex> Xipper.focus(zipper)
2
iex> zipper = Xipper.prev(zipper)
iex> Xipper.focus(zipper)
1
iex> zipper = Xipper.prev(zipper)
iex> Xipper.focus(zipper)
[1, 2, [3, 4], 5]
iex> zipper = zipper |> Xipper.down |> Xipper.rightmost |> Xipper.next
iex> zipper = Xipper.prev(zipper)
iex> Xipper.focus(zipper)
[1, 2, [3, 4], 5]
"""
@spec prev(__MODULE__.t) :: __MODULE__.t
def prev(zipper) do
  cond do
    is_end(zipper) ->
      zipper

    match?({:error, _}, left(zipper)) ->
      # No left sibling: the previous node in a depth-first walk is the parent.
      up(zipper)

    true ->
      zipper |> left() |> _prev()
  end
end

# Dive to the deepest, rightmost descendant of the node just moved to.
defp _prev(zipper) do
  case down(zipper) do
    {:error, _} -> zipper
    lower -> lower |> rightmost() |> _prev()
  end
end
@doc """
Returns true if the zipper has reached the end of a depth-first walk, false
otherwise.
## Example
iex> Xipper.is_end(zipper)
false
iex> zipper |> Xipper.down |> Xipper.rightmost |> Xipper.next |> Xipper.is_end
true
"""
@spec is_end(__MODULE__.t) :: boolean
def is_end(%__MODULE__{is_end: true}), do: true
def is_end(%__MODULE__{is_end: _}), do: false
@spec path(__MODULE__.t) :: [any]
def path(zipper = %__MODULE__{}) do
zipper.parents
|> Enum.reverse
|> Enum.map(&(&1[:focus]))
end
@doc """
Inserts the given node as the immediate left-hand sibling of the current focus,
without shifting focus.
This function returns an error tuple if called on the root of a zipper.
## Example
iex> zipper = Xipper.down(zipper)
iex> zipper = Xipper.insert_left(zipper, -10)
iex> zipper |> Xipper.root |> Xipper.focus
[-10, 1, 2, [3, 4], 5]
"""
@spec insert_left(__MODULE__.t, any) :: __MODULE__.maybe_zipper
def insert_left(%__MODULE__{parents: []}, _), do: {:error, :insert_left_of_root}
def insert_left(zipper = %__MODULE__{}, new_sibling) do
%{ zipper | left: [new_sibling|zipper.left]}
end
@doc """
Inserts the given node as the immediate right-hand sibling of the current focus,
without shifting focus.
This function returns an error tuple if called on the root of a zipper.
## Example
iex> zipper = Xipper.down(zipper)
iex> zipper = Xipper.insert_right(zipper, 1.5)
iex> zipper |> Xipper.root |> Xipper.focus
[1, 1.5, 2, [3, 4], 5]
"""
@spec insert_right(__MODULE__.t, any) :: __MODULE__.maybe_zipper
def insert_right(%__MODULE__{parents: []}, _), do: {:error, :insert_right_of_root}
def insert_right(zipper = %__MODULE__{}, new_sibling) do
%{ zipper | right: [new_sibling|zipper.right]}
end
@doc """
Replaces the currently focused node with the result of applying the given
function to that node.
## Example
iex> zipper = Xipper.down(zipper)
iex> Xipper.focus(zipper)
1
iex> zipper = Xipper.edit(zipper, &to_string/1)
iex> Xipper.focus(zipper)
"1"
"""
@spec edit(__MODULE__.t, (any -> any)) :: __MODULE__.t
def edit(zipper = %__MODULE__{}, func) do
%__MODULE__{zipper | focus: func.(zipper.focus)}
end
@doc """
Replaces the current focus with the node passed as the second argument.
## Example
iex> zipper = Xipper.down(zipper)
iex> Xipper.focus(zipper)
1
iex> zipper = Xipper.replace(zipper, 42)
iex> Xipper.focus(zipper)
42
"""
@spec replace(__MODULE__.t, any) :: __MODULE__.t
def replace(zipper = %__MODULE__{}, new_focus) do
edit(zipper, fn _ -> new_focus end)
end
@doc """
Appends the given child as the right-most child of the current focus, if it is
a branch node, without shifting focus.
This function returns an error if trying to insert a child into a leaf node.
## Example
iex> zipper = Xipper.append_child(zipper, [6, 7])
iex> Xipper.focus(zipper)
[1, 2, [3, 4], 5, [6, 7]]
iex> zipper |> Xipper.down |> Xipper.append_child(1.5)
{:error, :append_child_of_leaf}
"""
@spec append_child(__MODULE__.t, any) :: __MODULE__.maybe_zipper
def append_child(zipper = %__MODULE__{}, new_child) do
case is_branch(zipper) do
false -> {:error, :append_child_of_leaf}
true -> %{zipper |
focus: make_node(zipper, zipper.focus, children(zipper) ++ [new_child])
}
end
end
@doc """
Inserts the given child as the left-most child of the current focus, if it is
a branch node, without shifting focus.
This function returns an error if trying to insert a child into a leaf node.
## Example
iex> zipper = Xipper.insert_child(zipper, 0)
iex> Xipper.focus(zipper)
[0, 1, 2, [3, 4], 5]
iex> zipper |> Xipper.down |> Xipper.insert_child(0)
{:error, :insert_child_of_leaf}
"""
@spec insert_child(__MODULE__.t, any) :: __MODULE__.maybe_zipper
def insert_child(zipper = %__MODULE__{}, new_child) do
case is_branch(zipper) do
false -> {:error, :insert_child_of_leaf}
true -> %{zipper |
focus: make_node(zipper, zipper.focus, [new_child|children(zipper)])
}
end
end
@doc """
Removes the current focus of the zipper and shifts focus to where the previous
node in a depth-first walk would be.
This function returns an error if trying to remove the root of the zipper.
## Example
iex> Xipper.remove(zipper)
{:error, :remove_of_root}
iex> zipper = zipper |> Xipper.down |> Xipper.remove
iex> Xipper.focus(zipper)
[2, [3, 4], 5]
iex> zipper = zipper |> Xipper.down |> Xipper.right |> Xipper.remove
iex> Xipper.focus(zipper)
1
iex> Xipper.rights(zipper)
[[3, 4], 5]
"""
@spec remove(__MODULE__.t) :: __MODULE__.maybe_zipper
def remove(zipper = %__MODULE__{}) do
case left(zipper) do
{:error, _} ->
case up(zipper) do
{:error, _} -> {:error, :remove_of_root}
up_zipper ->
[_|new_children] = children(up_zipper)
%{up_zipper | focus: make_node(up_zipper, up_zipper.focus, new_children)}
end
left_zipper ->
[_|new_right] = left_zipper.right
%{left_zipper | right: new_right}
end
end
end
|
lib/xipper.ex
| 0.89662
| 0.709032
|
xipper.ex
|
starcoder
|
defmodule Absinthe.Schema.Notation.Scope do
  @moduledoc false

  # Name of the module attribute used as a per-module stack of open scopes.
  @stack :absinthe_notation_scopes

  defstruct name: nil, recordings: [], attrs: []

  use Absinthe.Type.Fetch

  # Push a new scope onto `mod`'s scope stack.
  def open(name, mod, attrs \\ []) do
    Module.put_attribute(mod, @stack, [%__MODULE__{name: name, attrs: attrs} | on(mod)])
  end

  # Pop the innermost scope off the stack and return it (nil if none open).
  def close(mod) do
    {current, rest} = split(mod)
    Module.put_attribute(mod, @stack, rest)
    current
  end

  # Split the stack into `{current_scope_or_nil, rest}`.
  def split(mod) do
    case on(mod) do
      [] ->
        {nil, []}

      [current | rest] ->
        {current, rest}
    end
  end

  # The innermost open scope, or nil when no scope is open.
  def current(mod) do
    {c, _} = split(mod)
    c
  end

  # Record that `{kind, identifier}` was defined in the current scope.
  # A no-op outside of any scope (update_current/2 ignores a nil scope).
  def recorded!(mod, kind, identifier) do
    update_current(mod, fn %{recordings: recs} = scope ->
      %{scope | recordings: [{kind, identifier} | recs]}
    end)
  end

  @doc """
  Check if a certain operation has been recorded in the current scope.
  ## Examples
  See if an input object with the identifier `:input` has been defined from
  this scope:
  ```
  recorded?(mod, :input_object, :input)
  ```
  See if the `:description` attribute has been
  ```
  recorded?(mod, :attr, :description)
  ```
  """
  @spec recorded?(atom, atom, atom) :: boolean
  def recorded?(mod, kind, identifier) do
    scope = current(mod)

    case kind do
      :attr ->
        # Attributes passed directly to the macro that created the scope
        # short-circuit the need to check the recordings list. Normalized
        # with `if` so the return is a real boolean, per the spec (the
        # previous `||` form could leak the raw attribute value).
        if scope.attrs[identifier] do
          true
        else
          recording_marked?(scope, kind, identifier)
        end

      _ ->
        recording_marked?(scope, kind, identifier)
    end
  end

  # True if the scope's recordings contain `{kind, identifier}`.
  # Uses `Enum.any?/2` so a boolean is returned; the previous `Enum.find/2`
  # returned the matched tuple itself (or nil), violating recorded?/3's spec.
  defp recording_marked?(scope, kind, identifier) do
    Enum.any?(scope.recordings, &match?({^kind, ^identifier}, &1))
  end

  # Set an attribute on the current scope. With `accumulate: true` the value
  # is prepended to the (possibly empty) list already stored under `key`.
  def put_attribute(mod, key, value, opts \\ [accumulate: false]) do
    if opts[:accumulate] do
      update_current(mod, fn scope ->
        new_attrs = update_in(scope.attrs, [key], &[value | &1 || []])
        %{scope | attrs: new_attrs}
      end)
    else
      update_current(mod, fn scope ->
        %{scope | attrs: Keyword.put(scope.attrs, key, value)}
      end)
    end
  end

  # Apply `fun` to the current scope, leaving the rest of the stack intact.
  # A no-op when no scope is open. (Previously an empty stack resulted in
  # `fun.(nil)` being pushed onto the stack, corrupting it with a nil entry.)
  defp update_current(mod, fun) do
    case split(mod) do
      {nil, _rest} ->
        :ok

      {current, rest} ->
        Module.put_attribute(mod, @stack, [fun.(current) | rest])
    end
  end

  # The scope stack for `mod`, initializing it to `[]` on first use.
  def on(mod) do
    case Module.get_attribute(mod, @stack) do
      nil ->
        Module.put_attribute(mod, @stack, [])
        []

      value ->
        value
    end
  end
end
|
lib/absinthe/schema/notation/scope.ex
| 0.813535
| 0.808257
|
scope.ex
|
starcoder
|
defmodule FaktoryWorker.Batch do
  @moduledoc """
  Supports Faktory Batch operations

  [Batch support](https://github.com/contribsys/faktory/wiki/Ent-Batches) is a
  Faktory Enterprise feature. It allows jobs to pushed as part of a batch. When
  all jobs in a batch have completed, Faktory will queue a callback job. This
  allows building complex job workflows with dependencies.

  ## Creating a batch

  A batch is created using `new!/1` and must provide a description and declare
  one of the success or complete callbacks. The `new!/1` function returns the
  batch ID (or `bid`) which identifies the batch for future commands.

  Once created, jobs can be pushed to the batch by providing the `bid` in the
  `custom` payload. These jobs must be pushed synchronously.

  ```
  alias FaktoryWorker.Batch

  {:ok, bid} = Batch.new!(on_success: {MyApp.EmailReportJob, [], []})

  MyApp.Job.perform_async([1, 2], custom: %{"bid" => bid})
  MyApp.Job.perform_async([3, 4], custom: %{"bid" => bid})
  MyApp.Job.perform_async([5, 6], custom: %{"bid" => bid})

  Batch.commit(bid)
  ```

  ## Opening a batch

  In order to open a batch, you must know the batch ID. Since FaktoryWorker
  doesn't currently pass the job itself as a parameter to `perform` functions,
  you must explicitly pass it as an argument in order to open the batch as part
  of a job.

  ```
  defmodule MyApp.Job do
    use FaktoryWorker.Job

    def perform(arg1, arg2, bid) do
      Batch.open(bid)

      MyApp.OtherJob.perform_async([1, 2], custom: %{"bid" => bid})

      Batch.commit(bid)
    end
  end
  ```
  """

  alias FaktoryWorker.{ConnectionManager, Job, Pool}

  @type bid :: String.t()

  # Timeout (ms) for checking a connection out of the poolboy pool.
  @default_timeout 5000

  @doc """
  Creates a new Faktory batch

  Returns the batch ID (`bid`) which needs to be passed in the `:custom`
  parameters of every job that should be part of this batch as well as to commit
  the batch.

  ## Opts

  Batch jobs must define a success or complete callback (or both). These
  callbacks are passed as tuples to the `:on_success` and `:on_complete` opts.
  They are defined as a tuple consisting of `{mod, args, opts}` where `mod` is a
  module with a `perform` function that corresponds in arity to the length of `args`.

  Any `opts` that can be passed to `perform_async/2` can be provided as `opts`
  to the callback except for `:faktory_worker`.

  If neither callback is provided, an error will be raised.

  ### `:on_success`

  See above.

  ### `:on_complete`

  See above.

  ### `:description`

  The description, if provided, is shown in Faktory's Web UI on the batch
  listing tab.

  ### `:parent_bid`

  The parent batch ID--only used if you are creating a child batch.

  ### `:faktory_worker`

  The name of the `FaktoryWorker` instance (determines which connection pool
  will be used).
  """
  @spec new!(Keyword.t()) :: {:ok, bid()} | {:error, any()}
  def new!(opts \\ []) do
    success = Keyword.get(opts, :on_success)
    complete = Keyword.get(opts, :on_complete)
    bid = Keyword.get(opts, :parent_bid)
    description = Keyword.get(opts, :description)

    payload =
      %{}
      |> maybe_put_description(description)
      |> maybe_put_parent_bid(bid)
      |> maybe_put_callback(:success, success)
      |> maybe_put_callback(:complete, complete)
      |> validate!()

    send_command({:batch_new, payload}, opts)
  end

  @doc """
  Commits the batch identified by `bid`

  Faktory will begin scheduling jobs that are part of the batch before the batch
  is committed, but the batch's callback jobs will not be queued until it has
  been committed.
  """
  def commit(bid, opts \\ []) do
    send_command({:batch_commit, bid}, opts)
  end

  @doc """
  Opens the batch identified by `bid`

  An existing batch needs to be re-opened in order to add more jobs to it or to
  add a child batch.

  After opening the batch, it must be committed again using `commit/2`.
  """
  def open(bid, opts \\ []) do
    send_command({:batch_open, bid}, opts)
  end

  @doc """
  Gets the status of a batch

  Returns a map representing the status
  """
  def status(bid, opts \\ []) do
    send_command({:batch_status, bid}, opts)
  end

  # Check a connection out of the configured pool and issue `command` on it.
  defp send_command(command, opts) do
    opts
    |> faktory_name()
    |> Pool.format_pool_name()
    |> :poolboy.transaction(
      &ConnectionManager.Server.send_command(&1, command),
      @default_timeout
    )
  end

  # The moduledoc advertises the `:faktory_worker` option, but this code
  # historically read `:faktory_name`. Accept both for backward compatibility,
  # preferring the documented key, and fall back to the default instance name.
  defp faktory_name(opts) do
    Keyword.get(opts, :faktory_worker) ||
      Keyword.get(opts, :faktory_name, FaktoryWorker)
  end

  defp maybe_put_description(payload, nil), do: payload

  defp maybe_put_description(payload, description),
    do: Map.put_new(payload, :description, description)

  defp maybe_put_parent_bid(payload, nil), do: payload
  defp maybe_put_parent_bid(payload, bid), do: Map.put_new(payload, :parent_bid, bid)

  defp maybe_put_callback(payload, _type, nil), do: payload

  defp maybe_put_callback(payload, type, {mod, job, opts}) do
    job_payload = Job.build_payload(mod, job, opts)
    Map.put_new(payload, type, job_payload)
  end

  # Raises unless at least one of the success/complete callbacks is present.
  defp validate!(payload) do
    success = Map.get(payload, :success)
    complete = Map.get(payload, :complete)

    case {success, complete} do
      {nil, nil} ->
        raise("Faktory batch jobs must declare a success or complete callback")

      {_, _} ->
        payload
    end
  end
end
|
lib/faktory_worker/batch.ex
| 0.871023
| 0.850282
|
batch.ex
|
starcoder
|
defmodule PromEx.Config do
  @moduledoc """
  This module defines a struct that contains all of the fields necessary to configure
  an instance of PromEx.
  While this module does not directly access your Application config, PromEx will call the
  `PromEx.Config.build/1` function directly with the contents of `Application.get_env(:your_otp_app, YourPromEx.Module)`. As
  such, this is an appropriate place to talk about how you go about configuring PromEx via your Application config.
  By default, you can run PromEx without any additional configuration and PromEx will fall back on some sane defaults. Specifically,
  if you were to not add any configuration to your config.exs, dev.exs, prod.exs, etc files it would be the same as setting the
  following config:
  ```elixir
  config :web_app, WebApp.PromEx,
    disabled: false,
    manual_metrics_start_delay: :no_delay,
    drop_metrics_groups: [],
    grafana: :disabled,
    metrics_server: :disabled
  ```
  In this configuration, the Grafana dashboards are not uploaded on application start, and a standalone HTTP metrics server is not
  started. In addition, the `PromEx.ManualMetricsManager` is started without any time delay, and all metrics groups from all the plugins
  are registered and set up.
  If you would like to set up PromEx to communicate with Grafana, your config would look something like:
  ```elixir
  config :web_app, WebApp.PromEx,
    grafana: [
      host: "http://localhost:3000",
      username: "<YOUR_USERNAME>", # Or authenticate via Basic Auth
      password: "<<PASSWORD>>",
      auth_token: "<YOUR_AUTH_TOKEN_HERE>", # Or authenticate via API Token
      upload_dashboards_on_start: true # This is an optional setting and will default to `true`
    ]
  ```
  If you would like PromEx to start a standalone HTTP server to serve your aggregated metrics, you can leverage the `:metrics_server`
  config:
  ```elixir
  config :web_app, WebApp.PromEx,
    metrics_server: [
      port: 4021,
      path: "/metrics", # This is an optional setting and will default to `"/metrics"`
      protocol: :http, # This is an optional setting and will default to `:http`
      pool_size: 5, # This is an optional setting and will default to `5`
      cowboy_opts: [], # This is an optional setting and will default to `[]`
      auth_strategy: :none # This is an optional and will default to `:none`
    ]
  ```
  If you would like the metrics server to be protected behind some sort of authentication, you can configure your `:metrics_server`
  like so:
  ```elixir
  config :web_app, WebApp.PromEx,
    metrics_server: [
      port: 4021,
      auth_strategy: :bearer,
      auth_token: "<YOUR_AUTH_TOKEN>"
    ]
  ```
  ## Option Details
  * `:disabled` - This option will disable the PromEx supervision tree entirely and will not
    start any metrics collectors. This is primarily used for disabling PromEx during testing. Default
    value: false
  * `:manual_metrics_start_delay` - Manual metrics are gathered once on start up and then only when
    you call `PromEx.ManualMetricsManager.refresh_metrics/1`. Sometimes, you may have metrics
    that require your entire supervision tree to be started in order to fetch accurate data.
    This option will allow you to delay the initial metrics capture of the
    `ManualMetricsManager` by a certain number of milliseconds or the `:no_delay` atom if you
    want the metrics to be captured as soon as the `ManualMetricsManager` starts up. Default
    value: `:no_delay`
  * `:drop_metrics_groups` - A list of all the metrics groups that you are not interested in
    tracking. For example, if your application does not leverage Phoenix channels at all but
    you still would like to use the `PromEx.Plugins.Phoenix` plugin, you can pass
    `[:phoenix_channel_event_metrics]` as the value to `:drop_metrics_groups` and that set of
    metrics will not be captured. Default value: `[]`
  * `:grafana` - This key contains the configuration information for connecting to Grafana. Its
    configuration options are:
    * `:host` - The host address of your Grafana instance. In order for PromEx to communicate with
      Grafana this value should be in the format `protocol://host:port` like `http://localhost:3000`
      for example.
    * `:username` - The username that was created in Grafana so that PromEx can upload dashboards
      via the API.
    * `:password` - The password that was created in Grafana so that PromEx can upload dashboards
      via the API.
    * `:auth_token` - The auth token that was created in Grafana so that PromEx can upload dashboards
      via the API.
    * `:upload_dashboards_on_start` - Using the config values that you set in your application config
      (`config.exs`, `dev.exs`, `prod.exs`, etc) PromEx will attempt to upload your Dashboards to
      Grafana using Grafana's HTTP API.
    * `:folder_name` - The name of the folder that PromEx will put all of the project dashboards in.
      PromEx will automatically generate a unique ID for the folder based on the project's otp_app
      value so that it can access the correct folder in Grafana. This also makes sure that different
      Elixir projects running in the same cluster and publishing dashboards to Grafana do not collide
      with one another. If no name is provided, then the dashboards will all be uploaded to the default
      Grafana folder.
    * `:annotate_app_lifecycle` - By enabling this setting, PromEx will leverage the Grafana API to annotate
      when the application was started, and when it was shut down. By default this is disabled but if you
      do enable it, no action is required from you in order to display these events on the dashboards. The
      annotations will automatically contain the necessary tags to only display on the PromEx dashboards.
      The annotation will include information including:
      - Hostname
      - OTP app name
      - App version
      - Git SHA of the last commit (if the GIT_SHA environment variable is present)
      - Git author of the last commit (if the GIT_AUTHOR environment variable is present)
  * `:metrics_server` - This key contains the configuration information needed to run a standalone
    HTTP server powered by Cowboy. This server provides a lightweight solution to serving up PromEx
    metrics. Its configuration options are:
    * `:port` - The port that the Cowboy HTTP server should run on.
    * `:path` - The path that the metrics should be accessible at.
    * `:protocol` - The protocol that the metrics should be accessible over (`:http` or `:https`).
    * `:pool_size` - How many Cowboy processes should be in the pool to handle metrics related requests.
    * `:auth_strategy` - What authentication strategy should be used to authorize requests to your metrics. The
      supported strategies are `:none`, `:bearer`, and `:basic`. Depending on what strategy is selected, you
      will need to also add additional config values. For `:none` (which is the default), no additional
      information needs to be provided. When using a `:bearer` strategy, you'll need to provide a `:auth_token`
      config value. When using `:basic` strategy you'll need to provide `:auth_user` and `:auth_password` values.
    * `:auth_token` - When using a `:bearer` authentication strategy, this field is required to validate the
      incoming request against a valid auth token.
    * `:auth_user` - When using a `:basic` authentication strategy, this field is required to validate the
      incoming request against a valid user.
    * `:auth_password` - When using a `:basic` authentication strategy, this field is required to validate the
      incoming request against a valid password.
    * `:cowboy_opts` - A keyword list of any additional options that should be passed to `Plug.Cowboy` (see
      docs for more information https://hexdocs.pm/plug_cowboy/Plug.Cowboy.html). The `:port` and
      `:transport_options` options are handled by PromEx via the aforementioned config settings and so
      adding them again here has no effect.
  """
  @typedoc """
  - `manual_metrics_start_delay`: How the ManualMetricsManager worker process should be started (instantly or with a millisecond delay).
  - `drop_metrics_groups`: A list of metrics groups that should be omitted from the metrics collection process.
  - `grafana_config`: A map containing all the relevant settings to connect to Grafana.
  - `metrics_server_config`: A map containing all the relevant settings to start a standalone HTTP Cowboy server for metrics.
  """
  @type t :: %__MODULE__{
          disabled: boolean(),
          manual_metrics_start_delay: :no_delay | pos_integer(),
          drop_metrics_groups: MapSet.t(),
          grafana_config: map(),
          metrics_server_config: map()
        }
  defstruct [
    :disabled,
    :manual_metrics_start_delay,
    :drop_metrics_groups,
    :grafana_config,
    :metrics_server_config
  ]
  @doc """
  Create a struct that encapsulates all of the configuration needed to start a PromEx supervisor instance as well as all
  of the worker processes.
  """
  @spec build(keyword()) :: __MODULE__.t()
  def build(opts) do
    grafana_config =
      opts
      |> Keyword.get(:grafana, :disabled)
      |> generate_grafana_config()
    metrics_server_config =
      opts
      |> Keyword.get(:metrics_server, :disabled)
      |> generate_metrics_server_config()
    %__MODULE__{
      disabled: Keyword.get(opts, :disabled, false),
      manual_metrics_start_delay: Keyword.get(opts, :manual_metrics_start_delay, :no_delay),
      drop_metrics_groups: opts |> Keyword.get(:drop_metrics_groups, []) |> MapSet.new(),
      grafana_config: grafana_config,
      metrics_server_config: metrics_server_config
    }
  end
  # Build the Grafana section of the config. `:host` is mandatory (raises via
  # get_grafana_config/2); every other key has a sensible default or nil.
  defp generate_grafana_config(:disabled), do: :disabled
  defp generate_grafana_config(grafana_opts) do
    %{
      host: grafana_opts |> get_grafana_config(:host) |> normalize_host(),
      username: Keyword.get(grafana_opts, :username),
      password: Keyword.get(grafana_opts, :password),
      auth_token: Keyword.get(grafana_opts, :auth_token),
      upload_dashboards_on_start: Keyword.get(grafana_opts, :upload_dashboards_on_start, true),
      folder_name: Keyword.get(grafana_opts, :folder_name, :default),
      annotate_app_lifecycle: Keyword.get(grafana_opts, :annotate_app_lifecycle, false)
    }
  end
  # Fetch a required Grafana option, raising with a descriptive message if absent.
  defp get_grafana_config(grafana_opts, config_key) do
    case Keyword.fetch(grafana_opts, config_key) do
      {:ok, value} ->
        value
      :error ->
        raise "When configuring the Grafana client for PromEx, the #{inspect(config_key)} key is required."
    end
  end
  # Strip any path/query from the configured host so only scheme://host:port remains.
  defp normalize_host(host_string) do
    host_string
    |> URI.parse()
    |> Map.put(:path, nil)
    |> Map.put(:query, nil)
    |> URI.to_string()
  end
  # Build the metrics-server section of the config. `:port` is mandatory;
  # auth-related keys are validated per strategy in add_auth_config/2.
  defp generate_metrics_server_config(:disabled), do: :disabled
  defp generate_metrics_server_config(metrics_server_opts) do
    base_config = %{
      port: get_metrics_server_config(metrics_server_opts, :port),
      path: Keyword.get(metrics_server_opts, :path, "/metrics"),
      protocol: Keyword.get(metrics_server_opts, :protocol, :http),
      pool_size: Keyword.get(metrics_server_opts, :pool_size, 5),
      cowboy_opts: Keyword.get(metrics_server_opts, :cowboy_opts, []),
      auth_strategy: Keyword.get(metrics_server_opts, :auth_strategy, :none)
    }
    add_auth_config(base_config, metrics_server_opts)
  end
  # Augment the base config with the credentials each auth strategy requires;
  # an unrecognized strategy raises.
  defp add_auth_config(%{auth_strategy: :none} = base_config, _add_auth_config) do
    base_config
  end
  defp add_auth_config(%{auth_strategy: :bearer} = base_config, add_auth_config) do
    Map.put(base_config, :auth_token, get_metrics_server_config(add_auth_config, :auth_token))
  end
  defp add_auth_config(%{auth_strategy: :basic} = base_config, add_auth_config) do
    base_config
    |> Map.put(:auth_user, get_metrics_server_config(add_auth_config, :auth_user))
    |> Map.put(:auth_password, get_metrics_server_config(add_auth_config, :auth_password))
  end
  defp add_auth_config(_base_config, _add_auth_config) do
    raise "Unknown auth strategy provided to PromEx metrics server. Supported strategies include :none, :bearer, or :basic."
  end
  # Fetch a required metrics-server option, raising with a descriptive message if absent.
  defp get_metrics_server_config(metrics_server_opts, config_key) do
    case Keyword.fetch(metrics_server_opts, config_key) do
      {:ok, value} ->
        value
      :error ->
        raise "When configuring the PromEx metrics server, the #{inspect(config_key)} key is required."
    end
  end
end
|
lib/prom_ex/config.ex
| 0.914343
| 0.711392
|
config.ex
|
starcoder
|
defmodule Elixium.Transaction do
  alias Elixium.Transaction
  alias Elixium.Utilities
  alias Elixium.Utxo

  @moduledoc """
  Contains all the functions that pertain to creating valid transactions
  """

  defstruct id: nil,
            inputs: [],
            outputs: [],
            sigs: [],
            # Most transactions will be pay-to-public-key
            txtype: "P2PK"

  # Struct type so specs can reference a real type. (The previous specs used
  # bare module names such as `Transaction`, `List`, and `Map`, which Dialyzer
  # interprets as literal atoms, not as struct/list/map types.)
  @type t :: %__MODULE__{}

  @doc """
  Builds the UTXO outputs for `transaction` from a list of
  `%{addr: addr, amount: amount}` designations. Each output's txoid is derived
  from the transaction id and the designation's index.
  """
  @spec calculate_outputs(t, [map]) :: %{outputs: [map]}
  def calculate_outputs(transaction, designations) do
    # NOTE: the old spec also claimed a `:fee` key, which was never returned.
    outputs =
      designations
      |> Enum.with_index()
      |> Enum.map(fn {designation, idx} ->
        %Utxo{
          txoid: "#{transaction.id}:#{idx}",
          addr: designation.addr,
          amount: designation.amount
        }
      end)

    %{outputs: outputs}
  end

  @doc """
  Creates a signature list based on unique addresses in the inputs. One signature
  is needed for each address.
  """
  @spec create_sig_list([map], t) :: [{binary, binary}]
  def create_sig_list(inputs, transaction) do
    digest = signing_digest(transaction)

    inputs
    |> Enum.uniq_by(& &1.addr)
    |> Enum.map(fn %{addr: addr} ->
      priv = Elixium.KeyPair.get_priv_from_file(addr)
      sig = Elixium.KeyPair.sign(priv, digest)
      {addr, sig}
    end)
  end

  @doc """
  Take the correct amount of Utxo's to send the alloted amount in a transaction.
  """
  @spec take_necessary_utxos(list, integer) :: list | :not_enough_balance
  def take_necessary_utxos(utxos, amount), do: take_necessary_utxos(utxos, [], amount)

  @spec take_necessary_utxos(list, list, integer) :: list | :not_enough_balance
  def take_necessary_utxos([], _, amount) when amount > 0, do: :not_enough_balance

  def take_necessary_utxos([utxo | remaining], chosen, amount) when amount > 0 do
    take_necessary_utxos(remaining, [utxo | chosen], amount - utxo.amount)
  end

  def take_necessary_utxos(_utxos, chosen, _amount), do: chosen

  @doc """
  Each transaction consists of multiple inputs and outputs. Inputs to any
  particular transaction are just outputs from other transactions. This is
  called the UTXO model. In order to efficiently represent the UTXOs within
  the transaction, we can calculate the merkle root of the inputs of the
  transaction.
  """
  @spec calculate_hash(t) :: String.t()
  def calculate_hash(transaction) do
    transaction.inputs
    |> Enum.map(& &1.txoid)
    |> Utilities.calculate_merkle_root()
  end

  @doc """
  In order for a block to be considered valid, it must have a coinbase as the
  FIRST transaction in the block. This coinbase has a single output, designated
  to the address of the miner, and the output amount is the block reward plus
  any transaction fees from within the transaction
  """
  @spec generate_coinbase(integer, String.t()) :: t
  def generate_coinbase(amount, miner_address) do
    timestamp = DateTime.utc_now() |> DateTime.to_string()
    txid = Utilities.sha_base16(miner_address <> timestamp)

    %Transaction{
      id: txid,
      txtype: "COINBASE",
      outputs: [
        %Utxo{txoid: "#{txid}:0", addr: miner_address, amount: amount}
      ]
    }
  end

  @doc """
  Sums the `:amount` fields of a list of UTXOs (works for inputs or outputs).
  """
  @spec sum_inputs(list) :: integer
  def sum_inputs(inputs), do: Enum.reduce(inputs, 0, & &1.amount + &2)

  @spec calculate_fee(t) :: integer
  def calculate_fee(transaction) do
    sum_inputs(transaction.inputs) - sum_inputs(transaction.outputs)
  end

  @doc """
  Takes in a transaction received from a peer which may have malicious or extra
  attributes attached. Removes all extra parameters which are not defined
  explicitly by the transaction struct.
  """
  @spec sanitize(map) :: t
  def sanitize(unsanitized_transaction) do
    sanitized_transaction = struct(Transaction, Map.delete(unsanitized_transaction, :__struct__))
    sanitized_inputs = Enum.map(sanitized_transaction.inputs, &Utxo.sanitize/1)
    sanitized_outputs = Enum.map(sanitized_transaction.outputs, &Utxo.sanitize/1)

    sanitized_transaction
    |> Map.put(:inputs, sanitized_inputs)
    |> Map.put(:outputs, sanitized_outputs)
  end

  @doc """
  Returns the data that a signer of the transaction needs to sign
  """
  @spec signing_digest(t) :: binary
  def signing_digest(%{inputs: inputs, outputs: outputs, id: id, txtype: txtype}) do
    digest = :erlang.term_to_binary(inputs) <> :erlang.term_to_binary(outputs) <> id <> txtype
    :crypto.hash(:sha256, digest)
  end

  @doc """
  Takes in a list of maps that match %{addr: addr, amount: amount} and creates
  a valid transaction.
  """
  @spec create(list, integer) :: t
  def create(designations, fee) do
    utxos = Elixium.Store.Utxo.retrieve_wallet_utxos()

    # Find total amount of elixir being sent in this transaction
    total_amount = Enum.reduce(designations, 0, fn x, acc -> x.amount + acc end)

    # Grab enough UTXOs to cover the total amount plus the fee.
    # NOTE(review): this may return `:not_enough_balance`, which the code below
    # does not handle — the atom would silently end up as `inputs`. Callers
    # should ensure sufficient balance; confirm intended behavior upstream.
    inputs = take_necessary_utxos(utxos, [], total_amount + fee)

    tx = %Transaction{inputs: inputs}
    tx = Map.put(tx, :id, calculate_hash(tx))

    # UTXO totals will likely exceed the total amount we're trying to send.
    # Let's see what the difference is
    remaining =
      inputs
      |> sum_inputs()
      |> Kernel.-(total_amount + fee)

    # If there is any remaining unspent elixir in this transaction, assign it
    # back to an address we control as change
    designations =
      if remaining > 0 do
        designations ++ [%{addr: hd(tx.inputs).addr, amount: remaining}]
      else
        designations
      end

    tx = Map.merge(tx, calculate_outputs(tx, designations))

    # Create a signature for each unique address in the inputs
    sigs = create_sig_list(tx.inputs, tx)
    Map.put(tx, :sigs, sigs)
  end
end
|
lib/transaction.ex
| 0.853898
| 0.492249
|
transaction.ex
|
starcoder
|
defmodule AWS.DocDB do
@moduledoc """
Amazon DocumentDB API documentation
"""
  @doc """
  Adds metadata tags to an Amazon DocumentDB resource.
  You can use these tags with cost allocation reporting to track costs that are
  associated with Amazon DocumentDB resources, or in a `Condition` statement in an
  AWS Identity and Access Management (IAM) policy for Amazon DocumentDB.
  """
  # Thin wrapper: forwards to this module's shared `request/4` helper with the
  # AWS action name. `client` is the AWS client configuration, `input` the
  # request parameters, `options` extra per-request options (presumably HTTP
  # options — confirm against the `request/4` definition later in the file).
  def add_tags_to_resource(client, input, options \\ []) do
    request(client, "AddTagsToResource", input, options)
  end
  @doc """
  Applies a pending maintenance action to a resource (for example, to an Amazon
  DocumentDB instance).
  """
  # Forwards to the shared request/4 helper with action "ApplyPendingMaintenanceAction".
  def apply_pending_maintenance_action(client, input, options \\ []) do
    request(client, "ApplyPendingMaintenanceAction", input, options)
  end
  @doc """
  Copies the specified cluster parameter group.
  """
  # Forwards to the shared request/4 helper with action "CopyDBClusterParameterGroup".
  def copy_d_b_cluster_parameter_group(client, input, options \\ []) do
    request(client, "CopyDBClusterParameterGroup", input, options)
  end
  @doc """
  Copies a snapshot of a cluster.
  To copy a cluster snapshot from a shared manual cluster snapshot,
  `SourceDBClusterSnapshotIdentifier` must be the Amazon Resource Name (ARN) of
  the shared cluster snapshot. You can only copy a shared DB cluster snapshot,
  whether encrypted or not, in the same AWS Region.
  To cancel the copy operation after it is in progress, delete the target cluster
  snapshot identified by `TargetDBClusterSnapshotIdentifier` while that cluster
  snapshot is in the *copying* status.
  """
  # Forwards to the shared request/4 helper with action "CopyDBClusterSnapshot".
  def copy_d_b_cluster_snapshot(client, input, options \\ []) do
    request(client, "CopyDBClusterSnapshot", input, options)
  end
  @doc """
  Creates a new Amazon DocumentDB cluster.
  """
  # Forwards to the shared request/4 helper with action "CreateDBCluster".
  def create_d_b_cluster(client, input, options \\ []) do
    request(client, "CreateDBCluster", input, options)
  end
  @doc """
  Creates a new cluster parameter group.
  Parameters in a cluster parameter group apply to all of the instances in a
  cluster.
  A cluster parameter group is initially created with the default parameters for
  the database engine used by instances in the cluster. In Amazon DocumentDB, you
  cannot make modifications directly to the `default.docdb3.6` cluster parameter
  group. If your Amazon DocumentDB cluster is using the default cluster parameter
  group and you want to modify a value in it, you must first [ create a new parameter
  group](https://docs.aws.amazon.com/documentdb/latest/developerguide/cluster_parameter_group-create.html)
  or [ copy an existing parameter group](https://docs.aws.amazon.com/documentdb/latest/developerguide/cluster_parameter_group-copy.html),
  modify it, and then apply the modified parameter group to your cluster. For the
  new cluster parameter group and associated settings to take effect, you must
  then reboot the instances in the cluster without failover. For more information,
  see [ Modifying Amazon DocumentDB Cluster Parameter Groups](https://docs.aws.amazon.com/documentdb/latest/developerguide/cluster_parameter_group-modify.html).
  """
  # Forwards to the shared request/4 helper with action "CreateDBClusterParameterGroup".
  def create_d_b_cluster_parameter_group(client, input, options \\ []) do
    request(client, "CreateDBClusterParameterGroup", input, options)
  end
  @doc """
  Creates a snapshot of a cluster.
  """
  # Forwards to the shared request/4 helper with action "CreateDBClusterSnapshot".
  def create_d_b_cluster_snapshot(client, input, options \\ []) do
    request(client, "CreateDBClusterSnapshot", input, options)
  end
@doc """
Creates a new instance.
"""
def create_d_b_instance(client, input, options \\ []) do
request(client, "CreateDBInstance", input, options)
end
@doc """
Creates a new subnet group.
subnet groups must contain at least one subnet in at least two Availability
Zones in the AWS Region.
"""
def create_d_b_subnet_group(client, input, options \\ []) do
request(client, "CreateDBSubnetGroup", input, options)
end
@doc """
Deletes a previously provisioned cluster.
When you delete a cluster, all automated backups for that cluster are deleted
and can't be recovered. Manual DB cluster snapshots of the specified cluster are
not deleted.
"""
def delete_d_b_cluster(client, input, options \\ []) do
request(client, "DeleteDBCluster", input, options)
end
@doc """
Deletes a specified cluster parameter group.
The cluster parameter group to be deleted can't be associated with any clusters.
"""
def delete_d_b_cluster_parameter_group(client, input, options \\ []) do
request(client, "DeleteDBClusterParameterGroup", input, options)
end
@doc """
Deletes a cluster snapshot.
If the snapshot is being copied, the copy operation is terminated.
The cluster snapshot must be in the `available` state to be deleted.
"""
def delete_d_b_cluster_snapshot(client, input, options \\ []) do
request(client, "DeleteDBClusterSnapshot", input, options)
end
@doc """
Deletes a previously provisioned instance.
"""
def delete_d_b_instance(client, input, options \\ []) do
request(client, "DeleteDBInstance", input, options)
end
@doc """
Deletes a subnet group.
The specified database subnet group must not be associated with any DB
instances.
"""
def delete_d_b_subnet_group(client, input, options \\ []) do
request(client, "DeleteDBSubnetGroup", input, options)
end
@doc """
Returns a list of certificate authority (CA) certificates provided by Amazon
DocumentDB for this AWS account.
"""
def describe_certificates(client, input, options \\ []),
  do: request(client, "DescribeCertificates", input, options)

@doc """
Returns a list of `DBClusterParameterGroup` descriptions.

If a `DBClusterParameterGroupName` parameter is specified, the list contains
only the description of the specified cluster parameter group.
"""
def describe_d_b_cluster_parameter_groups(client, input, options \\ []),
  do: request(client, "DescribeDBClusterParameterGroups", input, options)

@doc """
Returns the detailed parameter list for a particular cluster parameter group.
"""
def describe_d_b_cluster_parameters(client, input, options \\ []),
  do: request(client, "DescribeDBClusterParameters", input, options)

@doc """
Returns a list of cluster snapshot attribute names and values for a manual DB
cluster snapshot.

When you share snapshots with other AWS accounts,
`DescribeDBClusterSnapshotAttributes` returns the `restore` attribute and a list
of IDs for the AWS accounts that are authorized to copy or restore the manual
cluster snapshot. If `all` is included in the list of values for the `restore`
attribute, then the manual cluster snapshot is public and can be copied or
restored by all AWS accounts.
"""
def describe_d_b_cluster_snapshot_attributes(client, input, options \\ []),
  do: request(client, "DescribeDBClusterSnapshotAttributes", input, options)

@doc """
Returns information about cluster snapshots.

This API operation supports pagination.
"""
def describe_d_b_cluster_snapshots(client, input, options \\ []),
  do: request(client, "DescribeDBClusterSnapshots", input, options)

@doc """
Returns information about provisioned Amazon DocumentDB clusters.

This API operation supports pagination. For certain management features such as
cluster and instance lifecycle management, Amazon DocumentDB leverages
operational technology that is shared with Amazon RDS and Amazon Neptune. Use
the `filterName=engine,Values=docdb` filter parameter to return only Amazon
DocumentDB clusters.
"""
def describe_d_b_clusters(client, input, options \\ []),
  do: request(client, "DescribeDBClusters", input, options)

@doc """
Returns a list of the available engines.
"""
def describe_d_b_engine_versions(client, input, options \\ []),
  do: request(client, "DescribeDBEngineVersions", input, options)

@doc """
Returns information about provisioned Amazon DocumentDB instances.

This API supports pagination.
"""
def describe_d_b_instances(client, input, options \\ []),
  do: request(client, "DescribeDBInstances", input, options)

@doc """
Returns a list of `DBSubnetGroup` descriptions.

If a `DBSubnetGroupName` is specified, the list will contain only the
descriptions of the specified `DBSubnetGroup`.
"""
def describe_d_b_subnet_groups(client, input, options \\ []),
  do: request(client, "DescribeDBSubnetGroups", input, options)

@doc """
Returns the default engine and system parameter information for the cluster
database engine.
"""
def describe_engine_default_cluster_parameters(client, input, options \\ []),
  do: request(client, "DescribeEngineDefaultClusterParameters", input, options)

@doc """
Displays a list of categories for all event source types, or, if specified, for
a specified source type.
"""
def describe_event_categories(client, input, options \\ []),
  do: request(client, "DescribeEventCategories", input, options)

@doc """
Returns events related to instances, security groups, snapshots, and DB
parameter groups for the past 14 days.

You can obtain events specific to a particular DB instance, security group,
snapshot, or parameter group by providing the name as a parameter. By default,
the events of the past hour are returned.
"""
def describe_events(client, input, options \\ []),
  do: request(client, "DescribeEvents", input, options)

@doc """
Returns a list of orderable instance options for the specified engine.
"""
def describe_orderable_d_b_instance_options(client, input, options \\ []),
  do: request(client, "DescribeOrderableDBInstanceOptions", input, options)

@doc """
Returns a list of resources (for example, instances) that have at least one
pending maintenance action.
"""
def describe_pending_maintenance_actions(client, input, options \\ []),
  do: request(client, "DescribePendingMaintenanceActions", input, options)
@doc """
Forces a failover for a cluster.

A failover for a cluster promotes one of the Amazon DocumentDB replicas
(read-only instances) in the cluster to be the primary instance (the cluster
writer).

If the primary instance fails, Amazon DocumentDB automatically fails over to an
Amazon DocumentDB replica, if one exists. You can force a failover when you want
to simulate a failure of a primary instance for testing.
"""
def failover_d_b_cluster(client, input, options \\ []),
  do: request(client, "FailoverDBCluster", input, options)

@doc """
Lists all tags on an Amazon DocumentDB resource.
"""
def list_tags_for_resource(client, input, options \\ []),
  do: request(client, "ListTagsForResource", input, options)

@doc """
Modifies a setting for an Amazon DocumentDB cluster.

You can change one or more database configuration parameters by specifying these
parameters and the new values in the request.
"""
def modify_d_b_cluster(client, input, options \\ []),
  do: request(client, "ModifyDBCluster", input, options)

@doc """
Modifies the parameters of a cluster parameter group.

To modify more than one parameter, submit a list of the following:
`ParameterName`, `ParameterValue`, and `ApplyMethod`. A maximum of 20 parameters
can be modified in a single request.

Changes to dynamic parameters are applied immediately. Changes to static
parameters require a reboot or maintenance window before the change can take
effect.

After you create a cluster parameter group, you should wait at least 5 minutes
before creating your first cluster that uses that cluster parameter group as the
default parameter group. This allows Amazon DocumentDB to fully complete the
create action before the parameter group is used as the default for a new
cluster. This step is especially important for parameters that are critical when
creating the default database for a cluster, such as the character set for the
default database defined by the `character_set_database` parameter.
"""
def modify_d_b_cluster_parameter_group(client, input, options \\ []),
  do: request(client, "ModifyDBClusterParameterGroup", input, options)

@doc """
Adds an attribute and values to, or removes an attribute and values from, a
manual DB cluster snapshot.

To share a manual cluster snapshot with other AWS accounts, specify `restore` as
the `AttributeName`, and use the `ValuesToAdd` parameter to add a list of IDs of
the AWS accounts that are authorized to restore the manual cluster snapshot. Use
the value `all` to make the manual cluster snapshot public, which means that it
can be copied or restored by all AWS accounts. Do not add the `all` value for
any manual DB cluster snapshots that contain private information that you don't
want available to all AWS accounts. If a manual cluster snapshot is encrypted,
it can be shared, but only by specifying a list of authorized AWS account IDs
for the `ValuesToAdd` parameter. You can't use `all` as a value for that
parameter in this case.
"""
def modify_d_b_cluster_snapshot_attribute(client, input, options \\ []),
  do: request(client, "ModifyDBClusterSnapshotAttribute", input, options)

@doc """
Modifies settings for an instance.

You can change one or more database configuration parameters by specifying these
parameters and the new values in the request.
"""
def modify_d_b_instance(client, input, options \\ []),
  do: request(client, "ModifyDBInstance", input, options)

@doc """
Modifies an existing subnet group.

Subnet groups must contain at least one subnet in at least two Availability
Zones in the AWS Region.
"""
def modify_d_b_subnet_group(client, input, options \\ []),
  do: request(client, "ModifyDBSubnetGroup", input, options)

@doc """
You might need to reboot your instance, usually for maintenance reasons.

For example, if you make certain changes, or if you change the cluster parameter
group that is associated with the instance, you must reboot the instance for the
changes to take effect.

Rebooting an instance restarts the database engine service. Rebooting an
instance results in a momentary outage, during which the instance status is set
to *rebooting*.
"""
def reboot_d_b_instance(client, input, options \\ []),
  do: request(client, "RebootDBInstance", input, options)

@doc """
Removes metadata tags from an Amazon DocumentDB resource.
"""
def remove_tags_from_resource(client, input, options \\ []),
  do: request(client, "RemoveTagsFromResource", input, options)

@doc """
Modifies the parameters of a cluster parameter group to the default value.

To reset specific parameters, submit a list of the following: `ParameterName`
and `ApplyMethod`. To reset the entire cluster parameter group, specify the
`DBClusterParameterGroupName` and `ResetAllParameters` parameters.

When you reset the entire group, dynamic parameters are updated immediately and
static parameters are set to `pending-reboot` to take effect on the next DB
instance reboot.
"""
def reset_d_b_cluster_parameter_group(client, input, options \\ []),
  do: request(client, "ResetDBClusterParameterGroup", input, options)

@doc """
Creates a new cluster from a snapshot or cluster snapshot.

If a snapshot is specified, the target cluster is created from the source DB
snapshot with a default configuration and default security group.

If a cluster snapshot is specified, the target cluster is created from the
source cluster restore point with the same configuration as the original source
DB cluster, except that the new cluster is created with the default security
group.
"""
def restore_d_b_cluster_from_snapshot(client, input, options \\ []),
  do: request(client, "RestoreDBClusterFromSnapshot", input, options)

@doc """
Restores a cluster to an arbitrary point in time.

Users can restore to any point in time before `LatestRestorableTime` for up to
`BackupRetentionPeriod` days. The target cluster is created from the source
cluster with the same configuration as the original cluster, except that the new
cluster is created with the default security group.
"""
def restore_d_b_cluster_to_point_in_time(client, input, options \\ []),
  do: request(client, "RestoreDBClusterToPointInTime", input, options)

@doc """
Restarts the stopped cluster that is specified by `DBClusterIdentifier`.

For more information, see [Stopping and Starting an Amazon DocumentDB Cluster](https://docs.aws.amazon.com/documentdb/latest/developerguide/db-cluster-stop-start.html).
"""
def start_d_b_cluster(client, input, options \\ []),
  do: request(client, "StartDBCluster", input, options)

@doc """
Stops the running cluster that is specified by `DBClusterIdentifier`.

The cluster must be in the *available* state. For more information, see
[Stopping and Starting an Amazon DocumentDB Cluster](https://docs.aws.amazon.com/documentdb/latest/developerguide/db-cluster-stop-start.html).
"""
def stop_d_b_cluster(client, input, options \\ []),
  do: request(client, "StopDBCluster", input, options)
@spec request(AWS.Client.t(), binary(), map(), list()) ::
        {:ok, map() | nil, map()}
        | {:error, term()}
defp request(client, action, input, options) do
  # Amazon DocumentDB shares the RDS query endpoint and API version.
  client = %{client | service: "rds"}
  host = build_host("rds", client)
  url = build_url(host, client)

  base_headers = [
    {"Host", host},
    {"Content-Type", "application/x-www-form-urlencoded"}
  ]

  payload = encode!(client, Map.merge(input, %{"Action" => action, "Version" => "2014-10-31"}))
  signed_headers = AWS.Request.sign_v4(client, "POST", url, base_headers, payload)
  post(client, url, payload, signed_headers, options)
end
# Sends the signed request and normalizes the response:
# an empty 200 body decodes to nil, a non-empty one to a parsed map.
defp post(client, url, payload, headers, options) do
  case AWS.Client.request(client, :post, url, payload, headers, options) do
    {:ok, %{status_code: 200, body: ""} = response} ->
      {:ok, nil, response}

    {:ok, %{status_code: 200, body: body} = response} ->
      {:ok, decode!(client, body), response}

    {:ok, response} ->
      {:error, {:unexpected_response, response}}

    {:error, _reason} = error ->
      error
  end
end
# Resolves the host name for the request.
# Clause order is significant: both "local" development clauses must be
# tried before the generic region/endpoint clause.
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
  endpoint
end

# "local" region without an explicit endpoint falls back to localhost.
defp build_host(_endpoint_prefix, %{region: "local"}) do
  "localhost"
end

# Standard AWS host: "<service-prefix>.<region>.<endpoint-suffix>".
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
  "#{endpoint_prefix}.#{region}.#{endpoint}"
end
# Builds the base URL from the client's configured protocol and port.
defp build_url(host, %{proto: proto, port: port}) do
  "#{proto}://#{host}:#{port}/"
end
# Query-string encoding for requests; XML decoding for responses.
defp encode!(client, payload), do: AWS.Client.encode!(client, payload, :query)

defp decode!(client, payload), do: AWS.Client.decode!(client, payload, :xml)
end
|
lib/aws/generated/doc_db.ex
| 0.888463
| 0.481941
|
doc_db.ex
|
starcoder
|
defmodule LavaPotion.Struct.Player do
  @moduledoc """
  A per-guild Lavalink player handle.

  All operations are fire-and-forget casts to the owning node's WebSockex
  process; none of them return a meaningful result.
  """

  alias LavaPotion.Struct.Node

  defstruct [:node, :guild_id, :session_id, :token, :endpoint, :track, :volume, :is_real, :paused, :raw_timestamp, :raw_position]

  @doc """
  Sends the player's voice state to its node so the node can (re)establish
  the voice connection.
  """
  def initialize(player = %__MODULE__{node: %Node{address: address}}) do
    WebSockex.cast(Node.pid(address), {:voice_update, player})
  end

  @doc """
  Plays a track on the best available node.

  Accepts either a base64 track string (its metadata is decoded via
  `LavaPotion.Api.decode_track/1`) or a `%{"track" => _, "info" => _}` map.
  """
  def play(player = %__MODULE__{node: %Node{}}, track) when is_binary(track) do
    info = LavaPotion.Api.decode_track(track)
    cast_play(player, track, info)
  end

  def play(player = %__MODULE__{node: %Node{}}, %{"track" => track, "info" => info = %{}}) do
    cast_play(player, track, info)
  end

  # Shared by both play/2 clauses: picks the best node, migrates the player
  # when the chosen node differs from the current one, then casts :play.
  defp cast_play(player = %__MODULE__{node: %Node{address: old_address}}, track, info) do
    {:ok, node = %Node{address: address}} = Node.best_node()

    if old_address !== address do
      set_node(player, node)
      WebSockex.cast(Node.pid(address), {:play, player, {track, info}})
    else
      WebSockex.cast(Node.pid(old_address), {:play, player, {track, info}})
    end
  end

  @doc """
  Sets the player volume. Accepted range is 0..1000.
  """
  def volume(player = %__MODULE__{node: %Node{address: address}}, volume) when is_number(volume) and volume >= 0 and volume <= 1000 do
    WebSockex.cast(Node.pid(address), {:volume, player, volume})
  end

  @doc """
  Seeks to the given non-negative `position` in the current track
  (presumably milliseconds -- confirm against the Lavalink protocol docs).
  """
  def seek(player = %__MODULE__{node: %Node{address: address}}, position) when is_number(position) and position >= 0 do
    WebSockex.cast(Node.pid(address), {:seek, player, position})
  end

  @doc "Pauses playback."
  def pause(player = %__MODULE__{node: %Node{address: address}}), do: WebSockex.cast(Node.pid(address), {:pause, player, true})

  @doc "Resumes playback."
  def resume(player = %__MODULE__{node: %Node{address: address}}), do: WebSockex.cast(Node.pid(address), {:pause, player, false})

  @doc "Destroys the player on its node."
  def destroy(player = %__MODULE__{node: %Node{address: address}}), do: WebSockex.cast(Node.pid(address), {:destroy, player})

  @doc "Stops the current track."
  def stop(player = %__MODULE__{node: %Node{address: address}}), do: WebSockex.cast(Node.pid(address), {:stop, player})

  @doc """
  Estimates the current playback position from the last raw position update,
  capped at the track length. While paused the raw position is reported
  as-is; otherwise the wall-clock time elapsed since the update is added.
  """
  def position(player = %__MODULE__{node: %Node{}, raw_position: raw_position, raw_timestamp: raw_timestamp})
      when not is_nil(raw_position) and not is_nil(raw_timestamp) do
    %__MODULE__{paused: paused, track: {_, %{"length" => length}}} = player

    if paused do
      min(raw_position, length)
    else
      min(raw_position + (:os.system_time(:millisecond) - raw_timestamp), length)
    end
  end

  @doc """
  Migrates the player to another node. Only players flagged `is_real: true`
  can be moved; other players raise `FunctionClauseError`.
  """
  def set_node(player = %__MODULE__{node: %Node{address: address}, is_real: true}, node = %Node{}) do
    WebSockex.cast(Node.pid(address), {:update_node, player, node})
  end
end
|
lib/lavapotion/struct/player.ex
| 0.650689
| 0.40157
|
player.ex
|
starcoder
|
defmodule Mop8.Bot.Message do
  @moduledoc """
  A single incoming chat message: its text plus the time of the event.

  Provides tokenization of Slack-style markup (`<@user>`, `<http...>`,
  code fences, bold, quotes, slash commands, emoji) into tagged tokens.
  """

  alias Mop8.Bot.Error.InvalidMessageError

  @enforce_keys [
    :text,
    :event_at
  ]
  defstruct [
    :text,
    :event_at
  ]

  @type t :: %__MODULE__{
          text: String.t(),
          event_at: DateTime.t()
        }

  @doc """
  Builds a message.

  Raises `InvalidMessageError` when `text` is not a non-empty string or
  `event_at` is not a `DateTime`.
  """
  @spec new(String.t(), DateTime.t()) :: t()
  def new(text, event_at) do
    if !is_binary(text) || String.length(text) == 0 do
      msg = "The text must be non empty string. text: #{text}"
      raise InvalidMessageError, msg
    end

    if !is_struct(event_at) || DateTime != event_at.__struct__ do
      msg = "The event_at must be DateTime. event_at: #{event_at}"
      raise InvalidMessageError, msg
    end

    %__MODULE__{
      text: text,
      event_at: event_at
    }
  end

  @doc """
  Returns true when the message text starts with a mention of `user_id`.
  """
  @spec is_mention?(t(), String.t()) :: boolean()
  def is_mention?(%__MODULE__{text: text}, user_id) do
    String.match?(text, ~r/^<@#{user_id}>/)
  end

  # NOTE: the spec below was fixed to match the tags actually produced by
  # tokenize/2 (it emitted :uri, :command and :emoji_only, while the old
  # spec declared :url and omitted the other two).
  @spec tokenize(t()) :: [token]
        when token:
               {:user_id, String.t()}
               | {:uri, String.t()}
               | {:code, String.t()}
               | {:bold, String.t()}
               | {:quote, String.t()}
               | {:command, String.t()}
               | {:emoji_only, String.t()}
               | {:text, String.t()}
  def tokenize(%__MODULE__{text: text}) do
    text
    |> split()
    |> tokenize([])
    |> Enum.reverse()
  end

  # Splits the text into markup captures and the plain-text runs between them.
  defp split(text) do
    String.split(text, ~r/<.+>|```(\s|.)*```|\*.*\*|>.*|^\/.*/,
      include_captures: true,
      trim: true
    )
  end

  defp tokenize([], acc) do
    acc
  end

  defp tokenize([text | rest], acc) do
    token =
      cond do
        String.match?(text, ~r/^<@.+>$/) ->
          {:user_id, String.slice(text, 2..-2)}

        String.match?(text, ~r/^<http.*>$/) ->
          {:uri, String.slice(text, 1..-2)}

        String.match?(text, ~r/^```(\s|.)*```$/) ->
          {:code, String.slice(text, 3..-4)}

        String.match?(text, ~r/^\*.+\*$/) ->
          {:bold, String.slice(text, 1..-2)}

        String.match?(text, ~r/^>.*$/) ->
          # NOTE(review): the quote marker is a single ">", yet slicing starts
          # at index 5, dropping four extra characters -- confirm intended.
          {:quote, String.slice(text, 5..-1)}

        String.match?(text, ~r/^\/.*$/) ->
          {:command, String.trim(text)}

        String.match?(text, ~r/^:.*:$/) ->
          {:emoji_only, text}

        true ->
          nil
      end

    if token == nil do
      # Plain text: drop blank runs and emit one {:text, _} token per line.
      case String.trim(text) do
        "" ->
          tokenize(rest, acc)

        text ->
          acc =
            String.split(text, "\n", trim: true)
            |> Enum.reduce(acc, fn text, acc ->
              [{:text, text} | acc]
            end)

          tokenize(rest, acc)
      end
    else
      tokenize(rest, [token | acc])
    end
  end
end
|
lib/mop8/bot/message.ex
| 0.776369
| 0.608071
|
message.ex
|
starcoder
|
defmodule Kino.Frame do
  @moduledoc """
  A placeholder for outputs.

  A frame wraps outputs that can be dynamically updated at
  any time.

  Also see `Kino.animate/3` which offers a convenience on
  top of this widget.

  ## Examples

      widget = Kino.Frame.new() |> Kino.render()

      for i <- 1..100 do
        Kino.Frame.render(widget, i)
        Process.sleep(50)
      end

  Or with a scheduled task in the background.

      widget = Kino.Frame.new() |> Kino.render()

      Kino.Frame.periodically(widget, 50, 0, fn i ->
        Kino.Frame.render(widget, i)
        {:cont, i + 1}
      end)
  """

  @doc false
  use GenServer, restart: :temporary

  defstruct [:ref, :pid]

  @opaque t :: %__MODULE__{ref: String.t(), pid: pid()}

  @typedoc false
  @type state :: %{outputs: list(Kino.Output.t())}

  @doc """
  Starts a widget process.
  """
  @spec new() :: t()
  def new() do
    ref = Integer.to_string(System.unique_integer())
    {:ok, pid} = Kino.start_child({__MODULE__, ref})
    %__MODULE__{ref: ref, pid: pid}
  end

  @doc false
  def start_link(opts), do: GenServer.start_link(__MODULE__, opts)

  @doc """
  Renders the given term within the frame.

  This works similarly to `Kino.render/1`, but the rendered
  output replaces existing frame contents.
  """
  @spec render(t(), term()) :: :ok
  def render(frame, term), do: GenServer.cast(frame.pid, {:render, term})

  @doc """
  Renders and appends the given term to the frame.
  """
  @spec append(t(), term()) :: :ok
  def append(frame, term), do: GenServer.cast(frame.pid, {:append, term})

  @doc """
  Removes all outputs within the given frame.
  """
  @spec clear(t()) :: :ok
  def clear(frame), do: GenServer.cast(frame.pid, :clear)

  @doc """
  Registers a callback to run periodically in the widget process.

  The callback is run every `interval_ms` milliseconds and receives
  the accumulated value. The callback should return either of:

    * `{:cont, acc}` - the continue with the new accumulated value

    * `:halt` - to no longer schedule callback evaluation

  The callback is run for the first time immediately upon registration.
  """
  @spec periodically(t(), pos_integer(), term(), (term() -> {:cont, term()} | :halt)) :: :ok
  def periodically(frame, interval_ms, acc, fun) do
    GenServer.cast(frame.pid, {:periodically, interval_ms, acc, fun})
  end

  @doc false
  @spec get_outputs(t()) :: list(Kino.Output.t())
  def get_outputs(frame), do: GenServer.call(frame.pid, :get_outputs)

  @impl true
  def init(ref), do: {:ok, %{ref: ref, outputs: []}}

  @impl true
  def handle_cast({:render, term}, state) do
    output = Kino.Render.to_livebook(term)
    put_update(state.ref, [output], :replace)
    {:noreply, %{state | outputs: [output]}}
  end

  def handle_cast({:append, term}, state) do
    output = Kino.Render.to_livebook(term)
    put_update(state.ref, [output], :append)
    {:noreply, %{state | outputs: [output | state.outputs]}}
  end

  def handle_cast(:clear, state) do
    put_update(state.ref, [], :replace)
    {:noreply, %{state | outputs: []}}
  end

  def handle_cast({:periodically, interval_ms, acc, fun}, state) do
    periodically_iter(interval_ms, acc, fun)
    {:noreply, state}
  end

  @impl true
  def handle_call(:get_outputs, _from, state) do
    {:reply, state.outputs, state}
  end

  @impl true
  def handle_info({:periodically_iter, interval_ms, acc, fun}, state) do
    periodically_iter(interval_ms, acc, fun)
    {:noreply, state}
  end

  # Runs one callback iteration and schedules the next unless halted.
  defp periodically_iter(interval_ms, acc, fun) do
    case fun.(acc) do
      {:cont, next_acc} ->
        Process.send_after(self(), {:periodically_iter, interval_ms, next_acc, fun}, interval_ms)

      :halt ->
        :ok
    end
  end

  # Pushes the frame output (replace or append) through the Kino bridge.
  defp put_update(ref, outputs, type) do
    output = Kino.Output.frame(outputs, %{ref: ref, type: type})
    Kino.Bridge.put_output(output)
  end
end
|
lib/kino/frame.ex
| 0.878542
| 0.563468
|
frame.ex
|
starcoder
|
defprotocol Geocalc.Point do
  @moduledoc """
  The `Geocalc.Point` protocol is responsible for receiving latitude and
  longitude from any Elixir data structure.

  At this time it has implementations only for Map, Tuple and List, and the
  Shape structs defined inside this project.

  Point values can be decimal degrees or DMS (degrees, minutes, seconds).
  """

  @doc """
  Returns point latitude as a number (implementations convert
  `Geocalc.DMS` values to decimal degrees).
  """
  def latitude(point)

  @doc """
  Returns point longitude as a number (implementations convert
  `Geocalc.DMS` values to decimal degrees).
  """
  def longitude(point)
end
defimpl Geocalc.Point, for: List do
  # A point as a two-element list: [latitude, longitude].
  def latitude([%Geocalc.DMS{} = lat, _lng]), do: Geocalc.DMS.to_decimal(lat)
  def latitude([lat, _lng]) when is_number(lat), do: lat

  def longitude([_lat, %Geocalc.DMS{} = lng]), do: Geocalc.DMS.to_decimal(lng)
  def longitude([_lat, lng]) when is_number(lng), do: lng
end
defimpl Geocalc.Point, for: Map do
  # Accepts :lat/:latitude and :lon/:lng/:longitude keys, tried in that order;
  # Geocalc.DMS values are converted to decimal degrees.
  def latitude(%{lat: %Geocalc.DMS{} = lat}), do: Geocalc.DMS.to_decimal(lat)
  def latitude(%{lat: lat}) when is_number(lat), do: lat
  def latitude(%{latitude: %Geocalc.DMS{} = lat}), do: Geocalc.DMS.to_decimal(lat)
  def latitude(%{latitude: lat}) when is_number(lat), do: lat

  def longitude(%{lon: %Geocalc.DMS{} = lng}), do: Geocalc.DMS.to_decimal(lng)
  def longitude(%{lon: lng}) when is_number(lng), do: lng
  def longitude(%{lng: %Geocalc.DMS{} = lng}), do: Geocalc.DMS.to_decimal(lng)
  def longitude(%{lng: lng}) when is_number(lng), do: lng
  def longitude(%{longitude: %Geocalc.DMS{} = lng}), do: Geocalc.DMS.to_decimal(lng)
  def longitude(%{longitude: lng}) when is_number(lng), do: lng
end
defimpl Geocalc.Point, for: Tuple do
  # Supports {lat, lng} pairs and {:ok, lat, lng} triples.
  def latitude({%Geocalc.DMS{} = lat, _lng}), do: Geocalc.DMS.to_decimal(lat)
  def latitude({lat, _lng}) when is_number(lat), do: lat
  def latitude({:ok, %Geocalc.DMS{} = lat, _lng}), do: Geocalc.DMS.to_decimal(lat)
  def latitude({:ok, lat, _lng}) when is_number(lat), do: lat

  def longitude({_lat, %Geocalc.DMS{} = lng}), do: Geocalc.DMS.to_decimal(lng)
  def longitude({_lat, lng}) when is_number(lng), do: lng
  def longitude({:ok, _lat, %Geocalc.DMS{} = lng}), do: Geocalc.DMS.to_decimal(lng)
  def longitude({:ok, _lat, lng}) when is_number(lng), do: lng
end
defimpl Geocalc.Point, for: Geocalc.Shape.Circle do
  # A circle's point is its center.
  def latitude(%Geocalc.Shape.Circle{latitude: %Geocalc.DMS{} = lat}), do: Geocalc.DMS.to_decimal(lat)
  def latitude(%Geocalc.Shape.Circle{latitude: lat}) when is_number(lat), do: lat

  def longitude(%Geocalc.Shape.Circle{longitude: %Geocalc.DMS{} = lng}), do: Geocalc.DMS.to_decimal(lng)
  def longitude(%Geocalc.Shape.Circle{longitude: lng}) when is_number(lng), do: lng
end
defimpl Geocalc.Point, for: Geocalc.Shape.Rectangle do
  # A rectangle's point is its anchor coordinate.
  def latitude(%Geocalc.Shape.Rectangle{latitude: %Geocalc.DMS{} = lat}), do: Geocalc.DMS.to_decimal(lat)
  def latitude(%Geocalc.Shape.Rectangle{latitude: lat}) when is_number(lat), do: lat

  def longitude(%Geocalc.Shape.Rectangle{longitude: %Geocalc.DMS{} = lng}), do: Geocalc.DMS.to_decimal(lng)
  def longitude(%Geocalc.Shape.Rectangle{longitude: lng}) when is_number(lng), do: lng
end
defimpl Geocalc.Point, for: Geocalc.Shape.Ellipse do
  # An ellipse's point is its center coordinate.
  def latitude(%Geocalc.Shape.Ellipse{latitude: %Geocalc.DMS{} = lat}), do: Geocalc.DMS.to_decimal(lat)
  def latitude(%Geocalc.Shape.Ellipse{latitude: lat}) when is_number(lat), do: lat

  def longitude(%Geocalc.Shape.Ellipse{longitude: %Geocalc.DMS{} = lng}), do: Geocalc.DMS.to_decimal(lng)
  def longitude(%Geocalc.Shape.Ellipse{longitude: lng}) when is_number(lng), do: lng
end
|
lib/geocalc/point.ex
| 0.920888
| 0.825238
|
point.ex
|
starcoder
|
defmodule OkThen.Result.Enum do
  @moduledoc """
  Functions for processing tagged tuples inside Enums.
  """
  alias OkThen.Result
  alias Result.Private
  require Private

  @doc """
  Collects an Enum of results into a single result. If all results were tagged `:ok`, then a
  result will be returned tagged with `:ok`, whose value is a list of the wrapped values from each
  element in the list. Otherwise, the result whose tag didn't match `tag` is returned.

  Equivalent to `collect_tagged(results, :ok)`. See `collect_tagged/2`.

  ## Examples

      iex> [:ok, :ok]
      ...> |> Result.Enum.collect()
      {:ok, [{}, {}]}

      iex> [:ok, :ok, :ok, :error, {:error, 2}]
      ...> |> Result.Enum.collect()
      {:error, {}}

      iex> [{:ok, 1}, {:ok, 1, 2}, :ok]
      ...> |> Result.Enum.collect()
      {:ok, [1, {1, 2}, {}]}

      iex> [{:ok, 1}, {:ok, 1, 2}, {:something, 1}, :ok]
      ...> |> Result.Enum.collect()
      {:something, 1}

      iex> []
      ...> |> Result.Enum.collect()
      {:ok, []}
  """
  @spec collect([Result.tagged()]) :: {atom(), [any()]}
  def collect(results), do: collect_tagged(results, :ok)

  @doc """
  Collects an Enum of results into a single result. If all results were tagged with the specified
  `tag`, then a result will be returned tagged with `tag`, whose value is a list of the wrapped
  values from each element in the list. Otherwise, the result whose tag didn't match `tag` is
  returned.

  ## Examples

      iex> [:ok, :ok]
      ...> |> Result.Enum.collect_tagged(:ok)
      {:ok, [{}, {}]}

      iex> [:ok, :ok, :ok, :error, {:error, 2}]
      ...> |> Result.Enum.collect_tagged(:ok)
      {:error, {}}

      iex> [{:ok, 1}, {:ok, 1, 2}, :ok]
      ...> |> Result.Enum.collect_tagged(:ok)
      {:ok, [1, {1, 2}, {}]}

      iex> [{:ok, 1}, {:ok, 1, 2}, {:something, 1}, :ok]
      ...> |> Result.Enum.collect_tagged(:ok)
      {:something, 1}

      iex> []
      ...> |> Result.Enum.collect_tagged(:ok)
      {:ok, []}
  """
  @spec collect_tagged([Result.tagged()], atom()) :: {atom(), [any()]}
  def collect_tagged(results, tag) do
    results
    |> Enum.map(&Private.normalize_result_input/1)
    # Fold values into a reversed list while the accumulator is still tagged
    # `tag`. The FIRST result with a different tag replaces the accumulator,
    # and the final catch-all clause then carries it through unchanged.
    |> Enum.reduce({tag, []}, fn
      {^tag, value}, {^tag, out_list} -> {tag, [value | out_list]}
      other_result, {^tag, _out_list} -> other_result
      _result, acc -> acc
    end)
    # Only applies when the accumulator is still tagged `tag`: restores the
    # original element order.
    |> Result.tagged_map(tag, &Enum.reverse/1)
  end

  @doc """
  Modifies an Enum of results by applying `filter_function` to each group of values separately,
  according to their tag.

  **Note:** The ordering of results is not maintained.

  ## Examples

      iex> [{:ok, 1}, {:ok, 2}, :error, :error]
      ...> |> Result.Enum.map_grouped_by_tag(fn
      ...>   :ok, values -> Enum.map(values, &(&1 + 1))
      ...>   :error, _values -> []
      ...> end)
      [{:ok, 2}, {:ok, 3}]

      iex> [{:ok, 1}]
      ...> |> Result.Enum.map_grouped_by_tag(fn
      ...>   :ok, _values -> nil
      ...> end)
      ** (ArgumentError) Expected map_function clause for tag :ok to return a list, but got: nil

      iex> [{:ok, 1}, {:error, 1}, {:ok, 2}, {:error, 2}, :none]
      ...> |> Result.Enum.map_grouped_by_tag(fn
      ...>   :ok, values -> Enum.map(values, &(&1 + 1))
      ...>   :error, values -> Enum.take(values, 1)
      ...>   :none, _values -> []
      ...> end)
      [{:error, 1}, {:ok, 2}, {:ok, 3}]

      iex> [{:some, 1}, :other, {:some, 2}]
      ...> |> Result.Enum.map_grouped_by_tag(fn
      ...>   :some, values -> Enum.map(values, &(&1 + 1))
      ...>   :other, _values -> []
      ...> end)
      [{:some, 2}, {:some, 3}]

      iex> [:ok, "hello", {:error, "hello"}, {1, 2}]
      ...> |> Result.Enum.map_grouped_by_tag(fn
      ...>   tag, _values when tag in [:ok, :error] -> []
      ...>   :untagged, values -> values
      ...> end)
      [{:untagged, "hello"}, {:untagged, {1, 2}}]

      iex> []
      ...> |> Result.Enum.map_grouped_by_tag(fn
      ...>   :ok, values -> Enum.map(values, &(&1 + 1))
      ...> end)
      []
  """
  @spec map_grouped_by_tag([Result.tagged()], (atom(), [any()] -> [any()])) :: [Result.tagged()]
  def map_grouped_by_tag(results, map_function)
      when is_list(results) and is_function(map_function, 2) do
    results
    # Grouping is why result ordering is not maintained (see moduledoc note).
    |> group_by_tag()
    |> Enum.flat_map(fn {tag, values} ->
      map_function.(tag, values)
      |> case do
        list when is_list(list) ->
          # Re-wrap each mapped value with the group's tag.
          Enum.map(list, &{tag, &1})

        other ->
          raise ArgumentError,
                "Expected map_function clause for tag #{Kernel.inspect(tag)} " <>
                  "to return a list, but got: #{Kernel.inspect(other)}"
      end
    end)
  end

  @doc """
  Collects an Enum of results into a map, with result values grouped by their tag.

  ## Examples

      iex> [:ok, :ok, :ok, :error, :error]
      ...> |> Result.Enum.group_by_tag()
      %{
        error: [{}, {}],
        ok: [{}, {}, {}]
      }

      iex> [{:ok, 1}, {:ok, 2}, {:ok, 3}, {:error, 4}, {:error, 5}]
      ...> |> Result.Enum.group_by_tag()
      %{
        error: [4, 5],
        ok: [1, 2, 3]
      }

      iex> [{:ok, 1}, {:ok, 2, 3}, :none, {:error, 4}, {:another, 5}]
      ...> |> Result.Enum.group_by_tag()
      %{
        another: [5],
        error: [4],
        none: [{}],
        ok: [1, {2, 3}]
      }

      iex> [{:ok, 1}, "hello", {1, 2}]
      ...> |> Result.Enum.group_by_tag()
      %{
        ok: [1],
        untagged: ["hello", {1, 2}]
      }

      iex> []
      ...> |> Result.Enum.group_by_tag()
      %{}
  """
  @spec group_by_tag([Result.tagged()]) :: %{atom() => [any()]}
  def group_by_tag(results) when is_list(results) do
    results
    |> Enum.map(&Private.normalize_result_input/1)
    # After normalization every element is a {tag, value} pair:
    # group by the tag (element 0), keeping only the value (element 1).
    |> Enum.group_by(&elem(&1, 0), &elem(&1, 1))
  end
end
|
lib/ok_then/result/enum.ex
| 0.898643
| 0.573738
|
enum.ex
|
starcoder
|
defmodule Cortex do
  @moduledoc """
  Cortex begins the network - it sends initiatory signals to sensors, collects
  actuator output into a results table, reports the resulting error rate to
  the network process, and terminates on request.
  """

  defstruct id: nil, pid: nil, scape: nil, type: :ffnn

  @doc """
  Used in the genotyping stage. Builds a one-element list wrapping a new
  `%Cortex{}` with the given `scape` and `type`, which must both be atoms;
  otherwise prints an error message.
  """
  def generate(scape, type) do
    if is_atom(scape) and is_atom(type) do
      [%Cortex{id: {:cortex, Generate.id()}, scape: scape, type: type}]
    else
      IO.puts("scape and type must both be atoms")
    end
  end

  @doc """
  Entry point for the cortex process: waits for `{:start, counter}` and begins
  iterating, or exits on `{:terminate, _}`. The third argument is unused; the
  results table always starts empty.
  """
  def run(genotype, network_pid, _table) do
    receive do
      {:start, counter} -> iterate(genotype, counter, network_pid, [], [], :start)
      {:terminate, _} -> Process.exit(self(), :kill)
    end
  end

  @doc """
  Runs one sense/act cycle per counter tick, accumulating
  `{count, input, correct_output, actual_output}` rows into `table`, then
  reports via `finish/3` when the counter reaches zero.
  """
  def iterate(genotype, counter, network_pid, table, acc, state) do
    # Only the cortex record is needed here; the other genotype sections
    # (neurons, sensors, actuators) are driven via Transmit messages.
    [_neurons, _sensors, _actuators, cortex] = genotype

    if counter == 0 do
      finish(genotype, network_pid, table)
    else
      case state do
        :start -> Transmit.list(:sensors, genotype, {:start, self(), counter})
        :wait -> :ok
      end

      receive do
        {:sensor_input, {_scape, input}} ->
          # Remember the input and the scape's correct output, then wait for
          # the actuator to report the network's actual output.
          iterate(genotype, counter, network_pid, table, {counter, input, Scape.get_output(cortex.scape, input)}, :wait)

        {:actuator_output, {_actuator_id, _actuator_name}, output} ->
          # Note: `counter` is deliberately rebound from the accumulator so the
          # recorded row keeps the tick at which the input was sensed.
          {counter, acc_input, acc_output} = acc
          iterate(genotype, counter - 1, network_pid, [{counter, acc_input, acc_output, output} | table], [], :start)

        {:terminate, _} ->
          Process.exit(self(), :kill)
      end
    end
  end

  @doc """
  Computes the fraction of table rows whose actual output differs from the
  correct output and sends `{:nn_error, error, row_count}` to `network_pid`.
  Table rows are `{count, input, [correct_output], output}` tuples.
  """
  def finish(_genotype, network_pid, table) do
    total_wrong = Enum.count(table, fn {_count, _input, [correct], output} -> correct != output end)
    error = total_wrong / length(table)
    send(network_pid, {:nn_error, error, length(table)})
  end
end
|
lib/cortex.ex
| 0.540196
| 0.465934
|
cortex.ex
|
starcoder
|
defmodule Appsignal.Utils.MapFilter do
  @moduledoc """
  Helper functions for filtering parameters to prevent sensitive data
  to be submitted to AppSignal.
  """

  require Logger

  @doc """
  Filter parameters based on Appsignal and Phoenix configuration.
  """
  def filter_parameters(values), do: filter_values(values, get_filter_parameters())

  @doc """
  Filter session data based Appsignal configuration.
  """
  def filter_session_data(values), do: filter_values(values, get_filter_session_data())

  # Filters `values` according to the filter spec: `{:discard, keys}` replaces
  # the listed keys, `{:keep, keys}` replaces everything *except* the listed
  # keys, and a bare list behaves like `{:discard, keys}`.
  @doc false
  def filter_values(values, {:discard, params}), do: discard_values(values, params)
  def filter_values(values, {:keep, params}), do: keep_values(values, params)
  def filter_values(values, params), do: discard_values(values, params)

  @doc """
  Returns the merged parameter filters from the AppSignal and Phoenix configs.
  """
  def get_filter_parameters do
    merge_filters(
      Application.get_env(:appsignal, :config)[:filter_parameters],
      Application.get_env(:phoenix, :filter_parameters, [])
    )
  end

  @doc """
  Returns the session-data filter keys from the AppSignal config.
  """
  def get_filter_session_data do
    Application.get_env(:appsignal, :config)[:filter_session_data] || []
  end

  defp discard_values(list, filter_keys) when is_list(list) do
    discard_values(list, filter_keys, [])
  end

  # Structs are converted to plain maps before filtering.
  defp discard_values(%{__struct__: _} = struct, filter_keys) do
    struct
    |> Map.from_struct()
    |> discard_values(filter_keys)
  end

  defp discard_values(map, filter_keys) when is_map(map) do
    map
    |> Map.to_list()
    |> discard_values(filter_keys, [])
    |> Enum.into(%{})
  end

  # Scalars pass through untouched.
  defp discard_values(value, _filter_keys), do: value

  defp discard_values([{key, value} | tail], filter_keys, acc) do
    if (is_binary(key) or is_atom(key)) and to_string(key) in filter_keys do
      discard_values(tail, filter_keys, [{key, "[FILTERED]"} | acc])
    else
      # Recurse into the value so nested maps/lists are filtered too.
      discard_values(tail, filter_keys, [{key, discard_values(value, filter_keys)} | acc])
    end
  end

  defp discard_values([value | tail], filter_keys, acc) do
    discard_values(tail, filter_keys, [discard_values(value, filter_keys) | acc])
  end

  defp discard_values([], _filter_keys, acc), do: acc

  defp keep_values(list, keep_keys) when is_list(list) do
    keep_values(list, keep_keys, [])
  end

  defp keep_values(%{__struct__: _} = struct, keep_keys) do
    struct
    |> Map.from_struct()
    |> keep_values(keep_keys)
  end

  defp keep_values(map, keep_keys) when is_map(map) do
    map
    |> Map.to_list()
    |> keep_values(keep_keys, [])
    |> Enum.into(%{})
  end

  # In keep-mode, any bare value that is not an explicitly-kept key is filtered.
  defp keep_values(_value, _keep_keys), do: "[FILTERED]"

  defp keep_values([{key, value} | tail], keep_keys, acc) do
    if (is_binary(key) or is_atom(key)) and to_string(key) in keep_keys do
      keep_values(tail, keep_keys, [{key, discard_values(value, [])} | acc])
    else
      keep_values(tail, keep_keys, [{key, keep_values(value, keep_keys)} | acc])
    end
  end

  defp keep_values([value | tail], keep_keys, acc) do
    keep_values(tail, keep_keys, [keep_values(value, keep_keys) | acc])
  end

  defp keep_values([], _keep_keys, acc), do: acc

  defp merge_filters(appsignal, phoenix) when is_list(appsignal) and is_list(phoenix) do
    appsignal ++ phoenix
  end

  defp merge_filters({:keep, appsignal}, {:keep, phoenix}), do: {:keep, appsignal ++ phoenix}

  defp merge_filters(appsignal, {:keep, phoenix}) when is_list(appsignal) and is_list(phoenix) do
    {:keep, phoenix -- appsignal}
  end

  defp merge_filters({:keep, appsignal}, phoenix) when is_list(appsignal) and is_list(phoenix) do
    {:keep, appsignal -- phoenix}
  end

  # Unexpected filter shapes: log the problem and filter everything out so no
  # sensitive data can leak. (Fixed typo: "occured" -> "occurred".)
  defp merge_filters(appsignal, phoenix) do
    Logger.error("""
    An error occurred while merging parameter filters.
    AppSignal expects all parameter_filter values to be either a list of
    strings (`["email"]`), or a :keep-tuple with a list of strings as its
    second element (`{:keep, ["email"]}`).
    From the AppSignal configuration:
    #{inspect(appsignal)}
    From the Phoenix configuration:
    #{inspect(phoenix)}
    To ensure no sensitive parameters are sent, all parameters are filtered out
    for this transaction.
    """)

    {:keep, []}
  end
end
|
lib/appsignal/utils/map_filter.ex
| 0.72086
| 0.695467
|
map_filter.ex
|
starcoder
|
defmodule SMSFactor.AccountManaging do
  @moduledoc """
  Wrappers around **Account Managing** section of SMSFactor API.
  """

  @typedoc """
  Params to create account or sub-account.
  - `email` **(required)** : The email of the account
  - `password` **(required)** : The password must be at least 6 characters long (25 max)
  - `country_code` **(required if main account)** : The country code associated to the account (ISO 3166-1 alpha-2)
  - `firstname` : The firstname associated to the account
  - `lastname` : The lastname associated to the account
  - `city` : The city associated to the account
  - `phone` : The phone number associated to the account
  - `address1` : The address associated to the account
  - `address2` : Further information about the address
  - `zip` : The zip code
  - `company` : The company associated to the account
  - `type` : Select one between : company, association, administration, private
  - `sender` : The default sender that will be used for your sendings
  - `description` : Feel free to write anything about this account
  - `isChild` : integer 0 for a main account, 1 for a sub-account
  - `unlimited` **(required if isChild)** : Is the account unlimited ? If unlimited, the sub-account uses the parent's credits. If not, the main account has to give a certain amount of credits to its sub-account.

  ## Example

  ```elixir
  {
    "account":{
      "email" : "<EMAIL>",
      "password" : "<PASSWORD>",
      "firstname" : "Vasili",
      "lastname": "Arkhipov",
      "city" : "Zvorkovo",
      "phone": "33612345678",
      "address1": "Somewhere in Zvorkovo",
      "zip": "386",
      "country_code" : "ru",
      "isChild" : 1,
      "unlimited" : 0
    }
  }
  ```
  """
  @type account_params() :: %{account: %{atom() => any()}}

  @typedoc """
  Params for updating retention.
  Supports the following options :
  - `message` : The data retention time of your messages
  - `list` : The data retention time of your lists (-1 for endless expiration)
  - `survey` : The data retention time of your surveys
  - `campaign` : The data retention time of your campaigns
  After your number put a 'd' for day and a 'm' for month.

  ## Example

  ```elixir
  {
    "retention":{
      "message": "2d",
      "survey": "5m",
      "list": "2m",
      "campaign": "5m"
    }
  }
  ```
  """
  @type retention_params() :: %{retention: %{atom() => any()}}

  @doc "Fetches the remaining credits for the account."
  @spec credits(Tesla.Client.t()) :: Tesla.Env.result()
  def credits(client) do
    Tesla.get(client, "/credits")
  end

  @doc "Fetches the details of the current account."
  @spec retrieve_account(Tesla.Client.t()) :: Tesla.Env.result()
  def retrieve_account(client) do
    Tesla.get(client, "/account")
  end

  @doc "Lists the sub-accounts attached to the current account."
  @spec retrieve_sub_accounts(Tesla.Client.t()) :: Tesla.Env.result()
  def retrieve_sub_accounts(client) do
    Tesla.get(client, "/sub-accounts")
  end

  @doc "Creates an account or sub-account (see `t:account_params/0`)."
  @spec create_account(Tesla.Client.t(), account_params()) :: Tesla.Env.result()
  def create_account(client, params) do
    Tesla.post(client, "/account", params)
  end

  @doc "Fetches the current data-retention settings."
  @spec get_retention(Tesla.Client.t()) :: Tesla.Env.result()
  def get_retention(client) do
    Tesla.get(client, "/retention")
  end

  @doc "Updates the data-retention settings (see `t:retention_params/0`)."
  @spec update_retention(Tesla.Client.t(), retention_params()) :: Tesla.Env.result()
  def update_retention(client, params) do
    Tesla.put(client, "/retention", params)
  end
end
|
lib/sms_factor/account_managing.ex
| 0.870432
| 0.843831
|
account_managing.ex
|
starcoder
|
defmodule NiceTrie do
  @doc ~S"""
  Check for a word in the NiceTrie

  ## Examples

      iex> NiceTrie.member?([], "a")
      false
      iex> NiceTrie.member?([{"a", []}, {"be", []}], "a")
      true
      iex> NiceTrie.member?([{"a", []}, {"be", []}], "be")
      true
      iex> NiceTrie.member?([{"be", [{"er", []}, {"d", []}]}, {"a", [{"t", []}]}], "beer")
      true
      iex> NiceTrie.member?([{"be", [{"er", []}, {"d", []}]}, {"a", [{"t", []}]}], "bed")
      true
      iex> NiceTrie.member?([{"be", [{"er", []}, {"d", []}]}, {"a", [{"t", []}]}], "bet")
      false
      iex> NiceTrie.member?([{"be", [{"er", []}, {"d", []}]}, {"a", [{"t", []}]}], "bear")
      false
  """
  # Walk the nodes at this level: descend into a node whose prefix starts the
  # word, otherwise try the remaining siblings. Clause order matters: a
  # non-empty level is always inspected before the empty-word base case.
  def member?([{prefix, children} | siblings], word) do
    case String.starts_with?(word, prefix) do
      true -> member?(children, String.replace(word, prefix, "", global: false))
      false -> member?(siblings, word)
    end
  end

  def member?(_trie, ""), do: true
  def member?([], _word), do: false
  def member?(nil, _word), do: false

  @doc ~S"""
  Load a list of words and return a NiceTrie
  """
  def load(words), do: store([], words)

  # Fold each word into the trie in order.
  def store(trie, [word | remaining]), do: store(store_word(trie, word), remaining)
  def store(trie, []), do: trie

  @doc ~S"""
  Store a word into the NiceTrie

  ## Examples

      iex> NiceTrie.store_word([], "a")
      [{"a", []}]
      iex> NiceTrie.store_word([{"a", []}], "a")
      [{"a", []}]
      iex> NiceTrie.store_word([{"a", []}], "at")
      [{"a", [{"t", []}]}]
      iex> NiceTrie.store_word([{"a", []}], "be")
      [{"be", []}, {"a", []}]
      iex> NiceTrie.store_word([{"be", []}, {"a", []}], "beer")
      [{"be", [{"er", []}]}, {"a", []}]
  """
  # Only the head node's prefix is compared; words that do not share it are
  # prepended as a fresh node at this level.
  def store_word([{prefix, children} | siblings] = trie, word) do
    if String.starts_with?(word, prefix) do
      rest_of_word = String.replace(word, prefix, "", global: false)
      [{prefix, store_word(children, rest_of_word)} | siblings]
    else
      [{word, []} | trie]
    end
  end

  def store_word(trie, ""), do: trie
  def store_word([], word), do: [{word, []}]
end
|
lib/nice_trie.ex
| 0.538255
| 0.413536
|
nice_trie.ex
|
starcoder
|
defmodule Pbuf.Encoder do
  # Low-level protocol-buffers wire-format encoder: varints, zigzag encoding,
  # field prefixes (tag + wire type) and typed field serialization to iodata.
  import Bitwise, only: [bsr: 2, bsl: 2, bor: 2, band: 2]

  defmodule Error do
    # Raised when a value does not fit the declared protobuf field type.
    defexception [:message, :value, :tag, :type]
  end

  @doc """
  Generates a field prefix. This is the tag number + wire type
  """
  @spec prefix(pos_integer, atom) :: binary
  def prefix(tag, type) do
    # Wire format: (tag << 3) | wire_type, encoded as a varint.
    tag
    |> bsl(3)
    |> bor(wire_type(type))
    |> varint()
  end

  # Maps a scalar type to its protobuf wire type (64-bit=1, length-delimited=2,
  # 32-bit=5, everything else varint=0).
  @spec wire_type(atom) :: integer
  def wire_type(:fixed64), do: 1
  def wire_type(:sfixed64), do: 1
  def wire_type(:double), do: 1
  def wire_type(:string), do: 2
  def wire_type(:bytes), do: 2
  def wire_type(:struct), do: 2
  def wire_type(:fixed32), do: 5
  def wire_type(:sfixed32), do: 5
  def wire_type(:float), do: 5
  def wire_type(_), do: 0 # varint

  @spec varint(integer) :: iodata
  # Unroll the single-byte cases (0..127) into literal clauses at compile time.
  for n <- (0..127) do
    def varint(unquote(n)) do
      <<unquote(n)>>
    end
  end
  # Negative integers are reinterpreted as their unsigned 64-bit two's
  # complement value before varint encoding (standard protobuf behavior).
  def varint(n) when n < 0 do
    <<n::64-unsigned-native>> = <<n::64-signed-native>>
    varint(n)
  end
  # Emit 7 bits per byte, MSB set on every byte except the last.
  def varint(n) do
    <<1::1, band(n, 127)::7, varint(bsr(n, 7))::binary>>
  end

  # Zigzag encoding maps signed to unsigned: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3 ...
  @spec zigzag(integer) :: integer
  def zigzag(val) when val >= 0 do
    val * 2
  end
  def zigzag(val) do
    val * -2 - 1
  end

  @doc """
  Encodes a value
  """
  @spec field(atom, any) :: iodata
  def field(:int32, n), do: varint(n)
  def field(:int64, n), do: varint(n)
  def field(:uint32, n), do: varint(n)
  def field(:uint64, n), do: varint(n)
  def field(:sint32, n), do: n |> zigzag |> varint
  def field(:sint64, n), do: n |> zigzag |> varint
  def field(:bool, n) when n == true, do: varint(1)
  def field(:bool, n) when n == false, do: varint(0)
  def field(:enum, n), do: field(:int32, n)
  def field(:fixed64, n), do: <<n::64-little>>
  def field(:sfixed64, n), do: <<n::64-signed-little>>
  def field(:double, n), do: <<n::64-float-little>>
  def field(:string, n), do: field(:bytes, n)
  def field(:fixed32, n), do: <<n::32-little>>
  def field(:sfixed32, n), do: <<n::32-signed-little>>
  def field(:float, n), do: <<n::32-float-little>>
  # Length-delimited: varint byte length followed by the raw bytes.
  def field(:bytes, n) do
    bin = :erlang.iolist_to_binary(n)
    len = varint(byte_size(bin))
    <<len::binary, bin::binary>>
  end
  # Embedded message: delegate encoding to the struct's own module.
  def field(:struct, n) do
    encoded = n.__struct__.encode_to_iodata!(n)
    len = varint(:erlang.iolist_size(encoded))
    [len, encoded]
  end

  @doc """
  Encodes a field. This means we encode the prefix + the value. We also do
  type checking and, omit any nil / default values.
  """
  @spec field(atom, any, binary) :: iodata
  # nil / defaults
  # Proto3 semantics: default-valued fields are omitted from the wire entirely.
  def field(_type, nil, _prefix), do: <<>>
  def field(:bool, false, _prefix), do: <<>>
  def field(:int32, 0, _prefix), do: <<>>
  def field(:int64, 0, _prefix), do: <<>>
  def field(:uint32, 0, _prefix), do: <<>>
  def field(:uint64, 0, _prefix), do: <<>>
  def field(:sint32, 0, _prefix), do: <<>>
  def field(:sint64, 0, _prefix), do: <<>>
  def field(:fixed32, 0, _prefix), do: <<>>
  def field(:fixed64, 0, _prefix), do: <<>>
  def field(:sfixed32, 0, _prefix), do: <<>>
  def field(:sfixed64, 0, _prefix), do: <<>>
  def field(:float, 0, _prefix), do: <<>>
  def field(:float, 0.0, _prefix), do: <<>>
  def field(:double, 0, _prefix), do: <<>>
  def field(:double, 0.0, _prefix), do: <<>>
  def field(:string, "", _prefix), do: <<>>
  def field(:bytes, <<>>, _prefix), do: <<>>

  # Numeric range bounds used by the guards below.
  @int32_max 0x7FFFFFFF
  @int32_min -0x80000000
  @int64_max 0x7FFFFFFFFFFFFFFF
  @int64_min -0x8000000000000000
  @uint32_max 0xFFFFFFFF
  @uint64_max 0xFFFFFFFFFFFFFFFF

  def field(:bool, true, prefix) do
    [prefix, <<1>>]
  end
  def field(:bool, val, prefix) do
    raise_invalid(:bool, val, prefix)
  end
  def field(:int32, val, prefix) when is_integer(val) and val <= @int32_max and val >= @int32_min do
    [prefix, field(:int32, val)]
  end
  def field(:int32, val, prefix) do
    raise_invalid(:int32, val, prefix)
  end
  def field(:int64, val, prefix) when is_integer(val) and val <= @int64_max and val >= @int64_min do
    [prefix, field(:int64, val)]
  end
  def field(:int64, val, prefix) do
    raise_invalid(:int64, val, prefix)
  end
  def field(:uint32, val, prefix) when is_integer(val) and val <= @uint32_max and val >= 0 do
    [prefix, field(:uint32, val)]
  end
  def field(:uint32, val, prefix) do
    raise_invalid(:uint32, val, prefix)
  end
  def field(:uint64, val, prefix) when is_integer(val) and val <= @uint64_max and val >= 0 do
    [prefix, field(:uint64, val)]
  end
  def field(:uint64, val, prefix) do
    raise_invalid(:uint64, val, prefix)
  end
  def field(:sint32, val, prefix) when is_integer(val) and val <= @int32_max and val >= @int32_min do
    [prefix, field(:sint32, val)]
  end
  def field(:sint32, val, prefix) do
    raise_invalid(:sint32, val, prefix)
  end
  def field(:sint64, val, prefix) when is_integer(val) and val <= @int64_max and val >= @int64_min do
    [prefix, field(:sint64, val)]
  end
  def field(:sint64, val, prefix) do
    raise_invalid(:sint64, val, prefix)
  end
  def field(:fixed32, val, prefix) when is_integer(val) and val <= @uint32_max and val >= 0 do
    [prefix, field(:fixed32, val)]
  end
  def field(:fixed32, val, prefix) do
    raise_invalid(:fixed32, val, prefix)
  end
  def field(:fixed64, val, prefix) when is_integer(val) and val <= @uint64_max and val >= 0 do
    [prefix, field(:fixed64, val)]
  end
  def field(:fixed64, val, prefix) do
    raise_invalid(:fixed64, val, prefix)
  end
  def field(:sfixed32, val, prefix) when is_integer(val) and val <= @int32_max and val >= @int32_min do
    [prefix, field(:sfixed32, val)]
  end
  def field(:sfixed32, val, prefix) do
    raise_invalid(:sfixed32, val, prefix)
  end
  def field(:sfixed64, val, prefix) when is_integer(val) and val <= @int64_max and val >= @int64_min do
    [prefix, field(:sfixed64, val)]
  end
  def field(:sfixed64, val, prefix) do
    raise_invalid(:sfixed64, val, prefix)
  end
  def field(:float, val, prefix) when is_number(val) do
    [prefix, field(:float, val)]
  end
  def field(:float, val, prefix) do
    raise_invalid(:float, val, prefix)
  end
  def field(:double, val, prefix) when is_number(val) do
    [prefix, field(:double, val)]
  end
  def field(:double, val, prefix) do
    raise_invalid(:double, val, prefix)
  end
  def field(:string, val, prefix) when is_binary(val) do
    [prefix, field(:string, val)]
  end
  # Atoms are encoded by their string representation.
  def field(:string, val, prefix) when is_atom(val) do
    field(:string, Atom.to_string(val), prefix)
  end
  def field(:string, val, prefix) do
    raise_invalid(:string, val, prefix)
  end
  def field(:bytes, val, prefix) when is_binary(val) or is_list(val) do
    [prefix, field(:bytes, val)]
  end
  def field(:bytes, val, prefix) do
    raise_invalid(:bytes, val, prefix)
  end
  def field(:struct, %{__struct__: _} = val, prefix) do
    [prefix, field(:struct, val)]
  end
  def field(:struct, val, prefix) do
    raise_invalid(:struct, val, prefix)
  end

  # Enum values are mapped to their integer via the generated module's to_int/1.
  @spec enum_field(module, any, binary) :: iodata
  def enum_field(_mod, nil, _prefix) do
    <<>>
  end
  def enum_field(mod, val, prefix) do
    field(:int32, mod.to_int(val), prefix)
  end

  # Packed repeated enum: prefix + total byte length + concatenated varints.
  @spec repeated_enum_field(module, any, binary) :: iodata
  def repeated_enum_field(_mod, nil, _prefix) do
    <<>>
  end
  def repeated_enum_field(_mod, [], _prefix) do
    <<>>
  end
  def repeated_enum_field(mod, vals, prefix) when is_list(vals) do
    encoded = Enum.reduce(vals, [], fn val, acc ->
      [acc, field(:int32, mod.to_int(val))]
    end)
    byte_size = :erlang.iolist_size(encoded)
    [prefix, varint(byte_size), encoded]
  end

  # Maps are encoded as repeated key/value sub-messages, one entry per pair.
  @spec map_field(binary, atom, binary, atom, any, binary) :: iodata
  def map_field(_kprefix, _ktype, _vprefix, _vtype, nil, _prefix) do
    <<>>
  end
  def map_field(_kprefix, _ktype, _vprefix, _vtype, map, _prefix) when map_size(map) == 0 do
    <<>>
  end
  def map_field(kprefix, ktype, vprefix, vtype, map, prefix) do
    Enum.reduce(map, [], fn {key, value}, acc ->
      bin = [
        field(ktype, key, kprefix),
        field(vtype, value, vprefix)
      ]
      len = :erlang.iolist_size(bin)
      [[prefix, varint(len), bin] | acc]
    end)
  end

  @spec repeated_field(atom, any, binary) :: iodata
  def repeated_field(_type, nil, _prefix) do
    <<>>
  end
  def repeated_field(_type, [], _prefix) do
    <<>>
  end
  # In proto3, only scalar numeric types are packed.
  def repeated_field(type, enum, prefix) when type in [:bytes, :string] do
    Enum.reduce(enum, [], fn value, acc ->
      [acc, prefix, field(type, value)]
    end)
  end
  def repeated_field(:struct, enum, prefix) do
    Enum.reduce(enum, [], fn value, acc ->
      [acc, field(:struct, value, prefix)]
    end)
  end
  # Packed scalar numerics: single prefix + length + concatenated payloads.
  def repeated_field(type, enum, prefix) do
    encoded = Enum.reduce(enum, [], fn value, acc ->
      [acc, field(type, value)]
    end)
    byte_size = :erlang.iolist_size(encoded)
    [prefix, varint(byte_size), encoded]
  end

  # Unpacked repeated: every element gets its own prefix.
  def repeated_unpacked_field(type, enum, prefix) do
    Enum.reduce(enum, [], fn value, acc ->
      [acc, prefix, field(type, value)]
    end)
  end

  def oneof_field(_choice, nil, _fun) do
    <<>>
  end
  # The integer argument selects the oneof container representation:
  # 0 = {choice, value} tuple, 1 = %{__type: choice, value: value} map,
  # 2 = single-entry %{choice => value} map.
  def oneof_field(choice, {choice, value}, 0, fun) do
    fun.(value)
  end
  def oneof_field(choice, %{__type: choice, value: value}, 1, fun) do
    fun.(value)
  end
  def oneof_field(choice, value, 2, fun) when map_size(value) == 1 do
    case :maps.next(:maps.iterator(value)) do
      {^choice, value, :none} -> fun.(value)
      _ -> <<>>
    end
  end
  def oneof_field(_choice, _value, _, _prefix) do
    <<>>
  end

  # Recover the field tag from the first prefix byte for the error report.
  @spec raise_invalid(atom, any, binary) :: no_return
  defp raise_invalid(type, val, <<prefix, _::binary>>) do
    tag = bsr(prefix, 3)
    raise Error,
      tag: tag,
      type: type,
      value: val,
      message: "#{inspect(val)} is not a valid #{type} (#{tag})"
  end
end
|
lib/pbuf/encoder.ex
| 0.751375
| 0.432123
|
encoder.ex
|
starcoder
|
defmodule Exnoops.Pathbot do
  @moduledoc """
  Module to interact with Github's Noop: Pathbot

  See the [official `noop` documentation](https://noopschallenge.com/challenges/pathbot) for API information including the accepted parameters.
  """
  require Logger
  import Exnoops.API

  @noop "pathbot"

  @doc """
  Start helping `pathbot` out of the maze!

  ## Examples

      iex> Exnoops.Pathbot.start()
      {:ok, %{
        "status" => "in-progress",
        "message" => "You find yourself in a strange room. You're not sure how you got here but you know you need to escape, somehow.",
        "exits" => [ "N", "S" ],
        "description" => "You are in a bright long dining room with exits to the North and South. You sense that the maze's exit is to the North, at least 6 rooms away..",
        "mazeExitDirection" => "N",
        "mazeExitDistance" => 6,
        "locationPath" => "/pathbot/rooms/LU62ZaD_SqudPvH3Qt3kJQ"
      }}
  """
  @spec start :: {atom(), map()}
  def start do
    Logger.debug("Calling Pathbot.start()")
    # post/2 already returns `{:ok, body}` or an error tuple, so it can be
    # passed straight through (the previous identity `case` was redundant).
    post("/" <> @noop <> "/start", [])
  end

  @doc """
  Submit directions to pathbot

  ## Examples

      iex> Exnoops.Pathbot.submit_direction("/pathbot/rooms/LU62ZaD_SqudPvH3Qt3kJQ", "N")
      {:ok, %{
        "status" => "in-progress",
        "message" => "You are trapped in a maze",
        "exits" => [ "N", "S" ],
        "description" => "You are in a chartreuse rectangular storage room with exits to the North and South. You sense that the maze's exit is to the North, at least 5 rooms away..",
        "mazeExitDirection" => "N",
        "mazeExitDistance" => 5,
        "locationPath" => "/pathbot/rooms/OkNMk8D_XfLtYgnicZWzcA"
      }}

      iex> Exnoops.Pathbot.submit_direction("/pathbot/rooms/RPq3xhL51USGI_iU16alKA", "N")
      {:ok, %{
        "status" => "finished",
        "description" => "Congratulations! You have escaped the maze."
      }}
  """
  @spec submit_direction(String.t(), String.t()) :: {atom(), map()}
  def submit_direction(path, direction) when is_binary(path) and is_binary(direction) do
    # Fixed: the debug message previously had an unbalanced "(".
    Logger.debug("Calling Pathbot.submit_direction(#{path}, #{direction})")
    post(path, %{"direction" => direction})
  end
end
|
lib/exnoops/pathbot.ex
| 0.727298
| 0.522507
|
pathbot.ex
|
starcoder
|
defmodule Lab42.SimpleStateMachine.Runner do
  use Lab42.SimpleStateMachine.Types
  @moduledoc false

  # Drives the state machine: consumes `input` one element at a time, looking
  # up the current state's transitions in `states` and applying the first one
  # whose trigger matches. `data` is a map whose `:data` key carries the user
  # accumulator; `:matched` and `:input` are updated per transition.
  @spec run( state_t(), list(), any(), map()) :: any()
  def run(state, input, data, states)
  # Input exhausted: invoke the optional :end handler.
  def run(_state, [], data, states), do: _end_state(states, data)
  # :halt bypasses the :end handler and returns the accumulator as-is.
  def run(:halt, _, data, _), do: data.data
  def run(:end, _, data, states), do: _end_state(states, data)
  def run(state, [input|rest], data, states) do
    case Map.fetch(states, state) do
      {:ok, transitions} ->
        # Transitions are tried in declaration order; the first truthy trigger
        # wins. No match (or no state entry) is a hard error.
        case Enum.find_value(transitions, &_find_transition(input, &1, state)) do
          {transition, matched} -> _execute_transition(transition, matched, input, rest, data, states)
          _ -> raise "No transition found in state #{inspect state}, on input #{inspect input}"
        end
      _ -> raise "No transitions defined for state #{inspect state}"
    end
  end

  # Runs the optional :end handler; without one, returns the raw accumulator.
  @spec _end_state( map(), any() ) :: any()
  defp _end_state(states, data) do
    case Map.get(states, :end) do
      nil -> data.data
      fun -> fun.(data)
    end
  end

  @spec _execute_transition( transition_t(), any(), any(), list(), any(), map() ) :: any()
  defp _execute_transition(transition, matched, input, rest, data, states)
  # A nil transition function only changes state; the accumulator is untouched.
  defp _execute_transition({_, nil, new_state}, _matched, _input, rest, data, states) do
    run(new_state, rest, data, states)
  end
  defp _execute_transition({_, transition_fn, new_state}, matched, input, rest, data, states) do
    new_data = transition_fn.(%{data | matched: matched, input: input})
    run(new_state, rest, %{data | data: new_data}, states)
  end

  # Normalizes 1- and 2-element transition tuples to the full
  # {trigger, fun, next_state} form (missing next_state = stay in state),
  # then returns {transition, matched} if the trigger matches the input.
  @spec _find_transition( any(), transition_t(), state_t() ) :: maybe({complete_transition_t(), any()})
  defp _find_transition(input, transition, current_state)
  defp _find_transition(input, {trigger}, current_state) do
    _find_transition(input, {trigger, nil, current_state}, current_state)
  end
  defp _find_transition(input, {trigger, transition_fn}, current_state) do
    _find_transition(input, {trigger, transition_fn, current_state}, current_state)
  end
  defp _find_transition(input, {trigger, _fun, _ns}=transition, _) do
    case _match_transition(trigger, input) do
      nil -> nil
      false -> nil
      matched -> {transition, matched}
    end
  end

  # Trigger match: `true` always matches; a function is a predicate/extractor;
  # anything else is treated as a regex run against the input.
  @spec _match_transition( trigger_t(), any() ) :: any()
  defp _match_transition(trigger, input)
  defp _match_transition(true,_), do: true
  defp _match_transition(trigger_fn, input) when is_function(trigger_fn) do
    trigger_fn.(input)
  end
  defp _match_transition(trigger_fn, input) do
    # NOTE(review): this clause assumes the trigger is a %Regex{} — confirm
    # against trigger_t() in Lab42.SimpleStateMachine.Types.
    Regex.run(trigger_fn, input)
  end
end
|
lib/lab42/simple_state_machine/runner.ex
| 0.639624
| 0.63341
|
runner.ex
|
starcoder
|
defmodule Blockchain.BlockSetter do
  @moduledoc """
  This module is a utility module that performs setters on a block struct.
  """
  alias Block.Header
  alias Blockchain.Block
  alias Blockchain.BlockGetter
  alias Blockchain.Chain
  alias MerklePatriciaTree.Trie
  alias MerklePatriciaTree.TrieStorage

  @doc """
  Calculates the `number` for a new block. This implements Eq.(38) from
  the Yellow Paper.

  ## Examples

      iex> Blockchain.Block.set_block_number(%Blockchain.Block{header: %Block.Header{extra_data: "hello"}}, %Blockchain.Block{header: %Block.Header{number: 32}})
      %Blockchain.Block{header: %Block.Header{number: 33, extra_data: "hello"}}

      iex> Blockchain.Block.set_block_number(%Blockchain.Block{header: %Block.Header{extra_data: "hello"}}, %Blockchain.Block{header: %Block.Header{number: nil}})
      %Blockchain.Block{header: %Block.Header{number: nil, extra_data: "hello"}}
  """
  @spec set_block_number(Block.t(), Block.t()) :: Block.t()
  # Parent with an unknown number propagates nil rather than crashing.
  def set_block_number(block, %Block{header: %Header{number: nil}}) do
    %{block | header: %{block.header | number: nil}}
  end

  def set_block_number(block, %Block{header: %Header{number: parent_block_number}})
      when is_integer(parent_block_number) do
    %{block | header: %{block.header | number: parent_block_number + 1}}
  end

  @doc """
  Set the difficulty of a new block based on Eq.(39), better defined
  in `Block.Header`.

  # TODO: Validate these results

  ## Examples

      iex> Blockchain.Block.set_block_difficulty(
      ...>   %Blockchain.Block{header: %Block.Header{number: 0, timestamp: 0}},
      ...>   Blockchain.Test.ropsten_chain(),
      ...>   nil
      ...> )
      %Blockchain.Block{header: %Block.Header{number: 0, timestamp: 0, difficulty: 1_048_576}}

      iex> Blockchain.Block.set_block_difficulty(
      ...>   %Blockchain.Block{header: %Block.Header{number: 1, timestamp: 1_479_642_530}},
      ...>   Blockchain.Test.ropsten_chain(),
      ...>   %Blockchain.Block{header: %Block.Header{number: 0, timestamp: 0, difficulty: 1_048_576}}
      ...> )
      %Blockchain.Block{header: %Block.Header{number: 1, timestamp: 1_479_642_530, difficulty: 997_888}}
  """
  @spec set_block_difficulty(Block.t(), Chain.t(), Block.t()) :: Block.t()
  def set_block_difficulty(block, chain, parent_block) do
    difficulty = BlockGetter.get_difficulty(block, parent_block, chain)

    %{block | header: %{block.header | difficulty: difficulty}}
  end

  @doc """
  Sets the gas limit of a given block, or raises
  if the block limit is not acceptable. The validity
  check is defined in Eq.(45), Eq.(46) and Eq.(47) of
  the Yellow Paper.

  ## Examples

      iex> Blockchain.Block.set_block_gas_limit(
      ...>   %Blockchain.Block{header: %Block.Header{}},
      ...>   Blockchain.Test.ropsten_chain(),
      ...>   %Blockchain.Block{header: %Block.Header{gas_limit: 1_000_000}},
      ...>   1_000_500
      ...> )
      %Blockchain.Block{header: %Block.Header{gas_limit: 1_000_500}}

      iex> Blockchain.Block.set_block_gas_limit(
      ...>   %Blockchain.Block{header: %Block.Header{}},
      ...>   Blockchain.Test.ropsten_chain(),
      ...>   %Blockchain.Block{header: %Block.Header{gas_limit: 1_000_000}},
      ...>   2_000_000
      ...> )
      ** (RuntimeError) Block gas limit not valid
  """
  @spec set_block_gas_limit(Block.t(), Chain.t(), Block.t(), EVM.Gas.t()) :: Block.t()
  def set_block_gas_limit(block, chain, parent_block, gas_limit) do
    if not Header.is_gas_limit_valid?(
         gas_limit,
         parent_block.header.gas_limit,
         chain.params[:gas_limit_bound_divisor],
         chain.params[:min_gas_limit]
       ),
       do: raise("Block gas limit not valid")

    %{block | header: %{block.header | gas_limit: gas_limit}}
  end

  @doc """
  Sets block's parent's hash
  """
  @spec set_block_parent_hash(Block.t(), Block.t()) :: Block.t()
  def set_block_parent_hash(block, parent_block) do
    # Prefer the cached hash; only recompute when it is absent.
    parent_hash = parent_block.block_hash || Block.hash(parent_block)
    header = %{block.header | parent_hash: parent_hash}

    %{block | header: header}
  end

  @doc """
  Sets the state_root of a given block from a trie.

  ## Examples

      iex> trie = %MerklePatriciaTree.Trie{root_hash: <<5::256>>, db: {MerklePatriciaTree.DB.ETS, :get_state}}
      iex> Blockchain.Block.set_state(%Blockchain.Block{}, trie)
      %Blockchain.Block{header: %Block.Header{state_root: <<5::256>>}}
  """
  @spec set_state(Block.t(), Trie.t()) :: Block.t()
  def set_state(block, trie) do
    root_hash = TrieStorage.root_hash(trie)

    put_header(block, :state_root, root_hash)
  end

  # Sets a given block header field as a shortcut when we want to change a
  # single field. (Was a stray `'''...'''` charlist literal acting as dead
  # code; `defp` cannot carry `@doc`, so a plain comment is used instead.)
  @spec put_header(Block.t(), any(), any()) :: Block.t()
  defp put_header(block, key, value) do
    new_header = Map.put(block.header, key, value)

    %{block | header: new_header}
  end
end
|
apps/blockchain/lib/blockchain/block_setter.ex
| 0.816772
| 0.460168
|
block_setter.ex
|
starcoder
|
defmodule ResxCSV.Decoder do
@moduledoc """
Decode CSV string resources into erlang terms.
### Media Types
Only CSV/TSV types are valid. This can either be a CSV/TSV subtype or suffix.
Valid: `text/csv`, `application/geo+csv`, `text/tab-separated-values`
If an error is being returned when attempting to open a data URI due to
`{ :invalid_reference, "invalid media type: \#{type}" }`, the MIME type
will need to be added to the config.
To add additional media types to be decoded, that can be done by configuring
the `:csv_types` option.
config :resx_csv,
csv_types: [
{ "application/x.my-type", "application/x.erlang.native", ?; }
]
The `:csv_types` field should contain a list of 3 element tuples with the
format `{ pattern :: String.pattern | Regex.t, replacement :: String.t, separator :: char }`.
The `pattern` and `replacement` are arguments to `String.replace/3`. While the
separator specifies the character literal used to separate columns in the document.
The replacement becomes the new media type of the transformed resource. Nested
media types will be preserved. By default the current matches will be replaced
(where the `csv` type part is), with `x.erlang.native`, in order to denote
that the content is now a native erlang type. If this behaviour is not desired
simply override the match with `:csv_types` for the media types that should
not be handled like this.
### Options
`:skip_errors` - expects a `boolean` value, defaults to `false`. This option
specifies whether any decoding/formatting errors in the CSV should be skipped.
If they are skipped then those rows will not appear in the decoded result, if
should not be skipped then the decoding fails if there are any errors.
`:separator` - expects a `char` value, defaults to the separator literal that
is returned for the given MIME/csv_type. This option allows for the separator
to be overriden.
`:headers` - expects a `boolean` value, defaults to `true`. This option specifies
whether the first row will be used as a header.
`:strip_fields` - expects a `boolean` value, defaults to `false`. This option
specifies whether any surrounding whitespace will be trimmed.
`:validate_row_length` - expects a `boolean` value, defaults to `true`. This
option specifies whether the row length needs to be the same or whether it
can be of variable length.
`:delimiter` - expects a `String.t` or `Regex.t` value, defaults to `"\\r\\n"`.
This option specifies the delimiter use to separate the different rows. As
streams are already assumed to be separated, this only applies to non-streamed
content.
Resx.Resource.transform(resource, ResxCSV.Decoder, skip_errors: true, headers: false)
"""
use Resx.Transformer
alias Resx.Resource.Content
@impl Resx.Transformer
def transform(resource = %{ content: content }, opts) do
case validate_type(content.type) do
{ :ok, { type, separator } } ->
content = prepare_content(content, opts[:delimiter] || "\r\n")
decode = if(opts[:skip_errors], do: &filter_decode/2, else: &CSV.decode!/2)
opts = [
headers: Keyword.get(opts, :headers, true),
separator: opts[:separator] || separator,
strip_fields: opts[:strip_fields] || false,
validate_row_length: Keyword.get(opts, :validate_row_length, true)
]
{ :ok, %{ resource | content: %{ content | type: type, data: content |> decode.(opts) } } }
error -> error
end
end
defp filter_decode(row, opts), do: CSV.decode(row, opts) |> Stream.filter(&filter_row/1) |> Stream.map(&elem(&1, 1))
defp filter_row({ :ok, _ }), do: true
defp filter_row(_), do: false
defp prepare_content(content = %Content.Stream{}, _), do: content
defp prepare_content(content = %Content{}, delimiter), do: %Content.Stream{ type: content.type, data: String.split(content.data, delimiter) }
# Built-in MIME matchers for CSV and TSV (plain or `+`-suffixed) types,
# paired with the replacement producing the decoded native type and the
# field separator character to use for that format.
@default_csv_types [
  { ~r/\/(csv(\+csv)?|(.*?\+)csv)(;|$)/, "/\\3x.erlang.native\\4", ?, },
  { ~r/\/(tab-separated-values(\+tab-separated-values)?|(.*?\+)tab-separated-values)(;|$)/, "/\\3x.erlang.native\\4", ?\t }
]

# Checks the resource's media types against the application-configured
# matchers first and the built-in ones second; errors when neither
# recognises the type.
defp validate_type(types) do
  configured = Application.get_env(:resx_csv, :csv_types, [])

  case validate_type(types, configured) || validate_type(types, @default_csv_types) do
    nil -> { :error, { :internal, "Invalid resource type" } }
    match -> { :ok, match }
  end
end
# Walks the matcher list until one of its patterns matches the first media
# type. Returns the type list with the matched type rewritten, paired with
# that matcher's separator, or nil when no pattern applies.
defp validate_type(_, []), do: nil
defp validate_type([type | remaining_types] = type_list, [{ pattern, replacement, separator } | matchers]) do
  if Regex.match?(pattern, type) do
    { [String.replace(type, pattern, replacement) | remaining_types], separator }
  else
    validate_type(type_list, matchers)
  end
end
end
|
lib/resx_csv/decoder.ex
| 0.914644
| 0.454956
|
decoder.ex
|
starcoder
|
defmodule AWS.EMR do
  @moduledoc """
  Amazon EMR is a web service that makes it easy to process large amounts of data
  efficiently.

  Amazon EMR uses Hadoop processing combined with several AWS products to do tasks
  such as web indexing, data mining, log file analysis, machine learning,
  scientific simulation, and data warehousing.
  """

  @doc """
  Adds an instance fleet to a running cluster.

  The instance fleet configuration is available only in Amazon EMR versions 4.8.0
  and later, excluding 5.0.x.
  """
  def add_instance_fleet(client, input, options \\ []) do
    request(client, "AddInstanceFleet", input, options)
  end

  @doc """
  Adds one or more instance groups to a running cluster.
  """
  def add_instance_groups(client, input, options \\ []) do
    request(client, "AddInstanceGroups", input, options)
  end

  @doc """
  AddJobFlowSteps adds new steps to a running cluster.

  A maximum of 256 steps are allowed in each job flow.

  If your cluster is long-running (such as a Hive data warehouse) or complex, you
  may require more than 256 steps to process your data. You can bypass the
  256-step limitation in various ways, including using SSH to connect to the
  master node and submitting queries directly to the software running on the
  master node, such as Hive and Hadoop. For more information on how to do this,
  see [Add More than 256 Steps to a Cluster](https://docs.aws.amazon.com/emr/latest/ManagementGuide/AddMoreThan256Steps.html)
  in the *Amazon EMR Management Guide*.

  A step specifies the location of a JAR file stored either on the master node of
  the cluster or in Amazon S3. Each step is performed by the main function of the
  main class of the JAR file. The main class can be specified either in the
  manifest of the JAR or by using the MainFunction parameter of the step.

  Amazon EMR executes each step in the order listed. For a step to be considered
  complete, the main function must exit with a zero exit code and all Hadoop jobs
  started while the step was running must have completed and run successfully.

  You can only add steps to a cluster that is in one of the following states:
  STARTING, BOOTSTRAPPING, RUNNING, or WAITING.
  """
  def add_job_flow_steps(client, input, options \\ []) do
    request(client, "AddJobFlowSteps", input, options)
  end

  @doc """
  Adds tags to an Amazon EMR resource.

  Tags make it easier to associate clusters in various ways, such as grouping
  clusters to track your Amazon EMR resource allocation costs. For more
  information, see [Tag Clusters](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-tags.html).
  """
  def add_tags(client, input, options \\ []) do
    request(client, "AddTags", input, options)
  end

  @doc """
  Cancels a pending step or steps in a running cluster.

  Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0.
  A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is
  idempotent but asynchronous; it does not guarantee a step will be canceled, even
  if the request is successfully submitted. You can only cancel steps that are in
  a `PENDING` state.
  """
  def cancel_steps(client, input, options \\ []) do
    request(client, "CancelSteps", input, options)
  end

  @doc """
  Creates a security configuration, which is stored in the service and can be
  specified when a cluster is created.
  """
  def create_security_configuration(client, input, options \\ []) do
    request(client, "CreateSecurityConfiguration", input, options)
  end

  @doc """
  Deletes a security configuration.
  """
  def delete_security_configuration(client, input, options \\ []) do
    request(client, "DeleteSecurityConfiguration", input, options)
  end

  @doc """
  Provides cluster-level details including status, hardware and software
  configuration, VPC settings, and so on.
  """
  def describe_cluster(client, input, options \\ []) do
    request(client, "DescribeCluster", input, options)
  end

  @doc """
  This API is deprecated and will eventually be removed.

  We recommend you use `ListClusters`, `DescribeCluster`, `ListSteps`,
  `ListInstanceGroups` and `ListBootstrapActions` instead.

  DescribeJobFlows returns a list of job flows that match all of the supplied
  parameters. The parameters can include a list of job flow IDs, job flow states,
  and restrictions on job flow creation date and time.

  Regardless of supplied parameters, only job flows created within the last two
  months are returned.

  If no parameters are supplied, then job flows matching either of the following
  criteria are returned:

    * Job flows created and completed in the last two weeks

    * Job flows created within the last two months that are in one of
      the following states: `RUNNING`, `WAITING`, `SHUTTING_DOWN`, `STARTING`

  Amazon EMR can return a maximum of 512 job flow descriptions.
  """
  def describe_job_flows(client, input, options \\ []) do
    request(client, "DescribeJobFlows", input, options)
  end

  @doc """
  Provides details of a notebook execution.
  """
  def describe_notebook_execution(client, input, options \\ []) do
    request(client, "DescribeNotebookExecution", input, options)
  end

  @doc """
  Provides the details of a security configuration by returning the configuration
  JSON.
  """
  def describe_security_configuration(client, input, options \\ []) do
    request(client, "DescribeSecurityConfiguration", input, options)
  end

  @doc """
  Provides more detail about the cluster step.
  """
  def describe_step(client, input, options \\ []) do
    request(client, "DescribeStep", input, options)
  end

  @doc """
  Returns the Amazon EMR block public access configuration for your AWS account in
  the current Region.

  For more information see [Configure Block Public Access for Amazon EMR](https://docs.aws.amazon.com/emr/latest/ManagementGuide/configure-block-public-access.html)
  in the *Amazon EMR Management Guide*.
  """
  def get_block_public_access_configuration(client, input, options \\ []) do
    request(client, "GetBlockPublicAccessConfiguration", input, options)
  end

  @doc """
  Fetches the attached managed scaling policy for an Amazon EMR cluster.
  """
  def get_managed_scaling_policy(client, input, options \\ []) do
    request(client, "GetManagedScalingPolicy", input, options)
  end

  @doc """
  Provides information about the bootstrap actions associated with a cluster.
  """
  def list_bootstrap_actions(client, input, options \\ []) do
    request(client, "ListBootstrapActions", input, options)
  end

  @doc """
  Provides the status of all clusters visible to this AWS account.

  Allows you to filter the list of clusters based on certain criteria; for
  example, filtering by cluster creation date and time or by status. This call
  returns a maximum of 50 clusters per call, but returns a marker to track the
  paging of the cluster list across multiple ListClusters calls.
  """
  def list_clusters(client, input, options \\ []) do
    request(client, "ListClusters", input, options)
  end

  @doc """
  Lists all available details about the instance fleets in a cluster.

  The instance fleet configuration is available only in Amazon EMR versions 4.8.0
  and later, excluding 5.0.x versions.
  """
  def list_instance_fleets(client, input, options \\ []) do
    request(client, "ListInstanceFleets", input, options)
  end

  @doc """
  Provides all available details about the instance groups in a cluster.
  """
  def list_instance_groups(client, input, options \\ []) do
    request(client, "ListInstanceGroups", input, options)
  end

  @doc """
  Provides information for all active EC2 instances and EC2 instances terminated
  in the last 30 days, up to a maximum of 2,000.

  EC2 instances in any of the following states are considered active:
  AWAITING_FULFILLMENT, PROVISIONING, BOOTSTRAPPING, RUNNING.
  """
  def list_instances(client, input, options \\ []) do
    request(client, "ListInstances", input, options)
  end

  @doc """
  Provides summaries of all notebook executions.

  You can filter the list based on multiple criteria such as status, time range,
  and editor id. Returns a maximum of 50 notebook executions and a marker to track
  the paging of a longer notebook execution list across multiple
  `ListNotebookExecution` calls.
  """
  def list_notebook_executions(client, input, options \\ []) do
    request(client, "ListNotebookExecutions", input, options)
  end

  @doc """
  Lists all the security configurations visible to this account, providing their
  creation dates and times, and their names.

  This call returns a maximum of 50 clusters per call, but returns a marker to
  track the paging of the cluster list across multiple ListSecurityConfigurations
  calls.
  """
  def list_security_configurations(client, input, options \\ []) do
    request(client, "ListSecurityConfigurations", input, options)
  end

  @doc """
  Provides a list of steps for the cluster in reverse order unless you specify
  `stepIds` with the request or filter by `StepStates`.

  You can specify a maximum of ten `stepIDs`.
  """
  def list_steps(client, input, options \\ []) do
    request(client, "ListSteps", input, options)
  end

  @doc """
  Modifies the number of steps that can be executed concurrently for the cluster
  specified using ClusterID.
  """
  def modify_cluster(client, input, options \\ []) do
    request(client, "ModifyCluster", input, options)
  end

  @doc """
  Modifies the target On-Demand and target Spot capacities for the instance fleet
  with the specified InstanceFleetID within the cluster specified using ClusterID.

  The call either succeeds or fails atomically.

  The instance fleet configuration is available only in Amazon EMR versions 4.8.0
  and later, excluding 5.0.x versions.
  """
  def modify_instance_fleet(client, input, options \\ []) do
    request(client, "ModifyInstanceFleet", input, options)
  end

  @doc """
  ModifyInstanceGroups modifies the number of nodes and configuration settings of
  an instance group.

  The input parameters include the new target instance count for the group and the
  instance group ID. The call will either succeed or fail atomically.
  """
  def modify_instance_groups(client, input, options \\ []) do
    request(client, "ModifyInstanceGroups", input, options)
  end

  @doc """
  Creates or updates an automatic scaling policy for a core instance group or task
  instance group in an Amazon EMR cluster.

  The automatic scaling policy defines how an instance group dynamically adds and
  terminates EC2 instances in response to the value of a CloudWatch metric.
  """
  def put_auto_scaling_policy(client, input, options \\ []) do
    request(client, "PutAutoScalingPolicy", input, options)
  end

  @doc """
  Creates or updates an Amazon EMR block public access configuration for your AWS
  account in the current Region.

  For more information see [Configure Block Public Access for Amazon EMR](https://docs.aws.amazon.com/emr/latest/ManagementGuide/configure-block-public-access.html)
  in the *Amazon EMR Management Guide*.
  """
  def put_block_public_access_configuration(client, input, options \\ []) do
    request(client, "PutBlockPublicAccessConfiguration", input, options)
  end

  @doc """
  Creates or updates a managed scaling policy for an Amazon EMR cluster.

  The managed scaling policy defines the limits for resources, such as EC2
  instances that can be added or terminated from a cluster. The policy only
  applies to the core and task nodes. The master node cannot be scaled after
  initial configuration.
  """
  def put_managed_scaling_policy(client, input, options \\ []) do
    request(client, "PutManagedScalingPolicy", input, options)
  end

  @doc """
  Removes an automatic scaling policy from a specified instance group within an
  EMR cluster.
  """
  def remove_auto_scaling_policy(client, input, options \\ []) do
    request(client, "RemoveAutoScalingPolicy", input, options)
  end

  @doc """
  Removes a managed scaling policy from a specified EMR cluster.
  """
  def remove_managed_scaling_policy(client, input, options \\ []) do
    request(client, "RemoveManagedScalingPolicy", input, options)
  end

  @doc """
  Removes tags from an Amazon EMR resource.

  Tags make it easier to associate clusters in various ways, such as grouping
  clusters to track your Amazon EMR resource allocation costs. For more
  information, see [Tag Clusters](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-tags.html).

  The following example removes the stack tag with value Prod from a cluster:
  """
  def remove_tags(client, input, options \\ []) do
    request(client, "RemoveTags", input, options)
  end

  @doc """
  RunJobFlow creates and starts running a new cluster (job flow).

  The cluster runs the steps specified. After the steps complete, the cluster
  stops and the HDFS partition is lost. To prevent loss of data, configure the
  last step of the job flow to store results in Amazon S3. If the
  `JobFlowInstancesConfig` `KeepJobFlowAliveWhenNoSteps` parameter is set to
  `TRUE`, the cluster transitions to the WAITING state rather than shutting down
  after the steps have completed.

  For additional protection, you can set the `JobFlowInstancesConfig`
  `TerminationProtected` parameter to `TRUE` to lock the cluster and prevent it
  from being terminated by API call, user intervention, or in the event of a job
  flow error.

  A maximum of 256 steps are allowed in each job flow.

  If your cluster is long-running (such as a Hive data warehouse) or complex, you
  may require more than 256 steps to process your data. You can bypass the
  256-step limitation in various ways, including using the SSH shell to connect to
  the master node and submitting queries directly to the software running on the
  master node, such as Hive and Hadoop. For more information on how to do this,
  see [Add More than 256 Steps to a Cluster](https://docs.aws.amazon.com/emr/latest/ManagementGuide/AddMoreThan256Steps.html)
  in the *Amazon EMR Management Guide*.

  For long running clusters, we recommend that you periodically store your
  results.

  The instance fleets configuration is available only in Amazon EMR versions 4.8.0
  and later, excluding 5.0.x versions. The RunJobFlow request can contain
  InstanceFleets parameters or InstanceGroups parameters, but not both.
  """
  def run_job_flow(client, input, options \\ []) do
    request(client, "RunJobFlow", input, options)
  end

  @doc """
  SetTerminationProtection locks a cluster (job flow) so the EC2 instances in the
  cluster cannot be terminated by user intervention, an API call, or in the event
  of a job-flow error.

  The cluster still terminates upon successful completion of the job flow. Calling
  `SetTerminationProtection` on a cluster is similar to calling the Amazon EC2
  `DisableAPITermination` API on all EC2 instances in a cluster.

  `SetTerminationProtection` is used to prevent accidental termination of a
  cluster and to ensure that in the event of an error, the instances persist so
  that you can recover any data stored in their ephemeral instance storage.

  To terminate a cluster that has been locked by setting
  `SetTerminationProtection` to `true`, you must first unlock the job flow by a
  subsequent call to `SetTerminationProtection` in which you set the value to
  `false`.

  For more information, see [Managing Cluster Termination](https://docs.aws.amazon.com/emr/latest/ManagementGuide/UsingEMR_TerminationProtection.html)
  in the *Amazon EMR Management Guide*.
  """
  def set_termination_protection(client, input, options \\ []) do
    request(client, "SetTerminationProtection", input, options)
  end

  @doc """
  Sets the `Cluster$VisibleToAllUsers` value, which determines whether the cluster
  is visible to all IAM users of the AWS account associated with the cluster.

  Only the IAM user who created the cluster or the AWS account root user can call
  this action. The default value, `true`, indicates that all IAM users in the AWS
  account can perform cluster actions if they have the proper IAM policy
  permissions. If set to `false`, only the IAM user that created the cluster can
  perform actions. This action works on running clusters. You can override the
  default `true` setting when you create a cluster by using the
  `VisibleToAllUsers` parameter with `RunJobFlow`.
  """
  def set_visible_to_all_users(client, input, options \\ []) do
    request(client, "SetVisibleToAllUsers", input, options)
  end

  @doc """
  Starts a notebook execution.
  """
  def start_notebook_execution(client, input, options \\ []) do
    request(client, "StartNotebookExecution", input, options)
  end

  @doc """
  Stops a notebook execution.
  """
  def stop_notebook_execution(client, input, options \\ []) do
    request(client, "StopNotebookExecution", input, options)
  end

  @doc """
  TerminateJobFlows shuts a list of clusters (job flows) down.

  When a job flow is shut down, any step not yet completed is canceled and the EC2
  instances on which the cluster is running are stopped. Any log files not already
  saved are uploaded to Amazon S3 if a LogUri was specified when the cluster was
  created.

  The maximum number of clusters allowed is 10. The call to `TerminateJobFlows` is
  asynchronous. Depending on the configuration of the cluster, it may take up to
  1-5 minutes for the cluster to completely terminate and release allocated
  resources, such as Amazon EC2 instances.
  """
  def terminate_job_flows(client, input, options \\ []) do
    request(client, "TerminateJobFlows", input, options)
  end

  # Builds, signs (SigV4), and dispatches one JSON-RPC style EMR request.
  # The `action` string maps directly onto the `X-Amz-Target` header that
  # selects the EMR operation.
  @spec request(AWS.Client.t(), binary(), map(), list()) ::
          {:ok, map() | nil, map()}
          | {:error, term()}
  defp request(client, action, input, options) do
    client = %{client | service: "elasticmapreduce"}
    host = build_host("elasticmapreduce", client)
    url = build_url(host, client)

    headers = [
      {"Host", host},
      {"Content-Type", "application/x-amz-json-1.1"},
      {"X-Amz-Target", "ElasticMapReduce.#{action}"}
    ]

    payload = encode!(client, input)
    headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
    post(client, url, payload, headers, options)
  end

  # Executes the signed POST. A 200 with a non-empty body is decoded from
  # JSON; an empty body yields `{:ok, nil, response}`. Any other status is
  # wrapped as `{:error, {:unexpected_response, response}}`.
  defp post(client, url, payload, headers, options) do
    case AWS.Client.request(client, :post, url, payload, headers, options) do
      {:ok, %{status_code: 200, body: body} = response} ->
        body = if body != "", do: decode!(client, body)
        {:ok, body, response}

      {:ok, response} ->
        {:error, {:unexpected_response, response}}

      error = {:error, _reason} -> error
    end
  end

  # The "local" region targets a developer endpoint (explicit override or
  # localhost); otherwise the standard `<service>.<region>.<endpoint>` host
  # is constructed.
  defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
    endpoint
  end

  defp build_host(_endpoint_prefix, %{region: "local"}) do
    "localhost"
  end

  defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
    "#{endpoint_prefix}.#{region}.#{endpoint}"
  end

  defp build_url(host, %{:proto => proto, :port => port}) do
    "#{proto}://#{host}:#{port}/"
  end

  defp encode!(client, payload) do
    AWS.Client.encode!(client, payload, :json)
  end

  defp decode!(client, payload) do
    AWS.Client.decode!(client, payload, :json)
  end
end
|
lib/aws/generated/emr.ex
| 0.878614
| 0.676119
|
emr.ex
|
starcoder
|
defmodule InteropProxy.Sanitize do
  @moduledoc """
  Translates the interop server responses to our own and vice versa.

  Incoming server JSON (string-keyed, camelCase maps) is converted into the
  protobuf-backed `InteropProxy.Message.Interop` structs; outgoing structs
  are converted back into the plain maps the server expects. Altitudes and
  heights are converted between feet (server) and meters (our messages).
  """

  # Aliasing the main messages.
  alias InteropProxy.Message.Interop.{
    Position, AerialPosition, InteropMission, Obstacles, InteropTelem, Odlc,
    OdlcList, InteropMessage
  }

  # Aliasing the nested messages.
  alias InteropProxy.Message.Interop.InteropMission.FlyZone
  alias InteropProxy.Message.Interop.Obstacles.StationaryObstacle

  @doc """
  Builds an `InteropMission` from the server's mission map.

  When no mission is available (`nil`), a message with `current_mission:
  false` is returned instead.
  """
  def sanitize_mission(nil) do
    %InteropMission{
      time: time(),
      current_mission: false
    }
  end

  def sanitize_mission(mission) do
    %InteropMission{
      time: time(),
      current_mission: true,
      air_drop_pos:
        mission
        |> Map.get("airDropPos", %{})
        |> sanitize_position,
      fly_zones:
        mission
        |> Map.get("flyZones", %{})
        |> sanitize_fly_zones,
      waypoints:
        mission
        |> Map.get("waypoints", %{})
        |> sanitize_aerial_position,
      off_axis_pos:
        mission
        |> Map.get("offAxisOdlcPos", %{})
        |> sanitize_position,
      emergent_pos:
        mission
        |> Map.get("emergentLastKnownPos", %{})
        |> sanitize_position,
      search_area:
        mission
        |> Map.get("searchGridPoints", %{})
        |> sanitize_aerial_position
    }
  end

  # Converts each fly-zone map into a `FlyZone`, translating altitudes from
  # feet to meters.
  defp sanitize_fly_zones(fly_zones) do
    fly_zones
    |> Enum.map(fn fly_zone ->
      %FlyZone{
        alt_msl_max:
          fly_zone
          |> Map.get("altitudeMax", 0.0)
          |> meters,
        alt_msl_min:
          fly_zone
          |> Map.get("altitudeMin", 0.0)
          |> meters,
        boundary:
          fly_zone
          |> Map.get("boundaryPoints", %{})
          |> sanitize_position
      }
    end)
  end

  @doc """
  Builds an `Obstacles` message from the server's obstacles map.
  """
  def sanitize_obstacles(obstacles) do
    %Obstacles{
      time: time(),
      stationary:
        obstacles
        |> Map.get("stationaryObstacles", [])
        |> sanitize_stationary_obstacles
    }
  end

  defp sanitize_stationary_obstacles(stationary) do
    stationary
    |> Enum.map(fn obs ->
      %StationaryObstacle{
        pos:
          obs
          |> sanitize_position,
        height:
          obs
          |> Map.get("height", 0.0)
          |> meters,
        radius:
          obs
          |> Map.get("radius", 0.0)
          |> meters
      }
    end)
  end

  @doc """
  Converts our telemetry message into the map the server expects, with the
  altitude converted back to feet and yaw reported as heading.
  """
  def sanitize_outgoing_telemetry(%InteropTelem{} = telem) do
    %{
      latitude:
        telem.pos
        |> sanitize_outgoing_latitude,
      longitude:
        telem.pos
        |> sanitize_outgoing_longitude,
      altitude:
        telem.pos.alt_msl
        |> feet,
      heading: telem.yaw
    }
  end

  @doc """
  Builds an `Odlc` message from the server's ODLC map, optionally attaching
  the object's image binary.
  """
  def sanitize_odlc(odlc, image \\ <<>>) do
    %Odlc{
      time: time(),
      id:
        odlc
        |> Map.get("id", 0),
      type:
        odlc
        |> Map.get("type")
        |> string_to_atom(:type),
      pos:
        odlc
        |> sanitize_position,
      orientation:
        odlc
        |> Map.get("orientation")
        |> sanitize_orientation,
      shape:
        odlc
        |> Map.get("shape")
        |> string_to_atom(:shape),
      background_color:
        odlc
        |> Map.get("shapeColor")
        |> string_to_atom(:color),
      alphanumeric:
        odlc
        |> Map.get("alphanumeric", ""),
      alphanumeric_color:
        odlc
        |> Map.get("alphanumericColor")
        |> string_to_atom(:color),
      description:
        odlc
        |> Map.get("description", ""),
      autonomous:
        odlc
        |> Map.get("autonomous", false),
      image: image
    }
  end

  @doc """
  Wraps a list of ODLC messages in an `OdlcList`, stamping every entry with
  the same timestamp.
  """
  def sanitize_odlc_list(odlcs) do
    time = time()

    %OdlcList{time: time, list: Enum.map(odlcs, &Map.put(&1, :time, time))}
  end

  @doc """
  Converts our ODLC message into a `{map, image}` pair for the server.

  Emergent objects carry a description instead of the shape/color fields.
  """
  def sanitize_outgoing_odlc(%Odlc{type: :EMERGENT} = odlc) do
    outgoing_odlc = %{
      type:
        odlc.type
        |> atom_to_string,
      latitude:
        odlc.pos
        |> sanitize_outgoing_latitude,
      longitude:
        odlc.pos
        |> sanitize_outgoing_longitude,
      description:
        odlc.description
        |> parse_string,
      autonomous:
        case odlc.autonomous do
          nil -> false
          bool -> bool
        end
    }

    outgoing_image =
      case odlc.image do
        nil -> <<>>
        string -> string
      end

    {outgoing_odlc, outgoing_image}
  end

  def sanitize_outgoing_odlc(%Odlc{} = odlc) do
    outgoing_odlc = %{
      type:
        odlc.type
        |> atom_to_string,
      latitude:
        odlc.pos
        |> sanitize_outgoing_latitude,
      longitude:
        odlc.pos
        |> sanitize_outgoing_longitude,
      orientation:
        odlc.orientation
        |> sanitize_outgoing_orientation,
      shape:
        odlc.shape
        |> atom_to_string,
      shapeColor:
        odlc.background_color
        |> atom_to_string,
      alphanumeric:
        odlc.alphanumeric
        |> parse_string,
      # Fixed: was `alphanumeric_color`, inconsistent with the camelCase
      # keys used elsewhere (`shapeColor` here, `alphanumericColor` on the
      # incoming side).
      alphanumericColor:
        odlc.alphanumeric_color
        |> atom_to_string,
      autonomous:
        case odlc.autonomous do
          nil -> false
          bool -> bool
        end
    }

    outgoing_image =
      case odlc.image do
        nil -> <<>>
        string -> string
      end

    {outgoing_odlc, outgoing_image}
  end

  @doc """
  Wraps free-form text in a timestamped `InteropMessage`.
  """
  def sanitize_message(text) do
    %InteropMessage{
      time: time(),
      text: text
    }
  end

  # A list of positions is sanitized element-wise.
  defp sanitize_position(pos) when is_list(pos) do
    pos
    |> Enum.map(&sanitize_position/1)
  end

  # Fixed: the trailing comma after the `lon:` entry was a syntax error.
  defp sanitize_position(pos) do
    %Position{
      lat:
        pos
        |> Map.get("latitude", 0.0),
      lon:
        pos
        |> Map.get("longitude", 0.0)
    }
  end

  defp sanitize_aerial_position(pos) when is_list(pos) do
    pos
    |> Enum.map(&sanitize_aerial_position/1)
  end

  defp sanitize_aerial_position(pos) do
    %AerialPosition{
      lat:
        pos
        |> Map.get("latitude", 0.0),
      lon:
        pos
        |> Map.get("longitude", 0.0),
      alt_msl:
        pos
        |> Map.get("altitude", 0.0)
        |> meters
    }
  end

  defp sanitize_outgoing_latitude(%Position{} = pos), do: pos.lat
  defp sanitize_outgoing_latitude(%AerialPosition{} = pos), do: pos.lat
  defp sanitize_outgoing_latitude(nil), do: 0.0

  defp sanitize_outgoing_longitude(%Position{} = pos), do: pos.lon
  defp sanitize_outgoing_longitude(%AerialPosition{} = pos), do: pos.lon
  defp sanitize_outgoing_longitude(nil), do: 0.0

  # Compass string from the server -> orientation atom.
  defp sanitize_orientation(string) do
    case string do
      nil -> :UNKNOWN_ORIENTATION
      "N" -> :NORTH
      "NE" -> :NORTHEAST
      "E" -> :EAST
      "SE" -> :SOUTHEAST
      "S" -> :SOUTH
      "SW" -> :SOUTHWEST
      "W" -> :WEST
      "NW" -> :NORTHWEST
    end
  end

  # Orientation atom -> compass string for the server (nil when unknown).
  defp sanitize_outgoing_orientation(atom) do
    case atom do
      nil -> nil
      :UNKNOWN_ORIENTATION -> nil
      :NORTH -> "N"
      :NORTHEAST -> "NE"
      :EAST -> "E"
      :SOUTHEAST -> "SE"
      :SOUTH -> "S"
      :SOUTHWEST -> "SW"
      :WEST -> "W"
      :NORTHWEST -> "NW"
    end
  end

  # Unit conversions between the server's feet and our meters.
  defp meters(feet), do: feet * 0.3048
  defp feet(meters), do: meters / 0.3048

  # NOTE(review): `String.to_atom/1` is called on server-supplied strings;
  # atoms are never garbage collected, so `String.to_existing_atom/1` would
  # be safer if all enum atoms are guaranteed to be loaded — verify.
  defp string_to_atom(nil, :type), do: :STANDARD
  defp string_to_atom(nil, :shape), do: :UNKNOWN_SHAPE
  defp string_to_atom(nil, :color), do: :UNKNOWN_COLOR
  defp string_to_atom(string, _), do: string |> String.to_atom()

  defp atom_to_string(nil), do: nil
  defp atom_to_string(:UNKNOWN_SHAPE), do: nil
  defp atom_to_string(:UNKNOWN_COLOR), do: nil
  defp atom_to_string(atom), do: atom |> Atom.to_string() |> String.upcase()

  defp parse_string(<<>>), do: nil
  defp parse_string(string), do: string

  # Current UTC time as fractional unix seconds (millisecond resolution).
  defp time() do
    DateTime.utc_now()
    |> DateTime.to_unix(:millisecond)
    |> Kernel./(1000)
  end
end
|
services/interop-proxy/lib/interop_proxy/sanitize.ex
| 0.759939
| 0.428712
|
sanitize.ex
|
starcoder
|
defmodule PhoenixLiveViewExt.MultiRender do
  @moduledoc """
  The module provides a @before_compile macro enabling multiple template files per live component or live view.
  Each template file gets pre-compiled as a private render/2 function in its live component or live view module.
  This in turn enables (conditional) invocation from the render/1 functions.
  The template files should share the component or live view folder and start with their respective underscored names.
  In the example below we have a component that requires a different template when flagged for deletion (as part of,
  say, an appended container list) than the one used in the base-case scenario.

  defmodule MyComponent do
    use Phoenix.LiveComponent
    require PhoenixLiveViewExt.MultiRender
    @before_compile PhoenixLiveViewExt.MultiRender

    @impl true
    def render( %{ delete: :true} = assigns) do
      render( "my_component_delete.html", assigns)
    end
    def render( assigns) do
      render( "my_component_enjoy.html", assigns)
    end
  end

  The component folder tree may look as follows:

  my_app
    lib
      my_app_web
        live
          components
            my_component.ex
            my_component_delete.html.leex
            my_component_enjoy.html.leex
            ..
  """

  # Injects one private render/2 clause per matching template file into the
  # module being compiled. Only runs when the module defines render/1 (i.e.
  # it is a live view or live component); otherwise injects nothing.
  defmacro __before_compile__( env) do
    render? = Module.defines?( env.module, { :render, 1})

    if render? do
      # Templates live next to the module's source file and are matched by
      # the module's underscored name (see template_pattern/1).
      root = Path.dirname( env.file)
      pattern = template_pattern( env)
      templates = Phoenix.Template.find_all( root, pattern)

      for template <- templates do
        # e.g. "my_component_delete.html" — the basename (minus the engine
        # extension) becomes the first argument of the injected render/2.
        basename = Path.basename( template, Path.extname( template))
        relative_path = Path.relative_to_cwd( template)

        # Pick the template engine by the trailing extension (e.g. :leex).
        ext = template |> Path.extname() |> String.trim_leading( ".") |> String.to_atom()
        engine = Map.fetch!( Phoenix.Template.engines(), ext)
        ast = engine.compile( template, basename)

        quote do
          # Recompile the module when the template file changes.
          @external_resource unquote( relative_path)
          defp render( unquote( basename), var!( assigns)) when is_map( var!( assigns)) do
            unquote( ast)
          end
        end
      end
    end
  end

  # Glob pattern matching all templates belonging to the module: its
  # underscored name followed by anything, ending in ".html".
  defp template_pattern( env) do
    env.module
    |> Module.split()
    |> List.last()
    |> Macro.underscore()
    |> Kernel.<>( "*.html")
  end
end
|
lib/multi_render/multi_render.ex
| 0.761804
| 0.552781
|
multi_render.ex
|
starcoder
|
defmodule PortAudio.StreamError do
  @moduledoc """
  Raised by the bang variants in `PortAudio.Stream` (`new!/3`, `read!/1`,
  `write!/2`) when the underlying operation returns `{:error, reason}`.
  The failing reason atom is available in the `:reason` field.
  """
  defexception [:reason, :message]
end
defmodule PortAudio.Stream do
  @moduledoc """
  An audio stream backed by a native PortAudio stream resource.

  Streams are opened with `new/3` (or `new!/3`), started with `start/1`,
  and read from / written to with `read/1` and `write/2`. The bang
  variants raise `PortAudio.StreamError` instead of returning
  `{:error, reason}`.
  """

  defstruct [:resource]

  @type t :: %PortAudio.Stream{}

  @type stream_params :: %{
          device: PortAudio.Device.t(),
          channel_count: non_neg_integer,
          sample_format: PortAudio.Native.sample_format(),
          suggested_latency: float
        }

  # TODO: accept flags
  @doc """
  Open a stream with the given input and output parameters.

  If the input parameters are nil then only the output device will be used
  and vice-versa for output parameters.
  """
  @spec new(
          input_params :: stream_params | nil,
          output_params :: stream_params | nil,
          sample_rate :: float
        ) :: {:ok, t} | {:error, atom}
  def new(input_params, output_params, sample_rate) do
    # nil passes straight through to the native layer, meaning "no device
    # on this side"; otherwise the param map is flattened into the tuple
    # shape the NIF expects.
    input_params =
      if input_params do
        param_map_to_native(input_params)
      end

    output_params =
      if output_params do
        param_map_to_native(output_params)
      end

    with {:ok, s} <- PortAudio.Native.stream_open(input_params, output_params, sample_rate, []) do
      {:ok, %PortAudio.Stream{resource: s}}
    end
  end

  @doc """
  Same as `new`, but will throw a `PortAudio.StreamError` on failure instead
  of returning `{:error, reason}`.
  """
  # Fixed: the spec previously referenced `PortAudio.Native.stream_params()`
  # while `new/3` uses the local `stream_params` type for the same arguments.
  @spec new!(
          input_params :: stream_params | nil,
          output_params :: stream_params | nil,
          sample_rate :: float
        ) :: t | no_return
  def new!(input_params, output_params, sample_rate) do
    case new(input_params, output_params, sample_rate) do
      {:ok, s} ->
        s

      {:error, reason} ->
        raise PortAudio.StreamError, reason: reason
    end
  end

  # Flattens a stream-params map into the {device_index, channels, format,
  # latency} tuple consumed by the native layer.
  defp param_map_to_native(map) do
    {
      map.device.index,
      map.channel_count,
      map.sample_format,
      map.suggested_latency
    }
  end

  @doc """
  Start the stream, returning `{:ok, stream}` on success or `{:error, reason}`
  otherwise.
  """
  @spec start(t) :: {:ok, t} | {:error, atom}
  def start(%PortAudio.Stream{resource: s} = stream) do
    with :ok <- PortAudio.Native.stream_start(s) do
      {:ok, stream}
    end
  end

  @doc """
  Stop the stream, returning `{:ok, stream}` on success or `{:error, reason}`
  otherwise.
  """
  @spec stop(t) :: {:ok, t} | {:error, atom}
  def stop(%PortAudio.Stream{resource: s} = stream) do
    with :ok <- PortAudio.Native.stream_stop(s) do
      {:ok, stream}
    end
  end

  @doc """
  Abort the stream, immediately killing any processes. Returns `{:ok, stream}`
  on success or `{:error, reason}` on failure.
  """
  @spec abort(t) :: {:ok, t} | {:error, atom}
  def abort(%PortAudio.Stream{resource: s} = stream) do
    with :ok <- PortAudio.Native.stream_abort(s) do
      {:ok, stream}
    end
  end

  @doc """
  Returns `true` if the stream is active.
  """
  @spec active?(t) :: boolean
  def active?(%PortAudio.Stream{resource: s}) do
    PortAudio.Native.stream_is_active(s)
  end

  @doc """
  Returns `true` if the stream has been stopped either through calling `stop`
  or `abort`.
  """
  @spec stopped?(t) :: boolean
  def stopped?(%PortAudio.Stream{resource: s}) do
    PortAudio.Native.stream_is_stopped(s)
  end

  @doc """
  Read a binary from the audio stream. Will return `{:ok, binary}` on success
  or `{:error, reason}` on failure.
  """
  @spec read(t) :: {:ok, binary} | {:error, atom}
  def read(%PortAudio.Stream{resource: s}) do
    PortAudio.Native.stream_read(s)
  end

  @doc """
  Same as `read`, but throws a `PortAudio.StreamError` instead of returning
  `{:error, reason}` on failure.
  """
  @spec read!(t) :: binary | no_return
  def read!(%PortAudio.Stream{} = stream) do
    case read(stream) do
      {:ok, data} ->
        data

      {:error, reason} ->
        raise PortAudio.StreamError, reason: reason
    end
  end

  @doc """
  Attempt to write binary data to the stream, returning `:ok` on success or
  `{:error, reason}` on failure.
  """
  @spec write(t, binary) :: :ok | {:error, atom}
  def write(%PortAudio.Stream{resource: s}, data) do
    PortAudio.Native.stream_write(s, data)
  end

  @doc """
  Same as `write`, but throws a `PortAudio.StreamError` instead of returning
  `{:error, reason}` on failure.
  """
  @spec write!(t, binary) :: :ok | no_return
  def write!(%PortAudio.Stream{} = stream, data) do
    case write(stream, data) do
      :ok ->
        :ok

      {:error, reason} ->
        raise PortAudio.StreamError, reason: reason
    end
  end

  # Opaque inspect that hides the native resource.
  # Fixed: previously printed the duplicated "#PortAudio.PortAudio.Stream<>".
  defimpl Inspect do
    def inspect(_stream, _opts) do
      "#PortAudio.Stream<>"
    end
  end
end
|
lib/portaudio/stream.ex
| 0.666605
| 0.401923
|
stream.ex
|
starcoder
|
defmodule PassiveSupport.MapList do
  # ** Planned **
  #
  # The commentary below is speculative, meant to inform the ideal working
  # shape of a MapList data structure. It is not yet implemented, however,
  # and there is no set timetable for completion as of now.
  #
  # It is kept as a plain comment (with `@moduledoc false` at the bottom)
  # because the module previously assigned `@moduledoc` twice — once with
  # this text and once with `false` — which triggers a "redefining
  # @moduledoc" compiler warning while discarding the text anyway.
  #
  # Functions for interacting with a list of data in an indexed
  # manner. As maps are not a lightweight structure, usage of this
  # data structure is recommended for edge cases in which normal traversal
  # of a `List` would otherwise be too time inefficient for frequent
  # tasks. E.g., shuffling an arbitrarily large collection,
  # or generating permutations.
  #
  # MapLists are implemented to behave as a `List` in almost all
  # cases. The notable exception being that a `MapList` provides a
  # tuple of `{index, item}` to its implementation of `Enumerable`,
  # and always returns a `MapList` for further transformation. To
  # get the list of items from a `MapList`, use `MapList.values/1`.
  # You can also call `MapList.to_list/1` to get a list of the
  # `{index, item}` pairs.
  #
  #     iex> MapList.new(~W[Hello elixir world!])
  #     %MapList["Hello", "elixir", "world!"]
  #
  # Since MapLists can't be destructured, functions are provided to
  # apply transformations to them. Adding elements to either the
  # head or the tail of a MapList can be done quickly, as indices
  # will not need to be internally recalculated in these operations.
  #
  #     iex> map_list = MapList.push(MapList.new([1,2,3,4]), 5)
  #     %MapList[1, 2, 3, 4, 5]
  #     iex> map_list = MapList.unshift(map_list, 0)
  #     %MapList[0, 1, 2, 3, 4, 5]
  #
  # Removing elements from the head or tail is similarly fast.
  #
  #     iex> map_list = MapList.new(1..10)
  #     %MapList[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
  #     iex> {last, map_list} = MapList.pop(map_list)
  #     {10, %MapList[1, 2, 3, 4, 5, 6, 7, 8, 9]}
  #     iex> {first, map_list} = MapList.shift(map_list)
  #     {1, %MapList[2, 3, 4, 5, 6, 7, 8, 9]}
  #
  # (The `shift` example previously omitted its argument.)
  #
  # Replacing an element in the middle of the MapList is similarly
  # uncostly, but inserting or removing an element from the middle
  # of a MapList will cause the indices to be internally recalculated,
  # to avoid requiring to traverse and ignore a sentinel value
  # (or stop at the sentinel value, as parts of the `:array` module do).
  #
  #     iex> map_list = MapList.new(~W"Hello elixir world!")
  #     ...> |> MapList.insert_at(1, "brave")
  #     %MapList["Hello", "brave", "elixir", "world!"]
  #     iex> map_list
  #     ...> |> MapList.replace_at(1, "awesome")
  #     %MapList["Hello", "awesome", "elixir", "world!"]
  @moduledoc false
end
|
lib/passive_support/ext/map_list.ex
| 0.765155
| 0.6565
|
map_list.ex
|
starcoder
|
defmodule Timex.Parse.DateTime.Parsers do
@moduledoc false
alias Timex.Parse.DateTime.Helpers
use Combine
def year4(opts \\ []) do
min_digits =
case Keyword.get(opts, :padding) do
:none ->
1
_ ->
get_in(opts, [:min]) || 1
end
max_digits = get_in(opts, [:max]) || 4
expected_digits =
case {min_digits, max_digits} do
{min, min} -> "#{min} digit year"
{min, max} -> "#{min}-#{max} digit year"
end
Helpers.integer(Keyword.put(opts, :max, max_digits))
|> satisfy(fn year -> year > 0 end)
|> map(fn year -> [year4: year] end)
|> label(expected_digits)
end
def year2(opts \\ []) do
min_digits =
case Keyword.get(opts, :padding) do
:none ->
1
_ ->
get_in(opts, [:min]) || 1
end
max_digits = get_in(opts, [:max]) || 2
expected_digits =
case {min_digits, max_digits} do
{min, min} -> "#{min} digit year"
{min, max} -> "#{min}-#{max} digit year"
end
Helpers.integer(Keyword.put(opts, :max, max_digits))
|> satisfy(fn year -> year >= 0 end)
|> map(fn year -> [year2: year] end)
|> label(expected_digits)
end
# Parses the century component into a `[century: n]` keyword.
def century(opts \\ []) do
  opts
  |> Helpers.integer()
  |> map(&[century: &1])
  |> label("2 digit century")
end
# Parses a numeric month into a keyword via `Helpers.to_month/1`.
#
# With `padding: :none` a single digit is accepted; otherwise the minimum
# digit count comes from `opts[:min]` (defaulting to 1). `min_digits` only
# feeds the human-readable parser label — the underlying integer parser is
# bounded by `:max` alone.
#
# NOTE(review): the `satisfy` guard accepts `month in 0..12`, i.e. 0 passes —
# presumably `Helpers.to_month/1` or later validation rejects it; confirm
# whether `1..12` was intended.
def month2(opts \\ []) do
  min_digits =
    case Keyword.get(opts, :padding) do
      :none ->
        # This may be a one digit month
        1
      _ ->
        get_in(opts, [:min]) || 1
    end
  max_digits = get_in(opts, [:max]) || 2
  expected_digits =
    case {min_digits, max_digits} do
      {min, min} -> "#{min} digit month"
      {min, max} -> "#{min}-#{max} digit month"
    end
  Helpers.integer(Keyword.put(opts, :max, max_digits))
  |> satisfy(fn month -> month in 0..12 end)
  |> map(&Helpers.to_month/1)
  |> label(expected_digits)
end
def month_full(_) do
one_of(word_of(~r/[[:alpha:]]/u), Helpers.months())
|> map(&Helpers.to_month_num/1)
|> label("full month name")
end
def month_short(_) do
abbrs = Helpers.months() |> Enum.map(fn m -> String.slice(m, 0, 3) end)
one_of(word_of(~r/[[:alpha:]]/), abbrs)
|> map(&Helpers.to_month_num/1)
|> label("month abbreviation")
end
# Parses the day-of-month component, accepting only 1..31.
def day_of_month(opts \\ []) do
  opts
  |> Helpers.integer()
  |> satisfy(&(&1 in 1..31))
  |> map(&[day: &1])
  |> label("day of month")
end
def day_of_year(opts \\ []) do
Helpers.integer(opts)
|> satisfy(fn day -> day >= 1 && day <= 366 end)
|> map(fn n -> [day_of_year: n] end)
|> label("day of year")
end
def week_of_year(opts \\ []) do
Helpers.integer(opts)
|> satisfy(fn week -> week >= 1 && week <= 53 end)
|> map(fn n -> [week_of_year: n] end)
|> label("week of year")
end
defdelegate week_of_year_sun(opts \\ []), to: __MODULE__, as: :week_of_year
def weekday(_) do
fixed_integer(1)
|> satisfy(fn day -> day >= 1 && day <= 7 end)
|> map(fn n -> [weekday: n] end)
|> label("ordinal weekday")
end
def weekday_short(_) do
word_of(~r/[[:alpha:]]/u)
|> satisfy(&Helpers.is_weekday/1)
|> map(fn name -> Helpers.to_weekday(name) end)
|> label("weekday abbreviation")
end
def weekday_full(_) do
word_of(~r/[[:alpha:]]/u)
|> satisfy(&Helpers.is_weekday/1)
|> map(fn name -> Helpers.to_weekday(name) end)
|> label("weekday name")
end
def hour24(opts \\ []) do
Helpers.integer(opts)
|> satisfy(fn hour -> hour >= 0 && hour <= 24 end)
|> map(fn hour -> [hour24: hour] end)
|> label("hour between 0 and 24")
end
def hour12(opts \\ []) do
Helpers.integer(opts)
|> satisfy(fn hour -> hour >= 1 && hour <= 12 end)
|> map(fn hour -> [hour12: hour] end)
|> label("hour between 1 and 12")
end
def ampm_lower(_) do
one_of(word(), ["am", "pm"])
|> map(&Helpers.to_ampm/1)
|> label("am/pm")
end
def ampm_upper(_) do
one_of(word(), ["AM", "PM"])
|> map(&Helpers.to_ampm/1)
|> label("AM/PM")
end
def ampm(_) do
one_of(word(), ["am", "AM", "pm", "PM"])
|> map(&Helpers.to_ampm/1)
|> label("am/pm or AM/PM")
end
def minute(opts \\ []) do
Helpers.integer(opts)
|> satisfy(fn min -> min >= 0 && min <= 59 end)
|> map(fn min -> [min: min] end)
|> label("minute")
end
def second(opts \\ []) do
Helpers.integer(opts)
|> satisfy(fn sec -> sec >= 0 && sec <= 60 end)
|> map(fn sec -> [sec: sec] end)
|> label("second")
end
def second_fractional(_) do
map(pair_right(char("."), word_of(~r/\d{1,6}/)), &Helpers.to_sec_ms/1)
|> label("fractional second")
end
def seconds_epoch(opts \\ []) do
parser =
case get_in(opts, [:padding]) do
:spaces -> skip(spaces()) |> integer
_ -> integer()
end
parser
|> map(fn secs -> [sec_epoch: secs] end)
|> label("seconds since epoch")
end
def microseconds(_) do
label(map(word_of(~r/\d{1,6}/), &Helpers.parse_microseconds/1), "microseconds")
end
def milliseconds(_) do
label(map(word_of(~r/\d{1,3}/), &Helpers.parse_milliseconds/1), "milliseconds")
end
def zname(_) do
word_of(~r/[\/\w_-]/)
|> map(fn name -> [zname: name] end)
|> label("timezone name")
end
def zoffs(_) do
pipe(
[
one_of(char(), ["-", "+"]),
digit(),
digit(),
option(digit()),
option(digit())
],
fn
[sign, h1, h2, nil, nil] -> [zoffs: "#{sign}#{h1}#{h2}"]
[sign, h1, h2, m1, m2] -> [zoffs: "#{sign}#{h1}#{h2}#{m1}#{m2}"]
end
)
|> label("timezone offset (+/-hhmm)")
end
def zoffs_colon(_) do
pipe(
[
one_of(char(), ["-", "+"]),
digit(),
digit(),
char(":"),
digit(),
digit()
],
fn xs -> [zoffs_colon: Enum.join(xs)] end
)
|> label("timezone offset (+/-hh:mm)")
end
def zoffs_sec(_) do
pipe(
[
one_of(char(), ["-", "+"]),
digit(),
digit(),
char(":"),
digit(),
digit(),
char(":"),
digit(),
digit()
],
fn xs -> [zoffs_sec: Enum.join(xs)] end
)
|> label("timezone offset (+/-hh:mm:ss)")
end
def iso_date(_) do
sequence([
year4(padding: :zeroes, min: 4, max: 4),
ignore(char("-")),
month2(padding: :zeroes, min: 2, max: 2),
ignore(char("-")),
day_of_month(padding: :zeroes, min: 2, max: 2)
])
end
def iso_time(_) do
sequence([
hour24(padding: :zeroes, min: 2, max: 2),
ignore(char(":")),
minute(padding: :zeroes, min: 2, max: 2),
ignore(char(":")),
both(
second(padding: :zeroes, min: 2, max: 2),
option(second_fractional(padding: :zeroes)),
fn
[{:sec, _sec}] = res, nil -> res
[{:sec, _} = sec], [{:sec_fractional, _} = frac] -> [sec, frac]
end
)
])
end
def iso_week(_) do
sequence([
year4(padding: :zeroes),
ignore(char("-")),
ignore(char("W")),
week_of_year(padding: :zeroes)
])
end
def iso_weekday(opts \\ []) do
sequence([
iso_week(opts),
ignore(char("-")),
weekday(opts)
])
end
# ISO 8601 ordinal date: a 4-digit year, a dash, and the day of year.
def iso_ordinal(_) do
  sequence([
    # Fix: was `padding: :zeros` — normalized to `:zeroes`, the spelling
    # every other parser in this module passes for zero padding.
    year4(padding: :zeroes),
    ignore(char("-")),
    day_of_year(padding: :zeroes)
  ])
end
@doc """
ISO 8601 date/time format with timezone information.
NOTE: Deprecated. See iso8601_extended for documentation
"""
def iso8601(_opts \\ []), do: Timex.Parse.DateTime.Parsers.ISO8601Extended.parse()
@doc """
ISO 8601 date/time (extended) format with timezone information.
With zulu: true, assumes UTC timezone.
Examples:
2007-08-13T16:48:01+03:00
2007-08-13T13:48:01Z
"""
def iso8601_extended(_opts \\ []), do: Timex.Parse.DateTime.Parsers.ISO8601Extended.parse()
@doc """
ISO 8601 date/time (basic) format with timezone information.
With zulu: true, assumes UTC timezone.
Examples:
20070813T164801+0300
20070813T134801Z
"""
def iso8601_basic(opts \\ []) do
is_zulu? = get_in(opts, [:zulu])
parts = [
sequence([
year4(padding: :zeroes, min: 4, max: 4),
month2(padding: :zeroes, min: 2, max: 2),
day_of_month(padding: :zeroes, min: 2, max: 2)
]),
either(literal(char("T")), literal(space())),
choice([
sequence([
hour24(padding: :zeroes, min: 2, max: 2),
minute(padding: :zeroes, min: 2, max: 2),
both(
second(padding: :zeroes, min: 2, max: 2),
option(second_fractional(padding: :zeroes)),
fn
[{:sec, _sec}] = res, nil -> res
[{:sec, _} = sec], [{:sec_fractional, _} = frac] -> [sec, frac]
end
)
]),
sequence([
hour24(padding: :zeroes, min: 2, max: 2),
minute(padding: :zeroes, min: 2, max: 2)
]),
sequence([
hour24(padding: :zeroes, min: 2, max: 2)
])
])
]
case is_zulu? do
true ->
sequence(parts ++ [map(char("Z"), fn _ -> [zname: "Etc/UTC"] end)])
_ ->
sequence(
parts ++
[
choice([
map(char("Z"), fn _ -> [zname: "Etc/UTC"] end),
zoffs_sec(opts),
zoffs(opts)
])
]
)
end
end
@doc """
RFC 822 date/time format with timezone information.
Examples: `Mon, 05 Jun 14 23:20:59 Y`
## From the specification (RE: timezones):
Time zone may be indicated in several ways. "UT" is Univer-
sal Time (formerly called "Greenwich Mean Time"); "GMT" is per-
mitted as a reference to Universal Time. The military standard
uses a single character for each zone. "Z" is Universal Time.
"A" indicates one hour earlier, and "M" indicates 12 hours ear-
lier; "N" is one hour later, and "Y" is 12 hours later. The
letter "J" is not used. The other remaining two forms are taken
from ANSI standard X3.51-1975. One allows explicit indication of
the amount of offset from UT; the other uses common 3-character
strings for indicating time zones in North America.
"""
def rfc822(opts \\ []) do
is_zulu? = get_in(opts, [:zulu])
parts = [
option(
sequence([
weekday_short(opts),
literal(string(", "))
])
),
day_of_month(padding: :zeroes, min: 1, max: 2),
literal(space()),
month_short(opts),
literal(space()),
year2(padding: :zeroes),
literal(space()),
iso_time(opts)
]
case is_zulu? do
true ->
zone_parts = [
literal(space()),
map(one_of(word(), ["UT", "GMT", "Z"]), fn _ -> [zname: "Etc/UTC"] end)
]
sequence(parts ++ zone_parts)
_ ->
zone_parts = [
literal(space()),
choice([
zname(opts),
zoffs(opts),
map(one_of(word(), ["UT", "GMT", "Z"]), fn _ -> [zname: "Etc/UTC"] end),
map(one_of(char(), ["A", "M", "N", "Y", "J"]), fn
"A" -> [zoffs: "-0100"]
"M" -> [zoffs: "-1200"]
"N" -> [zoffs: "+0100"]
"Y" -> [zoffs: "+1200"]
"J" -> []
end)
])
]
sequence(parts ++ zone_parts)
end
end
@doc """
RFC 1123 date/time format with timezone information.
With zulu: true, assumes GMT
Examples:
Tue, 05 Mar 2013 23:25:19 GMT
Tue, 05 Mar 2013 23:25:19 +0200
"""
def rfc1123(opts \\ []) do
is_zulu? = get_in(opts, [:zulu])
parts = [
weekday_short(opts),
literal(string(", ")),
day_of_month(padding: :zeroes, min: 1, max: 2),
literal(space()),
month_short(opts),
literal(space()),
year4(padding: :zeroes),
literal(space()),
iso_time(opts)
]
case is_zulu? do
true ->
zone_parts = [
literal(space()),
map(char("Z"), fn _ -> [zname: "Etc/UTC"] end)
]
sequence(parts ++ zone_parts)
_ ->
zone_parts = [
literal(space()),
either(zname(opts), zoffs(opts))
]
sequence(parts ++ zone_parts)
end
end
@doc """
RFC 3339 date/time format with timezone information.
Example: `2013-03-05T23:25:19+02:00`
"""
def rfc3339(_opts \\ []), do: Timex.Parse.DateTime.Parsers.ISO8601Extended.parse()
@doc """
UNIX standard date/time format.
Example: `Tue Mar 5 23:25:19 PST 2013`
"""
def unix(opts \\ []) do
sequence([
weekday_short(opts),
literal(space()),
month_short(opts),
literal(space()),
day_of_month(padding: :spaces, min: 1, max: 2),
literal(space()),
iso_time(opts),
literal(space()),
zname(opts),
literal(space()),
year4(padding: :spaces, min: 4, max: 4)
])
end
@doc """
ANSI C standard date/time format.
Example: `Tue Mar 5 23:25:19 2013`
"""
def ansic(opts \\ []) do
sequence([
weekday_short(opts),
literal(space()),
month_short(opts),
literal(space()),
day_of_month(padding: :spaces, min: 1, max: 2),
literal(space()),
iso_time(opts),
literal(space()),
year4(padding: :spaces, min: 4, max: 4)
])
end
@doc """
ASN.1 UTCTime standard date/time format.
Example: `130305232519Z`
"""
def asn1_utc_time(_) do
parts = [
sequence([
year2(padding: :zeroes, min: 2, max: 2),
month2(padding: :zeroes, min: 2, max: 2),
day_of_month(padding: :zeroes, min: 2, max: 2)
]),
choice([
sequence([
hour24(padding: :zeroes, min: 2, max: 2),
minute(padding: :zeroes, min: 2, max: 2),
second(padding: :zeroes, min: 2, max: 2)
]),
sequence([
hour24(padding: :zeroes, min: 2, max: 2),
minute(padding: :zeroes, min: 2, max: 2)
])
])
]
sequence(parts ++ [map(char("Z"), fn _ -> [zname: "Etc/UTC"] end)])
end
@doc """
ASN.1 GeneralizedTime standard date/time format.
Example: `20130305232519`
asn1_generalized_time(zulu: true)
Example: `20130305232519Z`
asn1_generalized_time(zoffs: true)
Example: `20130305232519-0700`
"""
def asn1_generalized_time(opts \\ []) do
is_zulu? = get_in(opts, [:zulu])
is_zoffs? = get_in(opts, [:zoffs])
parts = [
sequence([
year4(padding: :zeroes, min: 4, max: 4),
month2(padding: :zeroes, min: 2, max: 2),
day_of_month(padding: :zeroes, min: 2, max: 2)
]),
choice([
sequence([
hour24(padding: :zeroes, min: 2, max: 2),
minute(padding: :zeroes, min: 2, max: 2),
both(
second(padding: :zeroes, min: 2, max: 2),
option(second_fractional(padding: :zeroes)),
fn
[{:sec, _sec}] = res, nil -> res
[{:sec, _} = sec], [{:sec_fractional, _} = frac] -> [sec, frac]
end
)
]),
sequence([
hour24(padding: :zeroes, min: 2, max: 2),
minute(padding: :zeroes, min: 2, max: 2),
second(padding: :zeroes, min: 2, max: 2)
]),
sequence([
hour24(padding: :zeroes, min: 2, max: 2),
minute(padding: :zeroes, min: 2, max: 2)
]),
sequence([
hour24(padding: :zeroes, min: 2, max: 2)
])
])
]
cond do
is_zulu? -> sequence(parts ++ [map(char("Z"), fn _ -> [zname: "Etc/UTC"] end)])
is_zoffs? -> sequence(parts ++ [zoffs(opts)])
true -> sequence(parts)
end
end
@doc """
Kitchen clock time format.
Example: `3:25PM`
"""
def kitchen(opts) do
sequence([
hour12(),
literal(char(":")),
minute(padding: :zeroes),
ampm(opts)
])
|> map(fn parts -> [kitchen: List.flatten(parts)] end)
end
@doc """
Month, day, and year sans century, in slashed style.
Example: `04/12/87`
"""
def slashed(_) do
opts = [padding: :zeroes, min: 2, max: 2]
sequence([
month2(opts),
day_of_month(opts),
year2(opts)
])
end
@doc """
Wall clock in strftime (%R) format.
Example: `23:30`
"""
def strftime_iso_clock(_) do
opts = [padding: :zeroes]
sequence([
hour24(opts),
literal(char(":")),
minute(opts)
])
end
@doc """
Wall clock in strftime (%T) format.
Example: `23:30:05`
"""
def strftime_iso_clock_full(_) do
opts = [padding: :zeroes]
sequence([
hour24(opts),
literal(char(":")),
minute(opts),
literal(char(":")),
second(opts)
])
end
@doc """
Kitchen clock in strftime (%r) format.
Example: `4:30:01 PM`
"""
def strftime_kitchen(opts \\ [padding: :zeroes]) do
sequence([
hour12(opts),
literal(char(":")),
minute(opts),
literal(char(":")),
second(opts),
literal(space()),
ampm_upper(opts)
])
|> map(fn parts -> [strftime_iso_kitchen: List.flatten(parts)] end)
end
@doc """
Friendly short date format. Uses spaces for padding on the day.
Example: ` 5-Jan-2014`
"""
def strftime_iso_shortdate(_) do
sequence([
day_of_month(padding: :spaces, min: 1, max: 2),
literal(char("-")),
month_short([]),
literal(char("-")),
year4(padding: :zeroes)
])
end
defp literal(parser), do: map(parser, fn x -> [literal: x] end)
end
|
lib/parse/datetime/parsers.ex
| 0.762954
| 0.577227
|
parsers.ex
|
starcoder
|
defmodule Absinthe.Type.Object do
@moduledoc """
Represents a non-leaf node in a GraphQL tree of information.
Objects represent a list of named fields, each of which yield a value of a
specific type. Object values are serialized as unordered maps, where the
queried field names (or aliases) are the keys and the result of evaluating the
field is the value.
Also see `Absinthe.Type.Scalar`.
## Examples
Given a type defined as the following (see `Absinthe.Schema.Notation.object/3`):
```
@desc "A person"
object :person do
field :name, :string
field :age, :integer
field :best_friend, :person
field :pets, list_of(:pet)
end
```
The "Person" type (referred inside Absinthe as `:person`) is an object, with
fields that use `Absinthe.Type.Scalar` types (namely `:name` and `:age`), and
other `Absinthe.Type.Object` types (`:best_friend` and `:pets`, assuming
`:pet` is an object).
Given we have a query that supports getting a person by name
(see `Absinthe.Schema`), and a query document like the following:
```
{
person(name: "Joe") {
name
best_friend {
name
age
}
pets {
breed
}
}
}
```
We could get a result like this:
```
%{
data: %{
"person" => %{
"best_friend" => %{
"name" => "Jill",
"age" => 29
},
"pets" => [
%{"breed" => "Wyvern"},
%{"breed" => "<NAME>"}
]
}
}
}
```
"""
alias Absinthe.Type
use Absinthe.Introspection.Kind
@typedoc """
A defined object type.
Note new object types (with the exception of the root-level `query`, `mutation`, and `subscription`)
should be defined using `Absinthe.Schema.Notation.object/3`.
* `:name` - The name of the object type. Should be a TitleCased `binary`. Set automatically.
* `:description` - A nice description for introspection.
* `:fields` - A map of `Absinthe.Type.Field` structs. Usually built via `Absinthe.Schema.Notation.field/1`.
* `:interfaces` - A list of interfaces that this type guarantees to implement. See `Absinthe.Type.Interface`.
* `:is_type_of` - A function used to identify whether a resolved object belongs to this defined type. For use with `:interfaces` entry and `Absinthe.Type.Interface`.
The `__private__` and `:__reference__` keys are for internal use.
"""
@type t :: %__MODULE__{
identifier: atom,
name: binary,
description: binary,
fields: map,
interfaces: [Absinthe.Type.Interface.t()],
__private__: Keyword.t(),
definition: Module.t(),
__reference__: Type.Reference.t()
}
defstruct identifier: nil,
name: nil,
description: nil,
fields: nil,
interfaces: [],
__private__: [],
definition: nil,
__reference__: nil,
is_type_of: nil
@doc false
defdelegate functions, to: Absinthe.Blueprint.Schema.ObjectTypeDefinition
@doc false
# Looks up a field definition on an object type by its identifier,
# returning `nil` when the object defines no such field.
@spec field(t, atom) :: Absinthe.Type.Field.t()
def field(%{fields: fields}, identifier), do: Map.get(fields, identifier)
defimpl Absinthe.Traversal.Node do
  # An object's traversal children are its field definitions followed by
  # the interfaces it implements.
  def children(%{fields: fields, interfaces: interfaces}, _traversal) do
    Map.values(fields) ++ interfaces
  end
end
end
|
lib/absinthe/type/object.ex
| 0.919661
| 0.962988
|
object.ex
|
starcoder
|
defmodule CCSP.Chapter3.CSP do
alias CCSP.Chapter3.Constraint
alias __MODULE__, as: T
@moduledoc """
Corresponds to CCSP in Python, Section 3.1, titled "Building a constraint-satisfaction problem framework"
"""
# variable type
@type v :: any
# domain type
@type d :: any
@type domains :: %{v => [d]}
defstruct variables: [], domains: %{}, constraints: %{}
# Fix: `t` was previously defined as `__MODULE__.t()`, i.e. in terms of
# itself, which tells Dialyzer nothing. Define it as the struct it names.
# (Declared after `defstruct` so the struct exists when the type expands.)
@type t :: %__MODULE__{
        variables: [v],
        domains: domains,
        constraints: %{v => [Constraint.t(v, d)]}
      }
@spec new(list(v), domains) :: t()
# Builds a CSP from its variables and their domains; every variable starts
# with an empty constraint list. Raises if any variable lacks a domain.
def new(variables, domains) do
  constraints =
    Map.new(variables, fn variable ->
      unless Map.has_key?(domains, variable) do
        raise "Every variable should have a domain assigned to it."
      end
      {variable, []}
    end)
  %T{variables: variables, domains: domains, constraints: constraints}
end
@spec add_constraint(t, Constraint.t(v, d)) :: t
@doc false
# Registers `constraint` against each variable it mentions.
def add_constraint(csp, constraint) do
  constraints =
    Enum.reduce(constraint.variables, csp.constraints, fn variable, csp_constraints ->
      # Fix: the message previously duplicated the "domain" error from
      # `new/2`; the actual failure here is a constraint referencing a
      # variable the CSP does not contain.
      unless variable in csp.variables do
        raise "Variable in constraint is not a variable of the CSP."
      end
      # `Map.update!/3` prepends safely; the old `Map.get/2` + cons could
      # build an improper list (`[constraint | nil]`) for an absent key.
      Map.update!(csp_constraints, variable, &[constraint | &1])
    end)
  %T{csp | constraints: constraints}
end
@spec consistent?(t, v, %{v => d}) :: boolean
# True when every constraint registered for `variable` is satisfied by
# the (possibly partial) assignment.
def consistent?(csp, variable, assignment) do
  csp.constraints
  |> Map.get(variable)
  |> Enum.all?(&Constraint.satisfied?(&1, assignment))
end
# Picks the first variable with no value in `assignment` yet.
#
# Uses `Enum.find/2` rather than filtering the whole list and matching on
# its head: it stops at the first hit and returns `nil` (instead of raising
# a `MatchError`) in the case where everything is already assigned.
def next_candidate(csp, assignment) do
  Enum.find(csp.variables, fn variable -> not Map.has_key?(assignment, variable) end)
end
@spec backtracking_search(t, %{v => d}) :: {atom, %{v => d}} | nil
def backtracking_search(csp, assignment \\ %{})
# Base case: every variable has a value, so the assignment is a solution.
def backtracking_search(%T{variables: variables} = _csp, assignment)
    when map_size(assignment) == length(variables) do
  {:ok, assignment}
end
# Recursive case: pick the next unassigned variable and try each value in
# its domain. `Enum.find_value/2` returns the first truthy result, i.e.
# the first `{:ok, solution}` found down any branch; `nil` means no value
# works and the caller must backtrack.
def backtracking_search(csp, assignment) do
  first = next_candidate(csp, assignment)
  Map.get(csp.domains, first)
  |> Enum.find_value(fn value ->
    # The `<-` clauses binding plain variables never fail; they act as
    # sequential bindings. Only the boolean checks can fall through to
    # `else`, mapping any failure (inconsistent assignment, or a dead-end
    # `nil` from the recursive call) to `nil` so the next value is tried.
    with local_assignment <- Map.put(assignment, first, value),
         true <- consistent?(csp, first, local_assignment),
         results <- backtracking_search(csp, local_assignment),
         true <- is_tuple(results) do
      results
    else
      _ -> nil
    end
  end)
end
end
|
lib/ccsp/chapter3/csp.ex
| 0.771413
| 0.422475
|
csp.ex
|
starcoder
|
defmodule Snake.Scene.Game3 do
use Scenic.Scene
import Scenic.Primitives, only: [rounded_rectangle: 3]
alias Scenic.ViewPort
alias Scenic.Graph
# Base scene graph: an empty graph with only the background color set.
@graph Graph.build(clear_color: :dark_sea_green)
# Side length of one board tile, in pixels.
@tile_size 32
# Corner radius used when drawing tiles, in pixels.
@tile_radius 8
# Interval between animation frames, in milliseconds.
@frame_ms 192
# Scenic scene entry point: sizes the board from the viewport dimensions,
# seeds the snake, and starts the per-frame timer.
def init(_arg, opts) do
  viewport = opts[:viewport]
  # Query the viewport so the board size adapts to the window dimensions.
  {:ok, %ViewPort.Status{size: {vp_width, vp_height}}} = ViewPort.info(viewport)
  number_of_columns = div(vp_width, @tile_size)
  number_of_rows = div(vp_height, @tile_size)
  state = %{
    width: number_of_columns,
    height: number_of_rows,
    # The snake starts as three segments moving right ({1, 0}); the head
    # is the first element of `body`.
    snake: %{body: [{9, 9}, {10, 9}, {11, 9}], direction: {1, 0}}
  }
  # start timer
  {:ok, _timer} = :timer.send_interval(@frame_ms, :frame)
  {:ok, state, push: @graph}
end
# On each timer tick, advance the game one step and push the redrawn
# graph to the viewport.
def handle_info(:frame, state) do
  next_state = move_snake(state)
  {:noreply, next_state, push: draw_objects(@graph, next_state)}
end
# Advances the snake one tile in its current direction: a new head is
# prepended and the last tail segment dropped, so the length is constant.
defp move_snake(%{snake: %{body: [head | _] = body, direction: direction}} = state) do
  next_head = move(state, head, direction)
  next_body = List.delete_at([next_head | body], -1)
  put_in(state, [:snake, :body], next_body)
end

# Translates a tile position by a direction vector, wrapping around the
# board edges (the snake re-enters from the opposite side) via `rem/2`.
defp move(%{width: w, height: h}, {pos_x, pos_y}, {vec_x, vec_y}) do
  {rem(pos_x + vec_x + w, w), rem(pos_y + vec_y + h, h)}
end
# Renders every snake segment onto the graph.
defp draw_objects(graph, %{snake: %{body: body}}) do
  Enum.reduce(body, graph, fn {x, y}, acc ->
    draw_tile(acc, x, y, fill: :dark_slate_gray)
  end)
end

# Draws a single board tile as a rounded rectangle (for looks) at tile
# coordinates {x, y}; `opts` may override the default fill/translation.
defp draw_tile(graph, x, y, opts) do
  defaults = [fill: :black, translate: {x * @tile_size, y * @tile_size}]
  rounded_rectangle(graph, {@tile_size, @tile_size, @tile_radius}, Keyword.merge(defaults, opts))
end
end
|
lib/scenes/current/game3.ex
| 0.707607
| 0.4575
|
game3.ex
|
starcoder
|
defmodule ArangoXEcto.Edge do
@moduledoc """
Defines an Arango Edge collection as an Ecto Schema Module.
Edge modules are dynamically created in the environment if they don't already exist.
This will define the required fields of an edge (`_from` and `_to`) and will define the default changeset.
Collections in ArangoDB are automatically created if they don't exist already.
## Extending Edge Schemas
If you need to add additional fields to an edge, you can do so by creating your own edge module
and defining the required fields as well as any additional fields. Luckily there are some helper macros
so you don't have to do this manually again.
A custom schema module must use this module by adding `use ArangoXEcto.Edge`.
When defining the fields in your schema, make sure to call `edge_fields/1`. This will add the `_from`
and `_to` fields to the schema. It does not have to be before any custom fields but it good convention
to do so.
A `changeset/2` function is automatically defined on the custom schema module but this must be overridden
this so that you can cast and validate the custom fields. The `edges_changeset/2` method should be called
to automatically implement the casting and validation of the `_from` and `_to` fields. It does not have
to be before any custom field operations but it good convention to do so.
### Example
defmodule MyProject.UserPosts do
use ArangoXEcto.Edge
import Ecto.Changeset
schema "user_posts" do
edge_fields()
field(:type, :string)
end
def changeset(edge, attrs) do
edges_changeset(edge, attrs)
|> cast(attrs, [:type])
|> validate_required([:type])
end
end
"""
use ArangoXEcto.Schema
import Ecto.Changeset
require ArangoXEcto.Schema.Fields
alias ArangoXEcto.Schema.Fields
@type t :: %__MODULE__{}
@callback changeset(Ecto.Schema.t() | Changeset.t(), map()) :: Changeset.t()
# Injects the edge behaviour into a custom edge schema module: pulls in
# `ArangoXEcto.Schema`, registers this module as a behaviour, and supplies
# overridable default implementations of `changeset/2` and `__edge__/0`.
defmacro __using__(_opts) do
  quote do
    use ArangoXEcto.Schema
    import unquote(__MODULE__)
    @behaviour unquote(__MODULE__)
    @doc """
    Default Changeset for an Edge
    Should be overridden when using custom fields.
    """
    @spec changeset(Ecto.Schema.t() | Changeset.t(), map()) :: Changeset.t()
    def changeset(edge, attrs) do
      unquote(__MODULE__).edges_changeset(edge, attrs)
    end
    @doc """
    Defines that this schema is an edge
    """
    def __edge__, do: true
    # Let the using module override any of the defaults injected above.
    defoverridable unquote(__MODULE__)
  end
end
@doc """
Macro to define the required edge fields i.e. `_from` and `_to`.
This is required when using a custom edge schema and can be used as below.
schema "user_posts" do
edge_fields()
field(:type, :string)
end
"""
defmacro edge_fields do
quote do
require unquote(Fields)
unquote(Fields).define_fields(:edge)
end
end
schema "" do
Fields.define_fields(:edge)
end
@doc """
Default changeset for an edge.
Casts and requires the `_from` and `_to` fields. This will also verify the format of both fields to match that of
an Arango id.
Any custom changeset should first use this changeset.
Direct use of the `edges_changeset/2` function is discouraged unless per the use case mentioned above.
### Example
To add a required `type` field, you could do the following:
def changeset(edge, attrs) do
edges_changeset(edge, attrs)
|> cast(attrs, [:type])
|> validate_required([:type])
end
"""
def edges_changeset(edge, attrs) do
edge
|> cast(attrs, [:_from, :_to])
|> validate_required([:_from, :_to])
|> validate_id([:_from, :_to])
end
# Validates that each of `fields` looks like an Arango document id
# ("collection/key").
#
# NOTE(review): the regex is unanchored and only allows [a-zA-Z0-9] on each
# side of the slash, so it accepts any string merely *containing* such a
# pattern while rejecting legal Arango keys with `_`, `-`, etc. — confirm
# intended strictness before changing runtime behavior.
defp validate_id(changeset, fields) when is_list(fields) do
  Enum.reduce(fields, changeset, &validate_format(&2, &1, ~r/[a-zA-Z0-9]+\/[a-zA-Z0-9]+/))
end
end
|
lib/arangox_ecto/edge.ex
| 0.842313
| 0.591458
|
edge.ex
|
starcoder
|
defmodule Membrane.MPEG.TS.Demuxer.Parser do
@moduledoc false
# Based on:
# * https://en.wikipedia.org/wiki/MPEG_transport_stream
# * https://en.wikipedia.org/wiki/Packetized_elementary_stream
# * https://en.wikipedia.org/wiki/Program-specific_information
use Bunch
use Membrane.Log
@type mpegts_pid :: non_neg_integer
@ts_packet_size 188
@default_stream_state %{started_pts_payload: nil}
defmodule State do
@moduledoc false
defstruct streams: %{}, known_tables: []
@type t :: %__MODULE__{
streams: map,
known_tables: [non_neg_integer]
}
end
# Parses a single packet.
# Packet should be at least 188 bytes long, otherwise parsing will result in error.
# Unparsed data will be returned as part of the result.
@spec parse_single_packet(binary(), State.t()) ::
{{:ok, {mpegts_pid, data :: binary}}, {rest :: binary, State.t()}}
| {{:error, reason :: atom()}, {rest :: binary, State.t()}}
def parse_single_packet(<<packet::@ts_packet_size-binary, rest::binary>>, state) do
case parse_packet(packet, state) do
{{:ok, {stream_pid, data}}, state} ->
{{:ok, {stream_pid, data}}, {rest, state}}
{{:error, _reason} = error, state} ->
{error, {rest, state}}
end
end
def parse_single_packet(rest, state), do: {{:error, :not_enough_data}, {rest, state}}
# Parses a binary that contains sequence of packets.
# Each packet that fails parsing shall be ignored.
@spec parse_packets(binary, State.t()) ::
{results :: %{mpegts_pid => [binary]}, rest :: binary, State.t()}
def parse_packets(packets, state), do: do_parse_packets(packets, state, [])
defp do_parse_packets(<<packet::@ts_packet_size-binary, rest::binary>>, state, acc) do
case parse_packet(packet, state) do
{{:ok, data}, state} ->
do_parse_packets(rest, state, [data | acc])
{{:error, reason}, state} ->
"""
MPEG-TS parser encountered an error: #{inspect(reason)}
"""
|> warn()
do_parse_packets(rest, state, acc)
end
end
defp do_parse_packets(<<rest::binary>>, state, acc) do
acc
|> Enum.reverse()
|> Enum.group_by(fn {stream_pid, _data} -> stream_pid end, fn {_stream_pid, data} -> data end)
|> Bunch.Map.map_values(&IO.iodata_to_binary/1)
~> {&1, rest, state}
end
defp parse_packet(
<<
0x47::8,
_transport_error_indicator::1,
payload_unit_start_indicator::1,
_transport_priority::1,
stream_pid::13,
_transport_scrambling_control::2,
adaptation_field_control::2,
_continuity_counter::4,
# 184 = 188 - header length
optional_fields::184-binary
>>,
state
) do
case parse_pts_optional(optional_fields, adaptation_field_control) do
{:ok, payload} ->
handle_parsed_payload(stream_pid, payload, payload_unit_start_indicator, state)
error ->
{error, put_stream(state, stream_pid)}
end
end
defp parse_packet(_, state) do
{{:error, {:invalid_packet, :pts}}, state}
end
defp parse_pts_optional(payload, adaptation_field_control)
defp parse_pts_optional(payload, 0b01) do
{:ok, payload}
end
defp parse_pts_optional(_adaptation_field, 0b10) do
{:error, {:unsupported_packet, :only_adaptation_field}}
end
defp parse_pts_optional(optional, 0b11) do
case optional do
<<
adaptation_field_length::8,
_adaptation_field::binary-size(adaptation_field_length),
payload::bitstring
>> ->
{:ok, payload}
_ ->
{:error, {:invalid_packet, :adaptation_field}}
end
end
defp parse_pts_optional(_optional, 0b00) do
{:error, {:invalid_packet, :adaptation_field_control}}
end
defp handle_parsed_payload(stream_pid, payload, payload_unit_start_indicator, state) do
cond do
stream_pid in 0x0000..0x0004 or stream_pid in state.known_tables ->
<<_pointer::8, payload::binary>> = payload
known_tables = state.known_tables |> List.delete(stream_pid)
{{:ok, {stream_pid, payload}}, %{state | known_tables: known_tables}}
stream_pid in 0x0020..0x1FFA or stream_pid in 0x1FFC..0x1FFE ->
stream_state = state.streams[stream_pid] || @default_stream_state
payload
|> parse_pts_payload(payload_unit_start_indicator, stream_state)
|> handle_pts_payload(stream_pid, state)
stream_pid == 0x1FFF ->
{:null_packet, state}
true ->
{{:error, :unsuported_stream_pid}, state}
end
end
defp parse_pts_payload(<<1::24, _::bitstring>> = payload, 0b1, stream_state) do
with {:ok, payload} <- parse_pes_packet(payload) do
{:ok, {payload, %{stream_state | started_pts_payload: :pes}}}
end
end
defp parse_pts_payload(payload, 0b0, %{started_pts_payload: :pes} = stream_state) do
{:ok, {payload, stream_state}}
end
defp parse_pts_payload(_payload, _pusi, _stream_state) do
{:error, {:unsupported_packet, :not_pes}}
end
defp handle_pts_payload({:ok, {data, stream_state}}, stream_pid, state) do
{{:ok, {stream_pid, data}}, put_stream(state, stream_pid, stream_state)}
end
defp handle_pts_payload({:error, _} = error, stream_pid, state) do
{error, put_stream(state, stream_pid)}
end
defp parse_pes_packet(<<
1::24,
stream_id::8,
_packet_length::16,
optional_fields::bitstring
>>) do
parse_pes_optional(optional_fields, stream_id)
end
defp parse_pes_packet(_), do: {:error, {:invalid_packet, :pes}}
# Parses the PES optional header.
#
# Fix: the first clause's bit pattern had been corrupted in this copy (an
# IPv6-like token appeared where the pattern belongs). Per the MPEG
# Packetized Elementary Stream layout, the optional header begins with the
# two marker bits `10`, so the pattern is restored to `<<0b10::2, ...>>`.
# Stream ids without optional fields (padding/private_stream_2) get the
# bytes following the header length returned as-is; anything not starting
# with the marker bits falls through to the catch-all clause below.
defp parse_pes_optional(<<0b10::2, optional::bitstring>>, stream_id) do
  stream_ids_with_no_optional_fields = [
    # padding_stream
    0b10111110,
    # private_stream_2
    0b10111111
  ]
  case optional do
    <<
      _scrambling_control::2,
      _priority::1,
      _data_alignment_indicator::1,
      _copyright::1,
      _original_or_copy::1,
      _pts_dts_indicator::2,
      _escr_flag::1,
      _es_rate_flag::1,
      _dsm_trick_mode_flag::1,
      _additional_copy_info_flag::1,
      _crc_flag::1,
      _extension_flag::1,
      pes_header_length::8,
      rest::binary
    >> ->
      if stream_id in stream_ids_with_no_optional_fields do
        {:ok, rest}
      else
        case rest do
          <<_optional_fields::binary-size(pes_header_length), data::binary>> -> {:ok, data}
          _ -> {:error, {:invalid_packet, :pes_optional}}
        end
      end
    _ ->
      {:error, {:invalid_packet, :pes_optional}}
  end
end
defp parse_pes_optional(optional, _stream_id) do
  {:ok, optional}
end
defp put_stream(state, stream_pid, stream \\ @default_stream_state) do
%State{state | streams: Map.put(state.streams, stream_pid, stream)}
end
end
|
lib/membrane_element_mpegts/demuxer/parser.ex
| 0.769946
| 0.553505
|
parser.ex
|
starcoder
|
defmodule Expline.Matrix do
  @moduledoc false

  # Dense matrix of floats stored as a tuple of row tuples (`internal`);
  # dimensions are carried alongside so they can be checked in guards.
  @enforce_keys [:n_rows, :m_cols, :internal]
  defstruct [:n_rows, :m_cols, :internal]

  @type t() :: %__MODULE__{ n_rows: pos_integer(),
                            m_cols: pos_integer(),
                            internal: internal() }
  @type vector() :: Expline.Vector.t()
  @typep internal() :: tuple()
  @typep binary_op() :: ( float(), float() -> float() )
  @typep unary_op() :: ( float() -> float() )

  # Builds an `n_rows` x `m_cols` matrix filled with 0.0.
  @spec zeros(pos_integer(), pos_integer()) :: __MODULE__.t()
  def zeros(n_rows, m_cols) do
    construct(n_rows, m_cols, fn (_, _) -> 0.0 end)
  end

  # Identity matrix: the `(i, i)` clause matches only when both indices are
  # equal (same binding), yielding 1.0 on the diagonal, 0.0 elsewhere.
  @spec identity(pos_integer()) :: __MODULE__.t()
  def identity(n) do
    construct(n, n, fn
      (i, i) -> 1.0
      (_i, _j) -> 0.0
    end)
  end

  # Element-wise subtraction; errors when dimensions differ.
  @spec sub(__MODULE__.t(), __MODULE__.t()) :: {:ok, __MODULE__.t()}
                                             | {:error, :dimension_mismatch}
  def sub(%__MODULE__{} = a, %__MODULE__{} = b), do: do_binary_op(a, b, &Kernel.-/2)

  # Element-wise addition; errors when dimensions differ.
  @spec add(__MODULE__.t(), __MODULE__.t()) :: {:ok, __MODULE__.t()}
                                             | {:error, :dimension_mismatch}
  def add(%__MODULE__{} = a, %__MODULE__{} = b), do: do_binary_op(a, b, &Kernel.+/2)

  # Applies `op` pair-wise; the head only matches when both matrices share
  # `n_rows`/`m_cols`, so the second clause catches mismatched dimensions.
  @spec do_binary_op(__MODULE__.t(), __MODULE__.t(), binary_op()) :: {:ok, __MODULE__.t()}
                                                                   | {:error, :dimension_mismatch}
  defp do_binary_op(%__MODULE__{ n_rows: n_rows, m_cols: m_cols } = a,
                    %__MODULE__{ n_rows: n_rows, m_cols: m_cols } = b, op)
       when is_function(op, 2) do
    construct(n_rows, m_cols, fn
      (i, j) -> op.(at(a, i, j), at(b, i, j))
    end)
  end
  defp do_binary_op(%__MODULE__{}, %__MODULE__{}, _op),
    do: {:error, :dimension_mismatch}

  # Scalar multiplication of every element.
  @spec scale(__MODULE__.t(), float()) :: __MODULE__.t()
  def scale(%__MODULE__{} = matrix, scalar)
      when is_float(scalar) do
    transform(matrix, &(scalar * &1))
  end

  # Applies the unary `op` to every element, preserving dimensions.
  @spec transform(__MODULE__.t(), unary_op()) :: __MODULE__.t()
  def transform(%__MODULE__{ n_rows: n_rows, m_cols: m_cols } = matrix, op)
      when is_function(op, 1) do
    construct(n_rows, m_cols, fn
      (i, j) ->
        matrix |> at(i, j) |> op.()
    end)
  end

  # Builds a matrix by calling `elem_fn.(i, j)` for every 0-based (row, col)
  # pair, accumulating tuples with `Tuple.append/2` (O(n) per append, so
  # construction is quadratic per row — acceptable for the small matrices
  # this library works with).
  @spec construct(pos_integer(), pos_integer(), (non_neg_integer(), non_neg_integer() -> float())) :: __MODULE__.t()
  def construct(n_rows, m_cols, elem_fn)
      when n_rows > 0
      and m_cols > 0
      and is_function(elem_fn, 2) do
    internal = 0..(n_rows - 1)
    |> Enum.reduce({}, fn (i, matrix) ->
      row = 0..(m_cols - 1)
      |> Enum.reduce({}, fn (j, row) ->
        Tuple.append(row, elem_fn.(i, j))
      end)
      Tuple.append(matrix, row)
    end)
    %__MODULE__{ n_rows: n_rows, m_cols: m_cols, internal: internal }
  end

  # O(1) element access; upper bounds are guarded (lower bounds rely on
  # `elem/2` raising for negative indices).
  @spec at(__MODULE__.t(), non_neg_integer(), non_neg_integer()) :: float()
  def at(%__MODULE__{ n_rows: n_rows, m_cols: m_cols, internal: internal }, i, j)
      when is_integer(i)
      and i < n_rows
      and is_integer(j)
      and j < m_cols do
    internal
    |> elem(i)
    |> elem(j)
  end

  # Transposed copy: note the swapped dimensions and the `at(matrix, j, i)`.
  @spec transpose(__MODULE__.t()) :: __MODULE__.t()
  def transpose(%__MODULE__{} = matrix) do
    construct(matrix.m_cols, matrix.n_rows, fn
      (i, j) -> at(matrix, j, i)
    end)
  end

  # Structural equality with the transpose; exact float comparison, so this
  # is only meaningful for matrices built from identical computations.
  @spec symmetric?(__MODULE__.t()) :: boolean()
  def symmetric?(%__MODULE__{} = matrix) do
    matrix == transpose(matrix)
  end

  # True when every element strictly above the diagonal (i < j) is exactly 0.0.
  @spec lower_triangular?(__MODULE__.t()) :: boolean()
  def lower_triangular?(%__MODULE__{ n_rows: n_rows, m_cols: m_cols } = matrix) do
    for i <- 0..(n_rows-1), j <- 0..(m_cols-1), i < j do
      at(matrix, i, j)
    end |> Enum.all?(fn (0.0) -> true; (_) -> false end)
  end

  # True when every element strictly below the diagonal (i > j) is exactly 0.0.
  @spec upper_triangular?(__MODULE__.t()) :: boolean()
  def upper_triangular?(%__MODULE__{ n_rows: n_rows, m_cols: m_cols } = matrix) do
    for i <- 0..(n_rows-1), j <- 0..(m_cols-1), i > j do
      at(matrix, i, j)
    end |> Enum.all?(fn (0.0) -> true; (_) -> false end)
  end

  # A symmetric matrix is positive definite iff its Cholesky decomposition
  # exists. NOTE(review): the head only matches square matrices (n_rows ==
  # m_cols); a non-square argument raises FunctionClauseError rather than
  # returning false — confirm that is intended by callers.
  @spec positive_definite?(__MODULE__.t()) :: boolean()
  def positive_definite?(%__MODULE__{ n_rows: n, m_cols: n } = matrix) do
    case cholesky_decomposition(matrix) do
      {:ok, _} -> true
      {:error, _} -> false
    end
  end

  # Cholesky–Crout decomposition: returns the lower-triangular L with
  # A = L * Lᵀ. Row by row, column by column; `reduce_while` is used so a
  # negative diagonal term (matrix not positive definite) aborts both loops
  # by threading `{:error, :not_positive_definite}` outward.
  @spec cholesky_decomposition(__MODULE__.t()) :: {:ok, __MODULE__.t()}
                                                | {:error, :not_square}
                                                | {:error, :not_symmetric}
                                                | {:error, :not_positive_definite}
  def cholesky_decomposition(%__MODULE__{ n_rows: n, m_cols: n } = matrix) do
    if symmetric?(matrix) do
      # The accumulator starts as A's rows and is overwritten in place;
      # only entries with column index <= row index are ever read back,
      # so the not-yet-overwritten upper part of A is never used.
      l_internal = 0..(n - 1)
      |> Enum.reduce_while(matrix.internal, fn (i, mat_l) ->
        row_a_i = elem(matrix.internal, i)
        row_l_i = elem(mat_l, i)
        new_row = 0..(n - 1)
        |> Enum.reduce_while(row_l_i, fn (j, row_l_i) ->
          cond do
            i == j ->
              # Diagonal: l_jj = sqrt(a_jj - sum_k l_jk^2). The `k >= 0,
              # k <= (j-1)` filters neutralize the descending `0..-1`
              # range when j == 0, leaving an empty (zero) sum.
              summation = for k <- 0..(j-1), k >= 0, k <= (j-1) do
                l_jk = elem(row_l_i, k)
                :math.pow(l_jk, 2)
              end |> Enum.sum
              a_jj = elem(row_a_i, j)
              case a_jj - summation do
                value when value < 0.0 ->
                  {:halt, {:error, :not_positive_definite}}
                value ->
                  new_row = put_elem(row_l_i, j, :math.sqrt(value))
                  {:cont, new_row}
              end
            i > j ->
              # Below diagonal: l_ij = (a_ij - sum_k l_ik * l_jk) / l_jj.
              summation = for k <- 0..(j-1), k >= 0, k <= (j-1) do
                row_l_j = elem(mat_l, j)
                l_ik = elem(row_l_i, k)
                l_jk = elem(row_l_j, k)
                l_ik * l_jk
              end |> Enum.sum
              a_ij = elem(row_a_i, j)
              l_jj = mat_l |> elem(j) |> elem(j)
              new_row = put_elem(row_l_i, j, (a_ij - summation) / l_jj)
              {:cont, new_row}
            # i < j
            true -> {:cont, put_elem(row_l_i, j, 0.0)}
          end
        end)
        case new_row do
          {:error, :not_positive_definite} ->
            {:halt, new_row}
          row ->
            {:cont, put_elem(mat_l, i, new_row)}
        end
      end)
      case l_internal do
        {:error, :not_positive_definite} ->
          {:error, :not_positive_definite}
        _ ->
          {:ok, %{ matrix | internal: l_internal }}
      end
    else
      {:error, :not_symmetric}
    end
  end
  def cholesky_decomposition(%__MODULE__{}), do: {:error, :not_square}

  # Matrix product. B is transposed first so each c_ij is a dot product of
  # two row tuples, computed by zipping the rows as lists.
  @spec product(__MODULE__.t(), __MODULE__.t()) :: {:ok, __MODULE__.t()}
                                                 | {:error, :dimension_mismatch}
  def product(%__MODULE__{ n_rows: a_rows, m_cols: a_cols, internal: a_internal },
              %__MODULE__{ n_rows: b_rows, m_cols: b_cols } = b)
      when a_cols == b_rows do
    b_internal = transpose(b).internal
    c = construct(a_rows, b_cols, fn
      (i, j) ->
        as = elem(a_internal, i) |> Tuple.to_list
        bs = elem(b_internal, j) |> Tuple.to_list
        Enum.zip(as, bs)
        |> Enum.map(fn ({a_ik, b_kj}) -> a_ik * b_kj end)
        |> Enum.sum
    end)
    {:ok, c}
  end
  def product(%__MODULE__{}, %__MODULE__{}), do: {:error, :dimension_mismatch}

  # Solves L * x = b for lower-triangular L by forward substitution.
  @spec forward_substitution(__MODULE__.t(), vector()) :: {:ok, vector()}
                                                        | {:error, :dimension_mismatch}
                                                        | {:error, :not_lower_triangular}
  def forward_substitution(%__MODULE__{ n_rows: n_rows } = matrix,
                           %Expline.Vector{ n_slots: n_slots } = vector)
      when n_rows == n_slots do
    if lower_triangular?(matrix) do
      solution = do_forward_substitution(matrix, vector, 0, {})
      {:ok, solution}
    else
      {:error, :not_lower_triangular}
    end
  end
  def forward_substitution(%__MODULE__{}, %Expline.Vector{}), do: {:error, :dimension_mismatch}

  # Recursion terminates when the solution tuple has grown to vector length;
  # the tuple is then materialized as an Expline.Vector.
  @spec do_forward_substitution(__MODULE__.t(), vector(), integer(), tuple()) :: vector()
  defp do_forward_substitution(_matrix, %Expline.Vector{ n_slots: n_slots }, _row, solution)
       when n_slots == tuple_size(solution) do
    Expline.Vector.construct(tuple_size(solution), fn
      (i) -> elem(solution, i)
    end)
  end
  defp do_forward_substitution(matrix, vector, nth_row, solution) do
    # x_n = (b_n - sum_{i<n} a_ni * x_i) / a_nn; the `i >= 0, i <= nth_row-1`
    # filters empty out the descending `0..-1` range when nth_row == 0.
    summation = for i <- 0..(nth_row-1), i >= 0, i <= nth_row-1 do
      at(matrix, nth_row, i) * elem(solution, i)
    end |> Enum.sum
    new_solution = (Expline.Vector.at(vector, nth_row) - summation) / at(matrix, nth_row, nth_row)
    do_forward_substitution(matrix, vector, nth_row + 1, Tuple.append(solution, new_solution))
  end

  # Solves U * x = b for upper-triangular U by backward substitution.
  @spec backward_substitution(__MODULE__.t(), vector()) :: {:ok, vector()}
                                                         | {:error, :dimension_mismatch}
                                                         | {:error, :not_upper_triangular}
  def backward_substitution(%__MODULE__{ n_rows: n_rows } = matrix,
                            %Expline.Vector{ n_slots: n_slots } = vector)
      when n_rows == n_slots do
    if upper_triangular?(matrix) do
      # Pre-sized buffer of 0.0s, filled from the last row upward.
      sln_buffer = (1..n_rows) |> Enum.reduce({}, fn (_, t) -> Tuple.append(t, 0.0) end)
      solution = do_backward_substitution(matrix, vector, n_rows - 1, sln_buffer)
      {:ok, solution}
    else
      {:error, :not_upper_triangular}
    end
  end
  def backward_substitution(%__MODULE__{}, %Expline.Vector{}), do: {:error, :dimension_mismatch}

  # Terminates after processing row 0 (nth_row reaches -1).
  @spec do_backward_substitution(__MODULE__.t(), vector(), integer(), tuple()) :: vector()
  defp do_backward_substitution(_matrix, _vector, -1, solution) do
    Expline.Vector.construct(tuple_size(solution), fn
      (i) -> elem(solution, i)
    end)
  end
  defp do_backward_substitution(matrix, vector, nth_row, solution) do
    # The range starts at nth_row (including the diagonal), but that slot is
    # still 0.0 in the buffer at this point, so the diagonal term contributes
    # nothing to the sum.
    summation = for i <- nth_row..(matrix.n_rows-1),
                    i >= 0,
                    i <= matrix.n_rows do
      at(matrix, nth_row, i) * elem(solution, i)
    end |> Enum.sum
    new_solution = (Expline.Vector.at(vector, nth_row) - summation) / at(matrix, nth_row, nth_row)
    do_backward_substitution(matrix, vector, nth_row - 1, put_elem(solution, nth_row, new_solution))
  end

  # Splits an augmented matrix [A | b] into {A, b}: the last column becomes
  # the vector, the remaining columns the disaugmented matrix.
  @spec disaugment(__MODULE__.t()) :: {:ok, {__MODULE__.t(), vector()}}
                                    | {:error, :dimension_mismatch}
  def disaugment(%__MODULE__{ n_rows: n_rows, m_cols: m_cols } = matrix)
      when m_cols > 1 do
    augment = Expline.Vector.construct(n_rows, fn
      (i) ->
        at(matrix, i, m_cols-1)
    end)
    disaugmented_matrix = construct(n_rows, m_cols - 1, fn
      (i, j) ->
        at(matrix, i, j)
    end)
    {:ok, {disaugmented_matrix, augment}}
  end
  def disaugment(%__MODULE__{}), do: {:error, :dimension_mismatch}
end
defimpl Inspect, for: Expline.Matrix do
  import Inspect.Algebra

  # Renders the matrix one row per line: each row tuple becomes its own
  # inspect document and the row documents are joined with line breaks.
  def inspect(%Expline.Matrix{ internal: rows }, opts) do
    row_docs = for row <- Tuple.to_list(rows), do: to_doc(row, opts)

    row_docs
    |> Enum.intersperse(break("\n"))
    |> concat
  end
end
|
lib/expline/matrix.ex
| 0.848062
| 0.622832
|
matrix.ex
|
starcoder
|
# Only compiled when Plug is available, so Guardian can be used without it.
if Code.ensure_loaded?(Plug) do
  defmodule Guardian.Plug.VerifyCookie do
    @moduledoc """
    Looks for and validates a token found in the request cookies.

    In the case where:

    a. The cookies are not loaded
    b. A token is already found for `:key`

    This plug will not do anything.

    This, like all other Guardian plugs, requires a Guardian pipeline to be setup.
    It requires an implementation module, an error handler and a key.

    These can be set either:

    1. Upstream on the connection with `plug Guardian.Pipeline`
    2. Upstream on the connection with `Guardian.Pipeline.{put_module, put_error_handler, put_key}`
    3. Inline with an option of `:module`, `:error_handler`, `:key`

    If a token is found but is invalid, the error handler will be called with
    `auth_error(conn, {:invalid_token, reason}, opts)`

    If a token is expired, the error handler WON'T be called, the error can be
    handled with the ensure_authenticated plug

    Once a token has been found it will be exchanged for an access (default) token.
    This access token will be placed into the session and connection.
    They will be available using `Guardian.Plug.current_claims/2` and `Guardian.Plug.current_token/2`.

    Tokens from cookies should be of type `refresh` and have a relatively long life.
    They will be exchanged for `access` tokens (default).

    Options:

    * `:key` - The location of the token (default `:default`)
    * `:exchange_from` - The type of the cookie (default `"refresh"`)
    * `:exchange_to` - The type of token to provide. Defaults to the
      implementation modules `default_type`
    * `:ttl` - The time to live of the exchanged token. Defaults to configured values.
    * `:halt` - Whether to halt the connection in case of error. Defaults to `true`
    """
    import Plug.Conn
    import Guardian.Plug.Keys
    import Guardian.Plug, only: [find_token_from_cookies: 2]

    alias Guardian.Plug.Pipeline

    @behaviour Plug

    @impl Plug
    @spec init(opts :: Keyword.t()) :: Keyword.t()
    @deprecated "Use Guardian.Plug.VerifySession or Guardian.Plug.VerifyHeader plug with `:refresh_from_cookie` option."
    def init(opts), do: opts

    @impl Plug
    @spec call(conn :: Plug.Conn.t(), opts :: Keyword.t()) :: Plug.Conn.t()
    def call(conn, opts) do
      refresh_from_cookie(conn, opts)
    end

    # Cookies not yet fetched: fetch them, then retry the other clause.
    def refresh_from_cookie(%{req_cookies: %Plug.Conn.Unfetched{}} = conn, opts) do
      conn
      |> fetch_cookies()
      |> refresh_from_cookie(opts)
    end

    # Happy path: only proceed when no token is already on the connection
    # (`nil <-`); find the cookie token, resolve module/key/types from the
    # pipeline and opts, then exchange the refresh token for an access token
    # and store the new token/claims on the connection (and session, if one
    # is active).
    def refresh_from_cookie(conn, opts) do
      with nil <- Guardian.Plug.current_token(conn, opts),
           {:ok, token} <- find_token_from_cookies(conn, opts),
           module <- Pipeline.fetch_module!(conn, opts),
           key <- storage_key(conn, opts),
           exchange_from <-
             Keyword.get(opts, :exchange_from, "refresh"),
           default_type <- module.default_token_type(),
           exchange_to <- Keyword.get(opts, :exchange_to, default_type),
           active_session? <- Guardian.Plug.session_active?(conn),
           {:ok, _old, {new_t, new_c}} <-
             Guardian.exchange(module, token, exchange_from, exchange_to, opts) do
        conn
        |> Guardian.Plug.put_current_token(new_t, key: key)
        |> Guardian.Plug.put_current_claims(new_c, key: key)
        |> maybe_put_in_session(active_session?, new_t, opts)
      else
        # No cookie token: pass through untouched.
        :no_token_found ->
          conn

        # Let the ensure_authenticated plug handle the token expired later in the pipeline
        {:error, :token_expired} ->
          conn

        # Any other error is routed through the pipeline's error handler,
        # which may halt the connection depending on the `:halt` option.
        {:error, reason} ->
          conn
          |> Pipeline.fetch_error_handler!(opts)
          |> apply(:auth_error, [conn, {:invalid_token, reason}, opts])
          |> Guardian.Plug.maybe_halt(opts)

        # Catch-all (e.g. a token was already present): pass through.
        _ ->
          conn
      end
    end

    # Only writes the new token into the session when one is active.
    defp maybe_put_in_session(conn, false, _, _), do: conn

    defp maybe_put_in_session(conn, true, token, opts) do
      key = conn |> storage_key(opts) |> token_key()
      put_session(conn, key, token)
    end

    defp storage_key(conn, opts), do: Pipeline.fetch_key(conn, opts)
  end
end
|
lib/guardian/plug/verify_cookie.ex
| 0.780955
| 0.563978
|
verify_cookie.ex
|
starcoder
|
defmodule AWS.ManagedBlockchain do
  @moduledoc """
  Amazon Managed Blockchain is a fully managed service for creating and managing
  blockchain networks using open-source frameworks.

  Blockchain allows you to build applications where multiple parties can securely
  and transparently run transactions and share data without the need for a
  trusted, central authority.

  Managed Blockchain supports the Hyperledger Fabric and Ethereum open-source
  frameworks. Because of fundamental differences between the frameworks, some API
  actions or data types may only apply in the context of one framework and not the
  other. For example, actions related to Hyperledger Fabric network members such
  as `CreateMember` and `DeleteMember` do not apply to Ethereum.

  The description for each action indicates the framework or frameworks to which
  it applies. Data types and properties that apply only in the context of a
  particular framework are similarly indicated.
  """

  alias AWS.Client
  alias AWS.Request

  # Static service descriptor consumed by `AWS.Request` for signing/routing.
  def metadata do
    %AWS.ServiceMetadata{
      abbreviation: nil,
      api_version: "2018-09-24",
      content_type: "application/x-amz-json-1.1",
      credential_scope: nil,
      endpoint_prefix: "managedblockchain",
      global?: false,
      protocol: "rest-json",
      service_id: "ManagedBlockchain",
      signature_version: "v4",
      signing_name: "managedblockchain",
      target_prefix: nil
    }
  end

  @doc """
  Creates a member within a Managed Blockchain network.

  Applies only to Hyperledger Fabric.
  """
  def create_member(%Client{} = client, network_id, input, options \\ []) do
    url_path = "/networks/#{URI.encode(network_id)}/members"

    Request.request_rest(client, metadata(), :post, url_path, [], [], input, options, nil)
  end

  @doc """
  Creates a new blockchain network using Amazon Managed Blockchain.

  Applies only to Hyperledger Fabric.
  """
  def create_network(%Client{} = client, input, options \\ []) do
    url_path = "/networks"

    Request.request_rest(client, metadata(), :post, url_path, [], [], input, options, nil)
  end

  @doc """
  Creates a node on the specified blockchain network.

  Applies to Hyperledger Fabric and Ethereum.
  """
  def create_node(%Client{} = client, network_id, input, options \\ []) do
    url_path = "/networks/#{URI.encode(network_id)}/nodes"

    Request.request_rest(client, metadata(), :post, url_path, [], [], input, options, nil)
  end

  @doc """
  Creates a proposal for a change to the network that other members of the network
  can vote on, for example, a proposal to add a new member to the network.

  Any member can create a proposal.

  Applies only to Hyperledger Fabric.
  """
  def create_proposal(%Client{} = client, network_id, input, options \\ []) do
    url_path = "/networks/#{URI.encode(network_id)}/proposals"

    Request.request_rest(client, metadata(), :post, url_path, [], [], input, options, nil)
  end

  @doc """
  Deletes a member.

  Deleting a member removes the member and all associated resources from the
  network. `DeleteMember` can only be called for a specified `MemberId` if the
  principal performing the action is associated with the AWS account that owns the
  member. In all other cases, the `DeleteMember` action is carried out as the
  result of an approved proposal to remove a member. If `MemberId` is the last
  member in a network specified by the last AWS account, the network is deleted
  also.

  Applies only to Hyperledger Fabric.
  """
  def delete_member(%Client{} = client, member_id, network_id, input, options \\ []) do
    url_path = "/networks/#{URI.encode(network_id)}/members/#{URI.encode(member_id)}"

    Request.request_rest(client, metadata(), :delete, url_path, [], [], input, options, nil)
  end

  @doc """
  Deletes a node that your AWS account owns.

  All data on the node is lost and cannot be recovered.

  Applies to Hyperledger Fabric and Ethereum.
  """
  def delete_node(%Client{} = client, network_id, node_id, input, options \\ []) do
    url_path = "/networks/#{URI.encode(network_id)}/nodes/#{URI.encode(node_id)}"

    # "MemberId" is moved from the input body into the query string.
    {query_params, input} =
      [
        {"MemberId", "memberId"}
      ]
      |> Request.build_params(input)

    Request.request_rest(client, metadata(), :delete, url_path, query_params, [], input, options, nil)
  end

  @doc """
  Returns detailed information about a member.

  Applies only to Hyperledger Fabric.
  """
  def get_member(%Client{} = client, member_id, network_id, options \\ []) do
    url_path = "/networks/#{URI.encode(network_id)}/members/#{URI.encode(member_id)}"

    Request.request_rest(client, metadata(), :get, url_path, [], [], nil, options, nil)
  end

  @doc """
  Returns detailed information about a network.

  Applies to Hyperledger Fabric and Ethereum.
  """
  def get_network(%Client{} = client, network_id, options \\ []) do
    url_path = "/networks/#{URI.encode(network_id)}"

    Request.request_rest(client, metadata(), :get, url_path, [], [], nil, options, nil)
  end

  @doc """
  Returns detailed information about a node.

  Applies to Hyperledger Fabric and Ethereum.
  """
  def get_node(%Client{} = client, network_id, node_id, member_id \\ nil, options \\ []) do
    url_path = "/networks/#{URI.encode(network_id)}/nodes/#{URI.encode(node_id)}"
    query_params = add_query_param([], "memberId", member_id)

    Request.request_rest(client, metadata(), :get, url_path, query_params, [], nil, options, nil)
  end

  @doc """
  Returns detailed information about a proposal.

  Applies only to Hyperledger Fabric.
  """
  def get_proposal(%Client{} = client, network_id, proposal_id, options \\ []) do
    url_path = "/networks/#{URI.encode(network_id)}/proposals/#{URI.encode(proposal_id)}"

    Request.request_rest(client, metadata(), :get, url_path, [], [], nil, options, nil)
  end

  @doc """
  Returns a list of all invitations for the current AWS account.

  Applies only to Hyperledger Fabric.
  """
  def list_invitations(%Client{} = client, max_results \\ nil, next_token \\ nil, options \\ []) do
    url_path = "/invitations"

    query_params =
      []
      |> add_query_param("nextToken", next_token)
      |> add_query_param("maxResults", max_results)

    Request.request_rest(client, metadata(), :get, url_path, query_params, [], nil, options, nil)
  end

  @doc """
  Returns a list of the members in a network and properties of their
  configurations.

  Applies only to Hyperledger Fabric.
  """
  def list_members(
        %Client{} = client,
        network_id,
        is_owned \\ nil,
        max_results \\ nil,
        name \\ nil,
        next_token \\ nil,
        status \\ nil,
        options \\ []
      ) do
    url_path = "/networks/#{URI.encode(network_id)}/members"

    query_params =
      []
      |> add_query_param("status", status)
      |> add_query_param("nextToken", next_token)
      |> add_query_param("name", name)
      |> add_query_param("maxResults", max_results)
      |> add_query_param("isOwned", is_owned)

    Request.request_rest(client, metadata(), :get, url_path, query_params, [], nil, options, nil)
  end

  @doc """
  Returns information about the networks in which the current AWS account
  participates.

  Applies to Hyperledger Fabric and Ethereum.
  """
  def list_networks(
        %Client{} = client,
        framework \\ nil,
        max_results \\ nil,
        name \\ nil,
        next_token \\ nil,
        status \\ nil,
        options \\ []
      ) do
    url_path = "/networks"

    query_params =
      []
      |> add_query_param("status", status)
      |> add_query_param("nextToken", next_token)
      |> add_query_param("name", name)
      |> add_query_param("maxResults", max_results)
      |> add_query_param("framework", framework)

    Request.request_rest(client, metadata(), :get, url_path, query_params, [], nil, options, nil)
  end

  @doc """
  Returns information about the nodes within a network.

  Applies to Hyperledger Fabric and Ethereum.
  """
  def list_nodes(
        %Client{} = client,
        network_id,
        max_results \\ nil,
        member_id \\ nil,
        next_token \\ nil,
        status \\ nil,
        options \\ []
      ) do
    url_path = "/networks/#{URI.encode(network_id)}/nodes"

    query_params =
      []
      |> add_query_param("status", status)
      |> add_query_param("nextToken", next_token)
      |> add_query_param("memberId", member_id)
      |> add_query_param("maxResults", max_results)

    Request.request_rest(client, metadata(), :get, url_path, query_params, [], nil, options, nil)
  end

  @doc """
  Returns the list of votes for a specified proposal, including the value of each
  vote and the unique identifier of the member that cast the vote.

  Applies only to Hyperledger Fabric.
  """
  def list_proposal_votes(
        %Client{} = client,
        network_id,
        proposal_id,
        max_results \\ nil,
        next_token \\ nil,
        options \\ []
      ) do
    url_path = "/networks/#{URI.encode(network_id)}/proposals/#{URI.encode(proposal_id)}/votes"

    query_params =
      []
      |> add_query_param("nextToken", next_token)
      |> add_query_param("maxResults", max_results)

    Request.request_rest(client, metadata(), :get, url_path, query_params, [], nil, options, nil)
  end

  @doc """
  Returns a list of proposals for the network.

  Applies only to Hyperledger Fabric.
  """
  def list_proposals(
        %Client{} = client,
        network_id,
        max_results \\ nil,
        next_token \\ nil,
        options \\ []
      ) do
    url_path = "/networks/#{URI.encode(network_id)}/proposals"

    query_params =
      []
      |> add_query_param("nextToken", next_token)
      |> add_query_param("maxResults", max_results)

    Request.request_rest(client, metadata(), :get, url_path, query_params, [], nil, options, nil)
  end

  @doc """
  Returns a list of tags for the specified resource.

  Each tag consists of a key and optional value.

  For more information about tags, see [Tagging Resources](https://docs.aws.amazon.com/managed-blockchain/latest/ethereum-dev/tagging-resources.html)
  in the *Amazon Managed Blockchain Ethereum Developer Guide*, or [Tagging Resources](https://docs.aws.amazon.com/managed-blockchain/latest/hyperledger-fabric-dev/tagging-resources.html)
  in the *Amazon Managed Blockchain Hyperledger Fabric Developer Guide*.
  """
  def list_tags_for_resource(%Client{} = client, resource_arn, options \\ []) do
    url_path = "/tags/#{URI.encode(resource_arn)}"

    Request.request_rest(client, metadata(), :get, url_path, [], [], nil, options, nil)
  end

  @doc """
  Rejects an invitation to join a network.

  This action can be called by a principal in an AWS account that has received an
  invitation to create a member and join a network.

  Applies only to Hyperledger Fabric.
  """
  def reject_invitation(%Client{} = client, invitation_id, input, options \\ []) do
    url_path = "/invitations/#{URI.encode(invitation_id)}"

    Request.request_rest(client, metadata(), :delete, url_path, [], [], input, options, nil)
  end

  @doc """
  Adds or overwrites the specified tags for the specified Amazon Managed
  Blockchain resource.

  Each tag consists of a key and optional value.

  When you specify a tag key that already exists, the tag value is overwritten
  with the new value. Use `UntagResource` to remove tag keys.

  A resource can have up to 50 tags. If you try to create more than 50 tags for a
  resource, your request fails and returns an error.

  For more information about tags, see [Tagging Resources](https://docs.aws.amazon.com/managed-blockchain/latest/ethereum-dev/tagging-resources.html)
  in the *Amazon Managed Blockchain Ethereum Developer Guide*, or [Tagging Resources](https://docs.aws.amazon.com/managed-blockchain/latest/hyperledger-fabric-dev/tagging-resources.html)
  in the *Amazon Managed Blockchain Hyperledger Fabric Developer Guide*.
  """
  def tag_resource(%Client{} = client, resource_arn, input, options \\ []) do
    url_path = "/tags/#{URI.encode(resource_arn)}"

    Request.request_rest(client, metadata(), :post, url_path, [], [], input, options, nil)
  end

  @doc """
  Removes the specified tags from the Amazon Managed Blockchain resource.

  For more information about tags, see [Tagging Resources](https://docs.aws.amazon.com/managed-blockchain/latest/ethereum-dev/tagging-resources.html)
  in the *Amazon Managed Blockchain Ethereum Developer Guide*, or [Tagging Resources](https://docs.aws.amazon.com/managed-blockchain/latest/hyperledger-fabric-dev/tagging-resources.html)
  in the *Amazon Managed Blockchain Hyperledger Fabric Developer Guide*.
  """
  def untag_resource(%Client{} = client, resource_arn, input, options \\ []) do
    url_path = "/tags/#{URI.encode(resource_arn)}"

    # "TagKeys" is moved from the input body into the query string.
    {query_params, input} =
      [
        {"TagKeys", "tagKeys"}
      ]
      |> Request.build_params(input)

    Request.request_rest(client, metadata(), :delete, url_path, query_params, [], input, options, nil)
  end

  @doc """
  Updates a member configuration with new parameters.

  Applies only to Hyperledger Fabric.
  """
  def update_member(%Client{} = client, member_id, network_id, input, options \\ []) do
    url_path = "/networks/#{URI.encode(network_id)}/members/#{URI.encode(member_id)}"

    Request.request_rest(client, metadata(), :patch, url_path, [], [], input, options, nil)
  end

  @doc """
  Updates a node configuration with new parameters.

  Applies only to Hyperledger Fabric.
  """
  def update_node(%Client{} = client, network_id, node_id, input, options \\ []) do
    url_path = "/networks/#{URI.encode(network_id)}/nodes/#{URI.encode(node_id)}"

    Request.request_rest(client, metadata(), :patch, url_path, [], [], input, options, nil)
  end

  @doc """
  Casts a vote for a specified `ProposalId` on behalf of a member.

  The member to vote as, specified by `VoterMemberId`, must be in the same AWS
  account as the principal that calls the action.

  Applies only to Hyperledger Fabric.
  """
  def vote_on_proposal(%Client{} = client, network_id, proposal_id, input, options \\ []) do
    url_path = "/networks/#{URI.encode(network_id)}/proposals/#{URI.encode(proposal_id)}/votes"

    Request.request_rest(client, metadata(), :post, url_path, [], [], input, options, nil)
  end

  # Prepends `{key, value}` to `query_params` unless `value` is nil.
  # Replaces the repeated `if !is_nil(...)` blocks; the order of
  # `add_query_param/3` calls mirrors the original, so the resulting
  # query-parameter lists are identical.
  defp add_query_param(query_params, _key, nil), do: query_params
  defp add_query_param(query_params, key, value), do: [{key, value} | query_params]
end
|
lib/aws/generated/managed_blockchain.ex
| 0.79736
| 0.482551
|
managed_blockchain.ex
|
starcoder
|
defmodule QRCode.QR do
  @moduledoc """
  QR code data structure
  """

  alias QRCode.ErrorCorrection

  @type level() :: :low | :medium | :quartile | :high
  @type version() :: 1..40
  @type mode() :: :numeric | :alphanumeric | :byte | :kanji | :eci
  @type mask_num() :: 0..7
  @type groups() :: {[[], ...], [[]]}
  @type t() :: %__MODULE__{
          orig: ExMaybe.t(String.t()),
          encoded: ExMaybe.t(binary()),
          version: ExMaybe.t(version()),
          ecc_level: level(),
          ecc: ExMaybe.t(ErrorCorrection.t()),
          message: ExMaybe.t(String.t()),
          mode: mode(),
          matrix: MatrixReloaded.Matrix.t(),
          mask_num: mask_num()
        }

  @levels [:low, :medium, :quartile, :high]
  # Mode indicator bit patterns, kept for reference; only :byte is
  # currently implemented by the pipeline below.
  # @modes [
  #   numeric: 0b0001,
  #   alphanumeric: 0b0010,
  #   byte: 0b0100,
  #   kanji: 0b1000,
  #   eci: 0b0111
  # ]

  defstruct orig: nil,
            encoded: nil,
            version: nil,
            ecc_level: :low,
            ecc: nil,
            message: nil,
            mode: :byte,
            matrix: [[]],
            mask_num: 0

  # Guard-safe validity checks for the public API.
  defguard level(lvl) when lvl in @levels
  defguard version(v) when v in 1..40
  defguard masking(m) when m in 0..7

  @doc """
  Creates QR code. You can change the error correction level according to your needs.

  There are four level of error correction: `:low | :medium | :quartile | :high`
  where `:low` is default value.

  This function returns [Result](https://hexdocs.pm/result/api-reference.html),
  it means either tuple of `{:ok, QR.t()}` or `{:error, "msg"}`.

  ## Example:
      iex> QRCode.QR.create("Hello World")
      {:ok,
      %QRCode.QR{
        ecc: %QRCode.ErrorCorrection{
          blocks_in_group1: 1,
          blocks_in_group2: 0,
          codewords: {[[139, 194, 132, 243, 72, 115, 10]], []},
          codewords_per_block_in_group1: 19,
          codewords_per_block_in_group2: 0,
          ec_codewrods_per_block: 7,
          groups: {[
            [64, 180, 134, 86, 198, 198, 242, 5, 118, 247, 38, 198, 64, 236, 17,
            236, 17, 236, 17]
          ], []}
        },
        ecc_level: :low,
        encoded: <<64, 180, 134, 86, 198, 198, 242, 5, 118, 247, 38, 198, 64, 236,
          17, 236, 17, 236, 17>>,
        mask_num: 0,
        matrix: [
          [1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1],
          [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1],
          [1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1],
          [1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1],
          [1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1],
          [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1],
          [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1],
          [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
          [1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0],
          [0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1],
          [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1],
          [0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0],
          [1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0],
          [1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1],
          [1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1],
          [1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0],
          [1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0],
          [1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
          [1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0],
          [1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1]
        ],
        message: <<64, 180, 134, 86, 198, 198, 242, 5, 118, 247, 38, 198, 64, 236,
          17, 236, 17, 236, 17, 139, 194, 132, 243, 72, 115, 10>>,
        mode: :byte,
        orig: "Hello World",
        version: 1
      }}

  For saving QR code to svg file, use `QRCode.Svg.save_as/3` function:

      iex> qr = QRCode.QR.create("Hello World", :high)
      iex> qr |> Result.and_then(&QRCode.Svg.save_as(&1,"hello.svg"))
      {:ok, "hello.svg"}

  The svg file will be saved into your project directory.
  """
  @spec create(String.t(), level()) :: Result.t(String.t(), t())
  def create(orig, level \\ :low) when level(level) do
    # The pipeline threads the QR struct through each generation stage;
    # `Result.map` steps cannot fail on their own, `Result.and_then` steps
    # can short-circuit with `{:error, msg}`.
    %__MODULE__{orig: orig, ecc_level: level}
    |> QRCode.ByteMode.put_version()
    |> Result.map(&QRCode.DataEncoding.byte_encode/1)
    |> Result.map(&QRCode.ErrorCorrection.put/1)
    |> Result.map(&QRCode.Message.put/1)
    |> Result.and_then(&QRCode.Placement.put_patterns/1)
    |> Result.map(&QRCode.DataMasking.apply/1)
    |> Result.and_then(&QRCode.Placement.replace_placeholders/1)
    |> Result.and_then(&QRCode.FormatVersion.put_information/1)
  end

  @doc """
  The same as `create/2`, but raises a `QRCode.Error` exception if it fails.
  """
  @spec create!(String.t(), level()) :: t()
  def create!(text, level \\ :low) when level(level) do
    case create(text, level) do
      {:ok, qr} -> qr
      {:error, msg} -> raise QRCode.Error, message: msg
    end
  end
end
|
lib/qr_code/qr.ex
| 0.830147
| 0.684323
|
qr.ex
|
starcoder
|
defimpl Timex.Protocol, for: Map do
  @moduledoc """
  This is an implementation of Timex.Protocol for plain maps,
  which allows working directly with deserialized date/times
  via the Timex API. It accepts date/time maps with either atom
  or string keys, as long as those keys match current Calendar
  types. It also accepts a few legacy variations for types which
  may have been serialized with older versions of Timex.
  """

  # Converts `map` via Timex.Convert.convert_map/1 and then dispatches
  # `function` (plus any extra `args`) on Timex.Protocol. Raises the
  # conversion error if the map cannot be converted.
  defmacro convert!(map, function, args \\ []) when is_list(args) do
    quote do
      case Timex.Convert.convert_map(unquote(map)) do
        {:error, reason} -> raise reason
        converted -> apply(Timex.Protocol, unquote(function), [converted | unquote(args)])
      end
    end
  end

  # Same as convert!/3, but on conversion failure returns the
  # {:error, reason} tuple to the caller instead of raising.
  defmacro convert(map, function, args \\ []) when is_list(args) do
    quote do
      case Timex.Convert.convert_map(unquote(map)) do
        {:error, _} = err -> err
        converted -> apply(Timex.Protocol, unquote(function), [converted | unquote(args)])
      end
    end
  end

  # Protocol callbacks: each converts the plain map, then delegates to the
  # protocol implementation for the converted struct.
  # NOTE(review): the split between convert!/convert (raise vs. return error
  # tuple) per callback below is taken as intentional — confirm before changing.
  def to_julian(map), do: convert!(map, :to_julian)
  def to_gregorian_seconds(map), do: convert!(map, :to_gregorian_seconds)
  def to_gregorian_microseconds(map), do: convert!(map, :to_gregorian_microseconds)
  def to_unix(map), do: convert!(map, :to_unix)
  def to_date(map), do: convert!(map, :to_date)
  def to_datetime(map, timezone), do: convert(map, :to_datetime, [timezone])
  def to_naive_datetime(map), do: convert(map, :to_naive_datetime)
  def to_erl(map), do: convert(map, :to_erl)
  def century(map), do: convert!(map, :century)
  def is_leap?(map), do: convert!(map, :is_leap?)
  def shift(map, options), do: convert(map, :shift, [options])
  def set(map, options), do: convert(map, :set, [options])
  def beginning_of_day(map), do: convert!(map, :beginning_of_day)
  def end_of_day(map), do: convert!(map, :end_of_day)
  def beginning_of_week(map, start), do: convert(map, :beginning_of_week, [start])
  def end_of_week(map, start), do: convert(map, :end_of_week, [start])
  def beginning_of_year(map), do: convert!(map, :beginning_of_year)
  def end_of_year(map), do: convert!(map, :end_of_year)
  def beginning_of_quarter(map), do: convert!(map, :beginning_of_quarter)
  def end_of_quarter(map), do: convert!(map, :end_of_quarter)
  def beginning_of_month(map), do: convert!(map, :beginning_of_month)
  def end_of_month(map), do: convert!(map, :end_of_month)
  def quarter(map), do: convert!(map, :quarter)
  def days_in_month(map), do: convert!(map, :days_in_month)
  def week_of_month(map), do: convert!(map, :week_of_month)
  def weekday(map), do: convert!(map, :weekday)
  def day(map), do: convert!(map, :day)
  def is_valid?(map), do: convert!(map, :is_valid?)
  def iso_week(map), do: convert!(map, :iso_week)
  def from_iso_day(map, day), do: convert(map, :from_iso_day, [day])
end
|
deps/timex/lib/datetime/map.ex
| 0.778776
| 0.723944
|
map.ex
|
starcoder
|
defmodule Grizzly.ZWave.CommandClasses.ThermostatSetpoint do
  @moduledoc """
  "ThermostatSetpoint" Command Class

  The Thermostat Setpoint Command Class is used to configure setpoints for the modes supported by a
  thermostat.

  What type of commands does this command class support?
  """

  @behaviour Grizzly.ZWave.CommandClass

  alias Grizzly.ZWave.DecodeError

  @type type ::
          :na
          | :heating
          | :cooling
          | :furnace
          | :dry_air
          | :moist_air
          | :auto_changeover
          | :energy_save_heating
          | :energy_save_cooling
          | :away_heating
          | :away_cooling
          | :full_power

  # NOTE(review): the implementation historically used the misspelled atom
  # `:celcius`. Decoding still returns `:celcius` so existing callers that
  # match on it keep working, but both spellings are accepted when encoding.
  @type scale :: :celsius | :celcius | :fahrenheit

  @impl true
  def byte(), do: 0x43

  @impl true
  def name(), do: :thermostat_setpoint

  @doc "Encodes a setpoint type atom to its command-class byte."
  @spec encode_type(type) :: byte
  def encode_type(:na), do: 0x00
  def encode_type(:heating), do: 0x01
  def encode_type(:cooling), do: 0x02
  def encode_type(:furnace), do: 0x07
  def encode_type(:dry_air), do: 0x08
  def encode_type(:moist_air), do: 0x09
  def encode_type(:auto_changeover), do: 0x0A
  def encode_type(:energy_save_heating), do: 0x0B
  def encode_type(:energy_save_cooling), do: 0x0C
  def encode_type(:away_heating), do: 0x0D
  def encode_type(:away_cooling), do: 0x0E
  def encode_type(:full_power), do: 0x0F

  @doc "Decodes a setpoint type byte; unknown bytes fall back to `:na`."
  @spec decode_type(byte()) :: type()
  def decode_type(0x01), do: :heating
  def decode_type(0x02), do: :cooling
  def decode_type(0x07), do: :furnace
  def decode_type(0x08), do: :dry_air
  def decode_type(0x09), do: :moist_air
  def decode_type(0x0A), do: :auto_changeover
  def decode_type(0x0B), do: :energy_save_heating
  def decode_type(0x0C), do: :energy_save_cooling
  def decode_type(0x0D), do: :away_heating
  def decode_type(0x0E), do: :away_cooling
  def decode_type(0x0F), do: :full_power
  def decode_type(_na_type), do: :na

  @doc "Encodes a temperature scale atom to its command-class byte."
  @spec encode_scale(scale) :: byte
  # Fixed: the correctly spelled `:celsius` (as advertised by the `scale`
  # typespec) previously raised FunctionClauseError; both spellings encode.
  def encode_scale(:celsius), do: 0x00
  def encode_scale(:celcius), do: 0x00
  def encode_scale(:fahrenheit), do: 0x01

  @doc "Decodes a temperature scale byte; unknown bytes produce a DecodeError."
  @spec decode_scale(byte) :: {:ok, scale} | {:error, %DecodeError{}}
  # Still returns the legacy `:celcius` spelling for backward compatibility.
  def decode_scale(0x00), do: {:ok, :celcius}
  def decode_scale(0x01), do: {:ok, :fahrenheit}

  def decode_scale(byte),
    # NOTE(review): `param: :type` is kept as-is for compatibility, though
    # `:scale` would arguably be more accurate here.
    do: {:error, %DecodeError{value: byte, param: :type, command: :thermostat_setpoint}}
end
|
lib/grizzly/zwave/command_classes/thermostat_setpoint.ex
| 0.909281
| 0.44348
|
thermostat_setpoint.ex
|
starcoder
|
require Record

defmodule File.Stat do
  @moduledoc """
  A struct that holds file information.
  In Erlang, this struct is represented by a `:file_info` record.
  Therefore this module also provides functions for converting
  between the Erlang record and the Elixir struct.
  Its fields are:
    * `size` - size of file in bytes.
    * `type` - `:device | :directory | :regular | :other`; the type of the
      file.
    * `access` - `:read | :write | :read_write | :none`; the current system
      access to the file.
    * `atime` - the last time the file was read.
    * `mtime` - the last time the file was written.
    * `ctime` - the interpretation of this time field depends on the operating
      system. On Unix, it is the last time the file or the inode was changed.
      In Windows, it is the time of creation.
    * `mode` - the file permissions.
    * `links` - the number of links to this file. This is always 1 for file
      systems which have no concept of links.
    * `major_device` - identifies the file system where the file is located.
      In Windows, the number indicates a drive as follows: 0 means A:, 1 means
      B:, and so on.
    * `minor_device` - only valid for character devices on Unix. In all other
      cases, this field is zero.
    * `inode` - gives the inode number. On non-Unix file systems, this field
      will be zero.
    * `uid` - indicates the owner of the file. Will be zero for non-Unix file
      systems.
    * `gid` - indicates the group that owns the file. Will be zero for
      non-Unix file systems.
  The time type returned in `atime`, `mtime`, and `ctime` is dependent on the
  time type set in options. `{:time, type}` where type can be `:local`,
  `:universal`, or `:posix`. Default is `:universal`.
  """

  # Extract the Erlang :file_info record definition at compile time so the
  # struct fields always mirror the OTP release this module is compiled with.
  record = Record.extract(:file_info, from_lib: "kernel/include/file.hrl")
  # keys: the record field names; vals: quoted variables named after them;
  # pairs: {key, var} keyword list. These feed the unquote fragments below,
  # generating the struct<->record conversions without listing fields by hand.
  keys = :lists.map(&elem(&1, 0), record)
  vals = :lists.map(&{&1, [], nil}, keys)
  pairs = :lists.zip(keys, vals)
  defstruct keys
  @type t :: %__MODULE__{}
  @doc """
  Converts a `File.Stat` struct to a `:file_info` record.
  """
  def to_record(%File.Stat{unquote_splicing(pairs)}) do
    {:file_info, unquote_splicing(vals)}
  end
  @doc """
  Converts a `:file_info` record into a `File.Stat`.
  """
  def from_record(file_info)
  def from_record({:file_info, unquote_splicing(vals)}) do
    %File.Stat{unquote_splicing(pairs)}
  end
end
|
lib/elixir/lib/file/stat.ex
| 0.790409
| 0.669698
|
stat.ex
|
starcoder
|
defmodule ExAlipay.Client do
  @moduledoc """
  ExAlipay Client that export API and perform request to alipay backend.
  This module defined some common used api and you can easily add new api:
  * `page_pay` - alipay.trade.page.pay
  * `wap_pay` - alipay.trade.wap.pay
  * `app_pay` - alipay.trade.app.pay
  * `query` - alipay.trade.query
  * `refund` - alipay.trade.refund
  * `close` - alipay.trade.close
  * `refund_query` - alipay.trade.fastpay.refund.query
  * `bill_downloadurl_query` - alipay.data.dataservice.bill.downloadurl.query
  * `auth_token` - alipay.system.oauth.token
  * `user_info` - alipay.user.info.share
  * `transfer` - alipay.fund.trans.toaccount.transfer
  * `transfer_query` - alipay.fund.trans.order.query
  ### Example Usage:
  Define a module `AlipayClient` that use `ExAlipay.Client`,
  `AlipayClient` module will define new functions that calling
  the same `ExAlipay.Client` functions, the difference is that it
  stores the client with module property `@client` for convenient:
  ```elixir
  defmodule AlipayClient do
    use ExAlipay.Client, Application.fetch_env!(:my_app, __MODULE__)
  end
  ```
  Config your `AlipayClient` in `config/config.exs`:
  ```elixir
  config :my_app, AlipayClient,
    appid: "APPID",
    pid: "PID",
    public_key: "-- public_key --",
    private_key: "-- private_key --",
    sandbox?: false
  ```
  Use the `page_pay`:
  ```elixir
  AlipayClient.page_pay(%{
    out_trade_no: "out_trade_no",
    total_amount: 100,
    subject: "the subject",
    return_url: "http://example.com/return_url",
    notify_url: "http://example.com/notify_url",
  })
  ```
  In the handler view of alipay notify:
  ```elixir
  if AlipayClient.verify_notify_sign?(body) do
    # process the payment success logic
    # ...
    # response a plain text `success` to alipay
  else
    # response with error
  end
  ```
  Extend new api you need that isn't provided by `ExAlipay.Client`.
  ```elixir
  defmodule AlipayClient do
    use ExAlipay.Client, Application.fetch_env!(:my_app, __MODULE__)
    # access the public api request that defined in ExAlipay.Client
    # also possible to use functions in ExAlipay.Utils directly
    # see: https://docs.open.alipay.com/api_1/alipay.trade.precreate
    def pre_create(params) do
      {params, ext_params} = prepare_trade_params(params)
      request(@client, "alipay.trade.precreate", params, ext_params)
    end
  end
  # now we can use the new api
  # AlipayClient.pre_create(%{})
  ```
  """
  alias ExAlipay.{Client, RSA, Utils}
  alias ExAlipay.{RequestError, ResponseError}

  # NOTE(review): resolved at compile time, so changing the configured
  # adapter requires recompiling this library. Consider
  # `Application.compile_env/3` (if compile-time is intended) or a runtime
  # lookup inside `request/4`.
  @http_adapter Application.get_env(:ex_alipay, :http_adapter)

  defstruct appid: nil,
            public_key: nil,
            private_key: nil,
            pid: nil,
            format: "JSON",
            charset: "utf-8",
            sign_type: "RSA2",
            version: "1.0",
            sandbox?: false

  @type t :: %__MODULE__{
          appid: binary,
          public_key: binary,
          private_key: binary,
          pid: binary,
          format: binary,
          charset: binary,
          sign_type: binary,
          version: binary,
          sandbox?: boolean
        }

  # Mapping of the convenience function names to the alipay method strings.
  @supported_api %{
    page_pay: "alipay.trade.page.pay",
    wap_pay: "alipay.trade.wap.pay",
    app_pay: "alipay.trade.app.pay",
    query: "alipay.trade.query",
    refund: "alipay.trade.refund",
    close: "alipay.trade.close",
    refund_query: "alipay.trade.fastpay.refund.query",
    bill_downloadurl_query: "alipay.data.dataservice.bill.downloadurl.query",
    # Fixed: this literal had been corrupted (a scrubbed placeholder); the
    # moduledoc documents `auth_token` as "alipay.system.oauth.token".
    auth_token: "alipay.system.oauth.token",
    user_info: "alipay.user.info.share",
    transfer: "alipay.fund.trans.toaccount.transfer",
    transfer_query: "alipay.fund.trans.order.query"
  }

  defmacro __using__(opts) do
    quote do
      import Client
      @before_compile Client
      @client Map.merge(%Client{}, Map.new(unquote(opts)))
    end
  end

  # Generates 1-arity wrappers (`fun(params)` -> `fun(@client, params)`) in
  # the using module for every supported api that Client implements, plus the
  # three helpers appended below.
  defmacro __before_compile__(_) do
    exist_functions = Client.__info__(:functions)

    supported_api()
    |> Map.keys()
    |> Enum.filter(fn key -> Keyword.has_key?(exist_functions, key) end)
    |> Enum.concat([:auth_url, :app_auth_str, :verify_notify_sign?])
    |> Enum.map(fn key ->
      quote do
        def unquote(key)(params), do: unquote(key)(@client, params)
      end
    end)
  end

  @doc false
  def supported_api, do: @supported_api

  @doc """
  Create trade url for web page.
  See: https://docs.open.alipay.com/270/alipay.trade.page.pay
  ## Examples:
      ExAlipay.Client.page_pay(client, %{
        out_trade_no: "out_trade_no",
        total_amount: 100,
        subject: "the subject",
        return_url: "http://example.com/return_url",
        notify_url: "http://example.com/notify_url",
      })
  """
  def page_pay(client, params) do
    params = Map.put_new(params, :product_code, "FAST_INSTANT_TRADE_PAY")
    {params, ext_params} = prepare_trade_params(params)
    Utils.build_request_url(client, @supported_api.page_pay, params, ext_params)
  end

  @doc """
  Create trade url for mobile page.
  See: https://docs.open.alipay.com/203/107090/
  """
  def wap_pay(client, params) do
    params = Map.put_new(params, :product_code, "QUICK_WAP_WAY")
    {params, ext_params} = prepare_trade_params(params)
    Utils.build_request_url(client, @supported_api.wap_pay, params, ext_params)
  end

  @doc """
  Create trade string for app pay.
  See: https://docs.open.alipay.com/204/105465/
  ## Examples:
      ExAlipay.Client.app_pay(client, %{
        out_trade_no: "out_trade_no",
        total_amount: 100,
        subject: "the subject",
        notify_url: "http://example.com/notify_url",
      })
  """
  def app_pay(client, params) do
    params = Map.put_new(params, :product_code, "QUICK_MSECURITY_PAY")
    {params, ext_params} = prepare_trade_params(params)
    Utils.build_request_str(client, @supported_api.app_pay, params, ext_params)
  end

  @doc """
  Pop `return_url` and `notify_url` from create trade params as ext_params.
  ## Examples:
      params = %{
        out_trade_no: "out_trade_no",
        total_amount: 100,
        subject: "the subject",
        notify_url: "http://example.com/notify_url",
      }
      ExAlipay.Client.prepare_trade_params(params)
      # Result:
      # {
      #   %{out_trade_no: "out_trade_no", subject: "the subject", total_amount: 100},
      #   %{notify_url: "http://example.com/notify_url", return_url: nil}
      # }
  """
  def prepare_trade_params(params) do
    {return_url, params} = Map.pop(params, :return_url)
    {notify_url, params} = Map.pop(params, :notify_url)
    ext_params = %{return_url: return_url, notify_url: notify_url}
    {params, ext_params}
  end

  @doc """
  Refund trade.
  See: https://docs.open.alipay.com/api_1/alipay.trade.refund
  ## Examples:
      ExAlipay.Client.refund(client, %{
        out_trade_no: "out_trade_no",
        refund_amount: 100
      })
  """
  def refund(client, params) do
    request(client, @supported_api.refund, params)
  end

  @doc """
  Query trade info.
  See: https://docs.open.alipay.com/api_1/alipay.trade.query
  ## Examples:
      ExAlipay.Client.query(client, %{
        out_trade_no: "out_trade_no",
      })
  """
  def query(client, params) do
    request(client, @supported_api.query, params)
  end

  @doc """
  Close trade.
  See: https://docs.open.alipay.com/api_1/alipay.trade.close
  ## Examples:
      ExAlipay.Client.close(client, %{
        out_trade_no: "out_trade_no",
      })
  """
  def close(client, params) do
    request(client, @supported_api.close, params)
  end

  @doc """
  Query refund info.
  See: https://docs.open.alipay.com/api_1/alipay.trade.fastpay.refund.query
  ## Examples:
      ExAlipay.Client.refund_query(client, %{
        out_trade_no: "out_trade_no",
        out_request_no: "out_request_no",
      })
  """
  def refund_query(client, params) do
    request(client, @supported_api.refund_query, params)
  end

  @doc """
  Fetch bill download url.
  See: https://docs.open.alipay.com/api_15/alipay.data.dataservice.bill.downloadurl.query
  ## Examples:
      ExAlipay.Client.bill_downloadurl_query(client, %{
        bill_type: "trade",
        bill_date: "2019-06-06",
      })
  """
  def bill_downloadurl_query(client, params) do
    request(client, @supported_api.bill_downloadurl_query, params)
  end

  @doc """
  Get user auth_token by auth_code.
  See: https://docs.open.alipay.com/api_9/alipay.system.oauth.token
  ## Examples:
      ExAlipay.Client.auth_token(client, %{
        grant_type: "authorization_code",
        code: "an auth_code",
      })
  """
  def auth_token(client, params) do
    # No business content for this api: params travel as ext_params.
    request(client, @supported_api.auth_token, nil, params)
  end

  @doc """
  Get user info by auth_token.
  See: https://docs.open.alipay.com/api_2/alipay.user.info.share
  ## Examples:
      ExAlipay.Client.user_info(client, %{
        auth_token: "an auth_token",
      })
  """
  def user_info(client, params) do
    request(client, @supported_api.user_info, nil, params)
  end

  @doc """
  Transfer money to users' alipay account.
  See: https://docs.open.alipay.com/api_28/alipay.fund.trans.toaccount.transfer
  ## Examples:
      ExAlipay.Client.transfer(client, %{
        payee_account: "an alipay account",
        out_biz_no: "an out_biz_no",
        amount: 100,
        payee_type: "ALIPAY_LOGONID",
      })
  """
  def transfer(client, params) do
    request(client, @supported_api.transfer, params)
  end

  @doc """
  Query transfer order.
  See: https://docs.open.alipay.com/api_28/alipay.fund.trans.order.query
  ## Examples:
      ExAlipay.Client.transfer_query(client, %{
        out_biz_no: "an out_biz_no",
      })
  """
  def transfer_query(client, params) do
    request(client, @supported_api.transfer_query, params)
  end

  @doc """
  Get auth url for alipay web auth.
  See: https://docs.open.alipay.com/289/105656
  ## Examples:
      ExAlipay.Client.auth_url(client, %{
        redirect_uri: "http://example.com/auth_redirect_url",
        scope: "auth_user",
        state: "state"
      })
  """
  def auth_url(client, %{redirect_uri: redirect_uri} = params) do
    base_url = get_base_auth_url(client)
    state = Map.get(params, :state)
    scope = Map.get(params, :scope, "auth_user")
    url = "#{base_url}?app_id=#{client.appid}&scope=#{scope}&redirect_uri=#{redirect_uri}"

    case state do
      nil -> url
      _ -> "#{url}&state=#{state}"
    end
  end

  @doc """
  Get auth string for alipay app auth.
  See: https://docs.open.alipay.com/218/105327/
  ## Examples:
      ExAlipay.Client.app_auth_str(client, %{
        target_id: "target_id"
      })
  """
  def app_auth_str(client, %{target_id: target_id}) do
    params = %{
      apiname: "com.alipay.account.auth",
      method: "alipay.open.auth.sdk.code.get",
      app_id: client.appid,
      app_name: "mc",
      biz_type: "openservice",
      pid: client.pid,
      product_id: "APP_FAST_LOGIN",
      scope: "kuaijie",
      target_id: target_id,
      auth_type: "AUTHACCOUNT",
      sign_type: "RSA2"
    }

    data = Utils.create_sign_str(params)
    sign = Utils.create_sign(client, data)
    "#{data}&sign=#{sign}"
  end

  @doc """
  Perform the request to alipay backend.

  `content` may be `nil` for apis (such as `auth_token/2`) that carry all of
  their parameters in `ext_params`. Raises `ExAlipay.RequestError` or
  `ExAlipay.ResponseError` on failure.
  """
  @spec request(Client.t(), binary, map | nil, map) :: map
  def request(client, method, content, ext_params \\ %{}) do
    url = Utils.build_request_url(client, method, content, ext_params)

    with {:ok, resp} <- @http_adapter.get(url),
         {:ok, body} <- verify_status(resp),
         {:ok, key} <- verify_request_sign(client, body),
         {:ok, json_data} <- Jason.decode(body),
         {:ok, resp_data} <- check_response_data(json_data[key]) do
      resp_data
    else
      {:error, error} -> raise error
    end
  end

  defp verify_status(%{status_code: 200, body: body}) do
    {:ok, body}
  end

  defp verify_status(%{status_code: status_code}) do
    {:error, %RequestError{status_code: status_code}}
  end

  defp check_response_data(resp_data) do
    case resp_data["code"] do
      # api response like auth_token without code
      nil -> {:ok, resp_data}
      # 10000 is the success code of alipay
      "10000" -> {:ok, resp_data}
      _ -> {:error, ResponseError.from_map(resp_data)}
    end
  end

  # Extracts the "<api>_response" payload substring from the raw body and
  # checks its RSA signature. Returns {:ok, key} where `key` is the JSON key
  # holding the payload.
  defp verify_request_sign(client, body) do
    regex = ~r/"(?<key>\w+_response)":(?<response>.*),"sign":/

    case Regex.named_captures(regex, body) do
      %{"response" => response, "key" => key} ->
        resp_json = Jason.decode!(body)

        if RSA.verify(response, client.sign_type, client.public_key, resp_json["sign"]) do
          {:ok, key}
        else
          {:error, %RequestError{reason: "verify sign failed"}}
        end

      nil ->
        {:error, %RequestError{reason: "unexpected response data"}}
    end
  end

  @doc """
  Verify the sign of alipay notify, used in handler of notify_url.
  """
  @spec verify_notify_sign?(Client.t(), map) :: boolean
  def verify_notify_sign?(client, body) do
    {sign, body} = Map.pop(body, :sign)
    {sign_type, body} = Map.pop(body, :sign_type)

    body
    |> Utils.create_sign_str()
    |> RSA.verify(sign_type, client.public_key, sign)
  end

  defp get_base_auth_url(%Client{sandbox?: false}) do
    "https://openauth.alipay.com/oauth2/publicAppAuthorize.htm"
  end

  defp get_base_auth_url(%Client{sandbox?: true}) do
    "https://openauth.alipaydev.com/oauth2/publicAppAuthorize.htm"
  end
end
|
lib/client.ex
| 0.868283
| 0.655873
|
client.ex
|
starcoder
|
defmodule Representer do
  @moduledoc """
  Implementation of the Representer pattern for the API
  """

  # Guard usable by routers/controllers to whitelist exactly the extensions
  # that `transform/2` below can render.
  defguard known_extension?(extension) when extension in [
    "json",
    "collection",
    "hal",
    "mason",
    "siren"
  ]
  defmodule Collection do
    @moduledoc """
    Struct for a collection of `Representer.Item`s

    Contains the list of `:items`, `:pagination`, and a list of `:links`
    """

    # :pagination is nil or a map with pagination fields (presumably a
    # %Representer.Pagination{} — see maybe_paginate/2); :links defaults to [].
    defstruct [:href, :name, :items, :pagination, links: []]
  end
  defmodule Item do
    @moduledoc """
    Struct for an item that can be rendered in various formats

    Consists of an `:item` that contains a map of properties and a list
    of `:links` that may be associated with the item.
    """

    # :embedded holds {name, [item]} pairs rendered by the HAL/Siren adapters;
    # :rel is emitted by the Siren adapter for sub-entities.
    defstruct [:rel, :href, :item, :type, embedded: [], links: []]
  end
  defmodule Link do
    @moduledoc """
    Struct for a hypermedia link
    """

    # :title and :template are optional; only some adapters emit them
    # (see HAL.transform_links/1 and Mason.transform_links/1).
    defstruct [:rel, :href, :title, :template]
  end
defmodule Pagination do
@moduledoc """
Pagination struct and link generators
"""
defstruct [:base_url, :current_page, :total_pages, :total_count]
@doc """
Maybe add pagination links to the link list
If pagination is nil, skip this
"""
def maybe_paginate(links, nil), do: links
def maybe_paginate(links, pagination) do
cond do
pagination.total_pages == 1 ->
links
pagination.current_page == 1 ->
[next_link(pagination) | links]
pagination.current_page == pagination.total_pages ->
[prev_link(pagination) | links]
true ->
[next_link(pagination) | [prev_link(pagination) | links]]
end
end
defp next_link(pagination) do
%Representer.Link{rel: "next", href: page_path(pagination.base_url, pagination.current_page + 1)}
end
defp prev_link(pagination) do
%Representer.Link{rel: "prev", href: page_path(pagination.base_url, pagination.current_page - 1)}
end
defp page_path(path, page) do
uri = URI.parse(path)
query =
uri.query
|> decode_query()
|> Map.put(:page, page)
|> URI.encode_query()
%{uri | query: query}
|> URI.to_string()
end
defp decode_query(nil), do: %{}
defp decode_query(query) do
URI.decode_query(query)
end
end
@doc """
Transform the internal representation based on the extension
"""
def transform(struct, extension) do
case extension do
"collection" ->
Representer.CollectionJSON.transform(struct)
"hal" ->
Representer.HAL.transform(struct)
"siren" ->
Representer.Siren.transform(struct)
"mason" ->
Representer.Mason.transform(struct)
"json" ->
Representer.JSON.transform(struct)
end
end
  defmodule Adapter do
    @moduledoc """
    Behaviour for representations to implement
    """

    @type json :: map()

    # Two spec clauses for the same transform/1 callback: adapters must
    # handle both whole collections and single items.
    @callback transform(collection :: %Representer.Collection{}) :: json()
    @callback transform(item :: %Representer.Item{}) :: json()
  end
defmodule JSON do
@moduledoc """
Adapter for plain JSON
Renders the representation almost directly
"""
@behaviour Representer.Adapter
@impl true
def transform(collection = %Representer.Collection{}) do
%{}
|> maybe_put("items", render_collection(collection))
|> maybe_put("links", transform_links(collection.links))
end
def transform(item = %Representer.Item{}) do
item.item
|> maybe_put("links", transform_links(item.links))
end
defp maybe_put(map, _key, nil), do: map
defp maybe_put(map, key, value) do
Map.put(map, key, value)
end
defp render_collection(collection) do
case collection.items do
nil ->
nil
[] ->
nil
items ->
Enum.map(items, &transform/1)
end
end
defp transform_links(links) do
Enum.map(links, fn link ->
%{"rel" => link.rel, "href" => link.href}
end)
end
end
  defmodule CollectionJSON do
    @moduledoc """
    Adapter for collection+json
    """

    @behaviour Representer.Adapter

    @impl true
    # A collection renders as {"collection": {"version", "items", "links", "href"}}.
    def transform(collection = %Representer.Collection{}) do
      collection =
        %{"version" => "1.0"}
        |> maybe_put("items", render_collection(collection))
        |> maybe_put("links", render_links(collection))
        |> maybe_put("href", collection.href)
      %{"collection" => collection}
    end
    # A single item renders as a one-element "items" array.
    def transform(item = %Representer.Item{}) do
      %{
        "version" => "1.0",
        "items" => Enum.map([item], &render_item/1)
      }
    end
    # nil/[] collapse to nil so maybe_put/3 drops the "items" key.
    defp render_collection(collection) do
      case collection.items do
        nil ->
          nil
        [] ->
          nil
        items ->
          Enum.map(items, &render_item/1)
      end
    end
    defp maybe_put(map, _key, nil), do: map
    defp maybe_put(map, key, value) do
      Map.put(map, key, value)
    end
    # Collection links, with pagination links prepended when applicable.
    defp render_links(collection) do
      collection.links
      |> Representer.Pagination.maybe_paginate(collection.pagination)
      |> transform_links()
    end
    # collection+json items carry name/value "data" pairs instead of a map.
    defp render_item(item) do
      %{
        "href" => item.href,
        "data" => render_data(item.item),
        "links" => transform_links(item.links),
      }
    end
    defp render_data(properties) do
      Enum.map(properties, fn {key, value} ->
        %{"name" => key, "value" => value}
      end)
    end
    # "self" links are dropped: the item's own href already carries that role.
    defp transform_links(links) do
      links
      |> Enum.reject(&(&1.rel == "self"))
      |> Enum.map(fn link ->
        %{
          "rel" => link.rel,
          "href" => link.href,
        }
      end)
    end
  end
  defmodule HAL do
    @moduledoc """
    The HAL JSON hypermedia format

    http://stateless.co/hal_specification.html
    """

    @behaviour Representer.Adapter

    @impl true
    def transform(collection = %Representer.Collection{}) do
      %{}
      |> maybe_put("_links", render_links(collection))
      |> maybe_put("_embedded", render_collection(collection))
    end
    def transform(item = %Representer.Item{}) do
      item.item
      |> Map.put("_links", transform_links(item.links))
      |> Map.put("_embedded", render_embedded(item.embedded))
    end
    # Renders each {name, items} group of the item's embedded resources.
    defp render_embedded(embedded) do
      Enum.reduce(embedded, %{}, fn {name, list}, embedded ->
        Map.put(embedded, name, Enum.map(list, &transform/1))
      end)
    end
    # nil/[] collapse to nil so maybe_put/3 drops "_embedded" entirely.
    defp render_collection(collection) do
      case collection.items do
        nil ->
          nil
        [] ->
          nil
        items ->
          %{collection.name => Enum.map(items, &transform/1)}
      end
    end
    # Collection links, with pagination links prepended when applicable.
    defp render_links(collection) do
      collection.links
      |> Representer.Pagination.maybe_paginate(collection.pagination)
      |> transform_links()
    end
    defp maybe_put(map, _key, nil), do: map
    defp maybe_put(map, key, value) do
      Map.put(map, key, value)
    end
    # Builds the "_links" object: a rel seen once maps to a single link
    # object; a repeated rel accumulates into a list of link objects.
    # NOTE(review): :name/:template are atom keys while "href" is a string —
    # confirm the JSON encoder emits these uniformly.
    defp transform_links(links) do
      Enum.reduce(links, %{}, fn link, links ->
        json =
          %{"href" => link.href}
          |> maybe_put(:name, link.title)
          |> maybe_put(:template, link.template)
        case Map.get(links, link.rel) do
          nil ->
            Map.put(links, link.rel, json)
          existing_links ->
            Map.put(links, link.rel, [json | List.wrap(existing_links)])
        end
      end)
    end
  end
  defmodule Siren do
    @moduledoc """
    The Siren hypermedia format

    https://github.com/kevinswiber/siren
    """

    @behaviour Representer.Adapter

    @impl true
    def transform(collection = %Representer.Collection{}) do
      %{}
      |> maybe_put("title", collection.name)
      |> maybe_put("links", render_links(collection))
      |> maybe_put("entities", render_collection(collection))
    end
    def transform(item = %Representer.Item{}) do
      %{}
      |> maybe_put("rel", item.rel)
      |> maybe_put("properties", item.item)
      |> maybe_put("links", transform_links(item.links))
      |> maybe_put("entities", render_embedded(item.embedded))
    end
    defp maybe_put(map, _key, nil), do: map
    defp maybe_put(map, key, value) do
      Map.put(map, key, value)
    end
    # nil/[] collapse to nil so maybe_put/3 drops the "entities" key.
    defp render_collection(collection) do
      case collection.items do
        nil ->
          nil
        [] ->
          nil
        items ->
          Enum.map(items, &transform/1)
      end
    end
    # Flattens the {name, items} embedded groups into one entity list —
    # Siren entities are not grouped by name, unlike HAL's "_embedded".
    defp render_embedded([]), do: nil
    defp render_embedded(embedded) do
      Enum.reduce(embedded, [], fn {_name, list}, embedded ->
        Enum.reduce(list, embedded, fn item, embedded ->
          [transform(item) | embedded]
        end)
      end)
    end
    # Collection links, with pagination links prepended when applicable.
    defp render_links(collection) do
      collection.links
      |> Representer.Pagination.maybe_paginate(collection.pagination)
      |> transform_links()
    end
    # Siren "rel" is an array, hence the single-element list wrapper.
    defp transform_links(links) do
      Enum.map(links, fn link ->
        %{"rel" => [link.rel], "href" => link.href}
      end)
    end
  end
defmodule Mason do
@moduledoc """
The Siren hypermedia format
https://github.com/JornWildt/Mason
"""
@behaviour Representer.Adapter
@impl true
def transform(collection = %Representer.Collection{}) do
%{"name" => collection.name}
|> maybe_put("entities", render_collection(collection))
|> maybe_put("@controls", render_links(collection))
end
def transform(item = %Representer.Item{}) do
Map.put(item.item, "@controls", transform_links(item.links))
end
defp maybe_put(map, _key, nil), do: map
defp maybe_put(map, key, value) do
Map.put(map, key, value)
end
defp render_collection(collection) do
case collection.items do
nil ->
nil
[] ->
nil
items ->
Enum.map(items, &transform/1)
end
end
defp render_links(collection) do
collection.links
|> Enum.filter(fn link -> link.rel != "curies" end)
|> Representer.Pagination.maybe_paginate(collection.pagination)
|> transform_links()
end
defp transform_links(links) do
links
|> Enum.filter(fn link -> link.rel != "curies" end)
|> Enum.reduce(%{}, fn link, links ->
json =
%{"href" => link.href}
|> maybe_put(:title, link.title)
case Map.get(links, link.rel) do
nil ->
Map.put(links, link.rel, json)
existing_links ->
Map.put(links, link.rel, [json | List.wrap(existing_links)])
end
end)
end
end
end
|
lib/representer.ex
| 0.855429
| 0.521837
|
representer.ex
|
starcoder
|
defmodule Cldr.Date.Interval.Backend do
@moduledoc false
def define_date_interval_module(config) do
backend = config.backend
config = Macro.escape(config)
quote location: :keep, bind_quoted: [config: config, backend: backend] do
defmodule Date.Interval do
@moduledoc """
Interval formats allow for software to format intervals like "Jan 10-12, 2008" as a
shorter and more natural format than "Jan 10, 2008 - Jan 12, 2008". They are designed
to take a start and end date, time or datetime plus a formatting pattern
and use that information to produce a localized format.
See `#{inspect(__MODULE__)}.to_string/3` and `#{inspect(backend)}.Interval.to_string/3`
"""
date = quote do
%{
year: _,
month: _,
day: _,
calendar: var!(calendar, unquote(__MODULE__))
}
end
if Cldr.Code.ensure_compiled?(CalendarInterval) do
@doc false
def to_string(%CalendarInterval{} = interval) do
Cldr.Date.Interval.to_string(interval, unquote(backend), [])
end
end
@doc false
def to_string(%Elixir.Date.Range{} = interval) do
Cldr.Date.Interval.to_string(interval, unquote(backend), [])
end
@doc """
Returns a `Date.Range` or `CalendarInterval` as
a localised string.
## Arguments
* `range` is either a `Date.Range.t` returned from `Date.range/2`
or a `CalendarInterval.t`
* `options` is a keyword list of options. The default is `[]`.
## Options
* `:format` is one of `:short`, `:medium` or `:long` or a
specific format type or a string representing of an interval
format. The default is `:medium`.
* `:style` supports dfferent formatting styles. The
alternatives are `:date`, `:month_and_day`, `:month`
and `:year_and_month`. The default is `:date`.
* `:locale` is any valid locale name returned by `Cldr.known_locale_names/0`
or a `Cldr.LanguageTag` struct. The default is `#{backend}.get_locale/0`
* `:number_system` a number system into which the formatted date digits should
be transliterated
## Returns
* `{:ok, string}` or
* `{:error, {exception, reason}}`
## Notes
* `CalendarInterval` support requires adding the
dependency [calendar_interval](https://hex.pm/packages/calendar_interval)
to the `deps` configuration in `mix.exs`.
* For more information on interval format string
see the `Cldr.Interval`.
* The available predefined formats that can be applied are the
keys of the map returned by `Cldr.DateTime.Format.interval_formats("en", :gregorian)`
where `"en"` can be replaced by any configuration locale name and `:gregorian`
is the underlying `CLDR` calendar type.
* In the case where `from` and `to` are equal, a single
date is formatted instead of an interval
## Examples
iex> #{inspect(__MODULE__)}.to_string Date.range(~D[2020-01-01], ~D[2020-12-31])
{:ok, "Jan 1 – Dec 31, 2020"}
iex> #{inspect(__MODULE__)}.to_string Date.range(~D[2020-01-01], ~D[2020-01-12])
{:ok, "Jan 1 – 12, 2020"}
iex> #{inspect(__MODULE__)}.to_string Date.range(~D[2020-01-01], ~D[2020-01-12]),
...> format: :long
{:ok, "Wed, Jan 1 – Sun, Jan 12, 2020"}
iex> #{inspect(__MODULE__)}.to_string Date.range(~D[2020-01-01], ~D[2020-12-01]),
...> format: :long, style: :year_and_month
{:ok, "January – December 2020"}
iex> use CalendarInterval
iex> #{inspect(__MODULE__)}.to_string ~I"2020-01/12"
{:ok, "Jan 1 – Dec 31, 2020"}
iex> #{inspect(__MODULE__)}.to_string Date.range(~D[2020-01-01], ~D[2020-01-12]),
...> format: :short
{:ok, "1/1/2020 – 1/12/2020"}
iex> #{inspect(__MODULE__)}.to_string Date.range(~D[2020-01-01], ~D[2020-01-12]),
...> format: :long, locale: "fr"
{:ok, "mer. 1 – dim. 12 janv. 2020"}
"""
@spec to_string(Cldr.Interval.range(), Keyword.t()) ::
        {:ok, String.t()} | {:error, {module, String.t()}}

# The `CalendarInterval` clause is only compiled when the optional
# :calendar_interval dependency is available at compile time.
if Cldr.Code.ensure_compiled?(CalendarInterval) do
  def to_string(%CalendarInterval{} = interval, options) do
    Cldr.Date.Interval.to_string(interval, unquote(backend), options)
  end
end

def to_string(%Elixir.Date.Range{} = interval, options) do
  Cldr.Date.Interval.to_string(interval, unquote(backend), options)
end

# Two-date form with default (empty) options. Either end may be `nil`,
# which formats an open-ended interval.
@doc false
def to_string(unquote(date) = from, unquote(date) = to) do
  Cldr.Date.Interval.to_string(from, to, unquote(backend), [])
end

def to_string(nil = from, unquote(date) = to) do
  Cldr.Date.Interval.to_string(from, to, unquote(backend), [])
end

def to_string(unquote(date) = from, nil = to) do
  Cldr.Date.Interval.to_string(from, to, unquote(backend), [])
end
@doc """
Returns a interval formed from two dates as
a localised string.
## Arguments
* `from` is any map that conforms to the
`Calendar.date` type.
* `to` is any map that conforms to the
`Calendar.date` type. `to` must occur
on or after `from`.
* `options` is a keyword list of options. The default is `[]`.
Either `from` or `to` may also be `nil`, in which case an
open interval is formatted and the non-nil item is formatted
as a standalone date.
## Options
* `:format` is one of `:short`, `:medium` or `:long` or a
specific format type or a string representing of an interval
format. The default is `:medium`.
* `:style` supports dfferent formatting styles. The
alternatives are `:date`, `:month_and_day`, `:month`
and `:year_and_month`. The default is `:date`.
* `locale` is any valid locale name returned by `Cldr.known_locale_names/0`
or a `Cldr.LanguageTag` struct. The default is `#{backend}.get_locale/0`
* `number_system:` a number system into which the formatted date digits should
be transliterated
## Returns
* `{:ok, string}` or
* `{:error, {exception, reason}}`
## Notes
* For more information on interval format string
see the `Cldr.Interval`.
* The available predefined formats that can be applied are the
keys of the map returned by `Cldr.DateTime.Format.interval_formats("en", :gregorian)`
where `"en"` can be replaced by any configuration locale name and `:gregorian`
is the underlying `CLDR` calendar type.
* In the case where `from` and `to` are equal, a single
date is formatted instead of an interval
## Examples
iex> #{inspect(__MODULE__)}.to_string ~D[2020-01-01], ~D[2020-12-31]
{:ok, "Jan 1 – Dec 31, 2020"}
iex> #{inspect(__MODULE__)}.to_string ~D[2020-01-01], ~D[2020-01-12]
{:ok, "Jan 1 – 12, 2020"}
iex> #{inspect(__MODULE__)}.to_string ~D[2020-01-01], ~D[2020-01-12],
...> format: :long
{:ok, "Wed, Jan 1 – Sun, Jan 12, 2020"}
iex> #{inspect(__MODULE__)}.to_string ~D[2020-01-01], ~D[2020-12-01],
...> format: :long, style: :year_and_month
{:ok, "January – December 2020"}
iex> #{inspect(__MODULE__)}.to_string ~D[2020-01-01], ~D[2020-01-12],
...> format: :short
{:ok, "1/1/2020 – 1/12/2020"}
iex> #{inspect(__MODULE__)}.to_string ~D[2020-01-01], ~D[2020-01-12],
...> format: :long, locale: "fr"
{:ok, "mer. 1 – dim. 12 janv. 2020"}
iex> #{inspect(__MODULE__)}.to_string ~D[2020-01-01], ~D[2020-01-12],
...> format: :long, locale: "th", number_system: :thai
{:ok, "พ. ๑ ม.ค. – อา. ๑๒ ม.ค. ๒๐๒๐"}
"""
@spec to_string(Elixir.Calendar.date() | nil, Elixir.Calendar.date() | nil, Keyword.t()) ::
{:ok, String.t()} | {:error, {module, String.t()}}
def to_string(unquote(date) = from, unquote(date) = to, options) do
Cldr.Date.Interval.to_string(from, to, unquote(backend), options)
end
def to_string(nil = from, unquote(date) = to, options) do
Cldr.Date.Interval.to_string(from, to, unquote(backend), options)
end
def to_string(unquote(date) = from, nil = to, options) do
Cldr.Date.Interval.to_string(from, to, unquote(backend), options)
end
# One-argument bang variants: format an interval using this backend's
# current locale, raising on error.
if Cldr.Code.ensure_compiled?(CalendarInterval) do
  @doc false
  def to_string!(%CalendarInterval{} = interval) do
    locale = unquote(backend).get_locale
    Cldr.Date.Interval.to_string!(interval, unquote(backend), locale: locale)
  end
end

@doc false
def to_string!(%Elixir.Date.Range{} = interval) do
  locale = unquote(backend).get_locale
  Cldr.Date.Interval.to_string!(interval, unquote(backend), locale: locale)
end
@doc """
Returns a `Date.Range` or `CalendarInterval` as
a localised string.
## Arguments
* `range` as either a`Date.Range.t` returned from `Date.range/2`
or a `CalendarInterval.t`
* `options` is a keyword list of options. The default is `[]`.
## Options
* `:format` is one of `:short`, `:medium` or `:long` or a
specific format type or a string representing of an interval
format. The default is `:medium`.
* `:style` supports dfferent formatting styles. The
alternatives are `:date`, `:month_and_day`, `:month`
and `:year_and_month`. The default is `:date`.
* `locale` is any valid locale name returned by `Cldr.known_locale_names/0`
or a `Cldr.LanguageTag` struct. The default is `#{backend}.get_locale/0`
* `number_system:` a number system into which the formatted date digits should
be transliterated
## Returns
* `string` or
* raises an exception
## Notes
* `CalendarInterval` support requires adding the
dependency [calendar_interval](https://hex.pm/packages/calendar_interval)
to the `deps` configuration in `mix.exs`.
* For more information on interval format string
see the `Cldr.Interval`.
* The available predefined formats that can be applied are the
keys of the map returned by `Cldr.DateTime.Format.interval_formats("en", :gregorian)`
where `"en"` can be replaced by any configuration locale name and `:gregorian`
is the underlying `CLDR` calendar type.
* In the case where `from` and `to` are equal, a single
date is formatted instead of an interval
## Examples
iex> #{inspect(__MODULE__)}.to_string! Date.range(~D[2020-01-01], ~D[2020-12-31])
"Jan 1 – Dec 31, 2020"
iex> #{inspect(__MODULE__)}.to_string! Date.range(~D[2020-01-01], ~D[2020-01-12])
"Jan 1 – 12, 2020"
iex> #{inspect(__MODULE__)}.to_string! Date.range(~D[2020-01-01], ~D[2020-01-12]),
...> format: :long
"Wed, Jan 1 – Sun, Jan 12, 2020"
iex> #{inspect(__MODULE__)}.to_string! Date.range(~D[2020-01-01], ~D[2020-12-01]),
...> format: :long, style: :year_and_month
"January – December 2020"
iex> use CalendarInterval
iex> #{inspect(__MODULE__)}.to_string! ~I"2020-01/12"
"Jan 1 – Dec 31, 2020"
iex> #{inspect(__MODULE__)}.to_string! Date.range(~D[2020-01-01], ~D[2020-01-12]),
...> format: :short
"1/1/2020 – 1/12/2020"
iex> #{inspect(__MODULE__)}.to_string! Date.range(~D[2020-01-01], ~D[2020-01-12]),
...> format: :long, locale: "fr"
"mer. 1 – dim. 12 janv. 2020"
"""
@spec to_string!(Cldr.Interval.range(), Keyword.t()) ::
String.t() | no_return
if Cldr.Code.ensure_compiled?(CalendarInterval) do
def to_string!(%CalendarInterval{} = interval, options) do
locale = unquote(backend).get_locale
options = Keyword.put_new(options, :locale, locale)
Cldr.Date.Interval.to_string!(interval, unquote(backend), options)
end
end
def to_string!(%Elixir.Date.Range{} = interval, options) do
locale = unquote(backend).get_locale
options = Keyword.put_new(options, :locale, locale)
Cldr.Date.Interval.to_string!(interval, unquote(backend), options)
end
@doc false
def to_string!(from, to) do
locale = unquote(backend).get_locale
Cldr.Date.Interval.to_string!(from, to, unquote(backend), locale: locale)
end
@doc """
Returns a interval formed from two dates as
a localised string.
## Arguments
* `from` is any map that conforms to the
`Calendar.date` type.
* `to` is any map that conforms to the
`Calendar.date` type. `to` must occur
on or after `from`.
* `options` is a keyword list of options. The default is `[]`.
Either `from` or `to` may also be `nil`, in which case an
open interval is formatted and the non-nil item is formatted
as a standalone date.
## Options
* `:format` is one of `:short`, `:medium` or `:long` or a
specific format type or a string representing of an interval
format. The default is `:medium`.
* `:style` supports dfferent formatting styles. The
alternatives are `:date`, `:month_and_day`, `:month`
and `:year_and_month`. The default is `:date`.
* `locale` is any valid locale name returned by `Cldr.known_locale_names/0`
or a `Cldr.LanguageTag` struct. The default is `#{backend}.get_locale/0`.
* `number_system:` a number system into which the formatted date digits should
be transliterated.
## Returns
* `string` or
* raises an exception
## Notes
* For more information on interval format string
see the `Cldr.Interval`.
* The available predefined formats that can be applied are the
keys of the map returned by `Cldr.DateTime.Format.interval_formats("en", :gregorian)`
where `"en"` can be replaced by any configuration locale name and `:gregorian`
is the underlying `CLDR` calendar type.
* In the case where `from` and `to` are equal, a single
date is formatted instead of an interval
## Examples
iex> #{inspect(__MODULE__)}.to_string! Date.range(~D[2020-01-01], ~D[2020-12-31])
"Jan 1 – Dec 31, 2020"
iex> #{inspect(__MODULE__)}.to_string! Date.range(~D[2020-01-01], ~D[2020-01-12])
"Jan 1 – 12, 2020"
iex> #{inspect(__MODULE__)}.to_string! Date.range(~D[2020-01-01], ~D[2020-01-12]),
...> format: :long
"Wed, Jan 1 – Sun, Jan 12, 2020"
iex> #{inspect(__MODULE__)}.to_string! Date.range(~D[2020-01-01], ~D[2020-12-01]),
...> format: :long, style: :year_and_month
"January – December 2020"
iex> use CalendarInterval
iex> #{inspect(__MODULE__)}.to_string! ~I"2020-01/12"
"Jan 1 – Dec 31, 2020"
iex> #{inspect(__MODULE__)}.to_string! Date.range(~D[2020-01-01], ~D[2020-01-12]),
...> format: :short
"1/1/2020 – 1/12/2020"
iex> #{inspect(__MODULE__)}.to_string! Date.range(~D[2020-01-01], ~D[2020-01-12]),
...> format: :long, locale: "fr"
"mer. 1 – dim. 12 janv. 2020"
"""
@spec to_string!(Elixir.Calendar.date() | nil, Elixir.Calendar.date() | nil, Keyword.t()) ::
String.t() | no_return
def to_string!(unquote(date) = from, unquote(date) = to, options) do
do_to_string!(from, to, options)
end
def to_string!(nil = from, unquote(date) = to, options) do
do_to_string!(from, to, options)
end
def to_string!(unquote(date) = from, nil = to, options) do
do_to_string!(from, to, options)
end
def do_to_string!(from, to, options) do
locale = unquote(backend).get_locale
options = Keyword.put_new(options, :locale, locale)
Cldr.Date.Interval.to_string!(from, to, unquote(backend), options)
end
end
end
end
end
|
lib/cldr/backend/interval/date.ex
| 0.912896
| 0.580679
|
date.ex
|
starcoder
|
defmodule Akin.Metaphone.Single do
  @moduledoc """
  Calculates the [Metaphone Phonetic Algorithm](http://en.wikipedia.org/wiki/
  Metaphone) of a string.
  """
  import String, only: [downcase: 1, first: 1, split_at: 2, last: 1, at: 2]
  import Akin.Util, only: [len: 1, is_alphabetic?: 1, deduplicate: 1]

  @doc """
  Returns the Metaphone phonetic version of the provided string.

  Returns `nil` for an empty string or a string containing
  non-alphabetic characters.

  ## Examples

      iex> Akin.Metaphone.Single.compute("z")
      "s"

      iex> Akin.Metaphone.Single.compute("ztiaz")
      "sxs"
  """
  def compute(value) when is_binary(value) do
    cond do
      len(value) == 0 ->
        nil

      !is_alphabetic?(value) ->
        nil

      true ->
        # Normalise, apply the word-initial special cases, collapse
        # duplicate letters, then transcode the remainder.
        value
        |> downcase
        |> transcode_first_character
        |> deduplicate
        |> transcode
    end
  end

  @doc """
  Transcodes the first character of the string using the Metaphone algorithm.

  Handles the word-initial special cases (silent letters and the
  initial "x" -> "s" substitution).
  """
  def transcode_first_character(value) do
    case len(value) do
      0 ->
        value

      1 ->
        # A lone "x" is pronounced "s".
        if first(value) == "x", do: "s", else: value

      _ ->
        # t is the string without its first character.
        t = elem(split_at(value, 1), 1)

        case first(value) do
          "a" ->
            # Initial "ae": drop the "a".
            if first(t) == "e", do: t, else: value

          x when x in ["g", "k", "p"] ->
            # Initial "gn", "kn", "pn": the first letter is silent.
            if first(t) == "n", do: t, else: value

          "w" ->
            cond do
              # Initial "wr": the "w" is silent.
              first(t) == "r" -> t
              # Initial "wh": keep "w", drop the "h".
              first(t) == "h" -> "w" <> elem(split_at(value, 2), 1)
              true -> value
            end

          "x" ->
            # Initial "x" sounds like "s".
            "s" <> t

          _ ->
            value
        end
    end
  end

  @doc """
  Transcodes the rest of the string using the Metaphone algorithm.

  p = processed values
  c = current character
  r = remaining values
  o = output
  """
  def transcode(value) do
    character = first(value)
    remaining_values = elem(split_at(value, 1), 1)
    processed_values = ""
    output = ""
    transcode(processed_values, character, remaining_values, output)
  end

  # Terminal clauses: no current character left. Empty output maps to nil.
  def transcode(_, nil, _, ""), do: nil
  def transcode(_, nil, _, o), do: o

  def transcode(p, c, r, o) do
    vowels = ["a", "e", "i", "o", "u"]

    # partially applied function for shifting - cannot nest defs.
    # shift.(count, characters) consumes `count` characters starting at `c`
    # (moving them into the processed side) and sets the output to
    # `characters`; returns the next {p, c, r, o} state.
    shift = fn count, characters ->
      {head, tail} = split_at(r, count - 1)
      updated_p = if len(head) > 0, do: p <> c <> head, else: p <> c
      updated_c = if len(tail) > 0, do: first(tail), else: nil
      updated_r = elem(split_at(tail, 1), 1)
      {updated_p, updated_c, updated_r, characters}
    end

    {updated_p, updated_c, updated_r, updated_o} =
      case c do
        # Vowels are kept only in word-initial position.
        x when x in ["a", "e", "i", "o", "u"] ->
          case len(p) == 0 do
            false -> shift.(1, o)
            true -> shift.(1, o <> c)
          end

        # These consonants map to themselves.
        x when x in ["f", "j", "l", "m", "n", "r"] ->
          shift.(1, o <> c)

        "b" ->
          # Final "mb": the "b" is silent.
          if len(p) >= 1 && last(p) == "m" && len(r) == 0 do
            shift.(1, o)
          else
            shift.(1, o <> "b")
          end

        "c" ->
          cond do
            len(r) >= 1 && first(r) == "h" && len(p) >= 1 && last(p) == "s" ->
              shift.(1, o <> "k")

            # "cia" -> "x" (sh sound), consuming the trigram.
            len(r) >= 2 && first(r) == "i" && at(r, 1) == "a" ->
              shift.(3, o <> "x")

            # "ch" -> "x".
            len(r) >= 1 && first(r) == "h" ->
              shift.(2, o <> "x")

            len(p) >= 1 && len(r) >= 1 && last(p) == "s" && first(r) == "h" ->
              shift.(2, o <> "x")

            # "sci"/"sce"/"scy": the "c" is silent.
            len(p) >= 1 && len(r) >= 1 && last(p) == "s" && first(r) in ["i", "e", "y"] ->
              shift.(1, o)

            # Soft "c" before i/e/y.
            len(r) >= 1 && first(r) in ["i", "e", "y"] ->
              shift.(1, o <> "s")

            true ->
              shift.(1, o <> "k")
          end

        "d" ->
          # "dge"/"dgy"/"dgi" -> "j", otherwise "d" -> "t".
          if len(r) >= 2 && first(r) == "g" && at(r, 1) in ["e", "y", "i"] do
            shift.(1, o <> "j")
          else
            shift.(1, o <> "t")
          end

        "g" ->
          cond do
            # Silent "g" in "gh" (not final), final "gn", and "gned".
            (len(r) > 1 && first(r) == "h") || (len(r) == 1 && first(r) == "n") ||
                (len(r) == 3 && first(r) == "n" && at(r, 2) == "d") ->
              shift.(1, o)

            # Soft "g" before i/e/y -> "j", consuming the following letter.
            len(r) >= 1 && first(r) in ["i", "e", "y"] ->
              shift.(2, o <> "j")

            true ->
              shift.(1, o <> "k")
          end

        "h" ->
          # Silent "h" between vowels / after a vowel at word end, and in
          # "th"/"gh" clusters.
          # NOTE(review): the final disjunct `at(p, len(p) - 2) == "g"` has
          # no length guard; for len(p) == 1 the index is -1, so String.at/2
          # reads the *last* character — a single preceding "g" matches.
          # Looks like `last(p) == "g"` may have been intended; TODO confirm
          # against the reference Metaphone behavior.
          if (len(p) >= 1 && last(p) in vowels && (len(r) == 0 || first(r) in vowels)) ||
               (len(p) >= 2 && last(p) == "h" && at(p, len(p) - 2) == "t") ||
               at(p, len(p) - 2) == "g" do
            shift.(1, o)
          else
            shift.(1, o <> "h")
          end

        "k" ->
          # "ck": the "k" is silent (the "c" already produced "k").
          if len(p) >= 1 && last(p) == "c" do
            shift.(1, o)
          else
            shift.(1, o <> "k")
          end

        "p" ->
          # "ph" -> "f".
          if len(r) >= 1 && first(r) == "h" do
            shift.(2, o <> "f")
          else
            shift.(1, o <> "p")
          end

        "q" ->
          shift.(1, o <> "k")

        "s" ->
          cond do
            # "sio"/"sia" -> "x".
            len(r) >= 2 && first(r) == "i" && at(r, 1) in ["o", "a"] ->
              shift.(3, o <> "x")

            # "sh" -> "x".
            len(r) >= 1 && first(r) == "h" ->
              shift.(2, o <> "x")

            true ->
              shift.(1, o <> "s")
          end

        "t" ->
          cond do
            # "tia"/"tio" -> "x".
            len(r) >= 2 && first(r) == "i" && at(r, 1) in ["a", "o"] ->
              shift.(3, o <> "x")

            # "th" -> "0" (theta).
            len(r) >= 1 && first(r) == "h" ->
              shift.(2, o <> "0")

            # "tch": the "t" is silent.
            len(r) >= 2 && first(r) == "c" && at(r, 1) == "h" ->
              shift.(1, o)

            true ->
              shift.(1, o <> "t")
          end

        "v" ->
          shift.(1, o <> "f")

        x when x in ["w", "y"] ->
          # "w"/"y" are kept only when followed by a vowel.
          if len(r) == 0 || first(r) not in vowels do
            shift.(1, o)
          else
            shift.(1, o <> c)
          end

        "x" ->
          shift.(1, o <> "ks")

        "z" ->
          shift.(1, o <> "s")

        _ ->
          # Unknown character: drop it.
          shift.(1, o)
      end

    transcode(updated_p, updated_c, updated_r, updated_o)
  end
end
|
lib/akin/algorithms/phonetic/metaphone.ex
| 0.750187
| 0.602939
|
metaphone.ex
|
starcoder
|
defmodule ExAliyunOts.PlainBuffer do
  @moduledoc false

  # PlainBuffer wire-format constants.

  # Header magic value, written as a 32-bit little-endian integer.
  @header 0x75

  # tag type
  @tag_row_pk 0x1
  @tag_row_data 0x2
  @tag_cell 0x3
  @tag_cell_name 0x4
  @tag_cell_value 0x5
  @tag_cell_type 0x6
  @tag_cell_timestamp 0x7
  @tag_delete_row_marker 0x8
  @tag_row_checksum 0x9
  @tag_cell_checksum 0x0A

  # cell op type
  @op_delete_all_version 0x1
  @op_delete_one_version 0x3
  @op_increment 0x4

  # variant type
  @vt_integer 0x0
  @vt_double 0x1
  @vt_boolean 0x2
  @vt_string 0x3
  # @vt_null 0x6
  @vt_blob 0x7
  @vt_inf_min 0x9
  @vt_inf_max 0xA
  @vt_auto_increment 0xB

  # other
  # Byte widths of little-endian payloads; the "sum" variants add one byte
  # for the variant-type prefix.
  @little_endian_32_size 4
  @little_endian_64_size 8
  @sum_endian_32_size @little_endian_32_size + 1
  @sum_endian_64_size @little_endian_64_size + 1

  alias ExAliyunOts.CRC
  import ExAliyunOts.Logger, only: [debug: 1]
# Serializes a full row (primary keys + attribute columns) for a PutRow
# request, finishing the row checksum with a trailing 0 (no delete marker).
def serialize_for_put_row(primary_keys, attribute_columns) do
  {buffer, row_checksum} = header() |> primary_keys(primary_keys) |> columns(attribute_columns)
  row_checksum = CRC.crc_int8(row_checksum, 0)
  process_row_checksum(buffer, row_checksum)
end

# Serializes only the primary keys (e.g. for GetRow-style requests).
def serialize_primary_keys(primary_keys) do
  {buffer, row_checksum} = header() |> primary_keys(primary_keys)
  row_checksum = CRC.crc_int8(row_checksum, 0)
  process_row_checksum(buffer, row_checksum)
end

# Encodes a bare column value (variant-type tag + payload), without any
# cell framing or checksums.
def serialize_column_value(value) when is_boolean(value) do
  <<@vt_boolean::integer, boolean_to_integer(value)::bitstring>>
end

def serialize_column_value(value) when is_integer(value) do
  <<@vt_integer::integer, value::little-integer-size(64)>>
end

# NOTE: is_binary/1 must stay before is_bitstring/1 — every binary is also
# a bitstring, so clause order decides string vs blob encoding.
def serialize_column_value(value) when is_binary(value) do
  value_size = byte_size(value)
  <<@vt_string::integer, value_size::little-integer-size(32), value::bitstring>>
end

def serialize_column_value(value) when is_bitstring(value) do
  value_size = byte_size(value)
  <<@vt_blob::integer, value_size::little-integer-size(32), value::bitstring>>
end

def serialize_column_value(value) when is_float(value) do
  <<@vt_double::integer, value::little-float>>
end

def serialize_column_value(value) do
  raise ExAliyunOts.RuntimeError, "Unsupported column for value: #{inspect(value)}"
end
# Serializes a row for an UpdateRow request. `attribute_columns` maps an
# update type (:PUT | :DELETE | :DELETE_ALL | :INCREMENT) to its columns.
def serialize_for_update_row(primary_keys, attribute_columns) when is_map(attribute_columns) do
  {buffer, row_checksum} =
    header() |> primary_keys(primary_keys) |> process_update_marker(attribute_columns)

  row_checksum = CRC.crc_int8(row_checksum, 0)
  process_row_checksum(buffer, row_checksum)
end

# Serializes a row for a DeleteRow request; the delete marker contributes
# a 1 to the row checksum instead of the trailing 0 used elsewhere.
def serialize_for_delete_row(primary_keys) do
  {buffer, row_checksum} = header() |> primary_keys(primary_keys) |> process_delete_marker()
  process_row_checksum(buffer, row_checksum)
end

# Decodes a single serialized row; nil for an empty payload.
def deserialize_row(<<>>) do
  nil
end

def deserialize_row(row) do
  debug(fn ->
    [
      "** deserialize_row:\n",
      inspect(row, limit: :infinity)
    ]
  end)

  decode_row(row)
end

# Decodes a payload containing zero or more rows; nil for an empty payload.
def deserialize_rows(<<>>) do
  nil
end

def deserialize_rows(rows) do
  debug(fn ->
    [
      "** deserialize_rows:\n",
      inspect(rows, limit: :infinity)
    ]
  end)

  decode_rows(rows)
end
# Fresh encode state: the 4-byte little-endian header magic paired with a
# row checksum seeded to 0.
defp header() do
  {<<@header::little-integer-size(32)>>, 0}
end

# Appends the primary-key section tag, then folds every primary key column
# into the buffer/checksum accumulator.
defp primary_keys({buffer, row_checksum}, primary_keys) do
  tagged = <<buffer::bitstring, @tag_row_pk::integer>>
  Enum.reduce(primary_keys, {tagged, row_checksum}, &primary_key_column/2)
end

# Appends the row-data section tag, then folds every attribute column into
# the buffer/checksum accumulator.
defp columns({buffer, row_checksum}, columns) when is_list(columns) do
  tagged = <<buffer::bitstring, @tag_row_data::integer>>
  Enum.reduce(columns, {tagged, row_checksum}, &process_column/2)
end
# Appends the row-data tag, then encodes every {update_type => columns}
# group from the UpdateRow attribute map.
defp process_update_marker({buffer, row_checksum}, columns_to_update_opers)
     when is_map(columns_to_update_opers) do
  buffer = <<buffer::bitstring, @tag_row_data::integer>>

  Enum.reduce(columns_to_update_opers, {buffer, row_checksum}, fn({update_type, columns_to_oper}, acc) ->
    process_update_marker_to_columns(columns_to_oper, update_type, acc, columns_to_update_opers)
  end)
end

# The columns for each update type must be a list.
defp process_update_marker_to_columns(columns, update_type, _, _) when is_list(columns) == false do
  raise ExAliyunOts.RuntimeError,
        "Unsupported update value: #{inspect(columns)} to key: #{inspect(update_type)}, expect value as list"
end

# Valid update types only; each column is encoded in turn.
defp process_update_marker_to_columns(columns, update_type, acc, _) when is_list(columns) and
       update_type in [:PUT, :DELETE, :DELETE_ALL, :INCREMENT] do
  Enum.reduce(columns, acc, fn column, acc_inner ->
    process_update_column(acc_inner, update_type, column)
  end)
end

defp process_update_marker_to_columns(columns, update_type, _acc, columns_to_update_opers) when is_list(columns) do
  raise ExAliyunOts.RuntimeError,
        "Unsupported update type: #{inspect(update_type)}, in attribute_columns: #{
          inspect(columns_to_update_opers)
        }"
end
# Encodes one {name, value} primary-key cell: cell tag, name, value, then
# the per-cell checksum, which is also folded into the row checksum.
defp primary_key_column({pk_name, pk_value}, {buffer, row_checksum}) do
  buffer = <<buffer::bitstring, @tag_cell::integer>>

  {buffer, cell_checksum} =
    {buffer, 0}
    |> process_cell_name(pk_name)
    |> process_primary_key_value(pk_value)

  buffer = <<buffer::bitstring, @tag_cell_checksum::integer, cell_checksum::integer>>
  row_checksum = CRC.crc_int8(row_checksum, cell_checksum)
  {buffer, row_checksum}
end

defp primary_key_column(primary_keys, {buffer, row_checksum}) when is_list(primary_keys) do
  # nested primary_keys are used for batch operation with multiple pks
  Enum.reduce(primary_keys, {buffer, row_checksum}, fn {pk_name, pk_value}, acc ->
    primary_key_column({pk_name, pk_value}, acc)
  end)
end

defp primary_key_column(primary_keys, _) do
  raise ExAliyunOts.RuntimeError, "Invalid primary_keys: #{inspect(primary_keys)}"
end
# Appends the cell-name tag, a 32-bit little-endian byte length, and the
# name itself; folds the name into the cell checksum.
#
# Fix: the size field must be the name's size in *bytes*. The decoder
# (`decode_pk/2` / `decode_attr/2`) reads it with `binary-size/1`, so
# `String.length/1` (a grapheme count) corrupts the stream for any
# non-ASCII column name; `byte_size/1` matches what the decoder expects.
defp process_cell_name({buffer, cell_checksum}, name) do
  buffer =
    <<buffer::bitstring, @tag_cell_name::integer,
      <<byte_size(name)::little-integer-size(32)>>, name::bitstring>>

  cell_checksum = CRC.crc_string(cell_checksum, name)
  {buffer, cell_checksum}
end
# Appends the cell-value tag, then dispatches on the value's type.
defp process_primary_key_value({buffer, cell_checksum}, value) do
  do_process_primary_key_value(
    {<<buffer::bitstring, @tag_cell_value::integer>>, cell_checksum},
    value
  )
end

# Sentinel primary-key values (range bounds and auto-increment) encode as
# a 1-byte payload: just the variant-type marker.
defp do_process_primary_key_value({buffer, cell_checksum}, value)
     when value == :INF_MIN
     when value == :inf_min do
  buffer = <<buffer::bitstring, <<1::little-integer-size(32)>>, @vt_inf_min::integer>>
  cell_checksum = CRC.crc_int8(cell_checksum, @vt_inf_min)
  {buffer, cell_checksum}
end

defp do_process_primary_key_value({buffer, cell_checksum}, value)
     when value == :INF_MAX
     when value == :inf_max do
  buffer = <<buffer::bitstring, <<1::little-integer-size(32)>>, @vt_inf_max::integer>>
  cell_checksum = CRC.crc_int8(cell_checksum, @vt_inf_max)
  {buffer, cell_checksum}
end

defp do_process_primary_key_value({buffer, cell_checksum}, value)
     when value == :AUTO_INCREMENT
     when value == :auto_increment do
  buffer = <<buffer::bitstring, <<1::little-integer-size(32)>>, @vt_auto_increment::integer>>
  cell_checksum = CRC.crc_int8(cell_checksum, @vt_auto_increment)
  {buffer, cell_checksum}
end

# Integer: total size (type byte + 8 payload bytes), type, 64-bit LE value.
defp do_process_primary_key_value({buffer, cell_checksum}, value) when is_integer(value) do
  buffer =
    <<buffer::bitstring, <<@sum_endian_64_size::little-integer-size(32)>>, @vt_integer::integer,
      (<<value::little-integer-size(64)>>)>>

  cell_checksum = cell_checksum |> CRC.crc_int8(@vt_integer) |> CRC.crc_int64(value)
  {buffer, cell_checksum}
end

# String/blob: total size, type, 32-bit LE byte length, raw bytes.
# is_binary/1 must precede is_bitstring/1 (binaries are bitstrings).
defp do_process_primary_key_value({buffer, cell_checksum}, value) when is_binary(value) do
  value_size = byte_size(value)

  buffer =
    <<buffer::bitstring, <<@sum_endian_32_size + value_size::little-integer-size(32)>>,
      @vt_string::integer, <<value_size::little-integer-size(32)>>, value::bitstring>>

  cell_checksum =
    cell_checksum
    |> CRC.crc_int8(@vt_string)
    |> CRC.crc_int32(value_size)
    |> CRC.crc_string(value)

  {buffer, cell_checksum}
end

defp do_process_primary_key_value({buffer, cell_checksum}, value) when is_bitstring(value) do
  value_size = byte_size(value)

  buffer =
    <<buffer::bitstring, <<@sum_endian_32_size + value_size::little-integer-size(32)>>,
      @vt_blob::integer, <<value_size::little-integer-size(32)>>, value::bitstring>>

  cell_checksum =
    cell_checksum
    |> CRC.crc_int8(@vt_blob)
    |> CRC.crc_int32(value_size)
    |> CRC.crc_string(value)

  {buffer, cell_checksum}
end

defp do_process_primary_key_value(_, value) do
  raise ExAliyunOts.RuntimeError, "Unsupported primary key for value: #{inspect(value)}"
end
# Encodes one attribute cell without a timestamp: cell tag, name, value,
# cell checksum; the cell checksum is folded into the row checksum.
defp process_column({column_name, column_value}, {buffer, row_checksum}) do
  buffer = <<buffer::bitstring, @tag_cell::integer>>

  {buffer, cell_checksum} =
    {buffer, 0}
    |> process_cell_name(column_name)
    |> process_column_value_with_checksum(column_value)

  buffer = <<buffer::bitstring, @tag_cell_checksum::integer, cell_checksum::integer>>
  row_checksum = CRC.crc_int8(row_checksum, cell_checksum)
  {buffer, row_checksum}
end

# Three-element variant with an explicit cell timestamp (64-bit LE),
# which also participates in the cell checksum.
defp process_column({column_name, column_value, timestamp}, {buffer, row_checksum})
     when timestamp != nil do
  buffer = <<buffer::bitstring, @tag_cell::integer>>

  {buffer, cell_checksum} =
    {buffer, 0}
    |> process_cell_name(column_name)
    |> process_column_value_with_checksum(column_value)

  buffer =
    <<buffer::bitstring, @tag_cell_timestamp::integer,
      (<<timestamp::little-integer-size(64)>>)>>

  cell_checksum = CRC.crc_int64(cell_checksum, timestamp)
  buffer = <<buffer::bitstring, @tag_cell_checksum::integer, cell_checksum::integer>>
  row_checksum = CRC.crc_int8(row_checksum, cell_checksum)
  {buffer, row_checksum}
end

defp process_column(column, _) do
  raise ExAliyunOts.RuntimeError, "Invalid column: #{inspect(column)} is not a tuple"
end
# Encodes an attribute value (with its cell-value tag) and folds it into
# the cell checksum. A nil value writes nothing.
defp process_column_value_with_checksum({buffer, cell_checksum}, nil) do
  {buffer, cell_checksum}
end

# Booleans checksum the raw 0/1 byte, not the encoded buffer.
defp process_column_value_with_checksum({buffer, cell_checksum}, true) do
  cell_checksum =
    cell_checksum
    |> CRC.crc_int8(@vt_boolean)
    |> CRC.crc_int8(1)

  {
    boolean_value_to_buffer(buffer, true),
    cell_checksum
  }
end

defp process_column_value_with_checksum({buffer, cell_checksum}, false) do
  cell_checksum =
    cell_checksum
    |> CRC.crc_int8(@vt_boolean)
    |> CRC.crc_int8(0)

  {
    boolean_value_to_buffer(buffer, false),
    cell_checksum
  }
end

defp process_column_value_with_checksum({buffer, cell_checksum}, value)
     when is_integer(value) do
  buffer =
    <<buffer::bitstring, @tag_cell_value::integer,
      <<@sum_endian_64_size::little-integer-size(32)>>, <<@vt_integer::integer>>,
      (<<value::little-integer-size(64)>>)>>

  cell_checksum = cell_checksum |> CRC.crc_int8(@vt_integer) |> CRC.crc_int64(value)
  {buffer, cell_checksum}
end

# Doubles are checksummed via their IEEE-754 bit pattern reinterpreted as
# an unsigned 64-bit little-endian integer.
defp process_column_value_with_checksum({buffer, cell_checksum}, value) when is_float(value) do
  buffer = <<buffer::bitstring, @tag_cell_value::integer>>
  value_to_binary = <<value::little-float>>
  <<long::unsigned-little-integer-64>> = value_to_binary

  buffer =
    <<buffer::bitstring, <<@sum_endian_64_size::little-integer-size(32)>>, @vt_double::integer,
      value_to_binary::bitstring>>

  cell_checksum = cell_checksum |> CRC.crc_int8(@vt_double) |> CRC.crc_int64(long)
  {buffer, cell_checksum}
end

# is_binary/1 must precede is_bitstring/1 — clause order selects string
# vs blob encoding.
defp process_column_value_with_checksum({buffer, cell_checksum}, value) when is_binary(value) do
  buffer = <<buffer::bitstring, @tag_cell_value::integer>>
  value_size = byte_size(value)

  buffer =
    <<buffer::bitstring, <<@sum_endian_32_size + value_size::little-integer-size(32)>>,
      @vt_string::integer, <<value_size::little-integer-size(32)>>, value::bitstring>>

  cell_checksum =
    cell_checksum
    |> CRC.crc_int8(@vt_string)
    |> CRC.crc_int32(value_size)
    |> CRC.crc_string(value)

  {buffer, cell_checksum}
end

defp process_column_value_with_checksum({buffer, cell_checksum}, value)
     when is_bitstring(value) do
  buffer = <<buffer::bitstring, @tag_cell_value::integer>>
  value_size = byte_size(value)

  buffer =
    <<buffer::bitstring, <<@sum_endian_32_size + value_size::little-integer-size(32)>>,
      @vt_blob::integer, <<value_size::little-integer-size(32)>>, value::bitstring>>

  cell_checksum =
    cell_checksum
    |> CRC.crc_int8(@vt_blob)
    |> CRC.crc_int32(value_size)
    |> CRC.crc_string(value)

  {buffer, cell_checksum}
end

defp process_column_value_with_checksum({_buffer, _cell_checksum}, value) do
  raise ExAliyunOts.RuntimeError, "Unsupported column for value: #{inspect(value)}"
end

# Boolean payload: cell-value tag, total size 2 (type byte + value byte),
# type, then <<1>> or <<0>>.
defp boolean_value_to_buffer(buffer, value) when is_boolean(value) do
  <<buffer::bitstring, @tag_cell_value::integer, <<2::little-integer-size(32)>>,
    @vt_boolean::integer, boolean_to_integer(value)::bitstring>>
end
# Normalises the three accepted column shapes — bare name, {name, value},
# {name, value, timestamp} — before dispatching on the update type.
defp process_update_column({buffer, row_checksum}, update_type, column)
     when is_bitstring(column) do
  do_process_update_column({buffer, row_checksum}, update_type, column, {nil, nil})
end

defp process_update_column({buffer, row_checksum}, update_type, {column_name, column_value}) do
  do_process_update_column(
    {buffer, row_checksum},
    update_type,
    column_name,
    {column_value, nil}
  )
end

defp process_update_column(
       {buffer, row_checksum},
       update_type,
       {column_name, column_value, timestamp}
     ) do
  do_process_update_column(
    {buffer, row_checksum},
    update_type,
    column_name,
    {column_value, timestamp}
  )
end

defp process_update_column({_buffer, _row_checksum}, _update_type, column) do
  raise ExAliyunOts.RuntimeError,
        "Unsupported column when update grouping columns: #{inspect(column)}"
end

# :DELETE — delete one version; the op code is written after the cell
# value and also folded into the cell checksum.
defp do_process_update_column(
       {buffer, row_checksum},
       :DELETE,
       column_name,
       {column_value, timestamp}
     ) do
  {buffer, cell_checksum} = process_update_column_with_cell(buffer, column_name, column_value)

  buffer = <<buffer::bitstring, @tag_cell_type::integer, @op_delete_one_version::integer>>

  {buffer, cell_checksum} =
    process_update_column_with_timestamp(buffer, cell_checksum, timestamp)

  cell_checksum = CRC.crc_int8(cell_checksum, @op_delete_one_version)

  process_update_column_with_row_checksum(buffer, cell_checksum, row_checksum)
end

# :DELETE_ALL — delete every version of the cell.
defp do_process_update_column(
       {buffer, row_checksum},
       :DELETE_ALL,
       column_name,
       {column_value, timestamp}
     ) do
  {buffer, cell_checksum} = process_update_column_with_cell(buffer, column_name, column_value)

  buffer = <<buffer::bitstring, @tag_cell_type::integer, @op_delete_all_version::integer>>

  {buffer, cell_checksum} =
    process_update_column_with_timestamp(buffer, cell_checksum, timestamp)

  cell_checksum = CRC.crc_int8(cell_checksum, @op_delete_all_version)

  process_update_column_with_row_checksum(buffer, cell_checksum, row_checksum)
end

# :INCREMENT — atomic increment op.
defp do_process_update_column(
       {buffer, row_checksum},
       :INCREMENT,
       column_name,
       {column_value, timestamp}
     ) do
  {buffer, cell_checksum} = process_update_column_with_cell(buffer, column_name, column_value)

  buffer = <<buffer::bitstring, @tag_cell_type::integer, @op_increment::integer>>

  {buffer, cell_checksum} =
    process_update_column_with_timestamp(buffer, cell_checksum, timestamp)

  cell_checksum = CRC.crc_int8(cell_checksum, @op_increment)

  process_update_column_with_row_checksum(buffer, cell_checksum, row_checksum)
end

# Fallthrough handles :PUT (the only other type admitted by
# process_update_marker_to_columns/4): no op-code byte is written.
defp do_process_update_column(
       {buffer, row_checksum},
       _update_type,
       column_name,
       {column_value, timestamp}
     ) do
  {buffer, cell_checksum} = process_update_column_with_cell(buffer, column_name, column_value)

  {buffer, cell_checksum} =
    process_update_column_with_timestamp(buffer, cell_checksum, timestamp)

  process_update_column_with_row_checksum(buffer, cell_checksum, row_checksum)
end
# Opens an update cell: cell tag, name, optional value; returns the
# buffer and the running cell checksum.
defp process_update_column_with_cell(buffer, column_name, column_value) do
  buffer = <<buffer::bitstring, @tag_cell::integer>>

  {buffer, 0}
  |> process_cell_name(column_name)
  |> process_column_value_with_checksum(column_value)
end

# A nil timestamp writes nothing and leaves the checksum untouched.
defp process_update_column_with_timestamp(buffer, cell_checksum, nil) do
  {buffer, cell_checksum}
end

defp process_update_column_with_timestamp(buffer, cell_checksum, timestamp) do
  buffer =
    <<buffer::bitstring, @tag_cell_timestamp::integer,
      (<<timestamp::little-integer-size(64)>>)>>

  cell_checksum = CRC.crc_int64(cell_checksum, timestamp)
  {buffer, cell_checksum}
end

# Closes the cell with its checksum byte and folds it into the row
# checksum.
defp process_update_column_with_row_checksum(buffer, cell_checksum, row_checksum) do
  buffer = <<buffer::bitstring, @tag_cell_checksum::integer, cell_checksum::integer>>
  row_checksum = CRC.crc_int8(row_checksum, cell_checksum)
  {buffer, row_checksum}
end
# Appends the delete-row marker tag and folds the marker bit (1) into the
# running row checksum.
defp process_delete_marker({buffer, row_checksum}) do
  marked = <<buffer::bitstring, @tag_delete_row_marker::integer>>
  {marked, CRC.crc_int8(row_checksum, 1)}
end

# One-byte boolean encoding: exactly `true` yields <<1>>; any other term
# yields <<0>>.
defp boolean_to_integer(flag) do
  case flag do
    true -> <<1>>
    _ -> <<0>>
  end
end

# Terminates the buffer with the row-checksum tag and the final checksum
# byte.
defp process_row_checksum(buffer, row_checksum) do
  <<buffer::bitstring, @tag_row_checksum::integer, row_checksum::integer>>
end
# deserialize processing

# Both decoders require and strip the little-endian header magic first.
defp decode_row(<<@header::little-integer-size(32), rest::bitstring>>) do
  start_decoding(rest)
end

defp decode_rows(<<@header::little-integer-size(32), rest::bitstring>>) do
  decode_rows(rest, [])
end

defp decode_rows(<<>>, acc) do
  Enum.reverse(acc)
end

# start_decoding/1 returns {:cont, rest, row} while more rows follow, or a
# bare row when the stream is exhausted.
defp decode_rows(rest, acc) do
  case start_decoding(rest) do
    {:cont, rest, row} ->
      decode_rows(rest, [row | acc])

    row ->
      decode_rows(<<>>, [row | acc])
  end
end
defp start_decoding(<<@tag_row_pk::integer, rest::bitstring>>) do
  # start decoding from primary key(s)
  decode_pk(rest, [])
end

defp start_decoding(<<@tag_row_data::integer, rest::bitstring>>) do
  # no primary key(s) decoding, start decoding from attribute column(s)
  {nil, decode_attr(rest, [])}
end

# Decodes one primary-key cell: the name length here is read as a *byte*
# size (binary-size/1), so the serializer's size field must be byte_size.
defp decode_pk(
       <<@tag_cell::integer, @tag_cell_name::integer, pk_field_size::little-integer-size(32),
         pk_field::binary-size(pk_field_size), rest::bitstring>>,
       acc
     ) do
  # primary key(s) decoding
  {pk_value, rest} = calculate_pk_value(rest)
  acc = [{pk_field, pk_value} | acc]
  decode_pk(rest, acc)
end

defp decode_pk(<<@tag_row_data::integer, rest::bitstring>>, acc) do
  # finish primary key(s) decoding and start this row's attribute column(s) decoding
  case decode_attr(rest, []) do
    {rest, attrs} ->
      {:cont, rest, {Enum.reverse(acc), attrs}}

    attrs ->
      {Enum.reverse(acc), attrs}
  end
end

defp decode_pk(<<@tag_row_checksum::integer, _::integer>>, acc) do
  # finish primary key(s) decoding and no attribute column(s) decoding
  {
    Enum.reverse(acc),
    nil
  }
end

defp decode_pk(<<@tag_row_checksum::integer, _::integer, rest::bitstring>>, acc) do
  # finish primary keys(s) and no attribute column(s) decoding, but still be with other row(s) decoding
  {
    :cont,
    rest,
    {
      Enum.reverse(acc),
      nil
    }
  }
end

defp decode_pk(_, acc) do
  # still some ignorable bytes but can finish row data decoding
  {
    Enum.reverse(acc),
    nil
  }
end
# Decodes one attribute cell as {name, value, timestamp}; the name length
# is read as a byte size (binary-size/1).
defp decode_attr(
       <<@tag_cell::integer, @tag_cell_name::integer, attr_field_size::little-integer-size(32),
         attr_field::binary-size(attr_field_size), rest::bitstring>>,
       acc
     ) do
  # attribute columns decoding
  {attr_value, timestamp, rest} = calculate_attr_value(rest)
  acc = [{attr_field, attr_value, timestamp} | acc]
  decode_attr(rest, acc)
end

defp decode_attr(<<@tag_row_checksum::integer, _::integer>>, acc) do
  # be with an ending flag to finish row data decoding
  Enum.reverse(acc)
end

# NOTE(review): this empty-accumulator catch-all is ordered *before* the
# checksum-with-rest clause below, so a row with zero attributes that is
# followed by more rows would return nil here and drop `rest` — TODO
# confirm whether that input can occur in practice.
defp decode_attr(_, []) do
  nil
end

defp decode_attr(<<@tag_row_checksum::integer, _::integer, rest::bitstring>>, acc) do
  # current row data is decoded but still need to process other row(s) data
  {rest, Enum.reverse(acc)}
end

defp decode_attr(_, acc) do
  # still some ignorable bytes but can finish row data decoding
  Enum.reverse(acc)
end
# Reads an optional 64-bit LE cell timestamp followed by the cell
# checksum; returns {timestamp | nil, rest}.
defp decode_attr_timestamp(
       <<@tag_cell_timestamp::integer, timestamp::little-integer-size(64),
         @tag_cell_checksum::integer, _row_crc8::integer, rest::bitstring>>
     ) do
  {timestamp, rest}
end

defp decode_attr_timestamp(<<@tag_cell_checksum::integer, _row_crc8::integer, rest::bitstring>>) do
  {nil, rest}
end
defp calculate_pk_value(
<<@tag_cell_value::integer, _total_bytes_size::little-integer-size(32),
@vt_integer::integer, pk_value::binary-size(8), @tag_cell_checksum::integer,
_row_crc8::integer, rest::bitstring>>
) do
<<value::signed-little-integer-size(64)>> = pk_value
{value, rest}
end
defp calculate_pk_value(
<<@tag_cell_value::integer, _total_bytes_size::little-integer-size(32), type::integer,
pk_value_size::little-integer-size(32), value::binary-size(pk_value_size),
@tag_cell_checksum::integer, _row_crc8::integer, rest::bitstring>>
)
when type == @vt_string or type == @vt_blob do
{value, rest}
end
defp calculate_pk_value(
<<@tag_cell_value::integer, _total_bytes_size::little-integer-size(32), type::integer,
_rest::bitstring>> = input
) do
raise ExAliyunOts.RuntimeError,
"Unexcepted primary type as: `#{inspect(type)}` and its binary input: `#{inspect(input)}`"
end
defp calculate_attr_value(
<<@tag_cell_value::integer, _total_size::little-integer-size(32), @vt_boolean::integer,
1::integer, rest::bitstring>>
) do
{timestamp, rest} = decode_attr_timestamp(rest)
{true, timestamp, rest}
end
defp calculate_attr_value(
<<@tag_cell_value::integer, _total_size::little-integer-size(32), @vt_boolean::integer,
_::integer, rest::bitstring>>
) do
{timestamp, rest} = decode_attr_timestamp(rest)
{false, timestamp, rest}
end
defp calculate_attr_value(
<<@tag_cell_value::integer, _total_size::little-integer-size(32), @vt_integer::integer,
value::signed-little-integer-size(64), rest::bitstring>>
) do
{timestamp, rest} = decode_attr_timestamp(rest)
{value, timestamp, rest}
end
defp calculate_attr_value(
<<@tag_cell_value::integer, _total_size::little-integer-size(32), @vt_double::integer,
value::signed-little-float-size(64), rest::bitstring>>
) do
{timestamp, rest} = decode_attr_timestamp(rest)
{value, timestamp, rest}
end
defp calculate_attr_value(
<<@tag_cell_value::integer, _total_size::little-integer-size(32), type::integer,
value_size::little-integer-size(32), value::binary-size(value_size), rest::bitstring>>
)
when type == @vt_string or type == @vt_blob do
{timestamp, rest} = decode_attr_timestamp(rest)
{value, timestamp, rest}
end
defp calculate_attr_value(
<<@tag_cell_value::integer, _total_size::little-integer-size(32), type::integer>> = input
) do
raise ExAliyunOts.RuntimeError,
"Unexcepted attribute column type as: `#{inspect(type)}` and its binary input: `#{
inspect(input)
}`"
end
end
|
lib/ex_aliyun_ots/plainbuffer/plainbuffer.ex
| 0.50293
| 0.407451
|
plainbuffer.ex
|
starcoder
|
defmodule Commanded.Aggregates.AggregateStateBuilder do
  alias Commanded.Aggregates.Aggregate
  alias Commanded.EventStore
  alias Commanded.EventStore.RecordedEvent
  alias Commanded.EventStore.SnapshotData
  alias Commanded.Snapshotting

  # Number of events fetched per batch when streaming from the event store.
  @read_event_batch_size 100

  @doc """
  Populate the aggregate's state from a snapshot, if present, and it's events.
  Attempt to fetch a snapshot for the aggregate to use as its initial state.
  If the snapshot exists, fetch any subsequent events to rebuild its state.
  Otherwise start with the aggregate struct and stream all existing events for
  the aggregate from the event store to rebuild its state from those events.
  """
  def populate(%Aggregate{} = state) do
    %Aggregate{aggregate_module: aggregate_module, snapshotting: snapshotting} = state

    snapshotting
    |> Snapshotting.read_snapshot()
    |> case do
      {:ok, %SnapshotData{source_version: source_version, data: data}} ->
        # Snapshot found: start from the snapshotted version and state.
        %Aggregate{state | aggregate_version: source_version, aggregate_state: data}

      {:error, _error} ->
        # No snapshot present, or exists but for outdated state, so use intial empty state
        %Aggregate{state | aggregate_version: 0, aggregate_state: struct(aggregate_module)}
    end
    |> rebuild_from_events()
  end

  @doc """
  Load events from the event store, in batches, to rebuild the aggregate state
  """
  def rebuild_from_events(%Aggregate{} = state) do
    %Aggregate{
      application: application,
      aggregate_uuid: aggregate_uuid,
      aggregate_version: aggregate_version
    } = state

    # Stream events from the version after the one we already have.
    application
    |> EventStore.stream_forward(aggregate_uuid, aggregate_version + 1, @read_event_batch_size)
    |> case do
      {:error, :stream_not_found} ->
        # aggregate does not exist, return initial state
        state

      event_stream ->
        rebuild_from_event_stream(event_stream, state)
    end
  end

  # Rebuild aggregate state from a `Stream` of its events by folding each
  # recorded event into the aggregate.
  defp rebuild_from_event_stream(event_stream, %Aggregate{} = initial_state) do
    Enum.reduce(event_stream, initial_state, &apply_recorded_event/2)
  end

  # Apply a single recorded event, advancing the version to the event's
  # stream version and delegating state transition to the aggregate module.
  defp apply_recorded_event(%RecordedEvent{} = event, %Aggregate{} = state) do
    %RecordedEvent{data: data, stream_version: stream_version} = event
    %Aggregate{aggregate_module: aggregate_module, aggregate_state: aggregate_state} = state

    %Aggregate{
      state
      | aggregate_version: stream_version,
        aggregate_state: aggregate_module.apply(aggregate_state, data)
    }
  end
end
|
lib/commanded/aggregates/aggregate_state_builder.ex
| 0.747432
| 0.440349
|
aggregate_state_builder.ex
|
starcoder
|
defmodule Transformers.Concatenation do
  @behaviour Transformation

  alias Transformers.FieldFetcher

  @impl Transformation
  # Join the values of `sourceFields` with `separator` and store the result
  # under `targetField`. Returns `{:ok, payload}` or `{:error, reason}`.
  def transform(payload, parameters) do
    with {:ok, [source_fields, separator, target_field]} <- validate(parameters),
         {:ok, values} <- fetch_values(payload, source_fields),
         :ok <- can_convert_to_string?(values) do
      {:ok, Map.put(payload, target_field, Enum.join(values, separator))}
    end
  end

  # Ensure the three required parameters are present.
  def validate(parameters) do
    with {:ok, source_fields} <- FieldFetcher.fetch_parameter(parameters, "sourceFields"),
         {:ok, separator} <- FieldFetcher.fetch_parameter(parameters, "separator"),
         {:ok, target_field} <- FieldFetcher.fetch_parameter(parameters, "targetField") do
      {:ok, [source_fields, separator, target_field]}
    end
  end

  # Look up every source field; succeed only when all are present.
  def fetch_values(payload, field_names) when is_list(field_names) do
    payload
    |> find_values_or_errors(field_names)
    |> all_values_if_present_else_error()
  end

  def fetch_values(_, _), do: {:error, "Expected list but received single value: sourceFields"}

  # Partition field names into found values and missing-field errors,
  # preserving the original field order in both lists.
  def find_values_or_errors(payload, field_names) do
    field_names
    |> Enum.reduce(%{values: [], errors: []}, fn field_name, accumulated ->
      case Map.fetch(payload, field_name) do
        {:ok, value} -> update_list_in_map(accumulated, :values, value)
        :error -> update_list_in_map(accumulated, :errors, field_name)
      end
    end)
    |> reverse_list(:values)
    |> reverse_list(:errors)
  end

  # Prepend `value` onto the list stored at `key` (empty list if absent).
  def update_list_in_map(map, key, value) do
    Map.update(map, key, [], &[value | &1])
  end

  # Reverse the list stored at `key` (empty list if absent).
  def reverse_list(map, key) do
    Map.update(map, key, [], &Enum.reverse/1)
  end

  # {:ok, values} when nothing was missing, otherwise a descriptive error.
  def all_values_if_present_else_error(results) do
    case Map.get(results, :errors) do
      [] -> {:ok, Map.get(results, :values)}
      _missing -> {:error, "Missing field in payload: #{pretty_print_errors(results)}"}
    end
  end

  # Render missing field names as "[a, b, c]".
  def pretty_print_errors(results) do
    "[" <> Enum.map_join(Map.get(results, :errors), ", ", &to_string/1) <> "]"
  end

  # :ok when every value implements String.Chars, error tuple otherwise.
  def can_convert_to_string?(values) do
    Enum.each(values, &to_string/1)
    :ok
  rescue
    _ -> {:error, "Could not convert all source fields into strings"}
  end
end
|
apps/transformers/lib/transformations/concatenation.ex
| 0.706798
| 0.441854
|
concatenation.ex
|
starcoder
|
defmodule ExVenture.Rooms.Room do
  @moduledoc """
  Schema for Rooms
  """

  use Ecto.Schema

  import Ecto.Changeset

  alias ExVenture.Rooms
  alias ExVenture.StagedChanges.StagedChange
  alias ExVenture.Zones.Zone

  # Colors a room is allowed to use on the map UI.
  @map_colors ["blue", "brown", "green", "yellow"]

  def map_colors(), do: @map_colors

  schema "rooms" do
    field(:live_at, :utc_datetime)
    field(:name, :string)
    field(:description, :string)
    field(:listen, :string)
    field(:map_color, :string)
    field(:map_icon, :string)
    field(:x, :integer)
    field(:y, :integer)
    field(:z, :integer)
    field(:notes, :string)

    belongs_to(:zone, Zone)
    has_many(:staged_changes, {"room_staged_changes", StagedChange}, foreign_key: :struct_id)

    # embeds features
    timestamps()
  end

  @doc """
  Changeset for creating a room.
  """
  def create_changeset(struct, params), do: changeset(struct, params)

  @doc """
  Changeset for updating a room.
  """
  def update_changeset(struct, params), do: changeset(struct, params)

  # Shared validation pipeline: create and update changesets were previously
  # duplicated byte-for-byte, so they now delegate to this single helper.
  defp changeset(struct, params) do
    struct
    |> cast(params, [:name, :description, :listen, :map_color, :map_icon, :notes, :x, :y, :z])
    |> validate_required([:name, :description, :listen, :x, :y, :z, :zone_id])
    |> validate_inclusion(:map_color, @map_colors)
    |> validate_inclusion(:map_icon, Rooms.available_map_icons())
    |> foreign_key_constraint(:zone_id)
  end

  @doc """
  Changeset that marks the room as live (published) as of now.
  """
  def publish_changeset(struct) do
    struct
    |> change()
    |> put_change(:live_at, DateTime.truncate(DateTime.utc_now(), :second))
  end
end
defmodule ExVenture.Rooms do
  @moduledoc """
  CRUD Rooms
  """

  import Ecto.Query

  alias ExVenture.Repo
  alias ExVenture.Rooms.Room
  alias ExVenture.StagedChanges

  # Expose the allowed map colors without callers needing the schema module.
  defdelegate map_colors(), to: Room

  # Blank changeset for a "new room" form, pre-associated to the given zone.
  def new(zone), do: zone |> Ecto.build_assoc(:rooms) |> Ecto.Changeset.change(%{})

  # Blank changeset for an "edit room" form.
  def edit(room), do: Ecto.Changeset.change(room, %{})

  @doc """
  Get all rooms, paginated
  """
  def all(opts) do
    opts = Enum.into(opts, %{})

    Room
    |> order_by([r], asc: r.zone_id, asc: r.name)
    |> preload([:staged_changes, :zone])
    |> Repo.paginate(opts[:page], opts[:per])
    |> staged_changes()
  end

  @doc """
  Get all rooms for a zone, paginated
  """
  def all(zone, opts) do
    opts = Enum.into(opts, %{})

    Room
    |> where([r], r.zone_id == ^zone.id)
    |> order_by([r], asc: r.name)
    |> preload([:staged_changes, :zone])
    |> Repo.paginate(opts[:page], opts[:per])
    |> staged_changes()
  end

  # Paginated result shape: apply staged changes to each room on the page.
  defp staged_changes(%{page: rooms, pagination: pagination}) do
    rooms = Enum.map(rooms, &StagedChanges.apply/1)
    %{page: rooms, pagination: pagination}
  end

  # Plain-list result shape.
  defp staged_changes(rooms) do
    Enum.map(rooms, &StagedChanges.apply/1)
  end

  @doc """
  Get a room
  """
  def get(id) do
    case Repo.get(Room, id) do
      nil ->
        {:error, :not_found}

      room ->
        # Apply staged changes to both the room and its preloaded zone so the
        # caller sees the pending (unpublished) values.
        room =
          room
          |> Repo.preload([:staged_changes, zone: [:staged_changes]])
          |> StagedChanges.apply()
          |> StagedChanges.apply(:zone)

        {:ok, room}
    end
  end

  @doc """
  Create a new room
  """
  def create(zone, params) do
    zone
    |> Ecto.build_assoc(:rooms)
    |> Room.create_changeset(params)
    |> Repo.insert()
  end

  @doc """
  Update a room
  """
  # Not yet published (`live_at` is nil): safe to write directly.
  def update(%{live_at: nil} = room, params) do
    room
    |> Room.update_changeset(params)
    |> Repo.update()
  end

  # Already live: record the edit as staged changes instead of mutating the
  # published room in place.
  def update(room, params) do
    room
    |> Room.update_changeset(params)
    |> StagedChanges.record_changes()
  end

  @doc """
  Publish the room
  When a room is published, it will startup inside the game.
  """
  def publish(room) do
    room
    |> Room.publish_changeset()
    |> Repo.update()
  end

  @doc """
  Get a list of all available icons
  """
  def available_map_icons() do
    # Icon names are derived from the image filenames shipped under
    # priv/static/images/map-icons (extension stripped).
    :code.priv_dir(:ex_venture)
    |> Path.join("static/images/map-icons/*")
    |> Path.wildcard()
    |> Enum.map(fn file ->
      Path.basename(file, Path.extname(file))
    end)
  end
end
|
lib/ex_venture/rooms.ex
| 0.693784
| 0.436022
|
rooms.ex
|
starcoder
|
defmodule RiakEcto3 do
  @default_hostname "localhost"
  @default_port 8087

  @moduledoc """
  Riak KV 2.0 adapter for Ecto 3.
  Works by mapping Ecto Schemas to Riak Map-CRDTs.
  **NOTE:** To use, ensure the following has been executed on your Riak database:
  riak-admin bucket-type create your_database_name '{"props":{"datatype":"map"}}'
  riak-admin bucket-type activate your_database_name
  Here, `your_database_name` refers to any name you'd like the bucket type
  that RiakEcto3 will use to be called. This is the same name you should use
  in your configuration.
  The `mix ecto.create` task will also do this for you.
  ## Supported Configuration Options:
  - `database:` Name of the `bucket_type` to use for storing all data of this Repo.
  This should be a bucket_type that has the datatype set to `map`.
  - `hostname:` The hostname to connect to. Defaults to `#{@default_hostname}`.
  - `port:` The port to connect to. Defaults to `#{@default_port}`.
  ## Ecto
  RiakEcto3 currently does not use a pool (but this might change in the future).
  ## Queries
  RiakEcto3 only supports `get`.
  (In the future, hopefully we support simple 2i (secondary indexes) as well)
  ## Mix tasks
  ### Storage
  RiakEcto3 only supports the `mix ecto.create` task.
  This task will use `riak-admin` locally to create an appropriate bucket-type
  that uses the `map` CRDT.
  Be aware that `riak-admin` does not use any connection-settings, as it expects
  to be ran on the computer that (one of the nodes of) the database will reside on.
  The `mix ecto.drop` task is not supported, because Riak has no way to
  drop an existing bucket_type.
  """

  @behaviour Ecto.Adapter
  @behaviour Ecto.Adapter.Storage

  @impl Ecto.Adapter
  # Injects convenience functions (get/insert/delete/raw SOLR/2i range) into
  # the module that `use`s this adapter via its Repo; each injected function
  # looks up this adapter through the Ecto repo registry at call time.
  defmacro __before_compile__(_env) do
    quote do
      @doc """
      Fetches a struct using the given primary key `id`.
      On success, will return the struct.
      On failure (if the struct does not exist within the Riak database), returns `nil`.
      iex> alice = %User{name: "Alice", age: 10, id: "33"}
      iex> Repo.get(User, "33") == nil
      true
      iex> {:ok, %User{name: "Alice", age: 10, id: "33"}} = Repo.insert(alice)
      iex> %User{name: "Alice", age: 10, id: "33"} = Repo.get(User, "33")
      iex> {:ok, %User{name: "Alice", age: 10, id: "33"}} = Repo.delete(alice)
      iex> Repo.get(User, "33") == nil
      true
      """
      def get(schema_module, id, opts \\ []) do
        {adapter, meta} = Ecto.Repo.Registry.lookup(__MODULE__)
        adapter.get(__MODULE__, meta, schema_module, id, opts)
      end

      @doc """
      Inserts (or updates) a struct in the database.
      Pass either a struct or an `Ecto.Changeset`
      On success, will return `{:ok, struct}`.
      On failure (when there were validation problems for instance), will return `{:error, struct_or_changeset}`
      """
      def insert(struct_or_changeset, opts \\ []) do
        {adapter, meta} = Ecto.Repo.Registry.lookup(__MODULE__)
        adapter.insert(__MODULE__, meta, struct_or_changeset, opts)
      end

      @doc """
      Deletes a struct from the database, using the primary ID of the struct or changeset
      passed to this function.
      Returns `{:ok, struct}` on success.
      Raises `Ecto.NoPrimaryKeyValueError` if the passed struct or changeset does not have a primary key set.
      """
      def delete(struct_or_changeset, opts \\ []) do
        {adapter, meta} = Ecto.Repo.Registry.lookup(__MODULE__)
        adapter.delete(__MODULE__, meta, struct_or_changeset, opts)
      end

      @doc """
      Allows you to perform a raw SOLR query on a given Schema module.
      See https://docs.riak.com/riak/kv/2.2.3/developing/usage/search/index.html
      and https://docs.riak.com/riak/kv/2.2.3/developing/usage/searching-data-types.1.html#data-types-and-search-examples
      for more information and syntax.
      A query is prefixed to constrain it to the given schema module's bucket (in the database's bucket type).
      The response of this will either be `{:error, problem}` or `{:ok, results}`
      where `results` will be a list of maps.
      Each of these maps has a `:meta`-key containing the raw SOLR result for that resource,
      and a `:resource` key, which is a 0-arity function that will fetch the given resource from the repo when called.
      Example:
      iex> bob = %User{name: "Bob", id: "42", age: 41}
      iex> {:ok, _} = Repo.insert(bob)
      iex> :timer.sleep(2000) # It takes 'typically a second' (so wait for two to be safe) before SOLR is able to see changes.
      iex> {:ok, results} = Repo.riak_raw_solr_query(RiakEcto3Test.Example.User, "age_register:[40 TO 41]")
      iex> results |> Enum.map(fn elem -> elem.resource.() end) |> Enum.any?(fn user -> user.name == "Bob" end)
      true
      """
      def riak_raw_solr_query(schema_module, query, solr_opts \\ []) do
        {adapter, meta} = Ecto.Repo.Registry.lookup(__MODULE__)
        adapter.raw_solr_query(__MODULE__, meta, schema_module, query, solr_opts)
      end

      @doc """
      Allows you look up all keys in between a lower and upper bound.
      Be aware that since all Riak keys are strings, these lower and upper bounds are also cast to strings,
      and that lexicographical comparisons are made!
      Under the hood, it uses Riak's 'secondary indexes', which are only supported when using the
      'Leveldb' or 'Memory' storage backends.
      Example:
      iex> bob = %User{name: "Bob", id: "1234", age: 38}
      iex> {:ok, _} = Repo.insert(bob)
      iex> jose = %User{name: "Jose", id: "1240", age: 30}
      iex> {:ok, _} = Repo.insert(jose)
      iex> Repo.riak_find_keys_between(User, "1200", "1300") |> Enum.sort
      ["1234", "1240"]
      """
      def riak_find_keys_between(schema_module, lower_bound, upper_bound) do
        {adapter, meta} = Ecto.Repo.Registry.lookup(__MODULE__)
        adapter.find_keys_between(__MODULE__, meta, schema_module, lower_bound, upper_bound)
      end
    end
  end

  @impl Ecto.Adapter
  @doc """
  NOTE: Currently we are not using the connection pool to keep the implementation simple.
  This could be changed in a future version since `Riak` provides one.
  """
  def checkout(_adapter_meta, _config, fun) do
    fun.()
  end

  @impl Ecto.Adapter
  @doc """
  Dumps datatypes so they can properly be stored in Riak
  """
  def dumpers(primitive_type, ecto_type)
  def dumpers(:string, type), do: [type, &RiakEcto3.Dumpers.string/1]
  def dumpers(:id, type), do: [type, &RiakEcto3.Dumpers.integer/1]
  def dumpers(:integer, type), do: [type, &RiakEcto3.Dumpers.integer/1]
  def dumpers(:boolean, type), do: [type, &RiakEcto3.Dumpers.boolean/1]
  def dumpers(:float, type), do: [type, &RiakEcto3.Dumpers.float/1]
  def dumpers(:binary_id, type), do: [type, &RiakEcto3.Dumpers.string/1]
  # Unknown primitives pass through without an extra dump step.
  def dumpers(_primitive, type) do
    [type]
  end

  @impl Ecto.Adapter
  @doc """
  Implementation of Ecto.Adapter.ensure_all_started
  """
  def ensure_all_started(_config, app_restart_type) do
    with {:ok, from_driver} <- Application.ensure_all_started(:riak, app_restart_type) do
      # We always return the adapter to force it to be restarted if necessary, because this is what `ecto_sql` also does.
      # See: https://github.com/elixir-ecto/ecto_sql/blob/master/lib/ecto/adapters/sql.ex#L420
      {:ok, (List.delete(from_driver, :riak) ++ [:riak])}
    end
  end

  @impl Ecto.Adapter
  @doc """
  Initializes the connection with Riak.
  Implementation of Ecto.Adapter.init
  """
  def init(config) do
    hostname = Keyword.get(config, :hostname, @default_hostname)
    port = Keyword.get(config, :port, @default_port)
    # A single (non-pooled) connection process is supervised by Ecto.
    child_spec = %{id: Riak.Connection, start: {Riak.Connection, :start_link, [String.to_charlist(hostname), port]}}
    {:ok, child_spec, %{}}
  end

  @impl Ecto.Adapter
  @doc """
  TODO Properly implement
  """
  def loaders(primitive_type, ecto_type)
  def loaders(:string, type), do: [&RiakEcto3.Loaders.string/1, type]
  def loaders(:id, type), do: [&RiakEcto3.Loaders.integer/1, type]
  def loaders(:integer, type), do: [&RiakEcto3.Loaders.integer/1, type]
  def loaders(:boolean, type), do: [&RiakEcto3.Loaders.boolean/1, type]
  def loaders(:float, type), do: [&RiakEcto3.Loaders.float/1, type]
  def loaders(:binary_id, type), do: [&RiakEcto3.Loaders.string/1, type]
  def loaders(_primitive, type), do: [type]

  @doc """
  Implementation of Repo.get
  Returns `nil` if nothing is found. Returns the structure if something was found.
  Raises an ArgumentError using improperly.
  """
  def get(repo, meta, schema_module, id, _opts) do
    source = schema_module.__schema__(:source)
    # Riak keys are strings; coerce whatever id shape the caller passed.
    riak_id = "#{id}"
    result = Riak.find(meta.pid, repo.config[:database], source, riak_id)

    case result do
      {:error, problem} -> raise ArgumentError, "Riak error: #{problem}"
      nil -> nil
      riak_map ->
        repo.load(schema_module, load_riak_map(riak_map))
    end
  end

  # Converts a Riak Map-CRDT into a plain map with atom keys, suitable for
  # `repo.load/2`. Only :register values are fully supported today.
  # NOTE(review): `String.to_existing_atom/1` assumes every stored field name
  # already exists as an atom (true when the schema module is loaded first).
  defp load_riak_map(riak_map) do
    riak_map
    |> Riak.CRDT.Map.value
    |> Enum.map(fn {{key, value_type}, riak_value} ->
      value = case value_type do
        :register -> riak_value # String
        :counter -> riak_value # TODO
        :flag -> riak_value # TODO
        :map -> raise "Not Implemented"
      end
      {String.to_existing_atom(key), value}
    end)
    |> Enum.into(%{})
  end

  # Dumps a schema struct into a Riak Map-CRDT.
  def dump(struct = %schema_module{}) do
    build_riak_map(schema_module, Map.from_struct(struct))
  end

  # Builds a Riak Map-CRDT from a plain map of fields, dumping each value
  # through the Ecto type system; fields without a schema type are dropped.
  defp build_riak_map(schema_module, map = %{}) do
    map
    |> Map.to_list
    |> Enum.map(fn {key, value} ->
      type = schema_module.__schema__(:type, key)
      {key, type, value}
    end)
    |> Enum.reject(fn {_key, type, _} ->
      type == nil
    end)
    |> Enum.map(fn {key, type, value} ->
      case Ecto.Type.adapter_dump(__MODULE__, type, value) do
        {:ok, riak_value} ->
          {Atom.to_string(key), riak_value}
        _ -> raise "Could not properly dump `#{value}` to Ecto type `#{inspect(type)}`. Please make sure it is cast properly."
      end
    end)
    |> Enum.reduce(Riak.CRDT.Map.new, fn {key, value}, riak_map ->
      Riak.CRDT.Map.put(riak_map, key, value)
    end)
  end

  @doc """
  Implementation of Repo.insert
  """
  def insert(repo, meta, struct_or_changeset, opts)

  # Changeset variant: only the changed fields are dumped.
  def insert(repo, meta, changeset = %Ecto.Changeset{data: struct = %schema_module{}, changes: changes}, opts) do
    riak_map = build_riak_map(schema_module, changes)
    source = schema_module.__schema__(:source)
    [primary_key | _] = schema_module.__schema__(:primary_key)
    riak_id = "#{Map.fetch!(struct, primary_key)}"
    case do_insert(repo, meta, source, riak_map, riak_id, schema_module, opts) do
      :ok -> {:ok, repo.get(schema_module, riak_id)}
      :error -> {:error, changeset}
    end
  end

  # Struct variant: the whole struct is dumped.
  def insert(repo, meta, struct = %schema_module{}, opts) do
    riak_map = dump(struct)
    source = schema_module.__schema__(:source)
    [primary_key | _] = schema_module.__schema__(:primary_key)
    riak_id = "#{Map.fetch!(struct, primary_key)}"
    case do_insert(repo, meta, source, riak_map, riak_id, schema_module, opts) do
      :ok -> {:ok, repo.get(schema_module, riak_id)}
      :error -> {:error, Ecto.Changeset.change(struct)}
    end
  end

  # NOTE(review): the two `insert/4` callers above match on bare `:ok`/`:error`,
  # while this function can also return `{:ok, struct}` when `Riak.update/5`
  # returns `{:ok, riak_map}`. One of the two shapes must be unreachable
  # depending on what `Riak.update/5` actually returns — confirm against the
  # riak client and align caller and callee.
  defp do_insert(repo, meta, source, riak_map, riak_id, schema_module, _opts) do
    case Riak.update(meta.pid, riak_map, repo.config[:database], source, riak_id) do
      {:ok, riak_map} ->
        res = repo.load(schema_module, load_riak_map(riak_map))
        {:ok, res}
      other ->
        other
    end
  end

  @doc """
  Implementation of Repo.delete
  """
  def delete(repo, meta, struct_or_changeset, opts)

  # Invalid changesets are rejected without touching the database.
  def delete(_repo, _meta, changeset = %Ecto.Changeset{valid?: false}, _opts) do
    {:error, changeset}
  end

  # Valid changeset: delete based on its underlying struct.
  def delete(repo, meta, %Ecto.Changeset{data: struct = %_schema_module{}}, opts) do
    delete(repo, meta, struct, opts)
  end

  def delete(repo, meta, struct = %schema_module{}, _opts) do
    source = schema_module.__schema__(:source)
    [primary_key | _] = schema_module.__schema__(:primary_key)
    riak_id = "#{Map.fetch!(struct, primary_key)}"
    # An empty string means the primary key was nil (interpolated to "").
    if riak_id == "" do
      raise Ecto.NoPrimaryKeyValueError
    end
    case Riak.delete(meta.pid, repo.config[:database], source, riak_id) do
      :ok -> {:ok, struct}
      :error -> raise Ecto.StaleEntryError
    end
  end

  # Runs a raw SOLR query, constrained to this repo's bucket type and the
  # schema's bucket via the `_yz_rt`/`_yz_rb` prefix.
  def raw_solr_query(repo, meta, schema_module, query, solr_opts) do
    require Logger
    source = schema_module.__schema__(:source)
    database = repo.config[:database]
    compound_query = ~s[_yz_rt:"#{database}" AND _yz_rb:"#{source}" AND #{query}]
    index = "#{database}_index"
    case Riak.Search.query(meta.pid, index, compound_query, solr_opts) do
      {:error, problem} ->
        Logger.debug("Solr Query that was executed: #{compound_query}")
        {:error, problem}
      {:ok, {:search_results, results, _max_score, _num_found}} ->
        results =
          results
          |> Enum.map(fn {_index_name, properties} ->
            meta = Enum.into(properties, %{})
            # `:resource` is lazy: fetching happens only when the fun is called.
            resource = fn -> repo.get(schema_module, meta["_yz_rk"]) end
            %{meta: meta, resource: resource}
          end)
        {:ok, results}
    end
  end

  # Secondary-index ("$key") range query; bounds are compared as strings
  # (lexicographically). Raises on any non-ok client reply via the match.
  def find_keys_between(repo, meta, schema_module, lower_bound, upper_bound) do
    source = schema_module.__schema__(:source)
    database = repo.config[:database]
    {:ok, {:index_results_v1, results, _, _}} = :riakc_pb_socket.get_index(meta.pid, {database, source}, "$key", to_string(lower_bound), to_string(upper_bound))
    results
  end

  @impl Ecto.Adapter.Storage
  # Creates and activates the map bucket-type via `riak-admin`.
  # NOTE(review): `riak-admin` runs locally (see moduledoc); the connection is
  # opened only to fail fast — `pid` is intentionally unused. Also note the
  # search index is created even when bucket-type creation failed — confirm
  # this is intended.
  def storage_up(config) do
    require Logger
    {:ok, database} = Keyword.fetch(config, :database)
    # Exact stderr/stdout text riak-admin prints when the type already exists.
    already_exists_binary = "Error creating bucket type #{database}:\nalready_active\n"
    hostname = Keyword.get(config, :hostname, @default_hostname)
    port = Keyword.get(config, :port, @default_port)
    res = with {:ok, pid} <- Riak.Connection.start_link(String.to_charlist(hostname), port),
         Logger.info("Creating bucket type `#{database}`..."),
         {res1, 0} <- System.cmd("riak-admin", ["bucket-type", "create", database, ~s[{"props":{"datatype":"map"}}]]),
         Logger.info("Activating Bucket Type `#{database}`..."),
         {res2, 0} <- System.cmd("riak-admin", ["bucket-type", "activate", database])
      do
      IO.puts res1
      IO.puts res2
      :ok
    else
      {^already_exists_binary, 1} ->
        {:error, :already_up}
      {command_error_string, 1} when is_binary(command_error_string) ->
        IO.inspect(command_error_string, label: "command_error_string")
        {:error, command_error_string}
      error ->
        {:error, error}
    end
    create_search_index(database, hostname, port)
    res
  end

  @doc false
  # Creates (or re-creates) the SOLR search index and associates it with the
  # bucket type so `raw_solr_query/5` can work.
  def create_search_index(database, hostname, port) do
    require Logger
    with database_index = "#{database}_index",
         {:ok, pid} <- Riak.Connection.start_link(String.to_charlist(hostname), port),
         Logger.info("(Re)Creating Search Index `#{database_index}`..."),
         :ok <- Riak.Search.Index.put(pid, database_index),
         Logger.info("(Re)Associating Search Index `#{database_index}`with bucket type `#{database}`..."),
         :ok <- Riak.Bucket.Type.put(pid, database, search_index: database_index)
      do
      :ok
    end
  end

  @impl Ecto.Adapter.Storage
  # Riak cannot drop bucket types, so "down" means deleting every key in every
  # bucket of the type, one at a time, with a progress bar.
  def storage_down(config) do
    require Logger
    with {:ok, database} <- Keyword.fetch(config, :database),
         hostname = Keyword.get(config, :hostname, @default_hostname),
         port = Keyword.get(config, :port, @default_port),
         {:ok, pid} <- Riak.Connection.start_link(String.to_charlist(hostname), port),
         buckets = Riak.Bucket.Type.list!(pid, database) do
      for bucket <- buckets do
        Logger.info "Flushing values in bucket `#{bucket}`"
        keys = Riak.Bucket.keys!(pid, database, bucket)
        n_keys = Enum.count(keys)
        keys
        |> Enum.with_index
        |> Enum.each(fn {key, index} ->
          Riak.delete(pid, database, bucket, key)
          ProgressBar.render(index + 1, n_keys)
        end)
      end
      Logger.info "NOTE: Riak does not support 'dropping' a bucket type (or buckets contained within), so this task has only removed all data contained within them."
      :ok
    end
  end
end
|
lib/riak_ecto3.ex
| 0.792825
| 0.422147
|
riak_ecto3.ex
|
starcoder
|
defmodule Plaid.Income do
  @moduledoc """
  Functions for Plaid `income` endpoint.
  """

  import Plaid, only: [make_request_with_cred: 4, get_cred: 0]

  alias Plaid.Utils

  @derive Jason.Encoder
  defstruct item: nil, income: nil, request_id: nil

  @type t :: %__MODULE__{
          item: Plaid.Item.t(),
          income: Plaid.Income.Income.t(),
          request_id: String.t()
        }
  @type params :: %{
          required(:access_token) => String.t()
        }
  @type cred :: %{required(atom) => String.t()}

  # API path segment for all income requests.
  @endpoint "income"

  defmodule Income do
    @moduledoc """
    Plaid.Income Income data structure.
    """

    @derive Jason.Encoder
    defstruct income_streams: [],
              last_year_income: nil,
              last_year_income_before_tax: nil,
              projected_yearly_income: nil,
              projected_yearly_income_before_tax: nil,
              max_number_of_overlapping_income_streams: nil,
              number_of_income_streams: nil

    # Typespecs use the built-in `number/0` type: `Number.t()` referenced a
    # nonexistent module and would fail Dialyzer's unknown-type check.
    @type t :: %__MODULE__{
            income_streams: [Plaid.Income.Income.IncomeStream.t()],
            last_year_income: number(),
            last_year_income_before_tax: number(),
            projected_yearly_income: number(),
            projected_yearly_income_before_tax: number(),
            max_number_of_overlapping_income_streams: number(),
            number_of_income_streams: number()
          }

    defmodule IncomeStream do
      @moduledoc """
      Plaid.Income.Income IncomeStream data structure.
      """

      @derive Jason.Encoder
      defstruct confidence: nil, days: nil, monthly_income: nil, name: nil

      @type t :: %__MODULE__{
              confidence: number(),
              days: number(),
              monthly_income: number(),
              name: String.t()
            }
    end
  end

  @doc """
  Gets Income data associated with an Access Token.
  Parameters
  ```
  %{
    access_token: "my-access-token"
  }
  ```
  """
  @spec get(params, cred | nil) :: {:ok, Plaid.Income.t()} | {:error, Plaid.Error.t()}
  def get(params, cred \\ get_cred()) do
    endpoint = "#{@endpoint}/get"

    :post
    |> make_request_with_cred(endpoint, cred, params)
    |> Utils.handle_resp(:income)
  end
end
|
lib/plaid/income.ex
| 0.794385
| 0.616272
|
income.ex
|
starcoder
|
defmodule Ipfinder do
  use HTTPotion.Base

  alias Ipfinder.Validation.Asnvalidation
  alias Ipfinder.Validation.Domainvalidation
  alias Ipfinder.Validation.Firewallvalidation
  # NOTE(review): duplicate of the alias on the line above — harmless warning,
  # can be removed.
  alias Ipfinder.Validation.Firewallvalidation
  alias Ipfinder.Validation.Ipvalidation
  # alias Ipfinder.Validation.Tokenvalidation

  @moduledoc """
  # IPFinder elixir Client Library
  The official elixir client library for the [IPFinder.io](https://ipfinder.io) get details for :
  - IP address details (city, region, country, postal code, latitude and more ..)
  - ASN details (Organization name, registry,domain,comany_type, and more .. )
  - Firewall by supported formats details (apache_allow, nginx_deny, CIDR , and more ..)
  - IP Address Ranges by the Organization name details (list_asn, list_prefixes , and more ..)
  - service status details (queriesPerDay, queriesLeft, key_type, key_info)
  - Get Domain IP (asn, organization,country_code ....)
  - Get Domain IP history (total_ip, list_ip,organization,asn ....)
  - Get list Domain By ASN, Country,Ranges (select_by , total_domain , list_domain ....)
  - [GitHub ipfinder elixir](https://github.com/ipfinder-io/ip-finder-elixir)
  - [ipfinder](https://ipfinder.io/)
  ## Documentation for Ipfinder.
  """
  @moduledoc since: "1.0.1"

  # DEFAULT BASE URL
  @default_base_url "https://api.ipfinder.io/v1/"
  def default_base_url, do: @default_base_url

  # The free token
  @default_api_token "free"
  def default_api_token, do: @default_api_token

  # DEFAULT FORMAT
  @format "json"
  def format, do: @format

  # service status path
  @status_path "info"
  def status_path, do: @status_path

  # IP Address Ranges path
  @ranges_path "ranges/"
  def ranges_path, do: @ranges_path

  # Firewall path
  @firewall_path "firewall/"
  def firewall_path, do: @firewall_path

  # Get Domain IP path
  @domain_path "domain/"
  def domain_path, do: @domain_path

  # Get Domain IP history path
  @domain_h_path "domainhistory/"
  def domain_h_path, do: @domain_h_path

  # Domain By ASN, Country,Ranges path
  @domain_by_path "domainby/"
  def domain_by_path, do: @domain_by_path

  # Client configuration: API token plus base URL (the latter to allow a
  # proxy pass). Note: function names below use camelCase, which is not
  # idiomatic Elixir but is kept for published-API compatibility.
  defstruct token: nil,
            baseUrl: nil

  @doc """
  Constructor
  ## Parameters
  * `token` - add your token
  * `baseUrl` - add proxy pass
  """
  def new(token \\ @default_api_token, baseUrl \\ @default_base_url) do
    # Tokenvalidation.validate(token)
    %Ipfinder{
      token: token,
      baseUrl: baseUrl
    }
  end

  @doc """
  call to server
  ## Parameters
  * `this` - Ipfinder
  * `path` - specific path of asn, IP address, ranges, Firewall,Token status
  * `format`- available format `json` `jsonp` `php` `xml`and Firewall format
  """
  def call(this, path, format \\ @format) do
    # HTTPotion.post this.baseUrl, [body: "hello=" <> URI.encode("w o r l d !!"), headers: ["User-Agent": "My App", "Content-Type": "application/x-www-form-urlencoded"]]
    # body = '{\"token\": "this.token" , \"format\" : "{format}"}'
    # Token and requested format are sent in a JSON body.
    body = %{"token" => this.token, "format" => format}
    header = ["User-Agent": "IPfinder elixir-client", "Content-Type": "application/json"]
    req = HTTPotion.post(this.baseUrl <> path, body: Jason.encode!(body), headers: header)
    # NOTE(review): on a non-200 status this decodes `req.error`, which is
    # presumably not a JSON binary on HTTPotion responses (error details
    # normally live in the response body or an %HTTPotion.ErrorResponse{}
    # message) — confirm this path does not itself raise.
    if req.status_code != 200 do
      {:error, Jason.decode!(req.error, keys: :atoms)}
    else
      {:ok, Jason.decode!(req.body, keys: :atoms)}
    end
  end

  @doc """
  Get details for an Your IP address.
  ## Parameters
  * `this` - Ipfinder
  ## Examples
  ```ex
  {:ok, auth} = Ipfinder.authentication(Ipfinder)
  ```
  """
  def authentication(this) do
    # Empty path: the API resolves the caller's own IP.
    call(this, "")
  end

  @doc """
  Get details for an IP address.
  ## Parameters
  * `this` - Ipfinder
  * `path` - IP address.
  ## Examples
  ```ex
  {:ok, ip} = Ipfinder.getAddressInfo(Ipfinder,"1.0.0.0")
  ```
  """
  def getAddressInfo(this, path) do
    Ipvalidation.validate(path)
    call(this, path)
  end

  @doc """
  Get details for an AS number.
  ## Parameters
  * `this` - Ipfinder
  * `path` - AS number.
  ## Examples
  ```ex
  {:ok, asn} = Ipfinder.getAsn(Ipfinder,"as1")
  ```
  """
  def getAsn(this, path) do
    Asnvalidation.validate(path)
    call(this, path)
  end

  @doc """
  Get details for an API Token .
  ## Examples
  ```ex
  {:ok, status} = Ipfinder.getStatus(Ipfinder)
  ```
  """
  def getStatus(this) do
    call(this, @status_path)
  end

  @doc """
  Get details for an Organization name.
  ## Parameters
  * `this` - Ipfinder
  * `path` - Organization name.
  ## Examples
  ```ex
  {:ok, range} = Ipfinder.getRanges(Ipfinder,"Telecom Algeria")
  ```
  """
  def getRanges(this, path) do
    # Organization names may contain spaces, so URL-encode the path segment.
    call(this, @ranges_path <> URI.encode(path))
  end

  @doc """
  Get Firewall data
  ## Parameters
  * `this` - Ipfinder
  * `path` - AS number, alpha-2 country only.
  * `formats` - list formats supported
  ## Examples
  ```ex
  {:ok, range} = Ipfinder.getFirewall(Ipfinder,"DZ",'nginx_deny')
  ```
  """
  def getFirewall(this, path, formats) do
    Firewallvalidation.validate(path, formats)
    call(this, @firewall_path <> path, formats)
  end

  @doc """
  Get Domain IP
  ## Parameters
  * `this` - Ipfinder
  * `path` - The API supports passing in a single website name domain name
  ## Examples
  ```ex
  {:ok, range} = Ipfinder.getDomain(Ipfinder,"google.com")
  ```
  """
  def getDomain(this, path) do
    Domainvalidation.validate(path)
    call(this, @domain_path <> path)
  end

  @doc """
  Get Domain History IP
  ## Parameters
  * `this` - Ipfinder
  * `path` - The API supports passing in a single website name domain name
  ## Examples
  ```ex
  {:ok, range} = Ipfinder.getDomainHistory(Ipfinder,"google.com")
  ```
  """
  def getDomainHistory(this, path) do
    Domainvalidation.validate(path)
    call(this, @domain_h_path <> path)
  end

  @doc """
  Get list Domain By ASN, Country,Ranges
  ## Parameters
  * `this` - Ipfinder
  * `by` - The API supports passing in a single ASN,Country,Ranges
  ## Examples
  ```ex
  {:ok, range} = Ipfinder.getDomainHistory(Ipfinder,"DZ")
  ```
  """
  def getDomainBy(this, by) do
    # NOTE(review): unlike the other getters, `by` is not validated here —
    # confirm whether that is intentional.
    call(this, @domain_by_path <> by)
  end
end
|
lib/ipfinder.ex
| 0.837188
| 0.557243
|
ipfinder.ex
|
starcoder
|
defmodule Segment.Analytics do
  @moduledoc """
  The `Segment.Analytics` module is the easiest way to send Segment events and provides convenience methods for `track`, `identify,` `screen`, `alias`, `group`, and `page` calls
  The functions will then delegate the call to the configured service implementation which can be changed with:
  ```elixir
  config :segment, sender_impl: Segment.Analytics.Batcher,
  ```
  By default (if no configuration is given) it will use `Segment.Analytics.Batcher` to send events in a batch periodically
  """
  alias Segment.Analytics.{Track, Identify, Screen, Context, Alias, Group, Page}

  @type segment_id :: String.t() | integer()

  # Destination for events: a pid or a registered process name.
  # Fix: the specs previously said `pid() | __MODULE__.t()`, but
  # `Segment.Analytics.t()` is never defined (this module has no struct and
  # no `@type t`), so every spec failed Dialyzer's unknown-type check.
  @type service :: pid() | atom()

  @doc """
  Make a call to Segment with an event. Should be of type `Track, Identify, Screen, Alias, Group or Page`
  """
  @spec send(Segment.segment_event(), service()) :: :ok
  def send(%{__struct__: mod} = event, pid \\ __MODULE__)
      when mod in [Track, Identify, Screen, Alias, Group, Page] do
    call(event, pid)
  end

  @doc """
  `track` lets you record the actions your users perform. Every action triggers what Segment call an “event”, which can also have associated properties as defined in the
  `Segment.Analytics.Track` struct
  See [https://segment.com/docs/spec/track/](https://segment.com/docs/spec/track/)
  """
  @spec track(Segment.Analytics.Track.t(), service()) :: :ok
  def track(t = %Track{}, pid \\ __MODULE__), do: call(t, pid)

  @doc """
  `track` lets you record the actions your users perform. Every action triggers what Segment call an “event”, which can also have associated properties. `track/4` takes a `user_id`, an
  `event_name`, optional additional `properties` and an optional `Segment.Analytics.Context` struct.
  See [https://segment.com/docs/spec/track/](https://segment.com/docs/spec/track/)
  """
  @spec track(segment_id(), String.t(), map(), Segment.Analytics.Context.t(), service()) :: :ok
  def track(user_id, event_name, properties, context \\ Context.new(), pid \\ __MODULE__)
      when is_bitstring(event_name) do
    %Track{
      userId: user_id,
      event: event_name,
      properties: properties,
      context: context
    }
    |> call(pid)
  end

  @doc """
  `identify` lets you tie a user to their actions and record traits about them as defined in the
  `Segment.Analytics.Identify` struct
  See [https://segment.com/docs/spec/identify/](https://segment.com/docs/spec/identify/)
  """
  @spec identify(Segment.Analytics.Identify.t(), service()) :: :ok
  def identify(i = %Identify{}, pid \\ __MODULE__), do: call(i, pid)

  @doc """
  `identify` lets you tie a user to their actions and record traits about them. `identify/3` takes a `user_id`, optional additional `traits` and an optional `Segment.Analytics.Context` struct.
  See [https://segment.com/docs/spec/identify/](https://segment.com/docs/spec/identify/)
  """
  @spec identify(segment_id(), map(), Segment.Analytics.Context.t(), service()) :: :ok
  def identify(user_id, traits, context, pid \\ __MODULE__) do
    %Identify{userId: user_id, traits: traits, context: context}
    |> call(pid)
  end

  @doc """
  `screen` let you record whenever a user sees a screen of your mobile app with properties defined in the
  `Segment.Analytics.Screen` struct
  See [https://segment.com/docs/spec/screen/](https://segment.com/docs/spec/screen/)
  """
  @spec screen(Segment.Analytics.Screen.t(), service()) :: :ok
  def screen(s = %Screen{}, pid \\ __MODULE__), do: call(s, pid)

  @doc """
  `screen` let you record whenever a user sees a screen of your mobile app. `screen/4` takes a `user_id`, an optional `screen_name`, optional `properties` and an optional `Segment.Analytics.Context` struct.
  See [https://segment.com/docs/spec/screen/](https://segment.com/docs/spec/screen/)
  """
  @spec screen(segment_id(), String.t(), map(), Segment.Analytics.Context.t(), service()) :: :ok
  def screen(user_id, screen_name, properties, context \\ Context.new(), pid \\ __MODULE__) do
    %Screen{
      userId: user_id,
      name: screen_name,
      properties: properties,
      context: context
    }
    |> call(pid)
  end

  @doc """
  `alias` is how you associate one identity with another with properties defined in the `Segment.Analytics.Alias` struct
  See [https://segment.com/docs/spec/alias/](https://segment.com/docs/spec/alias/)
  """
  @spec alias(Segment.Analytics.Alias.t(), service()) :: :ok
  def alias(a = %Alias{}, pid \\ __MODULE__), do: call(a, pid)

  @doc """
  `alias` is how you associate one identity with another. `alias/3` takes a `user_id` and a `previous_id` to map from. It also takes an optional `Segment.Analytics.Context` struct.
  See [https://segment.com/docs/spec/alias/](https://segment.com/docs/spec/alias/)
  """
  @spec alias(segment_id(), segment_id(), Segment.Analytics.Context.t(), service()) :: :ok
  def alias(user_id, previous_id, context, pid \\ __MODULE__) do
    %Alias{userId: user_id, previousId: previous_id, context: context}
    |> call(pid)
  end

  @doc """
  The `group` call is how you associate an individual user with a group with the properties in the defined in the `Segment.Analytics.Group` struct
  See [https://segment.com/docs/spec/group/](https://segment.com/docs/spec/group/)
  """
  @spec group(Segment.Analytics.Group.t(), service()) :: :ok
  def group(g = %Group{}, pid \\ __MODULE__), do: call(g, pid)

  @doc """
  The `group` call is how you associate an individual user with a group. `group/4` takes a `user_id` and a `group_id` to associate it with. It also takes optional `traits` of the group and
  an optional `Segment.Analytics.Context` struct.
  See [https://segment.com/docs/spec/group/](https://segment.com/docs/spec/group/)
  """
  @spec group(segment_id(), segment_id(), map(), Segment.Analytics.Context.t(), service()) :: :ok
  def group(user_id, group_id, traits, context \\ Context.new(), pid \\ __MODULE__) do
    %Group{userId: user_id, groupId: group_id, traits: traits, context: context}
    |> call(pid)
  end

  @doc """
  The `page` call lets you record whenever a user sees a page of your website with the properties defined in the `Segment.Analytics.Page` struct
  See [https://segment.com/docs/spec/page/](https://segment.com/docs/spec/page/)
  """
  @spec page(Segment.Analytics.Page.t(), service()) :: :ok
  def page(p = %Page{}, pid \\ __MODULE__), do: call(p, pid)

  @doc """
  The `page` call lets you record whenever a user sees a page of your website. `page/4` takes a `user_id` and an optional `page_name`, optional `properties` and an optional `Segment.Analytics.Context` struct.
  See [https://segment.com/docs/spec/page/](https://segment.com/docs/spec/page/)
  """
  @spec page(segment_id(), String.t(), map(), Segment.Analytics.Context.t(), service()) :: :ok
  def page(user_id, page_name, properties, context \\ Context.new(), pid \\ __MODULE__) do
    %Page{userId: user_id, name: page_name, properties: properties, context: context}
    |> call(pid)
  end

  @doc """
  Delegates the event to whichever sender implementation is configured.
  """
  @spec call(Segment.segment_event(), service()) :: :ok
  def call(event, pid \\ __MODULE__), do: Segment.Config.service().call(event, pid)
end
|
lib/segment/analytics.ex
| 0.892966
| 0.843766
|
analytics.ex
|
starcoder
|
defmodule RFC3986.Normalize do
  @moduledoc """
  Normalizes a captured URI. Don't use this directly, it will be called after
  matching an input.
  Copyright 2015 <NAME> <<EMAIL>>
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at
  http://www.apache.org/licenses/LICENSE-2.0
  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
  """

  # fragment = *( pchar / "/" / "?" )

  @spec parse(RFC3986.Result.t) :: RFC3986.Result.t
  # A result that already failed to match is passed through untouched.
  def parse(state = %{error: error}) when error != nil do
    state
  end

  # Normalizes the port (charlist -> integer), splits userinfo into
  # username/password, and explodes the raw query into a key/value map.
  def parse(state) do
    state |> port |> userinfo |> query_string
  end

  defp port(state = %{port: nil}) do
    state
  end

  defp port(state) do
    # The port was captured as a charlist; keep the leading integer part only.
    {p, _} = Integer.parse to_string(state.port)
    %{state | port: p}
  end

  defp userinfo(state = %{userinfo: nil}) do
    state
  end

  defp userinfo(state) do
    userinfo state, state.userinfo, []
  end

  # Walks the userinfo charlist. Chars before the first ':' accumulate
  # (reversed) into `username`; whatever follows the ':' becomes `password`,
  # or nil when the list is exhausted without a ':'.
  defp userinfo(state, [char|rest], acc) do
    if char == ?: || Enum.empty?(rest) do
      # Keep the final char unless it is the ':' separator itself.
      acc = if char !== ?: do
        [char|acc]
      else
        acc
      end
      rest = if Enum.empty?(rest) do
        nil
      else
        rest
      end
      %{state | username: Enum.reverse(acc), password: rest}
    else
      userinfo(state, rest, [char|acc])
    end
  end

  defp query_string(state = %{query: nil}) do
    state
  end

  defp query_string(state) do
    query_string state, state.query, []
  end

  # End of input and no pending key: nothing left to store.
  defp query_string(state, [], []) do
    state
  end

  # End of input with a pending key (e.g. "...&flag"): store it with nil value.
  defp query_string(state = %{query_string: map}, [], acc) do
    k = Enum.reverse acc
    %{state | query_string: Map.put(map, k, nil)}
  end

  # '&' terminates a value-less key (e.g. "a&b=1").
  defp query_string(state = %{query_string: map}, [?&|rest], acc) do
    k = Enum.reverse acc
    query_string %{state | query_string: Map.put(map, k, nil)}, rest, []
  end

  # '=' terminates a key; consume its value up to the next '&'.
  defp query_string(state = %{query_string: map}, [?=|rest], acc) do
    k = Enum.reverse acc
    {v, rest} = query_string_value rest
    query_string %{state | query_string: Map.put(map, k, v)}, rest, []
  end

  # Any other char extends the current key.
  defp query_string(state, [char|rest], acc) do
    query_string state, rest, [char|acc]
  end

  # Consumes a query value, returning {value | nil, remaining_chars}.
  defp query_string_value([]) do
    {nil, []}
  end

  defp query_string_value(text) do
    query_string_value text, []
  end

  defp query_string_value([], []) do
    {nil, []}
  end

  defp query_string_value([], acc) do
    {Enum.reverse(acc), []}
  end

  # '&' ends the value; the separator is consumed.
  defp query_string_value([?&|rest], acc) do
    {Enum.reverse(acc), rest}
  end

  defp query_string_value([char|rest], acc) do
    query_string_value rest, [char|acc]
  end
end
|
lib/ex_rfc3986/normalize.ex
| 0.76908
| 0.453625
|
normalize.ex
|
starcoder
|
defmodule Ecto.Model.Dependent do
  @moduledoc """
  Defines callbacks for handling dependencies (associations).
  Such callbacks are typically declared via the `has_many/3` macro
  in your model's `schema` block. For example:
      has_many :comments, MyApp.Comment, on_delete: :fetch_and_delete
  ## `:on_delete` options
  There are four different behaviors you can set for your associations
  when the parent is deleted:
  * `:nothing` - Does nothing to the association;
  * `:delete_all` - Deletes all associations without triggering lifecycle callbacks;
  * `:nilify_all` - Sets model reference to nil for each association without triggering
  any lifecycle callback;
  * `:fetch_and_delete` - Explicitly fetch all associations and delete them one by one,
  triggering any `before_delete` and `after_delete` callbacks;
  Keep in mind these options are only available for `has_many/3` macros.
  ## Alternatives
  Ecto also provides an `:on_delete` option when using `references/2` in migrations.
  This allows you to set what to perform when an entry is deleted in your schema
  effectively at the database level. Relying on the database is often the safest
  way to perform this operation and should be preferred.
  However using the `:on_delete` option may be more flexible specially if you have
  logic that needs to be expressed on the application side or if your database does
  not support references.
  """

  # `:nothing` is intentionally absent: it requires no callback registration.
  @on_delete_callbacks [:fetch_and_delete, :nilify_all, :delete_all]

  alias Ecto.Changeset

  defmacro __using__(_) do
    quote do
      @before_compile Ecto.Model.Dependent
    end
  end

  @doc false
  # Loads every associated record and deletes them one by one so that each
  # record's before_delete/after_delete lifecycle callbacks fire.
  def fetch_and_delete(%Changeset{repo: repo, model: model} = changeset, assoc_field, _related_key) do
    query = Ecto.Model.assoc(model, assoc_field)
    assocs = repo.all(query)
    Enum.each assocs, fn (assoc) -> repo.delete!(assoc) end
    changeset
  end

  @doc false
  # Issues a single DELETE for all associated records; lifecycle callbacks
  # are skipped.
  def delete_all(%Changeset{repo: repo, model: model} = changeset, assoc_field, _related_key) do
    query = Ecto.Model.assoc(model, assoc_field)
    repo.delete_all(query)
    changeset
  end

  @doc false
  # Nulls out the foreign key (`related_key`) on all associated records in a
  # single UPDATE; lifecycle callbacks are skipped.
  def nilify_all(%Changeset{repo: repo, model: model} = changeset, assoc_field, related_key) do
    query = Ecto.Model.assoc(model, assoc_field)
    repo.update_all(query, set: [{related_key, nil}])
    changeset
  end

  # At compile time, registers a `before_delete` callback for every declared
  # association whose `:on_delete` option names one of the callbacks above.
  defmacro __before_compile__(env) do
    assocs = Module.get_attribute(env.module, :ecto_assocs)
    for {_assoc_name, assoc} <- assocs,
        Map.get(assoc, :on_delete) in @on_delete_callbacks do
      on_delete = assoc.on_delete
      related_key = assoc.related_key
      assoc_field = assoc.field
      quote do
        before_delete Ecto.Model.Dependent, unquote(on_delete),
          [unquote(assoc_field), unquote(related_key)]
      end
    end
  end
end
|
lib/ecto/model/dependent.ex
| 0.865437
| 0.528777
|
dependent.ex
|
starcoder
|
defmodule JSONDiff do
  @moduledoc ~S"""
  JSONDiff is an Elixir implementation of the diffing element of the JSON Patch format,
  described in [RFC 6902](http://tools.ietf.org/html/rfc6902).
  This library only handles diffing. For patching, see the wonderful [JSONPatch library](https://github.com/gamache/json_patch_elixir).
  This library only supports add, replace and remove operations.
  It is based on the very fast JavaScript library [JSON-Patch](https://github.com/Starcounter-Jack/JSON-Patch)
  ## Examples
      iex> JSONDiff.diff(%{"a" => 1}, %{"a" => 2})
      [%{"op" => "replace", "path" => "/a", "value" => 2}]
      iex> JSONDiff.diff([1], [2])
      [%{"op" => "replace", "path" => "/0", "value" => 2}]
  ## Installation
      # mix.exs
      def deps do
        [
          {:json_diff, "~> 0.1.0"}
        ]
      end
  """

  @doc """
  Computes the JSON Patch operations (add / replace / remove) that transform
  `old` into `new`.

  `patches` and `path` are internal accumulators used while recursing into
  nested maps/lists; callers normally pass only `old` and `new`.
  """
  def diff(old, new, patches \\ [], path \\ "") do
    {deleted, patches, old_keys} = patches_for_old(old, new, patches, path)
    new_keys = list_or_map_keys_or_indexes(new)

    # Fast path. The original code had `unless deleted or length(new_keys) !=
    # length(old_keys), do: patches` here, whose result was silently
    # discarded — dead code. When nothing was removed and the key counts
    # match, every key of `new` already exists in `old`, so no :add patches
    # are possible and scanning `new_keys` can be skipped.
    if deleted or length(new_keys) != length(old_keys) do
      Enum.reduce(new_keys, patches, fn key, patches ->
        if has_key_or_index?(old, key) do
          patches
        else
          add_patch(patches, patch(:add, path, key, item(new, key)))
        end
      end)
    else
      patches
    end
  end

  # Emits :remove / :replace patches (recursing into containers present on
  # both sides) for the keys of `old`.
  # Returns {any_key_removed?, patches, old_keys}.
  # (`@doc false` was dropped from these `defp`s: private functions cannot be
  # documented and the attribute only produced compiler warnings.)
  defp patches_for_old(old, new, patches, path) do
    old_keys =
      old
      |> list_or_map_keys_or_indexes()
      |> Enum.reverse()

    {deleted, patches} = Enum.reduce(old_keys, {false, patches}, old_key_reducer(old, new, path))
    {deleted, patches, old_keys}
  end

  # Builds the reducer that compares one key of `old` against `new`.
  defp old_key_reducer(old, new, path) do
    fn key, {deleted, patches} ->
      old_val = item(old, key)

      case has_key_or_index?(new, key) do
        # The key for the old exists in the new
        true ->
          new_val = item(new, key)

          cond do
            # Both are maps or lists, so recurse to diff the child values
            map_or_list?(old_val) and map_or_list?(new_val) ->
              child_patches = diff(old_val, new_val, [], path_component(path, key))
              {deleted || false, add_patch(patches, child_patches)}

            # No changes, do nothing
            new_val === old_val ->
              {deleted || false, patches}

            # Changed: replace old value with new value
            true ->
              {deleted || false, add_patch(patches, patch(:replace, path, key, new_val))}
          end

        # The key for the old does not exist in the new
        false ->
          {true, add_patch(patches, patch(:remove, path, key))}
      end
    end
  end

  defp add_patch(patches, new_patches) when is_list(new_patches), do: patches ++ new_patches
  defp add_patch(patches, new_patch), do: patches ++ [new_patch]

  defp has_key_or_index?(map, key) when is_map(map) and is_binary(key) do
    Map.has_key?(map, key)
  end

  defp has_key_or_index?(map, key) when is_map(map) and is_atom(key) do
    Map.has_key?(map, key)
  end

  defp has_key_or_index?(list, index) when is_list(list) and is_integer(index) do
    # Bounds check. The previous `Enum.at(list, index) != nil` wrongly
    # reported an in-range element whose value is `nil` as a missing index,
    # producing a spurious remove+add pair instead of no patch.
    index >= 0 and index < length(list)
  end

  defp has_key_or_index?(_, _), do: false

  defp patch(:add, path, key, val) do
    %{
      "op" => "add",
      "path" => path_component(path, key),
      "value" => val
    }
  end

  defp patch(:replace, path, key, val) do
    %{
      "op" => "replace",
      "path" => path_component(path, key),
      "value" => val
    }
  end

  defp patch(:remove, path, key) do
    %{
      "op" => "remove",
      "path" => path_component(path, key)
    }
  end

  # RFC 6901 pointer for `key` under `path`.
  defp path_component(path, key), do: path <> "/" <> escape_path_component(key)

  # Keys of a map, or the 0-based indexes of a list.
  defp list_or_map_keys_or_indexes(map) when is_map(map), do: Map.keys(map)
  defp list_or_map_keys_or_indexes([]), do: []

  defp list_or_map_keys_or_indexes(list) when is_list(list) do
    Enum.to_list(0..(length(list) - 1))
  end

  defp map_or_list?(a) when is_list(a) or is_map(a), do: true
  defp map_or_list?(_), do: false

  defp item(enum, key) when is_map(enum), do: Map.fetch!(enum, key)
  defp item(enum, index) when is_list(enum), do: Enum.fetch!(enum, index)

  @doc """
  Escapes a single JSON Pointer reference token per RFC 6901:
  `~` becomes `~0` and `/` becomes `~1`.
  """
  def escape_path_component(path) when is_integer(path) do
    path
    |> to_string()
    |> escape_path_component()
  end

  def escape_path_component(path) when is_binary(path) do
    case {:binary.match(path, "/"), :binary.match(path, "~")} do
      {:nomatch, :nomatch} ->
        path

      _ ->
        # Order matters: escape `~` first so the `~1` introduced for `/`
        # is not itself re-escaped.
        path = Regex.replace(~r/~/, path, "~0")
        Regex.replace(~r/\//, path, "~1")
    end
  end

  def escape_path_component(path) when is_atom(path) do
    escape_path_component(Atom.to_string(path))
  end
end
|
lib/json_diff.ex
| 0.753648
| 0.577227
|
json_diff.ex
|
starcoder
|
defmodule Safira.Contest do
  @moduledoc """
  Context for the badge contest: badges, referrals, redeems, daily token
  snapshots and the leaderboards built from them.
  """

  import Ecto.Query, warn: false

  alias Safira.Repo
  alias Safira.Contest.Redeem
  alias Safira.Contest.Badge
  alias Safira.Interaction
  alias Ecto.Multi
  alias Safira.Contest.DailyToken

  def list_badges do
    Repo.all(Badge)
  end

  # Secret badges (type == 1) redeemed by at least one real attendee
  # (not a volunteer, nickname set); `distinct: :badge_id` keeps one row
  # per badge before mapping to the badge itself.
  def list_secret do
    Repo.all(from r in Redeem,
      join: b in assoc(r, :badge),
      join: a in assoc(r, :attendee),
      where: b.type == ^1 and not(a.volunteer) and not(is_nil(a.nickname)),
      preload: [badge: b, attendee: a],
      distinct: :badge_id)
    |> Enum.map(fn x -> x.badge end)
  end

  # Badges that are neither type 0 nor type 1.
  # NOTE(review): type 1 is "secret" (see list_secret); the meaning of
  # type 0 is not visible here — confirm against the Badge schema.
  def list_normals do
    Repo.all(from b in Badge,
      where: b.type != ^1 and b.type != ^0)
  end

  # "Conservative" listing: only secret badges someone actually redeemed,
  # plus all normal badges.
  def list_badges_conservative do
    list_secret() ++ list_normals()
  end

  def get_badge!(id), do: Repo.get!(Badge, id)

  # Raises if no badge has exactly this name.
  def get_badge_name!(name) do
    Repo.get_by!(Badge, name: name)
  end

  def get_badge_preload!(id) do
    Repo.get!(Badge, id)
    |> Repo.preload(:attendees)
  end

  # NOTE(review): despite the name, this raises when not found
  # (Repo.get_by!); a `!` suffix would match Elixir convention.
  def get_badge_description(description) do
    Repo.get_by!(Badge, description: description)
  end

  def create_badge(attrs \\ %{}) do
    %Badge{}
    |> Badge.changeset(attrs)
    |> Repo.insert()
  end

  # Builds (but does not run) a Multi inserting every badge in the list,
  # keyed by list index; the caller runs it with Repo.transaction/1.
  def create_badges(list_badges) do
    list_badges
    |> Enum.with_index()
    |> Enum.reduce(Multi.new, fn {x, index}, acc ->
      Ecto.Multi.insert(acc, index, Badge.changeset(%Badge{}, x))
    end)
  end

  def update_badge(%Badge{} = badge, attrs) do
    badge
    |> Badge.changeset(attrs)
    |> Repo.update()
  end

  def delete_badge(%Badge{} = badge) do
    Repo.delete(badge)
  end

  def change_badge(%Badge{} = badge) do
    Badge.changeset(badge, %{})
  end

  alias Safira.Contest.Referral

  def list_referrals do
    Repo.all(Referral)
  end

  def get_referral!(id), do: Repo.get!(Referral, id)

  def get_referral_preload!(id) do
    Repo.get!(Referral, id)
    |> Repo.preload(:badge)
  end

  def create_referral(attrs \\ %{}) do
    %Referral{}
    |> Referral.changeset(attrs)
    |> Repo.insert()
  end

  def update_referral(%Referral{} = referral, attrs) do
    referral
    |> Referral.changeset(attrs)
    |> Repo.update()
  end

  def delete_referral(%Referral{} = referral) do
    Repo.delete(referral)
  end

  def change_referral(%Referral{} = referral) do
    Referral.changeset(referral, %{})
  end

  def list_redeems do
    Repo.all(Redeem)
  end

  # Redeems of secret badges by real attendees, with badge and attendee
  # preloaded (no distinct — one row per redeem).
  def list_redeems_stats do
    Repo.all(from r in Redeem,
      join: b in assoc(r, :badge),
      join: a in assoc(r, :attendee),
      where: b.type == ^1 and not(a.volunteer) and not(is_nil(a.nickname)),
      preload: [badge: b, attendee: a])
  end

  def get_redeem!(id), do: Repo.get!(Redeem, id)

  # Returns nil when the attendee has not redeemed this badge.
  def get_keys_redeem(attendee_id, badge_id) do
    Repo.get_by(Redeem, [attendee_id: attendee_id, badge_id: badge_id])
  end

  # Creates a redeem inside a single transaction:
  #   1. insert the Redeem row
  #   2. credit the attendee with the badge's tokens (doubled while the
  #      badge is spotlighted — see calculate_badge_tokens/1) plus one
  #      extra contest entry
  #   3. upsert today's DailyToken snapshot with the new balance
  # Returns {:ok, redeem} or {:error, changeset} of the failing step.
  def create_redeem(attrs \\ %{}) do
    Multi.new()
    |> Multi.insert(:redeem, Redeem.changeset(%Redeem{}, attrs))
    |> Multi.update(:attendee, fn %{redeem: redeem} ->
      redeem = Repo.preload(redeem, [:badge, :attendee])
      Safira.Accounts.Attendee.update_on_redeem_changeset(
        redeem.attendee,
        %{
          token_balance: redeem.attendee.token_balance + calculate_badge_tokens(redeem.badge),
          entries: redeem.attendee.entries + 1
        }
      )
    end)
    |> Multi.insert_or_update(:daily_token, fn %{attendee: attendee} ->
      # Snapshot is keyed on midnight UTC of the current day.
      {:ok, date, _} = DateTime.from_iso8601("#{Date.utc_today()}T00:00:00Z")
      DailyToken.changeset(%DailyToken{}, %{quantity: attendee.token_balance, attendee_id: attendee.id, day: date})
    end)
    |> Repo.transaction()
    |> case do
      {:ok, result} ->
        {:ok, Map.get(result, :redeem)}
      {:error, _failed_operation, changeset, _changes_so_far} ->
        {:error, changeset}
    end
  end

  def update_redeem(%Redeem{} = redeem, attrs) do
    redeem
    |> Redeem.changeset(attrs)
    |> Repo.update()
  end

  def delete_redeem(%Redeem{} = redeem) do
    Repo.delete(redeem)
  end

  def change_redeem(%Redeem{} = redeem) do
    Redeem.changeset(redeem, %{})
  end

  # Active attendees sorted by badge count, highest first.
  def list_leaderboard do
    Safira.Accounts.list_active_attendees
    |> Enum.sort(&(&1.badge_count >= &2.badge_count))
  end

  # Per-day leaderboard: attendees with redeems on `date`, ranked by badge
  # count and then by that day's token snapshot.
  def list_daily_leaderboard(date) do
    Repo.all(
      from a in Safira.Accounts.Attendee,
      join: r in Redeem, on: a.id == r.attendee_id,
      join: b in Badge, on: r.badge_id == b.id,
      join: t in DailyToken, on: a.id == t.attendee_id,
      where: not(is_nil a.user_id) and fragment("?::date", r.inserted_at) == ^date and b.type != ^0 and fragment("?::date", t.day) == ^date,
      select: %{attendee: a, token_count: t.quantity},
      preload: [badges: b, daily_tokens: t]
    )
    |> Enum.map(fn a -> Map.put(a, :badge_count, length(a.attendee.badges)) end)
    |> Enum.sort_by(&{&1.badge_count, &1.token_count}, &>=/2)
  end

  def top_list_leaderboard(n) do
    list_leaderboard()
    |> Enum.take(n)
    |> Enum.map(fn a -> %{a.name => a.badge_count} end)
  end

  # Builds "nickname:badge_name" raffle entries — one per non-type-0 badge —
  # for every non-volunteer attendee holding at least 10 such badges.
  def get_winner do
    Repo.all(from a in Safira.Accounts.Attendee,
      where: not (is_nil a.user_id) and not(a.volunteer))
    |> Repo.preload([badges: from(b in Badge, where: b.type != ^0)])
    |> Enum.map(fn x -> Map.put(x, :badge_count, length(Enum.filter(x.badges, fn x -> x.type != 0 end))) end)
    |> Enum.filter(fn a -> a.badge_count >= 10 end)
    |> Enum.map(fn a -> Enum.map(Enum.filter(a.badges, fn b -> b.type != 0 end), fn x -> "#{a.nickname}:#{x.name}" end) end)
    |> List.flatten
  end

  # Spotlighted badges are worth double tokens while the spotlight lasts.
  defp calculate_badge_tokens(badge) do
    cond do
      Interaction.is_badge_spotlighted(badge.id) -> badge.tokens * 2
      true -> badge.tokens
    end
  end
end
|
lib/safira/contest/contest.ex
| 0.519278
| 0.449272
|
contest.ex
|
starcoder
|
defmodule Ockam.Session.Pluggable.Responder do
  @moduledoc """
  Routing session responder
  If :init_message is present in the options - processes the message,
  otherwise waits for it in :handshake stage
  On processing the handshake calls `handshake.handle_responder/1`, which
  generates handshake response message and options
  Starts the data worker with worker_options merged with
  the options from `handshake.handle_responder/1`
  If worker started successfully, sends the handshake response
  and moves to the :data stage
  All messages in :data stage are processed with the data worker module
  Options:
  `worker_mod` - data worker module
  `worker_options` - data worker options, defaults to []
  `handshake` - handshake module (defaults to `Ockam.Session.Handshake.Default`)
  `handshake_options` - options for handshake module, defaults to []
  `init_message` - optional init message
  """
  use Ockam.AsymmetricWorker

  # NOTE(review): `Message` is aliased but not referenced in this module.
  alias Ockam.Message
  alias Ockam.Session.Pluggable, as: RoutingSession

  require Logger

  @dialyzer {:nowarn_function, handle_inner_message: 2, handle_outer_message: 2}

  @impl true
  def address_prefix(_options), do: "S_R_"

  @impl true
  def inner_setup(options, state) do
    # Snapshot of the pre-session worker state: kept aside so the data
    # worker can later be initialized from it (see switch_to_data_stage/4).
    base_state = state
    worker_mod = Keyword.fetch!(options, :worker_mod)
    worker_options = Keyword.get(options, :worker_options, [])
    handshake = Keyword.get(options, :handshake, Ockam.Session.Handshake.Default)
    handshake_options = Keyword.get(options, :handshake_options, [])

    handshake_state = %{
      worker_address: state.inner_address,
      handshake_address: state.inner_address
    }

    state =
      Map.merge(state, %{
        worker_mod: worker_mod,
        worker_options: worker_options,
        base_state: base_state,
        stage: :handshake,
        handshake: handshake,
        handshake_options: handshake_options,
        handshake_state: handshake_state
      })

    case Keyword.get(options, :init_message) do
      nil ->
        ## Stay in the handshake stage, wait for init message
        {:ok, state}

      %{payload: _} = message ->
        handle_handshake_message(message, state)
    end
  end

  @impl true
  # In the :handshake stage only inner messages advance the session;
  # anything else is dropped.
  def handle_message(message, %{stage: :handshake} = state) do
    case message_type(message, state) do
      :inner ->
        handle_handshake_message(message, state)

      _other ->
        ## TODO: buffering option?
        Logger.debug("Ignoring non-inner message #{inspect(message)} in handshake stage")
        {:ok, state}
    end
  end

  # In the :data stage all messages are delegated to the data worker.
  def handle_message(message, %{stage: :data} = state) do
    RoutingSession.handle_data_message(message, state)
  end

  # Runs one handshake step. :ready results start the data worker (with or
  # without a response to send); :next results stay in the handshake stage.
  def handle_handshake_message(message, state) do
    handshake = Map.fetch!(state, :handshake)
    handshake_options = Map.fetch!(state, :handshake_options)
    handshake_state = Map.fetch!(state, :handshake_state)

    case handshake.handle_responder(handshake_options, message, handshake_state) do
      {:ready, response, options, handshake_state} ->
        switch_to_data_stage(response, options, handshake_state, state)

      {:ready, options, handshake_state} ->
        # No response message to send: the default `response \\ nil` applies.
        switch_to_data_stage(options, handshake_state, state)

      {:next, response, handshake_state} ->
        send_handshake_response(response)
        {:ok, Map.put(state, :handshake_state, handshake_state)}

      {:next, handshake_state} ->
        {:ok, Map.put(state, :handshake_state, handshake_state)}

      {:error, err} ->
        {:error, err}
    end
  end

  # Starts the data worker from the pre-session base state; only if that
  # succeeds is the (optional) handshake response sent and the session moved
  # to the :data stage.
  defp switch_to_data_stage(response \\ nil, handshake_options, handshake_state, state) do
    worker_mod = Map.fetch!(state, :worker_mod)
    worker_options = Map.fetch!(state, :worker_options)
    base_state = Map.fetch!(state, :base_state)

    # Handshake-derived options take precedence over the static ones.
    options = Keyword.merge(worker_options, handshake_options)

    case worker_mod.setup(options, base_state) do
      {:ok, data_state} ->
        send_handshake_response(response)

        {:ok,
         Map.merge(state, %{
           data_state: data_state,
           handshake_state: handshake_state,
           stage: :data
         })}

      {:error, err} ->
        Logger.error(
          "Error starting responder data module: #{worker_mod}, reason: #{inspect(err)}"
        )

        ## TODO: should we send handshake error?
        {:error, err}
    end
  end

  # Routes the handshake response message; nil means nothing to send.
  def send_handshake_response(response) do
    case response do
      nil ->
        :ok

      %{} ->
        Logger.info("Send handshake #{inspect(response)}")
        Ockam.Router.route(response)
    end
  end
end
|
implementations/elixir/ockam/ockam/lib/ockam/session/pluggable/responder.ex
| 0.691393
| 0.460653
|
responder.ex
|
starcoder
|
defmodule Tesla.Middleware.TimberLogger do
  @moduledoc """
  Tesla middleware for logging outgoing requests to Timber.io.
  Using this middleware will log all requests and responses using Timber.io formatting and metadata.
  ### Example usage
  ```
  defmodule MyClient do
    use Tesla
    plug Tesla.Middleware.TimberLogger, service_name: "my-service"
  end
  ```
  ### Options
  - `:service_name` - the name of the external service (optional)
  - `:log_level` - custom function for calculating log level (see below)
  ### Custom log levels
  By default, the following log levels will be used:
  - `:error` - for errors, 5xx and 4xx responses
  - `:warn` - for 3xx responses
  - `:info` - for 2xx responses
  You can customize this setting by providing your own `log_level/1` function:
  ```
  defmodule MyClient do
    use Tesla
    plug Tesla.Middleware.TimberLogger, log_level: &my_log_level/1
    def my_log_level(env) do
      case env.status do
        404 -> :info
        _ -> :default
      end
    end
  end
  ```
  """
  require Logger

  alias Tesla.Env
  alias Timber.Events.{HTTPRequestEvent, HTTPResponseEvent}

  @behaviour Tesla.Middleware

  @impl true
  # Logs the outgoing request, runs the rest of the middleware stack, then
  # logs the response (or error) at a level derived from the outcome.
  def call(env, next, opts) do
    Logger.info(fn -> log_request(env, opts) end)
    timer = Timber.start_timer()
    response = Tesla.run(env, next)
    level = log_level(response, opts)
    Logger.log(level, fn -> log_response(response, timer, opts) end)
    response
  end

  defp log_level({:error, _}, _), do: :error

  # Resolves the log level from the :log_level option: a function may return
  # :default to fall back to the status-based level; a bare atom is used as-is.
  defp log_level({:ok, env}, opts) do
    case Keyword.get(opts, :log_level) do
      nil ->
        default_log_level(env)

      fun when is_function(fun) ->
        case fun.(env) do
          :default -> default_log_level(env)
          level -> level
        end

      atom when is_atom(atom) ->
        atom
    end
  end

  defp default_log_level(env) do
    cond do
      env.status >= 400 -> :error
      env.status >= 300 -> :warn
      true -> :info
    end
  end

  # Builds the {message, metadata} tuple returned to Logger's lazy function.
  defp log_request(env, opts) do
    event =
      HTTPRequestEvent.new(
        direction: "outgoing",
        url: serialize_url(env),
        method: env.method,
        headers: env.headers,
        body: env.body,
        request_id: Logger.metadata()[:request_id],
        service_name: opts[:service_name]
      )

    {HTTPRequestEvent.message(event), event: event}
  end

  # Bug fix: this clause runs *inside* the `Logger.log/2` lazy function in
  # call/3. The old body invoked `Logger.error/1` itself and returned `:ok`,
  # which double-logged the failure and handed `:ok` (not chardata) back to
  # `Logger.log/2` as the message. Returning the message is sufficient —
  # the enclosing `Logger.log/2` already logs at :error for error tuples.
  defp log_response({:error, reason}, _, _), do: inspect(reason)

  defp log_response({:ok, env}, timer, opts) do
    time_ms = Timber.duration_ms(timer)

    event =
      HTTPResponseEvent.new(
        direction: "incoming",
        status: env.status,
        time_ms: time_ms,
        headers: env.headers,
        body: normalize_body(env),
        request_id: Logger.metadata()[:request_id],
        service_name: opts[:service_name]
      )

    {HTTPResponseEvent.message(event), event: event}
  end

  defp serialize_url(%Env{url: url, query: query}), do: Tesla.build_url(url, query)

  # Compressed bodies are replaced with a placeholder instead of logging
  # binary garbage.
  defp normalize_body(env) do
    case Tesla.get_header(env, "content-encoding") do
      "gzip" ->
        "[gzipped]"

      "deflate" ->
        "[zipped]"

      _ ->
        env.body
    end
  end
end
|
lib/tesla_timber_logger.ex
| 0.901784
| 0.731514
|
tesla_timber_logger.ex
|
starcoder
|
defmodule ExSenml do
  @moduledoc """
  Toolset to Normalize SenML and other promises
  SenML is an open ietf standard that defines a format for representing simple sensor
  measurements and device parameters in Sensor Measurement Lists (SenML)
  You can find the spec: https://tools.ietf.org/html/rfc8428
  Becoming more and more used within the IoT Domain, as a commonly used standard for Constrained Application Protocol (CoAP)
  LwM2M 1.0 (https://tools.ietf.org/html/draft-ietf-core-senml) and LwM2M 1.0 uses the actual RFC.
  Also adding support for https://tools.ietf.org/html/draft-keranen-core-senml-data-ct-01
  ## Features
  * (Basic) Normalized format for SenML Records (chapter 4.6 Resolve Records), A SenML Record is referred to as "resolved" if it does not contain
  any base value.
  * (TODO) Support Conversion between different representations defined in JavaScript Object Notation (JSON),
  Concise Binary Object Representation (CBOR), eXtensible Markup Language (XML), and Efficient XML Interchange (EXI),
  which share the common SenML data model.
  ## Example 1
      iex> senml_payload_rsv_rec_1 = [%{u: "lon", v: 24.30621},%{u: "lat", v: 60.07965}]
      [%{u: "lon", v: 24.30621}, %{u: "lat", v: 60.07965}]
      iex> ExSenml.validate_and_resolve(senml_payload_rsv_rec_1,"1234")
      {:ok,
       [
         %{n: "1234", t: 1559987179, u: "lat", v: 60.07965},
         %{n: "1234", t: 1559987179, u: "lon", v: 24.30621}
       ]}
  ## Example 2
      iex(1)> senml_payload_base_fields = [
      ...(1)> %{bn: "1234", bt: 1_559_813_429, bu: "%RH", v: 20},
      ...(1)> %{u: "lon", v: 24.30621},
      ...(1)> %{u: "lat", v: 60.07965},
      ...(1)> %{n: "tracker", t: 60, v: 20.3}
      ...(1)> ]
      [
        %{bn: "1234", bt: 1559813429, bu: "%RH", v: 20},
        %{u: "lon", v: 24.30621},
        %{u: "lat", v: 60.07965},
        %{n: "tracker", t: 60, v: 20.3}
      ]
      iex(2)> ExSenml.validate_and_resolve(senml_payload_base_fields,"1234")
      {:ok,
       [
         %{n: "1234/tracker", t: 1559813489, u: "%RH", v: 20.3},
         %{n: "1234", t: 1559813429, u: "lat", v: 60.07965},
         %{n: "1234", t: 1559813429, u: "lon", v: 24.30621}
       ]}
  ## Example 3
      iex(2)> ExSenml.validate_and_resolve(senml_payload_base_fields,"9876")
      {:not_acceptable, "Payload is not valid SenML"}
  ## TODO
  - [x] Basic record normalization
  - [ ] Validate "n" fields characters
  - [ ] Convert Between formats
  - [ ] Proper Documentation
  - [ ] Make the world adopt SenML
  """
  alias ExSenml.SenmlHandler

  # Validates the payload against `device_id` and resolves base fields into
  # each record; see ExSenml.SenmlHandler for the implementation.
  defdelegate validate_and_resolve(payload, device_id), to: SenmlHandler
end
|
lib/ex_senml.ex
| 0.616359
| 0.643476
|
ex_senml.ex
|
starcoder
|
defmodule Concentrate.Encoder.VehiclePositionsEnhanced do
  @moduledoc """
  Encodes a list of parsed data into a VehiclePositions.pb file.
  """
  @behaviour Concentrate.Encoder
  alias Concentrate.{TripDescriptor, VehiclePosition}
  alias VehiclePosition.Consist, as: VehiclePositionConsist
  import Concentrate.Encoder.GTFSRealtimeHelpers
  import Concentrate.Encoder.VehiclePositions, only: [entity_id: 1, trip_descriptor: 1]

  @impl Concentrate.Encoder
  def encode_groups(groups) when is_list(groups) do
    # Build the whole feed map first, then serialize it once at the end.
    %{
      "header" => feed_header(),
      "entity" => Enum.flat_map(groups, &build_entity/1)
    }
    |> Jason.encode!()
  end

  # One feed entity per vehicle position; every position in the group shares
  # the trip descriptor derived from the TripDescriptor.
  def build_entity({%TripDescriptor{} = td, positions, _stus}) do
    trip = trip_descriptor(td)

    Enum.map(positions, fn position ->
      %{"id" => entity_id(position), "vehicle" => vehicle_map(position, trip)}
    end)
  end

  # Vehicles without an associated trip: keep only positions that carry a
  # trip_id and mark each of them UNSCHEDULED.
  def build_entity({nil, positions, _stus}) do
    positions
    |> Enum.reject(fn position -> is_nil(VehiclePosition.trip_id(position)) end)
    |> Enum.map(fn position ->
      trip = %{
        "trip_id" => VehiclePosition.trip_id(position),
        "schedule_relationship" => "UNSCHEDULED"
      }

      %{"id" => entity_id(position), "vehicle" => vehicle_map(position, trip)}
    end)
  end

  # Builds the JSON-ready "vehicle" payload. Nil values are stripped at every
  # level so the encoded output only carries fields that are present.
  defp vehicle_map(%VehiclePosition{} = position, trip) do
    descriptor =
      drop_nil_values(%{
        "id" => VehiclePosition.id(position),
        "label" => VehiclePosition.label(position),
        "license_plate" => VehiclePosition.license_plate(position),
        "consist" => map_if_present(VehiclePosition.consist(position), &consist_map/1)
      })

    location =
      drop_nil_values(%{
        "latitude" => VehiclePosition.latitude(position),
        "longitude" => VehiclePosition.longitude(position),
        "bearing" => VehiclePosition.bearing(position),
        "speed" => VehiclePosition.speed(position)
      })

    drop_nil_values(%{
      "trip" => trip,
      "vehicle" => descriptor,
      "position" => location,
      "stop_id" => VehiclePosition.stop_id(position),
      "current_stop_sequence" => VehiclePosition.stop_sequence(position),
      "current_status" => VehiclePosition.status(position),
      "timestamp" => VehiclePosition.last_updated(position),
      "occupancy_status" => VehiclePosition.occupancy_status(position),
      "occupancy_percentage" => VehiclePosition.occupancy_percentage(position)
    })
  end

  # Applies `fun` to every element when given a list; passes nil straight through.
  defp map_if_present(nil, _fun), do: nil
  defp map_if_present(items, fun) when is_list(items), do: Enum.map(items, fun)

  defp consist_map(consist) do
    %{"label" => VehiclePositionConsist.label(consist)}
  end
end
|
lib/concentrate/encoder/vehicle_positions_enhanced.ex
| 0.721841
| 0.498169
|
vehicle_positions_enhanced.ex
|
starcoder
|
defmodule AWS.IoTThingsGraph do
  @moduledoc """
  AWS IoT Things Graph

  AWS IoT Things Graph provides an integrated set of tools that enable
  developers to connect devices and services that use different standards,
  such as units of measure and communication protocols. AWS IoT Things Graph
  makes it possible to build IoT applications with little to no code by
  connecting devices and services and defining how they interact at an
  abstract level.

  For more information about how AWS IoT Things Graph works, see the [User
  Guide](https://docs.aws.amazon.com/thingsgraph/latest/ug/iot-tg-whatis.html).
  """

  # Every public function below is a thin wrapper that forwards its `input`
  # map to the JSON-1.1 RPC action of the same name via `request/4`.

  @doc """
  Associates a device with a concrete thing that is in the user's registry.
  A thing can be associated with only one device at a time. If you associate
  a thing with a new device id, its previous association will be removed.
  """
  def associate_entity_to_thing(client, input, options \\ []) do
    request(client, "AssociateEntityToThing", input, options)
  end

  @doc """
  Creates a workflow template. Workflows can be created only in the user's
  namespace. (The public namespace contains only entities.) The workflow can
  contain only entities in the specified namespace. The workflow is validated
  against the entities in the latest version of the user's namespace unless
  another namespace version is specified in the request.
  """
  def create_flow_template(client, input, options \\ []) do
    request(client, "CreateFlowTemplate", input, options)
  end

  @doc """
  Creates a system instance.
  This action validates the system instance, prepares the deployment-related
  resources. For Greengrass deployments, it updates the Greengrass group that
  is specified by the `greengrassGroupName` parameter. It also adds a file to
  the S3 bucket specified by the `s3BucketName` parameter. You need to call
  `DeploySystemInstance` after running this action.
  For Greengrass deployments, since this action modifies and adds resources
  to a Greengrass group and an S3 bucket on the caller's behalf, the calling
  identity must have write permissions to both the specified Greengrass group
  and S3 bucket. Otherwise, the call will fail with an authorization error.
  For cloud deployments, this action requires a `flowActionsRoleArn` value.
  This is an IAM role that has permissions to access AWS services, such as
  AWS Lambda and AWS IoT, that the flow uses when it executes.
  If the definition document doesn't specify a version of the user's
  namespace, the latest version will be used by default.
  """
  def create_system_instance(client, input, options \\ []) do
    request(client, "CreateSystemInstance", input, options)
  end

  @doc """
  Creates a system. The system is validated against the entities in the
  latest version of the user's namespace unless another namespace version is
  specified in the request.
  """
  def create_system_template(client, input, options \\ []) do
    request(client, "CreateSystemTemplate", input, options)
  end

  @doc """
  Deletes a workflow. Any new system or deployment that contains this
  workflow will fail to update or deploy. Existing deployments that contain
  the workflow will continue to run (since they use a snapshot of the
  workflow taken at the time of deployment).
  """
  def delete_flow_template(client, input, options \\ []) do
    request(client, "DeleteFlowTemplate", input, options)
  end

  @doc """
  Deletes the specified namespace. This action deletes all of the entities in
  the namespace. Delete the systems and flows that use entities in the
  namespace before performing this action.
  """
  def delete_namespace(client, input, options \\ []) do
    request(client, "DeleteNamespace", input, options)
  end

  @doc """
  Deletes a system instance. Only system instances that have never been
  deployed, or that have been undeployed can be deleted.
  Users can create a new system instance that has the same ID as a deleted
  system instance.
  """
  def delete_system_instance(client, input, options \\ []) do
    request(client, "DeleteSystemInstance", input, options)
  end

  @doc """
  Deletes a system. New deployments can't contain the system after its
  deletion. Existing deployments that contain the system will continue to
  work because they use a snapshot of the system that is taken when it is
  deployed.
  """
  def delete_system_template(client, input, options \\ []) do
    request(client, "DeleteSystemTemplate", input, options)
  end

  @doc """
  **Greengrass and Cloud Deployments**
  Deploys the system instance to the target specified in
  `CreateSystemInstance`.
  **Greengrass Deployments**
  If the system or any workflows and entities have been updated before this
  action is called, then the deployment will create a new Amazon Simple
  Storage Service resource file and then deploy it.
  Since this action creates a Greengrass deployment on the caller's behalf,
  the calling identity must have write permissions to the specified
  Greengrass group. Otherwise, the call will fail with an authorization
  error.
  For information about the artifacts that get added to your Greengrass core
  device when you use this API, see [AWS IoT Things Graph and AWS IoT
  Greengrass](https://docs.aws.amazon.com/thingsgraph/latest/ug/iot-tg-greengrass.html).
  """
  def deploy_system_instance(client, input, options \\ []) do
    request(client, "DeploySystemInstance", input, options)
  end

  @doc """
  Deprecates the specified workflow. This action marks the workflow for
  deletion. Deprecated flows can't be deployed, but existing deployments will
  continue to run.
  """
  def deprecate_flow_template(client, input, options \\ []) do
    request(client, "DeprecateFlowTemplate", input, options)
  end

  @doc """
  Deprecates the specified system.
  """
  def deprecate_system_template(client, input, options \\ []) do
    request(client, "DeprecateSystemTemplate", input, options)
  end

  @doc """
  Gets the latest version of the user's namespace and the public version that
  it is tracking.
  """
  def describe_namespace(client, input, options \\ []) do
    request(client, "DescribeNamespace", input, options)
  end

  @doc """
  Dissociates a device entity from a concrete thing. The action takes only
  the type of the entity that you need to dissociate because only one entity
  of a particular type can be associated with a thing.
  """
  def dissociate_entity_from_thing(client, input, options \\ []) do
    request(client, "DissociateEntityFromThing", input, options)
  end

  @doc """
  Gets definitions of the specified entities. Uses the latest version of the
  user's namespace by default. This API returns the following TDM entities.
  <ul> <li> Properties
  </li> <li> States
  </li> <li> Events
  </li> <li> Actions
  </li> <li> Capabilities
  </li> <li> Mappings
  </li> <li> Devices
  </li> <li> Device Models
  </li> <li> Services
  </li> </ul> This action doesn't return definitions for systems, flows, and
  deployments.
  """
  def get_entities(client, input, options \\ []) do
    request(client, "GetEntities", input, options)
  end

  @doc """
  Gets the latest version of the `DefinitionDocument` and
  `FlowTemplateSummary` for the specified workflow.
  """
  def get_flow_template(client, input, options \\ []) do
    request(client, "GetFlowTemplate", input, options)
  end

  @doc """
  Gets revisions of the specified workflow. Only the last 100 revisions are
  stored. If the workflow has been deprecated, this action will return
  revisions that occurred before the deprecation. This action won't work for
  workflows that have been deleted.
  """
  def get_flow_template_revisions(client, input, options \\ []) do
    request(client, "GetFlowTemplateRevisions", input, options)
  end

  @doc """
  Gets the status of a namespace deletion task.
  """
  def get_namespace_deletion_status(client, input, options \\ []) do
    request(client, "GetNamespaceDeletionStatus", input, options)
  end

  @doc """
  Gets a system instance.
  """
  def get_system_instance(client, input, options \\ []) do
    request(client, "GetSystemInstance", input, options)
  end

  @doc """
  Gets a system.
  """
  def get_system_template(client, input, options \\ []) do
    request(client, "GetSystemTemplate", input, options)
  end

  @doc """
  Gets revisions made to the specified system template. Only the previous 100
  revisions are stored. If the system has been deprecated, this action will
  return the revisions that occurred before its deprecation. This action
  won't work with systems that have been deleted.
  """
  def get_system_template_revisions(client, input, options \\ []) do
    request(client, "GetSystemTemplateRevisions", input, options)
  end

  @doc """
  Gets the status of the specified upload.
  """
  def get_upload_status(client, input, options \\ []) do
    request(client, "GetUploadStatus", input, options)
  end

  @doc """
  Returns a list of objects that contain information about events in a flow
  execution.
  """
  def list_flow_execution_messages(client, input, options \\ []) do
    request(client, "ListFlowExecutionMessages", input, options)
  end

  @doc """
  Lists all tags on an AWS IoT Things Graph resource.
  """
  def list_tags_for_resource(client, input, options \\ []) do
    request(client, "ListTagsForResource", input, options)
  end

  @doc """
  Searches for entities of the specified type. You can search for entities in
  your namespace and the public namespace that you're tracking.
  """
  def search_entities(client, input, options \\ []) do
    request(client, "SearchEntities", input, options)
  end

  @doc """
  Searches for AWS IoT Things Graph workflow execution instances.
  """
  def search_flow_executions(client, input, options \\ []) do
    request(client, "SearchFlowExecutions", input, options)
  end

  @doc """
  Searches for summary information about workflows.
  """
  def search_flow_templates(client, input, options \\ []) do
    request(client, "SearchFlowTemplates", input, options)
  end

  @doc """
  Searches for system instances in the user's account.
  """
  def search_system_instances(client, input, options \\ []) do
    request(client, "SearchSystemInstances", input, options)
  end

  @doc """
  Searches for summary information about systems in the user's account. You
  can filter by the ID of a workflow to return only systems that use the
  specified workflow.
  """
  def search_system_templates(client, input, options \\ []) do
    request(client, "SearchSystemTemplates", input, options)
  end

  @doc """
  Searches for things associated with the specified entity. You can search by
  both device and device model.
  For example, if two different devices, camera1 and camera2, implement the
  camera device model, the user can associate thing1 to camera1 and thing2 to
  camera2. `SearchThings(camera2)` will return only thing2, but
  `SearchThings(camera)` will return both thing1 and thing2.
  This action searches for exact matches and doesn't perform partial text
  matching.
  """
  def search_things(client, input, options \\ []) do
    request(client, "SearchThings", input, options)
  end

  @doc """
  Creates a tag for the specified resource.
  """
  def tag_resource(client, input, options \\ []) do
    request(client, "TagResource", input, options)
  end

  @doc """
  Removes a system instance from its target (Cloud or Greengrass).
  """
  def undeploy_system_instance(client, input, options \\ []) do
    request(client, "UndeploySystemInstance", input, options)
  end

  @doc """
  Removes a tag from the specified resource.
  """
  def untag_resource(client, input, options \\ []) do
    request(client, "UntagResource", input, options)
  end

  @doc """
  Updates the specified workflow. All deployed systems and system instances
  that use the workflow will see the changes in the flow when it is
  redeployed. If you don't want this behavior, copy the workflow (creating a
  new workflow with a different ID), and update the copy. The workflow can
  contain only entities in the specified namespace.
  """
  def update_flow_template(client, input, options \\ []) do
    request(client, "UpdateFlowTemplate", input, options)
  end

  @doc """
  Updates the specified system. You don't need to run this action after
  updating a workflow. Any deployment that uses the system will see the
  changes in the system when it is redeployed.
  """
  def update_system_template(client, input, options \\ []) do
    request(client, "UpdateSystemTemplate", input, options)
  end

  @doc """
  Asynchronously uploads one or more entity definitions to the user's
  namespace. The `document` parameter is required if
  `syncWithPublicNamespace` and `deleteExistingEntites` are false. If the
  `syncWithPublicNamespace` parameter is set to `true`, the user's namespace
  will synchronize with the latest version of the public namespace. If
  `deprecateExistingEntities` is set to true, all entities in the latest
  version will be deleted before the new `DefinitionDocument` is uploaded.
  When a user uploads entity definitions for the first time, the service
  creates a new namespace for the user. The new namespace tracks the public
  namespace. Currently users can have only one namespace. The namespace
  version increments whenever a user uploads entity definitions that are
  backwards-incompatible and whenever a user sets the
  `syncWithPublicNamespace` parameter or the `deprecateExistingEntities`
  parameter to `true`.
  The IDs for all of the entities should be in URN format. Each entity must
  be in the user's namespace. Users can't create entities in the public
  namespace, but entity definitions can refer to entities in the public
  namespace.
  Valid entities are `Device`, `DeviceModel`, `Service`, `Capability`,
  `State`, `Action`, `Event`, `Property`, `Mapping`, `Enum`.
  """
  def upload_entity_definitions(client, input, options \\ []) do
    request(client, "UploadEntityDefinitions", input, options)
  end

  @spec request(AWS.Client.t(), binary(), map(), list()) ::
          {:ok, Poison.Parser.t() | nil, Poison.Response.t()}
          | {:error, Poison.Parser.t()}
          | {:error, HTTPoison.Error.t()}
  # Signs and performs a single AWS JSON-1.1 RPC call, then decodes the reply.
  defp request(client, action, input, options) do
    client = %{client | service: "iotthingsgraph"}
    host = build_host("iotthingsgraph", client)
    url = build_url(host, client)

    headers = [
      {"Host", host},
      {"Content-Type", "application/x-amz-json-1.1"},
      {"X-Amz-Target", "IotThingsGraphFrontEndService.#{action}"}
    ]

    payload = Poison.Encoder.encode(input, %{})
    # SigV4 signing covers the final payload, so it must happen last.
    headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)

    case HTTPoison.post(url, payload, headers, options) do
      {:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
        # Some actions legitimately return an empty body on success.
        {:ok, nil, response}

      {:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
        {:ok, Poison.Parser.parse!(body, %{}), response}

      {:ok, %HTTPoison.Response{body: body}} ->
        # Non-200 responses carry a JSON error document from AWS.
        error = Poison.Parser.parse!(body, %{})
        {:error, error}

      {:error, %HTTPoison.Error{reason: reason}} ->
        {:error, %HTTPoison.Error{reason: reason}}
    end
  end

  # The special "local" region targets a local test endpoint.
  defp build_host(_endpoint_prefix, %{region: "local"}) do
    "localhost"
  end

  defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
    "#{endpoint_prefix}.#{region}.#{endpoint}"
  end

  defp build_url(host, %{:proto => proto, :port => port}) do
    "#{proto}://#{host}:#{port}/"
  end
end
|
lib/aws/iot_things_graph.ex
| 0.841207
| 0.654812
|
iot_things_graph.ex
|
starcoder
|
defmodule Calendar do
  @moduledoc """
  This module defines the responsibilities for working with
  calendars, dates, times and datetimes in Elixir.
  Currently it defines types but may define the calendar
  behaviour in future versions. For the actual date, time
  and datetime structures, see `Date`, `Time`, `NaiveDateTime`
  and `DateTime`.
  Note the year, month, day, etc designations are over specified
  (i.e. an integer instead of 1..12 for months) because different
  calendars may have a different number of numbers and so on.
  """

  # Chronological field types. Deliberately plain integers (see moduledoc):
  # non-ISO calendars may use ranges other than 1..12, 1..31, etc.
  @type year :: integer
  @type month :: integer
  @type day :: integer
  @type hour :: integer
  @type minute :: integer
  @type second :: integer
  @type microsecond :: integer

  @typedoc "A calendar implementation"
  @type calendar :: module

  @typedoc "The time zone ID according to the IANA tz database (e.g. Europe/Zurich)"
  @type time_zone :: String.t

  @typedoc "The time zone abbreviation (e.g. CET or CEST or BST etc.)"
  @type zone_abbr :: String.t

  @typedoc "The time zone UTC offset in seconds"
  @type utc_offset :: integer

  @typedoc "The time zone standard offset in seconds (not zero in summer times)"
  @type std_offset :: integer

  @doc false
  # TODO: Remove this on 1.4. It exists only to aid migration of those
  # using the Calendar library.
  defmacro __using__(_opts) do
    # Emit the deprecation warning at the caller's file/line so it points
    # at the user's `use Calendar`, not at this macro.
    %{file: file, line: line} = __CALLER__
    :elixir_errors.warn(line, file, "use Calendar is deprecated as it is now part of Elixir")

    # Recreate the aliases the external Calendar library used to provide.
    quote do
      alias Calendar.DateTime
      alias Calendar.DateTime.Interval
      alias Calendar.AmbiguousDateTime
      alias Calendar.NaiveDateTime
      alias Calendar.Date
      alias Calendar.Time
      alias Calendar.TimeZoneData
      alias Calendar.TzPeriod
      alias Calendar.Strftime
    end
  end
end
defmodule Date do
  @moduledoc """
  A date implementation.
  """
  # Calendar defaults to ISO-8601; year/month/day have no default and start nil.
  defstruct [:year, :month, :day, calendar: Calendar.ISO]
  @type t :: %__MODULE__{year: Calendar.year, month: Calendar.month,
                         day: Calendar.day, calendar: Calendar.calendar}
end
defmodule Time do
  @moduledoc """
  A time implementation.
  """
  # A calendar-agnostic wall-clock time; all fields start nil.
  defstruct [:hour, :minute, :second, :microsecond]
  @type t :: %__MODULE__{hour: Calendar.hour, minute: Calendar.minute,
                         second: Calendar.second, microsecond: Calendar.microsecond}
end
defmodule NaiveDateTime do
  @moduledoc """
  A naive datetime implementation (without a time zone).
  The naive bit implies this datetime representation does
  not have a timezone. This means the datetime may not
  actually exist in certain areas in the world even though
  it is valid.
  For example, when daylight saving changes are applied
  by a region, the clock typically moves forward or backward
  by one hour. This means certain datetimes never occur or
  may occur more than once. Since `NaiveDateTime` is not
  validated against a timezone, such errors would go unnoticed.
  """
  # Union of the Date and Time fields, with the same ISO calendar default.
  defstruct [:year, :month, :day, :hour, :minute, :second, :microsecond, calendar: Calendar.ISO]
  @type t :: %__MODULE__{year: Calendar.year, month: Calendar.month, day: Calendar.day,
                         calendar: Calendar.calendar, hour: Calendar.hour, minute: Calendar.minute,
                         second: Calendar.second, microsecond: Calendar.microsecond}
end
defmodule DateTime do
  @moduledoc """
  A datetime implementation with a time zone.
  This datetime can be seen as an ephemeral snapshot
  of a datetime at a given timezone. For such purposes,
  it also includes both UTC and Standard offsets, as
  well as the zone abbreviation field used exclusively
  for formatting purposes.
  """
  # NaiveDateTime's fields plus the zone information (IANA id, abbreviation,
  # UTC offset and standard/DST offset, all in seconds).
  defstruct [:year, :month, :day, :hour, :minute, :second, :microsecond,
             :time_zone, :zone_abbr, :utc_offset, :std_offset, calendar: Calendar.ISO]
  @type t :: %__MODULE__{year: Calendar.year, month: Calendar.month, day: Calendar.day,
                         calendar: Calendar.calendar, hour: Calendar.hour, minute: Calendar.minute,
                         second: Calendar.second, microsecond: Calendar.microsecond,
                         time_zone: Calendar.time_zone, zone_abbr: Calendar.zone_abbr,
                         utc_offset: Calendar.utc_offset, std_offset: Calendar.std_offset}
end
|
lib/elixir/lib/calendar.ex
| 0.763263
| 0.583085
|
calendar.ex
|
starcoder
|
defmodule IBMSpeechToText.Client do
  @moduledoc """
  A client process responsible for communication with Speech to Text API
  """
  use GenServer
  alias IBMSpeechToText.{Token, Util, Response}
  alias IBMSpeechToText.Message.{Start, Stop}

  @endpoint_path "/speech-to-text/api/v1/recognize"

  @doc """
  Starts a client process responsible for communication with Speech to Text API
  linked to the current process.
  Requires API url or region atom (See `t:IBMSpeechToText.region/0`) and
  an API key used to obtain `IBMSpeechToText.Token` ([here](https://cloud.ibm.com/docs/services/watson?topic=watson-iam) you can learn how to get it)
  ## Options
  * `:stream_to` - pid of the process that will receive recognition results, defaults to the caller of `start_link/3`
  * `:keep_alive` - if set to true, the client will automatically reconnect to the API after timeout (IBM API will close the connection after 30 seconds of silence or no data). False by default.
  * Recognition parameters (such as `:model`) described in [IBM Cloud docs](https://cloud.ibm.com/apidocs/speech-to-text#WSRecognizeMethod)
  ## Example
  ```
  #{inspect(__MODULE__)}.start_link(
    :frankfurt,
    "ABCDEFGHIJKLMNO",
    model: "en-GB_BroadbandModel"
  )
  ```
  """
  @spec start_link(IBMSpeechToText.region() | charlist(), String.t(), Keyword.t()) ::
          :ignore | {:error, any()} | {:ok, pid()}
  def start_link(api_region_or_url, api_key, opts \\ []) do
    do_start(:start_link, api_region_or_url, api_key, opts)
  end

  @doc """
  Starts a client process without links.
  See `start_link/3` for more info.
  """
  @spec start(IBMSpeechToText.region() | charlist(), String.t(), Keyword.t()) ::
          :ignore | {:error, any()} | {:ok, pid()}
  def start(api_region_or_url, api_key, opts \\ []) do
    do_start(:start, api_region_or_url, api_key, opts)
  end

  # Resolves a region atom into a host name, then falls through to the
  # host-based clause below.
  defp do_start(function, region, api_key, opts) when is_atom(region) do
    with {:ok, api_url} <- IBMSpeechToText.api_host_name(region) do
      do_start(function, api_url, api_key, opts)
    end
  end

  defp do_start(function, api_url, api_key, opts) do
    # Client-level options are consumed here; everything else is forwarded
    # to the API endpoint as recognition query parameters.
    {client_opts, endpoint_opts} = Keyword.split(opts, [:stream_to, :keep_alive])
    stream_to = client_opts |> Keyword.get(:stream_to, self())
    keep_alive = client_opts |> Keyword.get(:keep_alive, false)

    apply(GenServer, function, [
      __MODULE__,
      [api_url, api_key, stream_to, keep_alive, endpoint_opts]
    ])
  end

  @doc """
  Sends a proper message over websocket to the API
  """
  @spec send_message(GenServer.server(), Start.t() | Stop.t()) :: :ok
  def send_message(client, %msg_module{} = msg) when msg_module in [Start, Stop] do
    GenServer.cast(client, {:send_message, msg})
  end

  @doc """
  Sends audio data over websocket
  """
  @spec send_data(GenServer.server(), iodata()) :: :ok
  def send_data(client, data) do
    GenServer.cast(client, {:send_data, data})
  end

  @doc """
  Stops the client process. A proxy for `GenServer.stop/3`.
  """
  @spec stop(GenServer.server(), reason :: term(), GenServer.timeout()) :: :ok
  def stop(client, reason \\ :normal, timeout \\ :infinity) do
    GenServer.stop(client, reason, timeout)
  end

  @impl true
  def init([api_url, api_key, stream_to, keep_alive, endpoint_opts]) do
    with {:ok, conn_pid} <- :gun.open(api_url, Util.ssl_port(), Util.ssl_connection_opts()) do
      monitor = Process.monitor(conn_pid)
      # Token retrieval runs concurrently with the TCP/TLS handshake and is
      # awaited in handle_continue.
      task = Task.async(Token, :get, [api_key])

      ws_path =
        case endpoint_opts do
          [] -> @endpoint_path
          _ -> @endpoint_path <> "?" <> URI.encode_query(endpoint_opts)
        end

      state = %{
        api_key: api_key,
        conn_pid: conn_pid,
        conn_monitor: monitor,
        stream_to: stream_to,
        keep_alive: keep_alive,
        token: nil,
        ws_path: ws_path,
        ws_ref: nil
      }

      {:ok, state, {:continue, {:init, task}}}
    end
  end

  @impl true
  def handle_continue({:init, task}, %{conn_pid: conn_pid, ws_path: ws_path} = state) do
    # Finish the slow part of startup outside init/1: wait for the connection,
    # collect the auth token and upgrade the HTTP connection to a websocket.
    with {:ok, _protocol} <- :gun.await_up(conn_pid),
         {:ok, token} <- Task.await(task),
         ws_ref = :gun.ws_upgrade(conn_pid, ws_path, [token |> Token.auth_header()]),
         :ok <- await_upgrade(conn_pid, ws_ref) do
      {:noreply, %{state | token: token, ws_ref: ws_ref}}
    else
      {:error, reason} ->
        raise "Error while initializing connection: #{inspect(reason)}"
    end
  end

  @impl true
  def handle_cast({:send_message, msg}, %{conn_pid: conn_pid} = state) do
    encoded_msg = msg |> Jason.encode_to_iodata!()
    :gun.ws_send(conn_pid, {:text, encoded_msg})
    {:noreply, state}
  end

  @impl true
  def handle_cast({:send_data, data}, %{conn_pid: conn_pid} = state) do
    :gun.ws_send(conn_pid, {:binary, data})
    {:noreply, state}
  end

  @impl true
  def handle_info(
        {:gun_ws, conn_pid, ws_ref, {:text, data}},
        %{conn_pid: conn_pid, ws_ref: ws_ref, stream_to: stream_to} = state
      ) do
    case Response.from_json(data) do
      {:ok, %Response{} = response} ->
        send(stream_to, response)
        {:noreply, state}

      {:ok, :listening} ->
        # Acknowledgement from the API; nothing to forward.
        {:noreply, state}

      {:error, %Jason.DecodeError{} = error} ->
        raise "Error while decoding response: #{Jason.DecodeError.message(error)}"

      {:error, error} ->
        # IBM closes idle connections after ~30 s; optionally reconnect.
        if error =~ "timed out" and state.keep_alive do
          {:noreply, reconnect(state)}
        else
          raise "Received error over websocket: #{error}"
        end
    end
  end

  @impl true
  def handle_info(
        {:DOWN, monitor, :process, conn_pid, reason},
        %{conn_pid: conn_pid, conn_monitor: monitor}
      ) do
    raise "Connection down: #{inspect(reason)}"
  end

  # The HTTP upgrade failed, so gun delivered a plain HTTP response instead of
  # :gun_upgrade. Gun sends {:gun_response, ConnPid, StreamRef, IsFin, Status,
  # Headers}; the previous clause matched a 4-tuple and a non-existent
  # `:monitor` key in the state, so it could never fire and upgrade failures
  # surfaced as "Unknown message". It also passed the monitor ref to
  # :gun.await_body/3 in the timeout position.
  @impl true
  def handle_info(
        {:gun_response, conn_pid, ws_ref, :nofin, _code, _headers},
        %{conn_pid: conn_pid, ws_ref: ws_ref}
      ) do
    {:ok, body} = :gun.await_body(conn_pid, ws_ref)
    raise "Error while upgrading to websocket: #{inspect(Jason.decode!(body), pretty: true)}"
  end

  @impl true
  def handle_info(
        {:gun_response, conn_pid, ws_ref, :fin, code, _headers},
        %{conn_pid: conn_pid, ws_ref: ws_ref}
      ) do
    # No body follows when IsFin == :fin; report the status code instead.
    raise "Error while upgrading to websocket: HTTP status #{code}"
  end

  # Gun error messages carry the connection pid and optionally a stream ref:
  # {:gun_error, ConnPid, Reason} or {:gun_error, ConnPid, StreamRef, Reason}.
  # The previous 2-tuple pattern matched neither shape.
  @impl true
  def handle_info({:gun_error, _conn_pid, reason}, _state) do
    raise "Gun error: #{inspect(reason, pretty: true)}"
  end

  @impl true
  def handle_info({:gun_error, _conn_pid, _stream_ref, reason}, _state) do
    raise "Gun error: #{inspect(reason, pretty: true)}"
  end

  @impl true
  def handle_info(msg, _state) do
    raise "Unknown message: #{inspect(msg, pretty: true)}"
  end

  @impl true
  def terminate(_reason, state) do
    Process.demonitor(state.conn_monitor)
  end

  # Blocks until gun confirms the websocket upgrade for this stream.
  defp await_upgrade(conn_pid, ws_ref) do
    receive do
      {:gun_upgrade, ^conn_pid, ^ws_ref, ["websocket"], _} -> :ok
    after
      5000 -> raise "Timeout while waiting for connection upgrade"
    end
  end

  # Re-establishes the websocket on the existing connection, refreshing the
  # IAM token first if it is close to expiry.
  # NOTE(review): this awaits :gun.await_up on a connection that may already
  # be up — confirm gun delivers (or re-delivers) :gun_up in this situation.
  defp reconnect(%{conn_pid: conn_pid} = state) do
    token_res =
      if state.token |> Token.should_refresh?() do
        Token.get(state.api_key)
      else
        {:ok, state.token}
      end

    with {:ok, token} <- token_res,
         {:ok, _protocol} <- :gun.await_up(conn_pid, 10000),
         :ok = :gun.flush(conn_pid),
         ws_ref = :gun.ws_upgrade(conn_pid, state.ws_path, [token |> Token.auth_header()]),
         :ok <- await_upgrade(conn_pid, ws_ref) do
      %{state | token: token, ws_ref: ws_ref}
    else
      {:error, reason} -> raise "Error while reconnecting: #{inspect(reason)}"
    end
  end
end
|
lib/ibm_speech_to_text/client.ex
| 0.872252
| 0.548734
|
client.ex
|
starcoder
|
defmodule Glitchylicious do
  @moduledoc """
  Produces magnificent glitches,
  by corrupting some jpg bytes.
  Pretty much stolen from: georg @ http://fishnation.de/
  Take a look at his js experiments, cool stuff!
  Example:
      alias Glitchylicious, as: G
      def glitch do
        G.glitch("./myimage.jpg", %{iter: 10, amount: 50, seed: 10})
      end
  """

  @doc """
  Glitches jpg data.
  Takes an input path (original image)
  as well as a config map. Returns corrupted binary data.
  Config map should include the following parameters:
  iter - number of iterations to perform.
  amount - corruption severity.
  seed - seed value.
  mode (optional) - Glitch mode. Currently :normal (default) or :reverse
  """
  @spec glitch(String.t, map) :: binary
  def glitch(input, %{iter: iter, amount: amount, seed: seed, mode: mode}) do
    raw = File.read!(input)

    case mode do
      :normal ->
        do_glitch(raw, header_size(raw), iter, amount, seed)

      :reverse ->
        do_glitch_reverse(raw, header_size(raw), iter, amount, seed)

      _ ->
        # Unknown modes fall back to the normal forward pass.
        do_glitch(raw, header_size(raw), iter, amount, seed)
    end
  end

  # Clause for config maps without a :mode key; defaults to :normal.
  # Fixes two bugs in the original: the name was misspelled (`gltich`) and it
  # called the long-removed `Dict` module instead of `Map`.
  def glitch(input, %{iter: _, amount: _, seed: _} = params) do
    glitch(input, Map.put(params, :mode, :normal))
  end

  @doc false
  # Backwards-compatible alias for callers that used the misspelled name.
  def gltich(input, params) do
    glitch(input, params)
  end

  # Forward pass entry point: i counts up from 0 to iter;
  # amount and seed are scaled down to the 0..1 range.
  defp do_glitch(image, len, iter, amount, seed) do
    do_glitch(image, len, 0, iter, amount / 100, seed / 100)
  end

  defp do_glitch(image, _len, i, iter, _amount, _seed) when i == iter do
    image
  end

  defp do_glitch(image, len, i, iter, amount, seed) do
    # Pick one byte inside the i-th slice of the payload (past the JPEG
    # header, keeping the last 4 bytes intact) and overwrite it.
    max_index = byte_size(image) - len - 4
    px_min = round(max_index / iter * i)
    px_max = round(max_index / iter * (i + 1))
    delta = px_max - px_min
    px_i = round(px_min + delta * seed)

    byte_index =
      if px_i > max_index,
        do: len + max_index,
        else: len + px_i

    replace(image, byte_index, Float.floor(amount * 256) |> trunc)
    |> do_glitch(len, i + 1, iter, amount, seed)
  end

  # Reverse pass entry point: iterates slices from the end of the image
  # back towards the start.
  defp do_glitch_reverse(image, len, iter, amount, seed) do
    do_glitch_reverse(image, len, iter, iter, amount / 100, seed / 100)
  end

  defp do_glitch_reverse(image, _len, 0, _iter, _amount, _seed) do
    image
  end

  defp do_glitch_reverse(image, len, i, iter, amount, seed) do
    max_index = byte_size(image) - len - 4
    px_min = round(max_index / iter * i)
    px_max = round(max_index / iter * (i + 1))
    delta = px_max - px_min
    px_i = round(px_min + delta * seed)

    byte_index =
      if px_i > max_index,
        do: len + max_index,
        else: len + px_i

    # NOTE(review): the forward pass floors amount * 256 while this pass
    # rounds it; preserved as-is since existing outputs may depend on it.
    replace(image, byte_index, round(amount * 256))
    |> do_glitch_reverse(len, i - 1, iter, amount, seed)
  end

  # Number of bytes up to and including the JPEG SOS marker (0xFF 0xDA).
  # Everything after it is entropy-coded data that is safe(ish) to corrupt.
  defp header_size(raw), do: header_size(raw, 0)
  defp header_size(<< 255, 218, _data::binary >>, acc), do: acc + 2
  defp header_size(<< _a::8, b::8, data::binary >>, acc), do: header_size(<< b :: 8, data :: binary >>, acc + 1)

  # Overwrites the single byte at index `ind` with `replacement`.
  defp replace(image, ind, replacement) do
    << head :: binary-size(ind), _ :: 8, rest :: binary >> = image
    << head :: binary, replacement, rest :: binary >>
  end
end
|
lib/glitchylicious.ex
| 0.7011
| 0.471345
|
glitchylicious.ex
|
starcoder
|
defmodule MatrexNumerix.LinearAlgebra do
  @moduledoc """
  Linear algebra functions used for vector operations,
  matrix factorization and matrix transformation.
  """
  import Matrex
  alias MatrexNumerix.Math

  @doc """
  The L1 norm of a vector, also known as Manhattan norm.
  """
  @spec l1_norm(Common.maybe_vector()) :: Common.maybe_float()
  def l1_norm(vector, weights \\ nil) do
    norm(1, vector, weights)
  end

  @doc """
  The L2 norm of a vector, also known as Euclidean norm.
  """
  @spec l2_norm(Common.maybe_vector()) :: Common.maybe_float()
  def l2_norm(vector, weights \\ nil) do
    norm(2, vector, weights)
  end

  @doc """
  The p-norm of a vector.
  """
  @spec norm(integer, Common.maybe_vector()) :: Common.maybe_float()
  def norm(p, x), do: norm(p, x, nil)

  # nil vectors propagate nil rather than raising.
  def norm(_p, nil, _), do: nil

  # Unweighted: (sum(|x|^p))^(1/p).
  def norm(p, x = %Matrex{}, nil) do
    s = sum(pow(Matrex.apply(x, :abs), p))
    Math.nth_root(s, p)
  end

  # Weighted: each |x|^p term is scaled by the corresponding weight.
  def norm(p, x = %Matrex{}, w = %Matrex{}) do
    s = sum(pow(Matrex.apply(x, :abs), p) |> Matrex.dot(w))
    Math.nth_root(s, p)
  end

  # Square upper-triangular matrix of ones (diagonal included).
  def ones_upper(n), do: ones_upper(n, n)

  def ones_upper(ni,nj) do
    m1r = Matrex.ones(1, nj)

    # Row i gets ones from column i to nj; everything below stays zero.
    for i <- 1..nj, reduce: Matrex.zeros(ni,nj) do
      m! ->
        m1sl = m1r |> Matrex.submatrix(1..1, i..nj)
        m! |> Matrex.set_submatrix(i..i, i..nj, m1sl)
    end
  end

  # Strictly upper-triangular matrix of ones (diagonal excluded).
  def ones_upper_offdiag(n), do: ones_upper_offdiag(n, n)

  def ones_upper_offdiag(ni,nj) do
    m1r = Matrex.ones(1, nj)

    for i <- 1..(nj-1), reduce: Matrex.zeros(ni,nj) do
      m! ->
        m1sl = m1r |> Matrex.submatrix(1..1, (i+1)..nj)
        m! |> Matrex.set_submatrix(i..i, (i+1)..nj, m1sl)
    end
  end

  # Reverses the row order of a matrix.
  def reverse(m = %Matrex{}) do
    m |> Enum.reverse() |> Matrex.from_list()
  end

  # Reverses the row order and transposes the result.
  def reverse!(m = %Matrex{}) do
    m |> Enum.reverse() |> Matrex.from_list() |> Matrex.transpose()
  end

  @doc """
  Solves an upper-triangular system `U x = y` by backward substitution.
  """
  def backward_substitution(uu, y) do
    use Matrex.Operators
    {_, n} = size(uu)
    x = Vector.zeros(Vector.size(y))

    for i <- n..1, reduce: x do # loop over the rows from bottom to top
      x ->
        # Subtract the contributions of the already-solved entries x[k], k > i.
        # (reject keeps the generator valid when i == n and (i+1)..n is empty)
        r = for k <- (i+1)..n |> Enum.reject(& &1 > n), reduce: y[i] do
          r -> r - uu[i][k] * x[k]
        end

        Vector.set(x, i, r / uu[i][i] )
    end
  end

  @doc """
  Solves a lower-triangular system `L c = b` by forward substitution.
  """
  def forward_substitution(ll, b) do
    use Matrex.Operators
    {_ni, n} = size(ll)
    c = Vector.zeros(Vector.size(b))

    # Loop over the rows from top to bottom.
    for i <- 1..n, reduce: c do
      c ->
        # Subtract the contributions of the already-solved entries c[j], j < i.
        r = for j <- 1..(i-1) |> Enum.reject(& &1 < 1), reduce: b[i] do
          r ->
            r - ll[i][j] * c[j]
        end

        Vector.set(c, i, r / ll[i][i] )
    end
  end
end
|
lib/linear_algebra.ex
| 0.838481
| 0.742748
|
linear_algebra.ex
|
starcoder
|
defmodule Timedot.IR do
  @moduledoc """
  Intermediate representation of a timedot file.
  Result of parsing a file and can be serialized again in a consistent manner.
  """
  @type t :: list(line_like())
  @type line_like :: comment() | day()
  @type comment :: {:comment, String.t()}
  # Fixed: a day is tagged `:day` (see the `{:day, d}` matches in
  # `to_string/1`), not `:comment` as the previous spec claimed.
  @type day ::
          {:day,
           %{
             optional(:year) => integer(),
             optional(:comment) => String.t(),
             day: integer(),
             month: integer(),
             entries: entries()
           }}
  # Fixed: entries are `{:entry, map}` tuples (see `day_to_string/1`), not
  # bare maps.
  @type entries ::
          list(
            {:entry,
             %{optional(:comment) => String.t(), account: String.t(), quantity: quantity()}}
            | {:comment, String.t()}
          )
  @type quantity :: {time_unit(), integer()} | {time_unit(), float()}
  # Fixed: `:weeks` (plural) — `unit_to_string/1` only matches `:weeks`, so the
  # singular `:week` in the old spec could never be serialized.
  @type time_unit :: :seconds | :minutes | :dots | :hours | :days | :weeks | :months | :years
  @doc """
  Converts the given IR to a timedot string.

  Each line-like element becomes one (or more) output lines; the result always
  ends with a trailing newline.
  """
  @spec to_string(Timedot.IR.t()) :: String.t()
  def to_string(ir) do
    Enum.map_join(ir, "\n", fn
      {:comment, comment} -> "# #{comment}"
      # NOTE(review): this clause matches a keyword-list payload, not the map
      # shape used elsewhere — kept as-is to preserve the parser contract.
      {:day, [entries: []]} -> ""
      {:day, d} -> day_to_string(d)
    end) <> "\n"
  end
  # Serializes one day: an optional "YYYY-" prefix, the zero-padded "MM-DD"
  # date (plus optional trailing comment), then one line per entry.
  defp day_to_string(d) do
    date_line =
      if Map.has_key?(d, :year) do
        year = Integer.to_string(Map.fetch!(d, :year)) |> String.pad_leading(4, "0")
        year <> "-"
      else
        ""
      end
    %{day: day, month: month, entries: entries} = d
    [month, day] =
      Enum.map([month, day], fn v ->
        Integer.to_string(v) |> String.pad_leading(2, "0")
      end)
    optional_comment = Map.get(d, :comment, "")
    optional_comment =
      if String.length(optional_comment) > 0 do
        " # #{optional_comment}"
      else
        optional_comment
      end
    date_line = date_line <> "#{month}-#{day}#{optional_comment}\n"
    date_line <>
      Enum.map_join(entries, "\n", fn
        {:comment, comment} ->
          "# #{comment}"
        {:entry, %{account: account, quantity: quantity, comment: comment}} ->
          "#{account} #{quantity_to_string(quantity)} # #{comment}"
        {:entry, %{account: account, quantity: quantity}} ->
          "#{account} #{quantity_to_string(quantity)}"
      end)
  end
  # Dots render as groups of four separated by spaces (".... ." for 5 dots);
  # all other units render as "<amount><unit-suffix>".
  @spec quantity_to_string(quantity()) :: String.t()
  defp quantity_to_string({:dots, dots}) do
    String.duplicate(".", dots)
    |> String.to_charlist()
    |> Enum.chunk_every(4)
    |> Enum.join(" ")
  end
  defp quantity_to_string({unit, duration}),
    do: "#{duration}#{unit_to_string(unit)}"
  defp unit_to_string(:seconds), do: "s"
  defp unit_to_string(:minutes), do: "m"
  defp unit_to_string(:hours), do: "h"
  defp unit_to_string(:days), do: "d"
  defp unit_to_string(:weeks), do: "w"
  defp unit_to_string(:months), do: "mo"
  defp unit_to_string(:years), do: "y"
end
# NOTE(review): `for: Timedot.IR` dispatches on `%Timedot.IR{}` structs, but
# Timedot.IR defines no struct and its `t()` is a plain list — confirm this
# implementation is actually reachable via `Kernel.to_string/1`.
defimpl String.Chars, for: Timedot.IR do
  # Delegates to the module's canonical serializer.
  def to_string(ir) do
    Timedot.IR.to_string(ir)
  end
end
|
lib/timedot/i_r.ex
| 0.820901
| 0.529689
|
i_r.ex
|
starcoder
|
defmodule RatError.Structure do
  @moduledoc """
  Specifies the Map structure of a RAT error.
  This struct could be created from the specified options as below,
  [
    node: :error,
    keys: %{
      code: :code,
      message: :message
    }
  ]
  References the 'RatError.Structure' configuration in 'config/*.exs' for
  detail.
  """
  alias __MODULE__
  require Logger
  # :node - optional wrapper key for the error map.
  # :keys - mapping from supported fields to the output key names.
  defstruct [:node, :keys]
  # Fields a RAT error may carry; anything else passed in `:keys` is dropped
  # by `filter_keys/1`.
  @support_fields [
    # Error code defined by caller, e.g. an atom :no_entry, an integer 9 or a
    # string "unexpected".
    :code,
    # Error file path.
    :file,
    # Error function name.
    :function,
    # Error file line.
    :line,
    # Error message of string passed in by caller.
    :message,
    # Error module.
    :module
  ]
  @doc """
  Creates the struct from the default 'RatError.Structure' configuration.
  The default configuration is set in 'config/*.exs'.
  ## Examples
  References 'config/test.exs' for the test configuration.
      iex> Structure.create_from_default_config
      %RatError.Structure{
        node: :error,
        keys: %{
          code: :code,
          file: :file,
          function: :function,
          line: :line,
          message: :message,
          module: :module
        }
      }
  """
  def create_from_default_config do
    :rat_error
    |> Application.get_env(RatError.Structure)
    |> create
  end
  @doc """
  Creates the struct from the specified options.
  ## Examples
      iex> support_keys = %{code: :code, message: :message}
      iex> Structure.create(node: :err, keys: support_keys)
      %RatError.Structure{
        node: :err,
        keys: %{
          code: :code,
          message: :message
        }
      }
      iex> support_keys = %{code: :code, message: :message}
      iex> Structure.create(keys: support_keys)
      %RatError.Structure{
        node: nil,
        keys: %{
          code: :code,
          message: :message
        }
      }
      iex> Structure.create(keys: %{code: :code})
      %RatError.Structure{
        node: nil,
        keys: %{code: :code}
      }
  """
  @spec create(keyword) :: %Structure{}
  def create(opts) when is_list(opts) do
    keys = filter_keys(opts[:keys])
    %Structure{node: opts[:node], keys: keys}
  end
  @doc """
  Updates the struct with the specified options.
  Only the `:node` and `:keys` options are honoured; `:keys` is filtered down
  to the supported fields before merging.
  ## Examples
      iex> structure = %Structure{node: :error, keys: %{code: :code}}
      iex> Structure.update(structure, node: :err, keys: %{message: :message})
      %RatError.Structure{
        node: :err,
        keys: %{message: :message}
      }
  """
  @spec update(%Structure{}, keyword) :: %Structure{}
  def update(%Structure{} = structure, opts) when is_list(opts) do
    params =
      Enum.reduce([:node, :keys], %{}, fn k, acc ->
        case Keyword.fetch(opts, k) do
          {:ok, v} -> Map.put(acc, k, v)
          :error -> acc
        end
      end)
    params =
      if keys = params[:keys] do
        %{params | keys: filter_keys(keys)}
      else
        params
      end
    Map.merge(structure, params)
  end
  defp filter_keys(nil), do: nil
  defp filter_keys(keys) when is_map(keys) do
    fields = Map.keys(keys)
    # `--` is right-associative: fields -- (fields -- @support_fields), i.e.
    # the intersection of the given fields with the supported ones.
    filtered_fields = fields -- fields -- @support_fields
    # Fixed idiom: test emptiness directly rather than `is_nil(List.first(..))`,
    # which obscured the intent.
    if filtered_fields == [] do
      Logger.warn("there is no support keys - '#{inspect(keys)}'!")
    end
    Map.take(keys, filtered_fields)
  end
end
|
lib/rat_error/structure.ex
| 0.854278
| 0.461866
|
structure.ex
|
starcoder
|
defmodule ElixirApiai.Query do
  @moduledoc """
  Query API wrapper [Api.ai Query](https://api.ai/docs/reference/agent/query)
  """
  import ElixirApiai.Utils.Api
  @doc """
  Makes a `query` request to Api.ai.

  Uses the `POST /query` API method with a JSON payload. All parameters other
  than `query` and `sessionId` may be supplied through `options`; keys given
  there take precedence over the two required arguments. Returns the decoded
  response as `{:ok, map}` or `{:error, reason}`.
  """
  @spec query(String.t, String.t, map) :: {:ok, map} | {:error, any}
  def query(query, session_id, options \\ %{}) do
    payload =
      %{query: query, sessionId: session_id}
      |> Map.merge(options)

    post("query", body: payload)
  end
end
|
lib/elixir_apiai/query.ex
| 0.803444
| 0.690911
|
query.ex
|
starcoder
|
defmodule MailAddress do
@moduledoc """
Functions to handle RFC5321 Mail Addresses.
The library implements functions for handling email addresses as specified
mostly by RFC5321. A large chunk of the address syntax is implemented, with
a few exceptions:
* Handling of general address literals in domains (IPv4 and IPv6
address literals are supported).
* Handling of internationalized addresses (UTF8, punycode etc).
The address parser is slightly more permissive than RFC5321 allows,
as it will tolerate backslash quoted characters in the local part of
addresses outside of quoted strings - this is technically against the
RFC5321 grammar, but there are examples everywhere of this sort of address.
Despite this, encoded addresses produced by the library are always quoted
correctly.
## Creating Addresses
Addresses may be created a number of ways:
* `%MailAddress{}` - this will create a null address.
* Calling `new/3` - this will directly assign a local and domain part.
* Calling `MailAddress.Parser.parse/2` - this will parse a string into
an address.
### Examples
iex> %MailAddress{}
#MailAddress<>
iex> {:ok, addr} = MailAddress.new("test", "example.org")
iex> addr
#MailAddress<<EMAIL>>
iex> {:ok, addr, ""} = MailAddress.Parser.parse("<EMAIL>")
iex> addr
#MailAddress<<EMAIL>>
## Modifying Addresses
Addresses can be modified by a number of functions, which return a new
address with the appropriate update:
* `set_domain/3` - updates domain.
* `set_local_part/3` - updates local part of address.
## Querying Addresses
Addresses can be queried for their components:
* `address_literal?/1` - checks if the address has an address literal domain set.
* `address_literal/1` - returns address literal domain (or `nil` if none).
* `domain?/1` - checks if the address has a domain set.
* `domain/1` - returns the address domain.
* `local_part?/1` - checks if the address has a local part set.
* `local_part/1` - returns the address local part.
* `needs_quoting?/1` - checks if the address local part needs quoting.
* `null?/1` - returns true if the address is null (no local or domain parts).
## Comparing and Encoding Addresses
* `domains_equal?/2` - compares address domains.
* `encode/2` - encodes address as string, taking care of quoting etc.
* `equal?/2` - compares two addresses.
* `local_parts_equal?/2` - compares address local parts.
## Parsing Addresses
The module MailAddress.Parser contains parsing code.
* `MailAddress.Parser.parse/2` - parses a string into an address.
* `MailAddress.Parser.valid?/2` - determines if address has valid syntax.
## Specifying Options
The `MailAddress.Options` struct is used to store options for configuring
the library. Checks are applied after every change/creation operation.
## Protocols
The library implements the `Inspect` and `String.Chars` protocols for
`MailAddress` structs.
The `Inspect` protocol is used in the `iex` shell and by `inspect/2` to
pretty-print the `MailAddress` struct contents.
The `String.Chars` protocol enables a `MailAddress` struct to be directly
converted into an encoded string.
## Usage with Ecto
MailAddress provides cast/dump/load callbacks so that it can be used as
an `Ecto` type:
```elixir
defmodule EctoExample do
use Ecto.Schema
schema "emailtest" do
field :email, MailAddress
end
end
```
In migrations any MailAddress field should be defined as a type which
can hold a large enough (up to 256 chars) string, for example `:text`.
Addresses converted using from strings using cast/4 will be checked
for validity before they are accepted into the database.
Note that casting is done by default with a permissive set of options
(allowing null addresses etc) - if you wish to be stricter then you
can change the defaults in your config.exs (see below), or apply some
further validation yourself.
Note that if you wish to supply a default value for an address field,
then it should be specified as a %MailAddress{} struct, *NOT* a string.
### config.exs configuration when using with Ecto
The library uses `:mail_address` as the application name, and the
following boolean keys, which are either `true` to enable, or `false`
to disable.
* `:ecto_allow_address_literal` - IP address literal domains.
* `:ecto_allow_localhost` - allow `localhost` as domain.
* `:ecto_allow_null` - allow empty (null) addresses.
* `:ecto_downcase_domain` - force domain name to lower case.
* `:ecto_require_domain` - require domain part to be present in non-null
addresses.
## Usage with JSON libraries
MailAddress can be used with JSON libraries, and typically requires
implementation of the correct protocols to work, for example to
encode email addresses with `Poison`, the following needs to be
done:
```elixir
defimpl Poison.Encoder, for: MailAddress do
def encode(%MailAddress{} = addr, options) do
Poison.Encoder.BitString.encode(MailAddress.encode(addr, false), options)
end
end
```
This will encode any MailAddress structs as encoded strings.
"""
alias MailAddress.CharSet
@typedoc "Error return type - a tuple containing `:error` and a reason string."
@type error :: {:error, String.t()}
@typedoc "Represents an IPv4 or IPv6 address."
@type ip_address :: :inet.ip4_address() | :inet.ip6_address()
@typedoc "Success return type - a tuple containing `:ok` and a `MailAddress` struct."
@type success :: {:ok, %__MODULE__{}}
@typedoc "The `MailAddress` struct."
@type t :: %__MODULE__{
        address_literal: nil | ip_address(),
        local_part: String.t(),
        domain: String.t(),
        needs_quoting: boolean
      }
@doc """
Address struct.
The struct *SHOULD* *be* *treated* *as* *opaque*
and not tampered with directly as it may change, and the `needs_quoting`
field is cached.
Callers should use the appropriate functions to get/set fields which
ensures that everything remains in-sync and valid.
"""
# `needs_quoting` is a cache recomputed by check/2 whenever the local part
# changes; do not set it by hand.
defstruct address_literal: nil,
          local_part: "",
          domain: "",
          needs_quoting: false
defimpl Inspect, for: MailAddress do
  import Inspect.Algebra

  # Renders as #MailAddress<local@domain> using the unbracketed encoding.
  def inspect(%MailAddress{} = addr, opts) do
    encoded = MailAddress.encode(addr, false)
    concat(["#MailAddress<", color(encoded, :string, opts), ">"])
  end
end
defimpl String.Chars, for: MailAddress do
  # String conversion uses the bracketed (`<...>`) encoded form.
  def to_string(%MailAddress{} = addr), do: MailAddress.encode(addr, true)
end
defmodule Options do
  @moduledoc "Contains struct to hold configuration."
  @typedoc "The `MailAddress.Options` struct."
  @type t :: %__MODULE__{
          allow_address_literal: boolean,
          allow_localhost: boolean,
          allow_null: boolean,
          allow_null_local_part: boolean,
          downcase_domain: boolean,
          max_address_length: pos_integer,
          max_domain_length: pos_integer,
          max_local_part_length: pos_integer,
          require_brackets: boolean,
          require_domain: boolean
        }
  @doc """
  Holds the configuration options for handling addresses.
  * `:allow_address_literal` - if `true`, allows the domain part to be an
    address literal. Defaults to `false`.
  * `:allow_localhost` - if `true`, allows the domain part to be "localhost"
    or the equivalent address literal (`[127.0.0.1]` or `[IPv6:::1]`).
    Defaults to `false`.
  * `:allow_null` - if `true` allows the address to be null. Defaults to `false`.
  * `:allow_null_local_part` - if `true` allows the address to have an empty
    local part. Defaults to `false`.
  * `:downcase_domain` - if `true` downcases the domain automatically.
    Defaults to `false`.
  * `:max_address_length` - the maximum total length in characters.
    Defaults to 256 (from RFC5321).
  * `:max_domain_length` - the maximum domain length in characters.
    Defaults to 255 (from RFC5321).
  * `:max_local_part_length` - the maximum local part length in characters.
    Defaults to 64 (from RFC5321).
  * `:require_brackets` - if `true`, insists that the address must be
    surrounded by angle brackets '<' and '>'. If `false` the brackets are
    optional and parsing stops at end of string or at a space after the last
    valid domain character. Defaults to `false`.
  * `:require_domain` - if `true` then the address must have a domain
    component unless it is a null address. Defaults to `true`.
  """
  defstruct allow_address_literal: false,
            allow_localhost: false,
            allow_null: false,
            allow_null_local_part: false,
            downcase_domain: false,
            max_address_length: 256,
            max_domain_length: 255,
            max_local_part_length: 64,
            require_brackets: false,
            require_domain: true

  @doc "Returns relaxed parsing options."
  def relaxed do
    struct!(__MODULE__,
      allow_address_literal: true,
      allow_localhost: true,
      allow_null: true,
      allow_null_local_part: true,
      require_brackets: false,
      require_domain: false
    )
  end
end
@doc """
Returns the decoded address literal domain (if any), or nil otherwise.
## Examples
    iex> MailAddress.address_literal(%MailAddress{})
    nil
    iex> {:ok, addr} = MailAddress.new("test", "[192.168.0.1]", %MailAddress.Options{allow_address_literal: true})
    iex> MailAddress.address_literal(addr)
    {192, 168, 0, 1}
"""
@spec address_literal(MailAddress.t()) :: nil | ip_address()
def address_literal(%MailAddress{} = addr), do: addr.address_literal
@doc """
Checks whether address has an address literal domain part.
## Examples
    iex> MailAddress.address_literal?(%MailAddress{})
    false
    iex> {:ok, addr} = MailAddress.new("test", "[192.168.0.1]", %MailAddress.Options{allow_address_literal: true})
    iex> MailAddress.address_literal?(addr)
    true
"""
@spec address_literal?(MailAddress.t()) :: boolean
def address_literal?(%MailAddress{address_literal: literal}), do: literal != nil
@doc false
# Ecto cast callback: accepts a string (trimmed, then parsed with the
# config-derived options), an existing struct, or nil; anything else errors.
@spec cast(nil | String.t() | MailAddress.t()) :: {:ok, nil | MailAddress.t()} | :error
def cast(<<addr::binary>>) do
  addr
  |> String.trim()
  |> MailAddress.Parser.parse(ecto_parse_options())
  |> case do
    {:ok, %MailAddress{} = parsed, ""} -> {:ok, parsed}
    _ -> :error
  end
end
def cast(%MailAddress{} = addr), do: {:ok, addr}
def cast(nil), do: {:ok, nil}
def cast(_), do: :error
@doc """
Applies checks and optional domain downcasing to given address using
passed options.
This function is automatically called as required by other functions
in the package, so doesn't normally need to be called unless you
are messing with the `MailAddress` struct directly (which isn't
a good idea).
If successful, returns `{:ok, new_address}`, otherwise returns
`{:error, error_message}`.
"""
@spec check(MailAddress.t(), Options.t()) :: {:ok, MailAddress.t()} | error()
def check(%MailAddress{} = addr, %MailAddress.Options{} = options) do
  # Each step returns :ok or {:error, msg}; the first failure short-circuits.
  # The final two steps rebind `addr`: they refresh the cached `needs_quoting`
  # flag and (optionally) downcase the domain.
  with :ok <- check_domain(addr, options),
       :ok <- check_domain_length(addr, options),
       :ok <- check_domain_address_literal(addr, options),
       :ok <- check_local_part_length(addr, options),
       :ok <- check_length(addr, options),
       :ok <- check_null(addr, options),
       {:ok, addr} <- check_needs_quoting(addr),
       {:ok, addr} <- check_downcase(addr, options),
       do: {:ok, addr}
end
# checks the domain isn't null (as long as entire address isn't null).
@spec check_domain(MailAddress.t(), Options.t()) :: :ok | error()
# Matches only when a domain is required AND missing: a null address (empty
# local part too) is still acceptable here.
defp check_domain(%MailAddress{domain: ""} = addr, %Options{require_domain: true}) do
  if byte_size(addr.local_part) == 0, do: :ok, else: {:error, "domain expected"}
end
# Reject localhost (by name or literal) unless explicitly allowed.
defp check_domain(%MailAddress{} = addr, %Options{allow_localhost: false}) do
  if domains_equal?(addr, "localhost") do
    {:error, "domain can't be localhost"}
  else
    :ok
  end
end
defp check_domain(%MailAddress{}, %MailAddress.Options{}), do: :ok
# checks the domain isn't an address literal (if configured to do so).
@spec check_domain_address_literal(MailAddress.t(), Options.t()) :: :ok | error()
defp check_domain_address_literal(%MailAddress{address_literal: nil}, %Options{
       allow_address_literal: false
     }),
     do: :ok
defp check_domain_address_literal(%MailAddress{}, %Options{allow_address_literal: false}) do
  {:error, "domain can't be an address literal"}
end
defp check_domain_address_literal(%MailAddress{}, %Options{}), do: :ok
# checks domain length is OK.
@spec check_domain_length(MailAddress.t(), MailAddress.Options.t()) :: :ok | error()
defp check_domain_length(%MailAddress{domain: dom}, %MailAddress.Options{} = options) do
  max_length = options.max_domain_length
  if byte_size(dom) > max_length do
    {:error, "domain too long (must be <= #{max_length} characters)"}
  else
    :ok
  end
end
# downcases the domain part if required.
@spec check_downcase(MailAddress.t(), Options.t()) :: {:ok, MailAddress.t()} | error()
defp check_downcase(%MailAddress{domain: dom} = addr, %Options{downcase_domain: true}) do
  {:ok, %{addr | domain: String.downcase(dom)}}
end
defp check_downcase(%MailAddress{} = addr, %MailAddress.Options{}), do: {:ok, addr}
# checks overall length is OK.
@spec check_length(MailAddress.t(), MailAddress.Options.t()) :: :ok | error()
defp check_length(%MailAddress{local_part: loc, domain: dom}, %MailAddress.Options{} = options) do
  max_length = options.max_address_length
  # The +1 accounts for the '@' separator between local part and domain.
  if byte_size(loc) + 1 + byte_size(dom) > max_length do
    {:error, "address too long (must be <= #{max_length} characters)"}
  else
    :ok
  end
end
# checks a given local part contains only valid characters.
# returns either `:ok` or `{:error, error_message}`.
@spec check_local_part(String.t()) :: :ok | error()
defp check_local_part(<<local::binary>>) do
  # Halts on the first byte that is not quotable (CharSet.qpair?/1).
  local
  |> :binary.bin_to_list()
  |> Enum.reduce_while(:ok, fn ch, acc ->
    case CharSet.qpair?(ch) do
      true ->
        {:cont, acc}
      false ->
        {:halt, {:error, "invalid character #{CharSet.format(ch)} in address local part"}}
    end
  end)
end
# checks local part length is OK.
@spec check_local_part_length(MailAddress.t(), MailAddress.Options.t()) :: :ok | error()
defp check_local_part_length(
       %MailAddress{domain: dom, local_part: loc},
       %MailAddress.Options{} = options
     ) do
  max_length = options.max_local_part_length
  len = byte_size(loc)
  cond do
    len > max_length ->
      # Fixed: the error message was missing its closing parenthesis
      # (compare the domain/address length messages).
      {:error, "local part too long (must be <= #{max_length} characters)"}
    # An empty local part is only an error when a domain is present and the
    # options do not explicitly allow it.
    len == 0 && !options.allow_null_local_part && byte_size(dom) > 0 ->
      {:error, "local part can't be null"}
    true ->
      :ok
  end
end
# checks to see if address needs quoting
@spec check_needs_quoting(MailAddress.t()) :: {:ok, MailAddress.t()} | error()
# Null address never needs quoting.
defp check_needs_quoting(%MailAddress{local_part: "", domain: ""} = addr),
  do: {:ok, %{addr | needs_quoting: false}}
# Empty local part with a non-empty domain must be quoted ("").
defp check_needs_quoting(%MailAddress{local_part: ""} = addr),
  do: {:ok, %{addr | needs_quoting: true}}
# A leading dot always forces quoting.
defp check_needs_quoting(%MailAddress{local_part: <<?.::size(8), _rest::binary>>} = addr),
  do: {:ok, %{addr | needs_quoting: true}}
defp check_needs_quoting(%MailAddress{local_part: local} = addr) do
  # Accumulator {nq, ld}: nq = quoting already required, ld = previous byte
  # was a dot. Two consecutive dots or any non-atext byte sets nq; a trailing
  # dot (ld true at the end) also forces quoting.
  {needs_quoting, last_dot} =
    local
    |> :binary.bin_to_list()
    |> Enum.reduce({false, false}, fn ch, {nq, ld} = acc ->
      is_dot = ch == ?.
      cond do
        nq -> acc
        is_dot && ld -> {true, ld}
        is_dot -> {nq, true}
        !CharSet.atext?(ch) -> {true, ld}
        true -> {nq, false}
      end
    end)
  {:ok, %{addr | needs_quoting: needs_quoting || last_dot}}
end
# checks the address isn't null.
@spec check_null(MailAddress.t(), Options.t()) :: :ok | error()
defp check_null(%MailAddress{local_part: "", domain: ""}, %Options{allow_null: false}) do
  {:error, "address can't be null"}
end
defp check_null(%MailAddress{}, %MailAddress.Options{}), do: :ok
@doc """
Returns the domain part of the address.
## Examples
    iex> MailAddress.domain(%MailAddress{})
    ""
    iex> {:ok, addr} = MailAddress.new("test", "example.org")
    iex> MailAddress.domain(addr)
    "example.org"
"""
@spec domain(MailAddress.t()) :: String.t()
def domain(%MailAddress{} = addr), do: addr.domain
@doc """
Checks whether address has a domain part.
## Examples
    iex> MailAddress.domain?(%MailAddress{})
    false
    iex> {:ok, addr} = MailAddress.new("test", "example.org")
    iex> MailAddress.domain?(addr)
    true
"""
@spec domain?(MailAddress.t()) :: boolean
def domain?(%MailAddress{domain: dom}), do: dom != ""
@doc """
Compares domain of given address with `domain` (case-insensitively).
Returns `true` if the domains are the same, or `false` otherwise.
## Examples
    iex> {:ok, addr_1} = MailAddress.new("test", "example.org")
    iex> {:ok, addr_2} = MailAddress.new("another", "example.org")
    iex> {:ok, addr_3} = MailAddress.new("test", "localhost", %MailAddress.Options{allow_localhost: true})
    iex> MailAddress.domains_equal?(addr_1, "example.org")
    true
    iex> MailAddress.domains_equal?(addr_2, "EXAMPLE.ORG")
    true
    iex> MailAddress.domains_equal?(addr_1, "something_else")
    false
    iex> MailAddress.domains_equal?(addr_1, addr_2)
    true
    iex> MailAddress.domains_equal?(addr_1, %MailAddress{})
    false
    iex> MailAddress.domains_equal?(addr_3, "localhost")
    true
    iex> MailAddress.domains_equal?(addr_3, "[127.0.0.1]")
    true
    iex> MailAddress.domains_equal?(addr_3, "[IPv6:::1]")
    true
"""
@spec domains_equal?(MailAddress.t(), String.t() | MailAddress.t()) :: boolean
# The localhost check makes "localhost", [127.0.0.1] and [IPv6:::1] all
# compare equal to each other.
def domains_equal?(%MailAddress{domain: d1} = addr, <<domain::binary>>) do
  String.downcase(d1) == String.downcase(domain) ||
    (localhost?(addr) && localhost_string?(domain))
end
def domains_equal?(%MailAddress{domain: d1} = a1, %MailAddress{domain: d2} = a2) do
  String.downcase(d1) == String.downcase(d2) || (localhost?(a1) && localhost?(a2))
end
@doc false
# Ecto dump callback: stores the unbracketed encoded form.
@spec dump(MailAddress.t()) :: {:ok, String.t()}
def dump(%MailAddress{} = addr), do: {:ok, MailAddress.encode(addr, false)}
def dump(_), do: :error
# parses options for use with ecto - uses fairly sensible defaults, but
# most of these can be overridden using config.exs settings.
defp ecto_parse_options do
  allow_address_literal = Application.get_env(:mail_address, :ecto_allow_address_literal, true)
  allow_localhost = Application.get_env(:mail_address, :ecto_allow_localhost, false)
  allow_null = Application.get_env(:mail_address, :ecto_allow_null, true)
  downcase_domain = Application.get_env(:mail_address, :ecto_downcase_domain, true)
  require_domain = Application.get_env(:mail_address, :ecto_require_domain, true)
  # allow_null_local_part and require_brackets are intentionally fixed and
  # not configurable via the application environment.
  %MailAddress.Options{
    allow_address_literal: allow_address_literal,
    allow_localhost: allow_localhost,
    allow_null: allow_null,
    allow_null_local_part: false,
    downcase_domain: downcase_domain,
    require_brackets: false,
    require_domain: require_domain
  }
end
@doc """
Returns address safely encoded, optionally (and by default) bracketed.
## Examples
    iex> MailAddress.encode(%MailAddress{}, false)
    ""
    iex> MailAddress.encode(%MailAddress{}, true)
    "<>"
    iex> {:ok, addr, ""} = MailAddress.Parser.parse("<EMAIL>")
    iex> MailAddress.encode(addr, true)
    "<<EMAIL>>"
    iex> {:ok, addr, ""} = MailAddress.Parser.parse("\\\"@test\\\"@<EMAIL>")
    iex> MailAddress.encode(addr, true)
    "<\\"\\\\<EMAIL>>"
"""
@spec encode(MailAddress.t(), boolean) :: String.t()
def encode(_, bracket \\ true)
# Bracketed form wraps the unbracketed encoding in '<' and '>'.
def encode(%MailAddress{} = addr, true) do
  enc = encode(addr, false)
  <<?<::size(8), enc::binary, ?>::size(8)>>
end
# Null address encodes to the empty string.
def encode(%MailAddress{local_part: "", domain: ""}, false),
  do: <<>>
# Fast path: local part is plain atext, no quoting needed.
def encode(%MailAddress{needs_quoting: false} = addr, false),
  do: <<addr.local_part::binary, encode_domain(addr)::binary>>
# Quoted form: wrap the local part in double quotes and backslash-escape
# every byte that is not atext.
def encode(%MailAddress{needs_quoting: true} = addr, false) do
  local =
    addr.local_part
    |> :binary.bin_to_list()
    |> Enum.flat_map(fn ch ->
      case CharSet.atext?(ch) do
        true -> [ch]
        false -> [?\\, ch]
      end
    end)
    |> :binary.list_to_bin()
  <<?"::size(8), local::binary, ?"::size(8), encode_domain(addr)::binary>>
end
# encodes domain part, including leading '@' if required.
defp encode_domain(%MailAddress{domain: ""}) do
  ""
end
defp encode_domain(%MailAddress{domain: domain}) do
  <<?@::size(8), domain::binary>>
end
@doc """
Checks whether `addr_1` and `addr_2` are the same.
The local parts are compared case sensitively, whilst the domain parts
are compared case-insensitively.
## Examples
    iex> {:ok, addr_1} = MailAddress.new("test", "example.org")
    iex> {:ok, addr_2} = MailAddress.new("test", "ExAmPlE.ORG")
    iex> MailAddress.equal?(addr_1, addr_2)
    true
    iex> {:ok, addr_3} = MailAddress.new("fred", "ExAmPlE.ORG")
    iex> MailAddress.equal?(addr_1, addr_3)
    false
"""
@spec equal?(nil | MailAddress.t() | String.t(), nil | MailAddress.t() | String.t()) :: boolean
def equal?(%MailAddress{} = addr_1, %MailAddress{} = addr_2) do
  local_parts_equal?(addr_1, addr_2) && domains_equal?(addr_1, addr_2)
end
def equal?(%MailAddress{}, nil), do: false
def equal?(nil, %MailAddress{}), do: false
def equal?(nil, nil), do: true
# String operands are parsed with relaxed options; any trailing unparsed
# input means the string is not a single valid address, hence false.
def equal?(str, %MailAddress{} = addr) when is_binary(str) do
  opts = Options.relaxed()
  case MailAddress.Parser.parse(str, opts) do
    {:ok, %MailAddress{} = parsed_addr, ""} -> equal?(parsed_addr, addr)
    {:ok, %MailAddress{}, _} -> false
    {:error, _} -> false
  end
end
def equal?(%MailAddress{} = addr, str) when is_binary(str),
  do: equal?(str, addr)
def equal?(str_1, str_2) when is_binary(str_1) and is_binary(str_2) do
  opts = Options.relaxed()
  with {:ok, %MailAddress{} = addr_1, ""} <- MailAddress.Parser.parse(str_1, opts),
       {:ok, %MailAddress{} = addr_2, ""} <- MailAddress.Parser.parse(str_2, opts) do
    equal?(addr_1, addr_2)
  else
    {:ok, %MailAddress{}, _} -> false
    {:error, _} -> false
  end
end
@doc false
# Ecto load callback: the stored string must parse completely, otherwise the
# value is considered corrupt and :error is returned.
@spec load(String.t()) :: {:ok, MailAddress.t()} | :error
def load(<<data::binary>>) do
  with {:ok, %MailAddress{} = parsed, ""} <-
         MailAddress.Parser.parse(data, ecto_parse_options()) do
    {:ok, parsed}
  else
    _ -> :error
  end
end
@doc """
Returns the local part of the address.
## Examples
    iex> {:ok, addr} = MailAddress.new("test", "example.org")
    iex> MailAddress.local_part(addr)
    "test"
"""
@spec local_part(MailAddress.t()) :: String.t()
def local_part(%MailAddress{} = addr), do: addr.local_part
@doc """
Checks whether address has local part set.
## Examples
    iex> MailAddress.local_part?(%MailAddress{})
    false
    iex> {:ok, addr} = MailAddress.new("test", "example.org")
    iex> MailAddress.local_part?(addr)
    true
"""
@spec local_part?(MailAddress.t()) :: boolean
def local_part?(%MailAddress{local_part: lp}), do: lp != ""
@doc """
Compares address local parts (case-sensitively).
The second parameter may be either a string or a `MailAddress` struct.
Returns `true` if the local parts are the same, or `false` otherwise.
## Examples
    iex> {:ok, addr_1} = MailAddress.new("test", "example.org")
    iex> {:ok, addr_2} = MailAddress.new("test", "example.com")
    iex> MailAddress.local_parts_equal?(addr_1, addr_2)
    true
    iex> MailAddress.local_parts_equal?(addr_1, "test")
    true
    iex> MailAddress.local_parts_equal?(addr_2, "TEST")
    false
"""
@spec local_parts_equal?(MailAddress.t(), MailAddress.t()) :: boolean
def local_parts_equal?(%MailAddress{local_part: mine}, <<other::binary>>), do: mine == other
def local_parts_equal?(%MailAddress{local_part: lp1}, %MailAddress{local_part: lp2}),
  do: lp1 == lp2
@doc """
Checks whether domain part of address is 'localhost', or the domain
is an address literal and is [127.0.0.1] or [IPv6:::1].
## Examples
    iex> {:ok, addr_1} = MailAddress.new("test", "example.org")
    iex> MailAddress.localhost?(addr_1)
    false
    iex> {:ok, addr_2} = MailAddress.new("test", "localhost", %MailAddress.Options{allow_localhost: true})
    iex> MailAddress.localhost?(addr_2)
    true
    iex> {:ok, addr_3} = MailAddress.new("test", "[127.0.0.1]", %MailAddress.Options{allow_address_literal: true, allow_localhost: true})
    iex> MailAddress.localhost?(addr_3)
    true
    iex> {:ok, addr_4} = MailAddress.new("test", "[192.168.0.1]", %MailAddress.Options{allow_address_literal: true, allow_localhost: true})
    iex> MailAddress.localhost?(addr_4)
    false
    iex> {:ok, addr_5} = MailAddress.new("test", "[IPv6:::1]", %MailAddress.Options{allow_address_literal: true, allow_localhost: true})
    iex> MailAddress.localhost?(addr_5)
    true
"""
@spec localhost?(MailAddress.t()) :: boolean
# NOTE(review): the "localhost" name match is exact-case here; callers that
# need case-insensitive matching go through domains_equal?/localhost_string?.
def localhost?(%MailAddress{domain: "localhost"}), do: true
def localhost?(%MailAddress{address_literal: {127, 0, 0, 1}}), do: true
def localhost?(%MailAddress{address_literal: {0, 0, 0, 0, 0, 0, 0, 1}}), do: true
def localhost?(%MailAddress{}), do: false
@doc """
Checks to see if the given string is "localhost" or equivalent
([127.0.0.1] or [IPv6:::1]).
## Examples:
    iex> MailAddress.localhost_string?("test")
    false
    iex> MailAddress.localhost_string?("LOCALHOST")
    true
    iex> MailAddress.localhost_string?("[127.0.0.1]")
    true
    iex> MailAddress.localhost_string?("[127.0.0.1")
    false
    iex> MailAddress.localhost_string?("[192.168.0.1]")
    false
    iex> MailAddress.localhost_string?("[IPv6:::1]")
    true
"""
@spec localhost_string?(String.t()) :: boolean
# A leading '[' means an address literal: parse it and compare the decoded
# tuple against the IPv4/IPv6 loopback addresses.
def localhost_string?(<<?[::size(8), _rest::binary>> = str) do
  case MailAddress.Parser.Domain.parse(str) do
    {:ok, _, _, {127, 0, 0, 1}} -> true
    {:ok, _, _, {0, 0, 0, 0, 0, 0, 0, 1}} -> true
    _ -> false
  end
end
# Plain names are compared case-insensitively.
def localhost_string?(str) do
  String.downcase(str) == "localhost"
end
@doc """
Checks whether the local part of the given address needs quoting.
The `needs_quoting` flag on the address is updated when the address
is changed, so calling this function is inexpensive.
"""
@spec needs_quoting?(MailAddress.t()) :: boolean
def needs_quoting?(%MailAddress{needs_quoting: nq}), do: nq
@doc """
Creates a new `MailAddress` setting both local and domain parts at
the same time using the provided (or default) `options`.
NOTE: the local part isn't parsed - it is just checked to ensure that
it only contains valid characters. This means that the local part
should be raw rather than quoted form.
Returns either `{:ok, new_address}` or `{:error, error_reason}`.
## Examples
iex> {:ok, addr} = MailAddress.new("test", "example.org")
iex> addr
#MailAddress<<EMAIL>>
iex> {:ok, addr} = MailAddress.new("@test", "example.org")
iex> addr
#MailAddress<\"\\\@test\"@example.org>
iex> MailAddress.new("test", "example.org!")
{:error, "invalid domain"}
"""
@spec new(String.t(), String.t(), Options.t()) :: {:ok, MailAddress.t()} | error()
def new(
<<local::binary>>,
<<domain::binary>>,
%MailAddress.Options{} = options \\ %MailAddress.Options{}
) do
with :ok <- check_local_part(local),
{:ok, parsed_domain, "", literal} <- MailAddress.Parser.Domain.parse(domain) do
%MailAddress{address_literal: literal, local_part: local, domain: parsed_domain}
|> check(options)
else
{:ok, _, _, _} ->
{:error, "invalid domain"}
{:error, _} = err ->
err
end
end
@doc """
Checks whether the address in null (no local part and no domain).
## Examples
iex> MailAddress.null?(%MailAddress{})
true
iex> {:ok, addr} = MailAddress.new("test", "example.org")
iex> MailAddress.null?(addr)
false
iex> {:ok, addr} = MailAddress.new("", "", %MailAddress.Options{allow_null: true})
iex> MailAddress.null?(addr)
true
"""
@spec null?(MailAddress.t()) :: boolean
def null?(%MailAddress{local_part: "", domain: ""}), do: true
def null?(%MailAddress{}), do: false
@doc """
Sets the domain part of the address using the provided (or default)
options.
Returns either `{:ok, new_address}` or `{:error, error_reason}`.
## Examples
iex> {:ok, addr} = MailAddress.set_domain(%MailAddress{}, "test", %MailAddress.Options{allow_null_local_part: true})
iex> MailAddress.domain(addr)
"test"
iex> {:ok, addr} = MailAddress.new("test", "example.com")
iex> MailAddress.domain(addr)
"example.com"
iex> {:ok, addr} = MailAddress.set_domain(addr, "example.org")
iex> MailAddress.domain(addr)
"example.org"
"""
@spec set_domain(MailAddress.t(), String.t(), Options.t()) :: {:ok, MailAddress.t()} | error()
def set_domain(
%MailAddress{} = addr,
<<domain::binary>>,
%MailAddress.Options{} = options \\ %MailAddress.Options{}
) do
case MailAddress.Parser.Domain.parse(domain) do
{:ok, parsed_domain, "", literal} ->
%{addr | address_literal: literal, domain: parsed_domain}
|> check(options)
{:ok, _, _, _} ->
{:error, "invalid domain"}
{:error, _} = err ->
err
end
end
@doc """
Sets the local part of the address using the provided (or default)
options.
NOTE: the local part isn't parsed - it is just checked to ensure that
it only contains valid characters, consequently it should be in raw
unquoted format.
Returns either `{:ok, new_address}` or `{:error, error_reason}`.
## Examples
iex> {:ok, addr} = MailAddress.set_local_part(%MailAddress{}, "test", %MailAddress.Options{require_domain: false})
iex> MailAddress.local_part(addr)
"test"
iex> MailAddress.set_domain(%MailAddress{}, "test", %MailAddress.Options{allow_null_local_part: false})
{:error, "local part can't be null"}
iex> {:ok, addr} = MailAddress.new("test", "example.org")
iex> MailAddress.local_part(addr)
"test"
iex> {:ok, addr} = MailAddress.set_local_part(addr, "other")
iex> MailAddress.local_part(addr)
"other"
"""
@spec set_local_part(MailAddress.t(), String.t(), Options.t()) ::
{:ok, MailAddress.t()} | error()
def set_local_part(
%MailAddress{} = addr,
<<local::binary>>,
%MailAddress.Options{} = options \\ %MailAddress.Options{}
) do
with :ok <- check_local_part(local) do
%{addr | local_part: local}
|> check(options)
end
end
  @doc false
  # NOTE(review): appears to report the underlying primitive type for an
  # external casting layer (Ecto.Type-style); confirm against the integration
  # that calls `type/0` before relying on this.
  def type, do: :string
end
|
lib/mail_address.ex
| 0.889828
| 0.823009
|
mail_address.ex
|
starcoder
|
defmodule AdventOfCode.Day1 do
  @moduledoc """
  Advent of Code 2016, Day 1: follow "turn then walk" instructions on a grid.

  `solve/1` returns `{part_one, part_two}`:

    * part one — taxicab distance from the origin to the final position
    * part two — taxicab distance to the first position visited twice, or
      `nil` if the path never crosses itself
  """

  @input "R5, L2, L1, R1, R3, R3, L3, R3, R4, L2, R4, L4, R4, R3, L2, L1, L1, R2, R4, R4, L4, R3, L2, R1, L4, R1, R3, L5, L4, L5, R3, L3, L1, L1, R4, R2, R2, L1, L4, R191, R5, L2, R46, R3, L1, R74, L2, R2, R187, R3, R4, R1, L4, L4, L2, R4, L5, R4, R3, L2, L1, R3, R3, R3, R1, R1, L4, R4, R1, R5, R2, R1, R3, L4, L2, L2, R1, L3, R1, R3, L5, L3, R5, R3, R4, L1, R3, R2, R1, R2, L4, L1, L1, R3, L3, R4, L2, L4, L5, L5, L4, R2, R5, L4, R4, L2, R3, L4, L3, L5, R5, L4, L2, R3, R5, R5, L1, L4, R3, L1, R2, L5, L1, R4, L1, R5, R1, L4, L4, L4, R4, R3, L5, R1, L3, R4, R3, L2, L1, R1, R2, R2, R2, L1, L1, L2, L5, L3, L1"

  @doc """
  Solves both puzzle parts for `input`.

  `input` defaults to the embedded puzzle input, so existing `solve/0`
  callers are unaffected; passing a custom instruction string (e.g.
  `"R8, R4, R4, R8"`) is now supported.
  """
  def solve(input \\ @input) do
    input
    |> parse()
    |> walk()
    |> answers()
  end

  # Splits "R5, L2, ..." into a list of {:right | :left, distance} tuples.
  defp parse(string) do
    string
    |> String.split(",")
    |> Enum.map(fn char_number ->
      char_number
      |> String.trim()
      |> parse_char_number()
    end)
  end

  # Replays the instructions. Coordinates: +x is east, +y is north (as
  # established by the {:north, :right} -> {:east, x + distance, y} clause).
  # Returns {final_facing, x, y, history} where history holds each traversed
  # segment, most recent first.
  defp walk(directions) do
    directions
    |> Enum.reduce({:north, 0, 0, []}, fn {dir, distance}, {compass, x, y, history} ->
      {new_dir, new_x, new_y} =
        case {compass, dir} do
          {:north, :right} -> {:east, x + distance, y}
          {:north, :left} -> {:west, x - distance, y}
          {:east, :right} -> {:south, x, y - distance}
          {:east, :left} -> {:north, x, y + distance}
          {:south, :right} -> {:west, x - distance, y}
          {:south, :left} -> {:east, x + distance, y}
          {:west, :right} -> {:north, x, y + distance}
          {:west, :left} -> {:south, x, y - distance}
        end

      {new_dir, new_x, new_y, [{{:from, x, y}, {:to, new_x, new_y}} | history]}
    end)
  end

  # Computes both answers from the walk result.
  #
  # Fix: previously a path with no self-intersection crashed with a
  # FunctionClauseError (distance(nil)); part two is now nil in that case.
  defp answers({_dir, x, y, history}) do
    part_two =
      history
      |> visited_places()
      |> first_duplicate()
      |> case do
        nil -> nil
        place -> distance(place)
      end

    {distance({x, y}), part_two}
  end

  # Taxicab (Manhattan) distance from the origin.
  defp distance({x, y}) do
    abs(x) + abs(y)
  end

  # "R5" -> {:right, 5}. Raises on malformed tokens (assertive by design).
  defp parse_char_number(<<dir, number::binary>>) do
    {
      dir_to_atom(dir),
      String.to_integer(number)
    }
  end

  defp dir_to_atom(?R), do: :right
  defp dir_to_atom(?L), do: :left

  # Expands each segment into the grid points it covers, in travel order,
  # excluding each segment's endpoint (it reappears as the next segment's
  # start, so every point is listed once per pass).
  defp visited_places(history) do
    history
    |> Enum.reverse()
    |> Enum.flat_map(fn {{:from, from_x, from_y}, {:to, to_x, to_y}} ->
      # Segments are always axis-aligned (see walk/1), so exactly one
      # coordinate matches; Enum.drop(-1) removes the segment endpoint.
      cond do
        from_x == to_x ->
          Range.new(from_y, to_y) |> Enum.drop(-1) |> Enum.map(&{from_x, &1})

        from_y == to_y ->
          Range.new(from_x, to_x) |> Enum.drop(-1) |> Enum.map(&{&1, from_y})
      end
    end)
  end

  # Returns the first element that repeats, or nil. Uses a MapSet for O(1)
  # membership (the previous list-based `place in visited` scan was O(n²)).
  defp first_duplicate(places), do: first_duplicate(places, MapSet.new())

  defp first_duplicate([place | rest], seen) do
    if MapSet.member?(seen, place) do
      place
    else
      first_duplicate(rest, MapSet.put(seen, place))
    end
  end

  defp first_duplicate([], _seen), do: nil
end
|
lib/advent_of_code/day1.ex
| 0.582372
| 0.796292
|
day1.ex
|
starcoder
|
defmodule Hedwig.Robot do
  @moduledoc """
  Defines a robot.

  Robots receive messages from a chat source (XMPP, Slack, Console, etc), and
  dispatch them to matching responders. See the documentation for
  `Hedwig.Responder` for details on responders.

  When used, the robot expects the `:otp_app` as option. The `:otp_app` should
  point to an OTP application that has the robot configuration. For example,
  the robot:

      defmodule MyApp.Robot do
        use Hedwig.Robot, otp_app: :my_app
      end

  Could be configured with:

      config :my_app, MyApp.Robot,
        adapter: Hedwig.Adapters.Console,
        name: "alfred"

  Most of the configuration that goes into the `config` is specific to the
  adapter. Be sure to check the documentation for the adapter in use for all
  of the available options.

  ## Robot configuration

  * `adapter` - the adapter module name.
  * `name` - the name the robot will respond to.
  * `aka` - an alias the robot will respond to.
  * `log_level` - the level to use when logging output.
  * `responders` - a list of responders specified in the following format:
    `{module, kwlist}`.
  """

  # Runtime state of a robot process:
  #   adapter       - pid of the adapter process started in init/1
  #   aka           - optional alias the robot answers to
  #   name          - the robot's primary name
  #   opts          - the resolved configuration keyword list
  #   responder_sup - pid of the responder supervisor
  #   responders    - the configured `{module, opts}` responder list
  defstruct adapter: nil,
            aka: nil,
            name: "",
            opts: [],
            responder_sup: nil,
            responders: []

  # Injects a full GenServer implementation into the using module. All
  # injected callbacks are declared overridable at the bottom of the quote,
  # so user modules can customize behavior per callback.
  defmacro __using__(opts) do
    quote location: :keep, bind_quoted: [opts: opts] do
      use GenServer
      require Logger

      # Resolved at compile time from the :otp_app config.
      {otp_app, adapter, robot_config} =
        Hedwig.Robot.Supervisor.parse_config(__MODULE__, opts)

      @adapter adapter
      # The adapter module gets a chance to inject code into the robot module.
      @before_compile adapter
      @config robot_config
      @log_level robot_config[:log_level] || :debug
      @otp_app otp_app

      def start_link(opts \\ []) do
        Hedwig.start_robot(__MODULE__, opts)
      end

      def stop(robot) do
        Hedwig.stop_robot(robot)
      end

      # Merges runtime opts over the compile-time application config.
      def config(opts \\ []) do
        Hedwig.Robot.Supervisor.config(__MODULE__, @otp_app, opts)
      end

      # Logs at the configured level; the Logger function name is selected
      # at compile time via unquote(@log_level).
      def log(msg) do
        Logger.unquote(@log_level)(fn ->
          msg
        end, [])
      end

      def __adapter__, do: @adapter

      # GenServer init: resolves config, schedules responder installation
      # (via cast, so init returns quickly), and starts the adapter and the
      # responder supervisor.
      def init({robot, opts}) do
        opts = robot.config(opts)
        aka = Keyword.get(opts, :aka)
        name = Keyword.get(opts, :name)
        {responders, opts} = Keyword.pop(opts, :responders, [])

        unless responders == [] do
          GenServer.cast(self(), {:install_responders, responders})
        end

        {:ok, adapter} = @adapter.start_link(robot, opts)
        {:ok, responder_sup} = Hedwig.Responder.Supervisor.start_link()

        {:ok, %Hedwig.Robot{
          adapter: adapter,
          aka: aka,
          name: name,
          opts: opts,
          responder_sup: responder_sup,
          responders: responders}}
      end

      # Default user-overridable hooks; see Hedwig.Robot.handle_connect/2,
      # handle_disconnect/3 and handle_in/2 for the expected return shapes.
      def handle_connect(state) do
        {:ok, state}
      end

      def handle_disconnect(_reason, state) do
        {:reconnect, state}
      end

      # Default: dispatch real messages to responders, ignore anything else.
      def handle_in(%Hedwig.Message{} = msg, state) do
        {:dispatch, msg, state}
      end

      def handle_in(_msg, state) do
        {:noreply, state}
      end

      def handle_call(:name, _from, %{name: name} = state) do
        {:reply, name, state}
      end

      def handle_call(:responders, _from, %{responders: responders} = state) do
        {:reply, responders, state}
      end

      # Bridges the adapter's synchronous connect notification to the
      # user-overridable handle_connect/1 hook.
      def handle_call(:handle_connect, _from, state) do
        case handle_connect(state) do
          {:ok, state} ->
            {:reply, :ok, state}

          {:stop, reason, state} ->
            {:stop, reason, state}
        end
      end

      # Bridges the adapter's disconnect notification to the user-overridable
      # handle_disconnect/2 hook, translating its result into a reply.
      def handle_call({:handle_disconnect, reason}, _from, state) do
        case handle_disconnect(reason, state) do
          {:reconnect, state} ->
            {:reply, :reconnect, state}

          {:reconnect, timer, state} ->
            {:reply, {:reconnect, timer}, state}

          {:disconnect, reason, state} ->
            {:stop, reason, {:disconnect, reason}, state}
        end
      end

      # Outbound message paths: forwarded straight to the adapter.
      def handle_cast({:send, msg}, %{adapter: adapter} = state) do
        @adapter.send(adapter, msg)
        {:noreply, state}
      end

      def handle_cast({:reply, msg}, %{adapter: adapter} = state) do
        @adapter.reply(adapter, msg)
        {:noreply, state}
      end

      def handle_cast({:emote, msg}, %{adapter: adapter} = state) do
        @adapter.emote(adapter, msg)
        {:noreply, state}
      end

      # Inbound message path: runs the user hook, then either dispatches to
      # all responders, sends directly via the adapter, or drops the message.
      # Non-%Hedwig.Message{} payloads with a dispatch/send-style return are
      # logged and ignored.
      def handle_cast({:handle_in, msg}, %{responder_sup: sup} = state) do
        case handle_in(msg, state) do
          {:dispatch, %Hedwig.Message{} = msg, state} ->
            responders = Supervisor.which_children(sup)
            Hedwig.Responder.dispatch(msg, responders)
            {:noreply, state}

          {:dispatch, _msg, state} ->
            log_incorrect_return(:dispatch)
            {:noreply, state}

          {fun, {%Hedwig.Message{} = msg, text}, state} when fun in [:send, :reply, :emote] ->
            apply(Hedwig.Responder, fun, [msg, text])
            {:noreply, state}

          {fun, {_msg, _text}, state} when fun in [:send, :reply, :emote] ->
            log_incorrect_return(fun)
            {:noreply, state}

          {:noreply, state} ->
            {:noreply, state}
        end
      end

      # Starts one supervised process per configured responder; scheduled
      # from init/1 so installation happens after the robot is up.
      def handle_cast({:install_responders, responders}, %{aka: aka, name: name} = state) do
        for {module, opts} <- responders do
          args = [module, {aka, name, opts, self()}]
          Supervisor.start_child(state.responder_sup, args)
        end
        {:noreply, state}
      end

      # Catch-all: silently ignores stray messages (timers, monitors, etc).
      def handle_info(msg, state) do
        {:noreply, state}
      end

      def terminate(_reason, _state) do
        :ok
      end

      def code_change(_old, state, _extra) do
        {:ok, state}
      end

      defp log_incorrect_return(atom) do
        Logger.warn """
        #{inspect atom} return value from `handle_in/2` only works with `%Hedwig.Message{}` structs.
        """
      end

      # Every callback above may be replaced in the using module.
      defoverridable [
        {:handle_connect, 1},
        {:handle_disconnect, 2},
        {:handle_in, 2},
        {:terminate, 2},
        {:code_change, 3},
        {:handle_info, 2}
      ]
    end
  end

  @doc false
  def start_link(robot, opts) do
    GenServer.start_link(robot, {robot, opts})
  end

  @doc """
  Send a message via the robot.
  """
  def send(pid, msg) do
    GenServer.cast(pid, {:send, msg})
  end

  @doc """
  Send a reply message via the robot.
  """
  def reply(pid, msg) do
    GenServer.cast(pid, {:reply, msg})
  end

  @doc """
  Send an emote message via the robot.
  """
  def emote(pid, msg) do
    GenServer.cast(pid, {:emote, msg})
  end

  @doc """
  Get the name of the robot.
  """
  def name(pid) do
    GenServer.call(pid, :name)
  end

  @doc """
  Get the list of the robot's responders.
  """
  def responders(pid) do
    GenServer.call(pid, :responders)
  end

  @doc """
  Invokes a user defined `handle_in/2` function, if defined.

  This function should be called by an adapter when a message arrives but
  should be handled by the user module.

  Returning `{:dispatch, msg, state}` will dispatch the message
  to all installed responders.

  Returning `{:send, {msg, text}, state}`, `{:reply, {msg, text}, state}`,
  or `{:emote, {msg, text}, state}` will send the message directly to the
  adapter without dispatching to any responders.

  Returning `{:noreply, state}` will ignore the message.
  """
  @spec handle_in(pid, any) :: :ok
  def handle_in(robot, msg) do
    GenServer.cast(robot, {:handle_in, msg})
  end

  @doc """
  Invokes a user defined `handle_connect/1` function, if defined.

  If the user has defined an `handle_connect/1` in the robot module, it will be
  called with the robot's state. It is expected that the function return
  `{:ok, state}` or `{:stop, reason, state}`.
  """
  @spec handle_connect(pid, integer) :: :ok
  def handle_connect(robot, timeout \\ 5000) do
    GenServer.call(robot, :handle_connect, timeout)
  end

  @doc """
  Invokes a user defined `handle_disconnect/1` function, if defined.

  If the user has defined an `handle_disconnect/1` in the robot module, it will be
  called with the robot's state. It is expected that the function return
  `{:reconnect, state}` `{:reconnect, integer, state}`, or `{:disconnect, reason, state}`.
  """
  @spec handle_disconnect(pid, any, integer) :: :reconnect | {:reconnect, integer} | {:disconnect, any}
  def handle_disconnect(robot, reason, timeout \\ 5000) do
    GenServer.call(robot, {:handle_disconnect, reason}, timeout)
  end
end
|
lib/hedwig/robot.ex
| 0.836655
| 0.500549
|
robot.ex
|
starcoder
|
defmodule AWS.MarketplaceCommerceAnalytics do
  @moduledoc """
  Provides AWS Marketplace business intelligence data on-demand.
  """

  @service "marketplacecommerceanalytics"
  @target_prefix "MarketplaceCommerceAnalytics20150701"

  @doc """
  Given a data set type and data set publication date, asynchronously publishes
  the requested data set to the specified S3 bucket and notifies the specified
  SNS topic once the data is available.

  Returns a unique request identifier that can be used to correlate requests
  with notifications from the SNS topic. Data sets are published as CSV with
  the file name {data_set_type}_YYYY-MM-DD.csv; an existing file with the same
  name is overwritten. Requires a Role with an attached permissions policy
  allowing s3:PutObject, s3:GetBucketLocation, sns:GetTopicAttributes,
  sns:Publish, and iam:GetRolePolicy.
  """
  def generate_data_set(client, input, options \\ []),
    do: request(client, "GenerateDataSet", input, options)

  @doc """
  Given a data set type and a from date, asynchronously publishes the requested
  customer support data to the specified S3 bucket and notifies the specified
  SNS topic once the data is available.

  Returns a unique request identifier that can be used to correlate requests
  with notifications from the SNS topic. Data sets are published as CSV with
  the file name {data_set_type}_YYYY-MM-DD'T'HH-mm-ss'Z'.csv; an existing file
  with the same name is overwritten. Requires a Role with an attached
  permissions policy allowing s3:PutObject, s3:GetBucketLocation,
  sns:GetTopicAttributes, sns:Publish, and iam:GetRolePolicy.
  """
  def start_support_data_export(client, input, options \\ []),
    do: request(client, "StartSupportDataExport", input, options)

  # Builds and signs an AWS JSON 1.1 request for this service, then posts it.
  @spec request(AWS.Client.t(), binary(), map(), list()) ::
          {:ok, map() | nil, map()}
          | {:error, term()}
  defp request(client, action, input, options) do
    scoped_client = %{client | service: @service}
    host = build_host(@service, scoped_client)
    url = build_url(host, scoped_client)
    payload = encode!(scoped_client, input)

    unsigned_headers = [
      {"Host", host},
      {"Content-Type", "application/x-amz-json-1.1"},
      {"X-Amz-Target", "#{@target_prefix}.#{action}"}
    ]

    signed_headers = AWS.Request.sign_v4(scoped_client, "POST", url, unsigned_headers, payload)
    post(scoped_client, url, payload, signed_headers, options)
  end

  # Issues the HTTP POST and normalizes the result. Only a 200 counts as
  # success; an empty body decodes to nil.
  defp post(client, url, payload, headers, options) do
    case AWS.Client.request(client, :post, url, payload, headers, options) do
      {:ok, %{status_code: 200, body: body} = response} ->
        decoded = if body != "", do: decode!(client, body)
        {:ok, decoded, response}

      {:ok, response} ->
        {:error, {:unexpected_response, response}}

      {:error, _reason} = error ->
        error
    end
  end

  # Host resolution: a "local" region uses the configured endpoint (or plain
  # "localhost"); otherwise the standard prefix.region.endpoint form.
  defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}), do: endpoint
  defp build_host(_endpoint_prefix, %{region: "local"}), do: "localhost"

  defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}),
    do: "#{endpoint_prefix}.#{region}.#{endpoint}"

  defp build_url(host, %{:proto => proto, :port => port}), do: "#{proto}://#{host}:#{port}/"

  defp encode!(client, payload), do: AWS.Client.encode!(client, payload, :json)

  defp decode!(client, payload), do: AWS.Client.decode!(client, payload, :json)
end
|
lib/aws/generated/marketplace_commerce_analytics.ex
| 0.833528
| 0.475971
|
marketplace_commerce_analytics.ex
|
starcoder
|
defmodule Yatzy.Sheet do
  @moduledoc """
  Operations on a sheets used to keep track of player's
  score
  """

  alias Yatzy.Roll
  alias Yatzy.Scoring
  alias Yatzy.Scoring.Score

  # Rule groupings used when totalling a sheet.
  @upper_section [:ones, :twos, :threes, :fours, :fives, :sixes]
  @lower_section [
    :one_pair,
    :two_pairs,
    :three_of_a_kind,
    :four_of_a_kind,
    :small_straight,
    :large_straight,
    :chance,
    :yatzy
  ]

  # Bonus awarded when the upper section reaches the limit.
  @upper_section_bonus_limit 63
  @upper_section_bonus_value 50

  use TypedStruct

  typedstruct do
    field :ones, struct(), default: %Yatzy.Scoring.Ones{}
    field :twos, struct(), default: %Yatzy.Scoring.Twos{}
    field :threes, struct(), default: %Yatzy.Scoring.Threes{}
    field :fours, struct(), default: %Yatzy.Scoring.Fours{}
    field :fives, struct(), default: %Yatzy.Scoring.Fives{}
    field :sixes, struct(), default: %Yatzy.Scoring.Sixes{}
    field :one_pair, struct(), default: %Yatzy.Scoring.OnePair{}
    field :two_pairs, struct(), default: %Yatzy.Scoring.TwoPairs{}
    field :three_of_a_kind, struct(), default: %Yatzy.Scoring.ThreeOfAKind{}
    field :four_of_a_kind, struct(), default: %Yatzy.Scoring.FourOfAKind{}
    field :small_straight, struct(), default: %Yatzy.Scoring.SmallStraight{}
    field :large_straight, struct(), default: %Yatzy.Scoring.LargeStraight{}
    field :chance, struct(), default: %Yatzy.Scoring.Chance{}
    field :yatzy, struct(), default: %Yatzy.Scoring.Yatzy{}
  end

  @doc """
  Calculate the total score for a given sheet

  ## Examples

      iex> Yatzy.Sheet.total(%Yatzy.Sheet{})
      0
  """
  @spec total(sheet :: t()) :: integer()
  def total(sheet = %__MODULE__{}) do
    upper = section_total(sheet, @upper_section)
    lower = section_total(sheet, @lower_section)

    upper + upper_section_bonus(upper) + lower
  end

  @doc """
  Update the score sheet with a roll under a rule.

  ## Example

      iex> Yatzy.Sheet.record(%Yatzy.Sheet{}, %Yatzy.Roll{dice: [1, 1, 1, 1, 1]}, :ones) |> Yatzy.Sheet.total()
      5
  """
  @spec record(sheet :: t(), roll :: Roll.t(), rule :: atom()) :: t()
  def record(sheet, roll, rule) do
    case valid_rule(sheet, rule) do
      true -> Map.update!(sheet, rule, &update_roll(&1, roll))
      false -> sheet
    end
  end

  @doc """
  Determine if the whole sheet has been filled out

  ## Example

      iex> Yatzy.Sheet.completed?(%Yatzy.Sheet{})
      false
  """
  @spec completed?(sheet :: t()) :: boolean()
  def completed?(sheet = %__MODULE__{}) do
    sheet
    |> Map.from_struct()
    |> Enum.all?(fn {_rule, scoring} ->
      # A rule counts as filled once it holds a non-initial roll.
      scoring.roll.counter != 0 && scoring.roll.dice != []
    end)
  end

  # Only the first roll recorded against a rule sticks; later ones are ignored.
  @spec update_roll(score :: Scoring.t(), roll :: Roll.t()) :: Scoring.t()
  defp update_roll(score = %{roll: %Roll{dice: []}}, roll), do: %{score | roll: roll}
  defp update_roll(score, _), do: score

  # A rule is valid iff it is one of the sheet's struct fields.
  @spec valid_rule(sheet :: t(), rule :: atom()) :: boolean()
  defp valid_rule(sheet, rule) do
    sheet |> Map.from_struct() |> Map.has_key?(rule)
  end

  # Sums the executed scores of the given rule fields.
  @spec section_total(sheet :: t(), [atom()]) :: integer()
  defp section_total(sheet = %__MODULE__{}, fields) do
    fields
    |> Enum.map(fn field -> sheet |> Map.fetch!(field) |> Score.execute() end)
    |> Enum.sum()
  end

  @spec upper_section_bonus(total :: integer()) :: integer()
  defp upper_section_bonus(total) do
    if total >= @upper_section_bonus_limit, do: @upper_section_bonus_value, else: 0
  end
end
|
lib/yatzy/sheet.ex
| 0.827131
| 0.433082
|
sheet.ex
|
starcoder
|
defmodule AWS.KMS do
@moduledoc """
AWS Key Management Service
AWS Key Management Service (AWS KMS) is an encryption and key management
web service. This guide describes the AWS KMS operations that you can call
programmatically. For general information about AWS KMS, see the [AWS Key
Management Service Developer
Guide](http://docs.aws.amazon.com/kms/latest/developerguide/).
<note> AWS provides SDKs that consist of libraries and sample code for
various programming languages and platforms (Java, Ruby, .Net, iOS,
Android, etc.). The SDKs provide a convenient way to create programmatic
access to AWS KMS and other AWS services. For example, the SDKs take care
of tasks such as signing requests (see below), managing errors, and
retrying requests automatically. For more information about the AWS SDKs,
including how to download and install them, see [Tools for Amazon Web
Services](http://aws.amazon.com/tools/).
</note> We recommend that you use the AWS SDKs to make programmatic API
calls to AWS KMS.
Clients must support TLS (Transport Layer Security) 1.0. We recommend TLS
1.2. Clients must also support cipher suites with Perfect Forward Secrecy
(PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral
Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later
support these modes.
**Signing Requests**
Requests must be signed by using an access key ID and a secret access key.
We strongly recommend that you *do not* use your AWS account (root) access
key ID and secret key for everyday work with AWS KMS. Instead, use the
access key ID and secret access key for an IAM user, or you can use the AWS
Security Token Service to generate temporary security credentials that you
can use to sign requests.
All AWS KMS operations require [Signature Version
4](http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
**Logging API Requests**
AWS KMS supports AWS CloudTrail, a service that logs AWS API calls and
related events for your AWS account and delivers them to an Amazon S3
bucket that you specify. By using the information collected by CloudTrail,
you can determine what requests were made to AWS KMS, who made the request,
when it was made, and so on. To learn more about CloudTrail, including how
to turn it on and find your log files, see the [AWS CloudTrail User
Guide](http://docs.aws.amazon.com/awscloudtrail/latest/userguide/).
**Additional Resources**
For more information about credentials and request signing, see the
following:
<ul> <li> [AWS Security
Credentials](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html)
- This topic provides general information about the types of credentials
used for accessing AWS.
</li> <li> [Temporary Security
Credentials](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html)
- This section of the *IAM User Guide* describes how to create and use
temporary security credentials.
</li> <li> [Signature Version 4 Signing
Process](http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)
- This set of topics walks you through the process of signing a request
using an access key ID and a secret access key.
</li> </ul> **Commonly Used APIs**
Of the APIs discussed in this guide, the following will prove the most
useful for most applications. You will likely perform actions other than
these, such as creating keys and assigning policies, by using the console.
<ul> <li> `Encrypt`
</li> <li> `Decrypt`
</li> <li> `GenerateDataKey`
</li> <li> `GenerateDataKeyWithoutPlaintext`
</li> </ul>
"""
@doc """
Cancels the deletion of a customer master key (CMK). When this operation is
successful, the CMK is set to the `Disabled` state. To enable a CMK, use
`EnableKey`.
For more information about scheduling and canceling deletion of a CMK, see
[Deleting Customer Master
Keys](http://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html)
in the *AWS Key Management Service Developer Guide*.
"""
def cancel_key_deletion(client, input, options \\ []) do
request(client, "CancelKeyDeletion", input, options)
end
@doc """
Creates a display name for a customer master key. An alias can be used to
identify a key and should be unique. The console enforces a one-to-one
mapping between the alias and a key. An alias name can contain only
alphanumeric characters, forward slashes (/), underscores (_), and dashes
(-). An alias must start with the word "alias" followed by a forward slash
(alias/). An alias that begins with "aws" after the forward slash
(alias/aws...) is reserved by Amazon Web Services (AWS).
The alias and the key it is mapped to must be in the same AWS account and
the same region.
To map an alias to a different key, call `UpdateAlias`.
"""
def create_alias(client, input, options \\ []) do
request(client, "CreateAlias", input, options)
end
@doc """
Adds a grant to a key to specify who can use the key and under what
conditions. Grants are alternate permission mechanisms to key policies.
For more information about grants, see
[Grants](http://docs.aws.amazon.com/kms/latest/developerguide/grants.html)
in the *AWS Key Management Service Developer Guide*.
"""
def create_grant(client, input, options \\ []) do
request(client, "CreateGrant", input, options)
end
@doc """
Creates a customer master key (CMK).
You can use a CMK to encrypt small amounts of data (4 KiB or less)
directly, but CMKs are more commonly used to encrypt data encryption keys
(DEKs), which are used to encrypt raw data. For more information about DEKs
and the difference between CMKs and DEKs, see the following:
<ul> <li> The `GenerateDataKey` operation
</li> <li> [AWS Key Management Service
Concepts](http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html)
in the *AWS Key Management Service Developer Guide*
</li> </ul>
"""
def create_key(client, input, options \\ []) do
request(client, "CreateKey", input, options)
end
@doc """
Decrypts ciphertext. Ciphertext is plaintext that has been previously
encrypted by using any of the following functions:
<ul> <li> `GenerateDataKey`
</li> <li> `GenerateDataKeyWithoutPlaintext`
</li> <li> `Encrypt`
</li> </ul> Note that if a caller has been granted access permissions to
all keys (through, for example, IAM user policies that grant `Decrypt`
permission on all resources), then ciphertext encrypted by using keys in
other accounts where the key grants access to the caller can be decrypted.
To remedy this, we recommend that you do not grant `Decrypt` access in an
IAM user policy. Instead grant `Decrypt` access only in key policies. If
you must grant `Decrypt` access in an IAM user policy, you should scope the
resource to specific keys or to specific trusted accounts.
"""
def decrypt(client, input, options \\ []) do
request(client, "Decrypt", input, options)
end
@doc """
Deletes the specified alias. To map an alias to a different key, call
`UpdateAlias`.
"""
def delete_alias(client, input, options \\ []) do
request(client, "DeleteAlias", input, options)
end
@doc """
Deletes key material that you previously imported and makes the specified
customer master key (CMK) unusable. For more information about importing
key material into AWS KMS, see [Importing Key
Material](http://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
in the *AWS Key Management Service Developer Guide*.
When the specified CMK is in the `PendingDeletion` state, this operation
does not change the CMK's state. Otherwise, it changes the CMK's state to
`PendingImport`.
After you delete key material, you can use `ImportKeyMaterial` to reimport
the same key material into the CMK.
"""
def delete_imported_key_material(client, input, options \\ []) do
request(client, "DeleteImportedKeyMaterial", input, options)
end
@doc """
Provides detailed information about the specified customer master key.
"""
def describe_key(client, input, options \\ []) do
request(client, "DescribeKey", input, options)
end
@doc """
Sets the state of a customer master key (CMK) to disabled, thereby
preventing its use for cryptographic operations. For more information about
how key state affects the use of a CMK, see [How Key State Affects the Use
of a Customer Master
Key](http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
in the *AWS Key Management Service Developer Guide*.
"""
def disable_key(client, input, options \\ []) do
request(client, "DisableKey", input, options)
end
@doc """
Disables rotation of the specified key.
"""
def disable_key_rotation(client, input, options \\ []) do
request(client, "DisableKeyRotation", input, options)
end
@doc """
Marks a key as enabled, thereby permitting its use.
"""
def enable_key(client, input, options \\ []) do
request(client, "EnableKey", input, options)
end
@doc """
Enables rotation of the specified customer master key.
"""
def enable_key_rotation(client, input, options \\ []) do
request(client, "EnableKeyRotation", input, options)
end
@doc """
Encrypts plaintext into ciphertext by using a customer master key. The
`Encrypt` function has two primary use cases:
<ul> <li> You can encrypt up to 4 KB of arbitrary data such as an RSA key,
a database password, or other sensitive customer information.
</li> <li> If you are moving encrypted data from one region to another, you
can use this API to encrypt in the new region the plaintext data key that
was used to encrypt the data in the original region. This provides you with
an encrypted copy of the data key that can be decrypted in the new region
and used there to decrypt the encrypted data.
</li> </ul> Unless you are moving encrypted data from one region to
another, you don't use this function to encrypt a generated data key within
a region. You retrieve data keys already encrypted by calling the
`GenerateDataKey` or `GenerateDataKeyWithoutPlaintext` function. Data keys
don't need to be encrypted again by calling `Encrypt`.
If you want to encrypt data locally in your application, you can use the
`GenerateDataKey` function to return a plaintext data encryption key and a
copy of the key encrypted under the customer master key (CMK) of your
choosing.
"""
def encrypt(client, input, options \\ []) do
request(client, "Encrypt", input, options)
end
@doc """
Returns a data encryption key that you can use in your application to
encrypt data locally.

You must specify the customer master key (CMK) under which to generate the
data key. You must also specify the length of the data key using either the
`KeySpec` or `NumberOfBytes` field. You must specify one field or the
other, but not both. For common key lengths (128-bit and 256-bit symmetric
keys), we recommend that you use `KeySpec`.

This operation returns a plaintext copy of the data key in the `Plaintext`
field of the response, and an encrypted copy of the data key in the
`CiphertextBlob` field. The data key is encrypted under the CMK specified
in the `KeyId` field of the request.

We recommend that you use the following pattern to encrypt data locally in
your application:

<ol> <li> Use this operation (`GenerateDataKey`) to retrieve a data
encryption key.

</li> <li> Use the plaintext data encryption key (returned in the
`Plaintext` field of the response) to encrypt data locally, then erase the
plaintext data key from memory.

</li> <li> Store the encrypted data key (returned in the `CiphertextBlob`
field of the response) alongside the locally encrypted data.

</li> </ol> To decrypt data locally:

<ol> <li> Use the `Decrypt` operation to decrypt the encrypted data key
into a plaintext copy of the data key.

</li> <li> Use the plaintext data key to decrypt data locally, then erase
the plaintext data key from memory.

</li> </ol> To return only an encrypted copy of the data key, use
`GenerateDataKeyWithoutPlaintext`. To return an arbitrary unpredictable
byte string, use `GenerateRandom`.

If you use the optional `EncryptionContext` field, you must store at least
enough information to be able to reconstruct the full encryption context
when you later send the ciphertext to the `Decrypt` operation. It is a good
practice to choose an encryption context that you can reconstruct on the
fly to better secure the ciphertext. For more information, see [Encryption
Context](http://docs.aws.amazon.com/kms/latest/developerguide/encryption-context.html)
in the *AWS Key Management Service Developer Guide*.
"""
def generate_data_key(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "GenerateDataKey", input, options)
end
@doc """
Returns a data encryption key encrypted under a customer master key (CMK).
This operation is identical to `GenerateDataKey` but returns only the
encrypted copy of the data key.

This operation is useful in a system that has multiple components with
different degrees of trust. For example, consider a system that stores
encrypted data in containers. Each container stores the encrypted data and
an encrypted copy of the data key. One component of the system, called the
*control plane*, creates new containers. When it creates a new container,
it uses this operation (`GenerateDataKeyWithoutPlaintext`) to get an
encrypted data key and then stores it in the container. Later, a different
component of the system, called the *data plane*, puts encrypted data into
the containers. To do this, it passes the encrypted data key to the
`Decrypt` operation, then uses the returned plaintext data key to encrypt
data, and finally stores the encrypted data in the container. In this
system, the control plane never sees the plaintext data key.
"""
def generate_data_key_without_plaintext(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "GenerateDataKeyWithoutPlaintext", input, options)
end
@doc """
Generates an unpredictable byte string.
"""
def generate_random(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "GenerateRandom", input, options)
end
@doc """
Retrieves a policy attached to the specified key.
"""
def get_key_policy(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "GetKeyPolicy", input, options)
end
@doc """
Retrieves a Boolean value that indicates whether key rotation is enabled
for the specified key.
"""
def get_key_rotation_status(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "GetKeyRotationStatus", input, options)
end
@doc """
Returns the items you need in order to import key material into AWS KMS
from your existing key management infrastructure. For more information
about importing key material into AWS KMS, see [Importing Key
Material](http://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
in the *AWS Key Management Service Developer Guide*.

You must specify the key ID of the customer master key (CMK) into which you
will import key material. This CMK's `Origin` must be `EXTERNAL`. You must
also specify the wrapping algorithm and type of wrapping key (public key)
that you will use to encrypt the key material.

This operation returns a public key and an import token. Use the public key
to encrypt the key material. Store the import token to send with a
subsequent `ImportKeyMaterial` request. The public key and import token
from the same response must be used together. These items are valid for 24
hours, after which they cannot be used for a subsequent `ImportKeyMaterial`
request. To retrieve new ones, send another `GetParametersForImport`
request.
"""
def get_parameters_for_import(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "GetParametersForImport", input, options)
end
@doc """
Imports key material into an AWS KMS customer master key (CMK) from your
existing key management infrastructure. For more information about
importing key material into AWS KMS, see [Importing Key
Material](http://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
in the *AWS Key Management Service Developer Guide*.

You must specify the key ID of the CMK to import the key material into.
This CMK's `Origin` must be `EXTERNAL`. You must also send an import token
and the encrypted key material. Send the import token that you received in
the same `GetParametersForImport` response that contained the public key
that you used to encrypt the key material. You must also specify whether
the key material expires and if so, when. When the key material expires,
AWS KMS deletes the key material and the CMK becomes unusable. To use the
CMK again, you can reimport the same key material. If you set an expiration
date, you can change it only by reimporting the same key material and
specifying a new expiration date.

When this operation is successful, the specified CMK's key state changes to
`Enabled`, and you can use the CMK.

After you successfully import key material into a CMK, you can reimport the
same key material into that CMK, but you cannot import different key
material.
"""
def import_key_material(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "ImportKeyMaterial", input, options)
end
@doc """
Lists all of the key aliases in the account.
"""
def list_aliases(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "ListAliases", input, options)
end
@doc """
List the grants for a specified key.
"""
def list_grants(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "ListGrants", input, options)
end
@doc """
Retrieves a list of policies attached to a key.
"""
def list_key_policies(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "ListKeyPolicies", input, options)
end
@doc """
Lists the customer master keys.
"""
def list_keys(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "ListKeys", input, options)
end
@doc """
Returns a list of all tags for the specified customer master key (CMK).
"""
def list_resource_tags(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "ListResourceTags", input, options)
end
@doc """
Returns a list of all grants for which the grant's `RetiringPrincipal`
matches the one specified.

A typical use is to list all grants that you are able to retire. To retire
a grant, use `RetireGrant`.
"""
def list_retirable_grants(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "ListRetirableGrants", input, options)
end
@doc """
Attaches a key policy to the specified customer master key (CMK).

For more information about key policies, see [Key
Policies](http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html)
in the *AWS Key Management Service Developer Guide*.
"""
def put_key_policy(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "PutKeyPolicy", input, options)
end
@doc """
Encrypts data on the server side with a new customer master key (CMK)
without exposing the plaintext of the data on the client side. The data is
first decrypted and then reencrypted. You can also use this operation to
change the encryption context of a ciphertext.

Unlike other operations, `ReEncrypt` is authorized twice, once as
`ReEncryptFrom` on the source CMK and once as `ReEncryptTo` on the
destination CMK. We recommend that you include the `"kms:ReEncrypt*"`
permission in your [key
policies](http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html)
to permit reencryption from or to the CMK. This permission is automatically
included in the key policy when you create a CMK through the console, but
you must include it manually when you create a CMK programmatically or when
you set a key policy with the `PutKeyPolicy` operation.
"""
def re_encrypt(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "ReEncrypt", input, options)
end
@doc """
Retires a grant. To clean up, you can retire a grant when you're done using
it. You should revoke a grant when you intend to actively deny operations
that depend on it. The following are permitted to call this API:

<ul> <li> The AWS account (root user) under which the grant was created

</li> <li> The `RetiringPrincipal`, if present in the grant

</li> <li> The `GranteePrincipal`, if `RetireGrant` is an operation
specified in the grant

</li> </ul> You must identify the grant to retire by its grant token or by
a combination of the grant ID and the Amazon Resource Name (ARN) of the
customer master key (CMK). A grant token is a unique variable-length
base64-encoded string. A grant ID is a 64 character unique identifier of a
grant. The `CreateGrant` operation returns both.
"""
def retire_grant(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "RetireGrant", input, options)
end
@doc """
Revokes a grant. You can revoke a grant to actively deny operations that
depend on it.
"""
def revoke_grant(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "RevokeGrant", input, options)
end
@doc """
Schedules the deletion of a customer master key (CMK). You may provide a
waiting period, specified in days, before deletion occurs. If you do not
provide a waiting period, the default period of 30 days is used. When this
operation is successful, the state of the CMK changes to `PendingDeletion`.
Before the waiting period ends, you can use `CancelKeyDeletion` to cancel
the deletion of the CMK. After the waiting period ends, AWS KMS deletes the
CMK and all AWS KMS data associated with it, including all aliases that
refer to it.

<important> Deleting a CMK is a destructive and potentially dangerous
operation. When a CMK is deleted, all data that was encrypted under the CMK
is rendered unrecoverable. To restrict the use of a CMK without deleting
it, use `DisableKey`.

</important> For more information about scheduling a CMK for deletion, see
[Deleting Customer Master
Keys](http://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html)
in the *AWS Key Management Service Developer Guide*.
"""
def schedule_key_deletion(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "ScheduleKeyDeletion", input, options)
end
@doc """
Adds or overwrites one or more tags for the specified customer master key
(CMK).

Each tag consists of a tag key and a tag value. Tag keys and tag values are
both required, but tag values can be empty (null) strings.

You cannot use the same tag key more than once per CMK. For example,
consider a CMK with one tag whose tag key is `Purpose` and tag value is
`Test`. If you send a `TagResource` request for this CMK with a tag key of
`Purpose` and a tag value of `Prod`, it does not create a second tag.
Instead, the original tag is overwritten with the new tag value.
"""
def tag_resource(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "TagResource", input, options)
end
@doc """
Removes the specified tag or tags from the specified customer master key
(CMK).

To remove a tag, you specify the tag key for each tag to remove. You do not
specify the tag value. To overwrite the tag value for an existing tag, use
`TagResource`.
"""
def untag_resource(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "UntagResource", input, options)
end
@doc """
Updates an alias to map it to a different key.

An alias is not a property of a key. Therefore, an alias can be mapped to
and unmapped from an existing key without changing the properties of the
key.

An alias name can contain only alphanumeric characters, forward slashes
(/), underscores (_), and dashes (-). An alias must start with the word
"alias" followed by a forward slash (alias/). An alias that begins with
"aws" after the forward slash (alias/aws...) is reserved by Amazon Web
Services (AWS).

The alias and the key it is mapped to must be in the same AWS account and
the same region.
"""
def update_alias(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "UpdateAlias", input, options)
end
@doc """
Updates the description of a customer master key (CMK).
"""
def update_key_description(client, input, options \\ []) do
  # Thin wrapper over the shared request/4 JSON-RPC helper.
  request(client, "UpdateKeyDescription", input, options)
end
# Signs and sends a KMS JSON-RPC request for the given `action`.
#
# On an HTTP 200 response returns `{:ok, decoded_body | nil, response}`
# (`nil` when the service returns an empty body). On a non-200 response the
# JSON error document is decoded and returned as
# `{:error, {exception_type, message}}`. Transport failures are returned as
# `{:error, %HTTPoison.Error{}}`.
#
# Fixed: the previous @spec claimed `{:error, Poison.Parser.t}` for the
# service-error case, but the code returns an `{exception, message}` tuple.
@spec request(map(), binary(), map(), list()) ::
        {:ok, Poison.Parser.t() | nil, HTTPoison.Response.t()}
        | {:error, {binary(), binary()}}
        | {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
  client = %{client | service: "kms"}
  host = get_host("kms", client)
  url = get_url(host, client)

  headers = [
    {"Host", host},
    {"Content-Type", "application/x-amz-json-1.1"},
    # KMS uses the "TrentService" target prefix for every action.
    {"X-Amz-Target", "TrentService.#{action}"}
  ]

  payload = Poison.Encoder.encode(input, [])
  headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)

  case HTTPoison.post(url, payload, headers, options) do
    {:ok, response = %HTTPoison.Response{status_code: 200, body: ""}} ->
      {:ok, nil, response}

    {:ok, response = %HTTPoison.Response{status_code: 200, body: body}} ->
      {:ok, Poison.Parser.parse!(body), response}

    {:ok, _response = %HTTPoison.Response{body: body}} ->
      # Non-200: AWS returns a JSON error document carrying "__type" and
      # "message".
      error = Poison.Parser.parse!(body)
      {:error, {error["__type"], error["message"]}}

    {:error, %HTTPoison.Error{} = http_error} ->
      # Pass the transport error through unchanged instead of rebuilding it.
      {:error, http_error}
  end
end
# Resolves the request hostname: "localhost" for the special "local"
# region, otherwise the conventional "<prefix>.<region>.<endpoint>" form.
defp get_host(endpoint_prefix, client) do
  case client.region do
    "local" -> "localhost"
    region -> "#{endpoint_prefix}.#{region}.#{client.endpoint}"
  end
end
# Builds the base request URL, e.g. "https://kms.us-east-1.amazonaws.com:443/".
defp get_url(host, client) do
  %{proto: proto, port: port} = client
  "#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/kms.ex
| 0.890187
| 0.546859
|
kms.ex
|
starcoder
|
defmodule MlDHT do
  @moduledoc ~S"""
  Multinode - MlDHT is an Elixir package based off <NAME>'s mainline DHT package.
  It allows creation of multiple DHT nodes for DHT indexing.
  Number of DHT nodes defaults to 1, and can be set in config/config.exs like so:
  ```
  config :mldht,
    num_nodes: 2
  ```
  """

  @typedoc """
  A binary which contains the infohash of a torrent. An infohash is a SHA1
  encoded hex sum which identifies a torrent.
  """
  @type infohash :: binary

  @typedoc """
  A non negative integer (0--65535) which represents a TCP port number.
  """
  @type tcp_port :: non_neg_integer

  @name __MODULE__

  use Application
  import Supervisor.Spec, warn: false

  @doc false
  def start(_type, _arg) do
    Supervisor.start_link(@name, [], name: @name)
  end

  # Supervisor callback: starts a Registry used for node namespacing plus the
  # MlDHT.Supervisor that owns the configured number of DHT nodes.
  # NOTE(review): Supervisor.Spec (supervisor/2, supervise/2) is deprecated in
  # recent Elixir releases — consider migrating to child-spec tuples.
  def init([]) do
    # Default to a single node if number not specified in config
    num_nodes = Application.get_env(:mldht, :num_nodes) || 1

    children = [
      supervisor(Registry, [:unique, MlDHT.Namespace]),
      supervisor(MlDHT.Supervisor, [num_nodes])
    ]

    opts = [strategy: :one_for_one, name: MlDHT]
    supervise(children, opts)
  end

  defdelegate new(num), to: MlDHT.Supervisor

  @doc ~S"""
  This function needs an infohash as binary and a callback function as
  parameter. This function uses its own routing table as a starting point to
  start a get_peers search for the given infohash.

  ## Example
      iex> "3F19B149F53A50E14FC0B79926A391896EABAB6F" ## Ubuntu 15.04
           |> Base.decode16!
           |> MlDHT.search(fn(node) ->
             {ip, port} = node
             IO.puts "ip: #{inspect ip} port: #{port}"
           end)
  """
  # NOTE(review): the @spec says the return is an atom, but the body is an
  # Enum.map/2 over every node, which returns a list — the spec looks
  # inaccurate. Same applies to both search_announce variants below.
  @spec search(infohash, fun) :: atom
  def search(infohash, callback) do
    MlDHT.NodeList.get()
    |> Enum.map(& DHTServer.Worker.search(&1, infohash, callback))
  end

  @doc ~S"""
  This function needs an infohash as binary and callback function as
  parameter. This function does the same thing as the search/2 function, except
  it sends an announce message to the found peers. This function does not need a
  TCP port which means the announce message sets `:implied_port` to true.

  ## Example
      iex> "3F19B149F53A50E14FC0B79926A391896EABAB6F" ## Ubuntu 15.04
           |> Base.decode16!
           |> MlDHT.search_announce(fn(node) ->
             {ip, port} = node
             IO.puts "ip: #{inspect ip} port: #{port}"
           end)
  """
  @spec search_announce(infohash, fun) :: atom
  def search_announce(infohash, callback) do
    MlDHT.NodeList.get()
    |> Enum.map(& DHTServer.Worker.search_announce(&1, infohash, callback))
  end

  @doc ~S"""
  This function needs an infohash as binary, a callback function as parameter,
  and a TCP port as integer. This function does the same thing as the search/2
  function, except it sends an announce message to the found peers.

  ## Example
      iex> "3F19B149F53A50E14FC0B79926A391896EABAB6F" ## Ubuntu 15.04
           |> Base.decode16!
           |> MlDHT.search_announce(fn(node) ->
             {ip, port} = node
             IO.puts "ip: #{inspect ip} port: #{port}"
           end, 6881)
  """
  @spec search_announce(infohash, fun, tcp_port) :: atom
  def search_announce(infohash, callback, port) do
    MlDHT.NodeList.get()
    |> Enum.map(& DHTServer.Worker.search_announce(&1, infohash, callback, port))
  end
end
|
lib/mldht.ex
| 0.768993
| 0.807802
|
mldht.ex
|
starcoder
|
defmodule GenTimer do
  @moduledoc """
  Extends GenServer to give a timer functionality.

  There is a small folder of examples in this repo to guide you.

  ## Callbacks

  Supports the same callbacks as `GenServer`. The only considerations are:

  ### There Is Required State For `init/1`

  The state returned by `c:GenServer.init/1` must include the required keys shown
  in `t:valid_state/0`, but then you can add any other state you please.

  ### Repeated Function Callback

  The callback `c:repeated_function/1` is where you choose what is done each
  iteration. It will use the current state as the argument and will use the
  returned state as the state of the GenServer going forward.
  """

  @type valid_state :: %{milli: pos_integer, times: pos_integer | :infinite, last_return: any}

  @doc """
  This is where you choose what is done each iteration.

  It will use the current state as the argument and will use the returned state
  as the state of the GenServer going forward.
  """
  @callback repeated_function(state :: valid_state) :: valid_state

  defmacro __using__(_args) do
    quote do
      use GenServer, restart: :transient

      @behaviour GenTimer

      @spec last_returned_value(pid()) :: any()
      def last_returned_value(pid) do
        GenServer.call(pid, :last_returned_value)
      end

      # Callbacks

      # Sent once by GenTimer.start_link/start right after the process starts:
      # validates the state and schedules the first tick. schedule_remaining/2
      # returns the updated :times counter (decremented, or :infinite).
      @impl true
      def handle_info(:start_timer, state) do
        new_state =
          state
          |> check_state()
          |> Map.update(:times, 0, fn times -> schedule_remaining(state.milli, times) end)

        {:noreply, new_state}
      end

      # Final iteration: run the callback once more, then stop normally.
      def handle_info(:perform, state) do
        new_state = repeated_function(state)
        {:stop, :normal, new_state}
      end

      # Intermediate iteration: run the callback and schedule the next tick.
      def handle_info(:perform_and_reschedule, state) do
        new_state =
          state
          |> repeated_function()
          |> Map.update(:times, 0, fn times -> schedule_remaining(state.milli, times) end)

        {:noreply, new_state}
      end

      @impl true
      def handle_call(:last_returned_value, _from, state) do
        return = Map.get(state, :last_return)
        {:reply, return, state}
      end

      # Private

      # Ensures the state carries the required keys with valid values; raises
      # on a malformed state.
      defp check_state(state) do
        state |> check_keys() |> check_values()
      end

      defp check_keys(state) do
        cond do
          not Map.has_key?(state, :milli) -> raise GenTimer.RequiredKeyError, :milli
          not Map.has_key?(state, :times) -> raise GenTimer.RequiredKeyError, :times
          not Map.has_key?(state, :last_return) -> Map.put(state, :last_return, nil)
          true -> state
        end
      end

      defp check_values(%{milli: milli}) when not is_integer(milli) or milli < 1 do
        raise GenTimer.InvalidDurationError, milli
      end

      defp check_values(%{times: times} = state) do
        case times do
          :infinite -> :ok
          num when is_integer(num) and num > 0 -> :ok
          other -> raise GenTimer.InvalidRepetitionError, other
        end

        # Re-run validation on the rest of the state (without :times) so the
        # :milli clause above still gets a chance to raise.
        state |> Map.delete(:times) |> check_values() |> Map.put(:times, times)
      end

      defp check_values(state), do: state

      defp schedule_work(job, milli) do
        Process.send_after(self(), job, milli)
      end

      # Returns the new value for :times. :infinite reschedules forever.
      defp schedule_remaining(milli, :infinite) do
        schedule_work(:perform_and_reschedule, milli)
        :infinite
      end

      # times == 1 schedules the final :perform; times <= 0 schedules nothing.
      defp schedule_remaining(milli, times) do
        cond do
          times > 1 -> schedule_work(:perform_and_reschedule, milli)
          times == 1 -> schedule_work(:perform, milli)
          true -> :ok
        end

        times - 1
      end
    end
  end

  @doc """
  Use exactly the same as `GenServer.start_link/3`.

  Only difference is that it will send a message to the process to start the timer.
  """
  @spec start_link(atom, any, GenServer.options()) :: GenServer.on_start()
  def start_link(module, args, options) do
    module
    |> GenServer.start_link(args, options)
    |> send_start_signal()
  end

  @doc """
  Use exactly the same as `GenServer.start/3`.

  Only difference is that it will send a message to the process to start the timer.
  """
  @spec start(atom, any, GenServer.options()) :: GenServer.on_start()
  def start(module, args, options) do
    module
    |> GenServer.start(args, options)
    |> send_start_signal()
  end

  defdelegate abcast(nodes, name, request), to: GenServer
  defdelegate call(server, request, timeout), to: GenServer
  defdelegate cast(server, request), to: GenServer
  defdelegate multi_call(nodes, name, request, timeout), to: GenServer
  defdelegate reply(client, reply), to: GenServer
  defdelegate stop(server, reason, timeout), to: GenServer

  # Kicks off the timer on a successfully started process; any error result
  # from start/start_link is passed through untouched.
  defp send_start_signal({:ok, pid} = result) do
    Process.send(pid, :start_timer, [])
    result
  end

  defp send_start_signal(other), do: other
end
|
lib/gen_timer.ex
| 0.82741
| 0.567158
|
gen_timer.ex
|
starcoder
|
defmodule CPF do
  @moduledoc """
  CPF module that provides functions to verify if a CPF is valid.
  """

  unless Version.match?(System.version(), ">= 1.7.0") do
    IO.warn("""
    You're running an old Elixir version. CPF will soon support Elixir versions
    above 1.7.0.
    """)
  end

  defstruct [:digits]

  # NOTE(review): despite the name, this guard accepts 0 (it checks >= 0, i.e.
  # non-negative). An all-zero input is still rejected later by the
  # :same_digits check in parse/1.
  defguardp is_pos_integer(number) when is_integer(number) and number >= 0

  # NOTE(review): individual digits may be 0, so pos_integer() looks too
  # narrow here — non_neg_integer() would be accurate.
  @typep digit :: pos_integer()

  @typedoc """
  A text in `String.t()` or a positive integer
  """
  @type input :: String.t() | pos_integer

  @typedoc """
  The CPF type. It is composed of eleven digits (0-9).
  """
  @opaque t :: %__MODULE__{
            digits: {digit, digit, digit, digit, digit, digit, digit, digit, digit, digit, digit}
          }

  @doc """
  Initializes a CPF.

  ## Examples

      iex> CPF.new(563_606_676_73)
      #CPF<563.606.676-73>

      iex> CPF.new("56360667673")
      #CPF<563.606.676-73>

  This function doesn't check if CPF numbers are valid, only use this function
  if the given `String.t` or the integer was validated before.
  """
  @spec new(input) :: t
  def new(valid_cpf) when is_pos_integer(valid_cpf) do
    %__MODULE__{
      digits: to_digits(valid_cpf)
    }
  end

  def new(valid_cpf) when is_binary(valid_cpf) do
    %__MODULE__{
      digits: valid_cpf |> String.to_integer() |> to_digits()
    }
  end

  @doc """
  Checks if the given argument is `CPF.t()` type.

  ## Examples

      iex> CPF.cpf?(563_606_676_73)
      false

      iex> "56360667673" |> CPF.new() |> CPF.cpf?()
      true
  """
  @spec cpf?(any) :: true | false
  def cpf?(%CPF{}), do: true
  def cpf?(_), do: false

  @doc """
  Returns `true` the given `cpf` is valid, otherwise `false`.

  ## Examples

      iex> CPF.valid?(563_606_676_73)
      true

      iex> CPF.valid?(563_606_676_72)
      false

      iex> CPF.valid?("563.606.676-73")
      true

      iex> CPF.valid?("563/60.6-676/73")
      false

      iex> CPF.valid?("563.606.676-72")
      false

      iex> CPF.valid?("56360667673")
      true

      iex> CPF.valid?("56360667672")
      false
  """
  @spec valid?(input :: input) :: boolean
  def valid?(input) when is_pos_integer(input) or is_binary(input) do
    case parse(input) do
      {:ok, _cpf} -> true
      {:error, _reason} -> false
    end
  end

  @doc """
  Returns a formatted string from a given `cpf`.

  ## Examples

      iex> 563_606_676_73 |> CPF.new() |> CPF.format()
      "563.606.676-73"
  """
  @spec format(cpf :: t()) :: String.t()
  def format(%CPF{digits: digits}) do
    {dig_1, dig_2, dig_3, dig_4, dig_5, dig_6, dig_7, dig_8, dig_9, dig_10, dig_11} = digits

    # Builds the conventional "XXX.XXX.XXX-XX" presentation.
    to_string(dig_1) <>
      to_string(dig_2) <>
      to_string(dig_3) <>
      "." <>
      to_string(dig_4) <>
      to_string(dig_5) <>
      to_string(dig_6) <>
      "." <>
      to_string(dig_7) <>
      to_string(dig_8) <> to_string(dig_9) <> "-" <> to_string(dig_10) <> to_string(dig_11)
  end

  @doc """
  Builds a CPF struct by validating its digits and format. Returns an ok/error tuple for
  valid/invalid CPFs.

  ## Examples

      iex> {:ok, cpf} = CPF.parse(563_606_676_73)
      iex> cpf
      #CPF<563.606.676-73>

      iex> CPF.parse(563_606_676_72)
      {:error, %CPF.ParsingError{reason: :invalid_verifier}}
  """
  @spec parse(input) :: {:ok, t()} | {:error, CPF.ParsingError.t()}
  def parse(input) when is_pos_integer(input) do
    digits = Integer.digits(input)

    # Pad to 11 digits, reject degenerate all-same-digit numbers, then check
    # the two verifier digits.
    with {:ok, digits} <- add_padding(digits),
         {:ok, digits} <- skip_same_digits(digits),
         {:ok, digits} <- verify_digits(digits) do
      {:ok, %CPF{digits: digits}}
    end
  end

  # Accepts the formatted "XXX.XXX.XXX-XX" shape by stripping the separators
  # and re-parsing.
  def parse(
        <<left_digits::bytes-size(3)>> <>
          "." <>
          <<middle_digits::bytes-size(3)>> <>
          "." <>
          <<right_digits::bytes-size(3)>> <>
          "-" <>
          <<verifier_digits::bytes-size(2)>>
      ) do
    parse(left_digits <> middle_digits <> right_digits <> verifier_digits)
  end

  def parse(input) when is_binary(input) do
    case Integer.parse(input) do
      {int_input, ""} ->
        parse(int_input)

      _ ->
        {:error, %CPF.ParsingError{reason: :invalid_format}}
    end
  end

  @doc """
  Builds a CPF struct by validating its digits and format. Returns an CPF type
  or raises an `CPF.ParsingError` exception.

  ## Examples

      iex> CPF.parse!(563_606_676_73)
      #CPF<563.606.676-73>

      iex> CPF.parse!(563_606_676_72)
      ** (CPF.ParsingError) invalid_verifier
  """
  @spec parse!(input) :: t()
  def parse!(input) do
    case parse(input) do
      {:ok, cpf} -> cpf
      {:error, exception} -> raise exception
    end
  end

  @doc """
  Returns a tuple with the eleven digits of the given cpf.
  """
  @spec digits(t) :: tuple
  def digits(%CPF{digits: digits}), do: digits

  @doc """
  Returns a integer representation of the given cpf.

  ## Examples

      iex> 4_485_847_608 |> CPF.new() |> CPF.to_integer()
      4_485_847_608
  """
  @spec to_integer(t) :: pos_integer
  def to_integer(%CPF{digits: digits}) do
    # Positional recomposition: digit i carries weight 10^(10 - i).
    0..10
    |> Enum.reduce(0, fn i, sum ->
      :math.pow(10, 10 - i) * elem(digits, i) + sum
    end)
    |> trunc()
  end

  @doc """
  Cleans up all characters of a given input except numbers. It can make CPF
  validation more flexible.

  ## Examples

      iex> CPF.flex(" 04.4 .8*58().476-08 ")
      "04485847608"

      iex> " 04.4 .8*58().476-08 " |> CPF.flex() |> CPF.valid?()
      true
  """
  @spec flex(String.t()) :: String.t()
  def flex(input) when is_binary(input), do: cleanup(input, "")

  # Accumulates digits in reverse (prepend), so flip once at the end.
  defp cleanup("", cleaned), do: String.reverse(cleaned)

  defp cleanup(<<char::utf8, rest::binary>>, cleaned) when char in ?0..?9,
    do: cleanup(rest, <<char::utf8>> <> cleaned)

  defp cleanup(<<_char::utf8, rest::binary>>, cleaned), do: cleanup(rest, cleaned)

  @doc """
  Generates a random valid CPF.

  ## Examples

      iex> CPF.generate() |> to_string() |> CPF.valid?
      true
  """
  @spec generate :: t()
  def generate, do: gen()

  @doc """
  Generates a predictable random valid CPF with the given `seed`.

  ## Examples

      iex> seed = {:exrop, [40_738_532_209_663_091 | 74_220_507_755_601_615]}
      iex> seed |> CPF.generate() |> CPF.format()
      "671.835.731-68"
  """
  @spec generate(seed :: :rand.builtin_alg() | :rand.state() | :rand.export_state()) :: t()
  def generate(seed) do
    :rand.seed(seed)
    gen()
  end

  # Draws the first nine digits at random, then derives the two verifier
  # digits from them.
  defp gen do
    digits = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}

    digits =
      Enum.reduce(0..8, digits, fn index, digits ->
        put_elem(digits, index, Enum.random(0..9))
      end)

    {v1, v2} = calculate_verifier_digits(digits)

    digits =
      digits
      |> put_elem(9, v1)
      |> put_elem(10, v2)

    %CPF{digits: digits}
  end

  # Left-pads with zeros up to 11 digits; more than 11 digits is an error.
  defp add_padding(digits) do
    padding = 11 - length(digits)

    if padding >= 0 do
      {:ok, add_padding(digits, padding)}
    else
      {:error, %CPF.ParsingError{reason: :too_long}}
    end
  end

  defp add_padding(digits, 0), do: digits

  defp add_padding(digits, padding) do
    add_padding([0 | digits], padding - 1)
  end

  # CPFs made of a single repeated digit pass the checksum but are invalid.
  defp skip_same_digits(digits) do
    if digits |> Enum.uniq() |> length() == 1 do
      {:error, %CPF.ParsingError{reason: :same_digits}}
    else
      {:ok, digits}
    end
  end

  # Recomputes both verifier digits and compares them with positions 10-11.
  defp verify_digits(digits) do
    cpf_digits = List.to_tuple(digits)
    {v1, v2} = calculate_verifier_digits(cpf_digits)
    {given_v1, given_v2} = extract_given_verifier_digits(cpf_digits)

    if v1 == given_v1 && v2 == given_v2 do
      {:ok, cpf_digits}
    else
      {:error, %CPF.ParsingError{reason: :invalid_verifier}}
    end
  end

  # Standard CPF checksum: v1 from the first nine digits; v2 additionally
  # weighs v1 by 2.
  defp calculate_verifier_digits(digits) do
    {v1_sum, v2_sum} = sum_digits(digits)

    v1 = digit_verifier(v1_sum)

    v2_sum = v2_sum + 2 * v1
    v2 = digit_verifier(v2_sum)

    {v1, v2}
  end

  # Weighted sums over the first nine digits: weights 10..2 for v1 and 11..3
  # for v2, computed in a single pass.
  defp sum_digits(digits) do
    v1_weight = 10
    v2_weight = 11

    Enum.reduce(0..8, {0, 0}, fn i, {v1_sum, v2_sum} ->
      v1 = (v1_weight - i) * elem(digits, i)
      v2 = (v2_weight - i) * elem(digits, i)

      {v1_sum + v1, v2_sum + v2}
    end)
  end

  # Verifier digit is 11 - (sum mod 11), clamped to 0 when the remainder is
  # 0 or 1.
  defp digit_verifier(sum) do
    rem = rem(sum, 11)
    if rem in [0, 1], do: 0, else: 11 - rem
  end

  defp extract_given_verifier_digits(digits) do
    {elem(digits, 9), elem(digits, 10)}
  end

  # Converts an integer to the canonical 11-element digit tuple, left-padding
  # with zeros; raises if it has too many digits.
  defp to_digits(integer) do
    digits = Integer.digits(integer)
    padding = 11 - length(digits)

    if padding >= 0 do
      digits
      |> add_padding(padding)
      |> List.to_tuple()
    else
      raise ArgumentError, "it has more than 11 digits"
    end
  end

  defimpl Inspect do
    import Inspect.Algebra

    def inspect(cpf, opts) do
      formatted_cpf = cpf |> CPF.format() |> color(:atom, opts)
      concat(["#CPF<", formatted_cpf, ">"])
    end
  end

  defimpl String.Chars do
    @doc """
    Returns a `String.t` representation of the given `cpf`.

    ## Examples

        iex> "04485847608" |> CPF.new() |> to_string()
        "04485847608"
    """
    @spec to_string(CPF.t()) :: String.t()
    def to_string(cpf) do
      digits = CPF.digits(cpf)
      for i <- 0..10, into: "", do: digits |> elem(i) |> Kernel.to_string()
    end
  end
end
|
lib/cpf/cpf.ex
| 0.924726
| 0.459682
|
cpf.ex
|
starcoder
|
defmodule BbbLti.Validator do
@moduledoc """
Module to handle incoming HTTP requests from LTI Providers
Based on https://github.com/DefactoSoftware/lti
"""
@required_oauth_parameters [
"oauth_consumer_key",
"oauth_signature_method",
"oauth_timestamp",
"oauth_nonce",
"oauth_version"
]
@doc """
Validates the OAuth 1.0 signature of an incoming LTI request.

Recomputes the HMAC-SHA1 signature from the HTTP endpoint `url`, the OAuth
parameters carried in the `Authorization` header (`oauth_header`), and the
`secret` shared with the LTI provider, then compares it with the
`oauth_signature` value sent in the header. The parameters are extracted as a
list of `{key, value}` tuples; the received signature stays percent encoded.

Returns `{:ok, signature}` when the signatures match, or `{:error, reasons}`
with a list of validation failures otherwise.

## Examples

    iex(0)> BbbLti.Validator.validate_signature(
    iex(0)> "https://example.com",
    iex(0)> ~S"OAuth oauth_consumer_key=\"key1234\",oauth_signature_method=\"HMAC-SHA1\",oauth_timestamp=\"1525076552\",oauth_nonce=\"123\",oauth_version=\"1.0\",oauth_signature=\"iyyQNRQyXTlpLJPJns3ireWjQxo%3D\"",
    iex(0)> "random_secret"
    iex(0)> )
    {:ok, "iyyQNRQyXTlpLJPJns3ireWjQxo%3D"}
"""
def validate_signature(url, oauth_header, secret) do
  # extract_header_elements/1 separates the oauth_signature entry from the
  # remaining parameters; this match assumes exactly one signature entry.
  {parameters, [{"oauth_signature", received_signature}]} =
    extract_header_elements(oauth_header)

  # A failed validate_parameters/1 returns {:error, reasons}, which falls
  # through the `with` unchanged.
  with {:ok, _} <- validate_parameters(parameters) do
    basestring = base_string(url, parameters)
    signature = generate_signature(secret, basestring)

    if signature == received_signature do
      {:ok, signature}
    else
      {:error, [:unmatching_signatures]}
    end
  end
end
defp generate_signature(secret, basestring) do
:crypto.mac(
:hmac,
:sha,
percent_encode(secret) <> "&",
basestring
)
|> Base.encode64()
end
defp extract_header_elements(header) do
header
|> String.trim_leading("OAuth ")
|> String.split(",")
|> string_to_key_and_value()
|> trim_elements()
|> decode_values()
|> remove_realm_parameter()
|> extract_signature()
end
defp validate_parameters(parameters) do
{_, state} =
{parameters, []}
|> validate_oauth_version()
|> validate_duplication()
|> validate_required()
|> validate_supported()
case state do
[] -> {:ok, parameters}
_ -> {:error, state}
end
end
defp validate_oauth_version({parameters, state}) do
if List.keyfind(parameters, "oauth_version", 0) == {"oauth_version", "1.0"} do
{parameters, state}
else
{parameters, state ++ [:incorrect_version]}
end
end
defp validate_duplication({parameters, state}) do
if duplicated_elements?(parameters) do
{parameters, state ++ [:duplicated_parameters]}
else
{parameters, state}
end
end
defp validate_required({parameters, state}) do
if check_for_required_parameters(parameters) do
{parameters, state}
else
{parameters, state ++ [:missing_required_parameters]}
end
end
defp check_for_required_parameters(parameters) do
Enum.all?(@required_oauth_parameters, fn required_parameter ->
required_parameter in Enum.map(parameters, fn {key, _} -> key end)
end)
end
defp validate_supported({parameters, state}) do
if Enum.all?(parameters, fn {key, _} ->
# String.starts_with?(key, "oauth_")
true
end) do
{parameters, state}
else
{parameters, state ++ [:unsupported_parameters]}
end
end
defp duplicated_elements?(parameter_list, state \\ [])
defp duplicated_elements?([], _), do: false
defp duplicated_elements?([head | tail], existing_elements) do
if head in existing_elements do
true
else
duplicated_elements?(tail, existing_elements ++ [head])
end
end
defp base_string(url, parameters) do
encoded_url =
url
|> URI.parse()
|> downcase_scheme_and_host()
|> URI.to_string()
|> percent_encode()
query_string =
parameters
|> percent_encode_pairs()
|> Enum.sort()
|> normalized_string()
|> percent_encode()
"POST&#{encoded_url}&" <> query_string
end
defp downcase_scheme_and_host(%URI{scheme: scheme, host: host} = uri),
do: %URI{uri | scheme: String.downcase(scheme), host: String.downcase(host)}
defp percent_encode_pairs(pairs) do
Enum.map(pairs, fn {key, value} ->
{percent_encode(key), percent_encode(value)}
end)
end
defp normalized_string(sorted_pairs) when is_list(sorted_pairs) do
sorted_pairs
|> Enum.reduce("", fn {key, value}, acc ->
acc <> "&" <> "#{key}" <> "=" <> "#{value}"
end)
|> String.trim_leading("&")
end
defp percent_encode(object) do
object
|> to_string()
|> URI.encode(&URI.char_unreserved?/1)
end
defp percent_decode(object) do
object
|> to_string()
|> URI.decode()
end
defp string_to_key_and_value(key_value_strings) when is_list(key_value_strings) do
Enum.map(key_value_strings, fn key_value_string ->
[key, value] = String.split(key_value_string, "=")
{key, value}
end)
end
defp trim_elements(pairs) when is_list(pairs) do
Enum.map(pairs, fn {key, value} ->
{String.trim(key), String.trim(value, "\"")}
end)
end
defp decode_values(pairs) when is_list(pairs) do
Enum.map(pairs, fn {key, value} ->
{key, percent_decode(value)}
end)
end
defp extract_signature(pairs) do
Enum.split_with(pairs, fn {param_name, _} -> param_name != "oauth_signature" end)
end
defp remove_realm_parameter(pairs) when is_list(pairs) do
List.keydelete(pairs, "realm", 0)
end
end
|
lib/bbb_lti/validator.ex
| 0.790004
| 0.42176
|
validator.ex
|
starcoder
|
defmodule ExAws.Boto.Shape.Structure do
  @moduledoc false
  # Parsed spec for an AWS "structure" shape (a record with named members).
  defstruct [
    # shape name as it appears in the service JSON
    :name,
    # Elixir module generated for this shape
    :module,
    # list of required member atoms
    :required,
    # map of member atom => {json name, member module}
    :members,
    :documentation,
    :metadata
  ]
end
defmodule ExAws.Boto.Shape.List do
  @moduledoc false
  # Parsed spec for an AWS "list" shape.
  defstruct [
    :name,
    :module,
    # JSON name of the member shape
    :member_name,
    # Elixir module of the member shape
    :member,
    :documentation,
    :metadata,
    # optional length bounds taken from the service JSON ("min"/"max")
    min: nil,
    max: nil
  ]
end
defmodule ExAws.Boto.Shape.Map do
  @moduledoc false
  # Parsed spec for an AWS "map" shape (key shape => value shape).
  defstruct [
    :name,
    :module,
    :key_module,
    :value_module,
    :documentation,
    :metadata
  ]
end
defmodule ExAws.Boto.Shape.Basic do
  @moduledoc false
  # Parsed spec for a scalar AWS shape ("string", "integer", "blob", ...).
  defstruct [
    :name,
    :module,
    # the raw "type" string from the service JSON
    :type,
    :documentation,
    # the full original shape definition
    :def,
    :metadata
  ]
end
defmodule ExAws.Boto.Shape do
  @moduledoc false

  # A "shape spec" is the parsed form of one entry of an AWS service
  # definition's "shapes" map. Depending on the JSON "type" it becomes one
  # of the four structs below, and generate_module/1 turns each spec into a
  # quoted Elixir module implementing this behaviour.
  @type t ::
          %ExAws.Boto.Shape.Structure{}
          | %ExAws.Boto.Shape.List{}
          | %ExAws.Boto.Shape.Map{}
          | %ExAws.Boto.Shape.Basic{}

  # Returns the spec the module was generated from.
  @callback shape_spec() :: t()
  # Builds the runtime representation from loosely-typed input.
  @callback new(term()) :: term()
  # Inverse of new/1: turns the runtime representation back into plain data.
  @callback destruct(term()) :: term()

  alias ExAws.Boto.Util, as: Util

  @doc """
  Looks up shape `name` in `service_json`, generating and compiling the
  backing module on first use.

  Returns the shape spec. When the module already exists (its
  `shape_spec/0` is exported), the cached spec is returned without
  recompiling.
  """
  def from_service_json(%{"metadata" => %{"serviceId" => service_id}} = service_json, name) do
    module = Util.module_name(service_id, name)

    if function_exported?(module, :shape_spec, 0) do
      module.shape_spec()
    else
      spec = generate_shape_spec(service_json, name)

      spec
      |> generate_module()
      |> Code.compile_quoted("Shape #{inspect(module)}")

      spec
    end
  end

  @doc """
  Builds the shape spec for shape `name` from the service JSON's "shapes"
  map.
  """
  @spec generate_shape_spec(map(), String.t()) :: t()
  def generate_shape_spec(%{"shapes" => shapes} = service_json, name) do
    shape_def =
      shapes
      |> Map.get(name)

    generate_shape_spec(service_json, name, shape_def)
  end

  @doc """
  Builds a shape spec from an explicit shape definition. Dispatches on the
  definition: `"members"` -> Structure, `"type": "map"` -> Map,
  `"type": "list"` -> List, any other `"type"` -> Basic.
  """
  # Fix: this @spec previously declared arity 2 while annotating the
  # arity-3 clauses.
  @spec generate_shape_spec(map(), String.t(), map()) :: t()
  def generate_shape_spec(
        %{"metadata" => %{"serviceId" => service_id} = service_meta},
        name,
        %{"members" => members} = shape_def
      ) do
    %ExAws.Boto.Shape.Structure{
      name: name,
      module: Util.module_name(service_id, name),
      required:
        shape_def
        |> Map.get("required", [])
        |> Enum.map(&Util.key_to_atom/1),
      # member atom => {original JSON name, member module}
      members:
        members
        |> Enum.map(fn {name, shape} ->
          {Util.key_to_atom(name), {name, Util.module_name(service_id, shape)}}
        end)
        |> Enum.into(%{}),
      documentation:
        shape_def
        |> Map.get("documentation")
        |> ExAws.Boto.DocParser.doc_to_markdown(),
      metadata: service_meta
    }
  end

  def generate_shape_spec(
        %{"metadata" => %{"serviceId" => service_id} = service_meta},
        name,
        %{
          "type" => "map",
          "key" => %{"shape" => key_name},
          "value" => %{"shape" => value_name}
        } = shape_def
      ) do
    %ExAws.Boto.Shape.Map{
      name: name,
      module: Util.module_name(service_id, name),
      key_module: Util.module_name(service_id, key_name),
      value_module: Util.module_name(service_id, value_name),
      documentation:
        shape_def
        |> Map.get("documentation")
        |> ExAws.Boto.DocParser.doc_to_markdown(),
      metadata: service_meta
    }
  end

  def generate_shape_spec(
        %{"metadata" => %{"serviceId" => service_id} = service_meta},
        name,
        %{"type" => "list", "member" => %{"shape" => member_name}} = shape_def
      ) do
    %ExAws.Boto.Shape.List{
      name: name,
      module: Util.module_name(service_id, name),
      member_name: member_name,
      member: Util.module_name(service_id, member_name),
      min: Map.get(shape_def, "min"),
      max: Map.get(shape_def, "max"),
      documentation:
        shape_def
        |> Map.get("documentation")
        |> ExAws.Boto.DocParser.doc_to_markdown(),
      metadata: service_meta
    }
  end

  # Fallback: any other "type" is treated as a scalar shape.
  def generate_shape_spec(
        %{"metadata" => %{"serviceId" => service_id} = service_meta},
        name,
        %{"type" => basic} = shape_def
      ) do
    %ExAws.Boto.Shape.Basic{
      name: name,
      module: Util.module_name(service_id, name),
      type: basic,
      def: shape_def,
      documentation:
        shape_def
        |> Map.get("documentation")
        |> ExAws.Boto.DocParser.doc_to_markdown(),
      metadata: service_meta
    }
  end

  @doc """
  Returns the quoted AST of a module implementing `ExAws.Boto.Shape` for
  the given spec. The generated module defines `shape_spec/0`, `new/1` and
  `destruct/1`.
  """
  @spec generate_module(t()) :: Macro.t()
  def generate_module(
        %ExAws.Boto.Shape.Structure{
          module: module,
          members: members,
          documentation: docs
        } = shape_spec
      )
      when module != nil do
    # Keyword list of {member atom, quoted member type} used for the
    # params/t typespecs and the defstruct field list.
    members_types =
      members
      |> Enum.map(fn {property, {_name, member_mod}} ->
        {
          property,
          quote do
            unquote(member_mod).t()
          end
        }
      end)

    quote do
      defmodule unquote(module) do
        @behaviour ExAws.Boto.Shape
        @moduledoc unquote(docs)
        @type params :: unquote(members_types)
        @type t :: %__MODULE__{unquote_splicing(members_types)}
        defstruct unquote(members_types |> Enum.map(fn {name, _} -> name end))

        @doc false
        @impl ExAws.Boto.Shape
        def shape_spec(), do: unquote(Macro.escape(shape_spec))

        @spec new(params()) :: t()
        @impl ExAws.Boto.Shape
        def new(nil) do
          %__MODULE__{}
        end

        # Accepts either atom keys (Elixir-style) or the original JSON
        # string keys; each member is built via its own module's new/1.
        def new(args) when is_map(args) do
          %__MODULE__{
            unquote_splicing(
              members
              |> Enum.map(fn {property, {name, member_mod}} ->
                quote do
                  {
                    unquote(property),
                    unquote(member_mod).new(
                      Map.get(args, unquote(property)) || Map.get(args, unquote(name))
                    )
                  }
                end
              end)
            )
          }
        end

        def new(args) when is_list(args) do
          args
          |> Enum.into(%{})
          |> new()
        end

        @doc false
        @spec destruct(t()) :: map()
        @impl ExAws.Boto.Shape
        # Produces a plain map keyed by the original JSON member names.
        def destruct(%__MODULE__{} = struct) do
          %{
            unquote_splicing(
              members
              |> Enum.map(fn {property, {name, member_mod}} ->
                quote do
                  {unquote(name), unquote(member_mod).destruct(struct.unquote(property))}
                end
              end)
            )
          }
        end
      end
    end
  end

  def generate_module(
        %ExAws.Boto.Shape.List{
          module: module,
          member: member_module,
          documentation: docs
        } = shape_spec
      )
      when module != nil and member_module != nil do
    quote do
      defmodule unquote(module) do
        @behaviour ExAws.Boto.Shape
        # NOTE(review): `docs` was already run through doc_to_markdown/1 in
        # generate_shape_spec/3, so it is processed twice here (the
        # Structure clause above uses `docs` directly) — confirm whether
        # doc_to_markdown/1 is idempotent before unifying.
        @moduledoc unquote(ExAws.Boto.DocParser.doc_to_markdown(docs))
        @type t :: [unquote(member_module).t()]
        @type params :: t()

        @doc false
        @impl ExAws.Boto.Shape
        def shape_spec(), do: unquote(Macro.escape(shape_spec))

        @spec new(params()) :: t()
        @impl ExAws.Boto.Shape
        def new(nil) do
          []
        end

        def new(list) when is_list(list) do
          list
          |> Enum.map(fn item ->
            unquote(member_module).new(item)
          end)
        end

        @spec destruct(t()) :: [...]
        @impl ExAws.Boto.Shape
        def destruct(list) when is_list(list) do
          list |> Enum.map(&unquote(member_module).destruct/1)
        end
      end
    end
  end

  def generate_module(
        %ExAws.Boto.Shape.Map{
          module: module,
          key_module: key_module,
          value_module: value_module,
          documentation: docs
        } = shape_spec
      )
      when module != nil do
    quote do
      defmodule unquote(module) do
        @behaviour ExAws.Boto.Shape
        # NOTE(review): same double doc_to_markdown/1 application as in the
        # List clause — confirm idempotence before unifying.
        @moduledoc unquote(ExAws.Boto.DocParser.doc_to_markdown(docs))
        @type t :: %{optional(unquote(key_module).t()) => unquote(value_module).t()}
        @type params :: t()

        @doc false
        @impl ExAws.Boto.Shape
        def shape_spec(), do: unquote(Macro.escape(shape_spec))

        @spec new(params()) :: t()
        @impl ExAws.Boto.Shape
        def new(nil) do
          # Fix: an absent map normalizes to an empty map, matching `t()`.
          # Previously this returned `[]`, violating the typespec and
          # breaking callers that expect map operations on the result.
          %{}
        end

        def new(map) when is_map(map) do
          map
          |> Enum.map(fn {key, value} ->
            {
              unquote(key_module).new(key),
              unquote(value_module).new(value)
            }
          end)
          |> Enum.into(%{})
        end

        @spec destruct(t()) :: map()
        @impl ExAws.Boto.Shape
        def destruct(map) when is_map(map) do
          map
          |> Enum.map(fn {key, value} ->
            {
              unquote(key_module).destruct(key),
              unquote(value_module).destruct(value)
            }
          end)
          |> Enum.into(%{})
        end
      end
    end
  end

  def generate_module(
        %ExAws.Boto.Shape.Basic{
          module: module
        } = shape_spec
      )
      when module != nil do
    quote do
      defmodule unquote(module) do
        @behaviour ExAws.Boto.Shape
        @moduledoc unquote(generate_docs(shape_spec))
        @type t :: unquote(generate_type_spec(shape_spec))
        @type params :: t()

        @doc false
        @impl ExAws.Boto.Shape
        def shape_spec(), do: unquote(Macro.escape(shape_spec))

        @spec new(params()) :: t()
        @impl ExAws.Boto.Shape
        # Scalars pass through unchanged.
        def new(val) do
          val
        end

        @spec destruct(t()) :: t()
        @impl ExAws.Boto.Shape
        def destruct(val) do
          val
        end
      end
    end
  end

  # NOTE(review): within this module generate_docs/1 is only called from the
  # Basic clause of generate_module/1, so only the fallback clause below is
  # exercised here (Basic specs get `@moduledoc false` even when
  # documentation exists) — confirm whether that is intended.
  def generate_docs(%ExAws.Boto.Shape.Structure{documentation: docs} = _shape_spec) do
    quote do
      unquote(ExAws.Boto.DocParser.doc_to_markdown(docs))
    end
  end

  def generate_docs(_) do
    false
  end

  @doc """
  Returns the quoted typespec for a shape spec.
  """
  def generate_type_spec(shape_spec) do
    # In actually implementing this function, we need to be careful about recursive data types.
    # A mapset is used to track which structs we've actually generated, so if there's a
    # cycle in the type hierarchy, we can just reference the `.t()` typespec and move on.
    do_generate_type_spec(shape_spec, MapSet.new())
  end

  defp do_generate_type_spec(nil, _in_progress) do
    quote do: nil
  end

  # A bare module atom: expand via its shape_spec/0.
  defp do_generate_type_spec(atom, in_progress) when is_atom(atom) do
    do_generate_type_spec(atom.shape_spec(), in_progress)
  end

  defp do_generate_type_spec(
         %ExAws.Boto.Shape.Structure{
           module: module,
           members: members,
           required: required
         },
         in_progress
       ) do
    # Break recursion: if this struct is already being expanded higher up
    # the stack, just reference its t() type.
    if MapSet.member?(in_progress, module) do
      quote do
        unquote(module).t()
      end
    else
      in_progress = MapSet.put(in_progress, module)

      quote do
        %unquote(module){
          unquote_splicing(
            members
            |> Enum.map(fn {attr, {_name, module}} ->
              # Optional members are typed `nil | member_type`.
              cond do
                Enum.member?(required, attr) ->
                  {attr, do_generate_type_spec(module.shape_spec(), in_progress)}

                true ->
                  {attr,
                   quote do
                     nil | unquote(do_generate_type_spec(module.shape_spec(), in_progress))
                   end}
              end
            end)
          )
        }
      end
    end
  end

  defp do_generate_type_spec(
         %ExAws.Boto.Shape.List{
           member: member
         },
         in_progress
       ) do
    quote do
      [unquote(do_generate_type_spec(member.shape_spec(), in_progress))]
    end
  end

  defp do_generate_type_spec(
         %ExAws.Boto.Shape.Map{
           key_module: key_module,
           value_module: value_module
         },
         in_progress
       ) do
    quote do
      %{
        optional(unquote(do_generate_type_spec(key_module, in_progress))) =>
          unquote(do_generate_type_spec(value_module, in_progress))
      }
    end
  end

  # Scalar JSON types map onto Elixir builtins; timestamps stay strings.
  defp do_generate_type_spec(%ExAws.Boto.Shape.Basic{type: "string"}, _in_progress) do
    quote do: String.t()
  end

  defp do_generate_type_spec(%ExAws.Boto.Shape.Basic{type: "timestamp"}, _in_progress) do
    quote do: String.t()
  end

  defp do_generate_type_spec(%ExAws.Boto.Shape.Basic{type: "integer"}, _in_progress) do
    quote do: integer()
  end

  defp do_generate_type_spec(%ExAws.Boto.Shape.Basic{type: "long"}, _in_progress) do
    quote do: integer()
  end

  defp do_generate_type_spec(%ExAws.Boto.Shape.Basic{type: "boolean"}, _in_progress) do
    quote do: true | false
  end

  defp do_generate_type_spec(%ExAws.Boto.Shape.Basic{type: "blob"}, _in_progress) do
    quote do: binary()
  end

  defp do_generate_type_spec(_, _) do
    quote do: any()
  end
end
|
lib/ex_aws/boto/shape.ex
| 0.550124
| 0.517388
|
shape.ex
|
starcoder
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.