defmodule LevelWeb.Schema.Mutations do
@moduledoc false
use Absinthe.Schema.Notation
@desc "A validation error."
object :error do
@desc "The name of the invalid attribute."
field :attribute, non_null(:string)
@desc "A human-friendly error message."
field :message, non_null(:string)
end
@desc "The response to creating a space."
object :create_space_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
@desc """
The mutated object. If the mutation was not successful,
this field may be null.
"""
field :space, :space
end
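# Illustrative sketch (not part of this schema): an Absinthe resolver for a
# hypothetical `createSpace` mutation would resolve :create_space_payload
# with a map such as
#
#     {:ok, %{success: true, errors: [], space: space}}
#
# or, when validation fails:
#
#     {:ok, %{success: false,
#             errors: [%{attribute: "name", message: "can't be blank"}],
#             space: nil}}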
@desc "The response to completing a setup step."
object :complete_setup_step_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc """
The next state.
"""
field :state, :space_setup_state
end
@desc "The response to creating a group."
object :create_group_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
@desc """
The mutated object. If the mutation was not successful,
this field may be null.
"""
field :group, :group
end
@desc "The response to updating a group."
object :update_group_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
@desc """
The mutated object. If the mutation was not successful,
this field may be null.
"""
field :group, :group
end
@desc "The response to bulk creating groups."
object :bulk_create_groups_payload do
@desc "A list of result payloads for each group."
field :payloads, non_null(list_of(:bulk_create_group_payload))
end
@desc "The payload for an individual group in a bulk create payload."
object :bulk_create_group_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
@desc """
The mutated object. If the mutation was not successful,
this field may be null.
"""
field :group, :group
@desc "The original arguments for this particular object."
field :args, non_null(:bulk_create_group_args)
end
@desc "The arguments for an individual bulk-created group."
object :bulk_create_group_args do
@desc "The name of the group."
field :name, non_null(:string)
end
@desc "The response to updating a group."
object :update_group_membership_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
@desc """
The mutated object. If the mutation was not successful,
this field may be null.
"""
field :membership, non_null(:group_membership)
end
@desc "The payload for an updating group bookmark state."
object :bookmark_group_payload do
@desc "The current bookmark status."
field :is_bookmarked, non_null(:boolean)
@desc "The group."
field :group, non_null(:group)
end
@desc "The response to posting a message to a group."
object :post_to_group_payload do
@desc """
A boolean indicating if the mutation was successful. If true, the errors
list will be empty. Otherwise, errors may contain objects describing why
the mutation failed.
"""
field :success, non_null(:boolean)
@desc "A list of validation errors."
field :errors, list_of(:error)
@desc """
The mutated object. If the mutation was not successful,
this field may be null.
"""
field :post, :post
end
end
# Source: lib/level_web/schema/mutations.ex (starcoder)
defmodule ExEnum do
##== Preamble ===========================================================
@moduledoc """
A simple enumeration library for Elixir.
Just add **use ExEnum, from: [ ... ]** to your module and it will
automagically acquire the following functionality:
- Ability to list all the values in the enumeration
- Ability to check if an arbitrary value belongs to the enumeration
- Ability to access a value from the enumeration via a dedicated accessor
function
- Ability to list all the keys that can be used to access each of the
enumeration values
This functionality is realised by means of the following functions:
**values/0**, **is_valid?/1**, **keys/0**, **from_value/1** and **\<key>/0**.
Note that your module will have as many **\<key>/0** functions as there are
enumeration values in the `use ExEnum, from: [ ... ]` clause.
## Example(s)
```elixir
defmodule Planet do
use ExEnum, from: [
"MERCURY",
"VENUS",
"EARTH",
"MARS",
"JUPITER",
"SATURN",
"URANUS",
"NEPTUNE"
]
end
Planet._MERCURY
# => "MERCURY"
Planet.values
# => ["MERCURY", "VENUS", "EARTH", "MARS", "JUPITER",
# "SATURN", "URANUS", "NEPTUNE"]
Planet.keys
# => [:_MERCURY, :_VENUS, :_EARTH, :_MARS, :_JUPITER,
# :_SATURN, :_URANUS, :_NEPTUNE]
Planet.is_valid?("PLUTO")
# => false
Planet.from_value("EARTH")
# => "EARTH"
Planet.from_value("PLUTO")
# => nil
```
```elixir
defmodule Direction do
use ExEnum, from: [
{:north, 1},
{:east, 2},
{:south, 3},
{:west, 4}
]
end
Direction.north
# => 1
Direction.values
# => [1, 2, 3, 4]
Direction.keys
# => [:north, :east, :south, :west]
Direction.is_valid?(:north_east)
# => false
Direction.from_value(1)
# => :north
Direction.from_value(5)
# => nil
```
"""
##== API ================================================================
# Callback invoked by `use`.
defmacro __using__(opts) do
data = opts[:from]
kvs = Enum.map(
data,
fn({k, v}) -> {k, v}
(v) ->
k = to_fname(v)
{k, v}
end)
ks = Keyword.keys(kvs)
vs = Keyword.values(kvs)
ks_f = quote do: def keys(), do: unquote(ks)
vs_f = quote do: def values(), do: unquote(vs)
iv_f =
Enum.reduce(
vs,
[quote do: def is_valid?(_), do: false],
fn(v, acc) ->
f = quote do: def is_valid?(unquote(v)), do: true
[f| acc]
end)
fv_f =
Enum.reduce(
data,
[quote do: def from_value(_), do: nil],
fn({k, v}, acc) ->
f = quote do: def from_value(unquote(v)), do: unquote(k)
[f| acc]
(v, acc) ->
f = quote do: def from_value(unquote(v)), do: unquote(v)
[f| acc]
end)
fs =
Enum.map(
kvs,
fn
{k, v} ->
quote do: def unquote(k)(), do: unquote(v)
end)
List.flatten([ks_f, vs_f, iv_f, fv_f, fs])
end
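# For illustration, `use ExEnum, from: [{:north, 1}, {:east, 2}]` injects
# definitions equivalent to:
#
#     def keys(), do: [:north, :east]
#     def values(), do: [1, 2]
#     def is_valid?(2), do: true
#     def is_valid?(1), do: true
#     def is_valid?(_), do: false
#     def from_value(2), do: :east
#     def from_value(1), do: :north
#     def from_value(_), do: nil
#     def north(), do: 1
#     def east(), do: 2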
##== Auxiliary functions ================================================
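# String values get an "_" prefix so the generated accessor is a valid
# function name: an identifier such as MERCURY would otherwise parse as a
# module alias, whereas _MERCURY is callable (e.g. Planet._MERCURY). Atom
# keys are used unchanged.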
defp to_fname(atom) when is_atom(atom) do
atom
end
defp to_fname(str) do
String.to_atom("_" <> to_string(str))
end
end
# Source: lib/exenum.ex (starcoder)
defmodule AWS.Organizations do
@moduledoc """
Organizations is a web service that enables you to consolidate your multiple
Amazon Web Services accounts into an *organization* and centrally manage your
accounts and their resources.
This guide provides descriptions of the Organizations operations. For more
information about using this service, see the [Organizations User Guide](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_introduction.html).
## Support and feedback for Organizations
We welcome your feedback. Send your comments to
[<EMAIL>](mailto:<EMAIL>) or post your feedback and questions in the [Organizations support
forum](http://forums.aws.amazon.com/forum.jspa?forumID=219). For more
information about the Amazon Web Services support forums, see [Forums Help](http://forums.aws.amazon.com/help.jspa).
## Endpoint to call when using the CLI or the Amazon Web Services SDK
For the current release of Organizations, specify the `us-east-1` region for all
Amazon Web Services API and CLI calls made from the commercial Amazon Web
Services Regions outside of China. If calling from one of the Amazon Web
Services Regions in China, then specify `cn-northwest-1`. You can do this in the
CLI by using these parameters and commands:
* Use the following parameter with each command to specify both the
endpoint and its region:
`--endpoint-url https://organizations.us-east-1.amazonaws.com` *(from commercial
Amazon Web Services Regions outside of China)*
or
`--endpoint-url https://organizations.cn-northwest-1.amazonaws.com.cn` *(from
Amazon Web Services Regions in China)*
* Use the default endpoint, but configure your default region with
this command:
`aws configure set default.region us-east-1` *(from commercial Amazon Web
Services Regions outside of China)*
or
`aws configure set default.region cn-northwest-1` *(from Amazon Web Services
Regions in China)*
* Use the following parameter with each command to specify the
endpoint:
`--region us-east-1` *(from commercial Amazon Web Services Regions outside of
China)*
or
`--region cn-northwest-1` *(from Amazon Web Services Regions in China)*
## Recording API Requests
Organizations supports CloudTrail, a service that records Amazon Web Services
API calls for your Amazon Web Services account and delivers log files to an
Amazon S3 bucket. By using information collected by CloudTrail, you can
determine which requests the Organizations service received, who made the
request and when, and so on. For more about Organizations and its support for
CloudTrail, see [Logging Organizations Events with CloudTrail](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_incident-response.html#orgs_cloudtrail-integration)
in the *Organizations User Guide*. To learn more about CloudTrail, including how
to turn it on and find your log files, see the [CloudTrail User Guide](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "Organizations",
api_version: "2016-11-28",
content_type: "application/x-amz-json-1.1",
credential_scope: "us-east-1",
endpoint_prefix: "organizations",
global?: true,
protocol: "json",
service_id: "Organizations",
signature_version: "v4",
signing_name: "organizations",
target_prefix: "AWSOrganizationsV20161128"
}
end
@doc """
Sends a response to the originator of a handshake agreeing to the action
proposed by the handshake request.
This operation can be called only by the following principals when they also
have the relevant IAM permissions:
* **Invitation to join** or **Approve all features request**
handshakes: only a principal from the member account.
The user who calls the API for an invitation to join must have the
`organizations:AcceptHandshake` permission. If you enabled all features in the
organization, the user must also have the `iam:CreateServiceLinkedRole`
permission so that Organizations can create the required service-linked role
named `AWSServiceRoleForOrganizations`. For more information, see [Organizations and Service-Linked
Roles](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integration_services.html#orgs_integration_service-linked-roles)
in the *Organizations User Guide*.
* **Enable all features final confirmation** handshake: only a
principal from the management account.
For more information about invitations, see [Inviting an Amazon Web Services account to join your
organization](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_invites.html)
in the *Organizations User Guide.* For more information about requests to enable
all features in the organization, see [Enabling all features in your organization](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html)
in the *Organizations User Guide.*
After you accept a handshake, it continues to appear in the results of relevant
APIs for only 30 days. After that, it's deleted.
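## Example

A minimal sketch; the client construction and handshake ID are illustrative
assumptions, not values defined by this module:

```elixir
client = AWS.Client.create("AKIA...", "secret", "us-east-1")

{:ok, result, _http_response} =
  AWS.Organizations.accept_handshake(client, %{
    "HandshakeId" => "h-examplehandshakeid111"
  })
```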
"""
def accept_handshake(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AcceptHandshake", input, options)
end
@doc """
Attaches a policy to a root, an organizational unit (OU), or an individual
account.
How the policy affects accounts depends on the type of policy. Refer to the
*Organizations User Guide* for information about each policy type:
*
[AISERVICES_OPT_OUT_POLICY](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_ai-opt-out.html) *
[BACKUP_POLICY](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_backup.html)
*
[SERVICE_CONTROL_POLICY](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scp.html) *
[TAG_POLICY](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html)
This operation can be called only from the organization's management account.
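## Example

A sketch with placeholder IDs (`client` is an `AWS.Client` struct; its
construction is assumed):

```elixir
AWS.Organizations.attach_policy(client, %{
  "PolicyId" => "p-examplepolicyid111",
  "TargetId" => "ou-examplerootid111-exampleouid111"
})
```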
"""
def attach_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AttachPolicy", input, options)
end
@doc """
Cancels a handshake.
Canceling a handshake sets the handshake state to `CANCELED`.
This operation can be called only from the account that originated the
handshake. The recipient of the handshake can't cancel it, but can use
`DeclineHandshake` instead. After a handshake is canceled, the recipient can no
longer respond to that handshake.
After you cancel a handshake, it continues to appear in the results of relevant
APIs for only 30 days. After that, it's deleted.
"""
def cancel_handshake(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CancelHandshake", input, options)
end
@doc """
Closes an Amazon Web Services member account within an organization.
You can't close the management account with this API. This is an asynchronous
request that Amazon Web Services performs in the background. Because
`CloseAccount` operates asynchronously, it can return a successful completion
message even though account closure might still be in progress. You need to wait
a few minutes before the account is fully closed. To check the status of the
request, do one of the following:
* Use the `AccountId` that you sent in the `CloseAccount` request as a
parameter to the `DescribeAccount` operation.
While the close account request is in progress, the account status will indicate
`PENDING_CLOSURE`. When the close account request completes, the status will
change to `SUSPENDED`.
* Check the CloudTrail log for the `CloseAccountResult` event that
gets published after the account closes successfully. For information on using
CloudTrail with Organizations, see [Logging and monitoring in Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_security_incident-response.html#orgs_cloudtrail-integration)
in the *Organizations User Guide.*
You can only close 10% of active member accounts within a rolling
30 day period. This quota is not bound by a calendar month, but starts when you
close an account. Within 30 days of that initial account closure, you can't
exceed the 10% account closure limit.
To reinstate a closed account, contact Amazon Web Services Support
within the 90-day grace period while the account is in `SUSPENDED` status.
If the Amazon Web Services account you attempt to close is linked
to an Amazon Web Services GovCloud (US) account, the `CloseAccount` request will
close both accounts. To learn important pre-closure details, see [ Closing an Amazon Web Services GovCloud (US)
account](https://docs.aws.amazon.com/govcloud-us/latest/UserGuide/Closing-govcloud-account.html)
in the * Amazon Web Services GovCloud User Guide*.
For more information about closing accounts, see [Closing an Amazon Web Services account](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_close.html)
in the *Organizations User Guide.*
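## Example

A sketch of closing an account and then polling its status; the account ID is
a placeholder, and the response shape follows the Organizations API:

```elixir
account_id = "111122223333"

{:ok, _result, _resp} =
  AWS.Organizations.close_account(client, %{"AccountId" => account_id})

{:ok, %{"Account" => %{"Status" => status}}, _resp} =
  AWS.Organizations.describe_account(client, %{"AccountId" => account_id})
# status is "PENDING_CLOSURE" while closure is in progress, then "SUSPENDED"
```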
"""
def close_account(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CloseAccount", input, options)
end
@doc """
Creates an Amazon Web Services account that is automatically a member of the
organization whose credentials made the request.
This is an asynchronous request that Amazon Web Services performs in the
background. Because `CreateAccount` operates asynchronously, it can return a
successful completion message even though account initialization might still be
in progress. You might need to wait a few minutes before you can successfully
access the account. To check the status of the request, do one of the following:
* Use the `Id` member of the `CreateAccountStatus` response element
from this operation as a parameter to the
`DescribeCreateAccountStatus` operation.
* Check the CloudTrail log for the `CreateAccountResult` event. For
information on using CloudTrail with Organizations, see [Logging and monitoring in
Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_security_incident-response.html#orgs_cloudtrail-integration)
in the *Organizations User Guide.*
The user who calls the API to create an account must have the
`organizations:CreateAccount` permission. If you enabled all features in the
organization, Organizations creates the required service-linked role named
`AWSServiceRoleForOrganizations`. For more information, see [Organizations and Service-Linked
Roles](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html#orgs_integrate_services-using_slrs)
in the *Organizations User Guide*.
If the request includes tags, then the requester must have the
`organizations:TagResource` permission.
Organizations preconfigures the new member account with a role (named
`OrganizationAccountAccessRole` by default) that grants users in the management
account administrator permissions in the new member account. Principals in the
management account can assume the role. Organizations clones the company name
and address information for the new account from the organization's management
account.
This operation can be called only from the organization's management account.
For more information about creating accounts, see [Creating an Amazon Web Services account in Your
Organization](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html)
in the *Organizations User Guide.*
When you create an account in an organization using the
Organizations console, API, or CLI commands, the information required for the
account to operate as a standalone account, such as a payment method and signing
the end user license agreement (EULA) is *not* automatically collected. If you
must remove an account from your organization later, you can do so only after
you provide the missing information. Follow the steps at [ To leave an organization as a member
account](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info)
in the *Organizations User Guide*.
If you get an exception that indicates that you exceeded your
account limits for the organization, contact [Amazon Web Services Support](https://console.aws.amazon.com/support/home#/).
If you get an exception that indicates that the operation failed
because your organization is still initializing, wait one hour and then try
again. If the error persists, contact [Amazon Web Services Support](https://console.aws.amazon.com/support/home#/).
Using `CreateAccount` to create multiple temporary accounts isn't
recommended. You can only close an account from the Billing and Cost Management
console, and you must be signed in as the root user. For information on the
requirements and process for closing an account, see [Closing an Amazon Web Services
account](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_close.html)
in the *Organizations User Guide*.
When you create a member account with this operation, you can choose whether to
create the account with the **IAM User and Role Access to Billing Information**
switch enabled. If you enable it, IAM users and roles that have appropriate
permissions can view billing information for the account. If you disable it,
only the account root user can access billing information. For information about
how to disable this switch for an account, see [Granting Access to Your Billing Information and
Tools](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html).
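## Example

A sketch of creating an account and checking the asynchronous request status;
the email and account name are placeholders:

```elixir
{:ok, %{"CreateAccountStatus" => %{"Id" => request_id}}, _resp} =
  AWS.Organizations.create_account(client, %{
    "Email" => "ops@example.com",
    "AccountName" => "Example Ops"
  })

{:ok, status, _resp} =
  AWS.Organizations.describe_create_account_status(client, %{
    "CreateAccountRequestId" => request_id
  })
```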
"""
def create_account(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateAccount", input, options)
end
@doc """
This action is available if all of the following are true:
* You're authorized to create accounts in the Amazon Web Services
GovCloud (US) Region.
For more information on the Amazon Web Services GovCloud (US) Region, see the [
*Amazon Web Services GovCloud User
Guide*.](https://docs.aws.amazon.com/govcloud-us/latest/UserGuide/welcome.html)
* You already have an account in the Amazon Web Services GovCloud
(US) Region that is paired with a management account of an organization in the
commercial Region.
* You call this action from the management account of your
organization in the commercial Region.
* You have the `organizations:CreateGovCloudAccount` permission.
Organizations automatically creates the required service-linked role named
`AWSServiceRoleForOrganizations`. For more information, see [Organizations and Service-Linked
Roles](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html#orgs_integrate_services-using_slrs)
in the *Organizations User Guide.*
Amazon Web Services automatically enables CloudTrail for Amazon Web Services
GovCloud (US) accounts, but you should also do the following:
* Verify that CloudTrail is enabled to store logs.
* Create an Amazon S3 bucket for CloudTrail log storage.
For more information, see [Verifying CloudTrail Is Enabled](https://docs.aws.amazon.com/govcloud-us/latest/UserGuide/verifying-cloudtrail.html)
in the *Amazon Web Services GovCloud User Guide*.
If the request includes tags, then the requester must have the
`organizations:TagResource` permission. The tags are attached to the commercial
account associated with the GovCloud account, rather than the GovCloud account
itself. To add tags to the GovCloud account, call the `TagResource` operation in
the GovCloud Region after the new GovCloud account exists.
You call this action from the management account of your organization in the
commercial Region to create a standalone Amazon Web Services account in the
Amazon Web Services GovCloud (US) Region. After the account is created, the
management account of an organization in the Amazon Web Services GovCloud (US)
Region can invite it to that organization. For more information on inviting
standalone accounts in the Amazon Web Services GovCloud (US) to join an
organization, see
[Organizations](https://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) in the *Amazon Web Services GovCloud User Guide.*
Calling `CreateGovCloudAccount` is an asynchronous request that Amazon Web
Services performs in the background. Because `CreateGovCloudAccount` operates
asynchronously, it can return a successful completion message even though
account initialization might still be in progress. You might need to wait a few
minutes before you can successfully access the account. To check the status of
the request, do one of the following:
* Use the `OperationId` response element from this operation as a
parameter to the `DescribeCreateAccountStatus` operation.
* Check the CloudTrail log for the `CreateAccountResult` event. For
information on using CloudTrail with Organizations, see [Monitoring the Activity
in Your
Organization](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_monitoring.html)
in the *Organizations User Guide.*
When you call the `CreateGovCloudAccount` action, you create two accounts: a
standalone account in the Amazon Web Services GovCloud (US) Region and an
associated account in the commercial Region for billing and support purposes.
The account in the commercial Region is automatically a member of the
organization whose credentials made the request. Both accounts are associated
with the same email address.
A role is created in the new account in the commercial Region that allows the
management account in the organization in the commercial Region to assume it. An
Amazon Web Services GovCloud (US) account is then created and associated with
the commercial account that you just created. A role is also created in the new
Amazon Web Services GovCloud (US) account that can be assumed by the Amazon Web
Services GovCloud (US) account that is associated with the management account of
the commercial organization. For more information and to view a diagram that
explains how account access works, see
[Organizations](https://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) in the *Amazon Web Services GovCloud User Guide.*
For more information about creating accounts, see [Creating an Amazon Web
Services account in Your
Organization](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html)
in the *Organizations User Guide.*
When you create an account in an organization using the
Organizations console, API, or CLI commands, the information required for the
account to operate as a standalone account is *not* automatically collected.
This includes a payment method and signing the end user license agreement
(EULA). If you must remove an account from your organization later, you can do
so only after you provide the missing information. Follow the steps at [ To leave an organization as a member
account](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info)
in the *Organizations User Guide.*
If you get an exception that indicates that you exceeded your
account limits for the organization, contact [Amazon Web Services Support](https://console.aws.amazon.com/support/home#/).
If you get an exception that indicates that the operation failed
because your organization is still initializing, wait one hour and then try
again. If the error persists, contact [Amazon Web Services Support](https://console.aws.amazon.com/support/home#/).
Using `CreateGovCloudAccount` to create multiple temporary accounts
isn't recommended. You can only close an account from the Amazon Web Services
Billing and Cost Management console, and you must be signed in as the root user.
For information on the requirements and process for closing an account, see
[Closing an Amazon Web Services account](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_close.html)
in the *Organizations User Guide*.
When you create a member account with this operation, you can choose whether to
create the account with the **IAM User and Role Access to Billing Information**
switch enabled. If you enable it, IAM users and roles that have appropriate
permissions can view billing information for the account. If you disable it,
only the account root user can access billing information. For information about
how to disable this switch for an account, see [Granting Access to Your Billing Information and
Tools](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html).
"""
def create_gov_cloud_account(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateGovCloudAccount", input, options)
end
@doc """
Creates an Amazon Web Services organization.
The account whose user is calling the `CreateOrganization` operation
automatically becomes the [management account](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_getting-started_concepts.html#account)
of the new organization.
This operation must be called using credentials from the account that is to
become the new organization's management account. The principal must also have
the relevant IAM permissions.
By default (or if you set the `FeatureSet` parameter to `ALL`), the new
organization is created with all features enabled and service control policies
automatically enabled in the root. If you instead choose to create the
organization supporting only the consolidated billing features by setting the
`FeatureSet` parameter to `CONSOLIDATED_BILLING`, no policy types are enabled
by default, and you can't use organization policies.
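## Example

A sketch; `FeatureSet` is the only commonly supplied input:

```elixir
{:ok, %{"Organization" => org}, _resp} =
  AWS.Organizations.create_organization(client, %{"FeatureSet" => "ALL"})
```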
"""
def create_organization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateOrganization", input, options)
end
@doc """
Creates an organizational unit (OU) within a root or parent OU.
An OU is a container for accounts that enables you to organize your accounts to
apply policies according to your business requirements. The number of levels
deep that you can nest OUs is dependent upon the policy types enabled for that
root. For service control policies, the limit is five.
For more information about OUs, see [Managing Organizational Units](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_ous.html)
in the *Organizations User Guide.*
If the request includes tags, then the requester must have the
`organizations:TagResource` permission.
This operation can be called only from the organization's management account.
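## Example

A sketch with placeholder parent ID and OU name:

```elixir
AWS.Organizations.create_organizational_unit(client, %{
  "ParentId" => "r-examplerootid111",
  "Name" => "Engineering"
})
```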
"""
def create_organizational_unit(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateOrganizationalUnit", input, options)
end
@doc """
Creates a policy of a specified type that you can attach to a root, an
organizational unit (OU), or an individual Amazon Web Services account.
For more information about policies and their use, see [Managing Organization Policies](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies.html).
If the request includes tags, then the requester must have the
`organizations:TagResource` permission.
This operation can be called only from the organization's management account.
"""
def create_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreatePolicy", input, options)
end
@doc """
Declines a handshake request.
This sets the handshake state to `DECLINED` and effectively deactivates the
request.
This operation can be called only from the account that received the handshake.
The originator of the handshake can use `CancelHandshake` instead. The
originator can't reactivate a declined request, but can reinitiate the process
with a new handshake request.
After you decline a handshake, it continues to appear in the results of relevant
APIs for only 30 days. After that, it's deleted.
"""
def decline_handshake(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeclineHandshake", input, options)
end
@doc """
Deletes the organization.
You can delete an organization only by using credentials from the management
account. The organization must be empty of member accounts.
"""
def delete_organization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteOrganization", input, options)
end
@doc """
Deletes an organizational unit (OU) from a root or another OU.
You must first remove all accounts and child OUs from the OU that you want to
delete.
This operation can be called only from the organization's management account.
"""
def delete_organizational_unit(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteOrganizationalUnit", input, options)
end
@doc """
Deletes the specified policy from your organization.
Before you perform this operation, you must first detach the policy from all
organizational units (OUs), roots, and accounts.
This operation can be called only from the organization's management account.
"""
def delete_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeletePolicy", input, options)
end
@doc """
Removes the specified member Amazon Web Services account as a delegated
administrator for the specified Amazon Web Services service.
Deregistering a delegated administrator can have unintended impacts on the
functionality of the enabled Amazon Web Services service. See the documentation
for the enabled service before you deregister a delegated administrator so that
you understand any potential impacts.
You can run this action only for Amazon Web Services services that support this
feature. For a current list of services that support it, see the column
*Supports Delegated Administrator* in the table at [Amazon Web Services Services that you can use with
Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services_list.html)
in the *Organizations User Guide.*
This operation can be called only from the organization's management account.
"""
def deregister_delegated_administrator(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeregisterDelegatedAdministrator", input, options)
end
@doc """
Retrieves Organizations-related information about the specified account.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
"""
def describe_account(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAccount", input, options)
end
@doc """
Retrieves the current status of an asynchronous request to create an account.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
"""
def describe_create_account_status(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeCreateAccountStatus", input, options)
end
@doc """
Returns the contents of the effective policy for the specified policy type and
account.
The effective policy is the aggregation of any policies of the specified type
that the account inherits, plus any policy of that type that is directly
attached to the account.
This operation applies only to policy types *other* than service control
policies (SCPs).
For more information about policy inheritance, see [How Policy Inheritance Works](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies-inheritance.html)
in the *Organizations User Guide*.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
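## Example

A sketch with an assumed policy type and placeholder account ID:

```elixir
AWS.Organizations.describe_effective_policy(client, %{
  "PolicyType" => "TAG_POLICY",
  "TargetId" => "111122223333"
})
```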
"""
def describe_effective_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEffectivePolicy", input, options)
end
@doc """
Retrieves information about a previously requested handshake.
The handshake ID comes from the response to the original
`InviteAccountToOrganization` operation that generated the handshake.
You can access handshakes that are `ACCEPTED`, `DECLINED`, or `CANCELED` for
only 30 days after they change to that state. They're then deleted and no longer
accessible.
This operation can be called from any account in the organization.
"""
def describe_handshake(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeHandshake", input, options)
end
@doc """
Retrieves information about the organization that the user's account belongs to.
This operation can be called from any account in the organization.
Even if a policy type is shown as available in the organization, you can disable
it separately at the root level with `DisablePolicyType`. Use `ListRoots` to see
the status of policy types for a specified root.
"""
def describe_organization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeOrganization", input, options)
end
@doc """
Retrieves information about an organizational unit (OU).
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
"""
def describe_organizational_unit(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeOrganizationalUnit", input, options)
end
@doc """
Retrieves information about a policy.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
"""
def describe_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribePolicy", input, options)
end
@doc """
Detaches a policy from a target root, organizational unit (OU), or account.
If the policy being detached is a service control policy (SCP), the changes to
permissions for Identity and Access Management (IAM) users and roles in affected
accounts are immediate.
Every root, OU, and account must have at least one SCP attached. If you want to
replace the default `FullAWSAccess` policy with an SCP that limits the
permissions that can be delegated, you must attach the replacement SCP before
you can remove the default SCP. This is the authorization strategy of an "[allow list](https://docs.aws.amazon.com/organizations/latest/userguide/SCP_strategies.html#orgs_policies_allowlist)".
If you instead attach a second SCP and leave the `FullAWSAccess` SCP still
attached, and specify `"Effect": "Deny"` in the second SCP to override the
`"Effect": "Allow"` in the `FullAWSAccess` policy (or any other attached SCP),
you're using the authorization strategy of a "[deny list](https://docs.aws.amazon.com/organizations/latest/userguide/SCP_strategies.html#orgs_policies_denylist)".
This operation can be called only from the organization's management account.
"""
def detach_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DetachPolicy", input, options)
end
@doc """
Disables the integration of an Amazon Web Services service (the service that is
specified by `ServicePrincipal`) with Organizations.
When you disable integration, the specified service no longer can create a
[service-linked role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html)
in *new* accounts in your organization. This means the service can't perform
operations on your behalf on any new accounts in your organization. The service
can still perform operations in older accounts until the service completes its
clean-up from Organizations.
We **strongly recommend** that you don't use this command to disable
integration between Organizations and the specified Amazon Web Services service.
Instead, use the console or commands that are provided by the specified service.
This lets the trusted service perform any required initialization when enabling
trusted access, such as creating any required resources and any required clean
up of resources when disabling trusted access.
For information about how to disable trusted service access to your organization
using the trusted service, see the **Learn more** link under the **Supports
Trusted Access** column at [Amazon Web Services services that you can use with Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services_list.html).
If you disable access by using this command, it causes the following actions to
occur:
* The service can no longer create a service-linked role in the
accounts in your organization. This means that the service can't perform
operations on your behalf on any new accounts in your organization. The service
can still perform operations in older accounts until the service completes its
clean-up from Organizations.
* The service can no longer perform tasks in the member accounts in
the organization, unless those operations are explicitly permitted by the IAM
policies that are attached to your roles. This includes any data aggregation
from the member accounts to the management account, or to a delegated
administrator account, where relevant.
* Some services detect this and clean up any remaining data or
resources related to the integration, while other services stop accessing the
organization but leave any historical data and configuration in place to support
a possible re-enabling of the integration.
Using the other service's console or commands to disable the integration ensures
that the other service is aware that it can clean up any resources that are
required only for the integration. How the service cleans up its resources in
the organization's accounts depends on that service. For more information, see
the documentation for the other Amazon Web Services service.
After you perform the `DisableAWSServiceAccess` operation, the specified service
can no longer perform operations in your organization's accounts.
For more information about integrating other services with Organizations,
including the list of services that work with Organizations, see [Integrating Organizations with Other Amazon Web Services
Services](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html)
in the *Organizations User Guide.*
This operation can be called only from the organization's management account.
"""
def disable_aws_service_access(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisableAWSServiceAccess", input, options)
end
@doc """
Disables an organizational policy type in a root.
A policy of a certain type can be attached to entities in a root only if that
type is enabled in the root. After you perform this operation, you no longer can
attach policies of the specified type to that root or to any organizational unit
(OU) or account in that root. You can undo this by using the `EnablePolicyType`
operation.
This is an asynchronous request that Amazon Web Services performs in the
background. If you disable a policy type for a root, it still appears enabled
for the organization if [all features](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html)
are enabled for the organization. Amazon Web Services recommends that you first
use `ListRoots` to see the status of policy types for a specified root, and then
use this operation.
This operation can be called only from the organization's management account.
To view the status of available policy types in the organization, use
`DescribeOrganization`.
"""
def disable_policy_type(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisablePolicyType", input, options)
end
@doc """
Enables all features in an organization.
This enables the use of organization policies that can restrict the services and
actions that can be called in each account. Until you enable all features, you
have access only to consolidated billing, and you can't use any of the advanced
account administration features that Organizations supports. For more
information, see [Enabling All Features in Your Organization](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html)
in the *Organizations User Guide.*
This operation is required only for organizations that were created explicitly
with only the consolidated billing features enabled. Calling this operation
sends a handshake to every invited account in the organization. The feature set
change can be finalized and the additional features enabled only after all
administrators in the invited accounts approve the change by accepting the
handshake.
After you enable all features, you can separately enable or disable individual
policy types in a root using `EnablePolicyType` and `DisablePolicyType`. To see
the status of policy types in a root, use `ListRoots`.
After all invited member accounts accept the handshake, you finalize the feature
set change by accepting the handshake that contains `"Action":
"ENABLE_ALL_FEATURES"`. This completes the change.
After you enable all features in your organization, the management account in
the organization can apply policies on all member accounts. These policies can
restrict what users and even administrators in those accounts can do. The
management account can apply policies that prevent accounts from leaving the
organization. Ensure that your account administrators are aware of this.
This operation can be called only from the organization's management account.
"""
def enable_all_features(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "EnableAllFeatures", input, options)
end
@doc """
Enables the integration of an Amazon Web Services service (the service that is
specified by `ServicePrincipal`) with Organizations.
When you enable integration, you allow the specified service to create a
[service-linked role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html)
in all the accounts in your organization. This allows the service to perform
operations on your behalf in your organization and its accounts.
We recommend that you enable integration between Organizations and the specified
Amazon Web Services service by using the console or commands that are provided
by the specified service. Doing so ensures that the service is aware that it can
create the resources that are required for the integration. How the service
creates those resources in the organization's accounts depends on that service.
For more information, see the documentation for the other Amazon Web Services
service.
For more information about enabling services to integrate with Organizations,
see [Integrating Organizations with Other Amazon Web Services Services](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html)
in the *Organizations User Guide.*
This operation can be called only from the organization's management account and
only if the organization has [enabled all features](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html).
"""
def enable_aws_service_access(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "EnableAWSServiceAccess", input, options)
end
@doc """
Enables a policy type in a root.
After you enable a policy type in a root, you can attach policies of that type
to the root, any organizational unit (OU), or account in that root. You can undo
this by using the `DisablePolicyType` operation.
This is an asynchronous request that Amazon Web Services performs in the
background. Amazon Web Services recommends that you first use `ListRoots` to see
the status of policy types for a specified root, and then use this operation.
This operation can be called only from the organization's management account.
You can enable a policy type in a root only if that policy type is available in
the organization. To view the status of available policy types in the
organization, use `DescribeOrganization`.
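## Example

A sketch enabling service control policies in a placeholder root:

```elixir
AWS.Organizations.enable_policy_type(client, %{
  "RootId" => "r-examplerootid111",
  "PolicyType" => "SERVICE_CONTROL_POLICY"
})
```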
"""
def enable_policy_type(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "EnablePolicyType", input, options)
end
@doc """
Sends an invitation to another account to join your organization as a member
account.
Organizations sends email on your behalf to the email address that is associated
with the other account's owner. The invitation is implemented as a `Handshake`
whose details are in the response.
You can invite Amazon Web Services accounts only from the same
seller as the management account. For example, if your organization's management
account was created by Amazon Internet Services Pvt. Ltd (AISPL), an Amazon Web
Services seller in India, you can invite only other AISPL accounts to your
organization. You can't combine accounts from AISPL and Amazon Web Services or
from any other Amazon Web Services seller. For more information, see
[Consolidated Billing in India](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/useconsolidatedbilliing-India.html).
If you receive an exception that indicates that you exceeded your
account limits for the organization or that the operation failed because your
organization is still initializing, wait one hour and then try again. If the
error persists after an hour, contact [Amazon Web Services Support](https://console.aws.amazon.com/support/home#/).
If the request includes tags, then the requester must have the
`organizations:TagResource` permission.
This operation can be called only from the organization's management account.
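## Example

A sketch inviting an account by email; the target address and notes are
placeholders:

```elixir
AWS.Organizations.invite_account_to_organization(client, %{
  "Target" => %{"Id" => "owner@example.com", "Type" => "EMAIL"},
  "Notes" => "Please join our organization."
})
```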
"""
def invite_account_to_organization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "InviteAccountToOrganization", input, options)
end
@doc """
Removes a member account from its parent organization.
This version of the operation is performed by the account that wants to leave.
To remove a member account as a user in the management account, use
`RemoveAccountFromOrganization` instead.
This operation can be called only from a member account in the organization.
The management account in an organization with all features enabled
can set service control policies (SCPs) that can restrict what administrators of
member accounts can do. This includes preventing them from successfully calling
`LeaveOrganization` and leaving the organization.
You can leave an organization as a member account only if the
account is configured with the information required to operate as a standalone
account. When you create an account in an organization using the Organizations
console, API, or CLI commands, the information required of standalone accounts
is *not* automatically collected. For each account that you want to make
standalone, you must perform the following steps. If any of the steps are
already completed for this account, that step doesn't appear.
* Choose a support plan
* Provide and verify the required contact information
* Provide a current payment method
Amazon Web Services uses the payment method to charge for any billable (not free
tier) Amazon Web Services activity that occurs while the account isn't attached
to an organization. Follow the steps at [ To leave an organization when all required account information has not yet been
provided](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info)
in the *Organizations User Guide.*
The account that you want to leave must not be a delegated
administrator account for any Amazon Web Services service enabled for your
organization. If the account is a delegated administrator, you must first change
the delegated administrator account to another account that is remaining in the
organization.
You can leave an organization only after you enable IAM user access
to billing in your account. For more information, see [Activating Access to the Billing and Cost Management
Console](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html#ControllingAccessWebsite-Activate)
in the *Amazon Web Services Billing and Cost Management User Guide.*
After the account leaves the organization, all tags that were
attached to the account object in the organization are deleted. Amazon Web
Services accounts outside of an organization do not support tags.
A newly created account has a waiting period before it can be
removed from its organization. If you get an error that indicates that a wait
period is required, then try again in a few days.
"""
def leave_organization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "LeaveOrganization", input, options)
end
@doc """
Lists all the accounts in the organization.
To request only the accounts in a specified root or organizational unit (OU),
use the `ListAccountsForParent` operation instead.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
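## Example

A sketch of draining pagination by following `NextToken` until it is `null`.
The helper module is hypothetical; the `"Accounts"` and `"NextToken"` response
keys follow the Organizations API shape:

```elixir
defmodule OrgPagination do
  # Accumulates every page of ListAccounts results by following NextToken.
  def list_all_accounts(client), do: do_list(client, nil, [])

  defp do_list(client, token, acc) do
    input = if token, do: %{"NextToken" => token}, else: %{}
    {:ok, body, _resp} = AWS.Organizations.list_accounts(client, input)
    acc = acc ++ Map.get(body, "Accounts", [])

    case Map.get(body, "NextToken") do
      nil -> acc
      next -> do_list(client, next, acc)
    end
  end
end
```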
"""
def list_accounts(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListAccounts", input, options)
end
@doc """
Lists the accounts in an organization that are contained by the specified target
root or organizational unit (OU).
If you specify the root, you get a list of all the accounts that aren't in any
OU. If you specify an OU, you get a list of all the accounts in only that OU and
not in any child OUs. To get a list of all accounts in the organization, use the
`ListAccounts` operation.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
"""
def list_accounts_for_parent(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListAccountsForParent", input, options)
end
@doc """
Returns a list of the Amazon Web Services services that you enabled to integrate
with your organization.
After a service on this list creates the resources that it requires for the
integration, it can perform operations on your organization and its accounts.
For more information about integrating other services with Organizations,
including the list of services that currently work with Organizations, see
[Integrating Organizations with Other Amazon Web Services Services](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html)
in the *Organizations User Guide.*
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
"""
def list_aws_service_access_for_organization(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"ListAWSServiceAccessForOrganization",
input,
options
)
end
@doc """
Lists all of the organizational units (OUs) or accounts that are contained in
the specified parent OU or root.
This operation, along with `ListParents`, enables you to traverse the tree
structure that makes up this root.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
"""
def list_children(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListChildren", input, options)
end
@doc """
Lists the account creation requests that match the specified status and that
are currently being tracked for the organization.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
"""
def list_create_account_status(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListCreateAccountStatus", input, options)
end
@doc """
Lists the Amazon Web Services accounts that are designated as delegated
administrators in this organization.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
"""
def list_delegated_administrators(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListDelegatedAdministrators", input, options)
end
@doc """
Lists the Amazon Web Services services for which the specified account is a
delegated administrator.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
"""
def list_delegated_services_for_account(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListDelegatedServicesForAccount", input, options)
end
@doc """
Lists the current handshakes that are associated with the account of the
requesting user.
Handshakes that are `ACCEPTED`, `DECLINED`, `CANCELED`, or `EXPIRED` appear in
the results of this API for only 30 days after changing to that state. After
that, they're deleted and no longer accessible.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called from any account in the organization.
"""
def list_handshakes_for_account(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListHandshakesForAccount", input, options)
end
@doc """
Lists the handshakes that are associated with the organization that the
requesting user is part of.
The `ListHandshakesForOrganization` operation returns a list of handshake
structures. Each structure contains details and status about a handshake.
Handshakes that are `ACCEPTED`, `DECLINED`, `CANCELED`, or `EXPIRED` appear in
the results of this API for only 30 days after changing to that state. After
that, they're deleted and no longer accessible.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
"""
def list_handshakes_for_organization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListHandshakesForOrganization", input, options)
end
@doc """
Lists the organizational units (OUs) in a parent organizational unit or root.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
"""
def list_organizational_units_for_parent(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListOrganizationalUnitsForParent", input, options)
end
@doc """
Lists the root or organizational units (OUs) that serve as the immediate parent
of the specified child OU or account.
This operation, along with `ListChildren`, enables you to traverse the tree
structure that makes up this root.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
In the current release, a child can have only a single parent.
"""
def list_parents(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListParents", input, options)
end
@doc """
Retrieves the list of all policies in an organization of a specified type.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
"""
def list_policies(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListPolicies", input, options)
end
@doc """
Lists the policies that are directly attached to the specified target root,
organizational unit (OU), or account.
You must specify the policy type that you want included in the returned list.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
"""
def list_policies_for_target(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListPoliciesForTarget", input, options)
end
@doc """
Lists the roots that are defined in the current organization.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
Policy types can be enabled and disabled in roots. This is distinct from whether
they're available in the organization. When you enable all features, you make
policy types available for use in that organization. Individual policy types can
then be enabled and disabled in a root. To see the availability of a policy type
in an organization, use `DescribeOrganization`.
"""
def list_roots(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListRoots", input, options)
end
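  # Illustrative sketch: read which policy types are enabled in each root.
  # "Roots", "Id" and "PolicyTypes" (entries with "Type" and "Status") are the
  # documented response fields of `ListRoots`.
  defp enabled_policy_types(client) do
    with {:ok, %{"Roots" => roots}, _response} <- list_roots(client, %{}) do
      pairs =
        for root <- roots,
            %{"Type" => type, "Status" => "ENABLED"} <- root["PolicyTypes"] || [] do
          {root["Id"], type}
        end

      {:ok, pairs}
    end
  end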
@doc """
Lists tags that are attached to the specified resource.
You can attach tags to the following resources in Organizations.
* Amazon Web Services account
* Organization root
* Organizational unit (OU)
* Policy (any type)
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
Lists all the roots, organizational units (OUs), and accounts that the specified
policy is attached to.
Always check the `NextToken` response parameter for a `null` value when calling
a `List*` operation. These operations can occasionally return an empty set of
results even when there are more results available. The `NextToken` response
parameter value is `null` *only* when there are no more results to display.
This operation can be called only from the organization's management account or
by a member account that is a delegated administrator for an Amazon Web Services
service.
"""
def list_targets_for_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTargetsForPolicy", input, options)
end
@doc """
Moves an account from its current source parent root or organizational unit (OU)
to the specified destination parent root or OU.
This operation can be called only from the organization's management account.
"""
def move_account(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "MoveAccount", input, options)
end
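  # Illustrative request shape (values are placeholders): `MoveAccount` names
  # the account plus its current and destination parents.
  #
  #   move_account(client, %{
  #     "AccountId" => "111111111111",
  #     "SourceParentId" => "r-examplerootid111",
  #     "DestinationParentId" => "ou-examplerootid111-exampleouid111"
  #   })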
@doc """
Enables the specified member account to administer the Organizations features of
the specified Amazon Web Services service.
It grants read-only access to Organizations service data. The account still
requires IAM permissions to access and administer the Amazon Web Services
service.
You can run this action only for Amazon Web Services services that support this
feature. For a current list of services that support it, see the column
*Supports Delegated Administrator* in the table at [Amazon Web Services Services that you can use with
Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services_list.html)
in the *Organizations User Guide.*
This operation can be called only from the organization's management account.
"""
def register_delegated_administrator(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RegisterDelegatedAdministrator", input, options)
end
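  # Illustrative request shape (values are placeholders): the documented input
  # names the member account and the service principal being delegated.
  #
  #   register_delegated_administrator(client, %{
  #     "AccountId" => "111111111111",
  #     "ServicePrincipal" => "config.amazonaws.com"
  #   })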
@doc """
Removes the specified account from the organization.
The removed account becomes a standalone account that isn't a member of any
organization. It's no longer subject to any policies and is responsible for its
own bill payments. The organization's management account is no longer charged
for any expenses accrued by the member account after it's removed from the
organization.
This operation can be called only from the organization's management account.
Member accounts can remove themselves with `LeaveOrganization` instead.
You can remove an account from your organization only if the
account is configured with the information required to operate as a standalone
account. When you create an account in an organization using the Organizations
console, API, or CLI commands, the information required of standalone accounts
is *not* automatically collected. For an account that you want to make
standalone, you must choose a support plan, provide and verify the required
contact information, and provide a current payment method. Amazon Web Services
uses the payment method to charge for any billable (not free tier) Amazon Web
Services activity that occurs while the account isn't attached to an
organization. To remove an account that doesn't yet have this information, you
must sign in as the member account and follow the steps at [To leave an organization when all required account information has not yet been
provided](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info)
in the *Organizations User Guide.*
The account that you want to leave must not be a delegated
administrator account for any Amazon Web Services service enabled for your
organization. If the account is a delegated administrator, you must first change
the delegated administrator account to another account that is remaining in the
organization.
After the account leaves the organization, all tags that were
attached to the account object in the organization are deleted. Amazon Web
Services accounts outside of an organization do not support tags.
"""
def remove_account_from_organization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RemoveAccountFromOrganization", input, options)
end
@doc """
Adds one or more tags to the specified resource.
Currently, you can attach tags to the following resources in Organizations.
* Amazon Web Services account
* Organization root
* Organizational unit (OU)
* Policy (any type)
This operation can be called only from the organization's management account.
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
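  # Illustrative request shape (values are placeholders): `TagResource` takes
  # the target's ID plus a list of key/value pairs.
  #
  #   tag_resource(client, %{
  #     "ResourceId" => "ou-examplerootid111-exampleouid111",
  #     "Tags" => [%{"Key" => "team", "Value" => "platform"}]
  #   })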
@doc """
Removes any tags with the specified keys from the specified resource.
You can attach tags to the following resources in Organizations.
* Amazon Web Services account
* Organization root
* Organizational unit (OU)
* Policy (any type)
This operation can be called only from the organization's management account.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
@doc """
Renames the specified organizational unit (OU).
The ID and ARN don't change. The child OUs and accounts remain in place, and any
attached policies of the OU remain attached.
This operation can be called only from the organization's management account.
"""
def update_organizational_unit(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateOrganizationalUnit", input, options)
end
@doc """
Updates an existing policy with a new name, description, or content.
Any parameter that you don't supply remains unchanged. You can't
change a policy's type.
This operation can be called only from the organization's management account.
"""
def update_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdatePolicy", input, options)
end
end
|
lib/aws/generated/organizations.ex
| 0.831143
| 0.580679
|
organizations.ex
|
starcoder
|
defmodule LogstashJson.Event do
@moduledoc """
This module contains functions for generating and serializing log events.
"""
@doc "Generate a log event from log data"
def event(level, msg, ts, md, %{fields: fields, utc_log: utc_log, formatter: formatter}) do
fields
|> format_fields(md, %{
"@timestamp": timestamp(ts, utc_log),
level: level,
message: to_string(msg),
module: md[:module],
function: md[:function],
line: md[:line]
})
|> formatter.()
end
@doc "Serialize a log event to a JSON string"
def json(event) do
event |> pre_encode |> Poison.encode()
end
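  # A hedged usage sketch: build an event and serialize it. The config map
  # mirrors the keys `event/5` expects, and `ts` uses the
  # {{year, month, day}, {hour, min, sec, millis}} shape consumed by `timestamp/2` below.
  #
  #   config = %{fields: %{app: "myapp"}, utc_log: true, formatter: & &1}
  #   ts = {{2024, 1, 2}, {3, 4, 5, 678}}
  #
  #   {:ok, json} =
  #     LogstashJson.Event.event(:info, "hello", ts, [module: MyApp], config)
  #     |> LogstashJson.Event.json()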
def format_fields(fields, metadata, field_overrides) do
metadata
|> format_metadata()
|> Map.merge(fields)
|> Map.merge(field_overrides)
end
defp format_metadata(metadata) do
metadata
|> Enum.into(%{})
end
def resolve_formatter_config(formatter_spec, default_formatter \\ & &1) do
# Find an appropriate formatter, if possible, from this config spec.
case formatter_spec do
{module, function} ->
if Keyword.has_key?(module.__info__(:functions), function) do
{:ok, &apply(module, function, [&1])}
else
{:error, {module, function}}
end
fun when is_function(fun) ->
{:ok, fun}
nil ->
{:ok, default_formatter}
bad_formatter ->
{:error, bad_formatter}
end
end
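  # A hedged sketch of the three accepted formatter specs handled above:
  #
  #   resolve_formatter_config({String, :capitalize})  #=> {:ok, fun} (exported function)
  #   resolve_formatter_config(&Map.to_list/1)         #=> {:ok, &Map.to_list/1}
  #   resolve_formatter_config(nil)                    #=> {:ok, default formatter (& &1)}
  #
  # Any other value comes back as {:error, bad_formatter}.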
# Functions for generating timestamp
defp timestamp(ts, utc_log) do
datetime(ts) <> timezone(utc_log)
end
defp datetime({{year, month, day}, {hour, min, sec, millis}}) do
{:ok, ndt} = NaiveDateTime.new(year, month, day, hour, min, sec, {millis * 1000, 3})
NaiveDateTime.to_iso8601(ndt)
end
defp timezone(true), do: "+00:00"
defp timezone(_), do: timezone()
defp timezone() do
offset = timezone_offset()
minute = offset |> abs() |> rem(3600) |> div(60)
hour = offset |> abs() |> div(3600)
sign(offset) <> zero_pad(hour, 2) <> ":" <> zero_pad(minute, 2)
end
defp timezone_offset() do
t_utc = :calendar.universal_time()
t_local = :calendar.universal_time_to_local_time(t_utc)
s_utc = :calendar.datetime_to_gregorian_seconds(t_utc)
s_local = :calendar.datetime_to_gregorian_seconds(t_local)
s_local - s_utc
end
defp sign(total) when total < 0, do: "-"
defp sign(_), do: "+"
defp zero_pad(val, count) do
num = Integer.to_string(val)
:binary.copy("0", count - byte_size(num)) <> num
end
# traverse data and stringify special Elixir/Erlang terms
defp pre_encode(it) when is_pid(it), do: inspect(it)
defp pre_encode(it) when is_function(it), do: inspect(it)
defp pre_encode(it) when is_list(it), do: Enum.map(it, &pre_encode/1)
defp pre_encode(it) when is_tuple(it), do: pre_encode(Tuple.to_list(it))
defp pre_encode(%module{} = it) do
try do
:ok = Protocol.assert_impl!(Poison.Encoder, module)
it
rescue
ArgumentError -> pre_encode(Map.from_struct(it))
end
end
defp pre_encode(it) when is_map(it) do
it
    # drop the :mfa metadata entry before encoding
    |> Enum.reject(fn {k, _} -> k == :mfa end)
|> Enum.into(%{}, fn {k, v} -> {pre_encode(k), pre_encode(v)} end)
end
defp pre_encode(it) when is_binary(it) do
it
|> String.valid?()
|> case do
true -> it
false -> inspect(it)
end
end
defp pre_encode(it), do: it
end
|
lib/logstash_json/event.ex
| 0.630571
| 0.462594
|
event.ex
|
starcoder
|
defmodule Dotx do
@typedoc """
An `id` type is a value in DOT: either a simple string or an HTML string.
The `%Dotx.HTML{}` allows you to match the latter.
"""
@type id :: binary | %Dotx.HTML{html: binary}
@typedoc """
A `nodeid` type is either a simple node id `["myid"]` or a node id with a port: `["myid","myport"]`
"""
@type nodeid :: [binary]
@type graph :: graph(edge) | graph(flatedge)
@typedoc """
The main structure containing all parsed info from a DOT graph:
- `strict` is `true` if the strict prefix is present
- `type` is `:digraph` if graph is a directed graph, `:graph` otherwise
- `attrs` are the attributes of the graph itself: any key-value pairs are allowed
- `(nodes|edges|graphs)_attrs` are attributes which all subelements
(respectively node, edge or subgraph) will inherit (`node [key=value]` in DOT)
- `children` is the list of children: node, edge or subgraph
"""
@type graph(edgetype) :: %Dotx.Graph{
strict: boolean,
type: :graph | :digraph,
id: nil | id,
attrs: %{optional(id) => id},
nodes_attrs: %{optional(id) => id},
edges_attrs: %{optional(id) => id},
graphs_attrs: %{optional(id) => id},
children: [dotnode | edgetype | subgraph(edgetype)]
}
@typedoc """
A `dotnode` is the leaf structure of the graph: only an id and its attributes as a free map.
"""
@type dotnode :: %Dotx.Node{id: nodeid, attrs: %{optional(id) => id}}
@typedoc """
An `edge` is a link between nodes (`from:`, `to:`). It has attributes which are set on itself or inherited (see `graph()`).
`to` can be another edge (`a->b->c->d`) to inline multiple edges, or a subgraph (`{a b}->{c d}`) as a shortcut for
`a->c a->d b->c b->d`. You can use `Dotx.flatten/1` to expand edges and get only `flatedge()` with links between raw nodes.
"""
@type edge :: %Dotx.Edge{
attrs: %{optional(id) => id}, bidir: boolean,
from: dotnode | subgraph(edge), to: dotnode | subgraph(edge) | edge
}
@typedoc "see `edge()` : an edge with raw `dotnode()`, after `Dotx.flatten` all edges are `flatedge()`"
@type flatedge :: %Dotx.Edge{
attrs: %{optional(id) => id}, bidir: boolean,
from: dotnode, to: dotnode
}
@typedoc "see `graph()` : same as graph without graph type specification"
@type subgraph(edgetype) :: %Dotx.SubGraph{
id: nil | id,
attrs: %{optional(id) => id},
nodes_attrs: %{optional(id) => id},
edges_attrs: %{optional(id) => id},
graphs_attrs: %{optional(id) => id},
children: [dotnode | edgetype | subgraph(edgetype)]
}
@moduledoc """
This library is a DOT parser and generator.
Main functions are `encode/1` and `decode/1` (also usable via `to_string`
and the `String.Chars` protocol).
The structure of type `graph()` makes a decoded DOT graph easy to handle;
the principle is that the structure is exactly homogeneous with a DOT graph:
- it contains all inherited attributes for nodes, edges and subgraphs (`*_attrs`)
- it contains its own `attrs` in addition to the `id`
- it is a recursive structure containing `children`: either `subgraph`, `node` or `edge`
- an edge's `from` and `to` can be nodes, but also a subgraph (`a->{c d}`) or
another edge (`a->c->d`). These *edge shorthands* are actually sugar for
defining lists of `node->node` edges.
The structure is usable by itself, but the subgraph tree, edge shorthands and attribute
inheritance make it non-trivial to handle. To help you manage this complexity, Dotx provides
helper functions:
- `flatten/1` creates a unitary edge for every DOT shorthand (inline edge
`a->b->c` or graph edge `{a b}->c`) so that all edges are expanded into plain
`node->node` edges (`a->b a->c b->c`)
- `spread_attributes/1` spreads default attributes from the graph/subgraph tree to
all children, handling attribute inheritance while keeping the original graph structure.
- `identify/1` adds an identifier to every graph and subgraph without an id in the
original graph: `xN`, where `N` is the index of the subgraph in the order
of appearance in the file.
- `to_nodes/1` returns flat databases of nodes and graphs containing
additional special attributes that preserve the graph information
(`"graph"`,`"edges_from"`,`"parent"`,`"children"`), and where all
inherited attributes are filled in.
- `to_edges/1` returns flat databases of edges and graphs containing
additional special attributes that preserve the graph information
(`"graph"`,`"parent"`,`"children"`) and where the `from` and `to` fields
are filled with complete node structures carrying inherited attributes.
- `to_digraph/1` returns an Erlang `:digraph` structure whose vertices are
node ids. This allows you to use the `:digraph_utils` module for complex
graph computations.
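A minimal round trip (a sketch; the exact structures depend on the input):
```elixir
{:ok, graph} = Dotx.decode("digraph { a -> {b c} }")
flat = Dotx.flatten(graph)  # edges are now only node->node: a->b and a->c
dot = Dotx.encode(flat)     # back to a DOT binary string
```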
"""
@doc "Main lib function: same as `to_string(graph)`, encode (pretty) graph as a DOT binary string"
@spec encode(graph) :: binary
def encode(graph) do to_string(graph) end
@doc "Main lib function: parse a DOT graph to get a `Dotx.Graph` structure"
@spec decode(binary) :: {:ok,graph(edge)} | {:error,msg :: binary}
defdelegate decode(bin), to: Dotx.Graph, as: :parse
@doc "Same as `decode/1` but with an `BadArgument` error if DOT file is not valid"
@spec decode!(binary) :: graph(edge)
defdelegate decode!(bin), to: Dotx.Graph, as: :parse!
@doc """
Flattens all DOT edge shortcuts (`a->{b c}->d` becomes `a->b a->c b->d c->d`), so that all `Dotx.Edge`
have only `Dotx.Node` on both sides (`from` and `to`).
"""
@spec flatten(graph(edge)) :: graph(flatedge)
defdelegate flatten(graph), to: Dotx.Helpers
@doc """
Spreads all inherited attributes (`(nodes|edges|graphs)_attrs`) of graphs to
their descendants' `attrs`.
"""
@spec spread_attributes(graph) :: graph
defdelegate spread_attributes(graph), to: Dotx.Helpers
@doc """
Gives an `id` to every graph and subgraph that has none:
`{ a { b c } }` becomes `subgraph x0 { a subgraph x1 { b c } }`
"""
@spec identify(graph(edge)) :: graph(edge)
defdelegate identify(graph), to: Dotx.Helpers
@doc """
Returns flat databases of nodes and graphs containing
additional special attributes that preserve the graph information
(`"graph"`,`"edges_from"`,`"parent"`,`"children"`), and where all
inherited attributes are filled in:
- `identify/1` is called to ensure every subgraph has an id
- `flatten/1` is called to ensure that all unitary edges are expanded from DOT shorthands.
For the nodes returned:
- the attrs are filled with inherited attributes from the parent subgraphs' `nodes_attrs` (`node [k=v]`)
- a `"graph"` attribute is added to each node and contains the identifier of
the subgraph owning the node (the deepest subgraph containing the node in the DOT graph tree)
- an `"edges_from"` attribute is added to every node and contains the list of
`%Dotx.Edge{}` structures originating from this node. For these edge structures:
- the `"graph"` attribute is also filled (the graph owning the edge is not
necessarily the one owning the nodes on both sides)
- the attrs are filled with inherited attributes from the parent subgraphs' `edges_attrs` (`edge [k=v]`)
- the `from` and `to` `%Dotx.Node{}` contain only `id`; `attrs` are not set,
to avoid duplicating data held in the parent nodes map.
For the graphs returned:
- the attrs are filled with inherited attributes from the parent subgraphs' `graphs_attrs` (`graph [k=v]`)
- a `"parent"` attribute is added containing the parent graph id in the subgraph tree
- a `"children"` attribute is added containing the list of child graph ids in the subgraph tree
- `:children` is set to the empty list `[]` so that the graph structure is used
only for attributes, not for the nodes and edges already present in the
returned nodes map.
"""
@type nodes_db :: {
nodes :: %{ nodeid() => node() },
graphs :: %{ id() => graph() }
}
@spec to_nodes(graph) :: nodes_db
defdelegate to_nodes(graph), to: Dotx.Helpers
@doc """
A variant of `to_nodes/1`: fills edges and nodes with all inherited
attributes and also with a `"graph"` attribute. But instead of returning
nodes with their edges filled into the `edges_from` attribute, it returns the list of all
edges, where the nodes in `from` and `to` are fully filled `%Dotx.Node{}` structures.
- The function actually calls `to_nodes/1`, so you can pass a `to_nodes/1` result as the
parameter to avoid doing the heavy computation twice.
- all the rules for filling graphs, nodes and edges are the same as for `to_nodes/1`
"""
@spec to_edges(graph | nodes_db) :: {edges :: [flatedge()], graphs :: %{ id() => graph() }}
defdelegate to_edges(graph_or_nodesdb), to: Dotx.Helpers
@doc """
Creates an Erlang `:digraph` structure from a graph (see the [erlang doc](http://erlang.org/doc/man/digraph.html))
where vertices are `nodeid()`.
This makes it easy to use the `:digraph` and `:digraph_utils` modules to walk
the graph and perform complex analyses of it.
"""
@spec to_digraph(graph) :: :digraph.graph()
defdelegate to_digraph(graph), to: Dotx.Helpers
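  # A hedged usage sketch: once in `:digraph` form, the stock Erlang utilities
  # apply directly.
  #
  #   dg = Dotx.to_digraph(graph)
  #   :digraph.vertices(dg)                      # => [["a"], ["b"], ...]
  #   :digraph_utils.topsort(dg)                 # topological sort, or false on cycles
  #   :digraph.get_short_path(dg, ["a"], ["c"])  # shortest path between two nodes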
end
|
lib/dotx.ex
| 0.953046
| 0.754847
|
dotx.ex
|
starcoder
|
defmodule Export.Ruby do
@moduledoc """
Wrapper for Ruby.
## Example
```elixir
defmodule SomeRubyCall do
use Export.Ruby
def call_ruby_method do
# path to ruby files
{:ok, ruby} = Ruby.start(ruby_lib: Path.expand("lib/ruby"))
# call "upcase" method from "test" file with "hello" argument
ruby |> Ruby.call("test", "upcase", ["hello"])
# same as above but prettier
ruby |> Ruby.call(upcase("hello"), from_file: "test")
end
end
```
"""
import Export.Helpers
@doc false
defmacro __using__(_opts) do
quote do
alias Export.Ruby
require Export.Ruby
end
end
@doc """
Start Ruby instance with the default options.
Returns `{:ok, pid}`.
## Examples
iex> Export.Ruby.start()
{:ok, pid}
"""
def start(), do: :ruby.start()
@doc """
Start Ruby instance with options.
The `options` argument should be a map with the following options.
## Ruby options
- ruby: Path to the Ruby interpreter executable
- ruby_lib: The search path for Ruby programs. The value can be a string in RUBYLIB format or a list of paths.
"""
def start(options), do: options |> convert_options |> :ruby.start
@doc """
Start Ruby instance with name and options.
The instance will be registered with the given name. The `options` argument should be a map with the following options.
## Ruby options
- ruby: Path to the Ruby interpreter executable
- ruby_lib: The search path for Ruby programs. The value can be a string in RUBYLIB format or a list of paths.
"""
def start(name, options) when not is_tuple(name), do: :ruby.start({:local, name}, options |> convert_options)
def start(name, options), do: :ruby.start(name, options |> convert_options)
@doc """
The same as start/0 except the link to the current process is also created.
"""
def start_link(), do: :ruby.start_link()
@doc """
The same as start/1 except the link to the current process is also created.
"""
def start_link(options), do: options |> convert_options |> :ruby.start_link
@doc """
The same as start/2 except the link to the current process is also created.
"""
def start_link(name, options) when not is_tuple(name), do: :ruby.start_link({:local, name}, options |> convert_options)
def start_link(name, options), do: :ruby.start_link(name, options |> convert_options)
@doc """
Stop Ruby instance
"""
def stop(instance), do: :ruby.stop(instance)
@doc """
Call Ruby function.
## Parameters
- instance: pid which is returned by one of the `start` functions
- file: file to run ruby function from
- function: name of the function
- arguments: arguments to pass to the function
## Example
```
# call "upcase" method from "test" file with "hello" argument
ruby |> Ruby.call("test", "upcase", ["hello"])
```
"""
def call(instance, file, function, arguments), do: :ruby.call(instance, String.to_atom(file), String.to_atom(function), arguments)
@doc """
Call Ruby function.
## Parameters
- instance: pid which is returned by one of the `start` functions
- expression: function expression to execute in ruby world
- from_file: file to run ruby function from
## Example
```
# call "upcase" method from "test" file with "hello" argument
ruby |> Ruby.call(upcase("hello"), from_file: "test")
```
"""
defmacro call(instance, expression, from_file: file) do
{function, _meta, arguments} = expression
arguments = arguments || []
quote do
:ruby.call(unquote(instance), String.to_atom(unquote(file)), unquote(function), unquote(arguments))
end
end
end
|
lib/export/ruby.ex
| 0.850903
| 0.778313
|
ruby.ex
|
starcoder
|
defmodule Mux.Video.Spaces do
@moduledoc """
This module provides functions for managing Spaces in Mux Video. [API Documentation](https://docs.mux.com/api-reference/video#tag/spaces)
"""
alias Mux.{Base, Fixtures}
@path "/video/v1/spaces"
@doc """
Create a new space.
Returns `{:ok, space, %Tesla.Env{}}`.
## Examples
iex> client = Mux.Base.new("my_token_id", "my_token_secret")
iex> {:ok, space, _env} = Mux.Video.Spaces.create(client, %{type: "server", passthrough: "example", broadcasts: [%{passthrough: "example", live_stream_id: "vJvFbCojkuSDAAeEK4EddOA01wRqN1mP4", layout: "gallery", background: "https://example.com/background.jpg", resolution: "1920x1080"}]})
iex> space
#{inspect(Fixtures.space())}
"""
def create(client, params) do
Base.post(client, @path, params)
end
@doc """
List spaces.
Returns a tuple such as `{:ok, spaces, %Tesla.Env{}}`
## Examples
iex> client = Mux.Base.new("my_token_id", "my_token_secret")
iex> {:ok, spaces, _env} = Mux.Video.Spaces.list(client)
iex> spaces
#{inspect([Fixtures.space(), Fixtures.space()])}
"""
def list(client, params \\ []), do: Base.get(client, @path, query: params)
@doc """
Retrieve a space by ID.
Returns a tuple such as `{:ok, space, %Tesla.Env{}}`
## Examples
iex> client = Mux.Base.new("my_token_id", "my_token_secret")
iex> {:ok, space, _env} = Mux.Video.Spaces.get(client, "xe00FkgJMdZrYQ001VC53bd01lf9ADs6YWk")
iex> space
#{inspect(Fixtures.space())}
"""
def get(client, space_id, params \\ []) do
Base.get(client, "#{@path}/#{space_id}", query: params)
end
@doc """
Delete a space.
Returns a tuple such as `{:ok, "", %Telsa.Env{}}`
## Examples
iex> client = Mux.Base.new("my_token_id", "my_token_secret")
iex> {status, "", _env} = Mux.Video.Spaces.delete(client, "xe00FkgJMdZrYQ001VC53bd01lf9ADs6YWk")
iex> status
:ok
"""
def delete(client, space_id, params \\ []) do
Base.delete(client, "#{@path}/#{space_id}", query: params)
end
@doc """
Create a new space broadcast.
Returns a tuple such as `{:ok, broadcast, %Tesla.Env{}}`
## Examples
iex> client = Mux.Base.new("my_token_id", "my_token_secret")
iex> {:ok, broadcast, _env} = Mux.Video.Spaces.create_space_broadcast(client, "xe00FkgJMdZrYQ001VC53bd01lf9ADs6YWk", %{passthrough: "example", live_stream_id: "vJvFbCojkuSDAAeEK4EddOA01wRqN1mP4", layout: "gallery", background: "https://example.com/background.jpg", resolution: "1920x1080"})
iex> broadcast
#{inspect(Fixtures.broadcast())}
"""
def create_space_broadcast(client, space_id, params) do
Base.post(client, "#{@path}/#{space_id}/broadcasts", params)
end
@doc """
Retrieve a space broadcast.
Returns a tuple such as `{:ok, broadcast, %Tesla.Env{}}`
## Examples
iex> client = Mux.Base.new("my_token_id", "my_token_secret")
iex> {:ok, broadcast, _env} = Mux.Video.Spaces.get_space_broadcast(client, "<KEY>", "<KEY>")
iex> broadcast
#{inspect(Fixtures.broadcast())}
"""
def get_space_broadcast(client, space_id, broadcast_id, params \\ []) do
Base.get(client, "#{@path}/#{space_id}/broadcasts/#{broadcast_id}", query: params)
end
@doc """
Delete a space broadcast.
Returns a tuple such as `{:ok, "", %Tesla.Env{}}`
## Examples
iex> client = Mux.Base.new("my_token_id", "my_token_secret")
iex> {status, "", _env} = Mux.Video.Spaces.delete_space_broadcast(client, "<KEY>", "<KEY>")
iex> status
:ok
"""
def delete_space_broadcast(client, space_id, broadcast_id, params \\ []) do
Base.delete(client, "#{@path}/#{space_id}/broadcasts/#{broadcast_id}", query: params)
end
@doc """
Start a space broadcast.
Returns a tuple such as `{:ok, %{}, %Tesla.Env{}}`
## Examples
iex> client = Mux.Base.new("my_token_id", "my_token_secret")
iex> {status, %{}, _env} = Mux.Video.Spaces.start_space_broadcast(client, "<KEY>", "<KEY>")
iex> status
:ok
"""
def start_space_broadcast(client, space_id, broadcast_id) do
Base.post(client, "#{@path}/#{space_id}/broadcasts/#{broadcast_id}/start", %{})
end
@doc """
Stop a space broadcast.
Returns a tuple such as `{:ok, %{}, %Tesla.Env{}}`
## Examples
iex> client = Mux.Base.new("my_token_id", "my_token_secret")
iex> {status, %{}, _env} = Mux.Video.Spaces.stop_space_broadcast(client, "xe00FkgJMdZrYQ001VC53bd01lf9ADs6YWk", "fZw6qjWmKLmjfi0200NBzsgGrXZImT3KiJ")
iex> status
:ok
"""
def stop_space_broadcast(client, space_id, broadcast_id) do
Base.post(client, "#{@path}/#{space_id}/broadcasts/#{broadcast_id}/stop", %{})
end
end
|
lib/mux/video/spaces.ex
| 0.825062
| 0.412441
|
spaces.ex
|
starcoder
|
defmodule Benchmarks.GoogleMessage3.Message22853 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field22869, 1, optional: true, type: Benchmarks.GoogleMessage3.Enum22854, enum: true
field :field22870, 2, repeated: true, type: :uint32, packed: true, deprecated: false
field :field22871, 3, repeated: true, type: :float, packed: true, deprecated: false
field :field22872, 5, repeated: true, type: :float, packed: true, deprecated: false
field :field22873, 4, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
end
defmodule Benchmarks.GoogleMessage3.Message24345 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field24533, 1, optional: true, type: :string
field :field24534, 22, optional: true, type: Benchmarks.GoogleMessage3.UnusedEnum, enum: true
field :field24535, 2, optional: true, type: Benchmarks.GoogleMessage3.Message24346
field :field24536, 3, optional: true, type: :string
field :field24537, 4, optional: true, type: :string
field :field24538, 23, optional: true, type: Benchmarks.GoogleMessage3.UnusedEnum, enum: true
field :field24539, 5, optional: true, type: :string
field :field24540, 6, required: true, type: :string
field :field24541, 7, optional: true, type: :string
field :field24542, 8, optional: true, type: :string
field :field24543, 9, optional: true, type: Benchmarks.GoogleMessage3.Message24316
field :field24544, 10, optional: true, type: Benchmarks.GoogleMessage3.Message24376
field :field24545, 11, optional: true, type: :string
field :field24546, 19, optional: true, type: :string
field :field24547, 20, optional: true, type: :string
field :field24548, 21, optional: true, type: :string
field :field24549, 12, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24550, 13, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24551, 14, repeated: true, type: :string
field :field24552, 15, optional: true, type: :string
field :field24553, 18, optional: true, type: :int32
field :field24554, 16, optional: true, type: Benchmarks.GoogleMessage3.Message24379
field :field24555, 17, optional: true, type: :string
field :field24556, 24, repeated: true, type: Benchmarks.GoogleMessage3.Message24356
field :field24557, 25, repeated: true, type: Benchmarks.GoogleMessage3.Message24366
end
defmodule Benchmarks.GoogleMessage3.Message24403 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field24681, 1, optional: true, type: Benchmarks.GoogleMessage3.Message24401
field :field24682, 2, optional: true, type: Benchmarks.GoogleMessage3.Message24402
end
defmodule Benchmarks.GoogleMessage3.Message24391 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field24631, 1, optional: true, type: :string
field :field24632, 2, optional: true, type: :string
field :field24633, 3, repeated: true, type: :string
field :field24634, 4, optional: true, type: :string
field :field24635, 5, repeated: true, type: :string
field :field24636, 16, repeated: true, type: :string
field :field24637, 17, optional: true, type: :string
field :field24638, 25, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24639, 7, optional: true, type: :string
field :field24640, 18, optional: true, type: :string
field :field24641, 19, optional: true, type: :string
field :field24642, 20, optional: true, type: :string
field :field24643, 24, optional: true, type: :int32
field :field24644, 8, optional: true, type: Benchmarks.GoogleMessage3.Message24379
field :field24645, 9, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24646, 10, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24647, 11, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24648, 12, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24649, 13, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24650, 14, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field24651, 21, optional: true, type: :string
field :field24652, 22, optional: true, type: :int32
field :field24653, 23, optional: true, type: :int32
field :field24654, 15, repeated: true, type: :string
field :field24655, 6, repeated: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message27454 do
@moduledoc false
use Protobuf, syntax: :proto2
end
defmodule Benchmarks.GoogleMessage3.Message27357 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field27410, 1, optional: true, type: :string
field :field27411, 2, optional: true, type: :float
field :field27412, 3, optional: true, type: :string
field :field27413, 4, optional: true, type: :bool
field :field27414, 5, optional: true, type: :bool
end
defmodule Benchmarks.GoogleMessage3.Message27360 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field27426, 1, optional: true, type: Benchmarks.GoogleMessage3.Message27358
field :field27427, 2, optional: true, type: Benchmarks.GoogleMessage3.Enum27361, enum: true
field :field27428, 3, optional: true, type: Benchmarks.GoogleMessage3.Message27358
field :field27429, 4, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
end
defmodule Benchmarks.GoogleMessage3.Message34387 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field34446, 1, optional: true, type: :string
field :field34447, 2, repeated: true, type: Benchmarks.GoogleMessage3.Message34381
field :field34448, 3, optional: true, type: Benchmarks.GoogleMessage3.UnusedEnum, enum: true
field :field34449, 4, optional: true, type: Benchmarks.GoogleMessage3.Enum34388, enum: true
field :field34450, 5, optional: true, type: :int64
end
defmodule Benchmarks.GoogleMessage3.Message34621 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field34651, 1, optional: true, type: :double
field :field34652, 2, optional: true, type: :double
field :field34653, 3, optional: true, type: :double
field :field34654, 4, optional: true, type: :double
field :field34655, 11, optional: true, type: :double
field :field34656, 13, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field34657, 14, optional: true, type: Benchmarks.GoogleMessage3.Message34619
field :field34658, 5, optional: true, type: :string
field :field34659, 9, optional: true, type: :string
field :field34660, 12, optional: true, type: :double
field :field34661, 19, optional: true, type: :bytes
field :field34662, 15, optional: true, type: :string
field :field34663, 16, optional: true, type: :string
field :field34664, 17, optional: true, type: :string
field :field34665, 18, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field34666, 20, optional: true, type: Benchmarks.GoogleMessage3.Message34621
field :field34667, 100, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field34668, 101, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
end
defmodule Benchmarks.GoogleMessage3.Message35476 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field35484, 1, optional: true, type: :string
field :field35485, 2, optional: true, type: :string
field :field35486, 3, optional: true, type: :string
field :field35487, 4, optional: true, type: Benchmarks.GoogleMessage3.Enum35477, enum: true
field :field35488, 5, optional: true, type: :float
field :field35489, 6, optional: true, type: :float
field :field35490, 7, optional: true, type: :float
field :field35491, 8, optional: true, type: :float
field :field35492, 9, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field35493, 10, optional: true, type: :int32
field :field35494, 11, optional: true, type: :int32
field :field35495, 12, optional: true, type: :int32
field :field35496, 13, optional: true, type: :string
field :field35497, 14, optional: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message949 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field955, 1, optional: true, type: :string
field :field956, 2, optional: true, type: :int64
field :field957, 3, optional: true, type: :int64
field :field958, 4, optional: true, type: Benchmarks.GoogleMessage3.Message730
field :field959, 5, repeated: true, type: :string
field :field960, 6, optional: true, type: :string
field :field961, 7, optional: true, type: :bool
end
defmodule Benchmarks.GoogleMessage3.Message36869 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field36970, 1, optional: true, type: :int32
field :field36971, 2, optional: true, type: :int32
end
defmodule Benchmarks.GoogleMessage3.Message33968.Message33969 do
@moduledoc false
use Protobuf, syntax: :proto2
end
defmodule Benchmarks.GoogleMessage3.Message33968 do
@moduledoc false
use Protobuf, syntax: :proto2
field :message33969, 1, repeated: true, type: :group
field :field33989, 3, repeated: true, type: Benchmarks.GoogleMessage3.Message33958
field :field33990, 106, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field33991, 108, optional: true, type: :bool
field :field33992, 107, optional: true, type: Benchmarks.GoogleMessage3.UnusedEnum, enum: true
end
defmodule Benchmarks.GoogleMessage3.Message6644 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field6701, 8, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field6702, 1, optional: true, type: :string
field :field6703, 2, optional: true, type: :double
field :field6704, 9, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field6705, 3, optional: true, type: :bytes
field :field6706, 19, optional: true, type: :bytes
field :field6707, 4, optional: true, type: Benchmarks.GoogleMessage3.Message6637
field :field6708, 18, repeated: true, type: Benchmarks.GoogleMessage3.Message6126
field :field6709, 6, optional: true, type: :bool
field :field6710, 10, optional: true, type: Benchmarks.GoogleMessage3.Message6643
field :field6711, 12, optional: true, type: :string
field :field6712, 14, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field6713, 15, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field6714, 16, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field6715, 17, optional: true, type: :int32
field :field6716, 20, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
end
defmodule Benchmarks.GoogleMessage3.Message18831.Message18832.Message18833 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field18843, 7, required: true, type: :uint64
field :field18844, 8, optional: true, type: :string
field :field18845, 10, optional: true, type: :float
field :field18846, 12, optional: true, type: :int32
field :field18847, 13, optional: true, type: :bool
end
defmodule Benchmarks.GoogleMessage3.Message18831.Message18832 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field18836, 2, optional: true, type: :int32
field :field18837, 5, optional: true, type: :string
field :field18838, 3, optional: true, type: :float
field :field18839, 9, optional: true, type: :float
field :field18840, 11, optional: true, type: :int32
field :field18841, 4, repeated: true, type: :uint64
field :message18833, 6, repeated: true, type: :group
end
defmodule Benchmarks.GoogleMessage3.Message18831 do
@moduledoc false
use Protobuf, syntax: :proto2
field :message18832, 1, repeated: true, type: :group
end
defmodule Benchmarks.GoogleMessage3.Message13090 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field13141, 1, optional: true, type: Benchmarks.GoogleMessage3.Message13083
field :field13142, 2, optional: true, type: Benchmarks.GoogleMessage3.Message13088
end
defmodule Benchmarks.GoogleMessage3.Message11874 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field11888, 3, optional: true, type: Benchmarks.GoogleMessage3.Message10391
field :field11889, 4, optional: true, type: :string
field :field11890, 6, optional: true, type: Benchmarks.GoogleMessage3.Message11873
field :field11891, 7, optional: true, type: :bool
extensions [{1, 2}, {2, 3}, {5, 6}]
end
defmodule Benchmarks.GoogleMessage3.Message4144.Message4145 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field4165, 2, required: true, type: Benchmarks.GoogleMessage3.Enum4146, enum: true
field :field4166, 3, required: true, type: :int32
field :field4167, 9, optional: true, type: Benchmarks.GoogleMessage3.Enum4160, enum: true
field :field4168, 4, optional: true, type: :bytes
field :field4169, 5, optional: true, type: Benchmarks.GoogleMessage3.Enum4152, enum: true
field :field4170, 6, optional: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message4144 do
@moduledoc false
use Protobuf, syntax: :proto2
field :message4145, 1, repeated: true, type: :group
end
defmodule Benchmarks.GoogleMessage3.Message35573.Message35574 do
@moduledoc false
use Protobuf, syntax: :proto2
end
defmodule Benchmarks.GoogleMessage3.Message35573.Message35575.Message35576 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field35747, 5, optional: true, type: :fixed64
field :field35748, 6, optional: true, type: :int32
field :field35749, 49, optional: true, type: :int32
field :field35750, 7, optional: true, type: :int32
field :field35751, 59, optional: true, type: :uint32
field :field35752, 14, optional: true, type: :int32
field :field35753, 15, optional: true, type: :int32
field :field35754, 35, optional: true, type: :int32
field :field35755, 53, optional: true, type: :bytes
field :field35756, 8, optional: true, type: :int32
field :field35757, 9, optional: true, type: :string
field :field35758, 10, optional: true, type: :fixed64
field :field35759, 11, optional: true, type: :int32
field :field35760, 12, optional: true, type: :int32
field :field35761, 41, optional: true, type: :int32
field :field35762, 30, optional: true, type: :int32
field :field35763, 31, optional: true, type: :int32
field :field35764, 13, optional: true, type: :int32
field :field35765, 39, optional: true, type: :bytes
field :field35766, 29, optional: true, type: :string
field :field35767, 42, optional: true, type: :int32
field :field35768, 32, repeated: true, type: :int32
field :field35769, 51, repeated: true, type: :int32
field :field35770, 54, optional: true, type: :int64
field :field35771, 55, optional: true, type: Benchmarks.GoogleMessage3.Message0
end
defmodule Benchmarks.GoogleMessage3.Message35573.Message35575 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field35709, 2, optional: true, type: :int64
field :field35710, 3, optional: true, type: :string
field :field35711, 19, optional: true, type: :string
field :field35712, 20, optional: true, type: :int32
field :field35713, 21, optional: true, type: :int32
field :field35714, 22, optional: true, type: :int32
field :field35715, 23, optional: true, type: :bool
field :field35716, 47, optional: true, type: :int32
field :field35717, 48, optional: true, type: :int32
field :field35718, 24, optional: true, type: :bool
field :field35719, 25, optional: true, type: :fixed64
field :field35720, 52, optional: true, type: :bytes
field :field35721, 18, optional: true, type: :int32
field :field35722, 43, optional: true, type: :fixed32
field :field35723, 26, optional: true, type: :bool
field :field35724, 27, optional: true, type: :int32
field :field35725, 17, optional: true, type: :int32
field :field35726, 45, optional: true, type: :bool
field :field35727, 33, repeated: true, type: :int32
field :field35728, 58, repeated: true, type: :int32
field :field35729, 34, optional: true, type: :float
field :field35730, 1009, optional: true, type: :float
field :field35731, 28, optional: true, type: :int32
field :field35732, 1001, repeated: true, type: :fixed64
field :field35733, 1002, repeated: true, type: :fixed64
field :field35734, 44, optional: true, type: :int32
field :field35735, 50, optional: true, type: :int32
field :field35736, 36, optional: true, type: :int32
field :field35737, 40, optional: true, type: :int32
field :field35738, 1016, optional: true, type: :bool
field :field35739, 1010, optional: true, type: :bool
field :field35740, 37, optional: true, type: :int32
field :field35741, 38, optional: true, type: :int32
field :field35742, 46, optional: true, type: :string
field :field35743, 60, optional: true, type: :uint32
field :field35744, 56, repeated: true, type: :bytes
field :field35745, 57, optional: true, type: Benchmarks.GoogleMessage3.Message0
field :message35576, 4, required: true, type: :group
end
defmodule Benchmarks.GoogleMessage3.Message35573 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field35695, 16, optional: true, type: :fixed64
field :field35696, 1000, optional: true, type: :string
field :field35697, 1004, optional: true, type: :string
field :field35698, 1003, optional: true, type: :int32
field :message35574, 1012, repeated: true, type: :group
field :field35700, 1011, optional: true, type: :int64
field :field35701, 1005, optional: true, type: :int64
field :field35702, 1006, optional: true, type: :int64
field :field35703, 1007, optional: true, type: :int64
field :field35704, 1008, optional: true, type: :int64
field :message35575, 1, repeated: true, type: :group
end
defmodule Benchmarks.GoogleMessage3.Message36858.Message36859 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field36968, 9, required: true, type: Benchmarks.GoogleMessage3.Enum36860, enum: true
field :field36969, 10, optional: true, type: :float
end
defmodule Benchmarks.GoogleMessage3.Message36858 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field36956, 1, repeated: true, type: :int32
field :field36957, 2, repeated: true, type: :string
field :field36958, 12, repeated: true, type: :string
field :field36959, 3, optional: true, type: :int32
field :field36960, 4, optional: true, type: :int32
field :field36961, 14, optional: true, type: :int32
field :field36962, 11, optional: true, type: :string
field :field36963, 5, optional: true, type: :bool
field :field36964, 13, optional: true, type: :bool
field :field36965, 6, optional: true, type: :int64
field :field36966, 7, optional: true, type: Benchmarks.GoogleMessage3.Message35506
field :message36859, 8, repeated: true, type: :group
end
defmodule Benchmarks.GoogleMessage3.Message13174 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field13237, 6, required: true, type: :int32
field :field13238, 3, optional: true, type: :int32
field :field13239, 4, required: true, type: :int32
field :field13240, 8, optional: true, type: :int32
field :field13241, 5, optional: true, type: :double
field :field13242, 7, optional: true, type: :double
field :field13243, 17, optional: true, type: :int32
field :field13244, 19, optional: true, type: :int32
field :field13245, 20, optional: true, type: :double
field :field13246, 9, optional: true, type: :int32
field :field13247, 10, optional: true, type: :double
field :field13248, 11, optional: true, type: :int32
field :field13249, 21, optional: true, type: Benchmarks.GoogleMessage3.Message13151
field :field13250, 1, optional: true, type: :int32
field :field13251, 2, optional: true, type: :double
field :field13252, 15, optional: true, type: :double
field :field13253, 16, optional: true, type: :double
field :field13254, 12, optional: true, type: :double
field :field13255, 13, optional: true, type: :double
field :field13256, 14, optional: true, type: :double
field :field13257, 18, optional: true, type: :int32
end
defmodule Benchmarks.GoogleMessage3.Message18283 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field18478, 1, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18479, 4, optional: true, type: :int32
field :field18480, 106, optional: true, type: :int32
field :field18481, 107, optional: true, type: :int32
field :field18482, 108, optional: true, type: :int32
field :field18483, 109, optional: true, type: :int32
field :field18484, 105, optional: true, type: :int32
field :field18485, 113, optional: true, type: :int32
field :field18486, 114, optional: true, type: :int32
field :field18487, 124, optional: true, type: :int32
field :field18488, 125, optional: true, type: :int32
field :field18489, 128, optional: true, type: :int32
field :field18490, 135, optional: true, type: :int32
field :field18491, 166, optional: true, type: :bool
field :field18492, 136, optional: true, type: :bool
field :field18493, 140, optional: true, type: :int32
field :field18494, 171, optional: true, type: :int32
field :field18495, 148, optional: true, type: :int32
field :field18496, 145, optional: true, type: :int32
field :field18497, 117, optional: true, type: :float
field :field18498, 146, optional: true, type: :int32
field :field18499, 3, optional: true, type: :string
field :field18500, 5, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18501, 6, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18502, 9, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18503, 155, optional: true, type: Benchmarks.GoogleMessage3.Message18253
field :field18504, 184, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18505, 163, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18506, 16, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18507, 20, repeated: true, type: :int32
field :field18508, 7, repeated: true, type: :int32
field :field18509, 194, repeated: true, type: :string
field :field18510, 30, optional: true, type: :bytes
field :field18511, 31, optional: true, type: :int32
field :field18512, 178, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18513, 8, optional: true, type: :string
field :field18514, 2, optional: true, type: :float
field :field18515, 100, optional: true, type: :float
field :field18516, 101, optional: true, type: :float
field :field18517, 102, optional: true, type: :float
field :field18518, 103, optional: true, type: :int32
field :field18519, 104, repeated: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18520, 110, optional: true, type: :int32
field :field18521, 112, optional: true, type: :int32
field :field18522, 111, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18523, 115, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18524, 119, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18525, 127, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18526, 185, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18527, 120, optional: true, type: :int32
field :field18528, 132, optional: true, type: :int32
field :field18529, 126, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18530, 129, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18531, 131, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18532, 150, optional: true, type: :fixed64
field :field18533, 133, optional: true, type: :int32
field :field18534, 134, optional: true, type: :int32
field :field18535, 139, optional: true, type: :int32
field :field18536, 137, optional: true, type: :fixed64
field :field18537, 138, optional: true, type: :fixed64
field :field18538, 141, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18539, 142, optional: true, type: :int32
field :field18540, 181, optional: true, type: :int32
field :field18541, 143, optional: true, type: Benchmarks.GoogleMessage3.Message16816
field :field18542, 154, optional: true, type: Benchmarks.GoogleMessage3.Message16685
field :field18543, 144, optional: true, type: :int32
field :field18544, 147, optional: true, type: :int64
field :field18545, 149, optional: true, type: :int64
field :field18546, 151, optional: true, type: :int32
field :field18547, 152, optional: true, type: :int32
field :field18548, 153, optional: true, type: :int32
field :field18549, 161, optional: true, type: :float
field :field18550, 123, optional: true, type: Benchmarks.GoogleMessage3.Message0
field :field18551, 156, repeated: true, type: :int64
field :field18552, 157, optional: true, type: :int32
field :field18553, 188, repeated: true, type: :fixed64
field :field18554, 158, optional: true, type: :int32
field :field18555, 159, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18556, 160, optional: true, type: :bool
field :field18557, 162, optional: true, type: :uint64
field :field18558, 164, optional: true, type: :int32
field :field18559, 10, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18560, 167, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18561, 168, optional: true, type: :int32
field :field18562, 169, repeated: true, type: :fixed64
field :field18563, 170, repeated: true, type: :string
field :field18564, 172, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18565, 173, optional: true, type: :int64
field :field18566, 174, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18567, 175, optional: true, type: :int64
field :field18568, 189, optional: true, type: :uint32
field :field18569, 176, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18570, 177, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18571, 179, optional: true, type: :uint32
field :field18572, 180, optional: true, type: :uint32
field :field18573, 182, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18574, 183, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18575, 121, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18576, 186, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18577, 187, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18578, 190, optional: true, type: Benchmarks.GoogleMessage3.UnusedEmptyMessage
field :field18579, 191, optional: true, type: :int32
field :field18580, 192, optional: true, type: :float
field :field18581, 193, optional: true, type: :bool
extensions [{116, 117}, {118, 119}, {130, 131}, {165, 166}]
end
defmodule Benchmarks.GoogleMessage3.Message13169 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field13223, 1, repeated: true, type: Benchmarks.GoogleMessage3.Message13168
field :field13224, 2, required: true, type: Benchmarks.GoogleMessage3.Message13167
field :field13225, 3, optional: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message19255 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field19257, 1, optional: true, type: :string
end
defmodule Benchmarks.GoogleMessage3.Message35542 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field35543, 1, optional: true, type: :bool
field :field35544, 2, optional: true, type: :bool
field :field35545, 3, optional: true, type: :bool
end
defmodule Benchmarks.GoogleMessage3.Message3901 do
@moduledoc false
use Protobuf, syntax: :proto2
field :field3990, 1, optional: true, type: :int32
field :field3991, 2, optional: true, type: :int32
field :field3992, 3, optional: true, type: :int32
field :field3993, 4, optional: true, type: :int32
field :field3994, 7, optional: true, type: :int32
field :field3995, 8, optional: true, type: :int32
field :field3996, 9, optional: true, type: :int32
field :field3997, 10, optional: true, type: :int32
field :field3998, 11, optional: true, type: :int32
field :field3999, 12, optional: true, type: :int32
field :field4000, 6, optional: true, type: Benchmarks.GoogleMessage3.UnusedEnum, enum: true
field :field4001, 5, optional: true, type: :int32
end
defmodule Benchmarks.GoogleMessage3.PbExtension do
@moduledoc false
use Protobuf, syntax: :proto2
extend Benchmarks.GoogleMessage3.Message0, :"Message34621.field34669", 17_562_023,
optional: true,
type: Benchmarks.GoogleMessage3.Message34621
end
bench/lib/datasets/google_message3/benchmark_message3_2.pb.ex
defmodule ExUnit.CaptureIO do
@moduledoc ~S"""
Functionality to capture IO for testing.
## Examples
defmodule AssertionTest do
use ExUnit.Case
import ExUnit.CaptureIO
test "example" do
assert capture_io(fn -> IO.puts("a") end) == "a\n"
end
test "checking the return value and the IO output" do
fun = fn ->
assert Enum.each(["some", "example"], &IO.puts(&1)) == :ok
end
assert capture_io(fun) == "some\nexample\n"
# tip: or use only: "capture_io(fun)" to silence the IO output (so only assert the return value)
end
end
"""
@doc """
Captures IO generated when evaluating `fun`.
Returns the binary which is the captured output.
By default, `capture_io` replaces the `group_leader` (`:stdio`)
for the current process. Capturing the group leader is done per
process and therefore can be done concurrently.
However, the capturing of any other named device, such as `:stderr`,
happens globally and requires `async: false`.
A developer can set a string as an input. The default input
is an empty string.
## IO devices
You may capture the IO from any registered IO device. The device name given
must be an atom representing the name of a registered process. In addition,
Elixir provides two shortcuts:
* `:stdio` - a shortcut for `:standard_io`, which maps to
the current `Process.group_leader/0` in Erlang
* `:stderr` - a shortcut for the named process `:standard_error`
provided in Erlang
## Options
* `:capture_prompt` - Define if prompts (specified as arguments to
`IO.get*` functions) should be captured. Defaults to `true`. For
IO devices other than `:stdio`, the option is ignored.
* `:encoding` (since v1.10.0) - encoding of the IO device. Allowed
values are `:unicode` (default) and `:latin1`.
## Examples
iex> capture_io(fn -> IO.write("john") end) == "john"
true
iex> capture_io(:stderr, fn -> IO.write(:stderr, "john") end) == "john"
true
iex> capture_io(:standard_error, fn -> IO.write(:stderr, "john") end) == "john"
true
iex> capture_io("this is input", fn ->
...> input = IO.gets("> ")
...> IO.write(input)
...> end) == "> this is input"
true
iex> capture_io([input: "this is input", capture_prompt: false], fn ->
...> input = IO.gets("> ")
...> IO.write(input)
...> end) == "this is input"
true
## Returning values
As seen in the examples above, `capture_io` returns the captured output.
If you want to also capture the result of the function executed inside
the `capture_io`, you can use `Kernel.send/2` to send yourself a message
and use `ExUnit.Assertions.assert_received/2` to match on the results:
capture_io([input: "this is input", capture_prompt: false], fn ->
send(self(), {:block_result, 42})
# ...
end)
assert_received {:block_result, 42}
"""
@spec capture_io((() -> any())) :: String.t()
def capture_io(fun) when is_function(fun, 0) do
capture_io(:stdio, [], fun)
end
@spec capture_io(atom(), (() -> any())) :: String.t()
def capture_io(device, fun) when is_atom(device) and is_function(fun, 0) do
capture_io(device, [], fun)
end
@spec capture_io(String.t(), (() -> any())) :: String.t()
def capture_io(input, fun) when is_binary(input) and is_function(fun, 0) do
capture_io(:stdio, [input: input], fun)
end
@spec capture_io(keyword(), (() -> any())) :: String.t()
def capture_io(options, fun) when is_list(options) and is_function(fun, 0) do
capture_io(:stdio, options, fun)
end
@spec capture_io(atom(), String.t(), (() -> any())) :: String.t()
def capture_io(device, input, fun)
when is_atom(device) and is_binary(input) and is_function(fun, 0) do
capture_io(device, [input: input], fun)
end
@spec capture_io(atom(), keyword(), (() -> any())) :: String.t()
def capture_io(device, options, fun)
when is_atom(device) and is_list(options) and is_function(fun, 0) do
do_capture_io(map_dev(device), options, fun)
end
defp map_dev(:stdio), do: :standard_io
defp map_dev(:stderr), do: :standard_error
defp map_dev(other), do: other
defp do_capture_io(:standard_io, options, fun) do
prompt_config = Keyword.get(options, :capture_prompt, true)
encoding = Keyword.get(options, :encoding, :unicode)
input = Keyword.get(options, :input, "")
original_gl = Process.group_leader()
{:ok, capture_gl} = StringIO.open(input, capture_prompt: prompt_config, encoding: encoding)
try do
Process.group_leader(self(), capture_gl)
do_capture_io(capture_gl, fun)
after
Process.group_leader(self(), original_gl)
end
end
defp do_capture_io(device, options, fun) do
input = Keyword.get(options, :input, "")
encoding = Keyword.get(options, :encoding, :unicode)
{:ok, string_io} = StringIO.open(input, encoding: encoding)
case ExUnit.CaptureServer.device_capture_on(device, string_io) do
{:ok, ref} ->
try do
do_capture_io(string_io, fun)
after
ExUnit.CaptureServer.device_capture_off(ref)
end
{:error, :no_device} ->
_ = StringIO.close(string_io)
raise "could not find IO device registered at #{inspect(device)}"
{:error, :already_captured} ->
_ = StringIO.close(string_io)
raise "IO device registered at #{inspect(device)} is already captured"
end
end
defp do_capture_io(string_io, fun) do
try do
fun.()
catch
kind, reason ->
_ = StringIO.close(string_io)
:erlang.raise(kind, reason, __STACKTRACE__)
else
_ ->
{:ok, {_input, output}} = StringIO.close(string_io)
output
end
end
end
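# A hedged usage sketch (not part of the module above): per the docs,
# capturing a named device such as :stderr happens globally, so the test
# module must opt out of async execution. The module and test names here
# are illustrative.
defmodule StderrCaptureTest do
  use ExUnit.Case, async: false
  import ExUnit.CaptureIO

  test "captures standard error output" do
    assert capture_io(:stderr, fn -> IO.puts(:stderr, "oops") end) == "oops\n"
  end
end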
lib/ex_unit/lib/ex_unit/capture_io.ex
defmodule Hammox.TypeMatchError do
@moduledoc false
defexception [:message]
alias Hammox.Utils
@impl true
def exception({:error, reasons}) do
%__MODULE__{
message: "\n" <> message_string(reasons)
}
end
defp human_reason({:arg_type_mismatch, index, value, type}) do
"#{Ordinal.ordinalize(index + 1)} argument value #{inspect(value)} does not match #{
Ordinal.ordinalize(index + 1)
} parameter's type #{type_to_string(type)}."
end
defp human_reason({:return_type_mismatch, value, type}) do
"Returned value #{inspect(value)} does not match type #{type_to_string(type)}."
end
defp human_reason({:tuple_elem_type_mismatch, index, elem, elem_type}) do
"#{Ordinal.ordinalize(index + 1)} tuple element #{inspect(elem)} does not match #{
Ordinal.ordinalize(index + 1)
} element type #{type_to_string(elem_type)}."
end
defp human_reason({:elem_type_mismatch, index, elem, elem_type}) do
"Element #{inspect(elem)} at index #{index} does not match element type #{
type_to_string(elem_type)
}."
end
defp human_reason({:empty_list_type_mismatch, type}) do
"Got an empty list but expected #{type_to_string(type)}."
end
defp human_reason({:proper_list_type_mismatch, type}) do
"Got a proper list but expected #{type_to_string(type)}."
end
defp human_reason({:improper_list_type_mismatch, type}) do
"Got an improper list but expected #{type_to_string(type)}."
end
defp human_reason({:improper_list_terminator_type_mismatch, terminator, terminator_type}) do
"Improper list terminator #{inspect(terminator)} does not match terminator type #{
type_to_string(terminator_type)
}."
end
defp human_reason({:function_arity_type_mismatch, expected, actual}) do
"Expected function to have arity #{expected} but got #{actual}."
end
defp human_reason({:type_mismatch, value, type}) do
"Value #{inspect(value)} does not match type #{type_to_string(type)}."
end
defp human_reason({:map_key_type_mismatch, key, key_types}) when is_list(key_types) do
"Map key #{inspect(key)} does not match any of the allowed map key types #{
key_types
|> Enum.map(&type_to_string/1)
|> Enum.join(", ")
}."
end
defp human_reason({:map_key_type_mismatch, key, key_type}) do
"Map key #{inspect(key)} does not match map key type #{type_to_string(key_type)}."
end
defp human_reason({:map_value_type_mismatch, key, value, value_types})
when is_list(value_types) do
"Map value #{inspect(value)} for key #{inspect(key)} does not match any of the allowed map value types #{
value_types
|> Enum.map(&type_to_string/1)
|> Enum.join(", ")
}."
end
defp human_reason({:map_value_type_mismatch, key, value, value_type}) do
"Map value #{inspect(value)} for key #{inspect(key)} does not match map value type #{
type_to_string(value_type)
}."
end
defp human_reason({:required_field_unfulfilled_map_type_mismatch, entry_type}) do
"Could not find a map entry matching #{type_to_string(entry_type)}."
end
defp human_reason({:struct_name_type_mismatch, nil, expected_struct_name}) do
"Expected the value to be #{Utils.module_to_string(expected_struct_name)}."
end
defp human_reason({:struct_name_type_mismatch, actual_struct_name, expected_struct_name}) do
"Expected the value to be a #{Utils.module_to_string(expected_struct_name)}, got a #{
Utils.module_to_string(actual_struct_name)
}."
end
defp human_reason({:module_fetch_failure, module_name}) do
"Could not load module #{Utils.module_to_string(module_name)}."
end
defp human_reason({:remote_type_fetch_failure, {module_name, type_name, arity}}) do
"Could not find type #{type_name}/#{arity} in #{Utils.module_to_string(module_name)}."
end
defp human_reason({:protocol_type_mismatch, value, protocol_name}) do
"Value #{inspect(value)} does not implement the #{protocol_name} protocol."
end
defp message_string(reasons) when is_list(reasons) do
reasons
|> Enum.zip(0..length(reasons))
|> Enum.map(fn {reason, index} ->
reason
|> human_reason()
|> leftpad(index)
end)
|> Enum.join("\n")
end
defp message_string(reason) when is_tuple(reason) do
message_string([reason])
end
defp leftpad(string, level) do
padding =
for(_ <- 0..level, do: " ")
|> Enum.drop(1)
|> Enum.join()
padding <> string
end
defp type_to_string({:type, _, :map_field_exact, [type1, type2]}) do
"required(#{type_to_string(type1)}) => #{type_to_string(type2)}"
end
defp type_to_string({:type, _, :map_field_assoc, [type1, type2]}) do
"optional(#{type_to_string(type1)}) => #{type_to_string(type2)}"
end
defp type_to_string(type) do
# We really want to access Code.Typespec.typespec_to_quoted/1 here but it's
# private... this hack needs to suffice.
{:foo, type, []}
|> Code.Typespec.type_to_quoted()
|> Macro.to_string()
|> String.split(" :: ")
|> case do
[_, type_string] -> type_string
[_, type_name, type_string] -> "#{type_string} (\"#{type_name}\")"
end
end
end
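# A hedged sketch of how this exception is raised and rendered; the reason
# tuple below is illustrative and follows the {:return_type_mismatch, ...}
# clause above (an Erlang type AST for binary()).
raise Hammox.TypeMatchError,
      {:error, [{:return_type_mismatch, :ok, {:type, 0, :binary, []}}]}
# => ** (Hammox.TypeMatchError)
#    Returned value :ok does not match type binary().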
lib/hammox/type_match_error.ex
defmodule Serum.Project.ElixirValidator do
@moduledoc false
_moduledocp = "A module for validation of Serum project definition data."
@type result() :: :ok | {:invalid, binary()} | {:invalid, [binary()]}
@all_keys [
:site_name,
:site_description,
:author,
:author_email,
:server_root,
:base_url,
:date_format,
:list_title_all,
:list_title_tag,
:pagination,
:posts_per_page,
:preview_length,
:plugins,
:theme
]
@required_keys [
:site_name,
:site_description,
:author,
:author_email,
:base_url
]
@spec validate(map()) :: result()
def validate(term)
def validate(%{} = map) do
keys = map |> Map.keys() |> MapSet.new()
with {:missing, []} <- check_missing_keys(keys),
{:extra, []} <- check_extra_keys(keys),
:ok <- check_constraints(map) do
:ok
else
{:missing, [x]} ->
{:invalid, "missing required property: #{x}"}
{:missing, xs} ->
props_str = Enum.join(xs, ", ")
{:invalid, "missing required properties: #{props_str}"}
{:extra, [x]} ->
{:invalid, "unknown property: #{x}"}
{:extra, xs} ->
props_str = Enum.join(xs, ", ")
{:invalid, "unknown properties: #{props_str}"}
{:error, messages} ->
{:invalid, messages}
end
end
def validate(term) do
{:invalid, "expected a map, got: #{inspect(term)}"}
end
@spec check_missing_keys(MapSet.t()) :: {:missing, [atom()]}
defp check_missing_keys(keys) do
missing =
@required_keys
|> MapSet.new()
|> MapSet.difference(keys)
|> MapSet.to_list()
{:missing, missing}
end
@spec check_extra_keys(MapSet.t()) :: {:extra, [atom()]}
defp check_extra_keys(keys) do
extra =
keys
|> MapSet.difference(MapSet.new(@all_keys))
|> MapSet.to_list()
{:extra, extra}
end
@spec check_constraints(map()) :: :ok | {:error, [binary()]}
defp check_constraints(map) do
map
|> Enum.map(fn {k, v} -> {k, validate_field(k, v)} end)
|> Enum.filter(&(elem(&1, 1) != :ok))
|> case do
[] ->
:ok
errors ->
messages =
Enum.map(errors, fn {k, {:fail, s}} ->
[
"the property ",
[:bright, :yellow, to_string(k), :reset],
" violates the constraint ",
[:bright, :yellow, s, :reset]
]
|> IO.ANSI.format()
|> IO.iodata_to_binary()
end)
{:error, messages}
end
end
rules =
quote do
[
site_name: [is_binary: []],
site_description: [is_binary: []],
author: [is_binary: []],
author_email: [is_binary: []],
server_root: [is_binary: [], =~: [~r[^https?://.+]]],
base_url: [is_binary: [], =~: [~r[(^/$|^/.*/$)]]],
date_format: [is_binary: []],
list_title_all: [is_binary: []],
list_title_tag: [is_binary: []],
pagination: [is_boolean: []],
posts_per_page: [is_integer: [], >=: [1]],
preview_length: [valid_preview_length?: []],
plugins: [is_list: []],
theme: [is_atom: []]
]
end
@spec validate_field(atom(), term()) :: :ok | {:fail, binary()}
defp validate_field(key, value)
Enum.each(rules, fn {key, exprs} ->
[x | xs] =
Enum.map(exprs, fn {func, args} ->
quote(do: unquote(func)(var!(value), unquote_splicing(args)))
end)
check_expr = Enum.reduce(xs, x, &quote(do: unquote(&2) and unquote(&1)))
[y | ys] =
Enum.map(exprs, fn {func, args} ->
quote(do: unquote(func)(value, unquote_splicing(args)))
end)
check_str =
ys
|> Enum.reduce(y, &quote(do: unquote(&2) and unquote(&1)))
|> Macro.to_string()
defp validate_field(unquote(key), value) do
if unquote(check_expr) do
:ok
else
{:fail, unquote(check_str)}
end
end
end)
@spec valid_preview_length?(term()) :: boolean()
defp valid_preview_length?(value)
defp valid_preview_length?(n) when is_integer(n) and n >= 0, do: true
defp valid_preview_length?({:chars, n}) when is_integer(n) and n >= 0, do: true
defp valid_preview_length?({:words, n}) when is_integer(n) and n >= 0, do: true
defp valid_preview_length?({:paragraphs, n}) when is_integer(n) and n >= 0, do: true
defp valid_preview_length?(_), do: false
end
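# A hedged usage sketch; the field values are illustrative. All required
# keys are present, no unknown keys appear, and base_url satisfies the
# ~r[(^/$|^/.*/$)] constraint, so validation returns :ok.
:ok =
  Serum.Project.ElixirValidator.validate(%{
    site_name: "My Site",
    site_description: "A demo project",
    author: "Jane Doe",
    author_email: "jane@example.com",
    base_url: "/blog/"
  })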
lib/serum/project/elixir_validator.ex
defmodule ATECC508A.Certificate do
@moduledoc """
Convert between X.509 certificates and ATECC508A compressed certificates
This is an implementation of the compressed certificate definition described in
Atmel-8974A-CryptoAuth-ATECC-Compressed-Certificate-Definition-ApplicationNote_112015.
"""
import X509.ASN1, except: [extension: 2, basic_constraints: 1]
alias X509.{PublicKey, RDNSequence, SignatureAlgorithm}
alias X509.Certificate.Template
@hash :sha256
@curve :secp256r1
@validity_years 31
@version :v3
@era 2000
@doc """
Create a new device certificate.
The created certificate is compatible with ATECC508A certificate compression.
Parameters:
* `atecc508a_public_key` - the public key to be signed (from ATECC508A)
* `atecc508a_sn` - the ATECC508a's serial number - used to compute the certificate's serial number
* `manufacturer_sn` - the manufacturer's desired serial number - used as the common name
* `signer` - the signer's certificate
* `signer_key` - the signer's private key
"""
@spec new_device(
:public_key.ec_public_key(),
ATECC508A.serial_number(),
String.t(),
X509.Certificate.t(),
:public_key.ec_private_key()
) :: X509.Certificate.t()
def new_device(atecc508a_public_key, atecc508a_sn, manufacturer_sn, signer, signer_key) do
byte_size(manufacturer_sn) <= 16 || raise "Manufacturer serial number too long"
subject_rdn = "/CN=" <> manufacturer_sn
{not_before_dt, not_after_dt} = ATECC508A.Validity.create_compatible_validity(@validity_years)
compressed_validity = ATECC508A.Validity.compress(not_before_dt, not_after_dt)
x509_validity = X509.Certificate.Validity.new(not_before_dt, not_after_dt)
x509_cert_sn = ATECC508A.SerialNumber.from_device_sn(atecc508a_sn, compressed_validity)
template = device_template(x509_cert_sn, x509_validity)
X509.Certificate.new(atecc508a_public_key, subject_rdn, signer, signer_key, template: template)
end
@doc """
Create a new signer certificate.
The signer certificate is a root certificate. I.e. it's not signed by
anyone else. Signer certificates and their associated private keys
should be stored safely, though. Their overall use is limited to automating
the registration of devices to cloud servers like Nerves Hub and
Amazon IoT. Once a device has registered, the cloud server will
ignore the signer certificate. It is therefore possible to time limit
signer certificates, uninstall them from the cloud server, or limit
the number of devices they can auto-register.
The created signer certificate is compatible with ATECC508A certificate
compression.
Parameters:
* `validity_years` - how many years is this signer certificate valid
"""
@spec new_signer(pos_integer()) :: X509.Certificate.t()
def new_signer(validity_years) do
# Create a new private key -> consider making this a separate step
signer_key = X509.PrivateKey.new_ec(@curve)
signer_public_key = X509.PublicKey.derive(signer_key)
{not_before_dt, not_after_dt} = ATECC508A.Validity.create_compatible_validity(validity_years)
compressed_validity = ATECC508A.Validity.compress(not_before_dt, not_after_dt)
x509_validity = X509.Certificate.Validity.new(not_before_dt, not_after_dt)
raw_public_key = public_key_to_raw(signer_public_key)
x509_cert_sn = ATECC508A.SerialNumber.from_public_key(raw_public_key, compressed_validity)
subject_rdn = X509.RDNSequence.new("/CN=Signer", :otp)
tbs_cert =
otp_tbs_certificate(
version: @version,
serialNumber: x509_cert_sn,
signature: SignatureAlgorithm.new(@hash, signer_key),
issuer: subject_rdn,
validity: x509_validity,
subject: subject_rdn,
subjectPublicKeyInfo: PublicKey.wrap(signer_public_key, :OTPSubjectPublicKeyInfo),
extensions: [
X509.Certificate.Extension.basic_constraints(true, 0),
X509.Certificate.Extension.key_usage([:digitalSignature, :keyCertSign, :cRLSign]),
X509.Certificate.Extension.ext_key_usage([:serverAuth, :clientAuth]),
X509.Certificate.Extension.subject_key_identifier(signer_public_key),
X509.Certificate.Extension.authority_key_identifier(signer_public_key)
]
)
signer_cert =
tbs_cert
|> :public_key.pkix_sign(signer_key)
|> X509.Certificate.from_der!()
{signer_cert, signer_key}
end
@spec curve() :: :secp256r1
def curve(), do: @curve
@spec hash() :: :sha256
def hash(), do: @hash
@doc """
Compress an X.509 certificate for storage in an ATECC508A slot.
Not all X.509 certificates are compressible. Most aren't. It's probably
only practical to go through `new_device` and `new_signer`.
Parameters:
* `cert` - the certificate to compress
* `template` - the template that will be used on the decompression side
"""
# @spec compress(X509.Certificate.t(), ATECC508A.Certificate.Template.t()) ::
# ATECC508A.Certificate.Compressed.t()
def compress(cert, template) do
compressed_signature =
signature(cert)
|> compress_signature()
compressed_validity =
X509.Certificate.validity(cert)
|> compress_validity()
serial_number_source = serial_number_source(template.sn_source)
format_version = 0x00
reserved = 0x00
data =
<<compressed_signature::binary-size(64), compressed_validity::binary-size(3),
template.signer_id::size(16), template.template_id::size(4), template.chain_id::size(4),
serial_number_source::size(4), format_version::size(4), reserved>>
%ATECC508A.Certificate.Compressed{
data: data,
device_sn: template.device_sn,
public_key: X509.Certificate.public_key(cert) |> public_key_to_raw(),
serial_number: X509.Certificate.serial(cert),
subject_rdn: X509.Certificate.subject(cert),
issuer_rdn: X509.Certificate.issuer(cert),
template: template
}
end
@doc """
Decompress an ECC508A certificate back to its X.509 form.
"""
@spec decompress(ATECC508A.Certificate.Compressed.t()) :: X509.Certificate.t()
def decompress(compressed) do
<<
compressed_signature::binary-size(64),
compressed_validity::binary-size(3),
signer_id::size(16),
template_id::size(4),
chain_id::size(4),
serial_number_source::size(4),
format_version::size(4),
0::size(8)
>> = compressed.data
template = compressed.template
format_version == 0 || raise "Format version mismatch"
template_id == template.template_id || raise "Template ID mismatch"
signer_id == template.signer_id || raise "Signer ID mismatch"
chain_id == template.chain_id || raise "Chain ID mismatch"
x509_serial_number = decompress_sn(serial_number_source, compressed, compressed_validity)
subject_public_key = raw_to_public_key(compressed.public_key)
signature_alg = SignatureAlgorithm.new(@hash, :ecdsa)
otp_tbs_certificate =
otp_tbs_certificate(
version: @version,
serialNumber: x509_serial_number,
signature: signature_alg,
issuer:
case compressed.issuer_rdn do
{:rdnSequence, _} -> compressed.issuer_rdn
name when is_binary(name) -> RDNSequence.new(name, :otp)
end,
validity: decompress_validity(compressed_validity),
subject:
case compressed.subject_rdn do
{:rdnSequence, _} -> compressed.subject_rdn
name when is_binary(name) -> RDNSequence.new(name, :otp)
end,
subjectPublicKeyInfo: PublicKey.wrap(subject_public_key, :OTPSubjectPublicKeyInfo),
extensions: template.extensions
)
otp_certificate(
tbsCertificate: otp_tbs_certificate,
signatureAlgorithm: signature_alg,
signature: decompress_signature(compressed_signature)
)
end
@doc """
Compress an X.509 signature into the raw format expected on the ECC508A
"""
@spec compress_signature(binary()) :: <<_::512>>
def compress_signature(signature) do
<<0x30, _len, 0x02, r_len, r::signed-unit(8)-size(r_len), 0x02, s_len,
s::signed-unit(8)-size(s_len)>> = signature
<<r::unsigned-size(256), s::unsigned-size(256)>>
end
@doc """
Decompress an ECC508A signature into X.509 form.
"""
@spec decompress_signature(<<_::512>>) :: binary()
def decompress_signature(<<r::binary-size(32), s::binary-size(32)>>) do
r = unsigned_to_signed_bin(r)
s = unsigned_to_signed_bin(s)
r_len = byte_size(r)
s_len = byte_size(s)
r = <<0x02, r_len, r::binary>>
s = <<0x02, s_len, s::binary>>
len = byte_size(r) + byte_size(s)
<<0x30, len, r::binary, s::binary>>
end
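# A round-trip sketch of the two functions above. The r and s values are
# illustrative 32-byte integers with clear high bits (not canonical DER,
# which would strip leading zero bytes, but enough to show the encoding).
r = <<1::unsigned-size(256)>>
s = <<2::unsigned-size(256)>>
der = <<0x30, 68, 0x02, 32, r::binary, 0x02, 32, s::binary>>
compressed = ATECC508A.Certificate.compress_signature(der)
64 = byte_size(compressed)
^der = ATECC508A.Certificate.decompress_signature(compressed)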
@spec compress_validity(X509.Certificate.Validity.t()) :: ATECC508A.encoded_dates()
def compress_validity(valid_dates) do
X509.ASN1.validity(notBefore: nb, notAfter: na) = valid_dates
not_before = to_datetime(nb)
not_after = to_datetime(na)
ATECC508A.Validity.compress(not_before, not_after)
end
@spec decompress_validity(ATECC508A.encoded_dates()) :: X509.Certificate.Validity.t()
def decompress_validity(compressed_validity) do
{not_before, not_after} = ATECC508A.Validity.decompress(compressed_validity)
X509.Certificate.Validity.new(not_before, not_after)
end
def decompress_sn(0x00, compressed, _compressed_validity) do
# Stored serial number
compressed.serial_number
end
def decompress_sn(0x0A, compressed, compressed_validity) do
# Calculated from public key
ATECC508A.SerialNumber.from_public_key(compressed.public_key, compressed_validity)
end
def decompress_sn(0x0B, compressed, compressed_validity) do
# Calculated from device serial number
ATECC508A.SerialNumber.from_device_sn(compressed.device_sn, compressed_validity)
end
@spec signature(X509.Certificate.t()) :: any()
def signature(otp_cert) do
otp_certificate(otp_cert, :signature)
end
@spec get_authority_key_identifier(X509.Certificate.t()) :: any()
def get_authority_key_identifier(otp_certificate) do
otp_certificate
|> X509.Certificate.extensions()
|> X509.Certificate.Extension.find(:authority_key_identifier)
|> X509.ASN1.extension()
|> Keyword.get(:extnValue)
|> X509.ASN1.authority_key_identifier()
|> Keyword.get(:keyIdentifier)
end
@doc """
Return the raw public key bits from one in X509 form.
"""
@spec public_key_to_raw(X509.PublicKey.t()) :: ATECC508A.ecc_public_key()
def public_key_to_raw(public_key) do
{{:ECPoint, <<4, raw_key::64-bytes>>}, {:namedCurve, {1, 2, 840, 10045, 3, 1, 7}}} =
public_key
raw_key
end
@doc """
Convert a raw public key bits to an X509 public key.
"""
@spec raw_to_public_key(ATECC508A.ecc_public_key()) :: X509.PublicKey.t()
def raw_to_public_key(raw_key) do
{{:ECPoint, <<4, raw_key::64-bytes>>}, {:namedCurve, {1, 2, 840, 10045, 3, 1, 7}}}
end
# Helpers
defp unsigned_to_signed_bin(<<1::size(1), _::size(7), _::binary>> = bin),
do: <<0x00, bin::binary>>
defp unsigned_to_signed_bin(bin), do: bin
defp serial_number_source(:random), do: 0x00
defp serial_number_source(:public_key), do: 0xA
defp serial_number_source(:device_sn), do: 0xB
defp serial_number_source(invalid) do
raise """
Invalid serial number source: #{inspect(invalid)}
Must be one of:
:random - randomly generated
:public_key - Use the Public Key and encoded dates to generate the certificate serial number.
:device_sn - Use the unique device serial number and encoded dates to generate the certificate serial number.
"""
end
defp device_template(serial, validity) do
%Template{
serial: serial,
validity: validity,
hash: @hash,
extensions: [
basic_constraints: X509.Certificate.Extension.basic_constraints(false),
key_usage: X509.Certificate.Extension.key_usage([:digitalSignature, :keyEncipherment]),
ext_key_usage: X509.Certificate.Extension.ext_key_usage([:clientAuth]),
subject_key_identifier: false,
authority_key_identifier: true
]
}
|> Template.new()
end
defp to_datetime({:utcTime, timestamp}) do
<<year::binary-unit(8)-size(2), month::binary-unit(8)-size(2), day::binary-unit(8)-size(2),
hour::binary-unit(8)-size(2), minute::binary-unit(8)-size(2),
second::binary-unit(8)-size(2), "Z">> = to_string(timestamp)
NaiveDateTime.new(
String.to_integer(year) + @era,
String.to_integer(month),
String.to_integer(day),
String.to_integer(hour),
String.to_integer(minute),
String.to_integer(second)
)
|> case do
{:ok, naive_date_time} ->
DateTime.from_naive!(naive_date_time, "Etc/UTC")
error ->
error
end
end
defp to_datetime({:generalTime, timestamp}) do
<<year::binary-unit(8)-size(4), month::binary-unit(8)-size(2), day::binary-unit(8)-size(2),
hour::binary-unit(8)-size(2), minute::binary-unit(8)-size(2),
second::binary-unit(8)-size(2), "Z">> = to_string(timestamp)
NaiveDateTime.new(
String.to_integer(year),
String.to_integer(month),
String.to_integer(day),
String.to_integer(hour),
String.to_integer(minute),
String.to_integer(second)
)
|> case do
{:ok, naive_date_time} ->
DateTime.from_naive!(naive_date_time, "Etc/UTC")
error ->
error
end
end
end
lib/atecc508a/certificate.ex
defmodule ReWeb.Types.Interest do
@moduledoc """
GraphQL types for interests
"""
use Absinthe.Schema.Notation
import Absinthe.Resolution.Helpers, only: [dataloader: 1]
alias ReWeb.Resolvers.Interests, as: InterestsResolver
object :interest do
field :id, :id
field :uuid, :uuid
field :name, :string
field :email, :string
field :phone, :string
field :message, :string
field :listing, :listing, resolve: dataloader(Re.Listings)
field :interest_type, :interest_type, resolve: dataloader(Re.Interests.Types)
end
input_object :interest_input do
field :name, :string
field :phone, :string
field :email, :string
field :message, :string
field :interest_type_id, non_null(:id)
field :listing_id, non_null(:id)
end
object :contact do
field :id, :id
field :name, :string
field :email, :string
field :phone, :string
field :message, :string
field :state, :string
field :city, :string
field :neighborhood, :string
end
object :price_request do
field :id, :id
field :name, :string
field :email, :string
field :area, :integer
field :rooms, :integer
field :bathrooms, :integer
field :garage_spots, :integer
field :is_covered, :boolean
field :suggested_price, :float
field :address, :address, resolve: dataloader(Re.Addresses)
field :user, :user, resolve: dataloader(Re.Accounts)
end
object :interest_type do
field :id, :id
field :name, :string
end
object :simulation do
field :cem, :string
field :cet, :string
end
input_object :simulation_request do
field :mutuary, non_null(:string)
field :birthday, non_null(:date)
field :include_coparticipant, non_null(:boolean)
field :net_income, non_null(:decimal)
field :net_income_coparticipant, :decimal
field :birthday_coparticipant, :date
field :fundable_value, non_null(:decimal)
field :term, non_null(:integer)
field :amortization, :boolean
field :annual_interest, :float
field :home_equity_annual_interest, :float
field :calculate_tr, :boolean
field :evaluation_rate, :decimal
field :itbi_value, :decimal
field :listing_price, :decimal
field :listing_type, :string
field :product_type, :string
field :sum, :boolean
field :insurer, :string
end
object :interest_queries do
@desc "Interest types"
field :interest_types,
type: list_of(:interest_type),
resolve: &InterestsResolver.interest_types/2
@desc "Request funding simulation"
field :simulate, type: :simulation do
arg :input, non_null(:simulation_request)
resolve &InterestsResolver.simulate/2
end
end
object :interest_mutations do
@desc "Show interest in listing"
field :interest_create, type: :interest do
arg :input, non_null(:interest_input)
resolve &InterestsResolver.create_interest/2
end
@desc "Request contact"
field :request_contact, type: :contact do
arg :name, :string
arg :phone, :string
arg :email, :string
arg :message, :string
resolve &InterestsResolver.request_contact/2
end
@desc "Request price suggestion"
field :request_price_suggestion, type: :price_request do
arg :name, :string
arg :email, :string
arg :area, non_null(:integer)
arg :rooms, non_null(:integer)
arg :bathrooms, non_null(:integer)
arg :garage_spots, non_null(:integer)
arg :is_covered, non_null(:boolean)
arg :address, non_null(:address_input)
resolve &InterestsResolver.request_price_suggestion/2
end
@desc "Request notification when covered"
field :notify_when_covered, type: :contact do
arg :name, :string
arg :phone, :string
arg :email, :string
arg :message, :string
arg :state, non_null(:string)
arg :city, non_null(:string)
arg :neighborhood, non_null(:string)
resolve &InterestsResolver.notify_when_covered/2
end
end
object :interest_subscriptions do
@desc "Subscribe to email change"
field :interest_created, :interest do
config(fn _args, %{context: %{current_user: current_user}} ->
case current_user do
:system -> {:ok, topic: "interest_created"}
_ -> {:error, :unauthorized}
end
end)
trigger :interest_create,
topic: fn _ ->
"interest_created"
end
end
@desc "Subscribe to email change"
field :contact_requested, :contact do
config(fn _args, %{context: %{current_user: current_user}} ->
case current_user do
:system -> {:ok, topic: "contact_requested"}
_ -> {:error, :unauthorized}
end
end)
trigger :request_contact,
topic: fn _ ->
"contact_requested"
end
end
@desc "Subscribe to price suggestion requests"
field :price_suggestion_requested, :price_request do
config(fn _args, %{context: %{current_user: current_user}} ->
case current_user do
:system -> {:ok, topic: "price_suggestion_requested"}
_ -> {:error, :unauthorized}
end
end)
trigger :request_price_suggestion,
topic: fn _ ->
"price_suggestion_requested"
end
end
@desc "Subscribe to price suggestion requests"
field :notification_coverage_asked, :contact do
config(fn _args, %{context: %{current_user: current_user}} ->
case current_user do
:system -> {:ok, topic: "notification_coverage_asked"}
_ -> {:error, :unauthorized}
end
end)
trigger :notify_when_covered,
topic: fn _ ->
"notification_coverage_asked"
end
end
end
end
apps/re_web/lib/graphql/types/interest.ex
defmodule MaxwellTimber.Middleware do
@moduledoc """
Maxwell middleware for logging outgoing requests to Timber.io.
Using this middleware will log all requests and responses using Timber.io formatting and metadata.
### Example usage
```
defmodule MyClient do
use Maxwell.Builder, ~w(get)a
middleware MaxwellTimber.Middleware
end
```
### Options
- `:service_name` - the name of the external service (optional)
"""
require Logger
use Maxwell.Middleware
alias Maxwell.Conn
alias Timber.Events.{HTTPRequestEvent, HTTPResponseEvent}
def call(conn, next, opts) do
log_request(conn, opts)
timer = Timber.start_timer()
response = next.(conn)
case response do
{:error, reason, _conn} ->
log_error(reason)
%Conn{} = response_conn ->
time_ms = Timber.duration_ms(timer)
log_response(response_conn, time_ms, opts)
end
response
end
defp request_id do
Logger.metadata()[:request_id]
end
defp log_request(conn, opts) do
req_event =
HTTPRequestEvent.new(
direction: "outgoing",
url: serialize_url(conn),
method: conn.method,
headers: conn.req_headers,
body: conn.req_body,
request_id: request_id(),
service_name: opts[:service_name]
)
req_message = HTTPRequestEvent.message(req_event)
Logger.info(req_message, event: req_event)
end
defp log_response(conn, time_ms, opts) do
resp_event =
HTTPResponseEvent.new(
direction: "incoming",
status: conn.status,
time_ms: time_ms,
headers: conn.resp_headers,
body: normalize_body(conn),
request_id: request_id(),
service_name: opts[:service_name]
)
resp_message = HTTPResponseEvent.message(resp_event)
Logger.info(resp_message, event: resp_event)
end
defp log_error(reason) do
reason
|> inspect
|> Logger.error()
end
defp serialize_url(%Conn{url: url, path: path, query_string: query_string}) do
Maxwell.Adapter.Util.url_serialize(url, path, query_string)
end
defp normalize_body(conn) do
if Conn.get_resp_header(conn, "content-encoding") == "gzip" do
"[gzipped]"
else
conn.resp_body
end
end
end
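# A hedged usage sketch; MyApi and the base URL are illustrative. The
# :service_name option is forwarded to the Timber events as shown above.
defmodule MyApi do
  use Maxwell.Builder, ~w(get)a
  middleware MaxwellTimber.Middleware, service_name: "github"
  middleware Maxwell.Middleware.BaseUrl, "https://api.github.com"
end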
lib/maxwell_timber/middleware.ex
defmodule Cafex.Consumer.Manager do
@moduledoc """
This module is the main manager for a high-level kafka consumer
## Structure
The manager works together with an offset manager and a group manager to
manage the consumer workers.
The group manager handles the client assignment via Kafka (0.9) or ZooKeeper.
All consumers in a group elect a group leader; the leader collects the
other consumers' information in the group and performs the load balancing
of partitions.
The offset manager is responsible for the workers' offset commit/fetch. It
buffers the offset commit requests to improve throughput.
## Options
None of these options may be omitted, except `:client_id`.
* `:client_id` Optional, default client_id is "cafex"
* `:handler` Worker handler module
* `:brokers` Kafka brokers list
* `:lock` Indicate which lock implementation will be used in the worker, default is `:consul`, another option is `:zookeeper`
* `:group_manager` Default group manager is `:kafka` which depends on the kafka server with 0.9.x or above.
* `:offset_storage` Indicate where to store the consumer's offset, default is `:kafka`, another option is `:zookeeper`
* `:auto_commit`
* `:auto_commit_interval`
* `:auto_commit_max_buffers`
* `:auto_offset_reset`
* `:fetch_wait_time`
* `:fetch_min_bytes`
* `:fetch_max_bytes`
* `:zookeeper`
These options for `start_link/2` can be put under the `:cafex` key in the `config/config.exs` file:
```elixir
config :cafex, :myconsumer,
client_id: "cafex",
brokers: [{"192.168.99.100", 9092}, {"192.168.99.101", 9092}]
zookeeper: [
servers: [{"192.168.99.100", 2181}],
path: "/cafex"
],
handler: {MyConsumer, []}
```
And then start the manager or start it in your supervisor tree
```elixir
Cafex.Consumer.Manager.start_link(:myconsumer, topic: "interested_topic")
```
"""
use GenServer
require Logger
alias Cafex.Util
alias Cafex.Kafka.GroupCoordinator
alias Cafex.Consumer.OffsetManager
alias Cafex.Consumer.Worker
alias Cafex.Consumer.WorkerPartition
alias Cafex.Consumer.GroupManager
@default_client_id "cafex"
@default_group_manager :kafka
@default_lock :consul
@typedoc "Options used by the `start_link/3` functions"
@type options :: [option]
@type client_id :: String.t
@type zookeeper :: [zookeeper_option]
@type zookeeper_option :: {:servers, [Cafex.server]} |
{:path, String.t} |
{:timeout, non_neg_integer}
@type consul :: [consul_option]
@type consul_option :: {:ttl, integer} |
{:delay_lock, integer} |
{:behavior, atom}
@type option :: {:client_id, client_id} |
{:topic, String.t} |
{:handler, Cafex.Consumer.Worker.handler} |
{:brokers, [Cafex.broker]} |
{:fetch_wait_time, integer} |
{:fetch_min_bytes, integer} |
{:fetch_max_bytes, integer} |
{:auto_commit, boolean} |
{:auto_commit_interval, integer} |
{:auto_commit_max_buffers, integer} |
{:auto_offset_reset, :earliest | :latest} |
{:lock, :consul | :zookeeper} |
{:group_manager, :kafka | :zookeeper} |
{:offset_storage, :kafka | :zookeeper} |
{:zookeeper, zookeeper} |
{:consul, consul}
defmodule State do
@moduledoc false
defstruct group: nil,
topic: nil,
client_id: nil,
feed_brokers: [],
handler: nil,
brokers: nil,
leaders: nil,
partitions: nil,
lock: nil,
group_manager: {nil, nil},
group_manager_cfg: [],
group_coordinator: nil,
offset_manager: nil,
worker_cfg: nil,
workers: WorkerPartition.new,
trefs: %{},
offset_manager_cfg: [
auto_offset_reset: :latest,
offset_storage: :kafka
]
end
# ===================================================================
# API
# ===================================================================
@doc """
Start a consumer manager.
## Arguments
* `name` Consumer group name
* `opts` Starting options; the topic which will be consumed is given via
the `:topic` option or the application config
## Options
Read above.
"""
@spec start_link(name :: atom, options) :: GenServer.on_start
def start_link(name, opts \\ []) do
GenServer.start_link __MODULE__, [name, opts], name: name
end
def stop(pid) do
GenServer.call pid, :stop, :infinity
end
# ===================================================================
# GenServer callbacks
# ===================================================================
def init([name, opts]) do
Process.flag(:trap_exit, true)
cfg = Application.get_env(:cafex, name, [])
topic = Util.get_config(opts, cfg, :topic)
client_id = Util.get_config(opts, cfg, :client_id, @default_client_id)
handler = Util.get_config(opts, cfg, :handler)
brokers = Util.get_config(opts, cfg, :brokers)
lock = Util.get_config(opts, cfg, :lock, @default_lock)
fetch_wait_time = Util.get_config(opts, cfg, :fetch_wait_time)
fetch_min_bytes = Util.get_config(opts, cfg, :fetch_min_bytes)
fetch_max_bytes = Util.get_config(opts, cfg, :fetch_max_bytes)
pre_fetch_size = Util.get_config(opts, cfg, :pre_fetch_size)
zk_config = Util.get_config(opts, cfg, :zookeeper, [])
zk_cfg = [
servers: (Keyword.get(zk_config, :servers) || [])
|> Enum.map(fn {h, p} -> {:erlang.bitstring_to_list(h), p} end),
chroot: Keyword.get(zk_config, :chroot),
timeout: Keyword.get(zk_config, :timeout)
]
kafka_group_cfg = [
timeout: Util.get_config(opts, cfg, :group_session_timeout)
]
consul_cfg = Util.get_config(opts, cfg, :consul, [])
lock_cfg = case lock do
:consul -> {Cafex.Lock.Consul, consul_cfg}
:zookeeper -> {Cafex.Lock.ZK, zk_cfg}
end
{group_manager, group_manager_cfg} =
case Util.get_config(opts, cfg, :group_manager, @default_group_manager) do
:kafka ->
{GroupManager.Kafka, kafka_group_cfg}
:zookeeper ->
{GroupManager.ZK, zk_cfg}
end
group = Atom.to_string(name)
Logger.info "Starting consumer: #{group} ..."
offset_manager_cfg = [
offset_storage: Util.get_config(opts, cfg, :offset_storage),
auto_commit: Util.get_config(opts, cfg, :auto_commit, true),
interval: Util.get_config(opts, cfg, :auto_commit_interval),
max_buffers: Util.get_config(opts, cfg, :auto_commit_max_buffers),
offset_reset: Util.get_config(opts, cfg, :auto_offset_reset, :latest)
]
state = %State{ group: group,
topic: topic,
client_id: client_id,
feed_brokers: brokers,
handler: handler,
lock: lock,
worker_cfg: [
pre_fetch_size: pre_fetch_size,
max_wait_time: fetch_wait_time,
min_bytes: fetch_min_bytes,
max_bytes: fetch_max_bytes,
lock_cfg: lock_cfg
],
offset_manager_cfg: offset_manager_cfg,
group_manager: {group_manager, nil},
group_manager_cfg: group_manager_cfg}
|> load_metadata
|> find_group_coordinator
|> start_offset_manager
|> start_group_manager
{:ok, state}
end
def handle_call(:stop, _from, state) do
{:stop, :normal, :ok, state}
end
def handle_info({:timeout, _tref, {:restart_worker, partition}}, %{trefs: trefs} = state) do
state = %{state | trefs: Map.delete(trefs, partition)}
state = start_worker(partition, state)
{:noreply, state}
end
def handle_info({:rebalanced, assignment}, state) do
state = maybe_restart_workers(assignment, state)
{:noreply, state}
end
# handle linked process EXIT
def handle_info({:EXIT, pid, reason}, %{offset_manager: pid} = state) do
Logger.warn "OffsetManager exit with the reason #{inspect reason}"
state = start_offset_manager(state)
{:noreply, state}
end
def handle_info({:EXIT, pid, reason}, %{group_manager: {manager, pid}} = state) do
Logger.error "GroupManager exit with the reason #{inspect reason}"
{:stop, reason, %{state | group_manager: {manager, nil}}}
end
def handle_info({:EXIT, pid, :not_leader_for_partition}, state) do
Logger.warn "Worker stopped due to not_leader_for_partition, reload leader and restart it"
state = load_metadata(state)
state = try_restart_worker(pid, state)
{:noreply, state}
end
def handle_info({:EXIT, pid, :lock_timeout}, %{workers: workers, trefs: trefs} = state) do
# worker lock_timeout: wait for some time and then restart it
state = case WorkerPartition.partition(workers, pid) do
nil -> state
partition ->
tref = :erlang.start_timer(5000, self, {:restart_worker, partition})
%{state | trefs: Map.put(trefs, partition, tref)}
end
{:noreply, state}
end
def handle_info({:EXIT, _pid, :normal}, state) do
{:noreply, state}
end
def handle_info({:EXIT, pid, reason}, %{workers: workers, trefs: trefs} = state) do
case WorkerPartition.partition(workers, pid) do
nil ->
{:noreply, state}
partition ->
Logger.info "Worker #{inspect pid} for partition #{inspect partition} " <>
"stopped with the reason: #{inspect reason}, maybe restarted."
case reason do
:brutal_kill ->
{:stop, {:worker_brutal_kill, partition}, state}
_else ->
state = load_metadata(state)
tref = :erlang.start_timer(5000, self, {:restart_worker, partition})
{:noreply, %{state | trefs: Map.put(trefs, partition, tref)}}
end
end
end
# TODO handle worker lock timeout
def terminate(_reason, state) do
state
|> stop_workers
|> stop_offset_manager
|> stop_group_manager
:ok
end
# ===================================================================
# Internal functions
# ===================================================================
defp load_metadata(%{feed_brokers: brokers, topic: topic} = state) do
{:ok, metadata} = Cafex.Kafka.Metadata.request(brokers, topic)
metadata = Cafex.Kafka.Metadata.extract_metadata(metadata)
%{state | brokers: metadata.brokers,
leaders: metadata.leaders,
partitions: metadata.partitions}
end
defp find_group_coordinator(%{group: group, brokers: brokers} = state) do
{:ok, {host, port}} = GroupCoordinator.request(Map.values(brokers), group)
%{state | group_coordinator: {host, port}}
end
defp start_offset_manager(%{group: group,
topic: topic,
partitions: partitions,
group_coordinator: group_coordinator,
offset_manager: nil,
offset_manager_cfg: cfg,
client_id: client_id} = state) do
cfg = Keyword.merge([client_id: client_id], cfg)
{:ok, pid} = OffsetManager.start_link(group_coordinator, partitions, group, topic, cfg)
%{state | offset_manager: pid}
end
defp stop_offset_manager(%{offset_manager: nil} = state), do: state
defp stop_offset_manager(%{offset_manager: pid} = state) do
if Process.alive?(pid) do
OffsetManager.stop(pid)
end
%{state | offset_manager: nil}
end
defp start_group_manager(%{group_manager: {manager, nil},
group: group,
topic: topic,
partitions: partitions,
group_manager_cfg: cfg} = state) do
opts = Map.take(state, [:offset_manager, :group_coordinator])
|> Map.to_list
|> Keyword.merge(cfg)
{:ok, pid} = manager.start_link(self, topic, group, partitions, opts)
%{state | group_manager: {manager, pid}}
end
defp stop_group_manager(%{group_manager: {_, nil}} = state), do: state
defp stop_group_manager(%{group_manager: {manager, pid}} = state) do
if Process.alive?(pid) do
manager.stop(pid)
end
%{state | group_manager: {manager, nil}}
end
defp maybe_restart_workers(assignment, %{workers: workers} = state) do
should_stop = WorkerPartition.partitions(workers) -- assignment
state =
Enum.reduce should_stop, state, fn partition, acc ->
stop_worker(partition, acc)
end
Enum.reduce assignment, state, fn partition, acc ->
start_worker(partition, acc)
end
end
defp try_restart_worker(pid, %{workers: workers} = state) do
case WorkerPartition.partition(workers, pid) do
nil -> state
partition ->
state = %{state | workers: WorkerPartition.delete(workers, partition, pid)}
start_worker partition, state
end
end
defp start_worker(partition, %{workers: workers} = state) do
case WorkerPartition.worker(workers, partition) do
nil ->
{:ok, pid} = do_start_worker(partition, state)
%{state | workers: WorkerPartition.update(workers, partition, pid)}
pid ->
case Process.alive?(pid) do
false ->
start_worker(partition, %{state | workers: WorkerPartition.delete(workers, partition, pid)})
true ->
state
end
end
end
defp stop_worker(partition, %{group: group, topic: topic, workers: workers} = state) do
case WorkerPartition.worker(workers, partition) do
nil ->
state
pid ->
if Process.alive?(pid) do
Logger.info "Stopping consumer worker: #{topic}:#{group}:#{partition}"
Worker.stop(pid)
end
%{state | workers: WorkerPartition.delete(workers, partition, pid)}
end
end
defp do_start_worker(partition, %{group: group,
topic: topic,
brokers: brokers,
leaders: leaders,
handler: handler,
client_id: client_id,
worker_cfg: worker_cfg,
offset_manager: offset_manager}) do
Logger.info "Starting consumer worker: #{topic}:#{group}:#{partition}"
leader = Map.get(leaders, partition)
broker = Map.get(brokers, leader)
worker_cfg = Keyword.merge([client_id: client_id], worker_cfg)
Worker.start_link(offset_manager, handler, topic, group, partition, broker, worker_cfg)
end
defp stop_workers(%{workers: workers} = state) do
Enum.each WorkerPartition.workers(workers), fn pid ->
if Process.alive?(pid), do: Worker.stop(pid)
end
%{state | workers: WorkerPartition.new}
end
end
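# A hedged sketch of starting the manager under a supervisor, using the
# Supervisor.Spec API contemporary with this code base; the consumer name
# and topic are illustrative.
import Supervisor.Spec
children = [
  worker(Cafex.Consumer.Manager, [:myconsumer, [topic: "interested_topic"]])
]
{:ok, _sup} = Supervisor.start_link(children, strategy: :one_for_one)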
lib/cafex/consumer/manager.ex
defmodule JSON.LD do
use RDF.Serialization.Format
import RDF.Sigils
alias JSON.LD.{Compaction, Context, Expansion, Flattening, Options}
@id ~I<http://www.w3.org/ns/formats/JSON-LD>
@name :jsonld
@extension "jsonld"
@media_type "application/ld+json"
@keywords ~w[
@base
@container
@context
@default
@graph
@id
@index
@language
@list
@reverse
@set
@type
@value
@vocab
:
]
@spec options :: Options.t()
def options, do: Options.new()
@doc """
The set of all JSON-LD keywords.
see <https://www.w3.org/TR/json-ld/#syntax-tokens-and-keywords>
"""
@spec keywords :: [String.t()]
def keywords, do: @keywords
@doc """
Returns if the given value is a JSON-LD keyword.
"""
@spec keyword?(String.t()) :: boolean
def keyword?(value) when is_binary(value) and value in @keywords, do: true
def keyword?(_value), do: false
@doc """
Expands the given input according to the steps in the JSON-LD Expansion Algorithm.
> Expansion is the process of taking a JSON-LD document and applying a `@context`
> such that all IRIs, types, and values are expanded so that the `@context` is
> no longer necessary.
-- <https://www.w3.org/TR/json-ld/#expanded-document-form>
Details at <http://json-ld.org/spec/latest/json-ld-api/#expansion-algorithm>
"""
@spec expand(map, Options.t() | Enum.t()) :: [map]
defdelegate expand(input, options \\ %Options{}),
to: Expansion
@doc """
Compacts the given input according to the steps in the JSON-LD Compaction Algorithm.
> Compaction is the process of applying a developer-supplied context to shorten
> IRIs to terms or compact IRIs and JSON-LD values expressed in expanded form
> to simple values such as strings or numbers. Often this makes it simpler to
> work with the document as the data is expressed in application-specific terms.
> Compacted documents are also typically easier to read for humans.
-- <https://www.w3.org/TR/json-ld/#compacted-document-form>
Details at <https://www.w3.org/TR/json-ld-api/#compaction-algorithms>
"""
@spec compact(map | [map], map | nil, Options.t() | Enum.t()) :: map
defdelegate compact(input, context, options \\ %Options{}),
to: Compaction
@doc """
Flattens the given input according to the steps in the JSON-LD Flattening Algorithm.
> Flattening collects all properties of a node in a single JSON object and labels
> all blank nodes with blank node identifiers. This ensures a shape of the data
> and consequently may drastically simplify the code required to process JSON-LD
> in certain applications.
-- <https://www.w3.org/TR/json-ld/#flattened-document-form>
Details at <https://www.w3.org/TR/json-ld-api/#flattening-algorithms>
"""
@spec flatten(map | [map], map | nil, Options.t() | Enum.t()) :: [map]
defdelegate flatten(input, context \\ nil, options \\ %Options{}),
to: Flattening
@doc """
Generator function for `JSON.LD.Context`s.
You can either pass a map with a `"@context"` key having the JSON-LD context
object its value, or the JSON-LD context object directly.
"""
@spec context(map, Options.t()) :: Context.t()
def context(args, opts \\ %Options{})
def context(%{"@context" => _} = object, options),
do: Context.create(object, options)
def context(context, options),
do: Context.create(%{"@context" => context}, options)
@doc """
Generator function for JSON-LD node maps.
"""
@spec node_map([map], pid | nil) :: map
defdelegate node_map(input, node_id_map \\ nil),
to: Flattening
end
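# A hedged expansion sketch; the document below is illustrative. Expansion
# resolves the term "name" against the supplied @context, yielding the
# expanded node object form.
doc = %{
  "@context" => %{"name" => "http://schema.org/name"},
  "name" => "Jane Doe"
}
[%{"http://schema.org/name" => [%{"@value" => "Jane Doe"}]}] = JSON.LD.expand(doc)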
lib/json_ld.ex
defmodule Oban.Crontab.Parser do
@moduledoc false
@doc """
Parses the given `binary` as cron.
Returns `{:ok, [token], rest, context, position, byte_offset}` or
`{:error, reason, rest, context, line, byte_offset}` where `position`
describes the location of the cron (start position) as `{line, column_on_line}`.
## Options
* `:line` - the initial line, defaults to 1
* `:byte_offset` - the initial byte offset, defaults to 0
* `:context` - the initial context value. It will be converted
to a map
"""
@spec cron(binary, keyword) ::
{:ok, [term], rest, context, line, byte_offset}
| {:error, reason, rest, context, line, byte_offset}
when line: {pos_integer, byte_offset},
byte_offset: pos_integer,
rest: binary,
reason: String.t(),
context: map()
def cron(binary, opts \\ []) when is_binary(binary) do
line = Keyword.get(opts, :line, 1)
offset = Keyword.get(opts, :byte_offset, 0)
context = Map.new(Keyword.get(opts, :context, []))
case(cron__0(binary, [], [], context, {line, offset}, offset)) do
{:ok, acc, rest, context, line, offset} ->
{:ok, :lists.reverse(acc), rest, context, line, offset}
{:error, _, _, _, _, _} = error ->
error
end
end
defp cron__0(rest, acc, stack, context, line, offset) do
cron__1(rest, [], [acc | stack], context, line, offset)
end
defp cron__1(rest, acc, stack, context, line, offset) do
cron__32(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__3(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__4(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__3(rest, _acc, _stack, context, line, offset) do
{:error,
"expected byte in the range ?0..?9, followed by byte in the range ?0..?9, followed by string \"-\", followed by byte in the range ?0..?9, followed by byte in the range ?0..?9 or byte in the range ?0..?9, followed by byte in the range ?0..?9 or string \"*/\", followed by byte in the range ?0..?9, followed by byte in the range ?0..?9 or string \"*\" or string \",\"",
rest, context, line, offset}
end
defp cron__4(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__2(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__5(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__3(rest, [], stack, context, line, offset)
end
defp cron__6(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__7(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__6(rest, acc, stack, context, line, offset) do
cron__5(rest, acc, stack, context, line, offset)
end
defp cron__7(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__2(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__8(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__6(rest, [], stack, context, line, offset)
end
defp cron__9(rest, acc, stack, context, line, offset) do
cron__10(rest, [], [acc | stack], context, line, offset)
end
defp cron__10(<<"*/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__11(rest, [] ++ acc, stack, context, comb__line, comb__offset + 2)
end
defp cron__10(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__8(rest, acc, stack, context, line, offset)
end
defp cron__11(rest, acc, stack, context, line, offset) do
cron__12(rest, [], [acc | stack], context, line, offset)
end
defp cron__12(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__13(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__12(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__8(rest, acc, stack, context, line, offset)
end
defp cron__13(rest, acc, stack, context, line, offset) do
cron__15(rest, acc, [1 | stack], context, line, offset)
end
defp cron__15(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__16(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__15(rest, acc, stack, context, line, offset) do
cron__14(rest, acc, stack, context, line, offset)
end
defp cron__14(rest, acc, [_ | stack], context, line, offset) do
cron__17(rest, acc, stack, context, line, offset)
end
defp cron__16(rest, acc, [1 | stack], context, line, offset) do
cron__17(rest, acc, stack, context, line, offset)
end
defp cron__16(rest, acc, [count | stack], context, line, offset) do
cron__15(rest, acc, [count - 1 | stack], context, line, offset)
end
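# Folds the accumulated ASCII digits into one integer: the first digit was
# stored already decoded (x0 - 48), later digits as raw bytes, so the fold
# is `x - 48 + acc * 10` over the reversed accumulator (e.g. "15" -> 15).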
defp cron__17(rest, user_acc, [acc | stack], context, line, offset) do
cron__18(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__18(rest, user_acc, [acc | stack], context, line, offset) do
cron__19(
rest,
[
step:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__19(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__2(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__20(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__9(rest, [], stack, context, line, offset)
end
defp cron__21(rest, acc, stack, context, line, offset) do
cron__22(rest, [], [acc | stack], context, line, offset)
end
defp cron__22(rest, acc, stack, context, line, offset) do
cron__23(rest, [], [acc | stack], context, line, offset)
end
defp cron__23(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__24(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__23(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__20(rest, acc, stack, context, line, offset)
end
defp cron__24(rest, acc, stack, context, line, offset) do
cron__26(rest, acc, [1 | stack], context, line, offset)
end
defp cron__26(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__27(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__26(rest, acc, stack, context, line, offset) do
cron__25(rest, acc, stack, context, line, offset)
end
defp cron__25(rest, acc, [_ | stack], context, line, offset) do
cron__28(rest, acc, stack, context, line, offset)
end
defp cron__27(rest, acc, [1 | stack], context, line, offset) do
cron__28(rest, acc, stack, context, line, offset)
end
defp cron__27(rest, acc, [count | stack], context, line, offset) do
cron__26(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__28(rest, user_acc, [acc | stack], context, line, offset) do
cron__29(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__29(rest, user_acc, [acc | stack], context, line, offset) do
cron__30(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__30(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__2(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__31(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__21(rest, [], stack, context, line, offset)
end
defp cron__32(rest, acc, stack, context, line, offset) do
cron__33(rest, [], [acc | stack], context, line, offset)
end
defp cron__33(rest, acc, stack, context, line, offset) do
cron__34(rest, [], [acc | stack], context, line, offset)
end
defp cron__34(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__35(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__34(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__31(rest, acc, stack, context, line, offset)
end
defp cron__35(rest, acc, stack, context, line, offset) do
cron__37(rest, acc, [1 | stack], context, line, offset)
end
defp cron__37(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__38(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__37(rest, acc, stack, context, line, offset) do
cron__36(rest, acc, stack, context, line, offset)
end
defp cron__36(rest, acc, [_ | stack], context, line, offset) do
cron__39(rest, acc, stack, context, line, offset)
end
defp cron__38(rest, acc, [1 | stack], context, line, offset) do
cron__39(rest, acc, stack, context, line, offset)
end
defp cron__38(rest, acc, [count | stack], context, line, offset) do
cron__37(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__39(rest, user_acc, [acc | stack], context, line, offset) do
cron__40(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__40(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__41(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__40(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__31(rest, acc, stack, context, line, offset)
end
defp cron__41(rest, acc, stack, context, line, offset) do
cron__42(rest, [], [acc | stack], context, line, offset)
end
defp cron__42(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__43(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__42(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__31(rest, acc, stack, context, line, offset)
end
defp cron__43(rest, acc, stack, context, line, offset) do
cron__45(rest, acc, [1 | stack], context, line, offset)
end
defp cron__45(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__46(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__45(rest, acc, stack, context, line, offset) do
cron__44(rest, acc, stack, context, line, offset)
end
defp cron__44(rest, acc, [_ | stack], context, line, offset) do
cron__47(rest, acc, stack, context, line, offset)
end
defp cron__46(rest, acc, [1 | stack], context, line, offset) do
cron__47(rest, acc, stack, context, line, offset)
end
defp cron__46(rest, acc, [count | stack], context, line, offset) do
cron__45(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__47(rest, user_acc, [acc | stack], context, line, offset) do
cron__48(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__48(rest, user_acc, [acc | stack], context, line, offset) do
cron__49(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__49(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__2(rest, acc ++ previous_acc, stack, context, line, offset)
end
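# cron__2/cron__50/cron__52 implement a repeat loop for the minutes field:
# each successful pass through the comma-separated choice re-enters cron__51
# with a fresh snapshot, and cron__50 exits with the last good state once no
# further match is possible.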
defp cron__2(rest, acc, stack, context, line, offset) do
cron__51(rest, [], [{rest, acc, context, line, offset} | stack], context, line, offset)
end
defp cron__51(rest, acc, stack, context, line, offset) do
cron__82(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__53(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__54(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__53(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__50(rest, acc, stack, context, line, offset)
end
defp cron__54(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__52(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__55(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__53(rest, [], stack, context, line, offset)
end
defp cron__56(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__57(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__56(rest, acc, stack, context, line, offset) do
cron__55(rest, acc, stack, context, line, offset)
end
defp cron__57(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__52(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__58(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__56(rest, [], stack, context, line, offset)
end
defp cron__59(rest, acc, stack, context, line, offset) do
cron__60(rest, [], [acc | stack], context, line, offset)
end
defp cron__60(<<"*/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__61(rest, [] ++ acc, stack, context, comb__line, comb__offset + 2)
end
defp cron__60(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__58(rest, acc, stack, context, line, offset)
end
defp cron__61(rest, acc, stack, context, line, offset) do
cron__62(rest, [], [acc | stack], context, line, offset)
end
defp cron__62(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__63(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__62(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__58(rest, acc, stack, context, line, offset)
end
defp cron__63(rest, acc, stack, context, line, offset) do
cron__65(rest, acc, [1 | stack], context, line, offset)
end
defp cron__65(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__66(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__65(rest, acc, stack, context, line, offset) do
cron__64(rest, acc, stack, context, line, offset)
end
defp cron__64(rest, acc, [_ | stack], context, line, offset) do
cron__67(rest, acc, stack, context, line, offset)
end
defp cron__66(rest, acc, [1 | stack], context, line, offset) do
cron__67(rest, acc, stack, context, line, offset)
end
defp cron__66(rest, acc, [count | stack], context, line, offset) do
cron__65(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__67(rest, user_acc, [acc | stack], context, line, offset) do
cron__68(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__68(rest, user_acc, [acc | stack], context, line, offset) do
cron__69(
rest,
[
step:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__69(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__52(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__70(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__59(rest, [], stack, context, line, offset)
end
defp cron__71(rest, acc, stack, context, line, offset) do
cron__72(rest, [], [acc | stack], context, line, offset)
end
defp cron__72(rest, acc, stack, context, line, offset) do
cron__73(rest, [], [acc | stack], context, line, offset)
end
defp cron__73(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__74(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__73(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__70(rest, acc, stack, context, line, offset)
end
defp cron__74(rest, acc, stack, context, line, offset) do
cron__76(rest, acc, [1 | stack], context, line, offset)
end
defp cron__76(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__77(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__76(rest, acc, stack, context, line, offset) do
cron__75(rest, acc, stack, context, line, offset)
end
defp cron__75(rest, acc, [_ | stack], context, line, offset) do
cron__78(rest, acc, stack, context, line, offset)
end
defp cron__77(rest, acc, [1 | stack], context, line, offset) do
cron__78(rest, acc, stack, context, line, offset)
end
defp cron__77(rest, acc, [count | stack], context, line, offset) do
cron__76(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__78(rest, user_acc, [acc | stack], context, line, offset) do
cron__79(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__79(rest, user_acc, [acc | stack], context, line, offset) do
cron__80(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__80(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__52(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__81(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__71(rest, [], stack, context, line, offset)
end
defp cron__82(rest, acc, stack, context, line, offset) do
cron__83(rest, [], [acc | stack], context, line, offset)
end
defp cron__83(rest, acc, stack, context, line, offset) do
cron__84(rest, [], [acc | stack], context, line, offset)
end
defp cron__84(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__85(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__84(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__81(rest, acc, stack, context, line, offset)
end
defp cron__85(rest, acc, stack, context, line, offset) do
cron__87(rest, acc, [1 | stack], context, line, offset)
end
defp cron__87(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__88(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__87(rest, acc, stack, context, line, offset) do
cron__86(rest, acc, stack, context, line, offset)
end
defp cron__86(rest, acc, [_ | stack], context, line, offset) do
cron__89(rest, acc, stack, context, line, offset)
end
defp cron__88(rest, acc, [1 | stack], context, line, offset) do
cron__89(rest, acc, stack, context, line, offset)
end
defp cron__88(rest, acc, [count | stack], context, line, offset) do
cron__87(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__89(rest, user_acc, [acc | stack], context, line, offset) do
cron__90(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__90(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__91(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__90(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__81(rest, acc, stack, context, line, offset)
end
defp cron__91(rest, acc, stack, context, line, offset) do
cron__92(rest, [], [acc | stack], context, line, offset)
end
defp cron__92(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__93(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__92(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__81(rest, acc, stack, context, line, offset)
end
defp cron__93(rest, acc, stack, context, line, offset) do
cron__95(rest, acc, [1 | stack], context, line, offset)
end
defp cron__95(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__96(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__95(rest, acc, stack, context, line, offset) do
cron__94(rest, acc, stack, context, line, offset)
end
defp cron__94(rest, acc, [_ | stack], context, line, offset) do
cron__97(rest, acc, stack, context, line, offset)
end
defp cron__96(rest, acc, [1 | stack], context, line, offset) do
cron__97(rest, acc, stack, context, line, offset)
end
defp cron__96(rest, acc, [count | stack], context, line, offset) do
cron__95(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__97(rest, user_acc, [acc | stack], context, line, offset) do
cron__98(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__98(rest, user_acc, [acc | stack], context, line, offset) do
cron__99(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__99(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__52(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__50(_, _, [{rest, acc, context, line, offset} | stack], _, _, _) do
cron__100(rest, acc, stack, context, line, offset)
end
defp cron__52(
inner_rest,
inner_acc,
[{rest, acc, context, line, offset} | stack],
inner_context,
inner_line,
inner_offset
) do
_ = {rest, acc, context, line, offset}
cron__51(
inner_rest,
[],
[{inner_rest, inner_acc ++ acc, inner_context, inner_line, inner_offset} | stack],
inner_context,
inner_line,
inner_offset
)
end
defp cron__100(rest, user_acc, [acc | stack], context, line, offset) do
cron__101(rest, [minutes: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
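# Field separator: cron__103 requires at least one space or tab (byte 32 or
# byte 9) and cron__104/cron__106 then consume any run of additional
# whitespace before the hours field begins.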
defp cron__101(rest, acc, stack, context, line, offset) do
cron__102(rest, [], [acc | stack], context, line, offset)
end
defp cron__102(rest, acc, stack, context, line, offset) do
cron__103(rest, [], [acc | stack], context, line, offset)
end
defp cron__103(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 === 32 or x0 === 9 do
cron__104(rest, acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__103(rest, _acc, _stack, context, line, offset) do
{:error, "expected byte equal to ? or equal to 9", rest, context, line, offset}
end
defp cron__104(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 === 32 or x0 === 9 do
cron__106(rest, acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__104(rest, acc, stack, context, line, offset) do
cron__105(rest, acc, stack, context, line, offset)
end
defp cron__106(rest, acc, stack, context, line, offset) do
cron__104(rest, acc, stack, context, line, offset)
end
defp cron__105(rest, _user_acc, [acc | stack], context, line, offset) do
cron__107(rest, acc, stack, context, line, offset)
end
defp cron__107(rest, _user_acc, [acc | stack], context, line, offset) do
cron__108(rest, [] ++ acc, stack, context, line, offset)
end
defp cron__108(rest, acc, stack, context, line, offset) do
cron__109(rest, [], [acc | stack], context, line, offset)
end
defp cron__109(rest, acc, stack, context, line, offset) do
cron__140(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__111(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__112(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__111(rest, _acc, _stack, context, line, offset) do
{:error,
"expected byte in the range ?0..?9, followed by byte in the range ?0..?9, followed by string \"-\", followed by byte in the range ?0..?9, followed by byte in the range ?0..?9 or byte in the range ?0..?9, followed by byte in the range ?0..?9 or string \"*/\", followed by byte in the range ?0..?9, followed by byte in the range ?0..?9 or string \"*\" or string \",\"",
rest, context, line, offset}
end
defp cron__112(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__110(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__113(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__111(rest, [], stack, context, line, offset)
end
defp cron__114(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__115(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__114(rest, acc, stack, context, line, offset) do
cron__113(rest, acc, stack, context, line, offset)
end
defp cron__115(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__110(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__116(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__114(rest, [], stack, context, line, offset)
end
defp cron__117(rest, acc, stack, context, line, offset) do
cron__118(rest, [], [acc | stack], context, line, offset)
end
defp cron__118(<<"*/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__119(rest, [] ++ acc, stack, context, comb__line, comb__offset + 2)
end
defp cron__118(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__116(rest, acc, stack, context, line, offset)
end
defp cron__119(rest, acc, stack, context, line, offset) do
cron__120(rest, [], [acc | stack], context, line, offset)
end
defp cron__120(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__121(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__120(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__116(rest, acc, stack, context, line, offset)
end
defp cron__121(rest, acc, stack, context, line, offset) do
cron__123(rest, acc, [1 | stack], context, line, offset)
end
defp cron__123(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__124(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__123(rest, acc, stack, context, line, offset) do
cron__122(rest, acc, stack, context, line, offset)
end
defp cron__122(rest, acc, [_ | stack], context, line, offset) do
cron__125(rest, acc, stack, context, line, offset)
end
defp cron__124(rest, acc, [1 | stack], context, line, offset) do
cron__125(rest, acc, stack, context, line, offset)
end
defp cron__124(rest, acc, [count | stack], context, line, offset) do
cron__123(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__125(rest, user_acc, [acc | stack], context, line, offset) do
cron__126(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__126(rest, user_acc, [acc | stack], context, line, offset) do
cron__127(
rest,
[
step:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__127(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__110(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__128(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__117(rest, [], stack, context, line, offset)
end
defp cron__129(rest, acc, stack, context, line, offset) do
cron__130(rest, [], [acc | stack], context, line, offset)
end
defp cron__130(rest, acc, stack, context, line, offset) do
cron__131(rest, [], [acc | stack], context, line, offset)
end
defp cron__131(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__132(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__131(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__128(rest, acc, stack, context, line, offset)
end
defp cron__132(rest, acc, stack, context, line, offset) do
cron__134(rest, acc, [1 | stack], context, line, offset)
end
defp cron__134(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__135(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__134(rest, acc, stack, context, line, offset) do
cron__133(rest, acc, stack, context, line, offset)
end
defp cron__133(rest, acc, [_ | stack], context, line, offset) do
cron__136(rest, acc, stack, context, line, offset)
end
defp cron__135(rest, acc, [1 | stack], context, line, offset) do
cron__136(rest, acc, stack, context, line, offset)
end
defp cron__135(rest, acc, [count | stack], context, line, offset) do
cron__134(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__136(rest, user_acc, [acc | stack], context, line, offset) do
cron__137(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__137(rest, user_acc, [acc | stack], context, line, offset) do
cron__138(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__138(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__110(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__139(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__129(rest, [], stack, context, line, offset)
end
defp cron__140(rest, acc, stack, context, line, offset) do
cron__141(rest, [], [acc | stack], context, line, offset)
end
defp cron__141(rest, acc, stack, context, line, offset) do
cron__142(rest, [], [acc | stack], context, line, offset)
end
defp cron__142(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__143(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__142(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__139(rest, acc, stack, context, line, offset)
end
defp cron__143(rest, acc, stack, context, line, offset) do
cron__145(rest, acc, [1 | stack], context, line, offset)
end
defp cron__145(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__146(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__145(rest, acc, stack, context, line, offset) do
cron__144(rest, acc, stack, context, line, offset)
end
defp cron__144(rest, acc, [_ | stack], context, line, offset) do
cron__147(rest, acc, stack, context, line, offset)
end
defp cron__146(rest, acc, [1 | stack], context, line, offset) do
cron__147(rest, acc, stack, context, line, offset)
end
defp cron__146(rest, acc, [count | stack], context, line, offset) do
cron__145(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__147(rest, user_acc, [acc | stack], context, line, offset) do
cron__148(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__148(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__149(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__148(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__139(rest, acc, stack, context, line, offset)
end
defp cron__149(rest, acc, stack, context, line, offset) do
cron__150(rest, [], [acc | stack], context, line, offset)
end
defp cron__150(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__151(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__150(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__139(rest, acc, stack, context, line, offset)
end
defp cron__151(rest, acc, stack, context, line, offset) do
cron__153(rest, acc, [1 | stack], context, line, offset)
end
defp cron__153(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__154(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__153(rest, acc, stack, context, line, offset) do
cron__152(rest, acc, stack, context, line, offset)
end
defp cron__152(rest, acc, [_ | stack], context, line, offset) do
cron__155(rest, acc, stack, context, line, offset)
end
defp cron__154(rest, acc, [1 | stack], context, line, offset) do
cron__155(rest, acc, stack, context, line, offset)
end
defp cron__154(rest, acc, [count | stack], context, line, offset) do
cron__153(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__155(rest, user_acc, [acc | stack], context, line, offset) do
cron__156(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__156(rest, user_acc, [acc | stack], context, line, offset) do
cron__157(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__157(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__110(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__110(rest, acc, stack, context, line, offset) do
cron__159(rest, [], [{rest, acc, context, line, offset} | stack], context, line, offset)
end
defp cron__159(rest, acc, stack, context, line, offset) do
cron__190(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__161(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__162(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__161(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__158(rest, acc, stack, context, line, offset)
end
defp cron__162(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__160(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__163(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__161(rest, [], stack, context, line, offset)
end
defp cron__164(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__165(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__164(rest, acc, stack, context, line, offset) do
cron__163(rest, acc, stack, context, line, offset)
end
defp cron__165(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__160(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__166(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__164(rest, [], stack, context, line, offset)
end
defp cron__167(rest, acc, stack, context, line, offset) do
cron__168(rest, [], [acc | stack], context, line, offset)
end
defp cron__168(<<"*/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__169(rest, [] ++ acc, stack, context, comb__line, comb__offset + 2)
end
defp cron__168(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__166(rest, acc, stack, context, line, offset)
end
defp cron__169(rest, acc, stack, context, line, offset) do
cron__170(rest, [], [acc | stack], context, line, offset)
end
defp cron__170(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__171(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__170(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__166(rest, acc, stack, context, line, offset)
end
defp cron__171(rest, acc, stack, context, line, offset) do
cron__173(rest, acc, [1 | stack], context, line, offset)
end
defp cron__173(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__174(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__173(rest, acc, stack, context, line, offset) do
cron__172(rest, acc, stack, context, line, offset)
end
defp cron__172(rest, acc, [_ | stack], context, line, offset) do
cron__175(rest, acc, stack, context, line, offset)
end
defp cron__174(rest, acc, [1 | stack], context, line, offset) do
cron__175(rest, acc, stack, context, line, offset)
end
defp cron__174(rest, acc, [count | stack], context, line, offset) do
cron__173(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__175(rest, user_acc, [acc | stack], context, line, offset) do
cron__176(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__176(rest, user_acc, [acc | stack], context, line, offset) do
cron__177(
rest,
[
step:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__177(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__160(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__178(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__167(rest, [], stack, context, line, offset)
end
defp cron__179(rest, acc, stack, context, line, offset) do
cron__180(rest, [], [acc | stack], context, line, offset)
end
defp cron__180(rest, acc, stack, context, line, offset) do
cron__181(rest, [], [acc | stack], context, line, offset)
end
defp cron__181(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__182(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__181(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__178(rest, acc, stack, context, line, offset)
end
defp cron__182(rest, acc, stack, context, line, offset) do
cron__184(rest, acc, [1 | stack], context, line, offset)
end
defp cron__184(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__185(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__184(rest, acc, stack, context, line, offset) do
cron__183(rest, acc, stack, context, line, offset)
end
defp cron__183(rest, acc, [_ | stack], context, line, offset) do
cron__186(rest, acc, stack, context, line, offset)
end
defp cron__185(rest, acc, [1 | stack], context, line, offset) do
cron__186(rest, acc, stack, context, line, offset)
end
defp cron__185(rest, acc, [count | stack], context, line, offset) do
cron__184(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__186(rest, user_acc, [acc | stack], context, line, offset) do
cron__187(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__187(rest, user_acc, [acc | stack], context, line, offset) do
cron__188(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__188(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__160(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__189(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__179(rest, [], stack, context, line, offset)
end
defp cron__190(rest, acc, stack, context, line, offset) do
cron__191(rest, [], [acc | stack], context, line, offset)
end
defp cron__191(rest, acc, stack, context, line, offset) do
cron__192(rest, [], [acc | stack], context, line, offset)
end
defp cron__192(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__193(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__192(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__189(rest, acc, stack, context, line, offset)
end
defp cron__193(rest, acc, stack, context, line, offset) do
cron__195(rest, acc, [1 | stack], context, line, offset)
end
defp cron__195(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__196(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__195(rest, acc, stack, context, line, offset) do
cron__194(rest, acc, stack, context, line, offset)
end
defp cron__194(rest, acc, [_ | stack], context, line, offset) do
cron__197(rest, acc, stack, context, line, offset)
end
defp cron__196(rest, acc, [1 | stack], context, line, offset) do
cron__197(rest, acc, stack, context, line, offset)
end
defp cron__196(rest, acc, [count | stack], context, line, offset) do
cron__195(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__197(rest, user_acc, [acc | stack], context, line, offset) do
cron__198(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__198(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__199(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__198(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__189(rest, acc, stack, context, line, offset)
end
defp cron__199(rest, acc, stack, context, line, offset) do
cron__200(rest, [], [acc | stack], context, line, offset)
end
defp cron__200(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__201(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__200(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__189(rest, acc, stack, context, line, offset)
end
defp cron__201(rest, acc, stack, context, line, offset) do
cron__203(rest, acc, [1 | stack], context, line, offset)
end
defp cron__203(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__204(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__203(rest, acc, stack, context, line, offset) do
cron__202(rest, acc, stack, context, line, offset)
end
defp cron__202(rest, acc, [_ | stack], context, line, offset) do
cron__205(rest, acc, stack, context, line, offset)
end
defp cron__204(rest, acc, [1 | stack], context, line, offset) do
cron__205(rest, acc, stack, context, line, offset)
end
defp cron__204(rest, acc, [count | stack], context, line, offset) do
cron__203(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__205(rest, user_acc, [acc | stack], context, line, offset) do
cron__206(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__206(rest, user_acc, [acc | stack], context, line, offset) do
cron__207(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__207(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__160(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__158(_, _, [{rest, acc, context, line, offset} | stack], _, _, _) do
cron__208(rest, acc, stack, context, line, offset)
end
defp cron__160(
inner_rest,
inner_acc,
[{rest, acc, context, line, offset} | stack],
inner_context,
inner_line,
inner_offset
) do
_ = {rest, acc, context, line, offset}
cron__159(
inner_rest,
[],
[{inner_rest, inner_acc ++ acc, inner_context, inner_line, inner_offset} | stack],
inner_context,
inner_line,
inner_offset
)
end
defp cron__208(rest, user_acc, [acc | stack], context, line, offset) do
cron__209(rest, [hours: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
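# Same shape as the minutes field: the hours tokens are tagged above, and the
# clauses that follow repeat the whitespace-then-field pattern for what is
# presumably the day-of-month field in a standard five-field expression.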
defp cron__209(rest, acc, stack, context, line, offset) do
cron__210(rest, [], [acc | stack], context, line, offset)
end
defp cron__210(rest, acc, stack, context, line, offset) do
cron__211(rest, [], [acc | stack], context, line, offset)
end
defp cron__211(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 === 32 or x0 === 9 do
cron__212(rest, acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__211(rest, _acc, _stack, context, line, offset) do
{:error, "expected byte equal to ? or equal to 9", rest, context, line, offset}
end
defp cron__212(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 === 32 or x0 === 9 do
cron__214(rest, acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__212(rest, acc, stack, context, line, offset) do
cron__213(rest, acc, stack, context, line, offset)
end
defp cron__214(rest, acc, stack, context, line, offset) do
cron__212(rest, acc, stack, context, line, offset)
end
defp cron__213(rest, _user_acc, [acc | stack], context, line, offset) do
cron__215(rest, acc, stack, context, line, offset)
end
defp cron__215(rest, _user_acc, [acc | stack], context, line, offset) do
cron__216(rest, [] ++ acc, stack, context, line, offset)
end
defp cron__216(rest, acc, stack, context, line, offset) do
cron__217(rest, [], [acc | stack], context, line, offset)
end
defp cron__217(rest, acc, stack, context, line, offset) do
cron__248(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__219(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__220(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__219(rest, _acc, _stack, context, line, offset) do
{:error,
"expected byte in the range ?0..?9, followed by byte in the range ?0..?9, followed by string \"-\", followed by byte in the range ?0..?9, followed by byte in the range ?0..?9 or byte in the range ?0..?9, followed by byte in the range ?0..?9 or string \"*/\", followed by byte in the range ?0..?9, followed by byte in the range ?0..?9 or string \"*\" or string \",\"",
rest, context, line, offset}
end
defp cron__220(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__218(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__221(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__219(rest, [], stack, context, line, offset)
end
defp cron__222(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__223(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__222(rest, acc, stack, context, line, offset) do
cron__221(rest, acc, stack, context, line, offset)
end
defp cron__223(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__218(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__224(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__222(rest, [], stack, context, line, offset)
end
defp cron__225(rest, acc, stack, context, line, offset) do
cron__226(rest, [], [acc | stack], context, line, offset)
end
defp cron__226(<<"*/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__227(rest, [] ++ acc, stack, context, comb__line, comb__offset + 2)
end
defp cron__226(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__224(rest, acc, stack, context, line, offset)
end
defp cron__227(rest, acc, stack, context, line, offset) do
cron__228(rest, [], [acc | stack], context, line, offset)
end
defp cron__228(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__229(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__228(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__224(rest, acc, stack, context, line, offset)
end
defp cron__229(rest, acc, stack, context, line, offset) do
cron__231(rest, acc, [1 | stack], context, line, offset)
end
defp cron__231(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__232(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__231(rest, acc, stack, context, line, offset) do
cron__230(rest, acc, stack, context, line, offset)
end
defp cron__230(rest, acc, [_ | stack], context, line, offset) do
cron__233(rest, acc, stack, context, line, offset)
end
defp cron__232(rest, acc, [1 | stack], context, line, offset) do
cron__233(rest, acc, stack, context, line, offset)
end
defp cron__232(rest, acc, [count | stack], context, line, offset) do
cron__231(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__233(rest, user_acc, [acc | stack], context, line, offset) do
cron__234(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__234(rest, user_acc, [acc | stack], context, line, offset) do
cron__235(
rest,
[
step:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__235(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__218(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__236(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__225(rest, [], stack, context, line, offset)
end
defp cron__237(rest, acc, stack, context, line, offset) do
cron__238(rest, [], [acc | stack], context, line, offset)
end
defp cron__238(rest, acc, stack, context, line, offset) do
cron__239(rest, [], [acc | stack], context, line, offset)
end
defp cron__239(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__240(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__239(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__236(rest, acc, stack, context, line, offset)
end
defp cron__240(rest, acc, stack, context, line, offset) do
cron__242(rest, acc, [1 | stack], context, line, offset)
end
defp cron__242(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__243(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__242(rest, acc, stack, context, line, offset) do
cron__241(rest, acc, stack, context, line, offset)
end
defp cron__241(rest, acc, [_ | stack], context, line, offset) do
cron__244(rest, acc, stack, context, line, offset)
end
defp cron__243(rest, acc, [1 | stack], context, line, offset) do
cron__244(rest, acc, stack, context, line, offset)
end
defp cron__243(rest, acc, [count | stack], context, line, offset) do
cron__242(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__244(rest, user_acc, [acc | stack], context, line, offset) do
cron__245(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__245(rest, user_acc, [acc | stack], context, line, offset) do
cron__246(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__246(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__218(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__247(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__237(rest, [], stack, context, line, offset)
end
defp cron__248(rest, acc, stack, context, line, offset) do
cron__249(rest, [], [acc | stack], context, line, offset)
end
defp cron__249(rest, acc, stack, context, line, offset) do
cron__250(rest, [], [acc | stack], context, line, offset)
end
defp cron__250(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__251(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__250(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__247(rest, acc, stack, context, line, offset)
end
defp cron__251(rest, acc, stack, context, line, offset) do
cron__253(rest, acc, [1 | stack], context, line, offset)
end
defp cron__253(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__254(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__253(rest, acc, stack, context, line, offset) do
cron__252(rest, acc, stack, context, line, offset)
end
defp cron__252(rest, acc, [_ | stack], context, line, offset) do
cron__255(rest, acc, stack, context, line, offset)
end
defp cron__254(rest, acc, [1 | stack], context, line, offset) do
cron__255(rest, acc, stack, context, line, offset)
end
defp cron__254(rest, acc, [count | stack], context, line, offset) do
cron__253(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__255(rest, user_acc, [acc | stack], context, line, offset) do
cron__256(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__256(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__257(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__256(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__247(rest, acc, stack, context, line, offset)
end
defp cron__257(rest, acc, stack, context, line, offset) do
cron__258(rest, [], [acc | stack], context, line, offset)
end
defp cron__258(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__259(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__258(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__247(rest, acc, stack, context, line, offset)
end
defp cron__259(rest, acc, stack, context, line, offset) do
cron__261(rest, acc, [1 | stack], context, line, offset)
end
defp cron__261(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__262(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__261(rest, acc, stack, context, line, offset) do
cron__260(rest, acc, stack, context, line, offset)
end
defp cron__260(rest, acc, [_ | stack], context, line, offset) do
cron__263(rest, acc, stack, context, line, offset)
end
defp cron__262(rest, acc, [1 | stack], context, line, offset) do
cron__263(rest, acc, stack, context, line, offset)
end
defp cron__262(rest, acc, [count | stack], context, line, offset) do
cron__261(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__263(rest, user_acc, [acc | stack], context, line, offset) do
cron__264(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__264(rest, user_acc, [acc | stack], context, line, offset) do
cron__265(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__265(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__218(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__218(rest, acc, stack, context, line, offset) do
cron__267(rest, [], [{rest, acc, context, line, offset} | stack], context, line, offset)
end
defp cron__267(rest, acc, stack, context, line, offset) do
cron__298(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__269(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__270(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__269(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__266(rest, acc, stack, context, line, offset)
end
defp cron__270(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__268(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__271(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__269(rest, [], stack, context, line, offset)
end
defp cron__272(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__273(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__272(rest, acc, stack, context, line, offset) do
cron__271(rest, acc, stack, context, line, offset)
end
defp cron__273(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__268(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__274(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__272(rest, [], stack, context, line, offset)
end
defp cron__275(rest, acc, stack, context, line, offset) do
cron__276(rest, [], [acc | stack], context, line, offset)
end
defp cron__276(<<"*/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__277(rest, [] ++ acc, stack, context, comb__line, comb__offset + 2)
end
defp cron__276(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__274(rest, acc, stack, context, line, offset)
end
defp cron__277(rest, acc, stack, context, line, offset) do
cron__278(rest, [], [acc | stack], context, line, offset)
end
defp cron__278(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__279(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__278(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__274(rest, acc, stack, context, line, offset)
end
defp cron__279(rest, acc, stack, context, line, offset) do
cron__281(rest, acc, [1 | stack], context, line, offset)
end
defp cron__281(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__282(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__281(rest, acc, stack, context, line, offset) do
cron__280(rest, acc, stack, context, line, offset)
end
defp cron__280(rest, acc, [_ | stack], context, line, offset) do
cron__283(rest, acc, stack, context, line, offset)
end
defp cron__282(rest, acc, [1 | stack], context, line, offset) do
cron__283(rest, acc, stack, context, line, offset)
end
defp cron__282(rest, acc, [count | stack], context, line, offset) do
cron__281(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__283(rest, user_acc, [acc | stack], context, line, offset) do
cron__284(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__284(rest, user_acc, [acc | stack], context, line, offset) do
cron__285(
rest,
[
step:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__285(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__268(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__286(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__275(rest, [], stack, context, line, offset)
end
defp cron__287(rest, acc, stack, context, line, offset) do
cron__288(rest, [], [acc | stack], context, line, offset)
end
defp cron__288(rest, acc, stack, context, line, offset) do
cron__289(rest, [], [acc | stack], context, line, offset)
end
defp cron__289(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__290(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__289(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__286(rest, acc, stack, context, line, offset)
end
defp cron__290(rest, acc, stack, context, line, offset) do
cron__292(rest, acc, [1 | stack], context, line, offset)
end
defp cron__292(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__293(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__292(rest, acc, stack, context, line, offset) do
cron__291(rest, acc, stack, context, line, offset)
end
defp cron__291(rest, acc, [_ | stack], context, line, offset) do
cron__294(rest, acc, stack, context, line, offset)
end
defp cron__293(rest, acc, [1 | stack], context, line, offset) do
cron__294(rest, acc, stack, context, line, offset)
end
defp cron__293(rest, acc, [count | stack], context, line, offset) do
cron__292(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__294(rest, user_acc, [acc | stack], context, line, offset) do
cron__295(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__295(rest, user_acc, [acc | stack], context, line, offset) do
cron__296(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__296(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__268(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__297(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__287(rest, [], stack, context, line, offset)
end
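# cron__297 (above) backtracks to the bare-literal alternative when the range
# states below fail. Those states parse "N-M" (two numbers joined by a dash)
# and tag the pair, in input order, as a `:range` token.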
defp cron__298(rest, acc, stack, context, line, offset) do
cron__299(rest, [], [acc | stack], context, line, offset)
end
defp cron__299(rest, acc, stack, context, line, offset) do
cron__300(rest, [], [acc | stack], context, line, offset)
end
defp cron__300(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__301(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__300(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__297(rest, acc, stack, context, line, offset)
end
defp cron__301(rest, acc, stack, context, line, offset) do
cron__303(rest, acc, [1 | stack], context, line, offset)
end
defp cron__303(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__304(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__303(rest, acc, stack, context, line, offset) do
cron__302(rest, acc, stack, context, line, offset)
end
defp cron__302(rest, acc, [_ | stack], context, line, offset) do
cron__305(rest, acc, stack, context, line, offset)
end
defp cron__304(rest, acc, [1 | stack], context, line, offset) do
cron__305(rest, acc, stack, context, line, offset)
end
defp cron__304(rest, acc, [count | stack], context, line, offset) do
cron__303(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__305(rest, user_acc, [acc | stack], context, line, offset) do
cron__306(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__306(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__307(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__306(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__297(rest, acc, stack, context, line, offset)
end
defp cron__307(rest, acc, stack, context, line, offset) do
cron__308(rest, [], [acc | stack], context, line, offset)
end
defp cron__308(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__309(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__308(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__297(rest, acc, stack, context, line, offset)
end
defp cron__309(rest, acc, stack, context, line, offset) do
cron__311(rest, acc, [1 | stack], context, line, offset)
end
defp cron__311(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__312(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__311(rest, acc, stack, context, line, offset) do
cron__310(rest, acc, stack, context, line, offset)
end
defp cron__310(rest, acc, [_ | stack], context, line, offset) do
cron__313(rest, acc, stack, context, line, offset)
end
defp cron__312(rest, acc, [1 | stack], context, line, offset) do
cron__313(rest, acc, stack, context, line, offset)
end
defp cron__312(rest, acc, [count | stack], context, line, offset) do
cron__311(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__313(rest, user_acc, [acc | stack], context, line, offset) do
cron__314(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__314(rest, user_acc, [acc | stack], context, line, offset) do
cron__315(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__315(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__268(rest, acc ++ previous_acc, stack, context, line, offset)
end
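# Repetition wiring for this field: cron__268 merges the tokens from the
# latest value into the running accumulator, saves a new checkpoint, and
# re-enters the choice (cron__267) to pick up further comma-separated values;
# cron__266 is the exit taken once no further value matches, handing the
# collected tokens to cron__316 below.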
defp cron__266(_, _, [{rest, acc, context, line, offset} | stack], _, _, _) do
cron__316(rest, acc, stack, context, line, offset)
end
defp cron__268(
inner_rest,
inner_acc,
[{rest, acc, context, line, offset} | stack],
inner_context,
inner_line,
inner_offset
) do
_ = {rest, acc, context, line, offset}
cron__267(
inner_rest,
[],
[{inner_rest, inner_acc ++ acc, inner_context, inner_line, inner_offset} | stack],
inner_context,
inner_line,
inner_offset
)
end
defp cron__316(rest, user_acc, [acc | stack], context, line, offset) do
cron__317(rest, [days: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
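# For illustration only (assuming the usual entry point generated by
# `defparsec :cron, ...`, which is not part of this excerpt): a day-of-month
# field such as "1,2-5" parsed by the states above would contribute the token
# `days: [literal: 1, range: [2, 5]]` to the overall result.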
defp cron__317(rest, acc, stack, context, line, offset) do
cron__318(rest, [], [acc | stack], context, line, offset)
end
defp cron__318(rest, acc, stack, context, line, offset) do
cron__319(rest, [], [acc | stack], context, line, offset)
end
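# Field separator: require one space or tab (byte 32 or 9), then greedily
# consume any further run of them; the matched bytes are discarded
# (cron__321/cron__323 drop the user accumulator).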
defp cron__319(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 === 32 or x0 === 9 do
cron__320(rest, acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__319(rest, _acc, _stack, context, line, offset) do
{:error, "expected byte equal to ?\\s or equal to ?\\t", rest, context, line, offset}
end
defp cron__320(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 === 32 or x0 === 9 do
cron__322(rest, acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__320(rest, acc, stack, context, line, offset) do
cron__321(rest, acc, stack, context, line, offset)
end
defp cron__322(rest, acc, stack, context, line, offset) do
cron__320(rest, acc, stack, context, line, offset)
end
defp cron__321(rest, _user_acc, [acc | stack], context, line, offset) do
cron__323(rest, acc, stack, context, line, offset)
end
defp cron__323(rest, _user_acc, [acc | stack], context, line, offset) do
cron__324(rest, [] ++ acc, stack, context, line, offset)
end
defp cron__324(rest, acc, stack, context, line, offset) do
cron__325(rest, [], [acc | stack], context, line, offset)
end
defp cron__325(rest, acc, stack, context, line, offset) do
cron__377(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__327(rest, acc, stack, context, line, offset) do
cron__358(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__329(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__330(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__329(rest, _acc, _stack, context, line, offset) do
{:error,
"expected string \"JAN\" or string \"FEB\" or string \"MAR\" or string \"APR\" or string \"MAY\" or string \"JUN\" or string \"JUL\" or string \"AUG\" or string \"SEP\" or string \"OCT\" or string \"NOV\" or string \"DEC\" or byte in the range ?0..?9, followed by byte in the range ?0..?9, followed by string \"-\", followed by byte in the range ?0..?9, followed by byte in the range ?0..?9 or byte in the range ?0..?9, followed by byte in the range ?0..?9 or string \"*/\", followed by byte in the range ?0..?9, followed by byte in the range ?0..?9 or string \"*\" or string \",\"",
rest, context, line, offset}
end
defp cron__330(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__328(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__331(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__329(rest, [], stack, context, line, offset)
end
defp cron__332(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__333(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__332(rest, acc, stack, context, line, offset) do
cron__331(rest, acc, stack, context, line, offset)
end
defp cron__333(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__328(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__334(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__332(rest, [], stack, context, line, offset)
end
defp cron__335(rest, acc, stack, context, line, offset) do
cron__336(rest, [], [acc | stack], context, line, offset)
end
defp cron__336(<<"*/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__337(rest, [] ++ acc, stack, context, comb__line, comb__offset + 2)
end
defp cron__336(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__334(rest, acc, stack, context, line, offset)
end
defp cron__337(rest, acc, stack, context, line, offset) do
cron__338(rest, [], [acc | stack], context, line, offset)
end
defp cron__338(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__339(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__338(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__334(rest, acc, stack, context, line, offset)
end
defp cron__339(rest, acc, stack, context, line, offset) do
cron__341(rest, acc, [1 | stack], context, line, offset)
end
defp cron__341(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__342(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__341(rest, acc, stack, context, line, offset) do
cron__340(rest, acc, stack, context, line, offset)
end
defp cron__340(rest, acc, [_ | stack], context, line, offset) do
cron__343(rest, acc, stack, context, line, offset)
end
defp cron__342(rest, acc, [1 | stack], context, line, offset) do
cron__343(rest, acc, stack, context, line, offset)
end
defp cron__342(rest, acc, [count | stack], context, line, offset) do
cron__341(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__343(rest, user_acc, [acc | stack], context, line, offset) do
cron__344(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__344(rest, user_acc, [acc | stack], context, line, offset) do
cron__345(
rest,
[
step:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__345(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__328(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__346(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__335(rest, [], stack, context, line, offset)
end
defp cron__347(rest, acc, stack, context, line, offset) do
cron__348(rest, [], [acc | stack], context, line, offset)
end
defp cron__348(rest, acc, stack, context, line, offset) do
cron__349(rest, [], [acc | stack], context, line, offset)
end
defp cron__349(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__350(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__349(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__346(rest, acc, stack, context, line, offset)
end
defp cron__350(rest, acc, stack, context, line, offset) do
cron__352(rest, acc, [1 | stack], context, line, offset)
end
defp cron__352(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__353(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__352(rest, acc, stack, context, line, offset) do
cron__351(rest, acc, stack, context, line, offset)
end
defp cron__351(rest, acc, [_ | stack], context, line, offset) do
cron__354(rest, acc, stack, context, line, offset)
end
defp cron__353(rest, acc, [1 | stack], context, line, offset) do
cron__354(rest, acc, stack, context, line, offset)
end
defp cron__353(rest, acc, [count | stack], context, line, offset) do
cron__352(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__354(rest, user_acc, [acc | stack], context, line, offset) do
cron__355(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__355(rest, user_acc, [acc | stack], context, line, offset) do
cron__356(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__356(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__328(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__357(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__347(rest, [], stack, context, line, offset)
end
defp cron__358(rest, acc, stack, context, line, offset) do
cron__359(rest, [], [acc | stack], context, line, offset)
end
defp cron__359(rest, acc, stack, context, line, offset) do
cron__360(rest, [], [acc | stack], context, line, offset)
end
defp cron__360(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__361(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__360(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__357(rest, acc, stack, context, line, offset)
end
defp cron__361(rest, acc, stack, context, line, offset) do
cron__363(rest, acc, [1 | stack], context, line, offset)
end
defp cron__363(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__364(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__363(rest, acc, stack, context, line, offset) do
cron__362(rest, acc, stack, context, line, offset)
end
defp cron__362(rest, acc, [_ | stack], context, line, offset) do
cron__365(rest, acc, stack, context, line, offset)
end
defp cron__364(rest, acc, [1 | stack], context, line, offset) do
cron__365(rest, acc, stack, context, line, offset)
end
defp cron__364(rest, acc, [count | stack], context, line, offset) do
cron__363(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__365(rest, user_acc, [acc | stack], context, line, offset) do
cron__366(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__366(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__367(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__366(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__357(rest, acc, stack, context, line, offset)
end
defp cron__367(rest, acc, stack, context, line, offset) do
cron__368(rest, [], [acc | stack], context, line, offset)
end
defp cron__368(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__369(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__368(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__357(rest, acc, stack, context, line, offset)
end
defp cron__369(rest, acc, stack, context, line, offset) do
cron__371(rest, acc, [1 | stack], context, line, offset)
end
defp cron__371(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__372(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__371(rest, acc, stack, context, line, offset) do
cron__370(rest, acc, stack, context, line, offset)
end
defp cron__370(rest, acc, [_ | stack], context, line, offset) do
cron__373(rest, acc, stack, context, line, offset)
end
defp cron__372(rest, acc, [1 | stack], context, line, offset) do
cron__373(rest, acc, stack, context, line, offset)
end
defp cron__372(rest, acc, [count | stack], context, line, offset) do
cron__371(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__373(rest, user_acc, [acc | stack], context, line, offset) do
cron__374(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__374(rest, user_acc, [acc | stack], context, line, offset) do
cron__375(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__375(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__328(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__328(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__326(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__376(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__327(rest, [], stack, context, line, offset)
end
defp cron__377(rest, acc, stack, context, line, offset) do
cron__378(rest, [], [acc | stack], context, line, offset)
end
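# Month-name literals: "JAN".."DEC" are matched case-sensitively and pushed
# as the month numbers 1..12.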
defp cron__378(<<"JAN", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__379(rest, [1] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__378(<<"FEB", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__379(rest, [2] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__378(<<"MAR", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__379(rest, [3] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__378(<<"APR", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__379(rest, [4] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__378(<<"MAY", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__379(rest, [5] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__378(<<"JUN", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__379(rest, [6] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__378(<<"JUL", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__379(rest, [7] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__378(<<"AUG", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__379(rest, '\b' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__378(<<"SEP", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__379(rest, '\t' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__378(<<"OCT", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__379(rest, '\n' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__378(<<"NOV", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__379(rest, '\v' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__378(<<"DEC", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__379(rest, '\f' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__378(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__376(rest, acc, stack, context, line, offset)
end
defp cron__379(rest, user_acc, [acc | stack], context, line, offset) do
cron__380(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__380(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__326(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__326(rest, acc, stack, context, line, offset) do
cron__382(rest, [], [{rest, acc, context, line, offset} | stack], context, line, offset)
end
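# Comma-separated repetition for the month field: cron__326 (above) saves a
# checkpoint, and the states below re-run the same choice of month name,
# range, literal, step, and wildcard for each additional ","-separated value.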
defp cron__382(rest, acc, stack, context, line, offset) do
cron__434(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__384(rest, acc, stack, context, line, offset) do
cron__415(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__386(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__387(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__386(rest, _acc, stack, context, line, offset) do
[_, _, _, acc | stack] = stack
cron__381(rest, acc, stack, context, line, offset)
end
defp cron__387(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__385(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__388(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__386(rest, [], stack, context, line, offset)
end
defp cron__389(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__390(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__389(rest, acc, stack, context, line, offset) do
cron__388(rest, acc, stack, context, line, offset)
end
defp cron__390(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__385(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__391(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__389(rest, [], stack, context, line, offset)
end
defp cron__392(rest, acc, stack, context, line, offset) do
cron__393(rest, [], [acc | stack], context, line, offset)
end
defp cron__393(<<"*/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__394(rest, [] ++ acc, stack, context, comb__line, comb__offset + 2)
end
defp cron__393(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__391(rest, acc, stack, context, line, offset)
end
defp cron__394(rest, acc, stack, context, line, offset) do
cron__395(rest, [], [acc | stack], context, line, offset)
end
defp cron__395(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__396(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__395(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__391(rest, acc, stack, context, line, offset)
end
defp cron__396(rest, acc, stack, context, line, offset) do
cron__398(rest, acc, [1 | stack], context, line, offset)
end
defp cron__398(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__399(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__398(rest, acc, stack, context, line, offset) do
cron__397(rest, acc, stack, context, line, offset)
end
defp cron__397(rest, acc, [_ | stack], context, line, offset) do
cron__400(rest, acc, stack, context, line, offset)
end
defp cron__399(rest, acc, [1 | stack], context, line, offset) do
cron__400(rest, acc, stack, context, line, offset)
end
defp cron__399(rest, acc, [count | stack], context, line, offset) do
cron__398(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__400(rest, user_acc, [acc | stack], context, line, offset) do
cron__401(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__401(rest, user_acc, [acc | stack], context, line, offset) do
cron__402(
rest,
[
step:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__402(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__385(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__403(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__392(rest, [], stack, context, line, offset)
end
defp cron__404(rest, acc, stack, context, line, offset) do
cron__405(rest, [], [acc | stack], context, line, offset)
end
defp cron__405(rest, acc, stack, context, line, offset) do
cron__406(rest, [], [acc | stack], context, line, offset)
end
defp cron__406(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__407(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__406(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__403(rest, acc, stack, context, line, offset)
end
defp cron__407(rest, acc, stack, context, line, offset) do
cron__409(rest, acc, [1 | stack], context, line, offset)
end
defp cron__409(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__410(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__409(rest, acc, stack, context, line, offset) do
cron__408(rest, acc, stack, context, line, offset)
end
defp cron__408(rest, acc, [_ | stack], context, line, offset) do
cron__411(rest, acc, stack, context, line, offset)
end
defp cron__410(rest, acc, [1 | stack], context, line, offset) do
cron__411(rest, acc, stack, context, line, offset)
end
defp cron__410(rest, acc, [count | stack], context, line, offset) do
cron__409(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__411(rest, user_acc, [acc | stack], context, line, offset) do
cron__412(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__412(rest, user_acc, [acc | stack], context, line, offset) do
cron__413(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__413(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__385(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__414(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__404(rest, [], stack, context, line, offset)
end
defp cron__415(rest, acc, stack, context, line, offset) do
cron__416(rest, [], [acc | stack], context, line, offset)
end
defp cron__416(rest, acc, stack, context, line, offset) do
cron__417(rest, [], [acc | stack], context, line, offset)
end
defp cron__417(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__418(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__417(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__414(rest, acc, stack, context, line, offset)
end
defp cron__418(rest, acc, stack, context, line, offset) do
cron__420(rest, acc, [1 | stack], context, line, offset)
end
defp cron__420(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__421(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__420(rest, acc, stack, context, line, offset) do
cron__419(rest, acc, stack, context, line, offset)
end
defp cron__419(rest, acc, [_ | stack], context, line, offset) do
cron__422(rest, acc, stack, context, line, offset)
end
defp cron__421(rest, acc, [1 | stack], context, line, offset) do
cron__422(rest, acc, stack, context, line, offset)
end
defp cron__421(rest, acc, [count | stack], context, line, offset) do
cron__420(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__422(rest, user_acc, [acc | stack], context, line, offset) do
cron__423(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__423(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__424(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__423(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__414(rest, acc, stack, context, line, offset)
end
defp cron__424(rest, acc, stack, context, line, offset) do
cron__425(rest, [], [acc | stack], context, line, offset)
end
defp cron__425(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__426(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__425(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__414(rest, acc, stack, context, line, offset)
end
defp cron__426(rest, acc, stack, context, line, offset) do
cron__428(rest, acc, [1 | stack], context, line, offset)
end
defp cron__428(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__429(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__428(rest, acc, stack, context, line, offset) do
cron__427(rest, acc, stack, context, line, offset)
end
defp cron__427(rest, acc, [_ | stack], context, line, offset) do
cron__430(rest, acc, stack, context, line, offset)
end
defp cron__429(rest, acc, [1 | stack], context, line, offset) do
cron__430(rest, acc, stack, context, line, offset)
end
defp cron__429(rest, acc, [count | stack], context, line, offset) do
cron__428(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__430(rest, user_acc, [acc | stack], context, line, offset) do
cron__431(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__431(rest, user_acc, [acc | stack], context, line, offset) do
cron__432(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__432(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__385(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__385(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__383(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__433(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__384(rest, [], stack, context, line, offset)
end
defp cron__434(rest, acc, stack, context, line, offset) do
cron__435(rest, [], [acc | stack], context, line, offset)
end
defp cron__435(<<"JAN", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__436(rest, [1] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__435(<<"FEB", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__436(rest, [2] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__435(<<"MAR", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__436(rest, [3] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__435(<<"APR", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__436(rest, [4] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__435(<<"MAY", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__436(rest, [5] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__435(<<"JUN", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__436(rest, [6] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__435(<<"JUL", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__436(rest, [7] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__435(<<"AUG", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__436(rest, '\b' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__435(<<"SEP", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__436(rest, '\t' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__435(<<"OCT", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__436(rest, '\n' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__435(<<"NOV", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__436(rest, '\v' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__435(<<"DEC", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__436(rest, '\f' ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__435(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__433(rest, acc, stack, context, line, offset)
end
defp cron__436(rest, user_acc, [acc | stack], context, line, offset) do
cron__437(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__437(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__383(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__381(_, _, [{rest, acc, context, line, offset} | stack], _, _, _) do
cron__438(rest, acc, stack, context, line, offset)
end
defp cron__383(
inner_rest,
inner_acc,
[{rest, acc, context, line, offset} | stack],
inner_context,
inner_line,
inner_offset
) do
_ = {rest, acc, context, line, offset}
cron__382(
inner_rest,
[],
[{inner_rest, inner_acc ++ acc, inner_context, inner_line, inner_offset} | stack],
inner_context,
inner_line,
inner_offset
)
end
defp cron__438(rest, user_acc, [acc | stack], context, line, offset) do
cron__439(rest, [months: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__439(rest, acc, stack, context, line, offset) do
cron__440(rest, [], [acc | stack], context, line, offset)
end
defp cron__440(rest, acc, stack, context, line, offset) do
cron__441(rest, [], [acc | stack], context, line, offset)
end
defp cron__441(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 === 32 or x0 === 9 do
cron__442(rest, acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__441(rest, _acc, _stack, context, line, offset) do
{:error, "expected byte equal to ?\\s or equal to ?\\t", rest, context, line, offset}
end
defp cron__442(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 === 32 or x0 === 9 do
cron__444(rest, acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__442(rest, acc, stack, context, line, offset) do
cron__443(rest, acc, stack, context, line, offset)
end
defp cron__444(rest, acc, stack, context, line, offset) do
cron__442(rest, acc, stack, context, line, offset)
end
defp cron__443(rest, _user_acc, [acc | stack], context, line, offset) do
cron__445(rest, acc, stack, context, line, offset)
end
defp cron__445(rest, _user_acc, [acc | stack], context, line, offset) do
cron__446(rest, [] ++ acc, stack, context, line, offset)
end
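# Day-of-week field: after the separator consumed above, the same choice of
# range/literal/step/wildcard applies, with the "MON".."SUN" name literals
# handled further below.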
defp cron__446(rest, acc, stack, context, line, offset) do
cron__447(rest, [], [acc | stack], context, line, offset)
end
defp cron__447(rest, acc, stack, context, line, offset) do
cron__499(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__449(rest, acc, stack, context, line, offset) do
cron__480(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__451(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__452(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__451(rest, _acc, _stack, context, line, offset) do
{:error,
"expected string \"MON\" or string \"TUE\" or string \"WED\" or string \"THU\" or string \"FRI\" or string \"SAT\" or string \"SUN\" or byte in the range ?0..?9, followed by byte in the range ?0..?9, followed by string \"-\", followed by byte in the range ?0..?9, followed by byte in the range ?0..?9 or byte in the range ?0..?9, followed by byte in the range ?0..?9 or string \"*/\", followed by byte in the range ?0..?9, followed by byte in the range ?0..?9 or string \"*\" or string \",\"",
rest, context, line, offset}
end
defp cron__452(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__450(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__453(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__451(rest, [], stack, context, line, offset)
end
defp cron__454(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__455(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__454(rest, acc, stack, context, line, offset) do
cron__453(rest, acc, stack, context, line, offset)
end
defp cron__455(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__450(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__456(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__454(rest, [], stack, context, line, offset)
end
defp cron__457(rest, acc, stack, context, line, offset) do
cron__458(rest, [], [acc | stack], context, line, offset)
end
defp cron__458(<<"*/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__459(rest, [] ++ acc, stack, context, comb__line, comb__offset + 2)
end
defp cron__458(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__456(rest, acc, stack, context, line, offset)
end
defp cron__459(rest, acc, stack, context, line, offset) do
cron__460(rest, [], [acc | stack], context, line, offset)
end
defp cron__460(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__461(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__460(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__456(rest, acc, stack, context, line, offset)
end
defp cron__461(rest, acc, stack, context, line, offset) do
cron__463(rest, acc, [1 | stack], context, line, offset)
end
defp cron__463(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__464(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__463(rest, acc, stack, context, line, offset) do
cron__462(rest, acc, stack, context, line, offset)
end
defp cron__462(rest, acc, [_ | stack], context, line, offset) do
cron__465(rest, acc, stack, context, line, offset)
end
defp cron__464(rest, acc, [1 | stack], context, line, offset) do
cron__465(rest, acc, stack, context, line, offset)
end
defp cron__464(rest, acc, [count | stack], context, line, offset) do
cron__463(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__465(rest, user_acc, [acc | stack], context, line, offset) do
cron__466(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__466(rest, user_acc, [acc | stack], context, line, offset) do
cron__467(
rest,
[
step:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__467(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__450(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__468(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__457(rest, [], stack, context, line, offset)
end
defp cron__469(rest, acc, stack, context, line, offset) do
cron__470(rest, [], [acc | stack], context, line, offset)
end
defp cron__470(rest, acc, stack, context, line, offset) do
cron__471(rest, [], [acc | stack], context, line, offset)
end
defp cron__471(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__472(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__471(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__468(rest, acc, stack, context, line, offset)
end
defp cron__472(rest, acc, stack, context, line, offset) do
cron__474(rest, acc, [1 | stack], context, line, offset)
end
defp cron__474(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__475(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__474(rest, acc, stack, context, line, offset) do
cron__473(rest, acc, stack, context, line, offset)
end
defp cron__473(rest, acc, [_ | stack], context, line, offset) do
cron__476(rest, acc, stack, context, line, offset)
end
defp cron__475(rest, acc, [1 | stack], context, line, offset) do
cron__476(rest, acc, stack, context, line, offset)
end
defp cron__475(rest, acc, [count | stack], context, line, offset) do
cron__474(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__476(rest, user_acc, [acc | stack], context, line, offset) do
cron__477(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__477(rest, user_acc, [acc | stack], context, line, offset) do
cron__478(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__478(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__450(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__479(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__469(rest, [], stack, context, line, offset)
end
defp cron__480(rest, acc, stack, context, line, offset) do
cron__481(rest, [], [acc | stack], context, line, offset)
end
defp cron__481(rest, acc, stack, context, line, offset) do
cron__482(rest, [], [acc | stack], context, line, offset)
end
defp cron__482(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__483(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__482(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__479(rest, acc, stack, context, line, offset)
end
defp cron__483(rest, acc, stack, context, line, offset) do
cron__485(rest, acc, [1 | stack], context, line, offset)
end
defp cron__485(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__486(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__485(rest, acc, stack, context, line, offset) do
cron__484(rest, acc, stack, context, line, offset)
end
defp cron__484(rest, acc, [_ | stack], context, line, offset) do
cron__487(rest, acc, stack, context, line, offset)
end
defp cron__486(rest, acc, [1 | stack], context, line, offset) do
cron__487(rest, acc, stack, context, line, offset)
end
defp cron__486(rest, acc, [count | stack], context, line, offset) do
cron__485(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__487(rest, user_acc, [acc | stack], context, line, offset) do
cron__488(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__488(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__489(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__488(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__479(rest, acc, stack, context, line, offset)
end
defp cron__489(rest, acc, stack, context, line, offset) do
cron__490(rest, [], [acc | stack], context, line, offset)
end
defp cron__490(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__491(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__490(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__479(rest, acc, stack, context, line, offset)
end
defp cron__491(rest, acc, stack, context, line, offset) do
cron__493(rest, acc, [1 | stack], context, line, offset)
end
defp cron__493(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__494(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__493(rest, acc, stack, context, line, offset) do
cron__492(rest, acc, stack, context, line, offset)
end
defp cron__492(rest, acc, [_ | stack], context, line, offset) do
cron__495(rest, acc, stack, context, line, offset)
end
defp cron__494(rest, acc, [1 | stack], context, line, offset) do
cron__495(rest, acc, stack, context, line, offset)
end
defp cron__494(rest, acc, [count | stack], context, line, offset) do
cron__493(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__495(rest, user_acc, [acc | stack], context, line, offset) do
cron__496(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__496(rest, user_acc, [acc | stack], context, line, offset) do
cron__497(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__497(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__450(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__450(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__448(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__498(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__449(rest, [], stack, context, line, offset)
end
defp cron__499(rest, acc, stack, context, line, offset) do
cron__500(rest, [], [acc | stack], context, line, offset)
end
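# Weekday-name literals map to the numbers used by standard cron:
# MON=1 through SAT=6, and SUN=0.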
defp cron__500(<<"MON", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__501(rest, [1] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__500(<<"TUE", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__501(rest, [2] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__500(<<"WED", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__501(rest, [3] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__500(<<"THU", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__501(rest, [4] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__500(<<"FRI", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__501(rest, [5] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__500(<<"SAT", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__501(rest, [6] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__500(<<"SUN", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__501(rest, [0] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__500(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__498(rest, acc, stack, context, line, offset)
end
defp cron__501(rest, user_acc, [acc | stack], context, line, offset) do
cron__502(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__502(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__448(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__448(rest, acc, stack, context, line, offset) do
cron__504(rest, [], [{rest, acc, context, line, offset} | stack], context, line, offset)
end
defp cron__504(rest, acc, stack, context, line, offset) do
cron__556(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__506(rest, acc, stack, context, line, offset) do
cron__537(rest, [], [{rest, context, line, offset}, acc | stack], context, line, offset)
end
defp cron__508(<<",", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__509(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__508(rest, _acc, stack, context, line, offset) do
[_, _, _, acc | stack] = stack
cron__503(rest, acc, stack, context, line, offset)
end
defp cron__509(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__507(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__510(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__508(rest, [], stack, context, line, offset)
end
defp cron__511(<<"*", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__512(rest, [wild: "*"] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__511(rest, acc, stack, context, line, offset) do
cron__510(rest, acc, stack, context, line, offset)
end
defp cron__512(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__507(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__513(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__511(rest, [], stack, context, line, offset)
end
defp cron__514(rest, acc, stack, context, line, offset) do
cron__515(rest, [], [acc | stack], context, line, offset)
end
defp cron__515(<<"*/", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__516(rest, [] ++ acc, stack, context, comb__line, comb__offset + 2)
end
defp cron__515(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__513(rest, acc, stack, context, line, offset)
end
defp cron__516(rest, acc, stack, context, line, offset) do
cron__517(rest, [], [acc | stack], context, line, offset)
end
defp cron__517(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__518(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__517(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__513(rest, acc, stack, context, line, offset)
end
defp cron__518(rest, acc, stack, context, line, offset) do
cron__520(rest, acc, [1 | stack], context, line, offset)
end
defp cron__520(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__521(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__520(rest, acc, stack, context, line, offset) do
cron__519(rest, acc, stack, context, line, offset)
end
defp cron__519(rest, acc, [_ | stack], context, line, offset) do
cron__522(rest, acc, stack, context, line, offset)
end
defp cron__521(rest, acc, [1 | stack], context, line, offset) do
cron__522(rest, acc, stack, context, line, offset)
end
defp cron__521(rest, acc, [count | stack], context, line, offset) do
cron__520(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__522(rest, user_acc, [acc | stack], context, line, offset) do
cron__523(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__523(rest, user_acc, [acc | stack], context, line, offset) do
cron__524(
rest,
[
step:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__524(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__507(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__525(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__514(rest, [], stack, context, line, offset)
end
defp cron__526(rest, acc, stack, context, line, offset) do
cron__527(rest, [], [acc | stack], context, line, offset)
end
defp cron__527(rest, acc, stack, context, line, offset) do
cron__528(rest, [], [acc | stack], context, line, offset)
end
defp cron__528(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__529(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__528(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__525(rest, acc, stack, context, line, offset)
end
defp cron__529(rest, acc, stack, context, line, offset) do
cron__531(rest, acc, [1 | stack], context, line, offset)
end
defp cron__531(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__532(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__531(rest, acc, stack, context, line, offset) do
cron__530(rest, acc, stack, context, line, offset)
end
defp cron__530(rest, acc, [_ | stack], context, line, offset) do
cron__533(rest, acc, stack, context, line, offset)
end
defp cron__532(rest, acc, [1 | stack], context, line, offset) do
cron__533(rest, acc, stack, context, line, offset)
end
defp cron__532(rest, acc, [count | stack], context, line, offset) do
cron__531(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__533(rest, user_acc, [acc | stack], context, line, offset) do
cron__534(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__534(rest, user_acc, [acc | stack], context, line, offset) do
cron__535(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__535(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__507(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__536(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__526(rest, [], stack, context, line, offset)
end
defp cron__537(rest, acc, stack, context, line, offset) do
cron__538(rest, [], [acc | stack], context, line, offset)
end
defp cron__538(rest, acc, stack, context, line, offset) do
cron__539(rest, [], [acc | stack], context, line, offset)
end
defp cron__539(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__540(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__539(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__536(rest, acc, stack, context, line, offset)
end
defp cron__540(rest, acc, stack, context, line, offset) do
cron__542(rest, acc, [1 | stack], context, line, offset)
end
defp cron__542(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__543(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__542(rest, acc, stack, context, line, offset) do
cron__541(rest, acc, stack, context, line, offset)
end
defp cron__541(rest, acc, [_ | stack], context, line, offset) do
cron__544(rest, acc, stack, context, line, offset)
end
defp cron__543(rest, acc, [1 | stack], context, line, offset) do
cron__544(rest, acc, stack, context, line, offset)
end
defp cron__543(rest, acc, [count | stack], context, line, offset) do
cron__542(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__544(rest, user_acc, [acc | stack], context, line, offset) do
cron__545(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__545(<<"-", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__546(rest, [] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__545(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__536(rest, acc, stack, context, line, offset)
end
defp cron__546(rest, acc, stack, context, line, offset) do
cron__547(rest, [], [acc | stack], context, line, offset)
end
defp cron__547(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__548(rest, [x0 - 48] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__547(rest, _acc, stack, context, line, offset) do
[_, acc | stack] = stack
cron__536(rest, acc, stack, context, line, offset)
end
defp cron__548(rest, acc, stack, context, line, offset) do
cron__550(rest, acc, [1 | stack], context, line, offset)
end
defp cron__550(<<x0::integer, rest::binary>>, acc, stack, context, comb__line, comb__offset)
when x0 >= 48 and x0 <= 57 do
cron__551(rest, [x0] ++ acc, stack, context, comb__line, comb__offset + 1)
end
defp cron__550(rest, acc, stack, context, line, offset) do
cron__549(rest, acc, stack, context, line, offset)
end
defp cron__549(rest, acc, [_ | stack], context, line, offset) do
cron__552(rest, acc, stack, context, line, offset)
end
defp cron__551(rest, acc, [1 | stack], context, line, offset) do
cron__552(rest, acc, stack, context, line, offset)
end
defp cron__551(rest, acc, [count | stack], context, line, offset) do
cron__550(rest, acc, [count - 1 | stack], context, line, offset)
end
defp cron__552(rest, user_acc, [acc | stack], context, line, offset) do
cron__553(
rest,
(
[head | tail] = :lists.reverse(user_acc)
[:lists.foldl(fn x, acc -> x - 48 + acc * 10 end, head, tail)]
) ++ acc,
stack,
context,
line,
offset
)
end
defp cron__553(rest, user_acc, [acc | stack], context, line, offset) do
cron__554(rest, [range: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__554(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__507(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__507(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__505(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__555(_, _, [{rest, context, line, offset} | _] = stack, _, _, _) do
cron__506(rest, [], stack, context, line, offset)
end
defp cron__556(rest, acc, stack, context, line, offset) do
cron__557(rest, [], [acc | stack], context, line, offset)
end
defp cron__557(<<"MON", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__558(rest, [1] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__557(<<"TUE", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__558(rest, [2] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__557(<<"WED", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__558(rest, [3] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__557(<<"THU", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__558(rest, [4] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__557(<<"FRI", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__558(rest, [5] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__557(<<"SAT", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__558(rest, [6] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__557(<<"SUN", rest::binary>>, acc, stack, context, comb__line, comb__offset) do
cron__558(rest, [0] ++ acc, stack, context, comb__line, comb__offset + 3)
end
defp cron__557(rest, _acc, stack, context, line, offset) do
[acc | stack] = stack
cron__555(rest, acc, stack, context, line, offset)
end
defp cron__558(rest, user_acc, [acc | stack], context, line, offset) do
cron__559(
rest,
[
literal:
case(:lists.reverse(user_acc)) do
[one] ->
one
many ->
raise("unwrap_and_tag/3 expected a single token, got: #{inspect(many)}")
end
] ++ acc,
stack,
context,
line,
offset
)
end
defp cron__559(rest, acc, [_, previous_acc | stack], context, line, offset) do
cron__505(rest, acc ++ previous_acc, stack, context, line, offset)
end
defp cron__503(_, _, [{rest, acc, context, line, offset} | stack], _, _, _) do
cron__560(rest, acc, stack, context, line, offset)
end
defp cron__505(
inner_rest,
inner_acc,
[{rest, acc, context, line, offset} | stack],
inner_context,
inner_line,
inner_offset
) do
_ = {rest, acc, context, line, offset}
cron__504(
inner_rest,
[],
[{inner_rest, inner_acc ++ acc, inner_context, inner_line, inner_offset} | stack],
inner_context,
inner_line,
inner_offset
)
end
defp cron__560(rest, user_acc, [acc | stack], context, line, offset) do
cron__561(rest, [weekdays: :lists.reverse(user_acc)] ++ acc, stack, context, line, offset)
end
defp cron__561(rest, acc, _stack, context, line, offset) do
{:ok, acc, rest, context, line, offset}
end
end
# lib/oban/crontab/parser.ex
defmodule Yodlee.Cobrand do
@moduledoc """
Functions for `cobrand` endpoint.
"""
import Yodlee
alias Yodlee.Utils
defstruct cobrand_id: nil, application_id: nil, locale: nil, session: nil
@type t :: %__MODULE__{cobrand_id: integer,
application_id: String.t,
locale: String.t,
session: String.t
}
@type cob_session :: String.t
@type error :: Yodlee.Error.t | HTTPoison.Error.t
@endpoint "cobrand"
@doc """
Gets a Cobrand session. Uses the credentials provided in configuration by default.
Automatically sets the session token in the configuration variable `cob_session`
when the login is successful.
```
cred = %{
cobrandLogin: "your_cobrand_login",
cobrandPassword: "<PASSWORD>"
}
```
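On success the session token is also stored in configuration, so a login call
might look like this (an illustrative sketch):
```
{:ok, %Yodlee.Cobrand{session: session}} = Yodlee.Cobrand.login(cred)
```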
"""
@spec login(map | nil) :: {:ok, Yodlee.Cobrand.t} | {:error, error}
def login(cred \\ get_cobrand_cred()) do
endpoint = "#{@endpoint}/login"
params = %{cobrand: Map.merge(cred, Utils.locale())}
make_request(:post, endpoint, params)
|> Utils.handle_resp(:cobrand)
end
@doc """
Adds a webhook callback URL to the Cobrand.
"""
@spec add_webhook(cob_session, String.t, String.t) :: {:ok, map} | {:error, error | atom}
def add_webhook(session, event_name, callback_url) when event_name in ["REFRESH", "DATA_UPDATES"] do
endpoint = "#{@endpoint}/config/notifications/events/#{event_name}"
params = %{event: %{callbackUrl: callback_url}}
make_request_in_session(:post, endpoint, session, params)
|> Utils.handle_resp(:any)
end
def add_webhook(_session, _event_name, _callback_url) do
{:error, :invalid_params}
end
@doc """
Lists a Cobrand's webhooks.
"""
@spec list_webhooks(cob_session) :: {:ok, [Yodlee.Webhook.t]} | {:error, error}
def list_webhooks(session) do
endpoint = "#{@endpoint}/config/notifications/events"
make_request_in_session(:get, endpoint, session)
|> Utils.handle_resp(:webhook)
end
@doc """
Deletes a Cobrand's webhooks associated with event name.
"""
@spec delete_webhook(cob_session, String.t) :: {:ok, map} | {:error, error}
def delete_webhook(session, event_name) do
endpoint = "#{@endpoint}/config/notifications/events/#{event_name}"
make_request_in_session(:delete, endpoint, session)
|> Utils.handle_resp(:any)
end
end
# lib/yodlee/cobrand.ex
defmodule SiteEncrypt.Phoenix do
@moduledoc """
`SiteEncrypt` adapter for Phoenix endpoints.
## Usage
1. Add `use SiteEncrypt.Phoenix` to your endpoint immediately after `use Phoenix.Endpoint`
2. Configure https via `configure_https/2`.
3. Add the implementation of `c:SiteEncrypt.certification/0` to the endpoint (the
`@behaviour SiteEncrypt` is injected when this module is used).
4. Start the endpoint by providing `{SiteEncrypt.Phoenix, PhoenixDemo.Endpoint}` as a supervisor child.
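Putting these steps together, a minimal endpoint might look like the following
sketch (the `PhoenixDemo` names are illustrative and the certification body is
left as a placeholder):

    defmodule PhoenixDemo.Endpoint do
      use Phoenix.Endpoint, otp_app: :phoenix_demo
      use SiteEncrypt.Phoenix

      @impl Phoenix.Endpoint
      def init(_key, config),
        do: {:ok, SiteEncrypt.Phoenix.configure_https(config)}

      @impl SiteEncrypt
      def certification do
        # return your SiteEncrypt certification config here
        raise "provide the certification config"
      end
    end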
"""
use SiteEncrypt.Adapter
alias SiteEncrypt.Adapter
@spec child_spec(endpoint :: module) :: Supervisor.child_spec()
@doc "Starts the endpoint managed by `SiteEncrypt`."
@spec start_link(endpoint :: module) :: Supervisor.on_start()
def start_link(endpoint), do: Adapter.start_link(__MODULE__, endpoint, endpoint)
@doc """
Merges paths to key and certificates to the `:https` configuration of the endpoint config.
Invoke this macro from `c:Phoenix.Endpoint.init/2` to complete the https configuration:
defmodule MyEndpoint do
# ...
@impl Phoenix.Endpoint
def init(_key, config) do
# this will merge key, cert, and chain into `:https` configuration from config.exs
{:ok, SiteEncrypt.Phoenix.configure_https(config)}
# to completely configure https from `init/2`, invoke:
# SiteEncrypt.Phoenix.configure_https(config, port: 4001, ...)
end
# ...
end
The `options` are any valid adapter HTTPS options. For many great tips on configuring HTTPS for
production refer to the [Plug HTTPS guide](https://hexdocs.pm/plug/https.html#content).
"""
defmacro configure_https(config, https_opts \\ []) do
quote bind_quoted: [config: config, https_opts: https_opts] do
https_config =
(Keyword.get(config, :https) || [])
|> Config.Reader.merge(https_opts)
|> Config.Reader.merge(SiteEncrypt.https_keys(__MODULE__))
Keyword.put(config, :https, https_config)
end
end
@doc false
defmacro __using__(_opts) do
quote do
unless Enum.member?(@behaviour, Phoenix.Endpoint),
do: raise("SiteEncrypt.Phoenix must be used after Phoenix.Endpoint")
@behaviour SiteEncrypt
require SiteEncrypt
require SiteEncrypt.Phoenix
plug SiteEncrypt.AcmeChallenge, __MODULE__
@impl SiteEncrypt
def handle_new_cert, do: :ok
defoverridable handle_new_cert: 0
end
end
@impl Adapter
def config(_id, endpoint) do
%{
certification: endpoint.certification(),
site_spec: endpoint.child_spec([])
}
end
@impl Adapter
def http_port(_id, endpoint) do
if server?(endpoint) do
http_config = endpoint.config(:http)
with true <- Keyword.keyword?(http_config),
port when is_integer(port) <- Keyword.get(http_config, :port) do
{:ok, port}
else
_ ->
raise_http_required(http_config)
end
else
:error
end
end
defp raise_http_required(http_config) do
raise "Unable to retrieve HTTP port from the HTTP configuration. SiteEncrypt relies on the Lets Encrypt " <>
"HTTP-01 challenge type which requires an HTTP version of the endpoint to be running and " <>
"the configuration received did not include an http port.\n" <>
"Received: #{inspect(http_config)}"
end
defp server?(endpoint) do
endpoint.config(:server) ||
Application.get_env(:phoenix, :serve_endpoints, false)
end
end
# lib/site_encrypt/phoenix.ex
defmodule Gotham.TokenStore do
@moduledoc """
The `Gotham.TokenStore` is a simple `GenServer` that manages storage and retrieval
of `Gotham.Token` tokens. When adding to the token store, it also queues tokens
for a refresh before they expire: ten seconds before the token is set to expire,
the `TokenStore` will call the API to get a new token and replace the expired
token in the store.
"""
use GenServer
alias Gotham.{Token, GCPClient}
# APIs
def start_link(account_name: account_name, keyfile_content: _) do
GenServer.start_link(
__MODULE__,
%{account_name: account_name},
name: worker_name(account_name)
)
end
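# A hedged usage sketch (the scope URL is illustrative):
#
#     {:ok, _pid} =
#       Gotham.TokenStore.start_link(account_name: :default, keyfile_content: keyfile)
#     {:ok, token} =
#       Gotham.TokenStore.for_scope("https://www.googleapis.com/auth/cloud-platform")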
def for_scope(scope) do
account_name = Gotham.get_account_name()
account_name
|> worker_name
|> GenServer.call({:fetch, scope})
end
def request(scope) do
scope |> GCPClient.get_access_token()
end
def store(%Token{account_name: account_name} = token) do
account_name
|> worker_name
|> GenServer.call({:store, token})
end
def refresh(%{account_name: account_name} = original_token) do
account_name
|> worker_name
|> GenServer.call({:refresh, original_token})
end
# Callbacks
def init(state) do
Gotham.put_account_name(state.account_name)
{:ok, state}
end
def handle_call({:fetch, scope}, _from, state) do
with :error <- Map.fetch(state, scope),
{:ok, token} <- GCPClient.get_access_token(scope),
{:ok, new_state} <- put_token(state, token),
{:ok, _pid} <- queue_for_refresh(token) do
{:reply, {:ok, token}, new_state}
else
token ->
{:reply, token, state}
end
end
def handle_call({:store, token}, _from, state) do
{:ok, new_state} = put_token(state, token)
{:reply, new_state, new_state}
end
def handle_call({:refresh, %{scope: scope}}, _from, state) do
with {:ok, new_token} <- scope |> GCPClient.get_access_token() do
{:ok, new_state} = state |> put_token(new_token)
{:reply, new_token, new_state}
end
end
def handle_info(_, state) do
{:noreply, state}
end
defp put_token(state, token) do
{:ok, Map.put(state, token.scope, token)}
end
defp queue_for_refresh(token) do
  task = Task.async(fn -> refresh_loop(token) end)
  {:ok, task.pid}
end
defp refresh_loop(%{expire_at: expire_at} = token) do
  diff = expire_at - :os.system_time(:seconds)
  # Rebind the token outside the `if`; a rebinding inside the block would not
  # escape it, so the refreshed token must be returned from the expression.
  token =
    if diff <= 10 do
      __MODULE__.refresh(token)
    else
      Process.sleep((diff - 10) * 1_000)
      token
    end
  refresh_loop(token)
end
defp worker_name(account_name) do
:"gotham_token_store_for_#{account_name}"
end
defp task_supervisor_name(account_name, scope) do
  scope_suffix = scope |> String.split("/") |> List.last()
  :"gotham_token_store_for_#{account_name}_#{scope_suffix}"
end
end
# lib/gotham/token_store.ex
defmodule Similarity.Simhash do
@moduledoc """
Simhash string similarity algorithm.
[Description of Simhash](https://matpalm.com/resemblance/simhash/)
iex> Similarity.simhash("Barna", "Kovacs")
0.59375
iex> Similarity.simhash("Austria", "Australia")
0.65625
"""
@moduledoc since: "0.1.1"
@doc """
Calculates the similarity between the left and right string, using Simhash.
Returns a float representing similarity between `left` and `right` strings.
## Options
* `:ngram_size` - defaults to 3
## Examples
iex> Similarity.simhash("khan academy", "khan academia")
0.890625
iex> Similarity.simhash("khan academy", "academy khan", ngram_size: 1)
1.0
"""
@spec similarity(String.t(), String.t(), keyword()) :: float
def similarity(left, right, options \\ []) when is_binary(left) and is_binary(right) do
n = options[:ngram_size] || 3
hash_similarity(hash(left, n), hash(right, n))
end
@doc """
Returns the hash for the given string.
## Examples
Similarity.Simhash.hash("alma korte", 3)
[1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, ...]
"""
@spec hash(String.t(), pos_integer) :: list(0 | 1)
def hash(string, n) do
string
|> ngram_hashes(n)
|> vector_addition
|> normalize_bits
end
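# How the simhash is built: each n-gram is hashed to 64 bits, each bit is
# mapped to +1/-1 (see to_list/1), the vectors are summed position-wise,
# and positions with a positive sum become 1 bits.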
@doc false
def ngram_hashes(string, n) do
string
|> FastNgram.letter_ngrams(n)
|> Enum.map(&(&1 |> siphash |> to_list))
end
@doc false
def hash_similarity(left, right) do
1 - hamming_distance(left, right) / 64
end
@doc """
Returns Hamming distance between the `left` and `right` hash,
given as lists of bits.
## Examples
iex> Similarity.Simhash.hamming_distance([1, 1, 0, 1, 0], [0, 1, 1, 1, 0])
2
"""
def hamming_distance(left, right, acc \\ 0)
def hamming_distance([same | tl_left], [same | tl_right], acc) do
hamming_distance(tl_left, tl_right, acc)
end
def hamming_distance([_ | tl_left], [_ | tl_right], acc) do
hamming_distance(tl_left, tl_right, acc + 1)
end
def hamming_distance([], [], acc), do: acc
defp vector_addition([hd_list | tl_lists]) do
vector_addition(tl_lists, hd_list)
end
defp vector_addition([hd_list | tl_lists], acc_list) do
new_acc_list = :lists.zipwith(fn x, y -> x + y end, hd_list, acc_list)
vector_addition(tl_lists, new_acc_list)
end
defp vector_addition([], acc_list), do: acc_list
defp to_list(<<1::size(1), data::bitstring>>), do: [1 | to_list(data)]
defp to_list(<<0::size(1), data::bitstring>>), do: [-1 | to_list(data)]
defp to_list(<<>>), do: []
defp normalize_bits([head | tail]) when head > 0, do: [1 | normalize_bits(tail)]
defp normalize_bits([_head | tail]), do: [0 | normalize_bits(tail)]
defp normalize_bits([]), do: []
defp siphash(str) do
int = SipHash.hash!("0123456789ABCDEF", str)
<<int::64>>
end
end
# lib/similarity/simhash.ex
defmodule Backpack.Moment.Presenter do
@minutes_in_year 525600
@minutes_in_quarter_year 131400
@minutes_in_three_quarters_year 394200
def time_ago_in_words(from, opts) do
unit = Keyword.get(opts, :unit, :seconds)
distance_of_time_in_words(from, Backpack.Moment.timestamp(unit), opts)
end
def distance_of_time_in_words(from, to, opts) do
unit = Keyword.get(opts, :unit, :seconds)
include_seconds = Keyword.get(opts, :include_seconds, false)
from_time = Backpack.Moment.to_unix(from, unit)
from_time = System.convert_time_unit(from_time, unit, :seconds)
to_time = Backpack.Moment.to_unix(to, unit)
to_time = System.convert_time_unit(to_time, unit, :seconds)
[from_time, to_time] = if from_time > to_time, do: [to_time, from_time], else: [from_time, to_time]
distance_in_minutes = round((to_time - from_time) / 60)
distance_in_seconds = round(to_time - from_time)
if distance_in_minutes <= 1 do
if include_seconds do
distance_in_seconds_to_words(distance_in_seconds)
else
"less than a minute"
end
else
with :almost_2_years <- distance_in_minutes_to_words(distance_in_minutes) do
minutes_with_offset = calculate_minutes_offset(from, to, distance_in_minutes)
remainder = rem(minutes_with_offset, @minutes_in_year)
distance_in_years = div(minutes_with_offset, @minutes_in_year)
cond do
remainder < @minutes_in_quarter_year ->
"about #{distance_in_years} years"
remainder < @minutes_in_three_quarters_year ->
"over #{distance_in_years} years"
true ->
"almost #{distance_in_years + 1} years"
end
end
end
end
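# Worked examples of the bucketing below (hedged): a 45-minute gap falls into
# the `minutes < 90` clause and reads "about an hour"; a 30-second gap with
# `include_seconds: true` reads "half a minute".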
defp distance_in_seconds_to_words(seconds) when seconds < 4,
do: "less than 5 seconds"
defp distance_in_seconds_to_words(seconds) when seconds < 9,
do: "less than 10 seconds"
defp distance_in_seconds_to_words(seconds) when seconds < 19,
do: "less than 20 seconds"
defp distance_in_seconds_to_words(seconds) when seconds < 39,
do: "half a minute"
defp distance_in_seconds_to_words(seconds) when seconds < 59,
do: "less than a minute"
defp distance_in_seconds_to_words(_seconds),
do: "a minute"
defp distance_in_minutes_to_words(minutes) when minutes < 45,
do: "#{minutes} minutes"
defp distance_in_minutes_to_words(minutes) when minutes < 90,
do: "about an hour"
defp distance_in_minutes_to_words(minutes) when minutes < 1440,
do: "about #{round(minutes / 60)} hours"
defp distance_in_minutes_to_words(minutes) when minutes < 2520,
do: "a day"
defp distance_in_minutes_to_words(minutes) when minutes < 43200,
do: "#{round(minutes / 1440)} days"
defp distance_in_minutes_to_words(minutes) when minutes < 86400,
do: "about a month"
defp distance_in_minutes_to_words(minutes) when minutes < 525600,
do: "#{round(minutes / 43200)} months"
defp distance_in_minutes_to_words(minutes) when minutes < 525600 + @minutes_in_quarter_year,
do: "about a year"
defp distance_in_minutes_to_words(minutes) when minutes < 525600 + @minutes_in_three_quarters_year,
do: "over a year"
defp distance_in_minutes_to_words(_minutes),
do: :almost_2_years
defp calculate_minutes_offset(%from_struct{} = from, %to_struct{} = to, distance_in_minutes)
when from_struct in [DateTime, NaiveDateTime, Date]
and to_struct in [DateTime, NaiveDateTime, Date] do
from_year = from.year
from_year = if from.month >= 3, do: from_year + 1, else: from_year
to_year = to.year
to_year = if to.month < 3, do: to_year - 1, else: to_year
leap_years =
if from_year > to_year do
0
else
from_year..to_year
|> Enum.filter(&Calendar.ISO.leap_year?/1)
|> Enum.count()
end
minutes_offset_for_leap_year = leap_years * 1440
# Discount the leap year days when calculating the year distance.
# E.g. if there are 20 leap year days between 2 dates having the same day
# and month, then a calculation based on 365-day years will put the
# distance at over 80 years, when in written English it would read
# better as about 80 years.
distance_in_minutes - minutes_offset_for_leap_year
end
defp calculate_minutes_offset(_from, _to, distance_in_minutes),
do: distance_in_minutes
end
# lib/backpack/moment/presenter.ex
defmodule NimbleTOTP do
@moduledoc ~S"""
NimbleTOTP is a tiny library for Two-factor authentication (2FA) that
allows developers to implement Time-Based One-Time Passwords (TOTP)
for their applications.
## Two-factor authentication (2FA)
The concept of 2FA is quite simple. It's an extra layer of security
that demands a user to provide two pieces of evidence (factors) to
the authentication system before access can be granted.
One way to implement 2FA is to generate a random secret for the user
and whenever the system needs to perform a critical action it will
ask the user to enter a validation code. This validation code is a
Time-Based One-Time Password (TOTP) based on the user's secret and can be
provided by an authentication app like Google Authenticator or Authy, which
should be previously installed and configured on a compatible device, e.g.
a smartphone.
> **Note:** A critical action can mean different things depending on
the application. For instance, while in a banking system the login itself
is already considered a critical action, in other systems a user may
be allowed to log in using just the password and only when trying to
update critical data (e.g. its profile) 2FA will be required.
## Using NimbleTOTP
In order to allow developers to implement 2FA, NimbleTOTP provides functions to:
* Generate secrets composed of random bytes.
* Generate URIs to be encoded in a QR Code.
* Generate Time-Based One-Time Passwords based on a secret.
### Generating the secret
The first step to set up 2FA for a user is to generate (and later persist) its random
secret. You can achieve that using `NimbleTOTP.secret/1`.
Example:
secret = NimbleTOTP.secret()
#=> <<178, 117, 46, 7, 172, 202, 108, 127, 186, 180, ...>>
By default, a binary with 20 random bytes is generated per the
[HOTP RFC](https://tools.ietf.org/html/rfc4226#section-4).
### Generating URIs for QR Code
Before persisting the secret, you need to make sure the user has already
configured the authentication app in a compatible device. The most common
way to do that is to generate a QR Code that can be read by the app.
You can use `NimbleTOTP.otpauth_uri/3` along with
[eqrcode](https://github.com/SiliconJungles/eqrcode) to generate the QR
code as **SVG**.
Example:
uri = NimbleTOTP.otpauth_uri("Acme:alice", secret, issuer: "Acme")
#=> "otpauth://totp/Acme:alice?secret=MFRGGZA&issuer=Acme"
uri |> EQRCode.encode() |> EQRCode.svg()
#=> "<?xml version=\\"1.0\\" standalone=\\"yes\\"?>\\n<svg version=\\"1.1\\" ...
### Generating a Time-Based One-Time Password
After successfully reading the QR Code, the app will start generating a
different 6 digit code every `30s`. You can compute the verification code
with:
NimbleTOTP.verification_code(secret)
#=> "569777"
The code can be validated using the `valid?/3` function. Example:
NimbleTOTP.valid?(secret, "569777")
#=> true
NimbleTOTP.valid?(secret, "012345")
#=> false
After validating the code, you can finally persist the user's secret so you can use
it later whenever you need to authorize any critical action using 2FA.
## Preventing codes from being reused
The [TOTP RFC](https://tools.ietf.org/html/rfc6238#section-5.2) requires that a
code can only be used once. This is a security feature that prevents codes from
being reused. To ensure codes are only considered valid if they have not been
used, you need to keep track of the last time the user entered a TOTP code.
NimbleTOTP.valid?(user.totp_secret, code, since: user.last_totp_at)
Assuming the code itself is valid for the given secret, if `since` is `nil`,
the code will be considered valid. If since is given, it will not allow
codes in the same time period (30 seconds by default) to be reused. The user
will have to wait for the next code to be generated.
"""
import Bitwise
@totp_size 6
@default_totp_period 30
@typedoc "Unix time in seconds, `t:DateTime.t()` or `t:NaiveDateTime.t()`."
@type time() :: DateTime.t() | NaiveDateTime.t() | integer()
@typedoc "Options for `verification_code/2` and `valid?/3`."
@type option() :: {:time, time()} | {:period, pos_integer()}
@typedoc "Options for `valid?/3`."
@type validate_option() :: {:since, time() | nil}
@doc """
Generate the uri to be encoded in the QR code.
## Examples
iex> NimbleTOTP.otpauth_uri("Acme:alice", "abcd", issuer: "Acme")
"otpauth://totp/Acme:alice?secret=MFRGGZA&issuer=Acme"
"""
@spec otpauth_uri(String.t(), String.t(), keyword()) :: String.t()
def otpauth_uri(label, secret, uri_params \\ []) do
key = Base.encode32(secret, padding: false)
params = [{:secret, key} | uri_params]
query = URI.encode_query(params)
"otpauth://totp/#{URI.encode(label)}?#{query}"
end
@doc """
Generate a binary composed of random bytes.
The number of bytes is defined by the `size` argument. Default is `20` per the
[HOTP RFC](https://tools.ietf.org/html/rfc4226#section-4).
## Examples
NimbleTOTP.secret()
#=> <<178, 117, 46, 7, 172, 202, 108, 127, 186, 180, ...>>
"""
@spec secret(non_neg_integer()) :: binary()
def secret(size \\ 20) do
:crypto.strong_rand_bytes(size)
end
@doc """
Generate Time-Based One-Time Password.
## Options
* :time - The time (either `%NaiveDateTime{}`, `%DateTime{}` or unix format) to
be used. Default is `System.os_time(:second)`
* :period - The period (in seconds) in which the code is valid. Default is `30`.
## Examples
NimbleTOTP.verification_code(secret)
#=> "569777"
"""
@spec verification_code(binary(), [option()]) :: binary()
def verification_code(secret, opts \\ []) do
time = opts |> Keyword.get(:time, System.os_time(:second)) |> to_unix()
period = Keyword.get(opts, :period, @default_totp_period)
verification_code(secret, time, period)
end
@spec verification_code(binary(), integer(), pos_integer()) :: binary()
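# The pipeline below follows HOTP/TOTP: HMAC the current time window,
# dynamically truncate the digest to 31 bits, then take the value modulo
# 10^6 and left-pad it to six digits.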
defp verification_code(secret, time, period) do
secret
|> hmac(time, period)
|> hmac_truncate()
|> rem(1_000_000)
|> to_string()
|> String.pad_leading(@totp_size, "0")
end
defp hmac(secret, time, period) do
moving_factor = <<Integer.floor_div(time, period)::64>>
hmac_sha(secret, moving_factor)
end
# TODO: Remove me when we require OTP 22.1
if Code.ensure_loaded?(:crypto) and function_exported?(:crypto, :mac, 4) do
defp hmac_sha(key, data), do: :crypto.mac(:hmac, :sha, key, data)
else
defp hmac_sha(key, data), do: :crypto.hmac(:sha, key, data)
end
defp hmac_truncate(hmac) do
  # Dynamic truncation per RFC 4226: the low nibble of the final byte of the
  # 20-byte SHA-1 digest is an offset into the digest...
  <<_::19-binary, _::4, offset::4>> = hmac
  # ...take the 4 bytes starting at that offset...
  <<_::size(offset)-binary, p::4-binary, _::binary>> = hmac
  # ...and mask the top bit to get a 31-bit non-negative integer.
  <<_::1, bits::31>> = p
  bits
end
@doc """
Checks if the given `otp` code matches the secret.
## Options
* :time - The time (either `%NaiveDateTime{}`, `%DateTime{}` or unix format) to
be used. Default is `System.os_time(:second)`
* :since - The last time the secret was used, see "Preventing TOTP code reuse" next
* :period - The period (in seconds) in which the code is valid. Default is `30`.
## Preventing TOTP code reuse
The `:since` option can be used to prevent TOTP codes from being reused. When set
to the time when the last code was entered, only codes generated after that will
be considered valid. This means a user may have to wait for the duration of the
`:period` before they can enter a valid code again. This implementation meets the
[TOTP RFC](https://datatracker.ietf.org/doc/html/rfc6238#section-5.2) requirements.
## Grace period
In some cases it is preferable to allow the user more time to validate the code than
the initial period (mostly 30 seconds), the so-called grace period. Although this library
does not support this out of the box, you can achieve the same functionality by using
the `:time` option.
def valid_code?(secret, otp) do
time = System.os_time(:second)
NimbleTOTP.valid?(secret, otp, time: time) or NimbleTOTP.valid?(secret, otp, time: time - 30)
end
In this example, by validating against the current time and also against 30 seconds
ago, we allow the _previous_ code to still be valid.
"""
@spec valid?(binary(), binary(), [option() | validate_option()]) :: boolean()
def valid?(secret, otp, opts \\ [])
def valid?(secret, <<a1, a2, a3, a4, a5, a6>>, opts) do
time = opts |> Keyword.get(:time, System.os_time(:second)) |> to_unix()
period = Keyword.get(opts, :period, @default_totp_period)
<<e1, e2, e3, e4, e5, e6>> = verification_code(secret, time, period)
(bxor(e1, a1) ||| bxor(e2, a2) ||| bxor(e3, a3) ||| bxor(e4, a4) ||| bxor(e5, a5) |||
bxor(e6, a6)) === 0 and not reused?(time, period, opts)
end
def valid?(_secret, _otp, _opts), do: false
@spec reused?(integer(), pos_integer(), [option() | validate_option()]) :: boolean()
defp reused?(time, period, opts) do
if since = Keyword.get(opts, :since) do
Integer.floor_div(time, period) <= Integer.floor_div(to_unix(since), period)
else
false
end
end
@spec to_unix(NaiveDateTime.t()) :: integer()
defp to_unix(%NaiveDateTime{} = naive_date_time),
do: NaiveDateTime.diff(naive_date_time, ~N[1970-01-01 00:00:00])
@spec to_unix(DateTime.t()) :: integer()
defp to_unix(%DateTime{} = date_time), do: DateTime.to_unix(date_time)
@spec to_unix(integer()) :: integer()
defp to_unix(epoch) when is_integer(epoch), do: epoch
end
# lib/nimble_totp.ex
defmodule Kino.Output do
@moduledoc """
A number of output formats supported by Livebook.
"""
import Kernel, except: [inspect: 2]
@typedoc """
Livebook cell output may be one of these values and gets rendered accordingly.
"""
@type t ::
ignored()
| stdout()
| text()
| markdown()
| image()
| js()
| frame()
| input()
| control()
@typedoc """
An empty output that should be ignored whenever encountered.
"""
@type ignored :: :ignored
@typedoc """
IO text output; adjacent outputs of this type are treated as a whole
"""
@type stdout :: {:stdout, binary()}
@typedoc """
Standalone text block.
"""
@type text :: {:text, binary()}
@typedoc """
Markdown content.
"""
@type markdown :: {:markdown, binary()}
@typedoc """
A raw image in the given format.
"""
@type image :: {:image, content :: binary(), mime_type :: binary()}
@typedoc """
JavaScript powered output with dynamic data and events.
This output points to a server process that serves data requests
and sends custom events.
## Communication protocol
A client process should connect to the server process by sending:
{:connect, pid(), info :: %{ref: ref(), origin: term()}}
And expect the following reply:
{:connect_reply, initial_data, info :: %{ref: ref()}}
The server process may then keep sending one of the following events:
{:event, event :: String.t(), payload :: term(), info :: %{ref: ref()}}
The client process may keep sending one of the following events:
{:event, event :: String.t(), payload :: term(), info :: %{ref: ref(), origin: term()}}
See `Kino.JS` and `Kino.JS.Live` for more details.
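A hedged sketch of the client side of this handshake (`server_pid`, `ref`,
and `origin` are placeholders):

    send(server_pid, {:connect, self(), %{ref: ref, origin: origin}})

    receive do
      {:connect_reply, initial_data, %{ref: ^ref}} -> initial_data
    end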
"""
@type js() :: {:js, info :: js_info()}
@typedoc """
Data describing a custom JS output component.
* `:ref` - unique output identifier
* `:pid` - the server process holding the data
## Assets
The `:assets` map includes information about the relevant files.
* `:archive_path` - an absolute path to a `.tar.gz` archive
with all the assets
* `:hash` - a checksum of all assets in the archive
* `:js_path` - a relative asset path pointing to the JavaScript
entrypoint module
## Export
The `:export` map describes how the output should be persisted.
The output data is put in a Markdown fenced code block.
* `:info_string` - used as the info string for the Markdown
code block
* `:key` - in case the data is a map and only a specific part
should be exported
"""
@type js_info :: %{
ref: ref(),
pid: Process.dest(),
assets: %{
archive_path: String.t(),
hash: String.t(),
js_path: String.t()
},
export:
nil
| %{
info_string: String.t(),
key: nil | term()
}
}
@typedoc """
Outputs placeholder.
Frame with type `:default` includes the initial list of outputs.
Other types can be used to update outputs within the given frame.
In all cases the outputs order is reversed, that is, most recent
outputs are at the top of the stack.
"""
@type frame :: {:frame, outputs :: list(t()), frame_info()}
@type frame_info :: %{
ref: frame_ref(),
type: :default | :replace | :append
}
@type frame_ref :: String.t()
@typedoc """
An input field.
All inputs have the following properties:
* `:type` - one of the recognised input types
* `:ref` - a unique identifier
* `:id` - a persistent input identifier, the same on every reevaluation
* `:label` - an arbitrary text used as the input caption
* `:default` - the initial input value
* `:destination` - the process to send event messages to
On top of that, each input type may have additional attributes.
"""
@type input :: {:input, attrs :: input_attrs()}
@type input_id :: String.t()
@type input_attrs ::
%{
type: :text,
ref: ref(),
id: input_id(),
label: String.t(),
default: String.t(),
destination: Process.dest()
}
| %{
type: :textarea,
ref: ref(),
id: input_id(),
label: String.t(),
default: String.t(),
destination: Process.dest()
}
| %{
type: :password,
ref: ref(),
id: input_id(),
label: String.t(),
default: String.t(),
destination: Process.dest()
}
| %{
type: :number,
ref: ref(),
id: input_id(),
label: String.t(),
default: number() | nil,
destination: Process.dest()
}
| %{
type: :url,
ref: ref(),
id: input_id(),
label: String.t(),
default: String.t() | nil,
destination: Process.dest()
}
| %{
type: :select,
ref: ref(),
id: input_id(),
label: String.t(),
default: term(),
destination: Process.dest(),
options: list({value :: term(), label :: String.t()})
}
| %{
type: :checkbox,
ref: ref(),
id: input_id(),
label: String.t(),
default: boolean(),
destination: Process.dest()
}
| %{
type: :range,
ref: ref(),
id: input_id(),
label: String.t(),
default: number(),
destination: Process.dest(),
min: number(),
max: number(),
step: number()
}
| %{
type: :color,
ref: ref(),
id: input_id(),
label: String.t(),
default: String.t(),
destination: Process.dest()
}
@typedoc """
A control widget.
All controls have the following properties:
* `:type` - one of the recognised control types
* `:ref` - a unique identifier
* `:destination` - the process to send event messages to
On top of that, each control type may have additional attributes.
## Events
All control events are sent to `:destination` as `{:event, id, info}`,
where info is a map including additional details. In particular, it
always includes `:origin`, which is an opaque identifier of the client
that triggered the event.
"""
@type control :: {:control, attrs :: control_attrs()}
@type control_attrs ::
%{
type: :keyboard,
ref: ref(),
destination: Process.dest(),
events: list(:keyup | :keydown | :status)
}
| %{
type: :button,
ref: ref(),
destination: Process.dest(),
label: String.t()
}
| %{
type: :form,
ref: ref(),
destination: Process.dest(),
fields: list({field :: atom(), input_attrs()}),
submit: String.t() | nil,
# Currently we always use true, but we can support
# other tracking modes in the future
report_changes: %{(field :: atom()) => true},
reset_on_submit: list(field :: atom())
}
@type ref :: String.t()
@doc """
See `t:text/0`.
"""
@spec text(binary()) :: t()
def text(text) when is_binary(text) do
{:text, text}
end
@doc """
See `t:markdown/0`.
"""
@spec markdown(binary()) :: t()
def markdown(content) when is_binary(content) do
{:markdown, content}
end
@doc """
See `t:image/0`.
"""
@spec image(binary(), binary()) :: t()
def image(content, mime_type) when is_binary(content) and is_binary(mime_type) do
{:image, content, mime_type}
end
@doc """
See `t:js/0`.
"""
@spec js(js_info()) :: t()
def js(info) when is_map(info) do
{:js, info}
end
@doc """
See `t:frame/0`.
"""
@spec frame(list(t()), frame_info()) :: t()
def frame(outputs, info) when is_list(outputs) and is_map(info) do
{:frame, outputs, info}
end
@doc """
See `t:input/0`.
"""
@spec input(input_attrs()) :: t()
def input(attrs) when is_map(attrs) do
{:input, attrs}
end
@doc """
See `t:control/0`.
"""
@spec control(control_attrs()) :: t()
def control(attrs) when is_map(attrs) do
{:control, attrs}
end
@doc """
Returns `t:text/0` with the inspected term.
"""
@spec inspect(term(), keyword()) :: t()
def inspect(term, opts \\ []) do
inspected = Kernel.inspect(term, inspect_opts(opts))
text(inspected)
end
defp inspect_opts(opts) do
default_opts = [pretty: true, width: 100, syntax_colors: syntax_colors()]
config_opts = Kino.Config.configuration(:inspect, [])
default_opts
|> Keyword.merge(config_opts)
|> Keyword.merge(opts)
end
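# Precedence: caller-supplied opts override application config, which in
# turn overrides the defaults above.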
defp syntax_colors() do
[
atom: :blue,
boolean: :magenta,
number: :blue,
nil: :magenta,
regex: :red,
string: :green,
reset: :reset
]
end
@doc """
Generates a random binary identifier.
"""
@spec random_ref() :: ref()
def random_ref() do
:crypto.strong_rand_bytes(20) |> Base.encode32(case: :lower)
end
end
# lib/kino/output.ex
defprotocol Casex.Serializable do
@moduledoc """
Protocol controlling how a value is serialized. It is useful for handling your
app's custom structs; without it, `Casex` will skip them and pass them directly to Jason.
## Deriving
The protocol allows leveraging the Elixir's `@derive` feature
to simplify protocol implementation in trivial cases. Accepted
options are:
* `:only` - encodes only values of specified keys.
* `:except` - encodes all struct fields except specified keys.
By default, all keys except the `:__struct__` key are serialized.
The derived implementation also returns a compile-time map of the camelized
keys in order to speed up the case conversion.
## Example
Let's assume a presence of the following struct:
defmodule Test do
defstruct [:foo, :bar, :baz]
end
If we were to call `@derive Casex.Serializable` just before `defstruct`,
an implementation similar to the following implementation would be generated:
defimpl Casex.Serializable, for: Test do
def serialize(data) do
{Map.take(data, [:foo, :bar, :baz]), %{foo: "foo", bar: "bar", baz: "baz"}}
end
end
If we called `@derive {Casex.Serializable, only: [:foo]}`, an implementation
similar to the following implementation would be generated:
defimpl Casex.Serializable, for: Test do
def serialize(data) do
{Map.take(data, [:foo]), %{foo: "foo"}}
end
end
If we called `@derive {Casex.Serializable, except: [:foo]}`, an implementation
similar to the following implementation would be generated:
defimpl Casex.Serializable, for: Test do
def serialize(data) do
{Map.take(data, [:bar, :baz]), %{bar: "bar", baz: "baz"}}
end
end
"""
@fallback_to_any true
@spec serialize(data :: any()) :: any() | {any(), camelized_dict :: map()}
def serialize(data)
end
defimpl Casex.Serializable, for: Any do
defmacro __deriving__(module, struct, options) do
fields = fields_to_encode(struct, options)
camelized_dict =
fields
|> Enum.map(fn field -> {field, field |> to_string() |> Recase.to_camel()} end)
|> Map.new()
quote do
defimpl Casex.Serializable, for: unquote(module) do
def serialize(data) do
{Map.take(data, unquote(fields)), unquote(Macro.escape(camelized_dict))}
end
end
end
end
def serialize(data), do: data
defp fields_to_encode(struct, opts) do
cond do
only = Keyword.get(opts, :only) ->
only
except = Keyword.get(opts, :except) ->
Map.keys(struct) -- [:__struct__ | except]
true ->
Map.keys(struct) -- [:__struct__]
end
end
end
# lib/casex/serializable.ex
defimpl Inspect, for: Graph do
# For graphs with fewer than 100 vertices, we'll try to pretty print them.
# However, we should avoid doing so with larger graphs, as it would likely cause outrageous
# memory consumption, not to mention be expensive to calculate, and the pretty form is not
# very useful at that size anyway.
def inspect(%Graph{type: type, vertices: vs, out_edges: es, edges: meta}, opts)
when map_size(vs) < 100 do
# The goal here is to strip out the ids map, convert the vertices map to a list of vertices
# and convert the map of edges to their reified forms (i.e. the actual vertex term is used in place of ids)
# we also want to respect the inspect options as much as possible, so we do this all the hard way by
# constructing the inspect algebra by hand
vs_doc = Inspect.Algebra.to_doc(Map.values(vs), opts)
doc =
Inspect.Algebra.concat([
Inspect.Algebra.empty(),
"#Graph<type: #{type}, vertices:",
" ",
vs_doc,
",",
" ",
"edges: [",
""
])
doc =
Stream.flat_map(es, fn {v_id, out_neighbors} ->
v = Inspect.Algebra.to_doc(Map.get(vs, v_id), opts)
Enum.flat_map(out_neighbors, fn out_id ->
out_v = Map.get(vs, out_id)
out_v_doc = Inspect.Algebra.to_doc(out_v, opts)
Enum.map(Map.fetch!(meta, {v_id, out_id}), fn
{nil, _} when type == :directed ->
[v, " -> ", out_v_doc]
{nil, _} ->
[v, " <-> ", out_v_doc]
{label, _}
when type == :directed and (is_binary(label) or is_number(label) or is_atom(label)) ->
[v, " -[#{label}]-> ", out_v_doc]
{label, _} when is_binary(label) or is_number(label) or is_atom(label) ->
[v, " <-[#{label}]-> ", out_v_doc]
{label, _} when type == :directed ->
[v, " -[#{inspect(label)}]-> ", out_v_doc]
{label, _} ->
[v, " <-[#{inspect(label)}]-> ", out_v_doc]
end)
end)
end)
|> Enum.intersperse(", ")
|> Enum.reduce(doc, fn
doc_part, doc when is_list(doc_part) ->
Inspect.Algebra.concat([doc | doc_part])
doc_part, doc ->
Inspect.Algebra.concat(doc, doc_part)
end)
Inspect.Algebra.concat(doc, "]>")
end
# For large graphs, just print summary information about the graph
def inspect(%Graph{type: type} = g, _opts) do
num_vertices = Graph.num_vertices(g)
num_edges = Graph.num_edges(g)
"#Graph<type: #{type}, num_vertices: #{num_vertices}, num_edges: #{num_edges}>"
end
end
# lib/graph/inspect.ex
defmodule Automaton.Types.BT do
@moduledoc """
Implements the Behavior Tree (BT) state space representation.
Each tree is goal-oriented, i.e. associated with a distinct, high-level goal
which it attempts to achieve.
Behavior trees are a unique combination of state space representation
(graphical, or tree) and action-selection decision scheme with plugin
variations, where the user can choose or customize the logic for traversal and
lifecycle management.
## Notes:
- Initialization and shutdown require extra care:
- on_init: receive extra parameters, fetch data from blackboard/utility, make requests, etc.
- shutdown: free resources so as not to affect other actions
TODO: store any currently processing nodes along with any nodes with monitor decorators
so when monitors are activated, reactivity is achieved.
Use Zipper Tree to store both?
"""
alias Automaton.Types.BT.CompositeServer
alias Automaton.Types.BT.ComponentServer
alias Automaton.Types.BT.Config.Parser
alias Automaton.Types.BT.Behavior
defmacro __using__(opts) do
automaton_config = opts[:automaton_config]
{node_type, c_types, cn_types} = Parser.call(automaton_config)
prepend =
quote do
use Behavior, automaton_config: unquote(automaton_config)
end
node_type =
cond do
Enum.member?(c_types, node_type) ->
quote do: use(CompositeServer, automaton_config: unquote(automaton_config))
Enum.member?(cn_types, node_type) ->
quote do: use(ComponentServer, automaton_config: unquote(automaton_config))
end
control =
quote bind_quoted: [automaton_config: automaton_config] do
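# One BT tick: (re)initialize unless already running, run update/1, then
# either terminate on completion or, for leaf nodes (configs without
# children), schedule the next tick.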
def tick(state) do
init_state = if state.status != :bh_running, do: on_init(state), else: state
{:ok, updated_state} = update(init_state)
status = updated_state.status
if status != :bh_running do
on_terminate(updated_state)
else
if !unquote(automaton_config[:children]) do
schedule_next_tick(updated_state.tick_freq)
end
end
[status, updated_state]
end
def schedule_next_tick(ms_delay) do
Process.send_after(self(), :scheduled_tick, ms_delay)
end
def handle_call(:tick, _from, state) do
[status, new_state] = tick(state)
{:reply, status, %{new_state | status: status}}
end
def handle_info(:scheduled_tick, state) do
[status, new_state] = tick(state)
{:noreply, %{new_state | status: status}}
end
# called on startup to access parent's state
def handle_cast({:initialize, parent_pid}, state) do
parent_state = GenServer.call(parent_pid, :get_state)
new_state =
if !unquote(automaton_config[:children]) do
%{state | tick_freq: parent_state.tick_freq}
else
state
end
{:noreply, %{new_state | parent: parent_pid}}
end
def handle_call(:get_state, _from, state) do
{:reply, state, state}
end
def handle_call(:status, _from, state) do
{:reply, state.status, state}
end
def handle_call(:set_running, _from, state) do
{:reply, :ok, %{state | status: :bh_running}}
end
def handle_call(:succeed, _from, state) do
{:reply, :ok, %{state | status: :bh_success}}
end
def handle_call(:fail, _from, state) do
{:reply, :ok, %{state | status: :bh_failure}}
end
def handle_call(:running?, _from, state) do
{:reply, state.status == :bh_running, state}
end
def handle_call(:aborted?, _from, state) do
{:reply, state.status == :bh_aborted, state}
end
def handle_call(:terminated?, _from, state) do
status = state.status
{:reply, status == :bh_success || status == :bh_failure, state}
end
def handle_call(:abort, _from, state) do
on_terminate(state)
{:reply, true, %{state | status: :bh_aborted}}
end
def handle_call(:reset, _from, state) do
{:reply, true, %{state | status: :bh_invalid}}
end
# Defoverridable makes the given functions in the current module overridable
defoverridable update: 1, on_init: 1, on_terminate: 1
end
[prepend, node_type, control]
end
end
# lib/automata/automaton_types/graphical/behavior_tree/control/bt.ex
defmodule Akin.SubstringSet do
@moduledoc """
Functions for string comparison where common words between the strings are compared in sets.
Two sets
#MapSet<["alice", "in", "wonderland"]>
#MapSet<["adventures", "alice", "glass", "looking", "s", "the", "through"]>
Commonality
* `common_words = "alice"`
* `common_words_plus_remaining_words_left = "aliceinwonderland"`
* `common_words_plus_remaining_words_right = "aliceadventuresglasslookingsthethrough"`
The ratio is based on the difference in string length
* if the words are similar in length according to Akin.Strategy.determine/2
* the ratio is String.jaro_distance
* if the words are dissimilar in length according to Akin.Strategy.determine/2
* the ratio is Akin.SubstringComparison.similarity/2 * @bias * scale (determined by Akin.Strategy)
Match level is based on the `:level` opt
* "normal" returns average ratio
* "weak" returns maximum ratio
"""
@behaviour Akin.Task
import Akin.Util, only: [opts: 2]
alias Akin.{Corpus, Strategy, Helper.SubstringComparison}
@bias 0.95
@spec compare(%Corpus{}, %Corpus{}, Keyword.t()) :: float()
def compare(
%Corpus{string: l_string, set: l_set},
%Corpus{string: r_string, set: r_set},
opts \\ []
) do
case Strategy.determine(l_string, r_string) do
:standard ->
similarity(l_set, r_set) |> score(opts(opts, :level))
{:substring, scale} ->
score =
substring_similarity(l_set, r_set)
|> score(opts(opts, :level))
score * @bias * scale
{:error, _} ->
0.0
end
end
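# Each ratio list built below starts with a 0.0 guard value; the "normal"
# score excludes it from the average by dividing by count - 1.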
defp score(scores, "weak"), do: Enum.max(scores)
defp score(scores, _) do
Enum.sum(scores) / (Enum.count(scores) - 1)
end
defp similarity(left, right) do
{common_words, common_words_plus_remaining_words_left,
common_words_plus_remaining_words_right} = set_operations(left, right)
[
0.0,
String.jaro_distance(common_words, common_words_plus_remaining_words_left),
String.jaro_distance(common_words, common_words_plus_remaining_words_right),
String.jaro_distance(
common_words_plus_remaining_words_left,
common_words_plus_remaining_words_right
)
]
end
defp substring_similarity(left, right) do
similarity(left, right, SubstringComparison)
end
defp similarity(left, right, ratio_mod) do
{common_words, common_words_plus_remaining_words_left,
common_words_plus_remaining_words_right} = set_operations(left, right)
[
0.0,
ratio_mod.similarity(common_words, common_words_plus_remaining_words_left),
ratio_mod.similarity(common_words, common_words_plus_remaining_words_right),
ratio_mod.similarity(
common_words_plus_remaining_words_left,
common_words_plus_remaining_words_right
)
]
end
defp set_operations(left, right) do
common_words = MapSet.intersection(left, right)
common_words_string =
common_words
|> Enum.sort()
|> Enum.join()
[
common_words_plus_remaining_words_l_string,
common_words_plus_remaining_words_r_string
] =
[left, right]
|> Enum.map(fn x ->
common_words_string <>
(x
|> MapSet.difference(common_words)
|> Enum.sort()
|> Enum.join())
end)
{
common_words_string,
common_words_plus_remaining_words_l_string,
common_words_plus_remaining_words_r_string
}
end
end
# lib/akin/algorithms/substring_set.ex
defmodule MapQueue do
@moduledoc """
MapQueue is an implemenation of a queue with a single map as the underlying
data structure.
The benefit of using a MapQueue over a list is that appending to the tail of
a list `++` can become quite an expensive operation when the list is large.
With MapQueue adding or removing items from the queue is roughtly as fast as
the same operation on a Map.
MapQueue keeps track of items in a map with integers as keys, and also tracks
the highest (`last`) and lowest (`first`) items in the `map`.
Since each item in a MapQueue is tracked by an integer, the memory consumption of
a MapQueue will be higher than that of an equivalent list or Erlang `:queue`.
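A hedged usage sketch:

    q = MapQueue.new([1, 2, 3])
    {1, q} = MapQueue.pop(q)
    q = MapQueue.push(q, 4)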
"""
@type t :: %__MODULE__{
first: integer(),
last: integer(),
map: %{required(integer()) => any()}
}
defstruct first: 0,
last: 0,
map: %{}
@spec new() :: MapQueue.t()
def new() do
%MapQueue{}
end
@spec new(any()) :: MapQueue.t()
def new(enumerable) do
append(%MapQueue{}, enumerable)
end
@spec size(MapQueue.t()) :: non_neg_integer()
def size(%MapQueue{map: map}) do
map_size(map)
end
@spec append(MapQueue.t(), any()) :: MapQueue.t()
def append(%MapQueue{} = queue, enumerable) do
Enum.into(enumerable, queue)
end
@spec prepend(MapQueue.t(), any()) :: MapQueue.t()
def prepend(%MapQueue{} = queue, enumerable) do
enumerable
|> Enum.reverse()
|> Enum.reduce(queue, fn item, acc ->
push_front(acc, item)
end)
end
@spec pop(MapQueue.t()) :: :empty | {any(), MapQueue.t()}
def pop(%MapQueue{} = queue) do
do_pop(queue, :first)
end
@spec pop(MapQueue.t(), non_neg_integer()) :: {list(), MapQueue.t()}
def pop(%MapQueue{} = queue, 0) do
{[], queue}
end
def pop(%MapQueue{map: map} = queue, _) when map_size(map) == 0 do
{[], queue}
end
def pop(%MapQueue{first: first, last: last} = queue, count) when count > 0 do
  last_popped = min(first + count - 1, last)
  # The new `first` is one past the last index popped.
  do_pop_indexes(queue, first..last_popped, :first, last_popped + 1)
end
@spec pop_rear(MapQueue.t()) :: :empty | {any(), MapQueue.t()}
def pop_rear(%MapQueue{} = queue) do
do_pop(queue, :last)
end
@spec pop_rear(MapQueue.t(), non_neg_integer()) :: {list(), MapQueue.t()}
def pop_rear(%MapQueue{} = queue, 0) do
{[], queue}
end
def pop_rear(%MapQueue{map: map} = queue, _) when map_size(map) == 0 do
{[], queue}
end
def pop_rear(%MapQueue{first: first, last: last} = queue, count) when count > 0 do
  first_popped = max(last - count + 1, first)
  # The new `last` is one below the first index popped.
  do_pop_indexes(queue, last..first_popped, :last, first_popped - 1)
end
@spec push(MapQueue.t(), any()) :: MapQueue.t()
def push(%MapQueue{last: last} = queue, value) do
add_value(queue, :last, last + 1, value)
end
@spec push_front(MapQueue.t(), any()) :: MapQueue.t()
def push_front(%MapQueue{first: first} = queue, value) do
add_value(queue, :first, first - 1, value)
end
@spec slice(MapQueue.t(), any(), any()) :: MapQueue.t()
def slice(%MapQueue{last: last}, index, _) when index > last do
MapQueue.new()
end
def slice(%MapQueue{first: first, map: map}, index, amount) do
rel_start = first + index
rel_start..(rel_start + amount - 1)
|> Enum.reduce(%MapQueue{}, fn index, acc ->
push(acc, Map.get(map, index))
end)
end
defp add_value(%MapQueue{map: map} = queue, _spot, _, value) when map_size(map) == 0 do
  # Reset the boundaries when pushing onto an empty queue so that indexes
  # left behind by earlier pops don't point at missing keys.
  %MapQueue{queue | first: 0, last: 0, map: Map.put(map, 0, value)}
end
defp add_value(%MapQueue{map: map} = queue, spot, index, value) do
queue
|> Map.put(spot, index)
|> Map.put(:map, Map.put(map, index, value))
end
defp diff_by_index_type(:last), do: -1
defp diff_by_index_type(:first), do: 1
defp do_pop(%MapQueue{map: map}, _) when map_size(map) == 0 do
:empty
end
defp do_pop(%MapQueue{map: map} = queue, index_type) do
index = Map.get(queue, index_type)
{value, new_map} = Map.pop(map, index)
queue =
queue
|> Map.put(:map, new_map)
|> Map.put(index_type, index + diff_by_index_type(index_type))
{value, queue}
end
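# Drops the popped index range from the map and moves the queue boundary
# (`:first` or `:last`) to `boundary_value`.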
defp do_pop_indexes(%MapQueue{map: map} = queue, a..b, first_or_last, boundary_value) do
indexes = Enum.into(a..b, [])
popped_items = Enum.map(indexes, fn index -> Map.get(map, index) end)
updated_map = Map.drop(map, indexes)
updated_queue =
queue
|> Map.put(first_or_last, boundary_value)
|> Map.put(:map, updated_map)
{popped_items, updated_queue}
end
defimpl Enumerable do
@spec count(MapQueue.t()) :: {:ok, non_neg_integer()}
def count(%MapQueue{map: map}) do
{:ok, map_size(map)}
end
def member?(%MapQueue{map: map}, item) do
map
|> Enum.find(fn {_, i} -> i == item end)
|> case do
nil ->
{:ok, false}
{_, _} ->
{:ok, true}
end
end
@type suspended_function ::
({:cont, any()} | {:halt, any()} | {:suspend, any()} ->
{:done, any()} | {:halted, any()} | {:suspended, any(), any()})
@spec reduce(any(), {:cont, any()} | {:halt, any()} | {:suspend, any()}, any()) ::
{:done, any()}
| {:halted, any()}
| {:suspended, any(), suspended_function()}
def reduce(_, {:halt, acc}, _) do
{:halted, acc}
end
def reduce(queue, {:suspend, acc}, fun) do
{:suspended, acc, fn acc_2 -> reduce(queue, acc_2, fun) end}
end
def reduce(%MapQueue{map: map}, {:cont, acc}, _fun) when map_size(map) == 0 do
{:done, acc}
end
def reduce(%MapQueue{} = queue, {:cont, acc}, fun) do
{popped, queue} = MapQueue.pop(queue)
reduce(queue, fun.(popped, acc), fun)
end
@spec slice(MapQueue.t()) ::
{:ok, non_neg_integer(), (non_neg_integer(), pos_integer() -> list(any()))}
def slice(%MapQueue{map: map} = queue) do
func = fn start, count ->
queue
|> MapQueue.slice(start, count)
|> Enum.into([])
end
{:ok, map_size(map), func}
end
end
defimpl Inspect do
def inspect(%MapQueue{map: map, first: first, last: last}, _opts) do
items =
[items: render_values(map, first, last), size: map_size(map)]
|> Enum.reduce([], fn {k, v}, acc ->
[Enum.join([to_string(k), ": ", to_string(v)]) | acc]
end)
|> List.flatten()
|> Enum.join(", ")
:erlang.iolist_to_binary(["#MapQueue<[", items, "]>"])
end
defp render_values(map, _, _) when map_size(map) == 0 do
wrap_brackets("")
end
defp render_values(map, first, _) when map_size(map) == 1 do
map
|> render_value(first)
|> wrap_brackets
end
defp render_values(map, first, last) do
[render_value(map, first), ", ..., ", render_value(map, last)]
|> Enum.join("")
|> wrap_brackets()
end
def render_value(map, key) do
map
|> Map.get(key)
|> inspect()
end
defp wrap_brackets(item) do
"[" <> item <> "]"
end
end
defimpl Collectable, for: MapQueue do
def into(original) do
collector_fun = fn
queue, {:cont, item} -> MapQueue.push(queue, item)
queue, :done -> queue
_queue, :halt -> :ok
end
{original, collector_fun}
end
end
end
# lib/map_queue.ex
defmodule Statistex do
@moduledoc """
Calculate all the statistics for given samples.
Works all at once with `statistics/1` or has a lot of functions that can be triggered individually.
To avoid wasting computation, functions can be given the values they depend on as optional keyword arguments, so that these values are used instead of being recalculated. For an example see `average/2`.
Most statistics don't really make sense when there are no samples; for that reason all functions except for `sample_size/1` raise `ArgumentError` when handed an empty list.
If it's possible for your program to throw an empty list at Statistex, it is suggested you handle that before handing it to Statistex, taking care of the "no reasonable statistics" path entirely separately.
Limitations of the Erlang standard library apply (particularly, `:math.pow/2` raises for VERY large numbers).
"""
alias Statistex.{Mode, Percentile}
require Integer
defstruct [
:total,
:average,
:variance,
:standard_deviation,
:standard_deviation_ratio,
:median,
:percentiles,
:frequency_distribution,
:mode,
:minimum,
:maximum,
sample_size: 0
]
@typedoc """
All the statistics `statistics/1` computes from the samples.
For a description of what a given value means, please check out the function here by the same name; it will have an explanation.
"""
@type t :: %__MODULE__{
total: number,
average: float,
variance: float,
standard_deviation: float,
standard_deviation_ratio: float,
median: number,
percentiles: percentiles,
frequency_distribution: %{sample => pos_integer},
mode: mode,
minimum: number,
maximum: number,
sample_size: non_neg_integer
}
@typedoc """
The samples to compute statistics from.
Importantly, this list must not be empty (it must include at least one sample), otherwise an `ArgumentError` will be raised.
"""
@type samples :: [sample, ...]
@typedoc """
A single sample.
"""
@type sample :: number
@typedoc """
The optional configuration handed to a lot of functions.
Keys used are function dependent and are documented there.
"""
@type configuration :: keyword
@typedoc """
Careful with the mode: it might be multiple values, one value, or nothing. 😱 See `mode/1`.
"""
@type mode :: [sample()] | sample() | nil
@typedoc """
The percentiles map returned by `percentiles/2`.
"""
@type percentiles :: %{number() => float}
@empty_list_error_message "Passed an empty list ([]) to calculate statistics from, please pass a list containing at least one number."
@doc """
Calculate all statistics Statistex offers for a given list of numbers.
The statistics themselves are described in the individual functions that can be used to calculate the individual values.
`ArgumentError` is raised if the given list is empty.
## Options
With the `percentiles` option, arguments for the calculation of percentiles (see `percentiles/2`) can be given. The 50th percentile is always calculated, as it is the median.
## Examples
iex> Statistex.statistics([200, 400, 400, 400, 500, 500, 500, 700, 900])
%Statistex{
average: 500.0,
variance: 40_000.0,
standard_deviation: 200.0,
standard_deviation_ratio: 0.4,
median: 500.0,
percentiles: %{50 => 500.0},
frequency_distribution: %{
200 => 1,
400 => 3,
500 => 3,
700 => 1,
900 => 1
},
mode: [500, 400],
minimum: 200,
maximum: 900,
sample_size: 9,
total: 4500
}
iex> Statistex.statistics([])
** (ArgumentError) Passed an empty list ([]) to calculate statistics from, please pass a list containing at least one number.
iex> Statistex.statistics([0, 0, 0, 0])
%Statistex{
average: 0.0,
variance: 0.0,
standard_deviation: 0.0,
standard_deviation_ratio: 0.0,
median: 0.0,
percentiles: %{50 => 0.0},
frequency_distribution: %{0 => 4},
mode: 0,
minimum: 0,
maximum: 0,
sample_size: 4,
total: 0
}
"""
@spec statistics(samples, configuration) :: t()
def statistics(samples, configuration \\ [])
def statistics([], _) do
raise(ArgumentError, @empty_list_error_message)
end
def statistics(samples, configuration) do
total = total(samples)
sample_size = length(samples)
average = average(samples, total: total, sample_size: sample_size)
variance = variance(samples, average: average, sample_size: sample_size)
standard_deviation = standard_deviation(samples, variance: variance)
standard_deviation_ratio =
standard_deviation_ratio(samples, standard_deviation: standard_deviation)
percentiles = calculate_percentiles(samples, configuration)
frequency_distribution = frequency_distribution(samples)
%__MODULE__{
total: total,
average: average,
variance: variance,
standard_deviation: standard_deviation,
standard_deviation_ratio: standard_deviation_ratio,
median: median(samples, percentiles: percentiles),
percentiles: percentiles,
frequency_distribution: frequency_distribution,
mode: mode(samples, frequency_distribution: frequency_distribution),
minimum: minimum(samples),
maximum: maximum(samples),
sample_size: sample_size
}
end
@doc """
The total of all samples added together.
An `ArgumentError` is raised if the given list is empty.
## Examples
iex> Statistex.total([1, 2, 3, 4, 5])
15
iex> Statistex.total([10, 10.5, 5])
25.5
iex> Statistex.total([-10, 5, 3, 2])
0
iex> Statistex.total([])
** (ArgumentError) Passed an empty list ([]) to calculate statistics from, please pass a list containing at least one number.
"""
@spec total(samples) :: number
def total([]), do: raise(ArgumentError, @empty_list_error_message)
def total(samples), do: Enum.sum(samples)
@doc """
Number of samples in the given list.
Nothing too fancy here; this just calls `length(list)` and is only provided for completeness' sake.
## Examples
iex> Statistex.sample_size([])
0
iex> Statistex.sample_size([1, 1, 1, 1, 1])
5
"""
@spec sample_size([sample]) :: non_neg_integer
def sample_size(samples), do: length(samples)
@doc """
Calculate the average.
It's... well, the average.
When the given samples are empty there is no average.
An `ArgumentError` is raised if the given list is empty.
## Options
If you already have these values, you can provide both `:total` and `:sample_size`. Should you provide both, the given samples are wholly ignored.
## Examples
iex> Statistex.average([5])
5.0
iex> Statistex.average([600, 470, 170, 430, 300])
394.0
iex> Statistex.average([-1, 1])
0.0
iex> Statistex.average([2, 3, 4], sample_size: 3)
3.0
iex> Statistex.average([20, 20, 20, 20, 20], total: 100, sample_size: 5)
20.0
iex> Statistex.average(:ignored, total: 100, sample_size: 5)
20.0
iex> Statistex.average([])
** (ArgumentError) Passed an empty list ([]) to calculate statistics from, please pass a list containing at least one number.
"""
@spec average(samples, keyword) :: float
def average(samples, options \\ [])
def average([], _), do: raise(ArgumentError, @empty_list_error_message)
def average(samples, options) do
total = Keyword.get_lazy(options, :total, fn -> total(samples) end)
sample_size = Keyword.get_lazy(options, :sample_size, fn -> sample_size(samples) end)
total / sample_size
end
@doc """
Calculate the variance.
A measurement of how much the samples vary: the higher, the more the samples vary. This is the variance of a sample, hence its calculation divides by `sample_size - 1` (Bessel's correction).
An `ArgumentError` is raised if the given list is empty.
## Options
If already calculated, the `:average` and `:sample_size` options can be provided to avoid recalculating those values.
## Examples
iex> Statistex.variance([4, 9, 11, 12, 17, 5, 8, 12, 12])
16.0
iex> Statistex.variance([4, 9, 11, 12, 17, 5, 8, 12, 12], sample_size: 9, average: 10.0)
16.0
iex> Statistex.variance([42])
0.0
iex> Statistex.variance([1, 1, 1, 1, 1, 1, 1])
0.0
iex> Statistex.variance([])
** (ArgumentError) Passed an empty list ([]) to calculate statistics from, please pass a list containing at least one number.
"""
@spec variance(samples, keyword) :: float
def variance(samples, options \\ [])
def variance([], _), do: raise(ArgumentError, @empty_list_error_message)
def variance(samples, options) do
sample_size = Keyword.get_lazy(options, :sample_size, fn -> sample_size(samples) end)
average =
Keyword.get_lazy(options, :average, fn -> average(samples, sample_size: sample_size) end)
do_variance(samples, average, sample_size)
end
defp do_variance(_samples, _average, 1), do: 0.0
defp do_variance(samples, average, sample_size) do
total_variance =
Enum.reduce(samples, 0, fn sample, total ->
total + :math.pow(sample - average, 2)
end)
total_variance / (sample_size - 1)
end
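# A hand-checked sketch of the calculation above, matching the doctest:
# for [4, 9, 11, 12, 17, 5, 8, 12, 12] the average is 90 / 9 = 10.0, the
# summed squared deviations are 36 + 1 + 1 + 4 + 49 + 25 + 4 + 4 + 4 = 128,
# and 128 / (9 - 1) = 16.0 (Bessel's correction divides by sample_size - 1).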
@doc """
Calculate the standard deviation.
A measurement of how much the samples vary: the higher, the more the samples vary. It's the square root of the variance. Unlike the variance, its unit is the same as that of the samples (as calculating the variance involves squaring).
## Options
If already calculated, the `:variance` option can be provided to avoid recalculating that value.
An `ArgumentError` is raised if the given list is empty.
## Examples
iex> Statistex.standard_deviation([4, 9, 11, 12, 17, 5, 8, 12, 12])
4.0
iex> Statistex.standard_deviation([4, 9, 11, 12, 17, 5, 8, 12, 12], variance: 16.0)
4.0
iex> Statistex.standard_deviation([42])
0.0
iex> Statistex.standard_deviation([1, 1, 1, 1, 1, 1, 1])
0.0
iex> Statistex.standard_deviation([])
** (ArgumentError) Passed an empty list ([]) to calculate statistics from, please pass a list containing at least one number.
"""
@spec standard_deviation(samples, keyword) :: float
def standard_deviation(samples, options \\ [])
def standard_deviation([], _), do: raise(ArgumentError, @empty_list_error_message)
def standard_deviation(samples, options) do
variance = Keyword.get_lazy(options, :variance, fn -> variance(samples) end)
:math.sqrt(variance)
end
@doc """
Calculate the standard deviation relative to the average.
This helps put the absolute standard deviation value into perspective by expressing it relative to the average. It's what percentage of the absolute value of the average the standard deviation takes.
An `ArgumentError` is raised if the given list is empty.
## Options
If already calculated, the `:average` and `:standard_deviation` options can be provided to avoid recalculating those values.
If both values are provided, the provided samples will be ignored.
## Examples
iex> Statistex.standard_deviation_ratio([4, 9, 11, 12, 17, 5, 8, 12, 12])
0.4
iex> Statistex.standard_deviation_ratio([-4, -9, -11, -12, -17, -5, -8, -12, -12])
0.4
iex> Statistex.standard_deviation_ratio([4, 9, 11, 12, 17, 5, 8, 12, 12], average: 10.0, standard_deviation: 4.0)
0.4
iex> Statistex.standard_deviation_ratio(:ignored, average: 10.0, standard_deviation: 4.0)
0.4
iex> Statistex.standard_deviation_ratio([])
** (ArgumentError) Passed an empty list ([]) to calculate statistics from, please pass a list containing at least one number.
"""
@spec standard_deviation_ratio(samples, keyword) :: float
def standard_deviation_ratio(samples, options \\ [])
def standard_deviation_ratio([], _), do: raise(ArgumentError, @empty_list_error_message)
def standard_deviation_ratio(samples, options) do
average = Keyword.get_lazy(options, :average, fn -> average(samples) end)
std_dev =
Keyword.get_lazy(options, :standard_deviation, fn ->
standard_deviation(samples, average: average)
end)
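# With an average of 0 the ratio is mathematically undefined, so 0.0 is
# returned by convention (and a division by zero is avoided).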
if average == 0 do
0.0
else
abs(std_dev / average)
end
end
@median_percentile 50
defp calculate_percentiles(samples, configuration) do
percentiles_configuration = Keyword.get(configuration, :percentiles, [])
# median_percentile is manually added so that it can be used directly by median
percentiles_configuration = Enum.uniq([@median_percentile | percentiles_configuration])
percentiles(samples, percentiles_configuration)
end
@doc """
Calculates the value at the `percentile_rank`-th percentile.
Think of this as the value below which `percentile_rank` percent of the samples lie. For example, if `Statistex.percentiles(samples, 99)` returns `%{99 => 123.45}`, then 99% of samples are less than 123.45.
Passing a number for `percentile_rank` calculates a single percentile. Passing a list of numbers calculates multiple percentiles, and returns them as a map like `%{90 => 45.6, 99 => 78.9}`, where the keys are the percentile numbers and the values are the percentile values.
Percentiles must be between 0 and 100 (excluding the boundaries).
The method used for interpolation is [described here and recommended by NIST](https://www.itl.nist.gov/div898/handbook/prc/section2/prc262.htm).
An `ArgumentError` is raised if the given list is empty.
## Examples
iex> Statistex.percentiles([5, 3, 4, 5, 1, 3, 1, 3], 12.5)
%{12.5 => 1.0}
iex> Statistex.percentiles([5, 3, 4, 5, 1, 3, 1, 3], [50])
%{50 => 3.0}
iex> Statistex.percentiles([5, 3, 4, 5, 1, 3, 1, 3], [75])
%{75 => 4.75}
iex> Statistex.percentiles([5, 3, 4, 5, 1, 3, 1, 3], 99)
%{99 => 5.0}
iex> Statistex.percentiles([5, 3, 4, 5, 1, 3, 1, 3], [50, 75, 99])
%{50 => 3.0, 75 => 4.75, 99 => 5.0}
iex> Statistex.percentiles([5, 3, 4, 5, 1, 3, 1, 3], 100)
** (ArgumentError) percentile must be between 0 and 100, got: 100
iex> Statistex.percentiles([5, 3, 4, 5, 1, 3, 1, 3], 0)
** (ArgumentError) percentile must be between 0 and 100, got: 0
iex> Statistex.percentiles([], [50])
** (ArgumentError) Passed an empty list ([]) to calculate statistics from, please pass a list containing at least one number.
"""
@spec percentiles(samples, number | [number(), ...]) ::
percentiles()
defdelegate(percentiles(samples, percentiles), to: Percentile)
@doc """
A map showing which sample occurs how often in the samples.
Maps each concrete sample value to the number of times it was observed in the samples.
An `ArgumentError` is raised if the given list is empty.
## Examples
iex> Statistex.frequency_distribution([1, 2, 4.23, 7, 2, 99])
%{
2 => 2,
1 => 1,
4.23 => 1,
7 => 1,
99 => 1
}
iex> Statistex.frequency_distribution([])
** (ArgumentError) Passed an empty list ([]) to calculate statistics from, please pass a list containing at least one number.
"""
@spec frequency_distribution(samples) :: %{required(sample) => pos_integer}
def frequency_distribution([]), do: raise(ArgumentError, @empty_list_error_message)
def frequency_distribution(samples) do
Enum.reduce(samples, %{}, fn sample, counts ->
Map.update(counts, sample, 1, fn old_value -> old_value + 1 end)
end)
end
@doc """
Calculates the mode of the given samples.
The mode is the sample (or samples) that occurs most often. Often one value, but it can be multiple values if they occur the same number of times. If no value occurs at least twice, there is no mode and hence `nil` is returned.
An `ArgumentError` is raised if the given list is empty.
## Options
If already calculated, the `:frequency_distribution` option can be provided to avoid recalulating it.
## Examples
iex> Statistex.mode([5, 3, 4, 5, 1, 3, 1, 3])
3
iex> Statistex.mode([1, 2, 3, 4, 5])
nil
iex> Statistex.mode([])
** (ArgumentError) Passed an empty list ([]) to calculate statistics from, please pass a list containing at least one number.
iex> mode = Statistex.mode([5, 3, 4, 5, 1, 3, 1])
iex> Enum.sort(mode)
[1, 3, 5]
"""
@spec mode(samples, keyword) :: mode
def mode(samples, opts \\ []), do: Mode.mode(samples, opts)
@doc """
Calculates the median of the given samples.
The median can be thought of as separating the higher half from the lower half of the samples.
When all samples are sorted, this is the middle value (or the average of the two middle values when the number of samples is even).
It is more stable against outliers than the average.
An `ArgumentError` is raised if the given list is empty.
## Examples
iex> Statistex.median([1, 3, 4, 6, 7, 8, 9])
6.0
iex> Statistex.median([1, 2, 3, 4, 5, 6, 8, 9])
4.5
iex> Statistex.median([0])
0.0
iex> Statistex.median([])
** (ArgumentError) Passed an empty list ([]) to calculate statistics from, please pass a list containing at least one number.
"""
@spec median(samples, keyword) :: number
def median(samples, options \\ [])
def median([], _), do: raise(ArgumentError, @empty_list_error_message)
def median(samples, options) do
percentiles =
Keyword.get_lazy(options, :percentiles, fn -> percentiles(samples, @median_percentile) end)
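# Prefer a precomputed 50th percentile if one was handed in; otherwise
# calculate it on the spot.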
Map.get_lazy(percentiles, @median_percentile, fn ->
samples |> percentiles(@median_percentile) |> Map.fetch!(@median_percentile)
end)
end
@doc """
The biggest sample.
An `ArgumentError` is raised if the given list is empty.
## Examples
iex> Statistex.maximum([1, 100, 24])
100
iex> Statistex.maximum([])
** (ArgumentError) Passed an empty list ([]) to calculate statistics from, please pass a list containing at least one number.
"""
@spec maximum(samples) :: sample
def maximum([]), do: raise(ArgumentError, @empty_list_error_message)
def maximum(samples), do: Enum.max(samples)
@doc """
The smallest sample.
An `ArgumentError` is raised if the given list is empty.
## Examples
iex> Statistex.minimum([1, 100, 24])
1
iex> Statistex.minimum([])
** (ArgumentError) Passed an empty list ([]) to calculate statistics from, please pass a list containing at least one number.
"""
@spec minimum(samples) :: sample
def minimum([]), do: raise(ArgumentError, @empty_list_error_message)
def minimum(samples), do: Enum.min(samples)
end
|
lib/statistex.ex
| 0.950537
| 0.745329
|
statistex.ex
|
starcoder
|
defmodule Recipe do
@moduledoc """
### Intro
The `Recipe` module allows implementing multi-step, reversible workflows.
For example, you may want to parse some incoming data, write to two different
data stores and then push some notifications. If anything fails, you want to
roll back specific changes in different data stores. `Recipe` allows you to do
that.
In addition, a recipe doesn't enforce any constraint around which processes
execute which step. You can assume that unless you explicitly involve other
processes, all code that builds a recipe is executed by default by the
calling process.
Ideal use cases are:
- multi-step operations where you need basic transactional properties, e.g.
saving data to PostgreSQL and Redis, rolling back the change in PostgreSQL if
the Redis write fails
- interaction with services that simply don't support transactions
- composing multiple workflows that can share steps (with the
help of `Kernel.defdelegate/2`)
- trace workflows execution via a correlation id
You can avoid using this library if:
- A simple `with` macro will do
- You don't care about failure semantics and just want your operation to
crash the calling process
- Using Ecto, you can express your workflow with `Ecto.Multi`
Heavily inspired by the `ktn_recipe` module included in [inaka/erlang-katana](https://github.com/inaka/erlang-katana).
### Core ideas
- A workflow is a set of discrete steps
- Each step can have a specific error handling scenario
- Each step is a separate function that receives a state
with the result of all previous steps
- Each step should be easily testable in isolation
- Each workflow run is identified by a correlation id
- Each workflow needs to be easily audited via logs or an event store
### Example
The example below outlines a possible workflow where a user creates a new
conversation, passing an initial message.
Each step is named in `steps/0`. Each step definition uses data added to the
workflow state and performs a specific task.
Any error shortcuts the workflow to `handle_error/3`, where a specialized
clause for `:create_initial_message` deletes the conversation if the system
fails to create the initial message (thereby simulating a transaction).
defmodule StartNewConversation do
use Recipe
### Public API
def run(user_id, initial_message_text) do
state = Recipe.initial_state
|> Recipe.assign(:user_id, user_id)
|> Recipe.assign(:initial_message_text, initial_message_text)
Recipe.run(__MODULE__, state)
end
### Callbacks
def steps, do: [:validate,
:create_conversation,
:create_initial_message,
:broadcast_new_conversation,
:broadcast_new_message]
def handle_result(state) do
state.assigns.conversation
end
def handle_error(:create_initial_message, _error, state) do
Service.Conversation.delete(state.conversation.id)
end
def handle_error(_step, error, _state), do: error
### Steps
def validate(state) do
text = state.assigns.initial_message_text
if MessageValidator.valid_text?(text) do
{:ok, state}
else
{:error, :empty_message_text}
end
end
def create_conversation(state) do
case Service.Conversation.create(state.assigns.user_id) do
{:ok, conversation} ->
{:ok, Recipe.assign(state, :conversation, conversation)}
error ->
error
end
end
def create_initial_message(state) do
%{user_id: user_id,
conversation: conversation,
initial_message_text: text} = state.assigns
case Service.Message.create(user_id, conversation.id, text) do
{:ok, message} ->
{:ok, Recipe.assign(state, :initial_message, message)}
error ->
error
end
end
def broadcast_new_conversation(state) do
Dispatcher.broadcast("conversation-created", state.assigns.conversation)
{:ok, state}
end
def broadcast_new_message(state) do
Dispatcher.broadcast("message-created", state.assigns.initial_message)
{:ok, state}
end
end
### Telemetry
A recipe run can be instrumented with callbacks for start, end and each step execution.
To instrument a recipe run, it's sufficient to call:
Recipe.run(module, initial_state, enable_telemetry: true)
The default setting for telemetry is to use the `Recipe.Debug` module, but you can implement
your own by using the `Recipe.Telemetry` behaviour, defining the needed callbacks, and running
the recipe as follows:
Recipe.run(module, initial_state, enable_telemetry: true, telemetry_module: MyModule)
An example of a compliant module can be:
defmodule Recipe.Debug do
use Recipe.Telemetry
def on_start(state) do
IO.inspect(state)
end
def on_finish(state) do
IO.inspect(state)
end
def on_success(step, state, elapsed_microseconds) do
IO.inspect([step, state, elapsed_microseconds])
end
def on_error(step, error, state, elapsed_microseconds) do
IO.inspect([step, error, state, elapsed_microseconds])
end
end
### Application-wide telemetry configuration
If you wish to control telemetry application-wide, you can do that by
creating an application-specific wrapper for `Recipe` as follows:
defmodule MyApp.Recipe do
def run(recipe_module, initial_state, run_opts \\ []) do
final_run_opts = Keyword.put_new(run_opts,
:enable_telemetry,
telemetry_enabled?())
Recipe.run(recipe_module, initial_state, final_run_opts)
end
def telemetry_on! do
Application.put_env(:recipe, :enable_telemetry, true)
end
def telemetry_off! do
Application.put_env(:recipe, :enable_telemetry, false)
end
defp telemetry_enabled? do
Application.get_env(:recipe, :enable_telemetry, false)
end
end
This module supports using a default setting which can be toggled
at runtime with `telemetry_on!/0` and `telemetry_off!/0`, overridable
on a per-run basis by passing `enable_telemetry: false` as a third
argument to `MyApp.Recipe.run/3`.
You can also add static configuration to `config/config.exs`:
config :recipe,
enable_telemetry: true
"""
alias Recipe.{InvalidRecipe, UUID}
require Logger
@default_run_opts [enable_telemetry: false]
defstruct assigns: %{},
recipe_module: NoOp,
correlation_id: nil,
telemetry_module: Recipe.Debug,
run_opts: @default_run_opts
@type step :: atom
@type recipe_module :: atom
@type error :: term
@type run_opts :: [{:enable_telemetry, boolean} | {:correlation_id, UUID.t()}]
@type function_name :: atom
@type telemetry_module :: module
@type t :: %__MODULE__{
assigns: %{optional(atom) => term},
recipe_module: module,
correlation_id: nil | Recipe.UUID.t(),
telemetry_module: telemetry_module,
run_opts: Recipe.run_opts()
}
@doc """
Lists all steps included in the recipe, e.g. `[:square, :double]`
"""
@callback steps() :: [step]
@doc """
Invoked at the end of the recipe, it receives the state obtained at the
last step.
"""
@callback handle_result(t) :: term
@doc """
Invoked any time a step fails. Receives the name of the failed step,
the error and the state.
"""
@callback handle_error(step, error, t) :: term
defmacro __using__(_opts) do
quote do
@behaviour Recipe
@after_compile __MODULE__
@doc false
def __after_compile__(env, bytecode) do
unless Module.defines?(__MODULE__, {:steps, 0}) do
raise InvalidRecipe, message: InvalidRecipe.missing_steps(__MODULE__)
end
steps = __MODULE__.steps()
definitions = Module.definitions_in(__MODULE__)
case all_steps_defined?(definitions, steps) do
:ok ->
:ok
{:missing, missing_steps} ->
raise InvalidRecipe,
message: InvalidRecipe.missing_step_definitions(__MODULE__, missing_steps)
end
end
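# Verifies that every step listed in `steps/0` has a matching function of
# arity 1 defined in the module; returns :ok, or {:missing, steps} with the
# missing steps in their original order.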
defp all_steps_defined?(definitions, steps) do
missing_steps =
Enum.reduce(steps, [], fn step, missing_steps ->
case Keyword.get(definitions, step, :not_defined) do
:not_defined -> [step | missing_steps]
arity when arity !== 1 -> [step | missing_steps]
1 -> missing_steps
end
end)
case missing_steps do
[] -> :ok
_other -> {:missing, Enum.reverse(missing_steps)}
end
end
end
end
@doc """
Returns an empty recipe state. Useful in conjunction with `Recipe.run/2`.
"""
@spec initial_state() :: t
def initial_state, do: %__MODULE__{}
@doc """
Assigns a new value in the recipe state under the specified key.
Keys are available for reading under the `assigns` key.
iex> state = Recipe.initial_state |> Recipe.assign(:user_id, 1)
iex> state.assigns.user_id
1
"""
@spec assign(t, atom, term) :: t
def assign(state, key, value) do
new_assigns = Map.put(state.assigns, key, value)
%{state | assigns: new_assigns}
end
@doc """
Unassigns (a.k.a. deletes) a specific key in the state assigns.
iex> state = Recipe.initial_state |> Recipe.assign(:user_id, 1)
iex> state.assigns.user_id
1
iex> new_state = Recipe.unassign(state, :user_id)
iex> new_state.assigns
%{}
"""
@spec unassign(t, atom) :: t
def unassign(state, key) do
new_assigns = Map.delete(state.assigns, key)
%{state | assigns: new_assigns}
end
@doc """
Runs a recipe, identified by a module which implements the `Recipe`
behaviour, allowing you to specify the initial state.
In case of a successful run, it will return a 3-element tuple `{:ok,
correlation_id, result}`, where `correlation_id` is a UUID that can be used
to connect this workflow with another one and `result` is the return value of
the `handle_result/1` callback.
Supports an optional third argument (a keyword list) for extra options:
- `:enable_telemetry`: when true, uses the configured telemetry module to log
and collect metrics around the recipe execution
- `:telemetry_module`: the telemetry module to use when logging events and metrics.
The module needs to implement the `Recipe.Telemetry` behaviour (see related docs),
it's set by default to `Recipe.Debug` and it's only used when `:enable_telemetry`
is set to true
- `:correlation_id`: you can override the automatically generated correlation id
by passing it as an option. A UUID can be generated with `Recipe.UUID.generate/0`
### Example
```
Recipe.run(Workflow, Recipe.initial_state(), enable_telemetry: true)
```
"""
@spec run(recipe_module, t, run_opts) :: {:ok, UUID.t(), term} | {:error, term}
def run(recipe_module, initial_state, run_opts \\ []) do
steps = recipe_module.steps()
final_run_opts = Keyword.merge(initial_state.run_opts, run_opts)
correlation_id = Keyword.get(final_run_opts, :correlation_id, UUID.generate())
telemetry_module =
Keyword.get(final_run_opts, :telemetry_module, initial_state.telemetry_module)
state = %{
initial_state
| recipe_module: recipe_module,
correlation_id: correlation_id,
telemetry_module: telemetry_module,
run_opts: final_run_opts
}
maybe_on_start(state)
do_run(steps, state)
end
defp do_run([], state) do
maybe_on_finish(state)
{:ok, state.correlation_id, state.recipe_module.handle_result(state)}
end
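# Each step is invoked as `recipe_module.step(state)` and timed via
# `:timer.tc/3`, which returns `{elapsed_microseconds, result}`. Anything
# other than `{:ok, new_state}` is treated as an error and handed to the
# recipe's `handle_error/3` callback.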
defp do_run([step | remaining_steps], state) do
case :timer.tc(state.recipe_module, step, [state]) do
{elapsed, {:ok, new_state}} ->
maybe_on_success(step, new_state, elapsed)
do_run(remaining_steps, new_state)
{elapsed, error} ->
maybe_on_error(step, error, state, elapsed)
{:error, state.recipe_module.handle_error(step, error, state)}
end
end
defp maybe_on_start(state) do
if Keyword.get(state.run_opts, :enable_telemetry) do
state.telemetry_module.on_start(state)
end
end
defp maybe_on_success(step, state, elapsed) do
if Keyword.get(state.run_opts, :enable_telemetry) do
state.telemetry_module.on_success(step, state, elapsed)
end
end
defp maybe_on_error(step, error, state, elapsed) do
if Keyword.get(state.run_opts, :enable_telemetry) do
state.telemetry_module.on_error(step, error, state, elapsed)
end
end
defp maybe_on_finish(state) do
if Keyword.get(state.run_opts, :enable_telemetry) do
state.telemetry_module.on_finish(state)
end
end
end
|
lib/recipe.ex
| 0.865324
| 0.467514
|
recipe.ex
|
starcoder
|
defmodule Xandra do
@moduledoc """
This module provides the main API to interface with Cassandra.
This module handles the connection to Cassandra, queries, connection pooling,
connection backoff, logging, and more. Many of these features are provided by
the [`DBConnection`](https://hex.pm/packages/db_connection) library, which
Xandra is built on top of.
## Errors
Many of the functions in this module (whose names don't end with a `!`)
return values in the form `{:ok, result}` or `{:error, error}`. While `result`
varies based on the specific function, `error` is always one of the following:
* a `Xandra.Error` struct: such structs represent errors returned by
Cassandra. When such an error is returned, it means that communicating
with the Cassandra server was successful, but the server returned an
error. Examples of these errors are syntax errors in queries, non-existent
tables, and so on. See `Xandra.Error` for more information.
* a `Xandra.ConnectionError` struct: such structs represent errors in the
communication with the Cassandra server. For example, if the Cassandra
server dies while the connection is waiting for a response from the
server, a `Xandra.ConnectionError` error will be returned. See
`Xandra.ConnectionError` for more information.
## Parameters, encoding, and types
Xandra supports parameterized queries (queries that specify "parameter" values
through `?` or `:named_value`):
SELECT * FROM users WHERE name = ? AND email = ?
SELECT * FROM users WHERE name = :name AND email = :email
When a query has positional parameters, parameters can be passed as a list to
functions like `execute/4`: in this case, a parameter in a given position in
the list will be used as the `?` in the corresponding position in the
query. When a query has named parameters, parameters are passed as a map with
string keys representing each parameter's name and values representing the
corresponding parameter's value.
### Types
Cassandra supports many types of values, and some types have "shades" that
cannot be represented by Elixir types. For example, in Cassandra an integer
could be a "bigint" (a 64 bit integer), an "int" (a 32 bit integer), a
"smallint" (a 16 bit integer), or others; in Elixir, however, integers are
just integers (with varying size to be precise), so it is impossible to
unambiguously map Elixir integers to a specific Cassandra integer type. For this
reason, when executing simple parameterized queries (statements) it is
necessary to explicitly specify the type of each value.
To specify the type of a value, that value needs to be provided as a
two-element tuple where the first element is the value's type and the second
element is the value itself. Types are expressed with the same syntax used in
CQL: for example, 16-bit integers are represented as `"smallint"`, while maps
of strings to booleans are represented as `"map<text, boolean>"`.
# Using a list of parameters:
statement = "INSERT INTO species (name, properties) VALUES (?, ?)"
Xandra.execute(conn, statement, [
{"text", "human"},
{"map<text, boolean>", %{"legs" => true, "arms" => true, "tail" => false}},
])
# Using a map of parameters:
statement = "INSERT INTO species (name, properties) VALUES (:name, :properties)"
Xandra.execute(conn, statement, %{
"name" => {"text", "human"},
"properties" => {"map<text, boolean>", %{"legs" => true, "arms" => true, "tail" => false}},
})
You only need to specify types for simple queries (statements): when using
prepared queries, the type information of each parameter of the query is
encoded in the prepared query itself.
# Using a map of parameters:
prepared = Xandra.prepare!(conn, "INSERT INTO species (name, properties) VALUES (:name, :properties)")
Xandra.execute(conn, prepared, %{
"name" => "human",
"properties" => %{"legs" => true, "arms" => true, "tail" => false},
})
#### User-defined types
Xandra supports user-defined types (UDTs). A UDT can be inserted as a map with
string fields. For example, consider having created the following UDTs:
CREATE TYPE full_name (first_name text, last_name text)
CREATE TYPE profile (username text, full_name frozen<full_name>)
and having the following table:
CREATE TABLE users (id int PRIMARY KEY, profile frozen<profile>)
Inserting rows will look something like this:
prepared_insert = Xandra.prepare!(conn, "INSERT INTO users (id, profile) VALUES (?, ?)")
profile = %{
"username" => "bperry",
"full_name" => %{"first_name" => "Britta", "last_name" => "Perry"},
}
Xandra.execute!(conn, prepared_insert, [_id = 1, profile])
Note that inserting UDTs is only supported on prepared queries.
When retrieved, UDTs are once again represented as maps with string
keys. Retrieving the row inserted above would look like this:
%{"profile" => profile} = conn |> Xandra.execute!("SELECT id, profile FROM users") |> Enum.fetch!(0)
profile
#=> %{"username" => "bperry", "full_name" => %{"first_name" => "Britta", "last_name" => "Perry"}}
## Reconnections
Thanks to the `DBConnection` library, Xandra is able to handle connection
losses and to automatically reconnect to Cassandra. By default, reconnections
are retried at exponentially increasing randomized intervals, but backoff can
be configured through a subset of the options accepted by
`start_link/2`. These options are described in the documentation for
`DBConnection.start_link/2`.
## Clustering
Xandra supports connecting to multiple nodes in a Cassandra cluster and
executing queries on different nodes based on load balancing strategies. See
the documentation for `Xandra.Cluster` for more information.
## Authentication
Xandra supports Cassandra authentication. See the documentation for
`Xandra.Authenticator` for more information.
## Retrying failed queries
Xandra takes a customizable and extensible approach to retrying failed queries
through "retry strategies" that encapsulate the logic for retrying
queries. See `Xandra.RetryStrategy` for documentation on retry strategies.
## Compression
Xandra supports compression. To inform the Cassandra server that the
connections you start should use compression for data transmitted to and from
the server, you can pass the `:compressor` option to `start_link/1`; this
option should be a module that implements the `Xandra.Compressor`
behaviour. After this, all compressed data that Cassandra sends to the
connection will be decompressed using this behaviour module.
To compress outgoing data (such as when issuing or preparing queries), the
`:compressor` option should be specified explicitly. When it's specified, the
given module will be used to compress data. If no `:compressor` option is
passed, the outgoing data will not be compressed.
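For example, a minimal sketch (`MyApp.LZ4Compressor` is a hypothetical module assumed to implement the `Xandra.Compressor` behaviour):
{:ok, conn} = Xandra.start_link(compressor: MyApp.LZ4Compressor)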
"""
alias __MODULE__.{Batch, Connection, ConnectionError, Error, Prepared, Page, PageStream, Simple}
@type statement :: String.t()
@type values :: list | map
@type error :: Error.t() | ConnectionError.t()
@type result :: Xandra.Void.t() | Page.t() | Xandra.SetKeyspace.t() | Xandra.SchemaChange.t()
@type conn :: DBConnection.conn()
@type xandra_start_option ::
{:nodes, [String.t()]}
| {:compressor, module}
| {:authentication, {module, Keyword.t()}}
| {:atom_keys, boolean}
@type db_connection_start_option :: {atom(), any}
@type start_option :: xandra_start_option | db_connection_start_option
@type start_options :: [start_option]
@default_port 9042
@default_start_options [
nodes: ["127.0.0.1"],
idle_timeout: 30_000
]
@doc """
Starts a new connection or pool of connections to Cassandra.
This function starts a new connection or pool of connections to the provided
Cassandra server. `options` is a list of both Xandra-specific options, as well
as `DBConnection` options.
## Options
These are the Xandra-specific options supported by this function:
* `:nodes` - (list of strings) the Cassandra nodes to connect to. Each node
in the list has to be in the form `"ADDRESS:PORT"` or in the form
`"ADDRESS"`: if the latter is used, the default port (`#{@default_port}`)
will be used for that node. Defaults to `["127.0.0.1"]`. This option must
contain only one node unless the `:pool` option is set to
`Xandra.Cluster`; see the documentation for `Xandra.Cluster` for more
information.
* `:compressor` - (module) the compressor module to use for compressing and
decompressing data. See the "Compression" section in the module
documentation. By default this option is not present.
* `:authentication` - (tuple) a two-element tuple: the authenticator
module to use for authentication and its supported options. See the
"Authentication" section in the module documentation.
* `:atom_keys` - (boolean) whether or not results of and parameters to
`execute/4` will have atom keys. If `true`, the result maps will have
column names returned as atoms rather than as strings. Additionally,
maps that represent named parameters will need atom keys. Defaults to `false`.
The rest of the options are forwarded to `DBConnection.start_link/2`. For
example, to start a pool of connections to Cassandra, the `:pool` option can
be used:
Xandra.start_link(pool: DBConnection.Poolboy)
Note that this requires the `poolboy` dependency to be specified in your
application. The following options have default values that are different from
the default values provided by `DBConnection`:
* `:idle_timeout` - defaults to `30_000` (30 seconds)
## Examples
# Start a connection:
{:ok, conn} = Xandra.start_link()
# Start a connection and register it under a name:
{:ok, _conn} = Xandra.start_link(name: :xandra)
# Start a named pool of connections:
{:ok, _pool} = Xandra.start_link(name: :xandra_pool, pool: DBConnection.Poolboy)
As the `DBConnection` documentation states, if using a pool it's necessary to
pass a `:pool` option with the pool module being used to every call. For
example:
{:ok, _pool} = Xandra.start_link(name: :xandra_pool, pool: DBConnection.Poolboy)
Xandra.execute!(:xandra_pool, "SELECT * FROM users", _params = [], pool: DBConnection.Poolboy)
### Using a keyspace for new connections
It is common to start a Xandra connection or pool of connections that will use
a single keyspace for their whole life span. Doing something like:
{:ok, conn} = Xandra.start_link()
Xandra.execute!(conn, "USE my_keyspace")
will work just fine when you only have one connection. If you have a pool of
connections (with the `:pool` option), however, the code above won't work:
that code would start the pool, and then checkout one connection from the pool
to execute the `USE my_keyspace` query. That specific connection will then be
using the `my_keyspace` keyspace, but all other connections in the pool will
not. Fortunately, `DBConnection` provides an option we can use to solve this
problem: `:after_connect`. This option can specify a function that will be run
after each single connection to Cassandra. This function will take a
connection and can be used to setup that connection; since this function is
run for every established connection, it will work well with pools as well.
{:ok, conn} = Xandra.start_link(after_connect: fn(conn) -> Xandra.execute(conn, "USE my_keyspace") end)
See the documentation for `DBConnection.start_link/2` for more information
about this option.
"""
@spec start_link(start_options) :: GenServer.on_start()
def start_link(options \\ []) when is_list(options) do
options =
@default_start_options
|> Keyword.merge(options)
|> parse_start_options()
|> Keyword.put(:prepared_cache, Prepared.Cache.new())
DBConnection.start_link(Connection, options)
end
@doc """
Streams the results of a simple query or a prepared query with the given `params`.
This function can be used to stream the results of `query` so as not to load
them entirely in memory. This function doesn't send any query to Cassandra
right away: it will only execute queries as necessary when results are
requested out of the returned stream.
The returned value is a stream of `Xandra.Page` structs, where each of such
structs contains at most as many rows as specified by the `:page_size`
option. Every time an element is requested from the stream, `query` will be
executed with `params` to get that result.
In order to get each result from Cassandra, `execute!/4` is used: this means
that if there is an error (such as a network error) when executing the
queries, that error will be raised.
### Simple or prepared queries
Regardless of `query` being a simple query or a prepared query, this function
will execute it every time a result is needed from the returned stream. For
this reason, it is usually a good idea to use prepared queries when streaming.
## Options
`options` supports all the options supported by `execute/4`, with the same
default values.
## Examples
prepared = Xandra.prepare!(conn, "SELECT * FROM users")
users_stream = Xandra.stream_pages!(conn, prepared, _params = [], page_size: 2)
[%Xandra.Page{} = _page1, %Xandra.Page{} = _page2] = Enum.take(users_stream, 2)
"""
@spec stream_pages!(conn, statement | Prepared.t(), values, Keyword.t()) :: Enumerable.t()
def stream_pages!(conn, query, params, options \\ [])
def stream_pages!(conn, statement, params, options) when is_binary(statement) do
%PageStream{conn: conn, query: statement, params: params, options: options}
end
def stream_pages!(conn, %Prepared{} = prepared, params, options) do
%PageStream{conn: conn, query: prepared, params: params, options: options}
end
@doc """
Prepares the given query.
This function prepares the given statement on the Cassandra server. If
preparation is successful and there are no network errors while talking to the
server, `{:ok, prepared}` is returned, otherwise `{:error, error}` is
returned.
The returned prepared query can be run through `execute/4`, or used inside a
batch (see `Xandra.Batch`).
Errors returned by this function can be either `Xandra.Error` or
`Xandra.ConnectionError` structs. See the module documentation for more
information about errors.
Supports all the options supported by `DBConnection.prepare/3`, and the
following additional options:
* `:force` - (boolean) when `true`, forces the preparation of the query on
the server instead of trying to read the prepared query from cache. See
the "Prepared queries cache" section below. Defaults to `false`.
* `:compressor` - (module) the compressor module used to compress and
decompress data. See the "Compression" section in the module
documentation. By default, this option is not present.
## Prepared queries cache
Since Cassandra prepares queries on a per-node basis (and not on a
per-connection basis), Xandra internally caches prepared queries for each
connection or pool of connections. This means that if you prepare a query that
was already prepared, no action will be executed on the Cassandra server and
the prepared query will be returned from the cache.
If the Cassandra node goes down, however, the prepared query will be
invalidated and trying to use the one from cache will result in a
`Xandra.Error`. However, this is automatically handled by Xandra: when such an
error is returned, Xandra will first retry to prepare the query and only
return an error if the preparation fails.
If you want to ensure a query is prepared on the server, you can set the
`:force` option to `true`.
## Examples
{:ok, prepared} = Xandra.prepare(conn, "SELECT * FROM users WHERE id = ?")
{:ok, _page} = Xandra.execute(conn, prepared, [_id = 1])
{:error, %Xandra.Error{reason: :invalid_syntax}} = Xandra.prepare(conn, "bad syntax")
# Force a query to be prepared on the server and not be read from cache:
Xandra.prepare!(conn, "SELECT * FROM users WHERE ID = ?", force: true)
"""
@spec prepare(conn, statement, Keyword.t()) :: {:ok, Prepared.t()} | {:error, error}
def prepare(conn, statement, options \\ []) when is_binary(statement) do
DBConnection.prepare(conn, %Prepared{statement: statement}, options)
end
@doc """
Prepares the given query, raising if there's an error.
This function works exactly like `prepare/3`, except it returns the prepared
query directly if preparation succeeds, otherwise raises the returned error.
## Examples
prepared = Xandra.prepare!(conn, "SELECT * FROM users WHERE id = ?")
{:ok, _page} = Xandra.execute(conn, prepared, [_id = 1])
"""
@spec prepare!(conn, statement, Keyword.t()) :: Prepared.t() | no_return
def prepare!(conn, statement, options \\ []) do
case prepare(conn, statement, options) do
{:ok, result} -> result
{:error, exception} -> raise(exception)
end
end
@doc """
Executes the given simple query, prepared query, or batch query.
Returns `{:ok, result}` if executing the query was successful, or `{:error,
error}` otherwise. The meaning of the `params_or_options` argument depends on
what `query` is:
* if `query` is a batch query, then `params_or_options` has to be a list of
options that will be used to run the batch query (since batch queries
don't use parameters as parameters are attached to each query in the
batch).
* if `query` is a simple query (a string) or a prepared query, then
`params_or_options` is a list or map of parameters, and this function is
exactly the same as calling `execute(conn, query, params_or_options, [])`.
When `query` is a batch query, successful results will always be `Xandra.Void`
structs.
When `{:error, error}` is returned, `error` can be either a `Xandra.Error` or
a `Xandra.ConnectionError` struct. See the module documentation for more
information on errors.
## Options for batch queries
When `query` is a batch query, `params_or_options` is a list of options. All
options supported by `DBConnection.execute/4` are supported, and the following
additional batch-specific options:
* `:consistency` - same as the `:consistency` option described in the
documentation for `execute/4`.
* `:serial_consistency` - same as the `:serial_consistency` option described
in the documentation for `execute/4`.
* `:timestamp` - (integer) using this option means that the provided
timestamp will apply to all the statements in the batch that do not
explicitly specify a timestamp.
## Examples
For examples on executing simple queries or prepared queries, see the
documentation for `execute/4`. Examples below specifically refer to batch
queries. See the documentation for `Xandra.Batch` for more information about
batch queries and how to construct them.
prepared_insert = Xandra.prepare!(conn, "INSERT (email, name) INTO users VALUES (?, ?)")
batch =
Xandra.Batch.new()
|> Xandra.Batch.add(prepared_insert, ["<EMAIL>", "<NAME>"])
|> Xandra.Batch.add(prepared_insert, ["<EMAIL>", "<NAME>"])
|> Xandra.Batch.add(prepared_insert, ["<EMAIL>", "<NAME>"])
# Execute the batch:
Xandra.execute(conn, batch)
#=> {:ok, %Xandra.Void{}}
# Execute the batch with a default timestamp for all statements:
Xandra.execute(conn, batch, timestamp: System.system_time(:millisecond) - 1_000)
#=> {:ok, %Xandra.Void{}}
All `DBConnection.execute/4` options are supported here as well:
Xandra.execute(conn, batch, pool: DBConnection.Poolboy)
#=> {:ok, %Xandra.Void{}}
"""
@spec execute(conn, statement | Prepared.t(), values) :: {:ok, result} | {:error, error}
@spec execute(conn, Batch.t(), Keyword.t()) :: {:ok, Xandra.Void.t()} | {:error, error}
def execute(conn, query, params_or_options \\ [])
def execute(conn, statement, params) when is_binary(statement) do
execute(conn, statement, params, _options = [])
end
def execute(conn, %Prepared{} = prepared, params) do
execute(conn, prepared, params, _options = [])
end
def execute(conn, %Batch{} = batch, options) when is_list(options) do
execute_with_retrying(conn, batch, nil, options)
end
@doc """
Executes the given simple query or prepared query with the given parameters.
Returns `{:ok, result}` where `result` is the result of executing `query` if
the execution is successful (there are no network errors or semantic errors
with the query), or `{:error, error}` otherwise.
`result` can be one of the following:
* a `Xandra.Void` struct - returned for queries such as `INSERT`, `UPDATE`,
or `DELETE`.
* a `Xandra.SchemaChange` struct - returned for queries that perform changes
on the schema (such as creating tables).
* a `Xandra.SetKeyspace` struct - returned for `USE` queries.
* a `Xandra.Page` struct - returned for queries that return rows (such as
`SELECT` queries).
The properties of each of the results listed above are described in each
result's module.
## Options
This function accepts all options accepted by `DBConnection.execute/4`, plus
the following ones:
* `:consistency` - (atom) specifies the consistency level for the given
query. See the Cassandra documentation for more information on consistency
levels. The value of this option can be one of:
* `:one` (default)
* `:two`
* `:three`
* `:any`
* `:quorum`
* `:all`
* `:local_quorum`
* `:each_quorum`
* `:serial`
* `:local_serial`
* `:local_one`
* `:page_size` - (integer) the size of a page of results. If `query` returns a
`Xandra.Page` struct, that struct will contain at most `:page_size` rows
in it. Defaults to `10_000`.
* `:paging_state` - (binary) the offset where rows should be
returned from. By default this option is not present and paging starts
from the beginning. See the "Paging" section below for more information on
how to page queries.
* `:timestamp` - (integer) the default timestamp for the query (in
microseconds). If provided, overrides the server-side assigned timestamp;
however, a timestamp in the query itself will still override this
timestamp.
* `:serial_consistency` - (atom) specifies the serial consistency to use for
executing the given query. Can be one of `:serial` or `:local_serial`.
* `:compressor` - (module) the compressor module used to compress and
decompress data. See the "Compression" section in the module
documentation. By default, this option is not present.
* `:retry_strategy` - (module) the module implementing the
`Xandra.RetryStrategy` behaviour that is used in case the query fails to
determine whether to retry it or not. See the "Retrying failed queries"
section in the module documentation. By default, this option is not
present.
* `:date_format` - (`:date` or `:integer`) controls the format in which
dates are returned. When set to `:integer` the returned value is
a number of days from the Unix epoch, a date struct otherwise.
Defaults to `:date`.
* `:time_format` - (`:time` or `:integer`) controls the format in which
times are returned. When set to `:integer` the returned value is
a number of nanoseconds from midnight, a time struct otherwise.
Defaults to `:time`.
* `:timestamp_format` - (`:datetime` or `:integer`) controls the format in which
timestamps are returned. When set to `:integer` the returned value is
a number of milliseconds from the Unix epoch, a datetime struct otherwise.
Defaults to `:datetime`.
## Parameters
The `params` argument specifies parameters to use when executing the query; it
can be either a list of positional parameters (specified via `?` in the query)
or a map of named parameters (specified as `:named_parameter` in the
query). When `query` is a simple query, the value of each parameter must be a
two-element tuple specifying the type used to encode the value and the value
itself; when `query` is a prepared query, this is not necessary (and values
can just be values) as the type information is encoded in the prepared
query. See the module documentation for more information about query
parameters, types, and encoding values.
## Examples
Executing a simple query (which is just a string):
statement = "INSERT INTO users (first_name, last_name) VALUES (:first_name, :last_name)"
{:ok, %Xandra.Void{}} = Xandra.execute(conn, statement, %{
"first_name" => {"text", "Chandler"},
"last_name" => {"text", "Bing"},
})
Executing the query when `atom_keys: true` has been specified in `Xandra.start_link/1`:
Xandra.execute(conn, statement, %{
first_name: {"text", "Chandler"},
last_name: {"text", "Bing"}
})
Executing a prepared query:
prepared = Xandra.prepare!(conn, "INSERT INTO users (first_name, last_name) VALUES (?, ?)")
{:ok, %Xandra.Void{}} = Xandra.execute(conn, prepared, ["Monica", "Geller"])
Performing a `SELECT` query and using `Enum.to_list/1` to convert the
`Xandra.Page` result to a list of rows:
statement = "SELECT * FROM users"
{:ok, %Xandra.Page{} = page} = Xandra.execute(conn, statement, _params = [])
Enum.to_list(page)
#=> [%{"first_name" => "Chandler", "last_name" => "Bing"},
#=> %{"first_name" => "Monica", "last_name" => "Geller"}]
Performing the query when `atom_keys: true` has been specified in `Xandra.start_link/1`:
{:ok, page} = Xandra.execute(conn, statement, _params = [])
Enum.to_list(page)
#=> [%{first_name: "Chandler", last_name: "Bing"},
#=> %{first_name: "Monica", last_name: "Geller"}]
Ensuring the write is written to the commit log and memtable of at least three replica nodes:
statement = "INSERT INTO users (first_name, last_name) VALUES ('Chandler', 'Bing')"
{:ok, %Xandra.Void{}} = Xandra.execute(conn, statement, _params = [], consistency: :three)
This function supports all options supported by `DBConnection.execute/4`; for
example, if the `conn` connection was started with `pool: DBConnection.Poolboy`,
then the `:pool` option would have to be passed here as well:
statement = "DELETE FROM users WHERE first_name = 'Chandler'"
{:ok, %Xandra.Void{}} = Xandra.execute(conn, statement, _params = [], pool: DBConnection.Poolboy)
## Paging
Since `execute/4` supports the `:paging_state` option, it is possible to manually
implement paging. For example, given the following prepared query:
prepared = Xandra.prepare!(conn, "SELECT first_name FROM users")
We can now execute such query with a specific page size using the `:page_size`
option:
{:ok, %Xandra.Page{} = page} = Xandra.execute(conn, prepared, [], page_size: 2)
Since `:page_size` is `2`, `page` will contain at most `2` rows:
Enum.to_list(page)
#=> [%{"first_name" => "Ross"}, %{"first_name" => "Rachel"}]
Now, we can pass `page.paging_state` as the value of the `:paging_state` option to let the paging
start from where we left off:
{:ok, %Xandra.Page{} = new_page} = Xandra.execute(conn, prepared, [], page_size: 2, paging_state: page.paging_state)
Enum.to_list(new_page)
#=> [%{"first_name" => "Joey"}, %{"first_name" => "Phoebe"}]
However, using `:paging_state` and `:page_size` directly with `execute/4` is not
recommended when the intent is to "stream" a query. For that, it's recommended
to use `stream_pages!/4`. Also note that if the `:paging_state` option is set to `nil`,
meaning there are no more pages to fetch, an `ArgumentError` exception will be raised;
be sure to check for this with `page.paging_state != nil`.
"""
@spec execute(conn, statement | Prepared.t(), values, Keyword.t()) ::
{:ok, result} | {:error, error}
def execute(conn, query, params, options)
def execute(conn, statement, params, options) when is_binary(statement) do
query = %Simple{statement: statement}
execute_with_retrying(conn, query, params, validate_paging_state(options))
end
def execute(conn, %Prepared{} = prepared, params, options) do
execute_with_retrying(conn, prepared, params, validate_paging_state(options))
end
@doc """
Executes the given simple query, prepared query, or batch query, raising if
there's an error.
This function behaves exactly like `execute/3`, except that it returns
successful results directly and raises on errors.
## Examples
Xandra.execute!(conn, "INSERT INTO users (name, age) VALUES ('Jane', 29)")
#=> %Xandra.Void{}
"""
@spec execute!(conn, statement | Prepared.t(), values) :: result | no_return
@spec execute!(conn, Batch.t(), Keyword.t()) :: Xandra.Void.t() | no_return
def execute!(conn, query, params_or_options \\ []) do
case execute(conn, query, params_or_options) do
{:ok, result} -> result
{:error, exception} -> raise(exception)
end
end
@doc """
Executes the given simple query, prepared query, or batch query, raising if
there's an error.
This function behaves exactly like `execute/4`, except that it returns
successful results directly and raises on errors.
## Examples
statement = "INSERT INTO users (name, age) VALUES ('John', 43)"
Xandra.execute!(conn, statement, _params = [], consistency: :quorum)
#=> %Xandra.Void{}
"""
@spec execute!(conn, statement | Prepared.t(), values, Keyword.t()) :: result | no_return
def execute!(conn, query, params, options) do
case execute(conn, query, params, options) do
{:ok, result} -> result
{:error, exception} -> raise(exception)
end
end
@doc """
Acquires a locked connection from `conn` and executes `fun` passing such
connection as the argument.
All options are forwarded to `DBConnection.run/3` (and thus some of them to
the underlying pool).
The return value of this function is the return value of `fun`.
## Examples
Preparing a query and executing it on the same connection:
Xandra.run(conn, fn conn ->
prepared = Xandra.prepare!(conn, "INSERT INTO users (name, age) VALUES (:name, :age)")
Xandra.execute!(conn, prepared, %{"name" => "John", "age" => 84})
end)
"""
@spec run(conn, Keyword.t(), (conn -> result)) :: result when result: var
def run(conn, options \\ [], fun) when is_function(fun, 1) do
DBConnection.run(conn, fun, options)
end
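# Walks the queries of a batch and re-prepares every prepared query with
# `:force` set to `true` (simple queries need no preparation and are
# skipped). Used after the server reports an :unprepared error for a batch.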
defp reprepare_queries(conn, [%Simple{} | rest], options) do
reprepare_queries(conn, rest, options)
end
defp reprepare_queries(conn, [%Prepared{statement: statement} | rest], options) do
with {:ok, _prepared} <- prepare(conn, statement, Keyword.put(options, :force, true)) do
reprepare_queries(conn, rest, options)
end
end
defp reprepare_queries(_conn, [], _options) do
:ok
end
defp validate_paging_state(options) do
case Keyword.fetch(options, :paging_state) do
{:ok, nil} ->
raise ArgumentError, "no more pages are available"
{:ok, value} when not is_binary(value) ->
raise ArgumentError,
"expected a binary as the value of the :paging_state option, " <>
"got: #{inspect(value)}"
_other ->
maybe_put_paging_state(options)
end
end
defp maybe_put_paging_state(options) do
case Keyword.pop(options, :cursor) do
{%Page{paging_state: nil}, _options} ->
raise ArgumentError, "no more pages are available"
{%Page{paging_state: paging_state}, options} ->
IO.warn("the :cursor option is deprecated, please use :paging_state instead")
Keyword.put(options, :paging_state, paging_state)
{nil, options} ->
options
{other, _options} ->
raise ArgumentError,
"expected a Xandra.Page struct as the value of the :cursor option, " <>
"got: #{inspect(other)}"
end
end
defp execute_with_retrying(conn, query, params, options) do
case Keyword.pop(options, :retry_strategy) do
{nil, options} ->
execute_without_retrying(conn, query, params, options)
{retry_strategy, options} ->
execute_with_retrying(conn, query, params, options, retry_strategy)
end
end
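# The retry loop: on error, the strategy state is lazily initialized and
# carried across attempts under the :retrying_state key. The strategy then
# decides to give up (:error) or to retry with possibly updated options
# ({:retry, new_options, new_retry_state}).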
defp execute_with_retrying(conn, query, params, options, retry_strategy) do
with {:error, reason} <- execute_without_retrying(conn, query, params, options) do
{retry_state, options} =
Keyword.pop_lazy(options, :retrying_state, fn ->
retry_strategy.new(options)
end)
case retry_strategy.retry(reason, options, retry_state) do
:error ->
{:error, reason}
{:retry, new_options, new_retry_state} ->
new_options = Keyword.put(new_options, :retrying_state, new_retry_state)
execute_with_retrying(conn, query, params, new_options, retry_strategy)
other ->
raise ArgumentError,
"invalid return value #{inspect(other)} from " <>
"retry strategy #{inspect(retry_strategy)} " <>
"with state #{inspect(retry_state)}"
end
end
end
defp execute_without_retrying(conn, %Batch{} = batch, nil, options) do
run(conn, options, fn conn ->
case DBConnection.execute(conn, batch, nil, options) do
{:ok, %Error{reason: :unprepared}} ->
with :ok <- reprepare_queries(conn, batch.queries, options) do
execute(conn, batch, options)
end
{:ok, %Error{} = error} ->
{:error, error}
other ->
other
end
end)
end
defp execute_without_retrying(conn, %Simple{} = query, params, options) do
with {:ok, %Error{} = error} <- DBConnection.execute(conn, query, params, options) do
{:error, error}
end
end
defp execute_without_retrying(conn, %Prepared{} = prepared, params, options) do
run(conn, options, fn conn ->
case DBConnection.execute(conn, prepared, params, options) do
{:ok, %Error{reason: :unprepared}} ->
# We can ignore the newly returned prepared query since it will have the
# same id of the query we are repreparing.
case DBConnection.prepare_execute(
conn,
prepared,
params,
Keyword.put(options, :force, true)
) do
{:ok, _prepared, %Error{} = error} ->
{:error, error}
{:ok, _prepared, result} ->
{:ok, result}
{:error, _reason} = error ->
error
end
{:ok, %Error{} = error} ->
{:error, error}
other ->
other
end
end)
end
defp parse_start_options(options) do
cluster? = options[:pool] == Xandra.Cluster
Enum.flat_map(options, fn
{:nodes, nodes} when cluster? ->
[nodes: Enum.map(nodes, &parse_node/1)]
{:nodes, [string]} ->
{address, port} = parse_node(string)
[address: address, port: port]
{:nodes, _nodes} ->
raise ArgumentError,
"multi-node use requires the :pool option to be set to Xandra.Cluster"
{_key, _value} = option ->
[option]
end)
end
defp parse_node(string) do
case String.split(string, ":", parts: 2) do
[address, port] ->
case Integer.parse(port) do
{port, ""} ->
{String.to_charlist(address), port}
_ ->
raise ArgumentError, "invalid item #{inspect(string)} in the :nodes option"
end
[address] ->
{String.to_charlist(address), @default_port}
end
end
end
|
lib/xandra.ex
|
defmodule StaffNotes.Ecto.Slug do
@moduledoc """
An `Ecto.Type` that represents an identifier that can be used in a URL.
This type also keeps a slug distinguishable from a `binary_id`, since both remain strongly typed.
## Use
Use this as the type of the database field in the schema:
```
defmodule StaffNotes.Accounts.Organization do
use Ecto.Schema
alias StaffNotes.Ecto.Slug
schema "organizations" do
field :name, Slug
end
end
```
## Format
Follows the GitHub pattern for logins. They consist of:
* Alphanumerics — `/[a-zA-Z0-9]/`
* Hyphens — `/-/`
* Must begin and end with an alphanumeric
"""
@pattern ~r{\A[a-z0-9]+(-[a-z0-9]+)*\z}i
@behaviour Ecto.Type
@type t :: %__MODULE__{text: String.t()}
defstruct text: ""
@doc """
Returns the underlying schema type for a slug.
See: `c:Ecto.Type.type/0`
"""
@impl Ecto.Type
def type, do: :string
@doc """
Casts the given value into a slug.
See: `c:Ecto.Type.cast/1`
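## Examples
Illustrative, given the format described in the module docs:
    iex> StaffNotes.Ecto.Slug.cast("my-org")
    {:ok, %StaffNotes.Ecto.Slug{text: "my-org"}}
    iex> StaffNotes.Ecto.Slug.cast("-invalid-")
    :error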
"""
@impl Ecto.Type
def cast(%__MODULE__{} = slug) do
if __MODULE__.valid?(slug) do
{:ok, slug}
else
:error
end
end
def cast(binary) when is_binary(binary), do: cast(%__MODULE__{text: binary})
def cast(_other), do: :error
@doc """
Loads the given value into a slug.
See: `c:Ecto.Type.load/1`
"""
@impl Ecto.Type
def load(binary) when is_binary(binary) do
if __MODULE__.valid?(binary) do
{:ok, %__MODULE__{text: binary}}
else
:error
end
end
def load(_other), do: :error
@doc """
Dumps the given value into an `Ecto` native type.
See: `c:Ecto.Type.dump/1`
"""
@impl Ecto.Type
def dump(%__MODULE__{} = slug), do: dump(slug.text)
def dump(binary) when is_binary(binary) do
if __MODULE__.valid?(binary) do
{:ok, binary}
else
:error
end
end
def dump(_other), do: :error
@doc """
Converts a slug to iodata.
"""
def to_iodata(%__MODULE__{} = slug), do: __MODULE__.to_string(slug)
@doc """
Converts a slug to a string.
"""
def to_string(%__MODULE__{} = slug) do
if StaffNotes.Ecto.Slug.valid?(slug), do: slug.text, else: "INVALID SLUG #{slug.text}"
end
@doc """
Determines whether the given value is valid to be used as a slug.
"""
@spec valid?(t | String.t()) :: boolean
def valid?(binary) when is_binary(binary), do: binary =~ @pattern
def valid?(%__MODULE__{text: binary}) when is_binary(binary), do: binary =~ @pattern
def valid?(_), do: false
defimpl Phoenix.HTML.Safe do
def to_iodata(%StaffNotes.Ecto.Slug{} = slug), do: StaffNotes.Ecto.Slug.to_iodata(slug)
end
defimpl String.Chars do
def to_string(%StaffNotes.Ecto.Slug{} = slug), do: StaffNotes.Ecto.Slug.to_string(slug)
end
end
|
lib/staff_notes/ecto/slug.ex
|
defmodule Type.Union do
@moduledoc """
Represents the Union of two or more types.
The associated struct has one field:
- `:of` which is a list of all types that are being unioned.
For performance purposes, Union keeps its subtypes in
reverse-type-order.
Type.Union implements both the Enumerable and Collectable protocols;
collecting into a union returns a general `t:Type.t/0`, which is not
necessarily a Type.Union struct:
```
iex> Enum.into([1, 3], %Type.Union{})
%Type.Union{of: [3, 1]}
iex> inspect %Type.Union{of: [3, 1]}
"1 | 3"
iex> Enum.into([1..10, 11..20], %Type.Union{})
1..20
```
"""
defstruct [of: []]
@type t :: %__MODULE__{of: [Type.t, ...]}
@type t(type) :: %__MODULE__{of: [type, ...]}
@spec collapse(t) :: Type.t
@doc false
def collapse(%__MODULE__{of: []}) do
import Type, only: :macros
Type.none()
end
def collapse(%__MODULE__{of: [singleton]}), do: singleton
def collapse(union), do: union
@spec merge(t, Type.t) :: t
@doc false
# special case merging a union with another union.
def merge(%{of: into}, %__MODULE__{of: list}) do
%__MODULE__{of: merge_raw(into, list)}
end
def merge(%{of: list}, type) do
%__MODULE__{of: merge_raw(list, [type])}
end
# merges: types in argument2 into the list of argument1
# argument1 is required to be in DESCENDING order
# returns the fully merged types, in DESCENDING order
@spec merge_raw([Type.t], [Type.t]) :: [Type.t]
defp merge_raw(list, [head | rest]) do
{new_list, retries} = fold(head, Enum.reverse(list), [])
merge_raw(new_list, retries ++ rest)
end
defp merge_raw(list, []) do
Enum.sort(list, {:desc, Type})
end
# folds argument 1 into the list of argument2.
# argument 3, which is the stack, contains the remaining types in ASCENDING order.
@spec fold(Type.t, [Type.t], [Type.t]) :: {[Type.t], [Type.t]}
defp fold(type, [head | rest], stack) do
with order when order in [:gt, :lt] <- Type.compare(head, type),
retries when is_list(retries) <- type_merge(order, head, type) do
{unroll(rest, stack), retries}
else
:eq ->
{unroll(rest, [head | stack]), []}
:nomerge ->
fold(type, rest, [head | stack])
end
end
defp fold(type, [], stack) do
{[type | stack], []}
end
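  # unroll/2 pushes the untouched tail back onto the accumulated stack,
  # reversing it in the process.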
defp unroll([], stack), do: stack
defp unroll([head | rest], stack), do: unroll(rest, [head | stack])
defdelegate type_merge(order, head, type), to: Type.Union.Merge
defimpl Type.Properties do
import Type, only: :macros
import Type.Helpers
alias Type.Union
def compare(union, %Type.Function.Var{constraint: type}) do
case Type.compare(union, type) do
:eq -> :gt
order -> order
end
end
def compare(union, %Type.Opaque{type: type}) do
case Type.compare(union, type) do
:eq -> :gt
order -> order
end
end
def compare(%{of: llist}, %Union{of: rlist}) do
union_list_compare(llist, rlist)
end
def compare(%{of: [first | _]}, type) do
case Type.compare(first, type) do
:eq -> :gt
order -> order
end
end
defp union_list_compare([], []), do: :eq
defp union_list_compare([], _), do: :lt
defp union_list_compare(_, []), do: :gt
defp union_list_compare([lh | lrest], [rh | rrest]) do
case Type.compare(lh, rh) do
:eq -> union_list_compare(lrest, rrest)
order -> order
end
end
def typegroup(%{of: [first | _]}) do
Type.Properties.typegroup(first)
end
def usable_as(challenge, target, meta) do
challenge.of
|> Enum.map(&Type.usable_as(&1, target, meta))
|> Enum.reduce(fn
# TO BE REPLACED WITH SOMETHING MORE SOPHISTICATED.
:ok, :ok -> :ok
:ok, {:maybe, _} -> {:maybe, nil}
:ok, {:error, _} -> {:maybe, nil}
{:maybe, _}, :ok -> {:maybe, nil}
{:error, _}, :ok -> {:maybe, nil}
{:maybe, _}, {:maybe, _} -> {:maybe, nil}
{:maybe, _}, {:error, _} -> {:maybe, nil}
{:error, _}, {:maybe, _} -> {:maybe, nil}
{:error, _}, {:error, _} -> {:error, nil}
end)
|> case do
:ok -> :ok
{:maybe, _} -> {:maybe, [Type.Message.make(challenge, target, meta)]}
{:error, _} -> {:error, Type.Message.make(challenge, target, meta)}
end
end
intersection do
def intersection(lunion, runion = %Type.Union{}) do
lunion.of
|> Enum.map(&Type.intersection(runion, &1))
|> Type.union
end
def intersection(union = %{}, ritem) do
union.of
|> Enum.map(&Type.intersection(&1, ritem))
|> Type.union
end
end
subtype do
def subtype?(%{of: types}, target) do
Enum.all?(types, &Type.subtype?(&1, target))
end
end
def normalize(%{of: types}) do
%Union{of: Enum.map(types, &Type.normalize/1)}
end
end
defimpl Collectable do
alias Type.Union
def into(original) do
collector_fun = fn
union, {:cont, elem} ->
Union.merge(union, elem)
union, :done -> Union.collapse(union)
_set, :halt -> :ok
end
{original, collector_fun}
end
end
defimpl Inspect do
import Inspect.Algebra
import Type, only: :macros
def inspect(%{of: types}, opts) do
cond do
# override for boolean
rest = type_has(types, [true, false]) ->
override(rest, :boolean, opts)
# override for identifier
rest = type_has(types, [reference(), port(), pid()]) ->
override(rest, :identifier, opts)
# override for iodata
rest = type_has(types, [iolist(), %Type.Bitstring{size: 0, unit: 8}]) ->
override(rest, :iodata, opts)
# override for number
rest = type_has(types, [float(), neg_integer(), 0, pos_integer()]) ->
override(rest, :number, opts)
# override for integers
rest = type_has(types, [neg_integer(), 0, pos_integer()]) ->
override(rest, :integer, opts)
# override for timeout
rest = type_has(types, [0, pos_integer(), :infinity]) ->
override(rest, :timeout, opts)
# override for non_neg_integer
rest = type_has(types, [0, pos_integer()]) ->
override(rest, :non_neg_integer, opts)
rest = type_has(types, [-1..0, pos_integer()]) ->
type = override(rest, :non_neg_integer, opts)
concat(["-1", " | ", type])
(range = Enum.find(types, &match?(_..0, &1))) && pos_integer() in types ->
type = types
|> Kernel.--([range, pos_integer()])
|> override(:non_neg_integer, opts)
concat(["#{range.first}..-1", " | ", type])
true -> normal_inspect(types, opts)
end
end
defp type_has(types, query) do
if Enum.all?(query, &(&1 in types)), do: types -- query
end
defp override([], name, _opts) do
"#{name}()"
end
defp override(types, name, opts) do
concat(["#{name}()", " | ",
to_doc(%Type.Union{of: types}, opts)])
end
defp normal_inspect(list, opts) do
list
|> Enum.reverse
|> Enum.map(&to_doc(&1, opts))
|> Enum.intersperse(" | ")
|> concat
end
end
end
|
lib/type/union.ex
|
defmodule Joystick do
@moduledoc """
Simple wrapper to get Linux Joystick events.
# Usage
```
iex()> {:ok, js} = Joystick.start_link(0, self())
iex()> flush()
{:joystick, %Joystick.Event{number: 1, timestamp: 1441087318, type: :axis, value: -60}}
{:joystick, %Joystick.Event{number: 4, timestamp: 1441087318, type: :axis, value: -5}}
iex()> Joystick.info(js)
%{axes: 8, buttons: 11, name: 'Microsoft X-Box One pad', version: 131328}
```
"""
use GenServer
require Logger
defmodule Event do
@moduledoc false
defstruct [:number, :timestamp, :type, :value]
@compile {:inline, decode: 1}
@doc false
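    # Linux joystick API event types: 0x01 is a button event, 0x02 an axis
    # event; anything else (e.g. events carrying the 0x80 init flag) is
    # treated as an init event.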
def decode(%{timestamp: _, number: _, type: 0x01, value: _} = data) do
struct(__MODULE__, %{data | type: :button})
end
def decode(%{timestamp: _, number: _, type: 0x02, value: _} = data) do
struct(__MODULE__, %{data | type: :axis})
end
def decode(%{timestamp: _, number: _, type: _, value: _} = data) do
struct(__MODULE__, %{data | type: :init})
end
end
@doc """
Start listening to joystick events.
* `device` - a number pointing to the js file.
* for example 0 would evaluate to "/dev/input/js0"
* `listener` - pid to receive events
"""
def start_link(device, listener) do
GenServer.start_link(__MODULE__, [device, listener])
end
@doc "Get information about a joystick"
def info(joystick) do
GenServer.call(joystick, :info)
end
@doc """
Stop a running joystick instance.
"""
def stop(joystick, reason \\ :normal) do
GenServer.stop(joystick, reason)
end
@doc false
def init([device, listener]) do
{:ok, res} = start_js(device)
js = get_info(res)
:ok = poll(res)
{:ok, %{res: res, listener: listener, last_ts: 0, joystick: js}}
end
@doc false
def terminate(_, state) do
if state.res do
stop_js(state.res)
end
end
@doc false
def handle_call(:info, _, state), do: {:reply, state.joystick, state}
@doc false
def handle_info({:select, res, _ref, :ready_input}, %{last_ts: last_ts} = state) do
{time, raw_input} = :timer.tc(fn -> Joystick.receive_input(res) end)
case raw_input do
{:error, reason} -> {:stop, {:input_error, reason}, state}
input = %{timestamp: current_ts} when current_ts >= last_ts ->
event = {:joystick, Event.decode(input)}
send(state.listener, event)
:ok = poll(res)
# Logger.debug "Event (#{time}µs): #{inspect event}"
{:noreply, %{state | last_ts: current_ts}}
event = %{timestamp: current_ts} when current_ts < last_ts ->
Logger.warn "Got late event (#{time}µs): #{inspect event}"
{:noreply, %{state | last_ts: current_ts}}
end
end
@on_load :load_nif
@doc false
def load_nif do
nif_file = '#{:code.priv_dir(:joystick)}/joystick_nif'
case :erlang.load_nif(nif_file, 0) do
:ok -> :ok
{:error, {:reload, _}} -> :ok
{:error, reason} -> Logger.warn "Failed to load nif: #{inspect reason}"
end
end
## These functions get replaced by the nif.
@doc false
def start_js(_device), do: do_exit_no_nif()
@doc false
def stop_js(_handle), do: do_exit_no_nif()
@doc false
def poll(_handle), do: do_exit_no_nif()
@doc false
def receive_input(_handle), do: do_exit_no_nif()
@doc false
def get_info(_handle), do: do_exit_no_nif()
## Private stuff
defp do_exit_no_nif, do: exit("nif not loaded.")
end
|
lib/joystick.ex
|
defmodule Cldr.Unit.Test.ConversionData do
@moduledoc false
@conversion_test_data "test/support/data/conversion_test_data.txt"
@external_resource @conversion_test_data
@offset 1
def conversion_test_data do
@conversion_test_data
|> File.read!()
|> String.split("\n")
|> Enum.map(&String.trim/1)
end
def conversions do
conversion_test_data()
|> Enum.with_index()
|> Enum.map(&parse_test/1)
|> Enum.reject(&is_nil/1)
end
def parse_test({"", _}) do
nil
end
def parse_test({<<"#", _rest::binary>>, _}) do
nil
end
@fields [:category, :from, :to, :factor, :result]
def parse_test({test, index}) do
test
|> String.split(";")
|> Enum.map(&String.trim/1)
|> zip(@fields)
|> Enum.map(&transform/1)
|> Map.new()
|> Map.put(:line, index + @offset)
end
def zip(data, fields) do
fields
|> Enum.zip(data)
end
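  # Factors appear in the data as expressions such as "0.3048 * x" or
  # "1/12 * x": strip the "* x" suffix and thousands separators, then
  # resolve any fraction into a Ratio.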
def transform({:factor, factor}) do
factor =
factor
|> String.replace(" * x", "")
|> String.replace(",", "")
|> String.trim()
|> String.split("/")
|> resolve_factor
{:factor, factor}
rescue
ArgumentError ->
{:factor, factor}
end
@float ~r/^([-+]?[0-9]*)\.([0-9]+)([eE]([-+]?[0-9]+))?$/
def transform({:result, result}) do
result = String.replace(result, ",", "")
result =
case Regex.run(@float, result) do
[float, _integer, "0"] ->
{String.to_float(float), 0, 15}
[float, _integer, fraction] ->
{String.to_float(float), String.length(fraction), 15}
[float, integer, fraction, _, exp] ->
{
String.to_float(float),
rounding_from(integer, fraction, exp),
precision_from(integer, fraction, exp)
}
end
{:result, result}
end
def transform(other) do
other
end
def rounding_from(_integer, "0", <<"-", exp::binary>>) do
String.to_integer(exp)
end
def rounding_from(_integer, "0", _exp) do
0
end
def rounding_from(_integer, fraction, <<"-", exp::binary>>) do
String.length(fraction) + String.to_integer(exp)
end
def rounding_from(_integer, fraction, exp) do
if String.to_integer(exp) >= String.length(fraction) do
0
else
String.length(fraction)
end
end
def precision_from(integer, "0", _exp) do
String.length(integer) + 1
end
def precision_from(integer, fraction, <<"-", _exp::binary>>) do
String.length(fraction) + String.length(integer)
end
def precision_from(integer, fraction, exp) do
if String.length(fraction) < String.to_integer(exp) do
String.length(fraction) + String.length(integer)
else
String.length(fraction)
end
end
def resolve_factor([factor]) do
to_number(factor)
end
def resolve_factor([numerator, denominator]) do
numerator = to_number(numerator)
denominator = to_number(denominator)
Ratio.new(numerator, denominator)
end
def resolve_factor(other) do
other
end
def to_number(number_string) when is_binary(number_string) do
number_string
|> String.split(".")
|> to_number
end
def to_number([integer]) do
String.to_integer(integer)
end
def to_number([integer, fraction]) do
String.to_float(integer <> "." <> fraction)
end
def round(%Cldr.Unit{value: value} = unit, digits, significant) when is_number(value) do
value =
value
|> round_precision(significant)
|> round(digits)
%{unit | value: value}
end
def round(number, rounding) when rounding > 15 do
number
end
def round(float, digits) when is_float(float) do
Float.round(float, digits)
end
def round(other, _digits) do
other
end
def round_precision(0.0 = value, _) do
value
end
def round_precision(integer, round_digits) when is_integer(integer) do
number_of_digits = Cldr.Digits.number_of_integer_digits(integer)
p = Cldr.Math.power(10, number_of_digits) |> Decimal.new()
d = Decimal.new(integer)
d
|> Decimal.div(p)
|> Decimal.round(round_digits)
|> Decimal.mult(p)
|> Decimal.to_integer()
end
def round_precision(float, digits) when is_float(float) do
Cldr.Math.round_significant(float, digits)
end
end
|
test/support/parse_conversion_data.ex
|
defmodule FaultTree.Analyzer do
@moduledoc """
Handles building a fault tree out of an array of nodes.
"""
alias FaultTree.Analyzer.Probability
alias FaultTree.Gate
alias FaultTree.Node
require Logger
@doc """
Converts a `FaultTree` struct into a hierarchical map.
"""
def process(tree) do
{:ok, pid} = GenServer.start_link(Probability, nil)
result = tree
|> find_root()
|> process(tree, pid)
GenServer.stop(pid)
result
end
defp process(node, tree, pid) do
children = node
|> FaultTree.find_children(tree.nodes)
|> Enum.map(fn n -> process(n, tree, pid) end)
node
|> Map.put(:children, children)
|> probability(tree, pid)
|> Probability.save(pid)
end
@doc """
Calculate the probability of failure for a given node.
The node must have all of its children with defined probabilities.
For TRANSFER gates, the probability is copied from the source node. If the
source node was not calculated before the transfer gate is reached, the probability
will be calculated twice. This is inefficient, but mathematically correct.
"""
def probability(node = %Node{type: :basic}, _tree, _pid), do: node
def probability(node = %Node{probability: p}, _tree, _pid) when p != nil, do: node
def probability(node = %Node{}, tree, pid) do
p =
case node.type do
:or -> Gate.Or.probability(node.children)
:and -> Gate.And.probability(node.children)
:atleast -> Gate.AtLeast.probability(node.atleast, node.children)
:transfer ->
# Use the source probability for TRANSFER gates.
src = tree |> find_source_node(node.source)
case Probability.lookup(src, pid) do
nil -> src |> process(tree, pid) |> Map.get(:probability)
res -> res
end
end
Map.put(node, :probability, p)
end
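  # The root is the first node that has no parent and is neither a basic
  # event nor a transfer gate.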
defp find_root(tree), do: Enum.find(tree.nodes, &valid_root?/1)
defp valid_root?(%Node{type: :basic}), do: false
defp valid_root?(%Node{type: :transfer}), do: false
defp valid_root?(%Node{parent: nil}), do: true
defp valid_root?(_), do: false
defp find_source_node(tree, source) do
tree.nodes
|> Stream.filter(fn %Node{type: type} -> type != :transfer end)
|> Enum.find(fn %Node{name: name} -> name == source end)
end
end
|
lib/fault_tree/analyzer.ex
|
defmodule ReIntegrations.Credipronto.Mapper do
@moduledoc """
Module for mapping Credipronto's query and response payloads
"""
def query_out(params), do: Enum.reduce(params, %{}, &map_attributes/2)
def payload_in(params), do: Enum.reduce(params, %{}, &map_payload/2)
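  # Each attribute maps to Credipronto's Portuguese parameter name, with
  # dates, booleans, floats and decimals encoded along the way.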
defp map_attributes({:mutuary, value}, acc), do: Map.put(acc, :mutuario, value)
defp map_attributes({:birthday, value}, acc),
do: Map.put(acc, :data_nascimento, encode_date(value))
defp map_attributes({:include_coparticipant, value}, acc),
do: Map.put(acc, :incluir_co, encode_boolean(value))
defp map_attributes({:net_income, value}, acc),
do: Map.put(acc, :renda_liquida, encode_decimal(value))
defp map_attributes({:amortization, value}, acc),
do: Map.put(acc, :amortizacao, encode_boolean(value))
defp map_attributes({:annual_interest, value}, acc),
do: Map.put(acc, :juros_anual, encode_float(value))
defp map_attributes({:birthday_coparticipant, value}, acc),
do: Map.put(acc, :data_nascimento_co, encode_date(value))
defp map_attributes({:calculate_tr, value}, acc),
do: Map.put(acc, :calcular_tr, encode_boolean(value))
defp map_attributes({:evaluation_rate, value}, acc),
do: Map.put(acc, :tarifa_avaliacao, encode_decimal(value))
defp map_attributes({:fundable_value, value}, acc),
do: Map.put(acc, :valor_financiavel, encode_decimal(value))
defp map_attributes({:home_equity_annual_interest, value}, acc),
do: Map.put(acc, :juros_anual_home_equity, encode_float(value))
defp map_attributes({:insurer, value}, acc), do: Map.put(acc, :seguradora, value)
defp map_attributes({:itbi_value, value}, acc),
do: Map.put(acc, :valor_itbi, encode_decimal(value))
defp map_attributes({:listing_price, value}, acc),
do: Map.put(acc, :valor_imovel, encode_decimal(value))
defp map_attributes({:listing_type, value}, acc), do: Map.put(acc, :tipo_imovel, value)
defp map_attributes({:net_income_coparticipant, value}, acc),
do: Map.put(acc, :renda_liquida_co, encode_decimal(value))
defp map_attributes({:product_type, value}, acc), do: Map.put(acc, :tipo_produto, value)
defp map_attributes({:rating, value}, acc), do: Map.put(acc, :rating, to_string(value))
defp map_attributes({:sum, value}, acc), do: Map.put(acc, :somar, encode_boolean(value))
defp map_attributes({:term, value}, acc), do: Map.put(acc, :prazo, to_string(value))
defp encode_date(nil), do: ""
defp encode_date(%Date{} = date) do
day =
date.day
|> to_string()
|> String.pad_leading(2, "0")
month =
date.month
|> to_string()
|> String.pad_leading(2, "0")
year =
date.year
|> to_string()
|> String.pad_leading(4, "0")
"#{day}/#{month}/#{year}"
end
defp encode_boolean(true), do: "S"
defp encode_boolean(_), do: "N"
defp encode_decimal(nil), do: ""
defp encode_decimal(%Decimal{} = decimal) do
decimal
|> Decimal.round(2)
|> Decimal.mult(100)
|> Decimal.to_integer()
|> CurrencyFormatter.format("BRL", keep_decimals: true)
|> trim_currency()
end
defp encode_float(float), do: :erlang.float_to_binary(float, decimals: 10)
defp trim_currency("R$" <> rest), do: rest
defp map_payload({"cem", value}, acc), do: Map.put(acc, :cem, value)
defp map_payload({"cet", value}, acc), do: Map.put(acc, :cet, value)
defp map_payload({_key, _value}, acc), do: acc
end
|
apps/re_integrations/lib/simulators/credipronto/mapper.ex
|
defmodule ETFs.Stream do
defstruct path: nil, format: :v3
@fourcc "ETFs"
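  # On-disk layout (v3): the "ETFs" fourcc, a 32-bit record count and a
  # 64-bit table-of-contents offset, followed by length-prefixed
  # term_to_binary records and a table of 64-bit record offsets.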
@doc false
def open(path, opts \\ []) do
format = Keyword.get(opts, :format, :v3)
%__MODULE__{path: path, format: format}
end
  def record_count(%__MODULE__{path: path, format: :v3}) do
    with {:ok, f} <- File.open(path, [:read]),
         @fourcc <- IO.binread(f, 4),
         <<record_count::integer-size(32)>> <- IO.binread(f, 4),
         :ok = File.close(f) do
      {:ok, record_count}
    else
      {:error, _reason} = err -> err
      other -> {:error, other}
    end
  end
def stream_all_records!(%__MODULE__{path: path, format: :v3}) do
Stream.resource(
fn ->
{:ok, io} = File.open(path, [:read])
@fourcc = IO.binread(io, 4)
<<record_count::integer-size(32)>> = IO.binread(io, 4)
<<_toc_pos::integer-size(64)>> = IO.binread(io, 8)
{io, record_count}
end,
fn
{io, 0} ->
{:halt, io}
{io, records_left} ->
with <<record_len::integer-size(32)>> <- IO.binread(io, 4),
record when is_binary(record) <- IO.binread(io, record_len) do
{[record], {io, records_left - 1}}
else
:eof -> {:halt, io}
{:error, _} -> {:halt, io}
end
end,
&File.close/1
)
|> Stream.map(&:erlang.binary_to_term/1)
end
def slice_records(%__MODULE__{path: path, format: :v3}) do
{:ok, io} = File.open(path, [:read])
@fourcc = IO.binread(io, 4)
<<record_count::integer-size(32)>> = IO.binread(io, 4)
<<toc_pos::integer-size(64)>> = IO.binread(io, 8)
f = fn start, count ->
first_record_pos_pos = toc_pos + (start * 8)
{:ok, <<first_record_pos::integer-size(64)>>} = :file.pread(io, first_record_pos_pos, 8)
:file.position(io, {:bof, first_record_pos})
records = for _i <- (0..(count - 1)) do
<<record_len::integer-size(32)>> = IO.binread(io, 4)
IO.binread(io, record_len) |> :erlang.binary_to_term
end
File.close(io)
records
end
{:ok, record_count, f}
end
def collect_into(%__MODULE__{path: path, format: :v3}) do
{:ok, io} = File.open(path, [:write])
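    # Write a placeholder header; the real record count and TOC position
    # are patched in once the collector receives :done.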
IO.binwrite(io, [@fourcc, <<0::integer-size(32)>>, <<0::integer-size(64)>>])
collector_fun = fn
{io, pos, toc}, {:cont, record} ->
record_bin = :erlang.term_to_binary(record, [:compressed, minor_version: 2])
msg = [<<byte_size(record_bin)::integer-size(32)>>, record_bin]
IO.binwrite(io, msg)
msg_size = 4 + byte_size(record_bin)
{io, pos + msg_size, [{pos, msg_size} | toc]}
{io, toc_pos, toc}, :done ->
toc
|> Enum.reverse
|> Enum.each(fn {pos, _msg_size} ->
IO.binwrite(io, [
<<pos::integer-size(64)>>
])
end)
:file.position(io, {:bof, 4})
IO.binwrite(io, <<length(toc)::integer-size(32)>>)
IO.binwrite(io, <<toc_pos::integer-size(64)>>)
File.close(io)
_set, :halt ->
File.close(io)
end
{{io, 16, []}, collector_fun}
end
end
defimpl Enumerable, for: ETFs.Stream do
def reduce(etfs, acc, fun) do
s = ETFs.Stream.stream_all_records!(etfs)
Enumerable.reduce(s, acc, fun)
end
def slice(etfs), do:
ETFs.Stream.slice_records(etfs)
def member?(etfs, element) do
s = ETFs.Stream.stream_all_records!(etfs)
Enumerable.member?(s, element)
end
def count(etfs) do
ETFs.Stream.record_count(etfs)
end
end
defimpl Collectable, for: ETFs.Stream do
def into(etfs) do
ETFs.Stream.collect_into(etfs)
end
end
|
lib/etfs/stream.ex
|
defmodule Discordian.Calendar do
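  @moduledoc """
  A `Calendar` implementation of the Discordian calendar: five seasons of
  73 days each, a five-day week, and St. Tib's Day inserted in leap years.
  """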
@behaviour Calendar
  @type year :: integer()
  @type month :: 1..5
  @type day :: 1..73
  # 0 represents St. Tib's Day, which falls outside the five-day week.
  @type day_of_week :: 0..5
  @type hour :: 0..23
  @type minute :: 0..59
  @type second :: 0..60
  @type microsecond :: 0..999_999
@seconds_per_minute 60
@seconds_per_hour 60*60
@seconds_per_day 24*60*60
@microseconds_per_second 1000000
@months_in_year 5
@doc """
Converts the date into a string according to the calendar.
"""
@impl true
@spec date_to_string(year, month, day) :: String.t()
def date_to_string(year, month, day) do
zero_pad(year, 4) <> "-" <> zero_pad(month, 2) <> "-" <> zero_pad(day, 2)
end
@doc """
Converts the datetime (with time zone) into a string according to the calendar.
"""
@impl true
def datetime_to_string(
year,
month,
day,
hour,
minute,
second,
microsecond,
time_zone,
zone_abbr,
utc_offset,
std_offset
) do
date_to_string(year, month, day) <>
" " <>
time_to_string(hour, minute, second, microsecond) <>
offset_to_string(utc_offset, std_offset, time_zone) <>
zone_to_string(utc_offset, std_offset, zone_abbr, time_zone)
end
@doc """
Calculates the day and era from the given year, month, and day.
"""
@spec day_of_era(year, month, day) :: {day :: pos_integer(), era :: 0..1}
@impl true
def day_of_era(year, month, day)
when is_integer(year) and is_integer(month) and is_integer(day) and year > 0 do
day = Discordian.discordian_to_days(year, month, day)
{day, 1}
end
def day_of_era(year, month, day)
when is_integer(year) and is_integer(month) and is_integer(day) and year < 1 do
day = Discordian.discordian_to_days(year, month, day)
{day, 0}
end
@doc """
Calculates the day of the week from the given year, month, and day.
"""
@impl true
@spec day_of_week(year, month, day) :: day_of_week
def day_of_week(year, month, day) do
if leap_year?(year) and month == 1 and day == 1 do
0
else
case rem(day_of_year(year, month, day), 5) do
0 -> 5
x -> x
end
end
end
@doc """
Calculates the day of the year from the given year, month, and day.
"""
@impl true
@spec day_of_year(year, month, day) :: 1..366
def day_of_year(year, month, day) do
Discordian.discordian_day_year(year, month, day)
end
@doc """
Define the rollover moment for the given calendar.
"""
@impl true
def day_rollover_relative_to_midnight_utc(), do: {0,1}
@doc """
Returns how many days there are in the given year-month.
"""
@impl true
@spec days_in_month(Calendar.year(), Calendar.month()) :: Calendar.day()
def days_in_month(year, month), do: Discordian.discordian_month_lenght(year, month)
@doc """
Returns true if the given year is a leap year.
"""
@impl true
@spec leap_year?(Calendar.year()) :: boolean
def leap_year?(year), do: Discordian.is_leap_year(year)
@doc """
Returns how many months there are in the given year.
"""
@impl true
@spec months_in_year(year) :: 5
def months_in_year(_year), do: @months_in_year
@doc """
Converts iso_days/0 to the Calendar's datetime format.
"""
@impl true
@spec naive_datetime_from_iso_days(Calendar.iso_days()) ::
{Calendar.year(), Calendar.month(), Calendar.day(), Calendar.hour(), Calendar.minute(),
Calendar.second(), Calendar.microsecond()}
def naive_datetime_from_iso_days({days, day_fraction}) do
{year, month, day} = Discordian.iso_days_to_discordian(days)
{hour, minute, second, microsecond} = time_from_day_fraction(day_fraction)
{year, month, day, hour, minute, second, microsecond}
end
@doc """
Converts the given datetime (without time zone) into the iso_days/0 format.
"""
@impl true
@spec naive_datetime_to_iso_days(
Calendar.year(),
Calendar.month(),
Calendar.day(),
Calendar.hour(),
Calendar.minute(),
Calendar.second(),
Calendar.microsecond()
) :: Calendar.iso_days()
def naive_datetime_to_iso_days(year, month, day, hour, minute, second, microsecond) do
{Discordian.discordian_to_days(year, month, day),
time_to_day_fraction(hour, minute, second, microsecond)}
end
@doc """
Converts the datetime (without time zone) into a string according to the calendar.
"""
@impl true
@spec naive_datetime_to_string(year, month, day, hour, minute, second, microsecond) ::
String.t()
def naive_datetime_to_string(year, month, day, hour, minute, second, microsecond) do
date_to_string(year, month, day) <> " " <> time_to_string(hour, minute, second, microsecond)
end
@doc """
Parses the string representation for a date returned by date_to_string/3 into a date-tuple.
"""
@impl true
def parse_date(string_date) do
date_list = String.split(string_date, "-")
date_tuple = date_list |> Enum.map(&String.to_integer/1) |> List.to_tuple()
{:ok, date_tuple}
end
@doc """
Parses the string representation for a naive datetime returned by naive_datetime_to_string/7 into a naive-datetime-tuple.
"""
@impl true
def parse_naive_datetime(string_date) do
date_list = String.split(string_date, "-")
date_tuple = date_list |> Enum.map(&String.to_integer/1) |> List.to_tuple()
{:ok, date_tuple}
end
@doc """
Parses the string representation for a time returned by time_to_string/4 into a time-tuple.
"""
@impl true
def parse_time(string_date) do
date_list = String.split(string_date, "-")
date_tuple = date_list |> Enum.map(&String.to_integer/1) |> List.to_tuple()
{:ok, date_tuple}
end
@doc """
Parses the string representation for a datetime returned by datetime_to_string/11 into a datetime-tuple.
"""
@impl true
def parse_utc_datetime(string_date) do
date_list = String.split(string_date, "-")
date_tuple = date_list |> Enum.map(&String.to_integer/1) |> List.to_tuple()
{:ok, date_tuple}
end
@doc """
Calculates the quarter of the year from the given year, month, and day.
"""
@impl true
@spec quarter_of_year(year, month, day) :: 1..4
def quarter_of_year(year, month, day)
when is_integer(year) and is_integer(month) and is_integer(day) do
trunc((month-1)/1.25) + 1
end
@doc """
Converts day_fraction/0 to the Calendar's time format.
"""
@impl true
@spec time_from_day_fraction(Calendar.day_fraction()) ::
{Calendar.hour(), Calendar.minute(), Calendar.second(), Calendar.microsecond()}
def time_from_day_fraction({parts_in_day, parts_per_day}) do
total_microseconds =
div(parts_in_day * @seconds_per_day * @microseconds_per_second, parts_per_day)
{hours, rest_microseconds1} =
{div(total_microseconds, @seconds_per_hour * @microseconds_per_second),
rem(total_microseconds, @seconds_per_hour * @microseconds_per_second)}
{minutes, rest_microseconds2} =
{div(rest_microseconds1, @seconds_per_minute * @microseconds_per_second),
rem(rest_microseconds1, @seconds_per_minute * @microseconds_per_second)}
{seconds, microseconds} = {div(rest_microseconds2, @microseconds_per_second),
rem(rest_microseconds2, @microseconds_per_second)}
{hours, minutes, seconds, {microseconds, 6}}
end
@doc """
Converts the given time to the day_fraction/0 format.
"""
@impl true
@spec time_to_day_fraction(
Calendar.hour(),
Calendar.minute(),
Calendar.second(),
Calendar.microsecond()
) :: Calendar.day_fraction()
def time_to_day_fraction(hour, minute, second, {microsecond, _}) do
combined_seconds = hour * @seconds_per_hour + minute * @seconds_per_minute + second
{combined_seconds * @microseconds_per_second + microsecond,
@seconds_per_day * @microseconds_per_second}
end
@doc """
Converts the time into a string according to the calendar.
"""
@impl true
  def time_to_string(hour, minute, second, {microsecond, _precision}) do
    # `Calendar` passes microseconds as a {value, precision} tuple.
    time_to_string(hour, minute, second, microsecond)
  end
  def time_to_string(hour, minute, second, microsecond) do
    Integer.to_string(hour) <> ":" <> Integer.to_string(minute) <> ":" <> Integer.to_string(second) <> ":" <> Integer.to_string(microsecond)
  end
@doc """
Should return true if the given date describes a proper date in the calendar.
"""
@impl true
def valid_date?(year, month, day), do: Discordian.is_valid_discordian_date({year, month, day})
@doc """
Should return true if the given time describes a proper time in the calendar.
"""
@impl true
def valid_time?(hour, minute, second, {microsecond, precision}) do
hour in 0..23 and minute in 0..59 and second in 0..60 and
microsecond in 0..999_999 and precision in 0..6
end
@doc """
Calculates the year and era from the given year.
"""
@spec year_of_era(year) :: {year, era :: 0..1}
@impl true
def year_of_era(year) when is_integer(year) and year > 0, do: {year, 1}
def year_of_era(year) when is_integer(year) and year < 1, do: {abs(year) + 1, 0}
defp offset_to_string(utc, std, zone, format \\ :extended)
defp offset_to_string(0, 0, "Etc/UTC", _format), do: "Z"
defp offset_to_string(utc, std, _zone, format) do
total = utc + std
second = abs(total)
minute = second |> rem(3600) |> div(60)
hour = div(second, 3600)
format_offset(total, hour, minute, format)
end
defp format_offset(total, hour, minute, :extended) do
sign(total) <> zero_pad(hour, 2) <> ":" <> zero_pad(minute, 2)
end
defp format_offset(total, hour, minute, :basic) do
sign(total) <> zero_pad(hour, 2) <> zero_pad(minute, 2)
end
defp zone_to_string(0, 0, _abbr, "Etc/UTC"), do: ""
defp zone_to_string(_, _, abbr, zone), do: " " <> abbr <> " " <> zone
defp sign(total) when total < 0, do: "-"
defp sign(_), do: "+"
defp zero_pad(val, count) do
num = Integer.to_string(val)
:binary.copy("0", count - byte_size(num)) <> num
end
end
|
lib/discordian/calendar.ex
|
defmodule AWS.Appflow do
@moduledoc """
Welcome to the Amazon AppFlow API reference. This guide is for developers
who need detailed information about the Amazon AppFlow API operations, data
types, and errors.
Amazon AppFlow is a fully managed integration service that enables you to
securely transfer data between software as a service (SaaS) applications
like Salesforce, Marketo, Slack, and ServiceNow, and AWS services like
Amazon S3 and Amazon Redshift.
Use the following links to get started on the Amazon AppFlow API:
<ul> <li>
[Actions](https://docs.aws.amazon.com/appflow/1.0/APIReference/API_Operations.html):
An alphabetical list of all Amazon AppFlow API operations.
</li> <li> [Data
types](https://docs.aws.amazon.com/appflow/1.0/APIReference/API_Types.html):
An alphabetical list of all Amazon AppFlow data types.
</li> <li> [Common
parameters](https://docs.aws.amazon.com/appflow/1.0/APIReference/CommonParameters.html):
Parameters that all Query operations can use.
</li> <li> [Common
errors](https://docs.aws.amazon.com/appflow/1.0/APIReference/CommonErrors.html):
Client and server errors that all operations can return.
</li> </ul> If you're new to Amazon AppFlow, we recommend that you review
the [Amazon AppFlow User
Guide](https://docs.aws.amazon.com/appflow/latest/userguide/what-is-appflow.html).
Amazon AppFlow API users can use vendor-specific mechanisms for OAuth, and
include applicable OAuth attributes (such as `auth-code` and `redirecturi`)
with the connector-specific `ConnectorProfileProperties` when creating a
new connector profile using Amazon AppFlow API operations. For example,
Salesforce users can refer to the [ *Authorize Apps with OAuth*
](https://help.salesforce.com/articleView?id=remoteaccess_authenticate.htm)
documentation.
"""
@doc """
Creates a new connector profile associated with your AWS account. There is
a soft quota of 100 connector profiles per AWS account. If you need more
connector profiles than this quota allows, you can submit a request to the
Amazon AppFlow team through the Amazon AppFlow support channel.
"""
def create_connector_profile(client, input, options \\ []) do
path_ = "/create-connector-profile"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Enables your application to create a new flow using Amazon AppFlow. You
must create a connector profile before calling this API. Please note that
the Request Syntax below shows syntax for multiple destinations, however,
you can only transfer data to one item in this list at a time. Amazon
AppFlow does not currently support flows to multiple destinations at once.
"""
def create_flow(client, input, options \\ []) do
path_ = "/create-flow"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Enables you to delete an existing connector profile.
"""
def delete_connector_profile(client, input, options \\ []) do
path_ = "/delete-connector-profile"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Enables your application to delete an existing flow. Before deleting the
flow, Amazon AppFlow validates the request by checking the flow
configuration and status. You can delete flows one at a time.
"""
def delete_flow(client, input, options \\ []) do
path_ = "/delete-flow"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Provides details regarding the entity used with the connector, with a
description of the data model for each entity.
"""
def describe_connector_entity(client, input, options \\ []) do
path_ = "/describe-connector-entity"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Returns a list of `connector-profile` details matching the provided
`connector-profile` names and `connector-types`. Both input lists are
optional, and you can use them to filter the result.
If no names or `connector-types` are provided, returns all connector
profiles in a paginated form. If there is no match, this operation returns
an empty list.
"""
def describe_connector_profiles(client, input, options \\ []) do
path_ = "/describe-connector-profiles"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Describes the connectors vended by Amazon AppFlow for specified connector
types. If you don't specify a connector type, this operation describes all
connectors vended by Amazon AppFlow. If there are more connectors than can
be returned in one page, the response contains a `nextToken` object, which
can be passed in to the next call to the `DescribeConnectors` API
operation to retrieve the next page.
"""
def describe_connectors(client, input, options \\ []) do
path_ = "/describe-connectors"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Provides a description of the specified flow.
"""
def describe_flow(client, input, options \\ []) do
path_ = "/describe-flow"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Fetches the execution history of the flow.
"""
def describe_flow_execution_records(client, input, options \\ []) do
path_ = "/describe-flow-execution-records"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Returns the list of available connector entities supported by Amazon
AppFlow. For example, you can query Salesforce for *Account* and
*Opportunity* entities, or query ServiceNow for the *Incident* entity.
"""
def list_connector_entities(client, input, options \\ []) do
path_ = "/list-connector-entities"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Lists all of the flows associated with your account.
"""
def list_flows(client, input, options \\ []) do
path_ = "/list-flows"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Retrieves the tags that are associated with a specified flow.
"""
def list_tags_for_resource(client, resource_arn, options \\ []) do
path_ = "/tags/#{URI.encode(resource_arn)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Activates an existing flow. For on-demand flows, this operation runs the
flow immediately. For schedule and event-triggered flows, this operation
activates the flow.
"""
def start_flow(client, input, options \\ []) do
path_ = "/start-flow"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Deactivates the existing flow. For on-demand flows, this operation returns
an `unsupportedOperationException` error message. For schedule and
event-triggered flows, this operation deactivates the flow.
"""
def stop_flow(client, input, options \\ []) do
path_ = "/stop-flow"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Applies a tag to the specified flow.
"""
def tag_resource(client, resource_arn, input, options \\ []) do
path_ = "/tags/#{URI.encode(resource_arn)}"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Removes a tag from the specified flow.
"""
def untag_resource(client, resource_arn, input, options \\ []) do
path_ = "/tags/#{URI.encode(resource_arn)}"
headers = []
{query_, input} =
[
{"tagKeys", "tagKeys"},
]
|> AWS.Request.build_params(input)
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Updates a given connector profile associated with your account.
"""
def update_connector_profile(client, input, options \\ []) do
path_ = "/update-connector-profile"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Updates an existing flow.
"""
def update_flow(client, input, options \\ []) do
path_ = "/update-flow"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "appflow"}
host = build_host("appflow", client)
url = host
|> build_url(path, client)
|> add_query(query, client)
additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(client, method, url, payload, headers, options, success_status_code)
end
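  # A response is successful when it matches the explicit success status
  # code, or any of 200/202/204 when none is given.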
defp perform_request(client, method, url, payload, headers, options, success_status_code) do
case AWS.Client.request(client, method, url, payload, headers, options) do
{:ok, %{status_code: status_code, body: body} = response}
when is_nil(success_status_code) and status_code in [200, 202, 204]
when status_code == success_status_code ->
body = if(body != "", do: decode!(client, body))
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, [], _client) do
url
end
defp add_query(url, query, client) do
querystring = encode!(client, query, :query)
"#{url}?#{querystring}"
end
defp encode!(client, payload, format \\ :json) do
AWS.Client.encode!(client, payload, format)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/appflow.ex
|
defmodule Evolvr.Bio do
@moduledoc """
A collection of bioinformatics algorithms.
"""
@doc"""
Returns a list of kmers.
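## Examples
Illustrative:
    iex> Evolvr.Bio.get_kmers("ACGT", 2)
    ["AC", "CG", "GT"]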
"""
def get_kmers(seq, k) do
ks = get_all_kmers(seq, k)
Enum.filter(ks, fn(x) -> String.length(x) == k end)
end
defp get_all_kmers("", _k) do
[]
end
  defp get_all_kmers(seq, k) do
    {head, _rest} = String.split_at(seq, k)
    {_first, tail} = String.split_at(seq, 1)
    [head | get_all_kmers(tail, k)]
  end
@doc"""
Returns number of times a pattern occurs in one sequence,
including overlapping occurences.
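## Examples
Illustrative:
    iex> Evolvr.Bio.pattern_count("GCGCG", "GCG")
    2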
"""
def pattern_count(seq, pattern) do
k = String.length(pattern)
kmers = get_kmers(seq, k)
count_pattern(kmers, pattern, 0)
end
defp count_pattern([], _pattern, total) do
total
end
defp count_pattern(kmers, pattern, total) do
[head | tail] = kmers
if head == pattern do
count_pattern(tail, pattern, total+1)
else
count_pattern(tail, pattern, total)
end
end
@doc"""
Returns a list of the most frequent words (kmers) in a sequence.
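## Examples
Illustrative:
    iex> Evolvr.Bio.frequent_words("AAAT", 2)
    ["AA"]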
"""
def frequent_words(seq, k) do
frequent_words_map(seq, k, %{})
|> most_frequent(k)
|> to_string_list
end
defp clean_map(map,k) do
map |> Enum.filter(fn(x) -> elem(x,0) |> Atom.to_string |> String.length == k end)
end
  defp most_frequent(map, k) do
    # Temporary cleanup: get_head_kmer also emits trailing kmers shorter
    # than k, so drop them before taking the maximum frequency.
    map = clean_map(map, k)
    max_freq = Enum.max_by(map, fn(x) -> elem(x, 1) end) |> elem(1)
    map |> Enum.filter(fn(x) -> elem(x, 1) == max_freq end)
  end
  defp to_string_list(kmers) do
    # After most_frequent/2 this is a keyword list of {kmer, count} pairs.
    kmers |> Keyword.keys() |> Enum.map(fn(x) -> Atom.to_string(x) |> String.upcase end)
  end
defp get_head_kmer(seq, k) do
seq |> String.slice(0..k-1) |> String.downcase |> String.to_atom
end
defp frequent_words_map("", _k, map) do
map
end
  defp frequent_words_map(seq, k, map) do
    tail = String.split_at(seq, 1) |> elem(1)
    kmer = get_head_kmer(seq, k)
    map = Map.update(map, kmer, 1, &(&1 + 1))
    frequent_words_map(tail, k, map)
  end
end
|
lib/evolvr/bio.ex
|
defmodule Books.PassiveAbilities do
@moduledoc """
The PassiveAbilities context.
"""
import Ecto.Query, warn: false
alias Books.Repo
alias Books.PassiveAbilities.PassiveAbility
@doc """
Returns the list of passive_abilities.
## Examples
iex> list_passive_abilities()
[%PassiveAbility{}, ...]
"""
def list_passive_abilities do
Repo.all(PassiveAbility)
end
@doc """
Gets a single passive_ability.
Raises `Ecto.NoResultsError` if the Passive ability does not exist.
## Examples
iex> get_passive_ability!(123)
%PassiveAbility{}
iex> get_passive_ability!(456)
** (Ecto.NoResultsError)
"""
def get_passive_ability!(id), do: Repo.get!(PassiveAbility, id)
@doc """
Creates a passive_ability.
## Examples
iex> create_passive_ability(%{field: value})
{:ok, %PassiveAbility{}}
iex> create_passive_ability(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_passive_ability(attrs \\ %{}) do
%PassiveAbility{}
|> PassiveAbility.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a passive_ability.
## Examples
iex> update_passive_ability(passive_ability, %{field: new_value})
{:ok, %PassiveAbility{}}
iex> update_passive_ability(passive_ability, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_passive_ability(%PassiveAbility{} = passive_ability, attrs) do
passive_ability
|> PassiveAbility.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a passive_ability.
## Examples
iex> delete_passive_ability(passive_ability)
{:ok, %PassiveAbility{}}
iex> delete_passive_ability(passive_ability)
{:error, %Ecto.Changeset{}}
"""
def delete_passive_ability(%PassiveAbility{} = passive_ability) do
Repo.delete(passive_ability)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking passive_ability changes.
## Examples
iex> change_passive_ability(passive_ability)
%Ecto.Changeset{data: %PassiveAbility{}}
"""
def change_passive_ability(%PassiveAbility{} = passive_ability, attrs \\ %{}) do
PassiveAbility.changeset(passive_ability, attrs)
end
end
|
lib/books/passive_abilities.ex
|
defmodule CipherSuites do
@moduledoc """
Support OpenSSL-style cipher suite selection in Erlang/Elixir applications.
"""
@doc """
Applies the specified OpenSSL cipher selection string to the list of known
cipher suites and returns the resulting list.
The result can be used in the `:ciphers` option for `:ssl` client and
server connections, as well as in most TLS-capable applications, such as
Ranch, Cowboy, Plug and Phoenix.
Example:
iex> CipherSuites.select("aRSA+kEECDH+AES256:!SHA")
[{:ecdhe_rsa, :aes_256_gcm, :aead, :sha384},
{:ecdhe_rsa, :aes_256_cbc, :sha384, :sha384}]
Please refer to the
[OpenSSL man page](https://www.openssl.org/docs/manmaster/apps/ciphers.html)
for more information about the syntax of the cipher selection string.
"""
@spec select(binary) :: [:ssl.ciphersuite()]
def select(expression) do
expression
|> String.split([":", ",", " "], trim: true)
|> filter()
end
@doc """
Returns all known cipher suites, as reported by the `:ssl` module.
Note that this function returns all known cipher suites, including null
ciphers, which is different from what `select("ALL")` returns!
"""
@spec all() :: [:ssl.ciphersuite()]
def all do
Application.get_env(:cipher_suites, :all_suites, :ssl.cipher_suites(:all))
end
@doc """
Returns the default cipher suites, as reported by the `:ssl` module.
"""
@spec default() :: [:ssl.ciphersuite()]
def default do
Application.get_env(:cipher_suites, :default_suites, :ssl.cipher_suites())
end
@doc """
Expands a cipher suite spec string in OpenSSL format in a Phoenix Endpoint
configuration. For use in the Endpoint's `init/2` callback, e.g.:
# Inside config.exs
config :my_app, MyAppWeb.Endpoint,
https: [
port: 4001,
certfile: "priv/cert.pem",
keyfile: "priv/key.pem",
ciphers: "aRSA+kEECDH+AES256:!SHA"
]
# Inside MyAppWeb.Endpoint...
def init(_key, config) do
{:ok, CipherSuites.init_phoenix_endpoint(config)}
end
"""
@spec init_phoenix_endpoint(Keyword.t()) :: Keyword.t()
def init_phoenix_endpoint(config) do
if get_in(config, [:https, :ciphers]) do
update_in(config, [:https, :ciphers], fn
spec when is_binary(spec) -> CipherSuites.select(spec)
list -> list
end)
else
config
end
end
# Private
@high [
:aes_128_cbc,
:aes_128_gcm,
:aes_256_cbc,
:aes_256_gcm,
:chacha20_poly1305
]
@medium [:rc4_128, :idea_cbc, :"3des_ede_cbc"]
@low [:des40_cbc, :des_cbc]
@key_size %{
null: 0,
rc4_128: 128,
idea_cbc: 128,
des40_cbc: 40,
des_cbc: 56,
    # OpenSSL treats 3DES as having an effective key size of 112 bits
    # instead of 168, due to known weaknesses
    "3des_ede_cbc": 112,
aes_128_cbc: 128,
aes_256_cbc: 256,
aes_128_gcm: 128,
aes_256_gcm: 256,
chacha20_poly1305: 256
}
@more_openssl_suites %{
# TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
"ECDHE-RSA-CHACHA20-POLY1305" => {:ecdhe_rsa, :chacha20_poly1305, :aead, :sha256},
# TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
"ECDHE-ECDSA-CHACHA20-POLY1305" => {:ecdhe_ecdsa, :chacha20_poly1305, :aead, :sha256},
# TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256
"DHE-RSA-CHACHA20-POLY1305" => {:dhe_rsa, :chacha20_poly1305, :aead, :sha256}
}
# DEFAULT: "When used, this must be the first cipherstring specified.
# This [...] is normally ALL:!COMPLEMENTOFDEFAULT:!eNULL".
defp filter(["DEFAULT" | tokens]) do
filter(["ALL", "!COMPLEMENTOFDEFAULT", "!eNULL" | tokens])
end
# Modifiers: "Each cipher string can be optionally preceded by the
# characters !, - or +."
# This function also handles special cipher strings starting with "@".
defp filter(tokens) do
result =
Enum.reduce(tokens, %{included: [], excluded: []}, fn
"@STRENGTH", state ->
sort_by_strength(state)
# "@SECLEVEL=" <> n ->
"!" <> cipher, state ->
exclude(cipher, state)
"-" <> cipher, state ->
delete(cipher, state)
"+" <> cipher, state ->
move_to_end(cipher, state)
cipher, state ->
append(cipher, state)
end)
result.included -- result.excluded
end
# "If ! is used then the ciphers are permanently deleted from the list.
# The ciphers deleted can never reappear in the list even if they are
# explicitly stated."
defp exclude(cipher, state) do
%{state | excluded: merge(state.excluded, cipher_string(cipher))}
end
# "If - is used then the ciphers are deleted from the list, but some or
# all of the ciphers can be added again by later options."
defp delete(cipher, state) do
%{state | included: state.included -- cipher_string(cipher)}
end
# "If + is used then the ciphers are moved to the end of the list. This
# option doesn't add any new ciphers it just moves matching existing
# ones."
defp move_to_end(cipher, state) do
ciphers = cipher_string(cipher)
{last, first} = Enum.split_with(state.included, &(&1 in ciphers))
%{state | included: first ++ last}
end
# "If none of these characters is present then the string is just
# interpreted as a list of ciphers to be appended to the current
# preference list. If the list includes any ciphers already present they
# will be ignored: that is they will not moved to the end of the list."
defp append(cipher, state) do
%{state | included: merge(state.included, cipher_string(cipher))}
end
# "The cipher string @STRENGTH can be used at any point to sort the
# current cipher list in order of encryption algorithm key length."
defp sort_by_strength(state) do
sorted =
state.included
|> Enum.map(fn suite -> {@key_size[elem(suite, 1)], suite} end)
|> Enum.sort_by(&elem(&1, 0), &>=/2)
|> Enum.map(&elem(&1, 1))
%{state | included: sorted}
end
# "Lists of cipher suites can be combined in a single cipher string
# using the + character."
defp cipher_string(cipher) do
case openssl_suite(cipher) do
nil ->
cipher
|> String.split("+")
|> find(all())
suite ->
[suite]
end
end
# Select only those cipher suites from `acc` that match all the given
# criteria
defp find([], acc), do: acc
defp find([cipher | more], acc) do
ciphers = find(cipher)
find(more, Enum.filter(acc, &(&1 in ciphers)))
end
defp find("ALL"), do: all() -- find("COMPLEMENTOFALL")
defp find("COMPLEMENTOFDEFAULT"), do: find("ALL") -- default()
defp find("COMPLEMENTOFALL"), do: cipher_string("eNULL")
defp find("HIGH"), do: all_with_cipher(@high)
defp find("MEDIUM"), do: all_with_cipher(@medium)
defp find("LOW"), do: all_with_cipher(@low)
defp find("eNULL"), do: all_with_cipher(:null)
defp find("NULL"), do: find("eNULL")
defp find("aNULL"), do: all_with_key_exchange([:dh_anon, :ecdh_anon])
defp find("kRSA"), do: all_with_key_exchange(:rsa)
defp find("RSA"), do: find("kRSA")
defp find("aRSA"), do: all_with_key_exchange([:rsa, :dhe_rsa, :srp_rsa, :ecdhe_rsa])
defp find("kDHr"), do: all_with_key_exchange(:dh_rsa)
defp find("kDHd"), do: all_with_key_exchange(:dh_dss)
defp find("kDH"), do: all_with_key_exchange([:dh_rsa, :dh_dss])
defp find("kDHE"), do: all_with_key_exchange([:dhe_rsa, :dhe_dss, :dh_anon, :dhe_psk])
defp find("kEDH"), do: find("kDHE")
defp find("DH"),
do: all_with_key_exchange([:dh_rsa, :dh_dss, :dhe_rsa, :dhe_dss, :dh_anon, :dhe_psk])
defp find("DHE"), do: all_with_key_exchange([:dhe_rsa, :dhe_dss, :dhe_psk])
defp find("EDH"), do: find("DHE")
defp find("ADH"), do: all_with_key_exchange(:dh_anon)
defp find("kEECDH"), do: all_with_key_exchange([:ecdhe_rsa, :ecdhe_ecdsa, :ecdh_anon])
defp find("kECDHE"), do: find("kEECDH")
defp find("ECDH"),
do: all_with_key_exchange([:ecdhe_rsa, :ecdhe_ecdsa, :ecdh_rsa, :ecdh_ecdsa, :ecdh_anon])
defp find("ECDHE"), do: all_with_key_exchange([:ecdhe_rsa, :ecdhe_ecdsa])
defp find("EECDH"), do: find("ECDHE")
defp find("AECDH"), do: all_with_key_exchange(:ecdh_anon)
defp find("aDSS"), do: all_with_key_exchange([:dhe_dss, :srp_dss])
defp find("DSS"), do: find("aDSS")
defp find("aDH"), do: all_with_key_exchange([:dh_rsa, :dh_dss])
defp find("aECDSA"), do: all_with_key_exchange(:ecdhe_ecdsa)
defp find("ECDSA"), do: find("aECDSA")
defp find("SSLv3") do
# Baseline ciphersuites, no AEAD, fixed PRF; note that many
# implementations didn't add ECC support until TLS 1.0 or later, but
# technically they can be used with SSLv3
find_all(
[
:ecdhe_ecdsa,
:ecdhe_rsa,
:ecdh_ecdsa,
:ecdh_rsa,
:dhe_rsa,
:dhe_dss,
:rsa,
:dh_anon,
:ecdh_anon,
:dhe_psk,
:rsa_psk,
:psk,
:srp_anon,
:srp_rsa,
:srp_dss
],
[:aes_256_cbc, :"3des_ede_cbc", :aes_128_cbc, :des_cbc, :rc4_128, :null],
[:sha, :md5],
[:default_prf]
)
end
defp find("TLSv1.0") do
[]
end
defp find("TLSv1.2") do
# TLS 1.2 adds AEAD ciphers, HMAC-SHA256 and new pseudo-random function
# (PRF) options
find_any([], [:chacha20_poly1305, :aes_256_gcm, :aes_128_gcm], [:null, :sha256, :sha384], [
:sha256,
:sha384
])
end
defp find("AES128"), do: all_with_cipher([:aes_128_cbc, :aes_128_gcm])
defp find("AES256"), do: all_with_cipher([:aes_256_cbc, :aes_256_gcm])
defp find("AES"), do: all_with_cipher([:aes_128_cbc, :aes_128_gcm, :aes_256_cbc, :aes_256_gcm])
defp find("AESGCM"), do: all_with_cipher([:aes_128_gcm, :aes_256_gcm])
defp find("CHACHA20"), do: all_with_cipher(:chacha20)
defp find("3DES"), do: all_with_cipher(:"3des_ede_cbc")
defp find("DES"), do: all_with_cipher(:des_cbc)
defp find("RC4"), do: all_with_cipher(:rc4_128)
defp find("IDEA"), do: all_with_cipher(:idea_cbc)
defp find("MD5"), do: all_with_hash(:md5)
defp find("SHA1"), do: all_with_hash(:sha)
defp find("SHA"), do: find("SHA1")
defp find("SHA256"), do: all_with_hash(:sha256)
defp find("SHA384"), do: all_with_hash(:sha384)
defp find("kPSK"), do: all_with_key_exchange(:psk)
defp find("PSK"), do: find("kPSK")
defp find("kDHEPSK"), do: all_with_key_exchange(:dhe_psk)
defp find("kRSAPSK"), do: all_with_key_exchange(:rsa_psk)
defp find("aPSK"), do: all_with_key_exchange([:psk, :dhe_psk, :rsa_psk])
defp find("kSRP"), do: all_with_key_exchange([:srp, :srp_rsa, :srp_dss, :srp_anon])
defp find("SRP"), do: find("kSRP")
defp find("aSRP"), do: all_with_key_exchange(:srp_anon)
# Unsupported
defp find("AESCCM"), do: []
defp find("AESCCM8"), do: []
defp find("CAMELLIA128"), do: []
defp find("CAMELLIA256"), do: []
defp find("CAMELLIA"), do: []
defp find("RC2"), do: []
defp find("SEED"), do: []
defp find("aGOST"), do: []
defp find("aGOST01"), do: []
defp find("kGOST"), do: []
defp find("GOST94"), do: []
defp find("GOST89MAC"), do: []
defp find("kECDHEPSK"), do: []
defp merge(a, b) do
Enum.uniq(a ++ b)
end
defp find_all(key_exchanges, ciphers, hash_functions, prfs) do
Enum.filter(all(), fn
{key_exchange, cipher, hash} ->
key_exchange in key_exchanges and cipher in ciphers and hash in hash_functions and
:default_prf in prfs
{key_exchange, cipher, hash, prf} ->
key_exchange in key_exchanges and cipher in ciphers and hash in hash_functions and
prf in prfs
end)
end
defp find_any(key_exchanges, ciphers, hash_functions, prfs) do
Enum.filter(all(), fn
{key_exchange, cipher, hash} ->
key_exchange in key_exchanges or cipher in ciphers or hash in hash_functions or
:default_prf in prfs
{key_exchange, cipher, hash, prf} ->
key_exchange in key_exchanges or cipher in ciphers or hash in hash_functions or
prf in prfs
end)
end
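# For example (a sketch): find_all([:rsa], [:aes_128_cbc], [:sha], [:default_prf])
# matches only the classic TLS_RSA_WITH_AES_128_CBC_SHA suite, while find_any/4
# with the same arguments also keeps every suite satisfying any one criterion.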
defp all_with_key_exchange(key_exchanges) when is_list(key_exchanges) do
Enum.filter(all(), &(elem(&1, 0) in key_exchanges))
end
defp all_with_key_exchange(key_exchange) do
Enum.filter(all(), &(elem(&1, 0) == key_exchange))
end
defp all_with_cipher(ciphers) when is_list(ciphers) do
Enum.filter(all(), &(elem(&1, 1) in ciphers))
end
defp all_with_cipher(cipher) do
Enum.filter(all(), &(elem(&1, 1) == cipher))
end
defp all_with_hash(hash) do
Enum.filter(all(), &(elem(&1, 2) == hash))
end
defp openssl_suite(cipher_name) do
definition =
cipher_name
|> String.to_charlist()
|> :ssl_cipher.openssl_suite()
|> :ssl_cipher.suite_definition()
case definition do
%{key_exchange: key_exchange, cipher: cipher, mac: mac, prf: prf} ->
{key_exchange, cipher, mac, prf}
tuple ->
tuple
end
rescue
FunctionClauseError ->
Map.get(@more_openssl_suites, cipher_name)
end
end
# -- end of lib/cipher_suites.ex --
defmodule Mint.HTTP1 do
@moduledoc """
Processless HTTP client with support for HTTP/1 and HTTP/1.1.
This module provides a data structure that represents an HTTP/1 or HTTP/1.1 connection to
a given server. The connection is represented as an opaque struct `%Mint.HTTP1{}`.
The connection is a data structure and is not backed by a process, and all the
connection handling happens in the process that creates the struct.
This module and data structure work exactly like the ones described in the `Mint`
module, with the exception that `Mint.HTTP1` specifically deals with HTTP/1 and HTTP/1.1 while
`Mint` deals seamlessly with HTTP/1, HTTP/1.1, and HTTP/2. For more information on
how to use the data structure and client architecture, see `Mint`.
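## Usage example

A minimal sketch (host, port, and path here are illustrative):

    {:ok, conn} = Mint.HTTP1.connect(:http, "example.com", 80)
    {:ok, conn, request_ref} = Mint.HTTP1.request(conn, "GET", "/", [], nil)

    receive do
      message ->
        {:ok, conn, responses} = Mint.HTTP1.stream(conn, message)
    end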
"""
import Mint.Core.Util
alias Mint.Core.Util
alias Mint.HTTP1.{Parse, Request, Response}
alias Mint.{HTTPError, TransportError, Types}
require Logger
@behaviour Mint.Core.Conn
@opaque t() :: %__MODULE__{}
@user_agent "mint/" <> Mix.Project.config()[:version]
@typedoc """
An HTTP/1-specific error reason.
The values can be:
* `:closed` - when you try to make a request or stream a body chunk but the connection
is closed.
* `:request_body_is_streaming` - when you call `request/5` to send a new
request but another request is already streaming.
* `{:unexpected_data, data}` - when unexpected data is received from the server.
* `:invalid_status_line` - when the HTTP/1 status line is invalid.
* `{:invalid_request_target, target}` - when the request target is invalid.
* `:invalid_header` - when headers can't be parsed correctly.
* `{:invalid_header_name, name}` - when a header name is invalid.
* `{:invalid_header_value, name, value}` - when a header value is invalid. `name`
is the name of the header and `value` is the invalid value.
* `:invalid_chunk_size` - when the chunk size is invalid.
* `:missing_crlf_after_chunk` - when the CRLF after a chunk is missing.
* `:invalid_trailer_header` - when trailer headers can't be parsed.
* `:more_than_one_content_length_header` - when more than one `content-length`
header is present in the response.
* `:transfer_encoding_and_content_length` - when both a `content-length` and
a `transfer-encoding` header are present in the response.
* `{:invalid_content_length_header, value}` - when the value of the `content-length`
header is invalid, that is, not a non-negative integer.
* `:empty_token_list` - when a header that is supposed to contain a list of tokens
(such as the `connection` header) doesn't contain any.
* `{:invalid_token_list, string}` - when a header that is supposed to contain a list
of tokens (such as the `connection` header) contains a malformed list of tokens.
* `:trailing_headers_but_not_chunked_encoding` - when you try to send trailing
headers through `stream_request_body/3` but the transfer encoding of the request
was not `chunked`.
"""
@type error_reason() :: term()
defstruct [
:host,
:port,
:request,
:socket,
:transport,
:mode,
:scheme_as_string,
requests: :queue.new(),
state: :closed,
buffer: "",
private: %{}
]
@doc """
Same as `Mint.HTTP.connect/4`, but forces an HTTP/1 or HTTP/1.1 connection.
This function doesn't support proxying.
"""
@spec connect(Types.scheme(), String.t(), :inet.port_number(), keyword()) ::
{:ok, t()} | {:error, Types.error()}
def connect(scheme, hostname, port, opts \\ []) do
# TODO: Also ALPN negotiate HTTP1?
transport = scheme_to_transport(scheme)
transport_opts = Keyword.get(opts, :transport_opts, [])
with {:ok, socket} <- transport.connect(hostname, port, transport_opts) do
initiate(scheme, socket, hostname, port, opts)
end
end
@doc false
@spec upgrade(
Types.scheme(),
Mint.Types.socket(),
Types.scheme(),
String.t(),
:inet.port_number(),
keyword()
) :: {:ok, t()} | {:error, Types.error()}
def upgrade(old_scheme, socket, new_scheme, hostname, port, opts) do
# TODO: Also ALPN negotiate HTTP1?
transport = scheme_to_transport(new_scheme)
transport_opts = Keyword.get(opts, :transport_opts, [])
with {:ok, socket} <- transport.upgrade(socket, old_scheme, hostname, port, transport_opts) do
initiate(new_scheme, socket, hostname, port, opts)
end
end
@doc false
@impl true
@spec initiate(
Types.scheme(),
Mint.Types.socket(),
String.t(),
:inet.port_number(),
keyword()
) :: {:ok, t()} | {:error, Types.error()}
def initiate(scheme, socket, hostname, port, opts) do
transport = scheme_to_transport(scheme)
mode = Keyword.get(opts, :mode, :active)
unless mode in [:active, :passive] do
raise ArgumentError,
"the :mode option must be either :active or :passive, got: #{inspect(mode)}"
end
with :ok <- inet_opts(transport, socket),
:ok <- if(mode == :active, do: transport.setopts(socket, active: :once), else: :ok) do
conn = %__MODULE__{
transport: transport,
socket: socket,
mode: mode,
host: hostname,
port: port,
scheme_as_string: Atom.to_string(scheme),
state: :open
}
{:ok, conn}
else
{:error, reason} ->
:ok = transport.close(socket)
{:error, reason}
end
end
@doc """
See `Mint.HTTP.close/1`.
"""
@impl true
@spec close(t()) :: {:ok, t()}
def close(conn)
def close(%__MODULE__{state: :open} = conn) do
conn = internal_close(conn)
{:ok, conn}
end
def close(%__MODULE__{state: :closed} = conn) do
{:ok, conn}
end
@doc """
See `Mint.HTTP.open?/1`.
"""
@impl true
@spec open?(t(), :read | :write | :read_write) :: boolean()
def open?(conn, type \\ :read_write)
def open?(%__MODULE__{state: state}, type) when type in [:read, :write, :read_write] do
state == :open
end
@doc """
See `Mint.HTTP.request/5`.
In HTTP/1 and HTTP/1.1, you can't open a new request if you're streaming the body of
another request. If you try, an error will be returned.
"""
@impl true
@spec request(
t(),
method :: String.t(),
path :: String.t(),
Types.headers(),
body :: iodata() | nil | :stream
) ::
{:ok, t(), Types.request_ref()}
| {:error, t(), Types.error()}
def request(conn, method, path, headers, body)
def request(%__MODULE__{state: :closed} = conn, _method, _path, _headers, _body) do
{:error, conn, wrap_error(:closed)}
end
def request(
%__MODULE__{request: %{state: {:stream_request, _}}} = conn,
_method,
_path,
_headers,
_body
) do
{:error, conn, wrap_error(:request_body_is_streaming)}
end
def request(%__MODULE__{} = conn, method, path, headers, body) do
%__MODULE__{transport: transport, socket: socket} = conn
headers =
headers
|> add_default_headers(conn)
with {:ok, headers, encoding} <- add_content_length_or_transfer_encoding(headers, body),
{:ok, iodata} <- Request.encode(method, path, headers, body),
:ok <- transport.send(socket, iodata) do
request_ref = make_ref()
request = new_request(request_ref, method, body, encoding)
if conn.request == nil do
conn = %__MODULE__{conn | request: request}
{:ok, conn, request_ref}
else
requests = :queue.in(request, conn.requests)
conn = %__MODULE__{conn | requests: requests}
{:ok, conn, request_ref}
end
else
{:error, %TransportError{reason: :closed} = error} ->
{:error, %{conn | state: :closed}, error}
{:error, %HTTPError{} = error} ->
{:error, conn, error}
{:error, reason} ->
{:error, conn, wrap_error(reason)}
end
end
@doc """
See `Mint.HTTP.stream_request_body/3`.
In HTTP/1, sending an empty chunk is a no-op.
## Transfer encoding and content length
When streaming the request body, Mint cannot send a precalculated `content-length`
request header because it doesn't know the body that you'll stream. However, Mint
will transparently handle the presence of a `content-length` header using this logic:
* if you specifically set a `content-length` header, then transfer encoding and
making sure the content length is correct for what you'll stream is up to you.
* if you specifically set the transfer encoding (`transfer-encoding` header)
to `chunked`, then it's up to you to
[properly encode chunks](https://en.wikipedia.org/wiki/Chunked_transfer_encoding).
* if you don't set the transfer encoding to `chunked` and don't provide a
`content-length` header, Mint will do implicit `chunked` transfer encoding
(setting the `transfer-encoding` header appropriately) and will take care
of properly encoding the chunks.
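For example, streaming a body with implicit chunked transfer encoding
(a sketch; `conn` comes from `connect/4`, and the path is illustrative):

    {:ok, conn, ref} = Mint.HTTP1.request(conn, "POST", "/upload", [], :stream)
    {:ok, conn} = Mint.HTTP1.stream_request_body(conn, ref, "some data")
    {:ok, conn} = Mint.HTTP1.stream_request_body(conn, ref, :eof)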
"""
@impl true
@spec stream_request_body(
t(),
Types.request_ref(),
iodata() | :eof | {:eof, trailing_headers :: Types.headers()}
) ::
{:ok, t()} | {:error, t(), Types.error()}
def stream_request_body(
%__MODULE__{request: %{state: {:stream_request, :identity}, ref: ref}} = conn,
ref,
:eof
) do
{:ok, put_in(conn.request.state, :status)}
end
def stream_request_body(
%__MODULE__{request: %{state: {:stream_request, :identity}, ref: ref}} = conn,
ref,
{:eof, _trailing_headers}
) do
{:error, conn, wrap_error(:trailing_headers_but_not_chunked_encoding)}
end
def stream_request_body(
%__MODULE__{request: %{state: {:stream_request, :identity}, ref: ref}} = conn,
ref,
body
) do
case conn.transport.send(conn.socket, body) do
:ok ->
{:ok, conn}
{:error, %TransportError{reason: :closed} = error} ->
{:error, %{conn | state: :closed}, error}
{:error, error} ->
{:error, conn, error}
end
end
def stream_request_body(
%__MODULE__{request: %{state: {:stream_request, :chunked}, ref: ref}} = conn,
ref,
chunk
) do
with {:ok, chunk} <- validate_chunk(chunk),
:ok <- conn.transport.send(conn.socket, Request.encode_chunk(chunk)) do
case chunk do
:eof -> {:ok, put_in(conn.request.state, :status)}
{:eof, _trailing_headers} -> {:ok, put_in(conn.request.state, :status)}
_other -> {:ok, conn}
end
else
:empty_chunk ->
{:ok, conn}
{:error, %TransportError{reason: :closed} = error} ->
{:error, %{conn | state: :closed}, error}
{:error, error} ->
{:error, conn, error}
end
end
defp validate_chunk({:eof, trailing_headers}) do
headers = lower_header_keys(trailing_headers)
if unallowed_header = find_unallowed_trailing_header(headers) do
{:error, wrap_error({:unallowed_trailing_header, unallowed_header})}
else
{:ok, {:eof, headers}}
end
end
defp validate_chunk(:eof) do
{:ok, :eof}
end
defp validate_chunk(chunk) do
if IO.iodata_length(chunk) == 0 do
:empty_chunk
else
{:ok, chunk}
end
end
@doc """
See `Mint.HTTP.stream/2`.
"""
@impl true
@spec stream(t(), term()) ::
{:ok, t(), [Types.response()]}
| {:error, t(), Types.error(), [Types.response()]}
| :unknown
def stream(conn, message)
def stream(%__MODULE__{transport: transport, socket: socket} = conn, {tag, socket, data})
when tag in [:tcp, :ssl] do
result = handle_data(conn, data)
if conn.mode == :active do
# TODO: handle errors here.
_ = transport.setopts(socket, active: :once)
end
result
end
def stream(%__MODULE__{socket: socket} = conn, {tag, socket})
when tag in [:tcp_closed, :ssl_closed] do
handle_close(conn)
end
def stream(%__MODULE__{socket: socket} = conn, {tag, socket, reason})
when tag in [:tcp_error, :ssl_error] do
handle_error(conn, conn.transport.wrap_error(reason))
end
def stream(%__MODULE__{}, _message) do
:unknown
end
defp handle_data(%__MODULE__{request: nil} = conn, data) do
conn = internal_close(conn)
{:error, conn, wrap_error({:unexpected_data, data}), []}
end
defp handle_data(%__MODULE__{request: request} = conn, data) do
data = maybe_concat(conn.buffer, data)
case decode(request.state, conn, data, []) do
{:ok, conn, responses} ->
{:ok, conn, Enum.reverse(responses)}
{:error, conn, reason, responses} ->
conn = put_in(conn.state, :closed)
{:error, conn, reason, responses}
end
end
defp handle_close(%__MODULE__{request: request} = conn) do
conn = put_in(conn.state, :closed)
conn = request_done(conn)
if request && request.body == :until_closed do
conn = put_in(conn.state, :closed)
{:ok, conn, [{:done, request.ref}]}
else
{:error, conn, conn.transport.wrap_error(:closed), []}
end
end
defp handle_error(conn, error) do
conn = put_in(conn.state, :closed)
{:error, conn, error, []}
end
@doc """
See `Mint.HTTP.recv/3`.
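For example, in passive mode (a sketch; host and port are illustrative):

    {:ok, conn} = Mint.HTTP1.connect(:http, "example.com", 80, mode: :passive)
    {:ok, conn, ref} = Mint.HTTP1.request(conn, "GET", "/", [], nil)
    {:ok, conn, responses} = Mint.HTTP1.recv(conn, 0, 5_000)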
"""
@impl true
@spec recv(t(), non_neg_integer(), timeout()) ::
{:ok, t(), [Types.response()]}
| {:error, t(), Types.error(), [Types.response()]}
# TODO: remove the check when we depend on Elixir 1.7+.
if Version.match?(System.version(), ">= 1.7.0") do
@doc since: "0.3.0"
end
def recv(conn, byte_count, timeout)
def recv(%__MODULE__{mode: :passive} = conn, byte_count, timeout) do
case conn.transport.recv(conn.socket, byte_count, timeout) do
{:ok, data} -> handle_data(conn, data)
{:error, error} -> handle_error(conn, error)
end
end
def recv(_conn, _byte_count, _timeout) do
raise ArgumentError,
"can't use recv/3 to synchronously receive data when the mode is :active. " <>
"Use Mint.HTTP.set_mode/2 to set the connection to passive mode"
end
@doc """
See `Mint.HTTP.set_mode/2`.
"""
# TODO: remove the check when we depend on Elixir 1.7+.
if Version.match?(System.version(), ">= 1.7.0") do
@doc since: "0.3.0"
end
@impl true
@spec set_mode(t(), :active | :passive) :: {:ok, t()} | {:error, Types.error()}
def set_mode(%__MODULE__{} = conn, mode) when mode in [:active, :passive] do
active =
case mode do
:active -> :once
:passive -> false
end
with :ok <- conn.transport.setopts(conn.socket, active: active) do
{:ok, put_in(conn.mode, mode)}
end
end
@doc """
See `Mint.HTTP.controlling_process/2`.
"""
# TODO: remove the check when we depend on Elixir 1.7+.
if Version.match?(System.version(), ">= 1.7.0") do
@doc since: "0.3.0"
end
@impl true
@spec controlling_process(t(), pid()) :: {:ok, t()} | {:error, Types.error()}
def controlling_process(%__MODULE__{} = conn, new_pid) when is_pid(new_pid) do
with :ok <- conn.transport.controlling_process(conn.socket, new_pid) do
{:ok, conn}
end
end
@doc """
See `Mint.HTTP.open_request_count/1`.
In HTTP/1, the number of open requests is the number of pipelined requests.
"""
@impl true
@spec open_request_count(t()) :: non_neg_integer()
def open_request_count(%__MODULE__{} = conn) do
if is_nil(conn.request) do
0
else
1 + :queue.len(conn.requests)
end
end
@doc """
See `Mint.HTTP.put_private/3`.
"""
@impl true
@spec put_private(t(), atom(), term()) :: t()
def put_private(%__MODULE__{private: private} = conn, key, value) when is_atom(key) do
%{conn | private: Map.put(private, key, value)}
end
@doc """
See `Mint.HTTP.get_private/3`.
"""
@impl true
@spec get_private(t(), atom(), term()) :: term()
def get_private(%__MODULE__{private: private} = _conn, key, default \\ nil) when is_atom(key) do
Map.get(private, key, default)
end
@doc """
See `Mint.HTTP.delete_private/2`.
"""
@impl true
@spec delete_private(t(), atom()) :: t()
def delete_private(%__MODULE__{private: private} = conn, key) when is_atom(key) do
%{conn | private: Map.delete(private, key)}
end
# Made public to be used with proxying.
@doc false
@impl true
@spec get_socket(t()) :: Mint.Types.socket()
def get_socket(%__MODULE__{socket: socket} = _conn) do
socket
end
## Helpers
defp decode(:status, %{request: request} = conn, data, responses) do
case Response.decode_status_line(data) do
{:ok, {version, status, _reason}, rest} ->
request = %{request | version: version, status: status, state: :headers}
conn = %{conn | request: request}
responses = [{:status, request.ref, status} | responses]
decode(:headers, conn, rest, responses)
:more ->
conn = put_in(conn.buffer, data)
{:ok, conn, responses}
:error ->
{:error, conn, wrap_error(:invalid_status_line), responses}
end
end
defp decode(:headers, %{request: request} = conn, data, responses) do
decode_headers(conn, request, data, responses, request.headers_buffer)
end
defp decode(:body, conn, data, responses) do
case message_body(conn.request) do
{:ok, body} ->
conn = put_in(conn.request.body, body)
decode_body(body, conn, data, conn.request.ref, responses)
{:error, reason} ->
{:error, conn, wrap_error(reason), responses}
end
end
defp decode_headers(conn, request, data, responses, headers) do
case Response.decode_header(data) do
{:ok, {name, value}, rest} ->
headers = [{name, value} | headers]
case store_header(request, name, value) do
{:ok, request} -> decode_headers(conn, request, rest, responses, headers)
{:error, reason} -> {:error, conn, wrap_error(reason), responses}
end
{:ok, :eof, rest} ->
responses = [{:headers, request.ref, Enum.reverse(headers)} | responses]
request = %{request | state: :body, headers_buffer: []}
conn = %{conn | buffer: "", request: request}
decode(:body, conn, rest, responses)
:more ->
request = %{request | headers_buffer: headers}
conn = %{conn | buffer: data, request: request}
{:ok, conn, responses}
:error ->
{:error, conn, wrap_error(:invalid_header), responses}
end
end
defp decode_body(:none, conn, data, request_ref, responses) do
conn = put_in(conn.buffer, data)
conn = request_done(conn)
responses = [{:done, request_ref} | responses]
{:ok, conn, responses}
end
defp decode_body(:until_closed, conn, data, _request_ref, responses) do
{conn, responses} = add_body(conn, data, responses)
{:ok, conn, responses}
end
defp decode_body({:content_length, length}, conn, data, request_ref, responses) do
cond do
length > byte_size(data) ->
conn = put_in(conn.request.body, {:content_length, length - byte_size(data)})
{conn, responses} = add_body(conn, data, responses)
{:ok, conn, responses}
length <= byte_size(data) ->
<<body::binary-size(length), rest::binary>> = data
{conn, responses} = add_body(conn, body, responses)
conn = request_done(conn)
responses = [{:done, request_ref} | responses]
next_request(conn, rest, responses)
end
end
defp decode_body({:chunked, nil}, conn, "", _request_ref, responses) do
conn = put_in(conn.buffer, "")
conn = put_in(conn.request.body, {:chunked, nil})
{:ok, conn, responses}
end
defp decode_body({:chunked, nil}, conn, data, request_ref, responses) do
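# The buffer should start with a hex chunk-size line. If Integer.parse/2
# consumes the entire buffer we can't yet tell where the size line ends, so
# we keep buffering; a size of 0 marks the final chunk, after which the
# optional trailer section follows.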
case Integer.parse(data, 16) do
{_size, ""} ->
conn = put_in(conn.buffer, data)
conn = put_in(conn.request.body, {:chunked, nil})
{:ok, conn, responses}
{0, rest} ->
# Manually collapse the body buffer since we're done with the body
{conn, responses} = collapse_body_buffer(conn, responses)
decode_body({:chunked, :metadata, :trailer}, conn, rest, request_ref, responses)
{size, rest} when size > 0 ->
decode_body({:chunked, :metadata, size}, conn, rest, request_ref, responses)
_other ->
{:error, conn, wrap_error(:invalid_chunk_size), responses}
end
end
defp decode_body({:chunked, :metadata, size}, conn, data, request_ref, responses) do
case Parse.ignore_until_crlf(data) do
{:ok, rest} ->
decode_body({:chunked, size}, conn, rest, request_ref, responses)
:more ->
conn = put_in(conn.buffer, data)
conn = put_in(conn.request.body, {:chunked, :metadata, size})
{:ok, conn, responses}
end
end
defp decode_body({:chunked, :trailer}, conn, data, _request_ref, responses) do
decode_trailer_headers(conn, data, responses, conn.request.headers_buffer)
end
defp decode_body({:chunked, :crlf}, conn, data, request_ref, responses) do
case data do
<<"\r\n", rest::binary>> ->
conn = put_in(conn.request.body, {:chunked, nil})
decode_body({:chunked, nil}, conn, rest, request_ref, responses)
_other when byte_size(data) < 2 ->
conn = put_in(conn.buffer, data)
{:ok, conn, responses}
_other ->
{:error, conn, wrap_error(:missing_crlf_after_chunk), responses}
end
end
defp decode_body({:chunked, length}, conn, data, request_ref, responses) do
cond do
length > byte_size(data) ->
conn = put_in(conn.buffer, "")
conn = put_in(conn.request.body, {:chunked, length - byte_size(data)})
conn = add_body_to_buffer(conn, data)
{:ok, conn, responses}
length <= byte_size(data) ->
<<body::binary-size(length), rest::binary>> = data
{conn, responses} = add_body(conn, body, responses)
conn = put_in(conn.request.body, {:chunked, :crlf})
decode_body({:chunked, :crlf}, conn, rest, request_ref, responses)
end
end
defp decode_trailer_headers(conn, data, responses, headers) do
case Response.decode_header(data) do
{:ok, {name, value}, rest} ->
headers = [{name, value} | headers]
decode_trailer_headers(conn, rest, responses, headers)
{:ok, :eof, rest} ->
headers = Util.remove_unallowed_trailing_headers(headers)
responses = [
{:done, conn.request.ref}
| add_trailing_headers(headers, conn.request.ref, responses)
]
conn = request_done(conn)
next_request(conn, rest, responses)
:more ->
request = %{conn.request | body: {:chunked, :trailer}, headers_buffer: headers}
conn = %{conn | buffer: data, request: request}
{:ok, conn, responses}
:error ->
{:error, conn, wrap_error(:invalid_trailer_header), responses}
end
end
defp next_request(%{request: nil} = conn, data, responses) do
# TODO: Figure out if we should keep buffering even though there are no
# requests in flight
{:ok, %{conn | buffer: data}, responses}
end
defp next_request(conn, data, responses) do
decode(:status, %{conn | state: :status}, data, responses)
end
defp add_trailing_headers([], _request_ref, responses), do: responses
defp add_trailing_headers(headers, request_ref, responses),
do: [{:headers, request_ref, Enum.reverse(headers)} | responses]
defp add_body(conn, data, responses) do
conn = add_body_to_buffer(conn, data)
collapse_body_buffer(conn, responses)
end
defp add_body_to_buffer(conn, data) do
update_in(conn.request.data_buffer, &[&1 | data])
end
defp collapse_body_buffer(conn, responses) do
case IO.iodata_to_binary(conn.request.data_buffer) do
"" ->
{conn, responses}
data ->
conn = put_in(conn.request.data_buffer, [])
{conn, [{:data, conn.request.ref, data} | responses]}
end
end
defp store_header(%{content_length: nil} = request, "content-length", value) do
with {:ok, content_length} <- Parse.content_length_header(value),
do: {:ok, %{request | content_length: content_length}}
end
defp store_header(%{connection: connection} = request, "connection", value) do
with {:ok, connection_header} <- Parse.connection_header(value),
do: {:ok, %{request | connection: connection ++ connection_header}}
end
defp store_header(%{transfer_encoding: transfer_encoding} = request, "transfer-encoding", value) do
with {:ok, transfer_encoding_header} <- Parse.transfer_encoding_header(value),
do: {:ok, %{request | transfer_encoding: transfer_encoding ++ transfer_encoding_header}}
end
defp store_header(_request, "content-length", _value) do
{:error, :more_than_one_content_length_header}
end
defp store_header(request, _name, _value) do
{:ok, request}
end
defp request_done(%{request: request} = conn) do
conn = pop_request(conn)
cond do
!request -> conn
"close" in request.connection -> internal_close(conn)
request.version >= {1, 1} -> conn
"keep-alive" in request.connection -> conn
true -> internal_close(conn)
end
end
defp pop_request(conn) do
case :queue.out(conn.requests) do
{{:value, request}, requests} ->
%{conn | request: request, requests: requests}
{:empty, requests} ->
%{conn | request: nil, requests: requests}
end
end
defp internal_close(conn) do
if conn.buffer != "" do
_ = Logger.debug(["Connection closed with data left in the buffer: ", inspect(conn.buffer)])
end
:ok = conn.transport.close(conn.socket)
%{conn | state: :closed}
end
# RFC7230 3.3.3:
# > If a message is received with both a Transfer-Encoding and a
# > Content-Length header field, the Transfer-Encoding overrides the
# > Content-Length. Such a message might indicate an attempt to
# > perform request smuggling (Section 9.5) or response splitting
# > (Section 9.4) and ought to be handled as an error. A sender MUST
# > remove the received Content-Length field prior to forwarding such
# > a message downstream.
defp message_body(%{body: nil, method: method, status: status} = request) do
cond do
method == "HEAD" or status in 100..199 or status in [204, 304] ->
{:ok, :none}
# method == "CONNECT" and status in 200..299 -> nil
request.transfer_encoding != [] && request.content_length ->
{:error, :transfer_encoding_and_content_length}
"chunked" == List.first(request.transfer_encoding) ->
{:ok, {:chunked, nil}}
request.content_length ->
{:ok, {:content_length, request.content_length}}
true ->
{:ok, :until_closed}
end
end
defp message_body(%{body: body}) do
{:ok, body}
end
defp new_request(ref, method, body, encoding) do
state =
if body == :stream do
{:stream_request, encoding}
else
:status
end
%{
ref: ref,
state: state,
method: method,
version: nil,
status: nil,
headers_buffer: [],
data_buffer: [],
content_length: nil,
connection: [],
transfer_encoding: [],
body: nil
}
end
defp lower_header_keys(headers) do
for {name, value} <- headers, do: {Util.downcase_ascii(name), value}
end
defp add_default_headers(headers, conn) do
headers
|> Util.put_new_header("user-agent", @user_agent)
|> Util.put_new_header("host", default_host_header(conn))
end
# If the port is the default for the scheme, don't add it to the host header
defp default_host_header(%__MODULE__{scheme_as_string: scheme, host: host, port: port}) do
if URI.default_port(scheme) == port do
host
else
"#{host}:#{port}"
end
end
defp add_content_length_or_transfer_encoding(headers, :stream) do
cond do
List.keymember?(headers, "content-length", 0) ->
{:ok, headers, :identity}
found = List.keyfind(headers, "transfer-encoding", 0) ->
{"transfer-encoding", value} = found
with {:ok, tokens} <- Parse.transfer_encoding_header(value) do
if "chunked" in tokens or "identity" in tokens do
{:ok, headers, :identity}
else
new_transfer_encoding = {"transfer-encoding", value <> ",chunked"}
headers = List.keyreplace(headers, "transfer-encoding", 0, new_transfer_encoding)
{:ok, headers, :chunked}
end
end
# If no content-length or transfer-encoding are present, assume
# chunked transfer-encoding and handle the encoding ourselves.
true ->
headers = Util.put_new_header(headers, "transfer-encoding", "chunked")
{:ok, headers, :chunked}
end
end
defp add_content_length_or_transfer_encoding(headers, nil) do
{:ok, headers, :identity}
end
defp add_content_length_or_transfer_encoding(headers, body) do
length_fun = fn -> body |> IO.iodata_length() |> Integer.to_string() end
{:ok, Util.put_new_header_lazy(headers, "content-length", length_fun), :identity}
end
defp wrap_error(reason) do
%HTTPError{reason: reason, module: __MODULE__}
end
@doc false
def format_error(reason)
def format_error(:closed) do
"the connection is closed"
end
def format_error(:request_body_is_streaming) do
"a request body is currently streaming, so no new requests can be issued"
end
def format_error({:unexpected_data, data}) do
"received unexpected data: " <> inspect(data)
end
def format_error(:invalid_status_line) do
"invalid status line"
end
def format_error(:invalid_header) do
"invalid header"
end
def format_error({:invalid_request_target, target}) do
"invalid request target: #{inspect(target)}"
end
def format_error({:invalid_header_name, name}) do
"invalid header name: #{inspect(name)}"
end
def format_error({:invalid_header_value, name, value}) do
"invalid value for header #{inspect(name)}: #{inspect(value)}"
end
def format_error(:invalid_chunk_size) do
"invalid chunk size"
end
def format_error(:missing_crlf_after_chunk) do
"missing CRLF after chunk"
end
def format_error(:invalid_trailer_header) do
"invalid trailer header"
end
def format_error(:more_than_one_content_length_header) do
"the response contains two or more Content-Length headers"
end
def format_error(:transfer_encoding_and_content_length) do
"the response contained both a Transfer-Encoding header as well as a Content-Length header"
end
def format_error({:invalid_content_length_header, value}) do
"invalid Content-Length header: #{inspect(value)}"
end
def format_error(:empty_token_list) do
"header should contain a list of values, but it doesn't"
end
def format_error({:invalid_token_list, string}) do
"header contains invalid tokens: #{inspect(string)}"
end
def format_error(:trailing_headers_but_not_chunked_encoding) do
"trailing headers can only be sent when using chunked transfer-encoding"
end
def format_error({:unallowed_trailing_header, {name, value}}) do
"header #{inspect(name)} (with value #{inspect(value)}) is not allowed as a trailing header"
end
end
# -- end of lib/mint/http1.ex --
defmodule Mix.Tasks.Phx.New do
@moduledoc """
Creates a new Phoenix project.
It expects the path of the project as an argument.
mix phx.new PATH [--module MODULE] [--app APP]
A project at the given PATH will be created. The
application name and module name will be retrieved
from the path, unless `--module` or `--app` is given.
## Options
* `--umbrella` - generate an umbrella project,
with one application for your domain, and
a second application for the web interface.
* `--app` - the name of the OTP application
* `--module` - the name of the base module in
the generated skeleton
* `--database` - specify the database adapter for Ecto. One of:
* `postgres` (https://github.com/elixir-ecto/postgrex)
* `mysql` (https://github.com/xerions/mariaex)
* `mssql` (https://github.com/findmypast-oss/mssqlex)
Please check the driver docs, linked in parentheses above, for more information
and requirements. Defaults to "postgres".
* `--no-webpack` - do not generate webpack files
for static asset building. When choosing this
option, you will need to manually handle
JavaScript dependencies if building HTML apps
* `--no-ecto` - do not generate Ecto files.
* `--no-html` - do not generate HTML views.
* `--binary-id` - use `binary_id` as primary key type
in Ecto schemas
* `--verbose` - use verbose output
When passing the `--no-ecto` flag, Phoenix generators such as
`phx.gen.html`, `phx.gen.json` and `phx.gen.context` may no
longer work as expected as they generate context files that rely
on Ecto for the database access. In those cases, you can pass the
`--no-context` flag to generate most of the HTML and JSON files
but skip the context, allowing you to fill in the blanks as desired.
Similarly, if `--no-html` is given, the files generated by
`phx.gen.html` will no longer work, as important HTML components
will be missing.
## Examples
mix phx.new hello_world
Is equivalent to:
mix phx.new hello_world --module HelloWorld
Or without the HTML and JS bits (useful for APIs):
mix phx.new ~/Workspace/hello_world --no-html --no-webpack
As an umbrella:
mix phx.new hello --umbrella
Would generate the following directory structure and modules:
hello_umbrella/ Hello.Umbrella
apps/
hello/ Hello
hello_web/ HelloWeb
You can read more about umbrella projects using the
official [Elixir guide](http://elixir-lang.org/getting-started/mix-otp/dependencies-and-umbrella-apps.html#umbrella-projects)
To print the Phoenix installer version, pass `-v` or `--version`, for example:
mix phx.new -v
"""
use Mix.Task
alias Phx.New.{Generator, Project, Single, Umbrella, Web, Ecto}
@version Mix.Project.config[:version]
@shortdoc "Creates a new Phoenix v#{@version} application"
@switches [dev: :boolean, webpack: :boolean, ecto: :boolean,
app: :string, module: :string, web_module: :string,
database: :string, binary_id: :boolean, html: :boolean,
umbrella: :boolean, verbose: :boolean]
def run([version]) when version in ~w(-v --version) do
Mix.shell.info("Phoenix v#{@version}")
end
def run(argv) do
elixir_version_check!()
case parse_opts(argv) do
{_opts, []} -> Mix.Tasks.Help.run(["phx.new"])
{opts, [base_path | _]} ->
generator = if opts[:umbrella], do: Umbrella, else: Single
generate(base_path, generator, opts)
end
end
def run(argv, generator) do
elixir_version_check!()
case parse_opts(argv) do
{_opts, []} -> Mix.Tasks.Help.run(["phx.new"])
{opts, [base_path | _]} -> generate(base_path, generator, opts)
end
end
def generate(base_path, generator, opts) do
base_path
|> Project.new(opts)
|> generator.prepare_project()
|> Generator.put_binding()
|> validate_project()
|> generator.generate()
|> prompt_to_install_deps(generator)
end
defp validate_project(%Project{opts: opts} = project) do
check_app_name!(project.app, !!opts[:app])
check_directory_existence!(project.project_path)
check_module_name_validity!(project.root_mod)
check_module_name_availability!(project.root_mod)
project
end
defp prompt_to_install_deps(%Project{} = project, generator) do
install? = Mix.shell.yes?("\nFetch and install dependencies?")
cd_step = ["$ cd #{relative_app_path(project.project_path)}"]
maybe_cd(project.project_path, fn ->
mix_step = install_mix(project, install?)
compile =
case mix_step do
[] -> Task.async(fn -> rebar_available?() && cmd(project, "mix deps.compile") end)
_ -> Task.async(fn -> :ok end)
end
webpack_step = install_webpack(install?, project)
Task.await(compile, :infinity)
if Project.webpack?(project) and !System.find_executable("npm") do
print_webpack_info(project, generator)
end
print_missing_steps(cd_step ++ mix_step ++ webpack_step)
if Project.ecto?(project) do
print_ecto_info(project, generator)
end
print_mix_info(generator)
end)
end
defp maybe_cd(path, func), do: path && File.cd!(path, func)
defp parse_opts(argv) do
case OptionParser.parse(argv, strict: @switches) do
{opts, argv, []} ->
{opts, argv}
{_opts, _argv, [switch | _]} ->
Mix.raise "Invalid option: " <> switch_to_string(switch)
end
end
defp switch_to_string({name, nil}), do: name
defp switch_to_string({name, val}), do: name <> "=" <> val
defp install_webpack(install?, project) do
assets_path = Path.join(project.web_path || project.project_path, "assets")
webpack_config = Path.join(assets_path, "webpack.config.js")
maybe_cmd(project, "cd #{relative_app_path(assets_path)} && npm install && node node_modules/webpack/bin/webpack.js --mode development",
File.exists?(webpack_config), install? && System.find_executable("npm"))
end
defp install_mix(project, install?) do
maybe_cmd(project, "mix deps.get", true, install? && hex_available?())
end
defp hex_available? do
Code.ensure_loaded?(Hex)
end
defp rebar_available? do
Mix.Rebar.rebar_cmd(:rebar) && Mix.Rebar.rebar_cmd(:rebar3)
end
defp print_webpack_info(_project, _gen) do
Mix.shell.info """
Phoenix uses an optional assets build tool called webpack
that requires node.js and npm. Installation instructions for
node.js, which includes npm, can be found at http://nodejs.org.
The commands listed next expect that you have npm available.
If you don't want webpack, you can re-run this generator
with the --no-webpack option.
"""
end
defp print_missing_steps(steps) do
Mix.shell.info """
We are almost there! The following steps are missing:
#{Enum.join(steps, "\n ")}
"""
end
defp print_ecto_info(%Project{}, Web), do: :ok
defp print_ecto_info(%Project{app_path: nil}, _gen), do: :ok
defp print_ecto_info(%Project{app_path: app_path} = project, _gen) do
config_path =
app_path
|> Path.join("config/dev.exs")
|> Path.relative_to(project.project_path)
Mix.shell.info """
Then configure your database in #{config_path} and run:
$ mix ecto.create
"""
end
defp print_mix_info(gen) when gen in [Ecto] do
Mix.shell.info """
You can run your app inside IEx (Interactive Elixir) as:
$ iex -S mix
"""
end
defp print_mix_info(_gen) do
Mix.shell.info """
Start your Phoenix app with:
$ mix phx.server
You can also run your app inside IEx (Interactive Elixir) as:
$ iex -S mix phx.server
"""
end
defp relative_app_path(path) do
case Path.relative_to_cwd(path) do
^path -> Path.basename(path)
rel -> rel
end
end
## Helpers
@doc false
def recompile(regex) do
if Code.ensure_loaded?(Regex) and function_exported?(Regex, :recompile!, 1) do
apply(Regex, :recompile!, [regex])
else
regex
end
end
defp maybe_cmd(project, cmd, should_run?, can_run?) do
cond do
should_run? && can_run? ->
cmd(project, cmd)
should_run? ->
["$ #{cmd}"]
true ->
[]
end
end
defp cmd(%Project{} = project, cmd) do
Mix.shell.info [:green, "* running ", :reset, cmd]
case Mix.shell.cmd(cmd, cmd_opts(project)) do
0 ->
[]
_ ->
["$ #{cmd}"]
end
end
defp cmd_opts(%Project{} = project) do
if Project.verbose?(project) do
[]
else
[quiet: true]
end
end
defp check_app_name!(name, from_app_flag) do
unless name =~ recompile(~r/^[a-z][\w_]*$/) do
extra =
if !from_app_flag do
". The application name is inferred from the path, if you'd like to " <>
"explicitly name the application then use the `--app APP` option."
else
""
end
Mix.raise "Application name must start with a letter and have only lowercase " <>
"letters, numbers and underscore, got: #{inspect name}" <> extra
end
end
defp check_module_name_validity!(name) do
unless inspect(name) =~ recompile(~r/^[A-Z]\w*(\.[A-Z]\w*)*$/) do
Mix.raise "Module name must be a valid Elixir alias (for example: Foo.Bar), got: #{inspect name}"
end
end
defp check_module_name_availability!(name) do
[name]
|> Module.concat()
|> Module.split()
|> Enum.reduce([], fn name, acc ->
mod = Module.concat([Elixir, name | acc])
if Code.ensure_loaded?(mod) do
Mix.raise "Module name #{inspect mod} is already taken, please choose another name"
else
[name | acc]
end
end)
end
defp check_directory_existence!(path) do
if File.dir?(path) and not Mix.shell.yes?("The directory #{path} already exists. Are you sure you want to continue?") do
Mix.raise "Please select another directory for installation."
end
end
defp elixir_version_check! do
unless Version.match?(System.version, "~> 1.5") do
Mix.raise "Phoenix v#{@version} requires at least Elixir v1.5.\n " <>
"You have #{System.version()}. Please update accordingly"
end
end
end
# -- end of installer/lib/mix/tasks/phx.new.ex --
defmodule Ash.Type do
@array_constraints [
min_length: [
type: :non_neg_integer,
doc: "A minimum length for the items"
],
max_length: [
type: :non_neg_integer,
doc: "A maximum length for the items"
],
nil_items?: [
type: :boolean,
doc: "Whether or not the list can contain nil items",
default: true
]
]
@short_names [
map: Ash.Type.Map,
term: Ash.Type.Term,
atom: Ash.Type.Atom,
string: Ash.Type.String,
integer: Ash.Type.Integer,
float: Ash.Type.Float,
interval: Ash.Type.Interval,
function: Ash.Type.Function,
boolean: Ash.Type.Boolean,
uuid: Ash.Type.UUID,
binary: Ash.Type.Binary,
date: Ash.Type.Date,
decimal: Ash.Type.Decimal,
ci_string: Ash.Type.CiString,
utc_datetime: Ash.Type.UtcDatetime,
utc_datetime_usec: Ash.Type.UtcDatetimeUsec,
url_encoded_binary: Ash.Type.UrlEncodedBinary
]
@doc_array_constraints Keyword.put(@array_constraints, :items,
type: :any,
doc:
"Constraints for the elements of the list. See the contained type's docs for more."
)
@moduledoc """
Describes how to convert data to `Ecto.Type` and eventually into the database.
This behaviour is a superset of the `Ecto.Type` behaviour that also contains
API-level information, like what kinds of filters are allowed.
## Built in types
#{
Enum.map_join(@short_names, fn {key, module} ->
"* `#{inspect(key)}` - `#{inspect(module)}`\n"
end)
}
### Composite Types
Currently, the only composite type supported is a list type, specified via:
`{:array, Type}`. The constraints available are:
#{Ash.OptionsHelpers.docs(@doc_array_constraints)}
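For example, a list attribute in a resource might be declared like this
(a sketch; the attribute name and limits are illustrative):

```elixir
attribute :tags, {:array, :string},
  constraints: [max_length: 10, items: [max_length: 20]]
```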
## Defining Custom Types
Generally you add `use Ash.Type` to your module (it is possible to add `@behaviour
Ash.Type` and define everything yourself, but this is more work and error-prone).
You can override the `{:array, type}` behavior by defining the `*_array` versions
of `cast_input`, `cast_stored`, `dump_to_native` and `apply_constraints`, which
control how your type behaves as a collection. This is how the features of embedded
resources are implemented. No need to implement them unless you wish to override the
default behavior.
Simple example of a float custom type
```Elixir
defmodule GenTracker.AshFloat do
use Ash.Type
@impl Ash.Type
def storage_type, do: :float
@impl Ash.Type
def cast_input(value, _) do
Ecto.Type.cast(:float, value)
end
@impl Ash.Type
def cast_stored(value, _) do
Ecto.Type.load(:float, value)
end
@impl Ash.Type
def dump_to_native(value, _) do
Ecto.Type.dump(:float, value)
end
end
```
All the Ash built-in types are implemented with `use Ash.Type`, so they are good
examples to look at when creating your own `Ash.Type`.
"""
@type constraints :: Keyword.t()
@type constraint_error :: String.t() | {String.t(), Keyword.t()}
@type t :: atom | {:array, atom}
@callback storage_type() :: Ecto.Type.t()
@callback ecto_type() :: Ecto.Type.t()
@callback cast_input(term, constraints) :: {:ok, term} | {:error, Keyword.t()} | :error
@callback cast_input_array(list(term), constraints) ::
{:ok, list(term)} | {:error, Keyword.t()} | :error
@callback cast_stored(term, constraints) :: {:ok, term} | :error
@callback cast_stored_array(list(term), constraints) :: {:ok, list(term)} | :error
@callback dump_to_native(term, constraints) :: {:ok, term} | :error
@callback dump_to_native_array(list(term), constraints) :: {:ok, term} | :error
@callback dump_to_embedded(term, constraints) :: {:ok, term} | :error
@callback dump_to_embedded_array(list(term), constraints) :: {:ok, term} | :error
@callback handle_change(old_term :: term, new_term :: term, constraints) ::
{:ok, term} | {:error, term}
@callback handle_change_array(old_term :: list(term), new_term :: list(term), constraints) ::
{:ok, term} | {:error, term}
@callback prepare_change(old_term :: term, new_uncasted_term :: term, constraints) ::
{:ok, term} | {:error, term}
@callback prepare_change_array(
old_term :: list(term),
new_uncasted_term :: list(term),
constraints
) ::
{:ok, term} | {:error, term}
@callback constraints() :: constraints()
@callback array_constraints() :: constraints()
@callback apply_constraints(term, constraints) ::
{:ok, new_value :: term}
| :ok
| {:error, constraint_error() | list(constraint_error)}
@callback apply_constraints_array(list(term), constraints) ::
{:ok, new_values :: list(term)}
| :ok
| {:error, constraint_error() | list(constraint_error)}
@callback describe(constraints()) :: String.t() | nil
@callback equal?(term, term) :: boolean
@optional_callbacks [
cast_stored_array: 2,
cast_input_array: 2,
dump_to_native_array: 2,
handle_change_array: 3,
prepare_change_array: 3,
apply_constraints_array: 2,
array_constraints: 0,
dump_to_embedded: 2,
dump_to_embedded_array: 2
]
@builtin_types Keyword.values(@short_names)
def builtin?(type) when type in @builtin_types, do: true
def builtin?(_), do: false
def embedded_type?({:array, type}) do
embedded_type?(type)
end
def embedded_type?(type) do
Ash.Resource.Info.resource?(type)
end
def describe(type, constraints) do
case get_type(type) do
{:array, type} ->
type.describe(constraints)
type ->
type.describe(constraints)
end
end
def array_constraints({:array, type}) do
[items: array_constraints(type)]
end
def array_constraints(type) do
if ash_type?(type) do
type.array_constraints()
else
[]
end
end
@spec get_type(atom | module) :: atom | module | {:array, atom | module}
def get_type({:array, value}) do
{:array, get_type(value)}
end
def get_type(value) when is_atom(value) do
case Keyword.fetch(@short_names, value) do
{:ok, mod} -> mod
:error -> value
end
end
def get_type(value) do
value
end
@doc """
Process the old casted values alongside the new casted values.
This is leveraged by embedded types to know if something is being updated
or destroyed. This is not called on creates.
"""
def handle_change({:array, type}, old_value, new_value, constraints) do
if is_atom(type) && :erlang.function_exported(type, :handle_change_array, 3) do
type.handle_change_array(old_value, new_value, constraints)
else
{:ok, new_value}
end
end
def handle_change(type, old_value, new_value, constraints) do
type.handle_change(old_value, new_value, constraints)
end
@doc """
Process the old casted values alongside the new *un*casted values.
This is leveraged by embedded types to know if something is being updated
or destroyed. This is not called on creates.
"""
def prepare_change({:array, type}, old_value, new_value, constraints) do
if is_atom(type) && :erlang.function_exported(type, :prepare_change_array, 3) do
type.prepare_change_array(old_value, new_value, constraints)
else
{:ok, new_value}
end
end
def prepare_change(type, old_value, new_value, constraints) do
type.prepare_change(old_value, new_value, constraints)
end
@doc """
Returns the *underlying* storage type (the underlying type of the *ecto type* of the *ash type*)
"""
@spec storage_type(t()) :: Ecto.Type.t()
def storage_type({:array, type}), do: {:array, type.storage_type()}
def storage_type(type), do: type.storage_type()
@doc """
Returns the ecto compatible type for an Ash.Type.
If you `use Ash.Type`, this is created for you. For builtin types
this may return a corresponding ecto builtin type (atom)
"""
@spec ecto_type(t) :: Ecto.Type.t()
def ecto_type({:array, type}), do: {:array, ecto_type(type)}
for {name, mod} <- @short_names do
def ecto_type(unquote(name)), do: ecto_type(unquote(mod))
end
def ecto_type(type) do
type.ecto_type()
end
def ash_type_option(type) do
type = get_type(type)
if ash_type?(type) do
{:ok, type}
else
{:error, "Attribute type must be a built in type or a type module, got: #{inspect(type)}"}
end
end
@spec ash_type?(term) :: boolean
@doc "Returns true if the value is a builtin type or adopts the `Ash.Type` behaviour"
def ash_type?({:array, value}), do: ash_type?(value)
def ash_type?(module) when is_atom(module) do
case Code.ensure_compiled(module) do
{:module, _} ->
ash_type_module?(module)
_ ->
false
end
end
def ash_type?(_), do: false
@doc """
Casts input (e.g. unknown) data to an instance of the type, or errors
Maps to `Ecto.Type.cast/2`
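For example, with the builtin `:integer` type (a sketch):

    iex> Ash.Type.cast_input(:integer, "10")
    {:ok, 10}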
"""
@spec cast_input(t(), term, constraints | nil) :: {:ok, term} | {:error, Keyword.t()} | :error
def cast_input(type, term, constraints \\ [])
def cast_input({:array, type}, empty, constraints) when empty in [nil, ""],
do: cast_input({:array, type}, [], constraints)
def cast_input({:array, _type}, term, _) when not is_list(term) do
{:error, "is invalid"}
end
def cast_input({:array, type}, term, constraints) do
if is_atom(type) && :erlang.function_exported(type, :cast_input_array, 2) do
type.cast_input_array(term, constraints)
else
single_constraints = constraints[:items] || []
term
|> Enum.with_index()
|> Enum.reverse()
|> Enum.reduce_while({:ok, []}, fn {item, index}, {:ok, casted} ->
case cast_input(type, item, single_constraints) do
:error ->
{:halt, {:error, message: "invalid value at %{index}", index: index}}
{:error, keyword} ->
errors =
keyword
|> List.wrap()
|> Ash.Error.flatten_preserving_keywords()
|> Enum.map(fn
message when is_binary(message) ->
[message: message, index: index]
keyword ->
Keyword.put(keyword, :index, index)
end)
{:halt, {:error, errors}}
{:ok, value} ->
{:cont, {:ok, [value | casted]}}
end
end)
end
end
def cast_input(_, nil, _), do: {:ok, nil}
def cast_input(type, term, constraints) do
type = get_type(type)
case type.cast_input(term, constraints) do
{:ok, value} ->
{:ok, value}
:error ->
case term do
"" ->
cast_input(type, nil, constraints)
_ ->
{:error, "is invalid"}
end
{:error, other} ->
case term do
"" ->
cast_input(type, nil, constraints)
_ ->
{:error, other}
end
end
end
@doc """
Casts a value from the data store to an instance of the type, or errors
Maps to `Ecto.Type.load/2`
"""
@spec cast_stored(t(), term, constraints | nil) :: {:ok, term} | {:error, keyword()} | :error
def cast_stored(type, term, constraints \\ [])
def cast_stored({:array, type}, term, constraints) when is_list(term) do
if is_atom(type) && :erlang.function_exported(type, :cast_stored_array, 2) do
type.cast_stored_array(term, constraints)
else
term
|> Enum.with_index()
|> Enum.reverse()
|> Enum.reduce_while({:ok, []}, fn {item, index}, {:ok, casted} ->
single_constraints = constraints[:items] || []
case cast_stored(type, item, single_constraints) do
:error ->
{:halt, {:error, index: index}}
{:error, keyword} ->
errors =
keyword
|> List.wrap()
|> Ash.Error.flatten_preserving_keywords()
|> Enum.map(fn
string when is_binary(string) ->
[message: string, index: index]
vars ->
Keyword.put(vars, :index, index)
end)
{:halt, {:error, errors}}
{:ok, value} ->
{:cont, {:ok, [value | casted]}}
end
end)
end
end
def cast_stored(type, term, constraints) do
type = get_type(type)
type.cast_stored(term, constraints)
end
@doc """
Confirms if a casted value matches the provided constraints.
"""
@spec apply_constraints(t(), term, constraints()) :: {:ok, term} | {:error, String.t()}
def apply_constraints({:array, type}, term, constraints) when is_list(term) do
if is_atom(type) && :erlang.function_exported(type, :apply_constraints_array, 2) do
case type.apply_constraints_array(term, constraints) do
:ok -> {:ok, term}
other -> other
end
else
list_constraint_errors = list_constraint_errors(term, constraints)
case list_constraint_errors do
[] ->
nil_items? = Keyword.get(constraints, :nil_items?, true)
item_constraints = constraints[:items] || []
if item_constraints != [] || !nil_items? do
term
|> Enum.with_index()
|> Enum.reduce({[], []}, fn {item, index}, {items, errors} ->
if is_nil(item) && not nil_items? do
{[item | items], [[message: "no nil/null values", index: index] | errors]}
else
case apply_constraints(type, item, item_constraints) do
{:ok, value} ->
{[value | items], errors}
{:error, new_errors} ->
new_errors =
new_errors
|> List.wrap()
|> Ash.Error.flatten_preserving_keywords()
|> Enum.map(fn
string when is_binary(string) ->
[message: string, index: index]
vars ->
Keyword.put(vars, :index, index)
end)
{[item | items], List.wrap(new_errors) ++ errors}
end
end
end)
|> case do
{terms, []} ->
{:ok, Enum.reverse(terms)}
{_, errors} ->
{:error, errors}
end
else
{:ok, term}
end
errors ->
{:error, errors}
end
end
end
def apply_constraints({:array, _}, _, _) do
{:error, ["must be a list"]}
end
def apply_constraints(type, term, constraints) do
type = get_type(type)
if ash_type?(type) do
case type.apply_constraints(term, constraints) do
:ok -> {:ok, term}
other -> other
end
else
type.apply_constraints(term, [])
end
end
defp list_constraint_errors(term, constraints) do
length =
if Keyword.has_key?(constraints, :max_length) || Keyword.has_key?(constraints, :min_length) do
length(term)
else
0
end
constraints
|> Enum.reduce([], fn
{:min_length, min_length}, errors ->
if length < min_length do
[message: "must have %{min} or more items", min: min_length]
else
errors
end
{:max_length, max_length}, errors ->
if length > max_length do
[message: "must have %{max} or fewer items", max: max_length]
else
errors
end
_, errors ->
errors
end)
end
@spec constraints(Ash.Changeset.t() | Ash.Query.t(), Ash.Type.t(), Keyword.t()) :: Keyword.t()
def constraints(source, type, constraints) do
if embedded_type?(type) do
Keyword.put(constraints, :__source__, source)
else
constraints
end
end
@spec constraints(t()) :: constraints()
def constraints({:array, _type}) do
@array_constraints
end
def constraints(type) do
if ash_type?(type) do
type.constraints()
else
[]
end
end
@doc """
Casts a value from the Elixir type to a value that the data store can persist
Maps to `Ecto.Type.dump/2`
"""
@spec dump_to_native(t(), term, constraints | nil) :: {:ok, term} | {:error, keyword()} | :error
def dump_to_native(type, term, constraints \\ [])
def dump_to_native({:array, type}, term, constraints) do
if is_atom(type) && :erlang.function_exported(type, :dump_to_native_array, 2) do
type.dump_to_native_array(term, constraints)
else
single_constraints = constraints[:items] || []
term
|> Enum.reverse()
|> Enum.reduce_while({:ok, []}, fn item, {:ok, dumped} ->
case dump_to_native(type, item, single_constraints) do
:error ->
{:halt, :error}
{:ok, value} ->
{:cont, {:ok, [value | dumped]}}
end
end)
end
end
def dump_to_native(type, term, constraints) do
type = get_type(type)
type.dump_to_native(term, constraints)
end
@doc """
Casts a value from the Elixir type to a value that can be embedded in another data structure.
Embedded resources expect to be stored in JSON, so this allows things like UUIDs to be stored
as strings in embedded resources instead of binary.
"""
@spec dump_to_embedded(t(), term, constraints | nil) ::
{:ok, term} | {:error, keyword()} | :error
def dump_to_embedded(type, term, constraints \\ [])
def dump_to_embedded({:array, type}, term, constraints) do
if is_atom(type) && :erlang.function_exported(type, :dump_to_embedded_array, 2) do
type.dump_to_embedded_array(term, constraints)
else
single_constraints = constraints[:items] || []
term
|> Enum.reverse()
|> Enum.reduce_while({:ok, []}, fn item, {:ok, dumped} ->
case dump_to_embedded(type, item, single_constraints) do
:error ->
{:halt, :error}
{:ok, value} ->
{:cont, {:ok, [value | dumped]}}
end
end)
end
end
def dump_to_embedded(type, term, constraints) do
type = get_type(type)
if :erlang.function_exported(type, :dump_to_embedded, 2) do
type.dump_to_embedded(term, constraints)
else
type.dump_to_native(term, constraints)
end
end
@doc """
Determines if two values of a given type are equal.
Maps to `Ecto.Type.equal?/3`
"""
@spec equal?(t(), term, term) :: boolean
def equal?({:array, type}, [nil | xs], [nil | ys]), do: equal?({:array, type}, xs, ys)
def equal?({:array, type}, [x | xs], [y | ys]),
do: equal?(type, x, y) && equal?({:array, type}, xs, ys)
def equal?({:array, _}, [], []), do: true
def equal?({:array, _}, _, _), do: false
def equal?(type, left, right) do
type.equal?(left, right)
end
# @callback equal?(term, term) :: boolean
defmacro __using__(_) do
quote location: :keep do
@behaviour Ash.Type
parent = __MODULE__
defmodule EctoType do
@moduledoc false
@behaviour Ecto.Type
@parent parent
@impl true
def type do
@parent.storage_type()
end
@impl true
def cast(term) do
@parent.cast_input(term, [])
end
@impl true
def load(term) do
@parent.cast_stored(term, [])
end
@impl true
def dump(term) do
@parent.dump_to_native(term, [])
end
@impl true
def equal?(left, right) do
@parent.equal?(left, right)
end
@impl true
def embed_as(_), do: :self
end
@impl true
def ecto_type, do: EctoType
@impl true
def equal?(left, right), do: left == right
@impl true
def constraints, do: []
@impl true
def describe([]), do: String.trim_leading(inspect(__MODULE__), "Ash.Type.")
def describe(constraints) do
"#{String.trim_leading(inspect(__MODULE__), "Ash.Type.")} | #{inspect(constraints)}"
end
@impl true
def apply_constraints(_, _), do: :ok
@impl true
def handle_change(_old_value, new_value, _constraints), do: {:ok, new_value}
@impl true
def prepare_change(_old_value, new_value, _constraints), do: {:ok, new_value}
@impl true
def array_constraints do
unquote(@array_constraints)
end
defoverridable equal?: 2,
constraints: 0,
array_constraints: 0,
apply_constraints: 2,
handle_change: 3,
prepare_change: 3
end
end
defp ash_type_module?(module) do
Ash.Helpers.implements_behaviour?(module, __MODULE__)
end
end
|
lib/ash/type/type.ex
| 0.904329
| 0.753897
|
type.ex
|
starcoder
|
defmodule Alerts.Sort do
@moduledoc """
Sorts alerts in order of relevance. Currently, the logic is:
* effect name
* lifecycle
* severity
* updated at (newest first)
* future affected period (closest first)
* id
"""
@lifecycle_order [
:new,
:upcoming,
:ongoing_upcoming,
:ongoing
]
@effect_order [
:amber_alert,
:cancellation,
:delay,
:suspension,
:track_change,
:detour,
:shuttle,
:stop_closure,
:dock_closure,
:station_closure,
:stop_moved,
:extra_service,
:schedule_change,
:service_change,
:snow_route,
:stop_shoveling,
:station_issue,
:dock_issue,
:access_issue,
:policy_change
]
def sort(alerts, now) do
Enum.sort_by(alerts, &sort_key(&1, now))
end
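  # A minimal usage sketch (`alerts` is assumed to be a list of alert structs
  # carrying the fields referenced in sort_key/2 below):
  #
  #     Alerts.Sort.sort(alerts, DateTime.utc_now())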
defp sort_key(alert, now) do
{
-high_severity(alert),
priority(alert),
effect_index(alert.effect),
lifecycle_index(alert.lifecycle),
-alert.severity,
-updated_at_date(alert.updated_at),
first_future_active_period_start(alert.active_period, now),
alert.id
}
end
  # generate functions for looking up the indexes, rather than having to
# traverse the list each time
for {lifecycle, index} <- Enum.with_index(@lifecycle_order) do
defp lifecycle_index(unquote(lifecycle)), do: unquote(index)
end
# fallback
defp lifecycle_index(_), do: unquote(length(@lifecycle_order))
for {name, index} <- Enum.with_index(@effect_order) do
defp effect_index(unquote(name)), do: unquote(index)
end
# fallback
defp effect_index(_), do: unquote(length(@effect_order))
defp high_severity(%{severity: severity}) when severity >= 7 do
severity
end
defp high_severity(_), do: 0
defp updated_at_date(dt) do
dt
|> Timex.beginning_of_day()
|> Timex.to_unix()
end
defp priority(%{priority: :low}), do: 1
defp priority(%{priority: :high}), do: 0
defp priority(%{priority: :system}), do: 1
# atoms are greater than any integer
defp first_future_active_period_start([], _now), do: :infinity
defp first_future_active_period_start(periods, now) do
# first active period that's in the future
now_unix = DateTime.to_unix(now, :second)
future_periods =
for {start, _} <- periods,
start,
# wrap in a list to avoid an Erlang 19.3 issue
unix <- [DateTime.to_unix(start)],
unix > now_unix do
unix
end
if future_periods == [] do
:infinity
else
Enum.min(future_periods)
end
end
end
|
apps/alerts/lib/sort.ex
| 0.627381
| 0.412501
|
sort.ex
|
starcoder
|
defmodule Phoenix.Template do
@moduledoc """
Templates are used by Phoenix when rendering responses.
Since many views render significant content, for example
a whole HTML file, it is common to put these files into a particular
directory, typically "web/templates".
This module provides conveniences for reading all files from a
particular directory and embedding them into a single module.
Imagine you have a directory with templates:
# templates/foo.html.eex
Hello <%= @name %>
# templates.ex
defmodule Templates do
use Phoenix.Template, root: "templates"
end
Now the template foo can be directly rendered with:
Templates.render("foo.html", %{name: "<NAME>"})
## Options
* `:root` - the root template path to find templates
* `:pattern` - the wildcard pattern to apply to the root
when finding templates. Default `"*"`
## Rendering
In some cases, you will want to override the `render/2` clause
to compose the assigns for the template before rendering. In such
cases, you can render the template directly by calling the generated
private function `render_template/2`. For example:
# templates/foo.html.eex
Hello <%= @name %>
# templates.ex
defmodule Templates do
use Phoenix.Template, root: "templates"
def render("foo.html", %{name: name}) do
render_template("foo.html", %{name: String.upcase(name)})
end
end
In practice, developers rarely use `Phoenix.Template`
directly. Instead they use `Phoenix.View` which wraps the template
functionality and adds some extra conveniences.
## Terminology
Here is a quick introduction into Phoenix templates terms:
* template name - is the name of the template as
given by the user, without the template engine extension,
for example: "users.html"
* template path - is the complete path of the template
in the filesystem, for example, "path/to/users.html.eex"
* template root - the directory where templates are defined
* template engine - a module that receives a template path
and transforms its source code into Elixir quoted expressions.
## Custom Template Engines
Phoenix supports custom template engines. Engines tell
Phoenix how to convert a template path into quoted expressions.
See `Phoenix.Template.Engine` for more information on
the API required to be implemented by custom engines.
Once a template engine is defined, you can tell Phoenix
about it via the template engines option:
config :phoenix, :template_engines,
eex: Phoenix.Template.EExEngine,
exs: Phoenix.Template.ExsEngine
## Format encoders
Besides template engines, Phoenix has the concept of format encoders.
Format encoders work per format and are responsible for encoding a
given format to string once the view layer finishes processing.
A format encoder must export a function called `encode_to_iodata!/1`
which receives the rendering artifact and returns iodata.
New encoders can be added via the format encoder option:
config :phoenix, :format_encoders,
html: Phoenix.HTML.Engine,
json: Poison
"""
@type name :: binary
@type path :: binary
@type root :: binary
alias Phoenix.Template
@encoders [html: Phoenix.Template.HTML, json: Poison, js: Phoenix.Template.HTML]
@engines [eex: Phoenix.Template.EExEngine, exs: Phoenix.Template.ExsEngine]
@default_pattern "*"
defmodule UndefinedError do
@moduledoc """
Exception raised when a template cannot be found.
"""
defexception [:available, :template, :module, :root, :assigns, :pattern]
def message(exception) do
"Could not render #{inspect exception.template} for #{inspect exception.module}, "
<> "please define a matching clause for render/2 or define a template at "
<> "#{inspect Path.relative_to_cwd exception.root}. "
<> available_templates(exception.available)
<> "\nAssigns:\n\n"
<> inspect(exception.assigns)
<> "\n\nAssigned keys: #{inspect Map.keys(exception.assigns)}\n"
end
defp available_templates([]), do: "No templates were compiled for this module."
defp available_templates(available) do
"The following templates were compiled:\n\n"
<> Enum.map_join(available, "\n", &"* #{&1}")
<> "\n"
end
end
@doc false
defmacro __using__(options) do
quote bind_quoted: [options: options], unquote: true do
root = Keyword.fetch!(options, :root)
@phoenix_root Path.relative_to_cwd(root)
@phoenix_pattern Keyword.get(options, :pattern, unquote(@default_pattern))
@before_compile unquote(__MODULE__)
@doc """
Renders the given template locally.
"""
def render(template, assigns \\ %{})
def render(module, template) when is_atom(module) do
Phoenix.View.render(module, template, %{})
end
def render(template, _assigns) when not is_binary(template) do
raise ArgumentError, "render/2 expects template to be a string, got: #{inspect template}"
end
def render(template, assigns) when not is_map(assigns) do
render(template, Enum.into(assigns, %{}))
end
@doc """
Callback invoked when no template is found.
By default it raises but can be customized
to render a particular template.
"""
@spec template_not_found(Phoenix.Template.name, map) :: no_return
def template_not_found(template, assigns) do
Template.raise_template_not_found(__MODULE__, template, assigns)
end
defoverridable [template_not_found: 2]
end
end
@anno (if :erlang.system_info(:otp_release) >= '19' do
[generated: true]
else
[line: -1]
end)
@doc false
defmacro __before_compile__(env) do
root = Module.get_attribute(env.module, :phoenix_root)
pattern = Module.get_attribute(env.module, :phoenix_pattern)
pairs = for path <- find_all(root, pattern) do
compile(path, root)
end
names = Enum.map(pairs, &elem(&1, 0))
codes = Enum.map(pairs, &elem(&1, 1))
# We are using @anno because we don't want warnings coming from
# render/2 to be reported in case the user has defined a catch all
# render/2 clause.
quote @anno do
unquote(codes)
# Catch-all clause for rendering.
def render(template, assigns) do
render_template(template, assigns)
end
# Catch-all clause for template rendering.
defp render_template(template, %{render_existing: {__MODULE__, template}}) do
nil
end
defp render_template(template, %{template_not_found: __MODULE__} = assigns) do
Template.raise_template_not_found(__MODULE__, template, assigns)
end
defp render_template(template, assigns) do
template_not_found(template, Map.put(assigns, :template_not_found, __MODULE__))
end
@doc """
Returns the template root alongside all templates.
"""
def __templates__ do
{@phoenix_root, @phoenix_pattern, unquote(names)}
end
@doc """
Returns true whenever the list of templates changes in the filesystem.
"""
def __phoenix_recompile__? do
unquote(hash(root, pattern)) != Template.hash(@phoenix_root, @phoenix_pattern)
end
end
end
@doc """
Returns the format encoder for the given template name.
"""
@spec format_encoder(name) :: module | nil
def format_encoder(template_name) when is_binary(template_name) do
Map.get(compiled_format_encoders(), Path.extname(template_name))
end
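  # A sketch of how encoder lookup keys off the file extension (the result
  # assumes the default @encoders above and no user overrides):
  #
  #     Phoenix.Template.format_encoder("users.html")
  #     #=> Phoenix.Template.HTML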
defp compiled_format_encoders do
case Application.fetch_env(:phoenix, :compiled_format_encoders) do
{:ok, encoders} ->
encoders
:error ->
encoders =
@encoders
|> Keyword.merge(raw_config(:format_encoders))
|> Enum.filter(fn {_, v} -> v end)
|> Enum.into(%{}, fn {k, v} -> {".#{k}", v} end)
Application.put_env(:phoenix, :compiled_format_encoders, encoders)
encoders
end
end
@doc """
Returns a keyword list with all template engines
extensions followed by their modules.
"""
@spec engines() :: %{atom => module}
def engines do
compiled_engines()
end
defp compiled_engines do
case Application.fetch_env(:phoenix, :compiled_template_engines) do
{:ok, engines} ->
engines
:error ->
engines =
@engines
|> Keyword.merge(raw_config(:template_engines))
|> Enum.filter(fn {_, v} -> v end)
|> Enum.into(%{})
Application.put_env(:phoenix, :compiled_template_engines, engines)
engines
end
end
defp raw_config(name) do
Application.get_env(:phoenix, name) ||
raise "could not load #{name} configuration for Phoenix. " <>
"Please ensure you have listed :phoenix under :applications in your " <>
"mix.exs file and have enabled the :phoenix compiler under :compilers, " <>
"for example: [:phoenix] ++ Mix.compilers"
end
@doc """
Converts the template path into the template name.
## Examples
iex> Phoenix.Template.template_path_to_name(
...> "lib/templates/admin/users/show.html.eex",
...> "lib/templates")
"admin/users/show.html"
"""
@spec template_path_to_name(path, root) :: name
def template_path_to_name(path, root) do
path
|> Path.rootname()
|> Path.relative_to(root)
end
@doc """
Converts a module, without the suffix, to a template root.
## Examples
iex> Phoenix.Template.module_to_template_root(MyApp.UserView, MyApp, "View")
"user"
iex> Phoenix.Template.module_to_template_root(MyApp.Admin.User, MyApp, "View")
"admin/user"
iex> Phoenix.Template.module_to_template_root(MyApp.Admin.User, MyApp.Admin, "View")
"user"
iex> Phoenix.Template.module_to_template_root(MyApp.View, MyApp, "View")
""
iex> Phoenix.Template.module_to_template_root(MyApp.View, MyApp.View, "View")
""
"""
def module_to_template_root(module, base, suffix) do
module
|> Phoenix.Naming.unsuffix(suffix)
|> Module.split
|> Enum.drop(length(Module.split(base)))
|> Enum.map(&Phoenix.Naming.underscore/1)
|> join_paths
end
defp join_paths([]), do: ""
defp join_paths(paths), do: Path.join(paths)
@doc """
Returns all template paths in a given template root.
"""
@spec find_all(root, pattern :: String.t) :: [path]
def find_all(root, pattern \\ @default_pattern) do
extensions = engines() |> Map.keys() |> Enum.join(",")
root
|> Path.join(pattern <> ".{#{extensions}}")
|> Path.wildcard()
end
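  # A sketch of the wildcard this builds (assuming only the default engines
  # are configured):
  #
  #     find_all("web/templates", "*")
  #     # expands to Path.wildcard("web/templates/*.{eex,exs}")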
@doc """
Returns the hash of all template paths in the given root.
Used by Phoenix to check if a given root path requires recompilation.
"""
@spec hash(root, pattern :: String.t) :: binary
def hash(root, pattern \\ @default_pattern) do
find_all(root, pattern)
|> Enum.sort()
|> :erlang.md5()
end
@doc false
def raise_template_not_found(view_module, template, assigns) do
{root, pattern, names} = view_module.__templates__()
raise UndefinedError,
assigns: assigns,
available: names,
template: template,
root: root,
pattern: pattern,
module: view_module
end
defp compile(path, root) do
name = template_path_to_name(path, root)
defp = String.to_atom(name)
    ext = path |> Path.extname() |> String.trim_leading(".") |> String.to_atom()
engine = Map.fetch!(engines(), ext)
quoted = engine.compile(path, name)
{name, quote do
@file unquote(path)
@external_resource unquote(path)
defp unquote(defp)(var!(assigns)) do
_ = var!(assigns)
unquote(quoted)
end
defp render_template(unquote(name), assigns) do
unquote(defp)(assigns)
end
end}
end
end
|
lib/phoenix/template.ex
| 0.903049
| 0.455199
|
template.ex
|
starcoder
|
defmodule TimeZoneInfo.DataStore.PersistentTerm do
if function_exported?(:persistent_term, :get, 0) do
@moduledoc false
# This module implements the `TimeZoneInfo.DataStore` and stores the data with
# [:persistent_term](https://erlang.org/doc/man/persistent_term.html).
@behaviour TimeZoneInfo.DataStore
@app :time_zone_info
@impl true
def put(data) do
put_time_zone_info(data)
put_transitions(data)
put_rules(data)
put_links(data)
put_time_zones(data)
end
@impl true
def get_transitions(time_zone, link \\ false) do
case :persistent_term.get({@app, :transitions, time_zone}, :not_found) do
:not_found ->
case link do
true -> {:error, :transitions_not_found}
false -> time_zone |> get_link() |> get_transitions(true)
end
transitions ->
{:ok, transitions}
end
end
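  # A sketch of the underlying key layout (the time zone name is illustrative):
  #
  #     :persistent_term.get({:time_zone_info, :transitions, "Europe/Berlin"}, :not_found)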
@impl true
def get_rules(rules) do
case :persistent_term.get({@app, :rules, rules}, :not_found) do
      :not_found -> {:error, :rules_not_found}
rules -> {:ok, rules}
end
end
@impl true
def get_time_zones(links: select) when select in [:ignore, :only, :include] do
with %{time_zones: time_zones, links: links, all: all} <-
:persistent_term.get({@app, :time_zones}, []) do
case select do
:ignore -> time_zones
:only -> links
:include -> all
end
end
end
@impl true
def empty? do
case version() do
nil -> true
_ -> false
end
end
@impl true
def version do
:persistent_term.get({@app, :version}, nil)
end
@impl true
def delete! do
:persistent_term.get()
|> Enum.map(fn {key, _value} -> key end)
|> Enum.filter(fn
key when is_tuple(key) -> elem(key, 0) == @app
_ -> false
end)
|> Enum.each(fn key ->
:persistent_term.erase(key)
end)
end
@impl true
def info do
{count, memory} =
:persistent_term.get()
|> Enum.reduce({0, 0}, fn {key, value}, {count, memory} = acc ->
case is_tuple(key) && elem(key, 0) == @app do
true -> {count + 1, memory + memory(key) + memory(value)}
false -> acc
end
end)
%{
version: version(),
count: count,
memory: memory,
time_zones: length(get_time_zones(links: :ignore)),
links: length(get_time_zones(links: :only))
}
end
defp memory(value), do: value |> :erlang.term_to_binary() |> byte_size()
defp put_time_zone_info(data) do
version = Map.get(data, :version)
:persistent_term.put({@app, :version}, version)
end
defp put_transitions(data) do
data
|> Map.get(:time_zones, %{})
|> Enum.each(fn {time_zone, transitions} ->
:persistent_term.put({@app, :transitions, time_zone}, transitions)
end)
end
defp put_rules(data) do
data
|> Map.get(:rules, %{})
|> Enum.each(fn {name, rules} ->
:persistent_term.put({@app, :rules, name}, rules)
end)
end
defp put_links(data) do
data
|> Map.get(:links, %{})
|> Enum.each(fn {from, to} ->
:persistent_term.put({@app, :link, from}, to)
end)
end
defp put_time_zones(data) do
time_zones = data |> Map.get(:time_zones) |> Map.keys() |> Enum.sort()
links = data |> Map.get(:links) |> Map.keys() |> Enum.sort()
all = time_zones |> Enum.concat(links) |> Enum.sort()
data = %{time_zones: time_zones, links: links, all: all}
:persistent_term.put({@app, :time_zones}, data)
end
defp get_link(time_zone), do: :persistent_term.get({@app, :link, time_zone}, :not_found)
end
end
|
lib/time_zone_info/data_store/persistent_term.ex
| 0.83602
| 0.511778
|
persistent_term.ex
|
starcoder
|
defmodule BitPal.ExchangeRate do
alias BitPal.Currencies
alias BitPalSchemas.Currency
@type pair :: {Currency.id(), Currency.id()}
@type t :: %__MODULE__{
rate: Decimal.t(),
pair: pair
}
defstruct [:rate, :pair]
# Creation
@spec new!(Money.t(), Money.t()) :: t
@spec new!(Decimal.t(), pair) :: t
def new!(x, y) do
case new(x, y) do
{:ok, res} -> res
_ -> raise ArgumentError, "invalid params to ExchangeRate.new"
end
end
@spec new(Decimal.t(), pair) :: {:ok, t} | :error
def new(_, {a, a}) do
:error
end
def new(rate, {a, b}) do
with false <- Decimal.lt?(rate, Decimal.new(0)),
{:ok, a} <- Currencies.cast(a),
{:ok, b} <- Currencies.cast(b) do
{:ok,
%__MODULE__{
rate: rate,
pair: {a, b}
}}
else
_ -> :error
end
end
@spec new(Money.t(), Money.t()) :: {:ok, t} | :error
def new(a, b) do
cond do
a.currency == b.currency ->
:error
Money.zero?(a) ->
:error
true ->
{:ok,
%__MODULE__{
rate: Decimal.div(Money.to_decimal(b), Money.to_decimal(a)),
pair: {a.currency, b.currency}
}}
end
end
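  # A minimal sketch (the currency atoms, amounts, and `Money.parse!/2` usage
  # are assumptions; the rate is the ratio of the two decimal values):
  #
  #     {:ok, rate} = ExchangeRate.new(Money.parse!("1.0", :BTC), Money.parse!("30000", :USD))
  #     rate.pair #=> {:BTC, :USD}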
# Handling
@spec normalize(t, Money.t(), Money.t()) ::
{:ok, Money.t(), Money.t()}
| {:error, :mismatched_exchange_rate}
| {:error, :bad_params}
def normalize(exchange_rate, a, b) do
{ex_a, ex_b} = exchange_rate.pair
case {a, b} do
{%Money{currency: ^ex_a}, nil} ->
{:ok, a,
Money.parse!(
Decimal.mult(exchange_rate.rate, Money.to_decimal(a)),
elem(exchange_rate.pair, 1)
)}
{nil, %Money{currency: ^ex_b}} ->
{:ok,
Money.parse!(
Decimal.div(Money.to_decimal(b), exchange_rate.rate),
elem(exchange_rate.pair, 0)
), b}
{%Money{currency: ^ex_b}, %Money{currency: ^ex_a}} ->
normalize(exchange_rate, b, a)
{%Money{currency: ^ex_a}, %Money{currency: ^ex_b}} ->
case new(a, b) do
{:ok, rate} ->
if eq?(exchange_rate, rate) do
{:ok, a, b}
else
{:error, :mismatched_exchange_rate}
end
_ ->
{:error, :bad_params}
end
_ ->
{:error, :bad_params}
end
end
@spec eq?(t, t) :: boolean
def eq?(a, b) do
a.pair == b.pair && Decimal.eq?(a.rate, b.rate)
end
@spec basecurrency(t) :: Currency.id()
def basecurrency(rate) do
elem(rate.pair, 0)
end
@spec currency(t) :: Currency.id()
def currency(rate) do
elem(rate.pair, 1)
end
# Parsing
@spec parse_pair(binary | {atom | String.t(), atom | String.t()}) ::
{:ok, pair} | {:error, :bad_pair}
def parse_pair(pair) when is_binary(pair) do
case String.split(pair, "-") do
[from, to] ->
parse_pair({from, to})
_ ->
{:error, :bad_pair}
end
end
def parse_pair({from, to}) do
{:ok, from} = Currencies.cast(from)
{:ok, to} = Currencies.cast(to)
{:ok, {from, to}}
rescue
_ ->
{:error, :bad_pair}
end
end
|
lib/bitpal/exchange_rate/exchange_rate.ex
| 0.882365
| 0.614307
|
exchange_rate.ex
|
starcoder
|
defmodule Axon.Optimizers do
@moduledoc """
Implementations of common gradient-based optimization algorithms.
All of the methods in this module are written in terms of
the update methods defined in `Axon.Updates`. Axon treats
optimizers as the tuple:
{init_fn, update_fn}
where `init_fn` returns an initial optimizer state and `update_fn`
scales input gradients. `init_fn` accepts a model's parameters
and attaches state to each parameter. `update_fn` accepts
gradients, optimizer state, and current model parameters and
returns updated optimizer state and gradients.
Custom optimizers are often created via the `Axon.Updates` API.
## Example
Consider the following usage of the Adam optimizer in a basic
update function (assuming `objective` and the `dataset` are
defined elsewhere):
defmodule Learning do
import Nx.Defn
defn init(params, init_fn) do
init_fn.(params)
end
defn update(params, optimizer_state, inputs, targets, update_fn) do
{loss, gradient} = value_and_grad(params, &objective(&1, inputs, targets))
{scaled_updates, new_optimizer_state} = update_fn.(gradient, optimizer_state, params)
{Axon.Updates.apply_updates(params, scaled_updates), new_optimizer_state, loss}
end
end
      model_params = Nx.random_uniform({784, 10})
      {init_fn, update_fn} = Axon.Optimizers.adam(0.005)
      optimizer_state =
        Learning.init(model_params, init_fn)
      {new_params, new_optimizer_state, loss} =
        Learning.update(model_params, optimizer_state, inputs, targets, update_fn)
For a simpler approach, you can also use optimizers with the training API:
model
|> Axon.Loop.trainer(:categorical_cross_entropy, Axon.Optimizers.adam(0.005))
|> Axon.Loop.run(data, epochs: 10, compiler: EXLA)
"""
alias Axon.Updates
@doc """
Adabelief optimizer.
## Options
* `:b1` - first moment decay. Defaults to `0.9`
* `:b2` - second moment decay. Defaults to `0.999`
* `:eps` - numerical stability term. Defaults to `0.0`
* `:eps_root` - numerical stability term. Defaults to `1.0e-16`
## References
* [AdaBelief Optimizer: Adapting Stepsizes by the Belief in Observed Gradients](https://arxiv.org/abs/2010.07468)
"""
def adabelief(learning_rate, opts \\ []) do
Updates.scale_by_belief(opts)
|> scale_by_learning_rate(learning_rate)
end
@doc """
Adagrad optimizer.
## Options
* `:eps` - numerical stability term. Defaults to `1.0e-7`
## References
* [Adaptive Subgradient Methods for Online Learning and Stochastic Optimization](https://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
"""
def adagrad(learning_rate, opts \\ []) do
Updates.scale_by_rss(opts)
|> scale_by_learning_rate(learning_rate)
end
@doc """
Adam optimizer.
## Options
* `:b1` - first moment decay. Defaults to `0.9`
* `:b2` - second moment decay. Defaults to `0.999`
* `:eps` - numerical stability term. Defaults to `1.0e-8`
* `:eps_root` - numerical stability term. Defaults to `1.0e-15`
## References
* [Adam: A Method for Stochastic Optimization](https://arxiv.org/abs/1412.6980)
"""
def adam(learning_rate, opts \\ []) do
Updates.scale_by_adam(opts)
|> scale_by_learning_rate(learning_rate)
end
@doc """
Adam with weight decay optimizer.
## Options
* `:b1` - first moment decay. Defaults to `0.9`
* `:b2` - second moment decay. Defaults to `0.999`
* `:eps` - numerical stability term. Defaults to `1.0e-8`
* `:eps_root` - numerical stability term. Defaults to `0.0`
* `:decay` - weight decay. Defaults to `0.0`
"""
def adamw(learning_rate, opts \\ []) do
{decay, opts} = Keyword.pop(opts, :decay, 0.0)
Updates.scale_by_adam(opts)
|> Updates.add_decayed_weights(decay: decay)
|> scale_by_learning_rate(learning_rate)
end
@doc """
Fromage optimizer.
## Options
* `:min_norm` - minimum norm value. Defaults to `0.0`.
## References
* [On the distance between two neural networks and the stability of learning](https://proceedings.neurips.cc/paper/2020/file/f4b31bee138ff5f7b84ce1575a738f95-Paper.pdf)
"""
def fromage(learning_rate, opts \\ []) do
if is_function(learning_rate) do
raise ArgumentError,
"fromage optimizer does not support learning rate schedule," <>
" please provide a scalar learning rate"
end
mult = Nx.divide(1, Nx.sqrt(Nx.add(1, Nx.power(learning_rate, 2))))
Updates.scale_by_trust_ratio(opts)
|> Updates.scale(Nx.multiply(-learning_rate, mult))
|> Updates.add_decayed_weights(decay: Nx.subtract(mult, 1))
end
@doc """
Lamb optimizer.
## Options
* `:b1` - first moment decay. Defaults to `0.9`
* `:b2` - second moment decay. Defaults to `0.999`
* `:eps` - numerical stability term. Defaults to `1.0e-8`
* `:eps_root` - numerical stability term. Defaults to `0.0`
* `:decay` - weight decay. Defaults to `0.0`
* `:min_norm` - minimum norm value. Defaults to `0.0`
## References
* [Large Batch Optimization for Deep Learning: Training BERT in 76 minutes](https://arxiv.org/abs/1904.00962)
"""
def lamb(learning_rate, opts \\ []) do
{decay, opts} = Keyword.pop(opts, :decay, 0.0)
{min_norm, opts} = Keyword.pop(opts, :min_norm, 0.0)
Updates.scale_by_adam(opts)
|> Updates.add_decayed_weights(decay: decay)
|> Updates.scale_by_trust_ratio(min_norm: min_norm)
|> scale_by_learning_rate(learning_rate)
end
@doc """
Noisy SGD optimizer.
## Options
* `:eta` - used to compute variance of noise distribution. Defaults to `0.1`
* `:gamma` - used to compute variance of noise distribution. Defaults to `0.55`
"""
def noisy_sgd(learning_rate, opts \\ []) do
scale_by_learning_rate(learning_rate)
|> Updates.add_noise(opts)
end
@doc """
Rectified Adam optimizer.
## Options
* `:b1` - first moment decay. Defaults to `0.9`
* `:b2` - second moment decay. Defaults to `0.999`
* `:eps` - numerical stability term. Defaults to `1.0e-8`
* `:eps_root` - numerical stability term. Defaults to `0.0`
* `:threshold` - threshold term. Defaults to `5.0`
## References
* [On the Variance of Adaptive Learning Rate and Beyond](https://arxiv.org/pdf/1908.03265.pdf)
"""
def radam(learning_rate, opts \\ []) do
Updates.scale_by_radam(opts)
|> scale_by_learning_rate(learning_rate)
end
@doc """
RMSProp optimizer.
## Options
* `:centered` - whether to scale by centered root of EMA of squares. Defaults to `false`
* `:momentum` - momentum term. If set, uses SGD with momentum and decay set
to value of this term.
* `:nesterov` - whether or not to use nesterov momentum. Defaults to `false`
* `:initial_scale` - initial value of EMA. Defaults to `0.0`
* `:decay` - EMA decay rate. Defaults to `0.9`
* `:eps` - numerical stability term. Defaults to `1.0e-8`
"""
def rmsprop(learning_rate, opts \\ []) do
{centered, opts} = Keyword.pop(opts, :centered, false)
{nesterov?, opts} = Keyword.pop(opts, :nesterov, false)
{momentum, opts} = Keyword.pop(opts, :momentum, nil)
combinator =
if centered do
Updates.scale_by_stddev(opts)
else
Updates.scale_by_rms(opts)
end
|> scale_by_learning_rate(learning_rate)
if momentum,
do: Updates.trace(combinator, decay: momentum, nesterov: nesterov?),
else: combinator
end
@doc """
SGD optimizer.
## Options
* `:momentum` - momentum term. If set, uses SGD with momentum and decay set
to value of this term.
* `:nesterov` - whether or not to use nesterov momentum. Defaults to `false`
"""
def sgd(learning_rate, opts \\ []) do
momentum = opts[:momentum]
nesterov? = opts[:nesterov] || false
if momentum do
Updates.trace(decay: momentum, nesterov: nesterov?)
|> scale_by_learning_rate(learning_rate)
else
scale_by_learning_rate(learning_rate)
end
end
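  # A minimal usage sketch (`model_params` is assumed to be defined elsewhere):
  #
  #     {init_fn, update_fn} = Axon.Optimizers.sgd(0.01, momentum: 0.9)
  #     optimizer_state = init_fn.(model_params)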
@doc """
Yogi optimizer.
## Options
* `:initial_accumulator_value` - initial value for first and second moment. Defaults to `0.0`
* `:b1` - first moment decay. Defaults to `0.9`
* `:b2` - second moment decay. Defaults to `0.999`
* `:eps` - numerical stability term. Defaults to `1.0e-8`
* `:eps_root` - numerical stability term. Defaults to `0.0`
## References
* [Adaptive Methods for Nonconvex Optimization](https://papers.nips.cc/paper/2018/file/90365351ccc7437a1309dc64e4db32a3-Paper.pdf)
"""
def yogi(learning_rate, opts \\ []) do
Updates.scale_by_yogi(opts)
|> scale_by_learning_rate(learning_rate)
end
## Helpers
defp scale_by_learning_rate(combinator \\ Updates.identity(), lr)
defp scale_by_learning_rate(combinator, schedule) when is_function(schedule, 1) do
Updates.scale_by_schedule(combinator, fn count -> Nx.negate(schedule.(count)) end)
end
defp scale_by_learning_rate(combinator, lr) do
Updates.scale(combinator, -lr)
end
end
|
lib/axon/optimizers.ex
| 0.957378
| 0.786336
|
optimizers.ex
|
starcoder
|
defmodule TypeCheck.Builtin.NamedType do
defstruct [:name, :type, :local]
use TypeCheck
@type! t :: %TypeCheck.Builtin.NamedType{name: atom(), type: TypeCheck.Type.t(), local: boolean()}
@type! problem_tuple ::
{t(), :named_type, %{problem: lazy(TypeCheck.TypeError.Formatter.problem_tuple())},
any()}
def stringify_name(atom, _opts) when is_atom(atom), do: to_string(atom)
def stringify_name(str, _opts) when is_binary(str), do: to_string(str)
def stringify_name(other, opts), do: TypeCheck.Protocols.Inspect.inspect(other, opts)
defimpl TypeCheck.Protocols.ToCheck do
def to_check(s, param) do
inner_check = TypeCheck.Protocols.ToCheck.to_check(s.type, param)
quote generated: true, location: :keep do
inner_res = unquote(inner_check)
case inner_res do
{:ok, bindings, altered_inner} ->
# Write it to a non-hygienic variable
# that we can read from more outer-level types
# unquote(Macro.var(s.name, TypeCheck.Builtin.NamedType)) = unquote(param)
{:ok, [{unquote(s.name), unquote(param)} | bindings], altered_inner}
{:error, problem} ->
{:error, {unquote(Macro.escape(s)), :named_type, %{problem: problem}, unquote(param)}}
end
end
end
end
defimpl TypeCheck.Protocols.Inspect do
def inspect(s, opts) do
if Map.get(opts, :show_long_named_type, false) || s.local do
@for.stringify_name(s.name, opts)
|> Inspect.Algebra.glue("::")
|> Inspect.Algebra.glue(TypeCheck.Protocols.Inspect.inspect(s.type, Map.put(opts, :show_long_named_type, false)))
|> Inspect.Algebra.group()
else
@for.stringify_name(s.name, opts)
end
|> Inspect.Algebra.color(:named_type, opts)
end
end
if Code.ensure_loaded?(StreamData) do
defimpl TypeCheck.Protocols.ToStreamData do
def to_gen(s) do
TypeCheck.Protocols.ToStreamData.to_gen(s.type)
end
end
end
end
|
lib/type_check/builtin/named_type.ex
| 0.533884
| 0.466724
|
named_type.ex
|
starcoder
|
defmodule Result do
@moduledoc """
Documentation for Result.
"""
@type t(error, value) :: Result.Error.t(error) | Result.Ok.t(value)
@doc """
See `Result.Ok.of/1`
"""
defdelegate ok(value), to: Result.Ok, as: :of
@doc """
See `Result.Error.of/1`
"""
defdelegate error(value), to: Result.Error, as: :of
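  # A minimal sketch of chaining (this assumes `ok/1` and `error/1` build the
  # wrapped values that `Result.Operators` consumes):
  #
  #     Result.ok(1)
  #     |> Result.map(&(&1 + 1))
  #     |> Result.and_then(fn x -> Result.ok(x * 2) end)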
# Operators
@doc """
See `Result.Operators.fold/1`
"""
defdelegate fold(result), to: Result.Operators
@doc """
See `Result.Operators.map/2`
"""
defdelegate map(result, f), to: Result.Operators
@doc """
See `Result.Operators.map2/3`
"""
defdelegate map2(result1, result2, f), to: Result.Operators
@doc """
See `Result.Operators.map_error/2`
"""
defdelegate map_error(result, f), to: Result.Operators
@doc """
See `Result.Operators.catch_error/3`
"""
defdelegate catch_error(result, expected_error, f), to: Result.Operators
@doc """
See `Result.Operators.catch_all_errors/2`
"""
defdelegate catch_all_errors(result, f), to: Result.Operators
@doc """
See `Result.Operators.perform/2`
"""
defdelegate perform(result, f), to: Result.Operators
@doc """
See `Result.Operators.and_then/2`
"""
defdelegate and_then(result, f), to: Result.Operators
@doc """
See `Result.Operators.and_then_x/2`
"""
defdelegate and_then_x(results, f), to: Result.Operators
@doc """
See `Result.Operators.with_default/2`
"""
defdelegate with_default(result, default), to: Result.Operators
@doc """
See `Result.Operators.resolve/1`
"""
defdelegate resolve(result), to: Result.Operators
@doc """
See `Result.Operators.retry/4`
"""
defdelegate retry(result, f, count, timeout \\ 1000), to: Result.Operators
@doc """
See `Result.Operators.error?/1`
"""
defdelegate error?(result), to: Result.Operators
@doc """
See `Result.Operators.ok?/1`
"""
defdelegate ok?(result), to: Result.Operators
# Calculations
@doc """
See `Result.Calc.r_and/2`
"""
defdelegate r_and(r1, r2), to: Result.Calc
@doc """
See `Result.Calc.r_or/2`
"""
defdelegate r_or(r1, r2), to: Result.Calc
@doc """
See `Result.Calc.product/1`
"""
defdelegate product(list), to: Result.Calc
@doc """
See `Result.Calc.sum/1`
"""
defdelegate sum(list), to: Result.Calc
end
|
lib/result.ex
| 0.814864
| 0.556641
|
result.ex
|
starcoder
|
defmodule ChromicPDF.Template do
@moduledoc """
Helper functions for page styling.
For a start, see `source_and_options/1`.
## Motivation
  This module contains helper functions that make it easier to build HTML templates (body,
header, and footer) that fully cover a given page. Like an adapter, it tries to harmonize
Chrome's `printToPDF` options and related CSS layout styles (`@page` and friends) with a custom
set of page sizing options. Using this module is entirely optional, but perhaps can help to
avoid some common pitfalls arising from the slightly unintuitive and sometimes conflicting
behaviour of `printToPDF` options and `@page` CSS styles in Chrome.
## Page dimensions
One particularly cumbersome detail is that Chrome in headless mode does not correctly interpret
the `@page` CSS rule to configure the page dimensions. Resulting PDF files will always be in
US-letter format unless configured differently with the `paperWidth` and `paperHeight` options.
Experience has shown, that results will be best if the `@page` rule aligns with the values
passed to `printToPDF/2`, which is why this module exists to make basic page sizing easier.
"""
require EEx
@type blob :: binary()
@type content_option ::
{:content, blob()}
| {:header, blob()}
| {:footer, blob()}
@type paper_size :: :a4 | :us_letter | {float(), float()}
@type style_option ::
{:size, paper_size()}
| {:header_height, binary()}
| {:header_font_size, binary()}
| {:header_zoom, binary()}
| {:footer_height, binary()}
| {:footer_font_size, binary()}
| {:footer_zoom, binary()}
| {:webkit_print_color_adjust, binary()}
@paper_sizes_in_inch %{
a4: {8.3, 11.7},
us_letter: {8.5, 11.0}
}
@default_content """
<style>
body {
margin: 1em;
font-family: sans-serif;
}
h1 {
margin: 1em 0;
font-size: 22pt;
}
h2 {
margin: 1em 0;
font-size: 14pt;
}
p { font-size: 12pt; }
pre {
padding: 1em;
border: 1px solid grey;
border-radius: 2px;
background-color: #faffa3;
white-space: pre-wrap;
}
</style>
<h1>ChromicPDF</h1>
<p>Please see documentation at <a href="https://hexdocs.pm/chromic_pdf/ChromicPDF.html">hexdocs.pm</a></p>
<h2>User Agent</h2>
<pre id="user-agent"></pre>
<script type="text/javascript">
window.onload = function() {
var browser, userAgent = navigator.userAgent;
document.getElementById('user-agent').innerHTML = userAgent;
};
</script>
"""
@doc """
  Returns source and options for a PDF to be printed, given a set of template options. The
  return value can be passed to `ChromicPDF.print_to_pdf/2`.
## Options
* `header`
* `footer`
* all options from `styles/1`
## Example
  This example has the dimensions of an ISO A4 page.
ChromicPDF.Template.source_and_options(
content: "<p>Hello</p>",
header: "<p>header</p>",
footer: "<p>footer</p>"
size: :a4,
header_height: "45mm",
header_font_size: "20pt",
footer_height: "40mm"
)
Content, header, and footer templates should be unwrapped HTML markup (i.e. no `<html>` around
the content), prefixed with any `<style>` tags that your page needs.
<style>
h1 { font-size: 22pt; }
</style>
<h1>Hello</h1>
## ⚠ Markup is injected into the DOM ⚠
Please be aware that the options returned by this function cause ChromicPDF to inject the
markup directly into the DOM using the remote debugging API. This comes with some pitfalls
which are explained in `ChromicPDF.print_to_pdf/2`. Most notably, **no relative URLs** may be
used within the given HTML.
"""
@spec source_and_options([content_option() | style_option()]) ::
ChromicPDF.source_and_options()
def source_and_options(opts) do
content = Keyword.get(opts, :content, @default_content)
header = Keyword.get(opts, :header, "")
footer = Keyword.get(opts, :footer, "")
styles = do_styles(opts)
{width, height} = get_paper_size(opts)
%{
source: {:html, html_concat(styles, content)},
opts: [
print_to_pdf: %{
displayHeaderFooter: true,
headerTemplate: html_concat(styles, header),
footerTemplate: html_concat(styles, footer),
paperWidth: width,
paperHeight: height
}
]
}
end
@doc """
  Concatenates two HTML strings or iolists into one.
From `{:safe, iolist}` tuples, the `:safe` is dropped. This is useful to prepare data coming
from a Phoenix-compiled `.eex` template.
content = html_concat(@styles, render("content.html"))
"""
@spec html_concat({:safe, iolist()} | iodata(), {:safe, iolist()} | iodata()) :: iolist()
def html_concat({:safe, styles}, content), do: html_concat(styles, content)
def html_concat(styles, {:safe, content}), do: html_concat(styles, content)
def html_concat(styles, content), do: [styles, content]
@styles """
<style>
* {
-webkit-print-color-adjust: <%= @webkit_print_color_adjust %>;
}
@page {
width: <%= @width %>;
height: <%= @height %>;
margin: <%= @header_height %> 0 <%= @footer_height %>;
}
#header {
padding: 0 !important;
height: <%= @header_height %>;
font-size: <%= @header_font_size %>;
zoom: <%= @header_zoom %>;
}
#footer {
padding: 0 !important;
height: <%= @footer_height %>;
font-size: <%= @footer_font_size %>;
zoom: <%= @footer_zoom %>;
}
html, body {
margin: 0;
padding: 0;
}
</style>
"""
@doc """
Renders page styles for given options.
These base styles will configure page dimensions and header and footer heights. They also
remove any browser padding and margins from these elements, and set the font-size.
Additionally, they set the zoom level of header and footer templates to 0.75 which seems to
make them align with the content viewport scaling better.
## Options
* `size` page size, either a standard name (`:a4`, `:us_letter`) or a
`{<width>, <height>}` tuple in inches, default: `:us_letter`
* `header_height` default: zero
* `header_font_size` default: 10pt
* `header_zoom` default: 0.75
* `footer_height` default: zero
* `footer_font_size` default: 10pt
* `footer_zoom` default: 0.75
  * `webkit_print_color_adjust` default: "exact"
"""
@spec styles([style_option()]) :: blob()
def styles(opts \\ []), do: do_styles(opts)
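  # A minimal sketch: these options render an @page rule sized for A4
  # (8.3in x 11.7in, per @paper_sizes_in_inch) with a 45mm header band:
  #
  #     ChromicPDF.Template.styles(size: :a4, header_height: "45mm")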
defp do_styles(opts) do
{width, height} = get_paper_size(opts)
assigns = [
width: "#{width}in",
height: "#{height}in",
header_height: Keyword.get(opts, :header_height, "0"),
header_font_size: Keyword.get(opts, :header_font_size, "10pt"),
footer_height: Keyword.get(opts, :footer_height, "0"),
footer_font_size: Keyword.get(opts, :footer_font_size, "10pt"),
header_zoom: Keyword.get(opts, :header_zoom, "0.75"),
footer_zoom: Keyword.get(opts, :footer_zoom, "0.75"),
webkit_print_color_adjust: Keyword.get(opts, :webkit_print_color_adjust, "exact")
]
render_styles(assigns)
end
EEx.function_from_string(:defp, :render_styles, @styles, [:assigns])
# Fetches paper size from opts, translates from config or uses given {width, height} tuple.
defp get_paper_size(manual) when tuple_size(manual) == 2, do: manual
defp get_paper_size(name) when is_atom(name), do: Map.fetch!(@paper_sizes_in_inch, name)
defp get_paper_size(opts) when is_list(opts) do
opts
|> Keyword.get(:size, :us_letter)
|> get_paper_size()
end
end
|
lib/chromic_pdf/template.ex
| 0.853165
| 0.469216
|
template.ex
|
starcoder
|
defmodule Membrane.HTTPAdaptiveStream.Sink do
@moduledoc """
Sink for generating HTTP streaming manifests.
Uses `Membrane.HTTPAdaptiveStream.Manifest` for manifest serialization
and `Membrane.HTTPAdaptiveStream.Storage` for saving files.
## Notifications
- `{:track_playable, input_pad_id}` - sent when the first segment of a track is
stored, and thus the track is ready to be played
- `{:cleanup, cleanup_function :: (()-> :ok)}` - sent when playback changes
from playing to prepared. Invoking `cleanup_function` lambda results in removing
all the files that remain after the streaming
## Examples
The following configuration:
%#{inspect(__MODULE__)}{
manifest_name: "manifest",
manifest_module: Membrane.HTTPAdaptiveStream.HLS,
storage: %Membrane.HTTPAdaptiveStream.Storages.FileStorage{directory: "output"}
}
  will generate an HLS manifest in the `output` directory, playable from
`output/manifest.m3u8` file.
"""
use Bunch
use Membrane.Sink
alias Membrane.CMAF
alias Membrane.HTTPAdaptiveStream.{Manifest, Storage}
require Manifest.SegmentAttribute
def_input_pad :input,
availability: :on_request,
demand_unit: :buffers,
caps: CMAF.Track,
options: [
track_name: [
spec: String.t() | nil,
default: nil,
description: """
            Name that will be used to name the media playlist for the given track,
            as well as its header and segment files.
            It must not contain any URI reserved characters.
"""
]
]
def_options manifest_name: [
type: :string,
spec: String.t(),
default: "index",
description: "Name of the main manifest file"
],
manifest_module: [
type: :atom,
spec: module,
description: """
Implementation of the `Membrane.HTTPAdaptiveStream.Manifest`
behaviour.
"""
],
storage: [
type: :struct,
spec: Storage.config_t(),
description: """
Storage configuration. May be one of `Membrane.HTTPAdaptiveStream.Storages.*`.
See `Membrane.HTTPAdaptiveStream.Storage` behaviour.
"""
],
target_window_duration: [
spec: pos_integer | :infinity,
type: :time,
default: Membrane.Time.seconds(40),
description: """
                Manifest duration is kept above that time, while the oldest segments
                are removed whenever possible.
"""
],
persist?: [
type: :bool,
default: false,
description: """
If true, stale segments are removed from the manifest only. Once
playback finishes, they are put back into the manifest.
"""
],
target_segment_duration: [
type: :time,
default: 0,
description: """
Expected length of each segment. Setting it is not necessary, but
may help players achieve better UX.
"""
]
@impl true
def handle_init(options) do
options
|> Map.from_struct()
|> Map.delete(:manifest_name)
|> Map.delete(:manifest_module)
|> Map.merge(%{
storage: Storage.new(options.storage),
manifest: %Manifest{name: options.manifest_name, module: options.manifest_module},
awaiting_first_segment: MapSet.new()
})
~> {:ok, &1}
end
@impl true
def handle_caps(Pad.ref(:input, track_id) = pad_ref, %CMAF.Track{} = caps, ctx, state) do
{header_name, manifest} =
if Manifest.has_track?(state.manifest, track_id) do
# Arrival of new caps for an already existing track indicate that stream parameters have changed.
# According to section 4.3.2.3 of RFC 8216, discontinuity needs to be signaled and new header supplied.
Manifest.discontinue_track(state.manifest, track_id)
else
track_name = parse_track_name(ctx.pads[pad_ref].options[:track_name] || track_id)
Manifest.add_track(
state.manifest,
%Manifest.Track.Config{
id: track_id,
track_name: track_name,
content_type: caps.content_type,
header_extension: ".mp4",
segment_extension: ".m4s",
target_window_duration: state.target_window_duration,
target_segment_duration: state.target_segment_duration,
persist?: state.persist?
}
)
end
{result, storage} = Storage.store_header(state.storage, header_name, caps.header)
{result, %{state | storage: storage, manifest: manifest}}
end
@impl true
def handle_prepared_to_playing(ctx, state) do
demands = ctx.pads |> Map.keys() |> Enum.map(&{:demand, &1})
{{:ok, demands}, state}
end
@impl true
def handle_pad_added(pad, %{playback_state: :playing}, state) do
{{:ok, demand: pad}, state}
end
@impl true
def handle_pad_added(_pad, _ctx, state) do
{:ok, state}
end
@impl true
def handle_start_of_stream(Pad.ref(:input, id), _ctx, state) do
awaiting_first_segment = MapSet.put(state.awaiting_first_segment, id)
{:ok, %{state | awaiting_first_segment: awaiting_first_segment}}
end
@impl true
def handle_write(Pad.ref(:input, id) = pad, buffer, _ctx, state) do
%{storage: storage, manifest: manifest} = state
duration = buffer.metadata.duration
{changeset, manifest} =
Manifest.add_segment(manifest, id, byte_size(buffer.payload), duration)
state = %{state | manifest: manifest}
with {:ok, storage} <- Storage.apply_segment_changeset(storage, changeset, buffer.payload),
{:ok, storage} <- serialize_and_store_manifest(manifest, storage) do
{notify, state} = maybe_notify_playable(id, state)
{{:ok, notify ++ [demand: pad]}, %{state | storage: storage}}
else
{error, storage} -> {error, %{state | storage: storage}}
end
end
@impl true
def handle_end_of_stream(Pad.ref(:input, id), _ctx, state) do
%{manifest: manifest, storage: storage} = state
manifest = Manifest.finish(manifest, id)
{store_result, storage} = serialize_and_store_manifest(manifest, storage)
storage = Storage.clear_cache(storage)
state = %{state | storage: storage, manifest: manifest}
{store_result, state}
end
@impl true
def handle_playing_to_prepared(_ctx, state) do
%{
manifest: manifest,
storage: storage,
persist?: persist?
} = state
to_remove = Manifest.all_segments(manifest)
cleanup = fn ->
{result, _storage} = Storage.cleanup(storage, to_remove)
result
end
result =
if persist? do
{result, storage} =
manifest |> Manifest.from_beginning() |> serialize_and_store_manifest(storage)
{result, %{state | storage: storage}}
else
{:ok, state}
end
with {:ok, state} <- result do
{{:ok, notify: {:cleanup, cleanup}}, state}
end
end
defp parse_track_name(track_id) when is_binary(track_id) do
valid_filename_regex = ~r/^[^\/:*?"<>|]+$/
if String.match?(track_id, valid_filename_regex) do
track_id
else
raise ArgumentError,
message: "Manually defined track identifiers should be valid file names."
end
end
defp parse_track_name(track_id) do
track_id |> :erlang.term_to_binary() |> Base.url_encode64(padding: false)
end
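  # A sketch of how non-binary pad ids become URL-safe names (the id is
  # illustrative):
  #
  #     parse_track_name(:video)
  #     #=> Base.url_encode64(:erlang.term_to_binary(:video), padding: false)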
defp maybe_notify_playable(id, %{awaiting_first_segment: awaiting_first_segment} = state) do
if MapSet.member?(awaiting_first_segment, id) do
{[notify: {:track_playable, id}],
%{state | awaiting_first_segment: MapSet.delete(awaiting_first_segment, id)}}
else
{[], state}
end
end
defp serialize_and_store_manifest(manifest, storage) do
manifest_files = Manifest.serialize(manifest)
Storage.store_manifests(storage, manifest_files)
end
end
|
lib/membrane_http_adaptive_stream/sink.ex
| 0.893411
| 0.428473
|
sink.ex
|
starcoder
|
defmodule EQRCode.Matrix do
@moduledoc false
import Bitwise
defstruct [:version, :modules, :mask, :matrix]
@type matrix :: term
@type t :: %__MODULE__{version: integer, modules: integer, matrix: matrix}
@type coordinate :: {integer, integer}
@ecc_l 0b01
@alignments %{
1 => [],
2 => [6, 18],
3 => [6, 22],
4 => [6, 26],
5 => [6, 30],
6 => [6, 34],
7 => [6, 22, 38]
}
  @finder_pattern [
    1, 1, 1, 1, 1, 1, 1,
    1, 0, 0, 0, 0, 0, 1,
    1, 0, 1, 1, 1, 0, 1,
    1, 0, 1, 1, 1, 0, 1,
    1, 0, 1, 1, 1, 0, 1,
    1, 0, 0, 0, 0, 0, 1,
    1, 1, 1, 1, 1, 1, 1
  ]
  @alignment_pattern [
    1, 1, 1, 1, 1,
    1, 0, 0, 0, 1,
    1, 0, 1, 0, 1,
    1, 0, 0, 0, 1,
    1, 1, 1, 1, 1
  ]
@doc """
Initialize the matrix.
"""
@spec new(integer) :: t
def new(version) do
modules = (version - 1) * 4 + 21
matrix =
Tuple.duplicate(nil, modules)
|> Tuple.duplicate(modules)
%__MODULE__{version: version, modules: modules, matrix: matrix}
end
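  # A minimal sketch: version 1 yields a 21x21 grid of nils,
  # since (1 - 1) * 4 + 21 = 21:
  #
  #     matrix = EQRCode.Matrix.new(1)
  #     matrix.modules #=> 21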
@doc """
Draw the finder patterns, three at a time.
"""
@spec draw_finder_patterns(t) :: t
def draw_finder_patterns(%__MODULE__{matrix: matrix, modules: modules} = m) do
z = modules - 7
matrix =
[{0, 0}, {z, 0}, {0, z}]
|> Stream.flat_map(&shape(&1, {7, 7}))
|> Stream.zip(Stream.cycle(@finder_pattern))
|> Enum.reduce(matrix, fn {coordinate, v}, acc ->
update(acc, coordinate, v)
end)
%{m | matrix: matrix}
end
@doc """
  Draw the separators.
"""
@spec draw_seperators(t) :: t
def draw_seperators(%__MODULE__{matrix: matrix, modules: modules} = m) do
z = modules - 8
matrix =
[
{{0, 7}, {1, 8}},
{{0, z}, {1, 8}},
{{7, z}, {8, 1}},
{{7, 0}, {8, 1}},
{{z, 0}, {8, 1}},
{{z, 7}, {1, 8}}
]
|> Stream.flat_map(fn {a, b} -> shape(a, b) end)
|> Enum.reduce(matrix, &update(&2, &1, 0))
%{m | matrix: matrix}
end
@doc """
Draw the alignment patterns.
"""
@spec draw_alignment_patterns(t) :: t
def draw_alignment_patterns(%__MODULE__{matrix: matrix, version: version} = m) do
matrix =
for(
x <- @alignments[version],
y <- @alignments[version],
do: {x, y}
)
|> Stream.filter(&available?(matrix, &1))
|> Stream.map(fn {x, y} -> {x - 2, y - 2} end)
|> Stream.flat_map(&shape(&1, {5, 5}))
|> Stream.zip(Stream.cycle(@alignment_pattern))
|> Enum.reduce(matrix, fn {coordinate, v}, acc ->
update(acc, coordinate, v)
end)
%{m | matrix: matrix}
end
@doc """
Draw the timing patterns.
"""
@spec draw_timing_patterns(t) :: t
def draw_timing_patterns(%__MODULE__{matrix: matrix, modules: modules} = m) do
z = modules - 13
matrix =
[{z, 1}, {1, z}]
|> Stream.flat_map(&shape({6, 6}, &1))
|> Stream.zip(Stream.cycle([1, 0]))
|> Enum.reduce(matrix, fn {coordinate, v}, acc ->
update(acc, coordinate, v)
end)
%{m | matrix: matrix}
end
@doc """
Draw the dark module.
"""
@spec draw_dark_module(t) :: t
def draw_dark_module(%__MODULE__{matrix: matrix, modules: modules} = m) do
matrix = update(matrix, {modules - 8, 8}, 1)
%{m | matrix: matrix}
end
@doc """
Draw the reserved format information areas.
"""
@spec draw_reserved_format_areas(t) :: t
def draw_reserved_format_areas(%__MODULE__{matrix: matrix, modules: modules} = m) do
z = modules - 8
matrix =
[{{0, 8}, {1, 9}}, {{z, 8}, {1, 8}}, {{8, 0}, {9, 1}}, {{8, z}, {8, 1}}]
|> Stream.flat_map(fn {a, b} -> shape(a, b) end)
|> Enum.reduce(matrix, &update(&2, &1, :reserved))
%{m | matrix: matrix}
end
@doc """
Draw the reserved version information areas.
"""
@spec draw_reserved_version_areas(t) :: t
def draw_reserved_version_areas(%__MODULE__{version: version} = m) when version < 7, do: m
def draw_reserved_version_areas(%__MODULE__{matrix: matrix, modules: modules} = m) do
z = modules - 11
matrix =
[{{0, z}, {3, 6}}, {{z, 0}, {6, 3}}]
|> Stream.flat_map(fn {a, b} -> shape(a, b) end)
|> Enum.reduce(matrix, &update(&2, &1, :reserved))
%{m | matrix: matrix}
end
@doc """
Draw the data bits with mask.
"""
@spec draw_data_with_mask(t, binary) :: t
def draw_data_with_mask(%__MODULE__{matrix: matrix, modules: modules} = m, data) do
candidate =
Stream.unfold(modules - 1, fn
-1 -> nil
8 -> {8, 5}
n -> {n, n - 2}
end)
|> Stream.zip(Stream.cycle([:up, :down]))
|> Stream.flat_map(fn {z, path} -> path(path, {modules - 1, z}) end)
|> Stream.filter(&available?(matrix, &1))
|> Stream.zip(EQRCode.Encode.bits(data))
{mask, _, matrix} =
Stream.map(0b000..0b111, fn mask ->
matrix =
Enum.reduce(candidate, matrix, fn {coordinate, v}, acc ->
update(acc, coordinate, v ^^^ EQRCode.Mask.mask(mask, coordinate))
end)
{mask, EQRCode.Mask.score(matrix), matrix}
end)
|> Enum.min_by(&elem(&1, 1))
%{m | matrix: matrix, mask: mask}
end
@doc """
Draw the data bits with mask 0.
"""
@spec draw_data_with_mask0(t, binary) :: t
def draw_data_with_mask0(%__MODULE__{matrix: matrix, modules: modules} = m, data) do
matrix =
Stream.unfold(modules - 1, fn
-1 -> nil
8 -> {8, 5}
n -> {n, n - 2}
end)
|> Stream.zip(Stream.cycle([:up, :down]))
|> Stream.flat_map(fn {z, path} -> path(path, {modules - 1, z}) end)
|> Stream.filter(&available?(matrix, &1))
|> Stream.zip(EQRCode.Encode.bits(data))
|> Enum.reduce(matrix, fn {coordinate, v}, acc ->
update(acc, coordinate, v ^^^ EQRCode.Mask.mask(0, coordinate))
end)
%{m | matrix: matrix, mask: 0}
end
defp path(:up, {x, y}),
do:
for(
i <- x..0,
j <- y..(y - 1),
do: {i, j}
)
defp path(:down, {x, y}),
do:
for(
i <- 0..x,
j <- y..(y - 1),
do: {i, j}
)
@doc """
Fill the reserved format information areas.
"""
@spec draw_format_areas(t) :: t
def draw_format_areas(%__MODULE__{matrix: matrix, modules: modules, mask: mask} = m) do
data = EQRCode.ReedSolomon.bch_encode(<<@ecc_l::2, mask::3>>)
matrix =
[
{{8, 0}, {9, 1}},
{{7, 8}, {1, -6}},
{{modules - 1, 8}, {1, -6}},
{{8, modules - 8}, {8, 1}}
]
|> Stream.flat_map(fn {a, b} -> shape(a, b) end)
|> Stream.filter(&reserved?(matrix, &1))
|> Stream.zip(Stream.cycle(data))
|> Enum.reduce(matrix, fn {coordinate, v}, acc ->
put(acc, coordinate, v)
end)
%{m | matrix: matrix}
end
@doc """
Fill the reserved version information areas.
"""
@spec draw_version_areas(t) :: t
def draw_version_areas(%__MODULE__{version: version} = m) when version < 7, do: m
def draw_version_areas(%__MODULE__{matrix: matrix, modules: modules} = m) do
data = EQRCode.Encode.bits(<<0b000111110010010100::18>>)
z = modules - 9
matrix =
[
{{z, 5}, {1, -1}},
{{z, 4}, {1, -1}},
{{z, 3}, {1, -1}},
{{z, 2}, {1, -1}},
{{z, 1}, {1, -1}},
{{z, 0}, {1, -1}},
{{5, z}, {-1, 1}},
{{4, z}, {-1, 1}},
{{3, z}, {-1, 1}},
{{2, z}, {-1, 1}},
{{1, z}, {-1, 1}},
{{0, z}, {-1, 1}}
]
|> Stream.flat_map(fn {a, b} -> shape(a, b) end)
|> Stream.filter(&reserved?(matrix, &1))
|> Stream.zip(Stream.cycle(data))
|> Enum.reduce(matrix, fn {coordinate, v}, acc ->
put(acc, coordinate, v)
end)
%{m | matrix: matrix}
end
defp reserved?(matrix, {x, y}) do
get_in(matrix, [Access.elem(x), Access.elem(y)]) == :reserved
end
defp put(matrix, {x, y}, value) do
put_in(matrix, [Access.elem(x), Access.elem(y)], value)
end
@doc """
  Draw the quiet zone.
"""
@spec draw_quite_zone(t) :: t
def draw_quite_zone(%__MODULE__{matrix: matrix, modules: modules} = m) do
zone = Tuple.duplicate(0, modules + 4)
matrix =
Enum.reduce(0..(modules - 1), matrix, fn i, acc ->
update_in(acc, [Access.elem(i)], fn row ->
Tuple.insert_at(row, 0, 0)
|> Tuple.insert_at(0, 0)
|> Tuple.append(0)
|> Tuple.append(0)
end)
end)
|> Tuple.insert_at(0, zone)
|> Tuple.insert_at(0, zone)
|> Tuple.append(zone)
|> Tuple.append(zone)
%{m | matrix: matrix}
end
@doc """
Given the starting point {x, y} and {width, height}
returns the coordinates of the shape.
Example:
iex> EQRCode.Matrix.shape({0, 0}, {3, 3})
[{0, 0}, {0, 1}, {0, 2},
{1, 0}, {1, 1}, {1, 2},
{2, 0}, {2, 1}, {2, 2}]
"""
@spec shape(coordinate, {integer, integer}) :: [coordinate]
def shape({x, y}, {w, h}) do
for i <- x..(x + h - 1),
j <- y..(y + w - 1),
do: {i, j}
end
defp update(matrix, {x, y}, value) do
update_in(matrix, [Access.elem(x), Access.elem(y)], fn
nil -> value
val -> val
end)
end
defp available?(matrix, {x, y}) do
get_in(matrix, [Access.elem(x), Access.elem(y)]) == nil
end
end
|
lib/eqrcode/matrix.ex
| 0.854975
| 0.640439
|
matrix.ex
|
starcoder
|
defmodule StarkInfra.IssuingBalance do
alias __MODULE__, as: IssuingBalance
alias StarkInfra.Error
alias StarkInfra.Utils.Rest
alias StarkInfra.Utils.Check
alias StarkInfra.User.Project
alias StarkInfra.User.Organization
@moduledoc """
Groups IssuingBalance related functions
"""
@doc """
The IssuingBalance struct displays the current issuing balance of the Workspace,
which is the result of the sum of all transactions within this
Workspace. The balance is never generated by the user, but it
can be retrieved to see the available information.
## Attributes (return-only):
- `:id` [string]: unique id returned when IssuingBalance is created. ex: "5656565656565656"
- `:amount` [integer]: current balance amount of the Workspace in cents. ex: 200 (= R$ 2.00)
- `:currency` [string]: currency of the current Workspace. Expect others to be added eventually. ex: "BRL"
  - `:updated` [DateTime]: latest update DateTime for the IssuingBalance. ex: ~U[2020-03-10 10:30:00Z]
"""
@enforce_keys [
:id,
:amount,
:currency,
:updated
]
defstruct [
:id,
:amount,
:currency,
:updated
]
@type t() :: %__MODULE__{}
@doc """
Receive the IssuingBalance struct linked to your Workspace in the Stark Infra API
## Options:
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- IssuingBalance struct with updated attributes
"""
@spec get(
user: Project.t() | Organization.t() | nil
) ::
{ :ok, [IssuingBalance.t()] } |
{ :error, [error: Error.t()] }
def get(options \\ []) do
case Rest.get_list(resource(), options) |> Enum.take(1) do
[{:ok, balance}] -> {:ok, balance}
[{:error, error}] -> {:error, error}
end
end
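  # A minimal usage sketch (requires a default project/organization user to be
  # configured, or pass `user:` explicitly):
  #
  #     {:ok, balance} = StarkInfra.IssuingBalance.get()
  #     balance.amount #=> current balance in cents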
@doc """
Same as get(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec get!(
user: Project.t() | Organization.t() | nil
) :: any
def get!(options \\ []) do
Rest.get_list!(resource(), options) |> Enum.take(1) |> hd()
end
@doc false
def resource() do
{
"IssuingBalance",
&resource_maker/1
}
end
@doc false
def resource_maker(json) do
%IssuingBalance{
id: json[:id],
amount: json[:amount],
currency: json[:currency],
updated: json[:updated] |> Check.datetime()
}
end
end
|
lib/issuing_balance/issuing_balance.ex
| 0.850949
| 0.502563
|
issuing_balance.ex
|
starcoder
|
defmodule BitPalPhx.ExchangeRate do
@moduledoc false
@type t :: %__MODULE__{
rate: Decimal.t(),
pair: {atom, atom}
}
defstruct [:rate, :pair]
@spec new(Money.t(), Money.t()) :: t
def new(amount, fiat_amount) do
%__MODULE__{
rate: Decimal.div(Money.to_decimal(fiat_amount), Money.to_decimal(amount)),
pair: {amount.currency, fiat_amount.currency}
}
end
@spec cast(map) :: {:ok, t} | {:error, :bad_pair} | :error
def cast(%{"rate" => rate, "pair" => pair}) do
with {:ok, pair} <- parse_pair(pair),
{rate, ""} <- parse_rate(rate) do
{:ok, %__MODULE__{pair: pair, rate: rate}}
    else
      # normalize partial Decimal parses (e.g. {rate, "rest"}) to :error,
      # matching the @spec above
      {:error, :bad_pair} -> {:error, :bad_pair}
      _ -> :error
    end
end
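  # A minimal sketch (note the pair elements stay strings here, since
  # parse_pair/1 below does not cast them to atoms):
  #
  #     ExchangeRate.cast(%{"rate" => "30000.5", "pair" => "BTC-USD"})
  #     #=> {:ok, %ExchangeRate{rate: #Decimal<30000.5>, pair: {"BTC", "USD"}}}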
defp parse_rate(rate) do
Decimal.parse(rate)
end
defp parse_pair(pair) do
case String.split(pair, "-") do
[from, to] ->
{:ok, {from, to}}
_ ->
{:error, :bad_pair}
end
end
end
defmodule BitPalPhx.ExchangeRates do
@moduledoc false
alias BitPalPhx.Cache
alias BitPalPhx.ExchangeRate
alias BitPalPhx.ProcessRegistry
alias BitPalPhx.Socket
require Logger
@spec historic_rate({atom, atom}) :: ExchangeRate.t() | nil
def historic_rate(pair) do
Cache.get(pair)
end
def request(pair = {from, to}) do
topic = topic(pair)
Socket.join(topic)
{:ok, ref} = Socket.push(topic, "rate", %{from: from, to: to})
Task.Supervisor.start_child(
BitPalPhx.TaskSupervisor,
__MODULE__,
:await_request_task,
[ref, pair, self()]
)
ref
end
def await_request_task(ref, pair, pid) do
# Not needed?? Or what if we miss it with `handle_reply`?
Registry.register(ProcessRegistry, ProcessRegistry.via_tuple(ref), pair)
case await_request(ref, pair) do
{:ok, rate} ->
send(pid, {:exchange_rate, rate})
{:ok, rate}
x ->
Logger.warn("failed exchange rate request #{inspect(x)}")
end
end
def await_request(ref, pair) do
with {:ok, rate} <- Socket.await(ref),
{:ok, rate} <- ExchangeRate.cast(rate) do
Cache.put(pair, rate)
{:ok, rate}
else
x -> x
end
end
def await_request!(ref, pair) do
{:ok, rate} = await_request(ref, pair)
rate
end
defp topic({from, to}) do
"exchange_rate:#{from}-#{to}"
end
end
|
lib/bitpal_phx/exchange_rate.ex
| 0.803174
| 0.545225
|
exchange_rate.ex
|
starcoder
|
defmodule OAuthXYZ.Model.ResourceRequest do
@moduledoc """
Resource Request Handling Module.
```
# Resources
[
{
"actions": [
"read",
"write",
"dolphin"
],
"locations": [
"https://server.example.net/",
"https://resource.local/other"
],
"datatypes": [
"metadata",
"images"
]
},
"dolphin-metadata"
]
# Resource
## handle
"dolphin-metadata"
## object
{
"actions": [
"read",
"write",
"dolphin"
],
"locations": [
"https://server.example.net/",
"https://resource.local/other"
],
"datatypes": [
"metadata",
"images"
]
}
```
"""
@type t :: %__MODULE__{}
defstruct [
#! :string
:handle,
#! :list
:actions,
#! :list
:locations,
#! :list
:datatypes
]
@doc """
Parse each resource and return structure list
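For example (illustrative; `nil` fields elided):
parse(["dolphin-metadata", %{"actions" => ["read"]}])
#=> [%ResourceRequest{handle: "dolphin-metadata"}, %ResourceRequest{actions: ["read"]}]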
"""
@spec parse([request :: map | String.t()]) :: [t]
def parse(resource_list) when is_list(resource_list),
do: Enum.map(resource_list, fn resource -> parse_resource(resource) end)
@doc """
Parse string or map and return structure
"""
@spec parse_resource(request :: map | String.t()) :: t
def parse_resource(handle) when is_binary(handle), do: %__MODULE__{handle: handle}
def parse_resource(request) when is_map(request) do
parsed_request =
%{}
|> parse_actions(request)
|> parse_locations(request)
|> parse_datatypes(request)
%__MODULE__{
actions: parsed_request.actions,
locations: parsed_request.locations,
datatypes: parsed_request.datatypes
}
end
# private
defp parse_actions(keys, %{"actions" => actions}), do: Map.put(keys, :actions, actions)
defp parse_actions(keys, _), do: Map.put(keys, :actions, nil)
defp parse_locations(keys, %{"locations" => locations}),
do: Map.put(keys, :locations, locations)
defp parse_locations(keys, _), do: Map.put(keys, :locations, nil)
defp parse_datatypes(keys, %{"datatypes" => datatypes}),
do: Map.put(keys, :datatypes, datatypes)
defp parse_datatypes(keys, _), do: Map.put(keys, :datatypes, nil)
end
|
lib/oauth_xyz/model/resource_request.ex
| 0.749546
| 0.734548
|
resource_request.ex
|
starcoder
|
defmodule Ra do
require Logger
@doc """
Defines the banner to use when showing the help.
"""
defmacro banner(banner) do
quote do
@banner unquote(banner)
end
end
@doc """
Defines and describes a subcommand.
You have to provide at least the `name` of the command as a string and the
function `f` that shall be called when this command is given.
You can also provide an optional `description`, which will be displayed when
showing the help.
The function `f` needs to accept a single argument, being a three-tuple:
`{arguments, options, config}`.
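A minimal sketch (illustrative; the callback is piped, so pass a call form):
command "greet", "Prints the given arguments.", do_greet()
def do_greet({args, _opts, _rc}), do: IO.inspect(args)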
"""
defmacro command(name, description \\ "", f) do
quote do
@commands @commands ++ [{ unquote(to_string name), unquote(description) }]
def _command([unquote(to_string name)|args], opts) do
rc = Ra.RcFile.load(__MODULE__)
{args, opts, rc} |> unquote(f)
end
end
end
@doc """
Defines and describes an option.
You have to provide at least the `name` of the option. You can also define
its `type` (`:string` is assumed as a default) and optionally provide a
`description`, which is shown when the help is printed.
Remember: if you want to have a `description`, you also need to provide the
`type`.
`type` may be one of the following atoms:
* `:string` if the option's value is supposed to be a string (default)
* `:integer` if the option's value is supposed to be an integer
* `:float` if the option's value is supposed to be a float
* `:boolean` if the option is meant to be an on/off-switch. One can use
`--opt`/`--no-opt` syntax for them.
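For example (illustrative):
option :verbose, :boolean, "Print more output."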
"""
defmacro option(name, type \\ :string, description \\ "") do
quote do
@options Map.put(@options, unquote(name), { unquote(type), unquote(description) })
@opt_dict Map.put(@opt_dict, unquote(name), unquote(type))
end
end
defmacro rc_file(dict) do
quote do
@commands @commands ++ [{ "initrc", "Initialize the runtime configuration file." }]
def init_rc, do: _init_rc(unquote(dict), Ra.RcFile.exist?(__MODULE__))
def _init_rc(_, true), do: nil
def _init_rc(d, _), do: Ra.RcFile.touch(d, __MODULE__)
def _command(["initrc"|_], _), do: init_rc()
end
end
@doc """
**This macro is deprecated**: Reassembles all the stuff configured beforehand.
This macro needs to be used last in the module, as of version 0.3.3 its
functionality is applied automatically by using a before_compile hook. **As of
version 0.4.0 this macro will be removed!**
It is also responsible for creating the `run` function needed by Elixir.
"""
defmacro parse do
IO.warn("Explicit usage of Ra.parse/0 is deprecated. The macro will be removed in 0.4.0.", Macro.Env.stacktrace(__CALLER__))
quote do
@commands @commands ++ [{ "help", "View this help information." }]
def _command(["help"|_], _), do: help()
def _command(_, _) do
IO.puts "Command unknown."
help()
end
def help do
IO.puts "#{@banner}\n"
IO.puts "OPTIONS:\n"
@options
|> Map.keys
|> Enum.map(&(" --#{to_string &1} [#{Map.get(@options, &1) |> elem(0) |> to_string}] - #{Map.get(@options, &1) |> elem(1) |> to_string}"))
|> Enum.join("\n")
|> IO.puts
IO.puts "\nCOMMANDS:\n"
@commands
|> Enum.map(&(" #{elem(&1, 0)} - #{elem(&1, 1)}"))
|> Enum.join("\n")
|> IO.puts
end
def run(args) do
opts = OptionParser.parse(args, strict: @opt_dict)
_command(elem(opts, 1), elem(opts, 0))
end
end
end
defmacro __using__(_opts) do
quote do
import unquote(__MODULE__)
@banner ""
@commands []
@options %{}
@opt_dict %{}
end
end
end
|
lib/ra.ex
| 0.771542
| 0.629276
|
ra.ex
|
starcoder
|
defmodule Ankh.HTTP2.Frame do
@moduledoc """
HTTP/2 frame struct
The __using__ macro injects the frame struct needed by `Ankh.HTTP2.Frame`.
"""
require Logger
alias Ankh.HTTP2.Frame.Encodable
@typedoc "Struct injected by the `Ankh.HTTP2.Frame` __using__ macro."
@type t :: struct()
@typedoc "Frame length"
@type length :: non_neg_integer()
@typedoc "Frame type code"
@type type :: non_neg_integer()
@typedoc "Frame data"
@type data :: iodata()
@typedoc "Encode/Decode options"
@type options :: keyword()
@header_size 9
@doc """
Injects the frame struct in a module.
- type: HTTP/2 frame type code
- flags: data type implementing `Ankh.HTTP2.Frame.Encodable`
- payload: data type implementing `Ankh.HTTP2.Frame.Encodable`
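For example (illustrative; the flag and payload modules are hypothetical):
defmodule MyFrame do
use Ankh.HTTP2.Frame, type: 0xA, flags: MyFrame.Flags, payload: MyFrame.Payload
end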
"""
@spec __using__(type: type(), flags: atom() | nil, payload: atom() | nil) :: Macro.t()
defmacro __using__(args) do
with {:ok, type} <- Keyword.fetch(args, :type),
flags <- Keyword.get(args, :flags),
payload <- Keyword.get(args, :payload) do
quote bind_quoted: [type: type, flags: flags, payload: payload] do
alias Ankh.HTTP2.Frame
alias Ankh.HTTP2.Stream, as: HTTP2Stream
@typedoc """
- length: payload length in bytes
- flags: data type implementing `Ankh.HTTP2.Frame.Encodable`
- stream_id: Stream ID of the frame
- payload: data type implementing `Ankh.HTTP2.Frame.Encodable`
"""
@type t :: %__MODULE__{
length: Frame.length(),
type: Frame.type(),
stream_id: HTTP2Stream.id(),
flags: Encodable.t() | nil,
payload: Encodable.t() | nil
}
flags = if flags, do: struct(flags), else: nil
payload = if payload, do: struct(payload), else: nil
defstruct length: 0,
type: type,
stream_id: 0,
flags: flags,
payload: payload
end
else
:error ->
raise "Missing type code: You must provide a type code for the frame"
end
end
@doc """
Decodes a binary into a frame struct
Parameters:
- struct: struct using `Ankh.HTTP2.Frame`
- binary: data to decode into the struct
- options: options to pass as context to the decoding function
"""
@spec decode(t(), binary(), options()) :: {:ok, t()} | {:error, any()}
def decode(frame, data, options \\ [])
def decode(frame, <<0::24, _type::8, flags::binary-size(1), _reserved::1, id::31>>, options) do
with {:ok, flags} <- Encodable.decode(frame.flags, flags, options),
do: {:ok, %{frame | stream_id: id, flags: flags, payload: nil}}
end
def decode(
frame,
<<length::24, _type::8, flags::binary-size(1), _reserved::1, id::31, payload::binary>>,
options
) do
with {:ok, flags} <- Encodable.decode(frame.flags, flags, options),
payload_options <- Keyword.put(options, :flags, flags),
{:ok, payload} <- Encodable.decode(frame.payload, payload, payload_options),
do: {:ok, %{frame | length: length, stream_id: id, flags: flags, payload: payload}}
end
@doc """
Encodes a frame struct into binary
Parameters:
- struct: struct using `Ankh.HTTP2.Frame`
- options: options to pass as context to the encoding function
"""
@spec encode(t(), options()) :: {:ok, t(), data} | {:error, any()}
def encode(frame, options \\ [])
def encode(%{type: type, flags: flags, stream_id: id, payload: nil} = frame, options) do
with {:ok, flags} <- Encodable.encode(flags, options) do
{
:ok,
frame,
[<<0::24, type::8, flags::binary-size(1), 0::1, id::31>>]
}
end
end
def encode(%{type: type, flags: nil, stream_id: id, payload: payload} = frame, options) do
with {:ok, payload} <- Encodable.encode(payload, options) do
length = IO.iodata_length(payload)
{
:ok,
%{frame | length: length},
[<<length::24, type::8, 0::8, 0::1, id::31>> | payload]
}
end
end
def encode(%{type: type, stream_id: id, flags: flags, payload: payload} = frame, options) do
payload_options = Keyword.put(options, :flags, flags)
with {:ok, payload} <- Encodable.encode(payload, payload_options),
{:ok, flags} <- Encodable.encode(flags, options) do
length = IO.iodata_length(payload)
{
:ok,
%{frame | length: length},
[<<length::24, type::8, flags::binary-size(1), 0::1, id::31>> | payload]
}
end
end
@doc """
Returns a stream of frames from a buffer, returning the leftover buffer data
and the frame header information and data (without decoding it) in a tuple:
`{remaining_buffer, {length, type, id, frame_data}}`
or nil to signal partial leftover data:
`{remaining_buffer, nil}`
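For example (illustrative; `handle_frame/3` is hypothetical):
buffer
|> Ankh.HTTP2.Frame.stream()
|> Enum.each(fn
{_rest, {_length, type, id, data}} -> handle_frame(type, id, data)
{_rest, nil} -> :partial
end)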
"""
@spec stream(iodata()) :: Enumerable.t()
def stream(data) do
Stream.unfold(data, fn
<<length::24, type::8, _flags::binary-size(1), _reserved::1, id::31, rest::binary>> =
data
when byte_size(rest) >= length ->
frame_size = @header_size + length
frame_data = binary_part(data, 0, frame_size)
rest_size = byte_size(data) - frame_size
rest_data = binary_part(data, frame_size, rest_size)
{{rest_data, {length, type, id, frame_data}}, rest_data}
data when byte_size(data) == 0 ->
nil
data ->
# Partial frame left over: emit it once, then halt on the next iteration.
{{data, nil}, <<>>}
end)
end
end
|
lib/ankh/http2/frame.ex
| 0.917261
| 0.463201
|
frame.ex
|
starcoder
|
defmodule Credo.Code do
@moduledoc """
Credo.Code contains a lot of utility or helper functions that deal with the
analysis of - you guessed it - Code.
Whenever a function serves a general purpose in this area, e.g. getting the
value of a module attribute inside a given module, we want to extract that
function and put it in the Credo.Code namespace, so others can utilize them
without reinventing the wheel.
The most often utilized functions are conveniently imported to
`Credo.Check.CodeHelper`.
"""
alias Credo.SourceFile
defmodule ParserError do
@moduledoc """
This is an internal Issue raised by Credo when it finds itself unable to
parse the source code in a file.
"""
@explanation []
#use Credo.Check, category: :error, base_priority: :normal
end
@doc """
Prewalks a given SourceFile's AST or a given AST.
Technically this is just a wrapper around `Macro.prewalk/3`.
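For example, collecting all `def` nodes (illustrative):
Credo.Code.prewalk(source_file, fn
{:def, _, _} = ast, acc -> {ast, [ast | acc]}
ast, acc -> {ast, acc}
end)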
"""
def prewalk(ast_or_source_file, fun, accumulator \\ [])
def prewalk(%SourceFile{ast: source_ast}, fun, accumulator) do
prewalk(source_ast, fun, accumulator)
end
def prewalk(source_ast, fun, accumulator) do
{_, accumulated} = Macro.prewalk(source_ast, accumulator, fun)
accumulated
end
@doc """
Postwalks a given SourceFile's AST or a given AST.
Technically this is just a wrapper around `Macro.postwalk/3`.
"""
def postwalk(ast_or_source_file, fun, accumulator \\ [])
def postwalk(%SourceFile{ast: source_ast}, fun, accumulator) do
postwalk(source_ast, fun, accumulator)
end
def postwalk(source_ast, fun, accumulator) do
{_, accumulated} = Macro.postwalk(source_ast, accumulator, fun)
accumulated
end
@doc """
Takes a SourceFile or String and returns an AST.
"""
def ast(%SourceFile{source: source, filename: filename}) do
ast(source, filename)
end
def ast(source, filename \\ "nofilename") do
case Code.string_to_quoted(source, line: 1) do
{:ok, value} -> {:ok, value}
{:error, error} -> {:error, [issue_for(error, filename)]}
end
end
@doc """
Converts a String into a List of tuples of `{line_no, line}`.
"""
def to_lines(source) do
source
|> String.split("\n")
|> Enum.with_index
|> Enum.map(fn({line, i}) -> {i + 1, line} end)
end
@doc """
Converts a String into a List of tokens using the `:elixir_tokenizer`.
"""
def to_tokens(source) do
{_, _, _, tokens} =
source
|> String.to_charlist
|> :elixir_tokenizer.tokenize(1, [])
tokens
end
defp issue_for({line_no, error_message, _}, filename) do
%Credo.Issue{
check: ParserError,
category: :error,
filename: filename,
message: error_message,
line_no: line_no
}
end
end
|
lib/credo/code.ex
| 0.744285
| 0.55097
|
code.ex
|
starcoder
|
defmodule Coxir.Struct.Guild do
@moduledoc """
Defines methods used to interact with Discord guilds.
Refer to [this](https://discordapp.com/developers/docs/resources/guild#guild-object)
for a list of fields and broader documentation.
In addition, the following fields are also embedded.
- `owner` - a user object
- `afk_channel` - a channel object
- `embed_channel` - a channel object
- `system_channel` - a channel object
- `channels` - list of channel objects
- `members` - list of member objects
- `roles` - list of role objects
"""
@type guild :: String.t() | map
use Coxir.Struct
use Bitwise
alias Coxir.Gateway
alias Coxir.Struct.{User, Role, Member, Channel}
def pretty(struct) do
struct
|> replace(:owner_id, &User.get/1)
|> replace(:afk_channel_id, &Channel.get/1)
|> replace(:embed_channel_id, &Channel.get/1)
|> replace(:widget_channel_id, &Channel.get/1)
|> replace(:system_channel_id, &Channel.get/1)
|> replace(:channels, &Channel.get/1)
|> replace(:members, &Member.get/1)
|> replace(:roles, &Role.get/1)
end
@doc """
Used to grab the shard of a given guild.
Returns the `PID` of the shard's process.
"""
@spec shard(guild) :: pid
def shard(%{id: id}),
do: shard(id)
def shard(guild) do
guild = guild
|> String.to_integer
shards = Gateway
|> Supervisor.count_children
|> Map.get(:workers)
(guild >>> 22)
|> rem(shards)
|> Gateway.get
end
@doc """
Modifies a given guild.
Returns a guild object upon success
or a map containing error information.
#### Params
Must be an enumerable with the fields listed below.
- `name` - guild name
- `region` - guild voice region
- `icon` - base64 encoded 128x128 jpeg image
- `splash` - base64 encoded 128x128 jpeg image
- `afk_timeout` - voice AFK timeout in seconds
- `afk_channel_id` - voice AFK channel
- `system_channel_id` - channel to which system messages are sent
Refer to [this](https://discordapp.com/developers/docs/resources/guild#modify-guild)
for a broader explanation on the fields and their defaults.
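#### Example (illustrative)
Guild.edit(guild, name: "My Server", afk_timeout: 300)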
"""
@spec edit(guild, Enum.t()) :: map
def edit(%{id: id}, params),
do: edit(id, params)
def edit(guild, params) do
API.request(:patch, "guilds/#{guild}", params)
|> pretty
end
@doc """
Changes the name of a given guild.
Returns a guild object upon success
or a map containing error information.
"""
@spec set_name(guild, String.t()) :: map
def set_name(guild, name),
do: edit(guild, name: name)
@doc """
Changes the icon of a given guild.
Returns a guild object upon success
or a map containing error information.
"""
@spec set_icon(guild, String.t()) :: map
def set_icon(guild, icon),
do: edit(guild, icon: icon)
@doc """
Changes the region of a given guild.
Returns a guild object upon success
or a map containing error information.
"""
@spec set_region(guild, String.t()) :: map
def set_region(guild, region),
do: edit(guild, region: region)
@doc """
Changes the splash of a given guild.
Returns a guild object upon success
or a map containing error information.
"""
@spec set_splash(guild, String.t()) :: map
def set_splash(guild, splash),
do: edit(guild, splash: splash)
@doc """
Changes the voice AFK timeout of a given guild.
Returns a guild object upon success
or a map containing error information.
"""
@spec set_afk_timeout(guild, integer) :: map
def set_afk_timeout(guild, timeout),
do: edit(guild, afk_timeout: timeout)
@doc """
Changes the voice AFK channel of a given guild.
Returns a guild object upon success
or a map containing error information.
"""
@spec set_afk_channel(guild, String.t()) :: map
def set_afk_channel(guild, channel),
do: edit(guild, afk_channel_id: channel)
@doc """
Changes the channel to which system messages are sent on a given guild.
Returns a guild object upon success
or a map containing error information.
"""
@spec set_system_channel(guild, String.t()) :: map
def set_system_channel(guild, channel),
do: edit(guild, system_channel_id: channel)
@doc """
Deletes a given guild.
Returns the atom `:ok` upon success
or a map containing error information.
"""
@spec delete(guild) :: :ok | map
def delete(%{id: id}),
do: delete(id)
def delete(guild) do
API.request(:delete, "guilds/#{guild}")
end
@doc """
Leaves from a given guild.
Returns the atom `:ok` upon success
or a map containing error information.
"""
@spec leave(guild) :: :ok | map
def leave(%{id: id}),
do: leave(id)
def leave(guild) do
API.request(:delete, "users/@me/guilds/#{guild}")
end
@doc """
Creates a role on a given guild.
Returns a role object upon success
or a map containing error information.
#### Params
Must be an enumerable with the fields listed below.
- `name` - name of the role
- `color` - RGB color value
- `permissions` - bitwise of the permissions
- `hoist` - whether the role should be displayed separately
- `mentionable` - whether the role should be mentionable
Refer to [this](https://discordapp.com/developers/docs/resources/guild#create-guild-role)
for a broader explanation on the fields and their defaults.
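#### Example (illustrative)
Guild.create_role(guild, name: "Moderator", hoist: true)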
"""
@spec create_role(guild, Enum.t()) :: map
def create_role(guild, params \\ %{})
def create_role(%{id: id}, params),
do: create_role(id, params)
def create_role(guild, params) do
API.request(:post, "guilds/#{guild}/roles", params)
|> put(:guild_id, guild)
|> Role.pretty
end
@doc """
Modifies the positions of a set of roles on a given guild.
Returns a list of role objects upon success
or a map containing error information.
#### Params
Must be a list of maps with the fields listed below.
- `id` - snowflake of the role
- `position` - sorting position of the role
"""
@spec edit_role_positions(guild, list) :: list | map
def edit_role_positions(%{id: id}, params),
do: edit_role_positions(id, params)
def edit_role_positions(guild, params) do
API.request(:patch, "guilds/#{guild}/roles", params)
|> case do
list when is_list(list) ->
for role <- list do
role
|> put(:guild_id, guild)
|> Role.pretty
end
error -> error
end
end
@doc """
Fetches the roles from a given guild.
Returns a list of role objects upon success
or a map containing error information.
"""
@spec get_roles(guild) :: list | map
def get_roles(%{id: id}),
do: get_roles(id)
def get_roles(guild) do
API.request(:get, "guilds/#{guild}/roles")
|> case do
list when is_list(list) ->
for role <- list do
role
|> put(:guild_id, guild)
|> Role.pretty
end
error -> error
end
end
@doc """
Creates a channel on a given guild.
Returns a channel object upon success
or a map containing error information.
#### Params
Must be an enumerable with the fields listed below.
- `name` - channel name (2-100 characters)
- `type` - the type of channel
- `nsfw` - whether the channel is NSFW
- `bitrate` - the bitrate in bits of the voice channel
- `user_limit` - the user limit of the voice channel
- `permission_overwrites` - channel-specific permissions
- `parent_id` - id of the parent category
Refer to [this](https://discordapp.com/developers/docs/resources/guild#create-guild-channel)
for a broader explanation on the fields and their defaults.
"""
@spec create_channel(guild, Enum.t()) :: map
def create_channel(%{id: id}, params),
do: create_channel(id, params)
def create_channel(guild, params) do
API.request(:post, "guilds/#{guild}/channels", params)
end
@doc """
Adds a user to a given guild.
Returns a member object upon success
or a map containing error information.
#### Params
Must be an enumerable with the fields listed below.
- `access_token` - an oauth2 access token
- `nick` - value to set the user's nickname to
- `roles` - list of role ids the user is assigned
- `mute` - whether the user is muted
- `deaf` - whether the user is deafened
Refer to [this](https://discordapp.com/developers/docs/resources/guild#add-guild-member)
for a broader explanation on the fields and their defaults.
"""
@spec add_member(guild, String.t(), Enum.t()) :: map
def add_member(%{id: id}, user, params),
do: add_member(id, user, params)
def add_member(guild, user, params) do
API.request(:put, "guilds/#{guild}/members/#{user}", params)
|> Member.pretty
end
@doc """
Fetches a member from a given guild.
Returns a member object upon success
or a map containing error information.
"""
@spec get_member(guild, String.t()) :: map
def get_member(%{id: id}, user),
do: get_member(id, user)
def get_member(guild, user) do
Member.get({guild, user})
|> case do
nil ->
API.request(:get, "guilds/#{guild}/members/#{user}")
|> put(:id, {guild, user})
|> Member.pretty
member -> member
end
end
@doc """
Fetches the members from a given guild.
Returns a list of member objects upon success
or a map containing error information.
#### Query
Must be a keyword list with the fields listed below.
- `limit` - max number of members to return (1-1000)
- `after` - the highest user id in the previous page
Refer to [this](https://discordapp.com/developers/docs/resources/guild#list-guild-members)
for a broader explanation on the fields and their defaults.
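#### Example (illustrative)
Guild.list_members(guild, limit: 100)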
"""
@spec list_members(guild, Keyword.t()) :: list | map
def list_members(term, query \\ [])
def list_members(%{id: id}, query),
do: list_members(id, query)
def list_members(guild, query) do
API.request(:get, "guilds/#{guild}/members", "", params: query)
|> case do
list when is_list(list) ->
for member <- list do
member
|> put(:id, {guild, member.user.id})
|> Member.pretty
end
error -> error
end
end
@doc """
Gets the number of members that would be removed in a prune.
Returns a map with a `pruned` field
or a map containing error information.
#### Query
Must be a keyword list with the fields listed below.
- `days` - number of days to count prune for (1 or more)
"""
@spec get_prune(guild, Keyword.t()) :: map
def get_prune(%{id: id}, query),
do: get_prune(id, query)
def get_prune(guild, query) do
API.request(:get, "guilds/#{guild}/prune", "", params: query)
end
@doc """
Begins a prune operation for a given guild.
Returns a map with a `pruned` field
or a map containing error information.
#### Query
Must be a keyword list with the fields listed below.
- `days` - number of days to count prune for (1 or more)
"""
@spec do_prune(guild, Keyword.t()) :: map
def do_prune(%{id: id}, query),
do: do_prune(id, query)
def do_prune(guild, query) do
API.request(:post, "guilds/#{guild}/prune", "", params: query)
end
@doc """
Fetches the bans from a given guild.
Returns a list of ban objects
or a map containing error information.
"""
@spec get_bans(guild) :: list | map
def get_bans(%{id: id}),
do: get_bans(id)
def get_bans(guild) do
API.request(:get, "guilds/#{guild}/bans")
end
@doc """
Removes the ban for a user on a given guild.
Returns the atom `:ok` upon success
or a map containing error information.
"""
@spec unban(guild, String.t()) :: :ok | map
def unban(%{id: id}, user),
do: unban(id, user)
def unban(guild, user) do
API.request(:delete, "guilds/#{guild}/bans/#{user}")
end
@doc """
Fetches the invites from a given guild.
Returns a list of invite objects upon success
or a map containing error information.
"""
@spec get_invites(guild) :: list | map
def get_invites(%{id: id}),
do: get_invites(id)
def get_invites(guild) do
API.request(:get, "guilds/#{guild}/invites")
end
@doc """
Attaches an integration to a given guild.
Returns the atom `:ok` upon success
or a map containing error information.
#### Params
Must be an enumerable with the fields listed below.
- `type` - the integration type
- `id` - the integration id
"""
@spec create_integration(guild, Enum.t()) :: :ok | map
def create_integration(%{id: id}, params),
do: create_integration(id, params)
def create_integration(guild, params) do
API.request(:post, "guilds/#{guild}/integrations", params)
|> put(:guild_id, guild)
end
@doc """
Fetches the integrations from a given guild.
Returns a list of integration objects
or a map containing error information.
"""
@spec get_integrations(guild) :: list | map
def get_integrations(%{id: id}),
do: get_integrations(id)
def get_integrations(guild) do
API.request(:get, "guilds/#{guild}/integrations")
|> case do
list when is_list(list) ->
for integration <- list do
integration
|> put(:guild_id, guild)
end
error -> error
end
end
@doc """
Fetches the webhooks from a given guild.
Returns a list of webhook objects
or a map containing error information.
"""
@spec get_webhooks(guild) :: list | map
def get_webhooks(%{id: id}),
do: get_webhooks(id)
def get_webhooks(guild) do
API.request(:get, "guilds/#{guild}/webhooks")
end
@doc """
Fetches the voice regions for a given guild.
Returns a list of voice region objects
or a map containing error information.
"""
@spec get_regions(guild) :: list | map
def get_regions(%{id: id}),
do: get_regions(id)
def get_regions(guild) do
API.request(:get, "guilds/#{guild}/regions")
end
@doc """
Fetches a list of voice regions.
Returns a list of voice region objects
or a map containing error information.
"""
@spec get_regions :: list | map
def get_regions do
API.request(:get, "voice/regions")
end
end
|
lib/coxir/struct/guild.ex
| 0.876086
| 0.420005
|
guild.ex
|
starcoder
|
defmodule Protox.MergeMessage do
@moduledoc """
This module provides a helper function to merge messages.
"""
alias Protox.Field
@doc """
Singular fields of `msg` will be overwritten, if specified in `from`, except for
embedded messages which will be merged. Repeated fields will be concatenated.
Note that "specified" has a different meaning in protobuf 2 and 3:
- 2: if the singular field from `from` is nil, the value from `msg` is kept
- 3: if the singular field from `from` is set to the default value, the value from `msg` is
kept. This behaviour matches the C++ reference implementation behaviour.
One of the following must hold:
- `msg` and `from` are of the same type; or
- either `msg` or `from` is `nil`: the non-nil message is returned; or
- both are `nil`: `nil` is returned.
# Example
iex> r1 = %Protobuf2{a: 0, s: :ONE}
iex> r2 = %Protobuf2{a: nil, s: :TWO}
iex> Protox.MergeMessage.merge(r1, r2)
%Protobuf2{a: 0, s: :TWO}
iex> Protox.MergeMessage.merge(r2, r1)
%Protobuf2{a: 0, s: :ONE}
"""
@spec merge(struct() | nil, struct() | nil) :: struct() | nil
def merge(nil, from), do: from
def merge(msg, nil), do: msg
def merge(msg, from) do
unknown_fields_name = msg.__struct__.unknown_fields_name()
Map.merge(msg, from, fn
:__struct__, v1, _v2 ->
v1
^unknown_fields_name, v1, _v2 ->
v1
name, v1, v2 ->
merge_field(msg, name, v1, v2)
end)
end
# -- Private
defp merge_field(msg, name, v1, v2) do
case msg.__struct__.field_def(name) do
{:ok, %Field{kind: :packed}} ->
v1 ++ v2
{:ok, %Field{kind: :unpacked}} ->
v1 ++ v2
{:ok, %Field{kind: {:scalar, _}, type: {:message, _}}} ->
merge(v1, v2)
{:ok, %Field{kind: {:scalar, _}}} ->
{:ok, default} = msg.__struct__.default(name)
merge_scalar(msg.__struct__.syntax(), v1, v2, default)
{:ok, %Field{kind: :map, type: {_, {:message, _}}}} ->
Map.merge(v1, v2, fn _key, w1, w2 -> merge(w1, w2) end)
{:ok, %Field{kind: :map}} ->
Map.merge(v1, v2)
{:error, :no_such_field} ->
merge_oneof(msg, v1, v2)
end
end
defp merge_scalar(:proto2, v1, nil, _default), do: v1
defp merge_scalar(:proto3, v1, v2, v2), do: v1
defp merge_scalar(_syntax, _v1, v2, _default), do: v2
defp merge_oneof(
msg,
{v1_child_field, v1_child_value},
{v2_child_field, v2_child_value} = v2
)
when v1_child_field == v2_child_field do
{:ok, v1_child_field_def} = msg.__struct__.field_def(v1_child_field)
{:ok, v2_child_field_def} = msg.__struct__.field_def(v2_child_field)
if oneof_message?(v1_child_field_def) and oneof_message?(v2_child_field_def) do
{v1_child_field, merge(v1_child_value, v2_child_value)}
else
v2
end
end
defp merge_oneof(_msg, v1, nil), do: v1
defp merge_oneof(_msg, _v1, v2), do: v2
defp oneof_message?(%Field{kind: {:oneof, _}, type: {:message, _}}), do: true
defp oneof_message?(_), do: false
end
|
lib/protox/merge_message.ex
| 0.870212
| 0.617729
|
merge_message.ex
|
starcoder
|
defmodule AVLTree.Node do
@moduledoc false
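# Nodes are `{value, height, left, right}` tuples; `nil` is the empty tree.
# Illustrative usage with a `<` comparator:
#
#     less = fn a, b -> a < b end
#     tree = Enum.reduce([3, 1, 2], nil, &AVLTree.Node.put(&2, &1, less))
#     AVLTree.Node.get_first(tree, nil)  #=> 1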
@compile {:inline,
value: 1,
height: 1,
fix_height: 1,
rotate_left: 1,
rotate_right: 1,
big_rotate_left: 1,
big_rotate_right: 1,
balance: 1}
def put(nil, value, _less), do: {value, 1, nil, nil}
def put({v, h, l, r}, value, less) do
cond do
less.(value, v) ->
case put(l, value, less) do
{:update, l} -> {:update, {v, h, l, r}}
l -> balance({v, h, l, r})
end
less.(v, value) ->
case put(r, value, less) do
{:update, r} -> {:update, {v, h, l, r}}
r -> balance({v, h, l, r})
end
true ->
{:update, {value, h, l, r}}
end
end
def put_lower(nil, value, _less), do: {value, 1, nil, nil}
def put_lower({v, h, l, r}, value, less) do
balance(
if less.(v, value) do
{v, h, l, put_lower(r, value, less)}
else
{v, h, put_lower(l, value, less), r}
end
)
end
def put_upper(nil, value, _less), do: {value, 1, nil, nil}
def put_upper({v, h, l, r}, value, less) do
balance(
if less.(value, v) do
{v, h, put_upper(l, value, less), r}
else
{v, h, l, put_upper(r, value, less)}
end
)
end
def member?(nil, _value, _less), do: false
def member?({v, _h, l, r}, value, less) do
cond do
less.(value, v) -> member?(l, value, less)
less.(v, value) -> member?(r, value, less)
true -> true
end
end
def get(nil, _value, default, _less), do: default
def get({v, _h, l, r}, value, default, less) do
cond do
less.(value, v) -> get(l, value, default, less)
less.(v, value) -> get(r, value, default, less)
true -> v
end
end
def get_first(nil, default), do: default
def get_first({v, _h, nil, _r}, _default), do: v
def get_first({_v, _h, l, _r}, default), do: get_first(l, default)
def get_last(nil, default), do: default
def get_last({v, _h, _l, nil}, _default), do: v
def get_last({_v, _h, _l, r}, default), do: get_last(r, default)
def get_lower(nil, _value, default, _less), do: default
def get_lower({v, _h, l, r}, value, default, less) do
case less.(v, value) do
true ->
get_lower(r, value, default, less)
false ->
case get_lower(l, value, default, less) do
nil ->
case less.(value, v) do
true -> default
false -> v
end
value ->
value
end
end
end
def get_upper(nil, _value, default, _less), do: default
def get_upper({v, _h, l, r}, value, default, less) do
case less.(value, v) do
true ->
get_upper(l, value, default, less)
false ->
case get_upper(r, value, default, less) do
nil ->
case less.(v, value) do
true -> default
false -> v
end
value ->
value
end
end
end
def height(nil), do: 0
def height({_v, h, _l, _r}), do: h
def value({v, _h, _l, _r}), do: v
def delete(nil, _value, _less), do: {false, nil}
def delete({v, h, l, r} = a, value, less) do
cond do
less.(value, v) ->
case delete(l, value, less) do
{true, l} -> {true, balance({v, h, l, r})}
{false, _} -> {false, a}
end
less.(v, value) ->
case delete(r, value, less) do
{true, r} -> {true, balance({v, h, l, r})}
{false, _} -> {false, a}
end
true ->
{true, delete_node(a)}
end
end
def delete_lower(nil, _value, _less), do: {false, nil}
def delete_lower({v, h, l, r} = a, value, less) do
case less.(v, value) do
true ->
case delete_lower(r, value, less) do
{true, r} -> {true, balance({v, h, l, r})}
{false, _} -> {false, a}
end
false ->
case delete_lower(l, value, less) do
{true, l} ->
{true, balance({v, h, l, r})}
{false, _} ->
case less.(value, v) do
true -> {false, a}
false -> {true, delete_node(a)}
end
end
end
end
def delete_upper(nil, _value, _less), do: {false, nil}
def delete_upper({v, h, l, r} = a, value, less) do
case less.(value, v) do
true ->
case delete_upper(l, value, less) do
{true, l} -> {true, balance({v, h, l, r})}
{false, _} -> {false, a}
end
false ->
case delete_upper(r, value, less) do
{true, r} ->
{true, balance({v, h, l, r})}
{false, _} ->
case less.(v, value) do
true -> {false, a}
false -> {true, delete_node(a)}
end
end
end
end
def iter_lower(root), do: iter_lower_impl(root, [])
def iter_lower_impl({_v, _h, l, _r} = a, iter), do: iter_lower_impl(l, [a | iter])
def iter_lower_impl(nil, iter), do: iter
def next([{_v, _h, _, r} = n | tail]), do: {n, iter_lower_impl(r, tail)}
def next([]), do: :none
def view(root) do
{_, _, canvas} = __MODULE__.View.node_view(root)
Enum.join(canvas, "\n")
end
defp fix_height({v, _h, l, r}) do
{v, max(height(l), height(r)) + 1, l, r}
end
defp rotate_left({v, h, l, {rv, rh, rl, rr}}) do
fix_height({rv, rh, fix_height({v, h, l, rl}), rr})
end
defp rotate_right({v, h, {lv, lh, ll, lr}, r}) do
fix_height({lv, lh, ll, fix_height({v, h, lr, r})})
end
defp big_rotate_left({v, h, l, r}) do
rotate_left({v, h, l, rotate_right(r)})
end
defp big_rotate_right({v, h, l, r}) do
rotate_right({v, h, rotate_left(l), r})
end
defp balance(a) do
a = fix_height(a)
{_v, _h, l, r} = a
cond do
height(r) - height(l) == 2 ->
{_rv, _rh, rl, rr} = r
if height(rl) <= height(rr) do
rotate_left(a)
else
big_rotate_left(a)
end
height(l) - height(r) == 2 ->
{_lv, _lh, ll, lr} = l
if height(lr) <= height(ll) do
rotate_right(a)
else
big_rotate_right(a)
end
true ->
a
end
end
defp delete_node({_v, _h, l, r}) do
if height(r) > height(l) do
{{v, h, _l, _r}, r} = delete_min(r)
balance({v, h, l, r})
else
if l == nil do
r
else
{{v, h, _l, _r}, l} = delete_max(l)
balance({v, h, l, r})
end
end
end
defp delete_min({v, h, l, r} = a) do
if l do
{m, l} = delete_min(l)
{m, balance({v, h, l, r})}
else
{a, r}
end
end
defp delete_max({v, h, l, r} = a) do
if r do
{m, r} = delete_max(r)
{m, balance({v, h, l, r})}
else
{a, l}
end
end
end
|
lib/avl_tree/node.ex
| 0.721253
| 0.511229
|
node.ex
|
starcoder
|
defmodule Sslcerts do
@moduledoc """
# Sslcerts
An Elixir wrapper to [Let's Encrypt](https://letsencrypt.org/) and [Certbot](https://certbot.eff.org/) for SSL certificate management.
This library is sufficiently opinionated, so to learn more about how to integrate Let's Encrypt
SSL certs into your project without having to follow the style of this project, please
refer to [Phoenix/Elixir App Secured with Let’s Encrypt](https://medium.com/@a4word/phoenix-app-secured-with-let-s-encrypt-469ac0995775)
This wrapper provides two basic functions.
* Create a new certificate for your site
* Renew an existing, soon-to-expire certificate for your site
This is meant to be run on your production server and, as this library expands, will include
managing certificates across multiple boxes.
## Installation
### Command Line (Latest Version)
To install the `sslcerts` command line tool (whose only dependency is Erlang), then
you can [install it using escript](https://hexdocs.pm/mix/master/Mix.Tasks.Escript.Install.html).
```bash
# Install from GitHub
mix escript.install github capbash/sslcerts
# Install form HEX.pm
mix escript.install hex sslcerts
```
If you see a warning like
```bash
warning: you must append "~/.mix/escripts" to your PATH
if you want to invoke escripts by name
```
Then, make sure to update your PATH variable. Here's how on a Mac OS X, but each
[environment is slightly different](https://unix.stackexchange.com/questions/26047/how-to-correctly-add-a-path-to-path).
```bash
vi ~/.bash_profile
# Add a line like the following
PATH="$HOME/.mix/escripts:$PATH"
export PATH
```
Start a new terminal session. You will know it's working when you can *find* it using *where*
```
where sslcerts
```
### Command Line (Other Versions)
To install a specific version, branch, tag or commit, adjust any one of the following
```bash
# Install from a specific version
mix escript.install hex sslcerts 1.2.3
# Install from the latest of a specific branch
mix escript.install github capbash/sslcerts branch git_branch
# Install from a specific tag
mix escript.install github capbash/sslcerts tag git_tag
# Install from a specific commit
mix escript.install github capbash/sslcerts ref git_ref
```
Again, checkout [mix escript.install](https://hexdocs.pm/mix/Mix.Tasks.Escript.Install.html) for
more information about installing global tasks.
### Mix Tasks
More likley, you will have an Elixir phoenix application and you can
add a dependency to your `mix.exs` file.
```elixir
@deps [
sslcerts: "~> 0.1.0"
]
```
This will give you access to `mix sslcerts.*` tasks (instead of globally installing
the `sslcerts` escript). You will also have programmatic access from the `Sslcerts` module,
so you could expose features directly within your application as well.
## Configure Host
Before you can use the sslcerts, you will need to configure your host / domain name that
you are trying to secure.
Let's say your domain is namedb.org, then configure it as follows:
# using escript
sslcerts init
sslcerts config host namedb.org
# using mix tasks
mix sslcerts.init
mix sslcerts.config host namedb.org
And to confirm it's set, run
sslcerts config
And the output should look similar to:
domains: ["FILL_ME_IN.com"]
email: "YOUR_EMAIL_HERE"
ini: "/etc/letsencrypt/letsencrypt.ini"
keysize: 4096
## Available Commands / Tasks
To get help on the available commands, run
# using escript
sslcerts
# using mix tasks
mix sslcerts
The output will look similar to the following
sslcerts v0.1.0
sslcerts allows elixir/phoenix apps to easily create SSL certs (using Let's Encrypt and Certbot).
Available tasks:
sslcerts config # Reads, updates or deletes Sslcerts config
sslcerts create # Create a new certificate
sslcerts init # Initialize your sslcerts config
sslcerts install # Install / Initialize your server to generate SSL certs
sslcerts renew # Renew an existing certificate
Further information can be found here:
-- https://hex.pm/packages/sslcerts
-- https://github.com/capbash/sslcerts
Please note that the mix tasks and sslcerts scripts provide identical functionality;
they are just structured slightly differently.
In general,
* `mix sslcerts.<sub command> <options> <args>` for mix tasks
* `sslcerts <sub command> <options> <args>` for escript
Make sure that have installed sslcerts correctly for mix tasks (if you want to use mix
tasks), or escript (if you want to use escript).
## Elixir API
These features are also available from within Elixir through the `Sslcerts` modules;
this gives you better programmatic access to return data (presented as a map),
but in most cases probably is not required to automate your infrastructure.
If we start an IEx session in your project that includes the sslcerts dependency,
you can access the same information in Elixir.
iex> Sslcerts.config
%{email: "YOUR_EMAIL_HERE",
domains: ["FILL_ME_IN.com"]}
This is the first release, which just manages the configs. Concrete implementation
(and supporting documentation) coming soon.
The underlying configs are stored in `Sslcerts.Worker` ([OTP GenServer](https://elixir-lang.org/getting-started/mix-otp/genserver.html)).
If you change your configuration and need it reloaded, call
iex> Sslcerts.reload
"""
def version(), do: unquote(Mix.Project.config()[:version])
def elixir_version(), do: unquote(System.version())
def start(), do: {:ok, _started} = Application.ensure_all_started(:sslcerts)
@doc """
Retrieve the SSLCERTS configs.
"""
def config do
GenServer.call(Sslcerts.Worker, :config)
end
@doc """
Reload the SSLCERTS configs from the defaulted location
"""
def reload, do: GenServer.call(Sslcerts.Worker, :reload)
def reload(filename), do: GenServer.call(Sslcerts.Worker, {:reload, filename})
end
|
lib/sslcerts.ex
| 0.737536
| 0.901488
|
sslcerts.ex
|
starcoder
|
defmodule Objex do
defmacro __using__(_opts) do
quote do
import Objex, only: [class: 2, defm: 2]
end
end
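# Illustrative usage (assumes Objex.Registry has been started):
#
#     use Objex
#
#     class Greeter do
#       defm hello(this, name) do
#         {:hello, name}
#       end
#     end
#
#     g = Greeter.new()
#     Greeter.hello(g, "world")  #=> {:hello, "world"}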
defmacro class(name, do: body) do
quote do
defmodule unquote(name) do
use GenServer
def start_link(ref) do
GenServer.start_link(__MODULE__, %{},
name: {:via, Registry, {Objex.Registry, ref}})
end
def new do
this = make_ref()
{:ok, _pid} = __MODULE__.start_link(this)
__MODULE__.on_init(this) # Calls the "constructor" (which isn't allowed any params, by the way).
this
end
def on_init(_), do: nil
defoverridable [on_init: 1]
unquote(body)
end
end
end
defmacro defm(head, body) do
{fn_name, [this | call_args] = args} = decompose_head(head) # Splits the head into `{function name, args}`
{[_this | binding_names], bound_args} = bind_args(args) # Binds each arg to a unique name
# Replaces the args with the bound args.
# `fun(a, b, c)` becomes `fun(a = arg0, b = arg1, c = arg2)`.
# This is to prevent a `CompileError: unbound variable _` when using `_` as an
# argument (which happens because we're passing the args to `GenServer.call/2`).
head = Macro.postwalk(head, fn
{name, meta, old_args}
when (name == fn_name and old_args == args) ->
{name, meta, bound_args}
other -> other
end)
# Replaces `this.name = 1` by `env = Map.put(env, :name, 1)` and `this.name` by
# `Map.fetch!(env, :name)` (yep, you can't use names that don't exist).
# `env` is the state of the process.
body = Macro.prewalk(body, &update_env/1)
# Allows to ignore guards on `handle_call/3`.
# Replacing the head of handle_call to add the guard would be nicer but it seems
# like it'd ruin my fun.
fn_id = :erlang.term_to_binary(head)
quote do
# The "method" becomes a function that calls the process registered in
# `Objex.Registry` (`this` contains the id -- the Reference created using
# `make_ref()` in `new/0`).
def unquote(head) do
GenServer.call({:via, Registry, {Objex.Registry, unquote(this)}},
{unquote(fn_id), unquote(binding_names)})
end
# The body of the method goes here!
def handle_call({unquote(fn_id), unquote(call_args)}, from, env) do
result = unquote(body[:do])
{:reply, result, env}
end
end
end
# Replaces instances of `this` to use the process' state.
defp update_env({:=, _, [{{:., _, [{:this, _, nil}, name]}, _, []}, value]}) do
quote do
env = Map.put(env, unquote(name), unquote(value))
unquote(value)
end
end
defp update_env({{:., _, [{:this, _, nil}, name]}, _, []}),
do: quote(do: Map.fetch!(env, unquote(name)))
defp update_env(n), do: n
# Returns a tuple containing the function name and its list of arguments.
defp decompose_head({:when, _, [head | _]}),
do: decompose_head(head)
defp decompose_head(head),
do: Macro.decompose_call(head)
# Binds every argument to a unique name.
defp bind_args([]), do: {[], []}
defp bind_args(args) do
args
|> Enum.with_index()
|> Enum.map(fn {arg, index} ->
binding = Macro.var(:"arg#{index}", __MODULE__)
{binding, quote(do: unquote(arg) = unquote(binding))}
end)
|> Enum.unzip
end
end
|
lib/objex.ex
| 0.598782
| 0.4291
|
objex.ex
|
starcoder
|
defmodule StringFormatterUtils do
@moduledoc """
Contains various helper methods to avoid duplicating the stuff all over
"""
@doc """
Transforms a map or a keyword list into a new map
where all keys are strings
"""
def normalize_params(params) do
params
|> Enum.map(fn {key, val} -> {to_string(key), val} end)
|> Enum.into(%{})
end
@doc """
Evaluates the given placeholder by looking up its value
in the params map. Returns {placeholder} if nothing found
"""
def eval_holder(placeholder, params) do
case params[placeholder] do
nil -> "{#{placeholder}}"
other -> to_string(other)
end
end
def split_1(string) do
do_split(string, string, 0)
end
defp do_split("", string, _), do: [string, "", ""]
defp do_split(<<x::binary-size(1), rest::binary>>, orig, idx) when x == "{" or x == "}" do
# Safe to match the ASCII chars "{" and "}" byte by byte; see https://en.wikipedia.org/wiki/UTF-8:
# "Backward compatibility: One-byte codes are used for the ASCII values 0 through 127, ...
# Bytes in this range are not used anywhere else ... as it will not accidentally see those ASCII characters in the middle of a multi-byte character."
[binary_part(orig, 0, idx), x, rest]
end
defp do_split(<<_x::binary-size(1), rest::binary>>, orig, idx) do
do_split(rest, orig, idx + 1)
end
#https://stackoverflow.com/a/44120981/289992
def split_2(binary) do
case :binary.match(binary, ["{", "}"]) do
{start, length} ->
before = :binary.part(binary, 0, start)
match = :binary.part(binary, start, length)
after_ = :binary.part(binary, start + length, byte_size(binary) - (start + length))
[before, match, after_]
:nomatch -> [binary, "", ""]
end
end
def split_3(string) do
case :binary.match(string, ["{", "}"]) do
{start, length} ->
<<a::binary-size(start), b::binary-size(length), c::binary>> = string
[a, b, c]
:nomatch -> [string, "", ""]
end
end
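# All three variants return [before, match, after] for the first "{" or "}" found,
# e.g. split_1("Hello {name}!") returns ["Hello ", "{", "name}!"].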
end
|
pattern_matching_and_state_machines/lib/string_formatter_utils.ex
| 0.601477
| 0.478163
|
string_formatter_utils.ex
|
starcoder
|
defmodule Serum.Result do
@moduledoc """
This module defines types for positive results or errors returned by
functions in this project.
"""
import Serum.IOProxy, only: [put_err: 2]
@type t :: :ok | error()
@type t(type) :: {:ok, type} | error()
@type error :: {:error, err_details()}
@type err_details :: msg_detail() | full_detail() | nest_detail()
@type msg_detail :: binary()
@type full_detail :: {binary(), binary(), non_neg_integer()}
@type nest_detail :: {term(), [error()]}
@doc """
Takes a list of results without value and checks if there is no error.
Returns `:ok` if there is no error.
Returns an aggregated error object if there is one or more errors.
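For example (illustrative):
aggregate([:ok, {:error, "oops"}], "build failed")
#=> {:error, {"build failed", [{:error, "oops"}]}}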
"""
@spec aggregate([t()], term()) :: t()
def aggregate(results, msg) do
results
|> do_aggregate([])
|> case do
[] -> :ok
errors when is_list(errors) -> {:error, {msg, errors}}
end
end
@spec do_aggregate([t()], [t()]) :: [t()]
defp do_aggregate(results, errors)
defp do_aggregate([], errors), do: errors |> Enum.reverse() |> Enum.uniq()
defp do_aggregate([:ok | results], errors), do: do_aggregate(results, errors)
defp do_aggregate([{:error, _} = error | results], errors) do
do_aggregate(results, [error | errors])
end
@doc """
Takes a list of results with values and checks if there is no error.
If there is no error, it returns `{:ok, list}` where `list` is a list of
returned values.
Returns an aggregated error object if there is one or more errors.
"""
@spec aggregate_values([t(term())], term()) :: t([term()])
def aggregate_values(results, msg) do
results
|> do_aggregate_values([], [])
|> case do
{values, []} -> {:ok, values}
{_, errors} when is_list(errors) -> {:error, {msg, errors}}
end
end
@spec do_aggregate_values([t(term())], [term()], [t()]) :: {[term()], [t()]}
defp do_aggregate_values(results, values, errors)
defp do_aggregate_values([], values, errors) do
{Enum.reverse(values), errors |> Enum.reverse() |> Enum.uniq()}
end
defp do_aggregate_values([{:ok, value} | results], values, errors) do
do_aggregate_values(results, [value | values], errors)
end
defp do_aggregate_values([{:error, _} = error | results], values, errors) do
do_aggregate_values(results, values, [error | errors])
end
@doc "Prints an error object in a beautiful format."
@spec show(t() | t(term()), non_neg_integer()) :: :ok
def show(result, depth \\ 0)
def show(:ok, depth), do: put_err(:info, get_message(:ok, depth))
def show({:ok, _} = result, depth), do: put_err(:info, get_message(result, depth))
def show(error, depth), do: put_err(:error, get_message(error, depth))
@doc """
Gets a human friendly message from the given `result`.
You can control the indentation level by passing a non-negative integer to
the `depth` parameter.
"""
@spec get_message(t() | t(term), non_neg_integer()) :: binary()
def get_message(result, depth) do
result |> do_get_message(depth) |> IO.iodata_to_binary()
end
@spec do_get_message(t() | t(term), non_neg_integer()) :: IO.chardata()
defp do_get_message(result, depth)
defp do_get_message(:ok, depth), do: indented("No error detected", depth)
defp do_get_message({:ok, _}, depth), do: do_get_message(:ok, depth)
defp do_get_message({:error, msg}, depth) when is_binary(msg) do
indented(msg, depth)
end
defp do_get_message({:error, {posix, file, line}}, depth) when is_atom(posix) do
msg = posix |> :file.format_error() |> IO.iodata_to_binary()
do_get_message({:error, {msg, file, line}}, depth)
end
defp do_get_message({:error, {msg, file, 0}}, depth) when is_binary(msg) do
indented([file, ": ", msg], depth)
end
defp do_get_message({:error, {msg, file, line}}, depth) when is_binary(msg) do
indented([file, ?:, to_string(line), ": ", msg], depth)
end
defp do_get_message({:error, {msg, errors}}, depth) when is_list(errors) do
head = indented(["\x1b[1;31m", to_string(msg), ":\x1b[0m"], depth)
children = Enum.map(errors, &do_get_message(&1, depth + 1))
Enum.intersperse([head | children], ?\n)
end
@spec indented(IO.chardata(), non_neg_integer()) :: IO.chardata()
defp indented(str, 0), do: str
defp indented(str, depth), do: [List.duplicate(" ", depth - 1), "\x1b[31m-\x1b[0m ", str]
end
|
lib/serum/result.ex
| 0.858704
| 0.470615
|
result.ex
|
starcoder
|
defmodule Paidy.Payment do
@moduledoc """
Functions for working with payments at Paidy. Through this API you can:
* create a payment,
* capture a payment,
* update a payment,
* get a payment,
* refund a payment,
* partially refund a payment,
* close a payment.
Paidy API reference: https://paidy.com/docs/api/jp/index.html#2-
"""
@endpoint "payments"
@doc """
Create a payment.
Creates a payment with createable information.
Accepts the following parameters:
* `params` - a map of params used to create the payment (required).
Returns a `{:ok, payment}` tuple.
## Examples
params = %{
amount: 12500,
shipping_address: %{
line1: "AXISビル 10F",
line2: "六本木4-22-1",
state: "港区",
city: "東京都",
zip: "106-2004"
},
order: %{
order_ref: "your_order_ref",
items: [%{
quantity: 1,
id: "PDI001",
title: "Paidyスニーカー",
description: "Paidyスニーカー",
unit_price: 12000
}],
tax: 300,
shipping: 200
},
store_name: "Paidy sample store",
buyer_data: %{
age: 29,
order_count: 1000,
ltv: 250000,
last_order_amount: 20000,
last_order_at: 20
},
description: "hoge",
token_id: "tok_foobar",
currency: "JPY",
metadata: %{}
}
{:ok, payment} = Paidy.Payment.create(params)
"""
def create(params) do
create(params, Paidy.config_or_env_key())
end
@doc """
Create a payment. Accepts Paidy API key.
Creates a payment with createable information.
Accepts the following parameters:
* `params` - a map of params used to create the payment (required).
Returns a `{:ok, payment}` tuple.
## Examples
params = %{
amount: 12500,
shipping_address: %{
line1: "AXISビル 10F",
line2: "六本木4-22-1",
state: "港区",
city: "東京都",
zip: "106-2004"
},
order: %{
order_ref: "your_order_ref",
items: [%{
quantity: 1,
id: "PDI001",
title: "Paidyスニーカー",
description: "Paidyスニーカー",
unit_price: 12000
}],
tax: 300,
shipping: 200
},
store_name: "Paidy sample store",
buyer_data: %{
age: 29,
order_count: 1000,
ltv: 250000,
last_order_amount: 20000,
last_order_at: 20
},
description: "hoge",
token_id: "tok_foobar",
currency: "JPY",
metadata: %{}
}
{:ok, payment} = Paidy.Payment.create(params, "my_key")
"""
def create(params, key) do
Paidy.make_request_with_key(:post, "#{@endpoint}", key, params)
|> Paidy.Util.handle_paidy_response()
end
@doc """
Update a payment.
Updates a payment with changeable information.
Accepts the following parameters:
* `params` - a map of params to be updated (required).
Available parameters are: `description`, `metadata`, `receipt_email`,
`fraud_details` and `shipping`.
Returns a `{:ok, payment}` tuple.
## Examples
params = %{
description: "Changed payment"
}
{:ok, payment} = Paidy.Payment.change("payment_id", params)
"""
def change(id, params) do
change(id, params, Paidy.config_or_env_key())
end
@doc """
Update a payment. Accepts Paidy API key.
Updates a payment with changeable information.
Accepts the following parameters:
* `params` - a map of params to be updated (required).
Available parameters are: `description`, `metadata`, `receipt_email`,
`fraud_details` and `shipping`.
Returns a `{:ok, payment}` tuple.
## Examples
params = %{
description: "Changed payment"
}
{:ok, payment} = Paidy.Payment.change("payment_id", params, "my_key")
"""
def change(id, params, key) do
Paidy.make_request_with_key(:put, "#{@endpoint}/#{id}", key, params)
|> Paidy.Util.handle_paidy_response()
end
@doc """
Capture a payment.
Captures a payment that is currently pending.
Note: you can have a payment captured automatically by setting `capture: true` in the payment create params.
Returns a `{:ok, payment}` tuple.
## Examples
{:ok, payment} = Paidy.Payment.capture("payment_id")
"""
def capture(id) do
capture(id, Paidy.config_or_env_key())
end
@doc """
Capture a payment. Accepts Paidy API key.
Captures a payment that is currently pending.
Note: you can have a payment captured automatically by setting `capture: true` in the payment create params.
Returns a `{:ok, payment}` tuple.
## Examples
{:ok, payment} = Paidy.Payment.capture("payment_id", "my_key")
"""
def capture(id, key) do
Paidy.make_request_with_key(:post, "#{@endpoint}/#{id}/captures", key)
|> Paidy.Util.handle_paidy_response()
end
@doc """
Get a payment.
Gets a payment.
Returns a `{:ok, payment}` tuple.
## Examples
{:ok, payment} = Paidy.Payment.get("payment_id")
"""
def get(id) do
get(id, Paidy.config_or_env_key())
end
@doc """
Get a payment. Accepts Paidy API key.
Gets a payment.
Returns a `{:ok, payment}` tuple.
## Examples
{:ok, payment} = Paidy.Payment.get("payment_id", "my_key")
"""
def get(id, key) do
Paidy.make_request_with_key(:get, "#{@endpoint}/#{id}", key)
|> Paidy.Util.handle_paidy_response()
end
@doc """
Refund a payment.
Refunds a payment completely.
Note: use `refund_partial` if you just want to perform a partial refund.
Returns a `{:ok, payment}` tuple.
## Examples
{:ok, payment} = Paidy.Payment.refund("payment_id", "capture_id")
"""
def refund(id, capture_id) do
refund(id, capture_id, Paidy.config_or_env_key())
end
@doc """
Refund a payment. Accepts Paidy API key.
Refunds a payment completely.
Note: use `refund_partial` if you just want to perform a partial refund.
Returns a `{:ok, payment}` tuple.
## Examples
{:ok, payment} = Paidy.Payment.refund("payment_id", "capture_id", "my_key")
"""
def refund(id, capture_id, key) do
params = %{capture_id: capture_id}
Paidy.make_request_with_key(:post, "#{@endpoint}/#{id}/refunds", key, params)
|> Paidy.Util.handle_paidy_response()
end
@doc """
Partially refund a payment.
Refunds a payment partially.
Accepts the following parameters:
* `amount` - amount to be refunded (required).
Returns a `{:ok, payment}` tuple.
## Examples
{:ok, payment} = Paidy.Payment.refund_partial("payment_id", "capture_id", 500)
"""
def refund_partial(id, capture_id, amount) do
refund_partial(id, capture_id, amount, Paidy.config_or_env_key())
end
@doc """
Partially refund a payment. Accepts Paidy API key.
Refunds a payment partially.
Accepts the following parameters:
* `amount` - amount to be refunded (required).
Returns a `{:ok, payment}` tuple.
## Examples
{:ok, payment} = Paidy.Payment.refund_partial("payment_id", "capture_id", 500, "my_key")
"""
def refund_partial(id, capture_id, amount, key) do
params = %{capture_id: capture_id, amount: amount}
Paidy.make_request_with_key(:post, "#{@endpoint}/#{id}/refunds", key, params)
|> Paidy.Util.handle_paidy_response()
end
@doc """
Close a payment.
Closes a payment that is currently pending.
Note: you can have a payment closed automatically by setting `close: true` in the payment create params.
Returns a `{:ok, payment}` tuple.
## Examples
{:ok, payment} = Paidy.Payment.close("payment_id")
"""
def close(id) do
close(id, Paidy.config_or_env_key())
end
@doc """
Close a payment. Accepts Paidy API key.
Closes a payment that is currently pending.
Note: you can have a payment closed automatically by setting `close: true` in the payment create params.
Returns a `{:ok, payment}` tuple.
## Examples
{:ok, payment} = Paidy.Payment.close("payment_id", "my_key")
"""
def close(id, key) do
Paidy.make_request_with_key(:post, "#{@endpoint}/#{id}/close", key)
|> Paidy.Util.handle_paidy_response()
end
end
|
lib/paidy/payment.ex
| 0.886525
| 0.604778
|
payment.ex
|
starcoder
|
defmodule Geometry.Feature do
@moduledoc """
A combination of a `geometry` and `properties`.
"""
alias Geometry.{Feature, GeoJson}
defstruct [:geometry, properties: %{}]
@type t :: %Feature{
geometry: Geometry.t() | nil,
properties: map()
}
@doc """
Creates a new empty `Feature`.
## Examples
iex> Feature.new()
%Feature{}
"""
@spec new :: %Feature{geometry: nil, properties: %{}}
def new, do: %Feature{}
@doc """
Creates a new `Feature`.
## Examples
iex> Feature.new(
...> geometry: Point.new(1, 2),
...> properties: %{facility: :hotel}
...> )
%Feature{
geometry: %Point{coordinate: [1, 2]},
properties: %{facility: :hotel}
}
"""
@spec new(geometry: Geometry.t(), properties: map()) :: t()
def new(data), do: struct(Feature, data)
@doc """
Returns `true` for an empty `Feature`.
## Examples
iex> Feature.empty?(Feature.new())
true
iex> Feature.empty?(Feature.new(geometry: Point.new(1, 2)))
false
"""
@spec empty?(t()) :: boolean()
def empty?(%Feature{geometry: geometry}), do: is_nil(geometry)
@doc """
Returns an `:ok` tuple with the `Feature` from the given GeoJSON term.
Otherwise returns an `:error` tuple.
The `:type` option specifies which type is expected. The
possible values are `:z`, `:m`, and `:zm`.
## Examples
iex> ~s({
...> "type": "Feature",
...> "geometry": {"type": "Point", "coordinates": [1, 2, 3]},
...> "properties": {"facility": "Hotel"}
...> })
iex> |> Jason.decode!()
iex> |> Feature.from_geo_json(type: :z)
{:ok, %Feature{
geometry: %PointZ{coordinate: [1, 2, 3]},
properties: %{"facility" => "Hotel"}
}}
iex> ~s({
...> "type": "Feature",
...> "geometry": {"type": "Point", "coordinates": [1, 2]},
...> "properties": {"facility": "Hotel"}
...> })
iex> |> Jason.decode!()
iex> |> Feature.from_geo_json()
{:ok, %Feature{
geometry: %Point{coordinate: [1, 2]},
properties: %{"facility" => "Hotel"}
}}
"""
@spec from_geo_json(Geometry.geo_json_term(), opts) :: {:ok, t()} | Geometry.geo_json_error()
when opts: [type: :z | :m | :zm]
def from_geo_json(json, opts \\ []), do: GeoJson.to_feature(json, opts)
@doc """
The same as `from_geo_json/1`, but raises a `Geometry.Error` exception if it
fails.
## Examples
iex> ~s({
...> "type": "Feature",
...> "geometry": {"type": "Point", "coordinates": [1, 2, 3]},
...> "properties": {"facility": "Hotel"}
...> })
iex> |> Jason.decode!()
iex> |> Feature.from_geo_json!(type: :m)
%Feature{
geometry: %PointM{coordinate: [1, 2, 3]},
properties: %{"facility" => "Hotel"}
}
"""
@spec from_geo_json!(Geometry.geo_json_term(), opts) :: t()
when opts: [type: :z | :m | :zm]
def from_geo_json!(json, opts \\ []) do
case GeoJson.to_feature(json, opts) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the GeoJSON term of a `Feature`.
## Examples
iex> Feature.to_geo_json(Feature.new(
...> geometry: Point.new(1, 2),
...> properties: %{facility: :hotel}
...> ))
%{
"type" => "Feature",
"geometry" => %{
"type" => "Point",
"coordinates" => [1, 2]
},
"properties" => %{facility: :hotel}
}
"""
@spec to_geo_json(t()) :: Geometry.geo_json_term()
def to_geo_json(%Feature{} = feature) do
geometry =
case feature.geometry do
nil -> nil
geometry -> Geometry.to_geo_json(geometry)
end
%{
"type" => "Feature",
"geometry" => geometry,
"properties" => feature.properties
}
end
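# Round-trip sketch, assuming `Jason` is available and `Point` is aliased as
# in the doctests above. Note that atom property keys come back as strings
# after a real JSON round trip:
#
#     Feature.new(geometry: Point.new(1, 2), properties: %{facility: :hotel})
#     |> Feature.to_geo_json()
#     |> Jason.encode!()
#     |> Jason.decode!()
#     |> Feature.from_geo_json()
#     #=> {:ok, %Feature{geometry: %Point{coordinate: [1, 2]},
#     #=>                properties: %{"facility" => "hotel"}}}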
end
|
lib/geometry/feature.ex
| 0.949658
| 0.741884
|
feature.ex
|
starcoder
|
defmodule NervesTime.RTC.DS3231.Date do
@moduledoc false
alias NervesTime.RealTimeClock.BCD
@doc """
Return a list of commands for reading the time/date registers
"""
def reads() do
# Register 0x00 to 0x06
[{:write_read, <<0x0>>, 7}]
end
@doc """
Decode register values into a date
This only returns years between 2000 and 2099.
"""
@spec decode(<<_::56>>) :: {:ok, NaiveDateTime.t()} | {:error, any()}
def decode(<<seconds_bcd, minutes_bcd, hours24_bcd, _day, date_bcd, month_bcd, year_bcd>>) do
{:ok,
%NaiveDateTime{
microsecond: {0, 0},
second: BCD.to_integer(seconds_bcd),
minute: BCD.to_integer(minutes_bcd),
hour: BCD.to_integer(hours24_bcd),
day: BCD.to_integer(date_bcd),
month: BCD.to_integer(month_bcd),
year: 2000 + BCD.to_integer(year_bcd)
}}
end
def decode(_other), do: {:error, :invalid}
@doc """
Encode the specified date to register values.
Only dates between 2001 and 2099 are supported. This avoids the need to deal
with the leap year special case for 2000. That would involve setting the
century bit and that seems like a pointless complexity for a date that has come and gone.
"""
@spec encode(NaiveDateTime.t()) :: {:ok, <<_::56>>} | {:error, any()}
def encode(%NaiveDateTime{year: year} = date_time) when year > 2000 and year < 2100 do
{microseconds, _precision} = date_time.microsecond
seconds_bcd = BCD.from_integer(round(date_time.second + microseconds / 1_000_000))
minutes_bcd = BCD.from_integer(date_time.minute)
hours24_bcd = BCD.from_integer(date_time.hour)
day_bcd = BCD.from_integer(Calendar.ISO.day_of_week(year, date_time.month, date_time.day))
date_bcd = BCD.from_integer(date_time.day)
month_bcd = BCD.from_integer(date_time.month)
year_bcd = BCD.from_integer(year - 2000)
{:ok,
<<
seconds_bcd,
minutes_bcd,
hours24_bcd,
day_bcd,
date_bcd,
month_bcd,
year_bcd
>>}
end
def encode(_invalid_date) do
{:error, :invalid_date}
end
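# Round-trip sketch (the date itself is arbitrary; only years 2001..2099 are
# accepted by encode/1):
#
#     {:ok, registers} = NervesTime.RTC.DS3231.Date.encode(~N[2021-03-04 05:06:07])
#     NervesTime.RTC.DS3231.Date.decode(registers)
#     #=> {:ok, ~N[2021-03-04 05:06:07]}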
end
|
lib/nerves_time/rtc/ds3231/date.ex
| 0.788135
| 0.477371
|
date.ex
|
starcoder
|
defmodule Parent.Supervisor do
@moduledoc """
Supervisor of child processes.
This module works similarly to callbackless supervisors started with `Supervisor.start_link/2`.
To start a supervisor and some children you can do the following:
Parent.Supervisor.start_link([
child_spec1,
child_spec2,
# ...
])
To install a parent supervisor in the supervision tree you can provide a child specification in the
shape of `{Parent.Supervisor, {children, parent_options}}`.
To build a dedicated supervisor module you can do:
defmodule MySupervisor do
use Parent.Supervisor
def start_link(children, options),
do: Parent.Supervisor.start_link(children, options)
# ...
end
And now, you can install this supervisor in the supervision tree by passing
`{MySupervisor, {child_specs, parent_options}}` as the child specification to the parent.
You can interact with the running supervisor using functions from the `Parent.Client` module.
Refer to the `Parent` module for detailed explanation of child specifications, parent options,
and behaviour of parent processes.
In case you need more flexibility, take a look at `Parent.GenServer`.
"""
use Parent.GenServer
@doc """
Starts the parent process.
This function returns only after all the children have been started. If a child fails to start,
the parent process will terminate all successfully started children, and then itself.
"""
@spec start_link([Parent.start_spec()], Parent.GenServer.options()) :: GenServer.on_start()
def start_link(children, options \\ []),
do: Parent.GenServer.start_link(__MODULE__, children, options)
@impl GenServer
def init(children) do
Parent.start_all_children!(children)
{:ok, nil}
end
@spec child_spec({[Parent.start_spec()], Parent.GenServer.options()}) :: Parent.child_spec()
def child_spec({children, options}) do
[start: {__MODULE__, :start_link, [children, options]}]
|> Parent.parent_spec()
|> Parent.child_spec(id: Keyword.get(options, :name, __MODULE__))
end
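# Sketch: installing a parent supervisor under an ordinary supervisor via the
# tuple form handled by child_spec/1 above (MyWorker and MyParentSup are
# hypothetical):
#
#     children = [
#       {Parent.Supervisor, {[{MyWorker, :arg}], name: MyParentSup}}
#     ]
#
#     Supervisor.start_link(children, strategy: :one_for_one)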
@doc false
defmacro __using__(_) do
quote do
def child_spec(arg) do
[start: {__MODULE__, :start_link, [arg]}]
|> Parent.parent_spec()
|> Parent.child_spec(id: __MODULE__)
end
defoverridable child_spec: 1
end
end
end
|
lib/parent/supervisor.ex
| 0.78609
| 0.488405
|
supervisor.ex
|
starcoder
|
defmodule PassiveSupport.Enum do
@moduledoc """
Functions extending the functionality of enumerables.
"""
@doc """
Converts an enumerable to a `Map`, using the index of
each item as the item's key.
## Examples
iex> to_map(["hello", "world", "how", "are", "you"])
%{0 => "hello", 1 => "world", 2 => "how", 3 => "are", 4 => "you"}
iex> to_map(["Elixir", "is", "cool"])
%{0 => "Elixir", 1 => "is", 2 => "cool"}
"""
@spec to_map(Enumerable.t) :: Map.t
def to_map(enum), do: to_map(enum, fn (_item, item_index) -> item_index end)
@doc ~S"""
Not to be confused with Enum.map/2, returns a `Map` with the key for each
item derived by the return of `key_function(item)` or `key_function(item, index)`
## Examples
iex> to_map(["Elixir", "is", "cool"], &String.reverse/1)
%{"si" => "is", "looc" => "cool", "rixilE" => "Elixir"}
iex> to_map(["hello", "world", "how", "are", "you"], fn (_, index) -> index end)
%{0 => "hello", 1 => "world", 2 => "how", 3 => "are", 4 => "you"}
"""
@spec to_map(Enumerable.t, function) :: Map.t
def to_map(enum, key_function) when is_function(key_function, 1), do:
enum
|> Stream.map(fn item -> {key_function.(item), item} end)
|> Enum.into(%{})
def to_map(enum, key_function) when is_function(key_function, 2), do:
enum
|> Stream.with_index
|> Stream.map(fn {item, item_index} -> {key_function.(item, item_index), item} end)
|> Enum.into(%{})
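# Sketch: indexing hypothetical records by id with the arity-1 clause:
#
#     users = [%{id: 10, name: "ada"}, %{id: 11, name: "grace"}]
#     to_map(users, & &1.id)
#     #=> %{10 => %{id: 10, name: "ada"}, 11 => %{id: 11, name: "grace"}}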
@doc ~S"""
Returns true if the given `fun` returns a falsy value for all
of the items in the enumerable.
Iteration stops at the first invocation that returns a
truthy value (not `false` or `nil`). Invokes an identity
function if one is not provided.
## Examples
iex> test_list = [1, 2, 3]
...> none?(test_list, &(&1 == 0))
true
...> none?([0 | test_list], &(&1 == 0))
false
iex> none?([])
true
iex> none?([nil])
true
iex> none?([true])
false
"""
@spec none?(Enumerable.t, function) :: boolean
def none?(enum, fun \\ &(&1))
def none?(enum, fun), do: !Enum.any?(enum, fun)
@doc """
Deep-converts `enum` to a list.
## Examples
iex> deep_to_list(%{"game_types" => ["card"], "rulesets_known" => [%{"poker" => "texas hold 'em", "hearts" => true}]})
[{"game_types", ["card"]}, {"rulesets_known", [[{"hearts", true}, {"poker", "texas hold 'em"}]]}]
"""
@spec deep_to_list(Enumerable.t(Enumerable.t)) :: list(list())
def deep_to_list(enum) do
Enum.map(enum, fn
{key, value} ->
if Enumerable.impl_for(value),
do: {key, deep_to_list(value)},
else: {key, value}
value ->
if Enumerable.impl_for(value),
do: deep_to_list(value),
else: value
end)
end
@doc ~S"""
Generates a list of all possible permutations of the given enumerable.
## Examples
iex> permutations(~w"hi ho hey!")
[
["hi", "ho", "hey!"],
["hi", "hey!", "ho"],
["ho", "hi", "hey!"],
["ho", "hey!", "hi"],
["hey!", "hi", "ho"],
["hey!", "ho", "hi"]
]
"""
@spec permutations(Enumerable.t) :: [[any]]
def permutations(enum), do: PassiveSupport.Stream.permutations(enum) |> Enum.to_list
end
|
lib/passive_support/base/enum.ex
| 0.843686
| 0.546496
|
enum.ex
|
starcoder
|
defmodule AWS.TimestreamWrite do
@moduledoc """
Amazon Timestream is a fast, scalable, fully managed time series database
service that makes it easy to store and analyze trillions of time series data
points per day.
With Timestream, you can easily store and analyze IoT sensor data to derive
insights from your IoT applications. You can analyze industrial telemetry to
streamline equipment management and maintenance. You can also store and analyze
log data and metrics to improve the performance and availability of your
applications. Timestream is built from the ground up to effectively ingest,
process, and store time series data. It organizes data to optimize query
processing. It automatically scales based on the volume of data ingested and on
the query volume to ensure you receive optimal performance while inserting and
querying data. As your data grows over time, Timestream’s adaptive query
processing engine spans across storage tiers to provide fast analysis while
reducing costs.
"""
@doc """
Creates a new Timestream database.
If the KMS key is not specified, the database will be encrypted with a
Timestream managed KMS key located in your account. Refer to [AWS managed KMS keys](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk)
for more info. Service quotas apply. For more information, see [Access Management](https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html)
in the Timestream Developer Guide.
"""
def create_database(client, input, options \\ []) do
request(client, "CreateDatabase", input, options)
end
@doc """
The CreateTable operation adds a new table to an existing database in your
account.
In an AWS account, table names must be unique within each Region if
they are in the same database. You may have identical table names in the same
Region if the tables are in separate databases. While creating the table, you
must specify the table name, database name, and the retention properties.
Service quotas apply. For more information, see [Access Management](https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html)
in the Timestream Developer Guide.
"""
def create_table(client, input, options \\ []) do
request(client, "CreateTable", input, options)
end
@doc """
Deletes a given Timestream database.
*This is an irreversible operation. After a database is deleted, the time series
data from its tables cannot be recovered.*
All tables in the database must be deleted first, or a ValidationException error
will be thrown.
"""
def delete_database(client, input, options \\ []) do
request(client, "DeleteDatabase", input, options)
end
@doc """
Deletes a given Timestream table.
This is an irreversible operation. After a Timestream database table is deleted,
the time series data stored in the table cannot be recovered.
"""
def delete_table(client, input, options \\ []) do
request(client, "DeleteTable", input, options)
end
@doc """
Returns information about the database, including the database name, time that
the database was created, and the total number of tables found within the
database.
Service quotas apply. For more information, see [Access Management](https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html)
in the Timestream Developer Guide.
"""
def describe_database(client, input, options \\ []) do
request(client, "DescribeDatabase", input, options)
end
@doc """
DescribeEndpoints returns a list of available endpoints to make Timestream API
calls against.
This API is available through both Write and Query.
Because Timestream’s SDKs are designed to transparently work with the service’s
architecture, including the management and mapping of the service endpoints, *it
is not recommended that you use this API unless*:
* Your application uses a programming language that does not yet
have SDK support
* You require better control over the client-side implementation
For detailed information on how to use DescribeEndpoints, see [The Endpoint Discovery Pattern and REST
APIs](https://docs.aws.amazon.com/timestream/latest/developerguide/Using-API.endpoint-discovery.html).
"""
def describe_endpoints(client, input, options \\ []) do
request(client, "DescribeEndpoints", input, options)
end
@doc """
Returns information about the table, including the table name, database name,
retention duration of the memory store and the magnetic store.
Service quotas apply. For more information, see [Access Management](https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html)
in the Timestream Developer Guide.
"""
def describe_table(client, input, options \\ []) do
request(client, "DescribeTable", input, options)
end
@doc """
Returns a list of your Timestream databases.
Service quotas apply. For more information, see [Access Management](https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html)
in the Timestream Developer Guide.
"""
def list_databases(client, input, options \\ []) do
request(client, "ListDatabases", input, options)
end
@doc """
A list of tables, along with the name, status and retention properties of each
table.
"""
def list_tables(client, input, options \\ []) do
request(client, "ListTables", input, options)
end
@doc """
List all tags on a Timestream resource.
"""
def list_tags_for_resource(client, input, options \\ []) do
request(client, "ListTagsForResource", input, options)
end
@doc """
Associate a set of tags with a Timestream resource.
You can then activate these user-defined tags so that they appear on the Billing
and Cost Management console for cost allocation tracking.
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Removes the association of tags from a Timestream resource.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@doc """
Modifies the KMS key for an existing database.
While updating the database, you must specify the database name and the
identifier of the new KMS key to be used (`KmsKeyId`). If there are any
concurrent `UpdateDatabase` requests, first writer wins.
"""
def update_database(client, input, options \\ []) do
request(client, "UpdateDatabase", input, options)
end
@doc """
Modifies the retention duration of the memory store and magnetic store for your
Timestream table.
Note that the change in retention duration takes effect immediately. For
example, if the retention period of the memory store was initially set to 2
hours and then changed to 24 hours, the memory store will be capable of holding
24 hours of data, but will be populated with 24 hours of data 22 hours after
this change was made. Timestream does not retrieve data from the magnetic store
to populate the memory store.
Service quotas apply. For more information, see [Access Management](https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html)
in the Timestream Developer Guide.
"""
def update_table(client, input, options \\ []) do
request(client, "UpdateTable", input, options)
end
@doc """
The WriteRecords operation enables you to write your time series data into
Timestream.
You can specify a single data point or a batch of data points to be inserted
into the system. Timestream offers a flexible schema that automatically detects
the column names and data types for your Timestream tables based on the
dimension names and data types of the data points you specify when invoking
writes into the database. Timestream supports eventually consistent read
semantics. This means that when you query data immediately after writing a batch
of data into Timestream, the query results might not reflect the results of a
recently completed write operation. The results may also include some stale
data. If you repeat the query request after a short time, the results should
return the latest data. Service quotas apply. For more information, see [Access Management](https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html)
in the Timestream Developer Guide.
"""
def write_records(client, input, options \\ []) do
request(client, "WriteRecords", input, options)
end
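# Sketch of a WriteRecords input (database, table, and record values are
# hypothetical; see the Timestream API reference for the full record shape):
#
#     input = %{
#       "DatabaseName" => "sensors",
#       "TableName" => "readings",
#       "Records" => [
#         %{
#           "Dimensions" => [%{"Name" => "device_id", "Value" => "dev-1"}],
#           "MeasureName" => "temperature",
#           "MeasureValue" => "21.5",
#           "MeasureValueType" => "DOUBLE",
#           "Time" => "1609459200000",
#           "TimeUnit" => "MILLISECONDS"
#         }
#       ]
#     }
#
#     write_records(client, input)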
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "timestream"}
host = build_host("ingest.timestream", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.0"},
{"X-Amz-Target", "Timestream_20181101.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/timestream_write.ex
| 0.895916
| 0.684877
|
timestream_write.ex
|
starcoder
|
defmodule DynamicInputsFor do
@moduledoc """
Helpers to create HTML forms with nested fields that can be created and deleted dynamically from
the client.
The functions in this module extend the functionality of `Phoenix.HTML.Form.inputs_for/4` to
return the html of a form with nested fields grouped in HTML tags with a class called `fields` and
the necessary anchors to add javascript that allows to add more fields dynamically from the
client.
"""
import Phoenix.HTML
import Phoenix.HTML.Form
import Phoenix.HTML.Tag
@doc """
It works like `Phoenix.HTML.Form.inputs_for/4`, but it also returns an HTML tag with the
information needed to add and delete fields from the client. The template argument is used to
generate the new nested fields.
The `dynamic_add_button/3` function generates the button to add the fields.
## Options
* `:wrapper_tag` - HTML tag name to wrap the fields.
* `:wrapper_attrs` - HTML attributes for the wrapper.
* `:only_mark_deleted` - create an input called `delete` with the value `"true"` and add the
`deleted-fields` class to the wrapper, keeping the fields visible so you can choose how to
handle them after validation errors. By default, when a group of nested inputs is deleted,
its content is removed and the wrapper is hidden (to avoid HTML validations), and the input
called `delete` with the value `"true"` is created.
See `Phoenix.HTML.Form.inputs_for/4` for other options.
"""
def dynamic_inputs_for(form, association, template, options \\ [], fun)
when is_atom(association) or is_binary(association) do
{wrapper_attrs, options} = Keyword.pop(options, :wrapper_attrs, [])
{wrapper_tag, options} = Keyword.pop(options, :wrapper_tag, :div)
{only_mark_deleted, options} = Keyword.pop(options, :only_mark_deleted, false)
wrapper_attrs = Keyword.update(wrapper_attrs, :class, "fields", &("fields " <> &1))
wrapper_attrs = Keyword.put(wrapper_attrs, :data_assoc, association)
# Remove the parameters of the form to force that the prepended values are always rendered
form_template =
form.source
|> form.impl.to_form(
%{form | params: %{}},
association,
Keyword.put(options, :prepend, [template])
)
|> hd()
html_template =
wrapper_tag
|> content_tag([fun.(form_template)], wrapper_attrs)
|> safe_to_string()
[
inputs_for(form, association, options, fn form_assoc ->
wrapper_attrs = Keyword.put(wrapper_attrs, :data_assoc_index, form_assoc.index)
fields_for_association(form_assoc, fun, wrapper_tag, wrapper_attrs, only_mark_deleted)
end),
content_tag(wrapper_tag, [],
id: "dynamic_info_#{association}",
style: "display: none;",
data: [
assoc: [
template: html_template,
id: form_template.id,
name: form_template.name,
only_mark_deleted: only_mark_deleted
],
assoc: association
]
)
]
end
defp fields_for_association(
%Phoenix.HTML.Form{params: %{"delete" => "true"}} = form,
fun,
wrapper_tag,
wrapper_attrs,
true
) do
wrapper_attrs = Keyword.update(wrapper_attrs, :class, "", &(&1 <> " deleted-fields"))
hidden_input = hidden_input(form, :delete)
content_tag(wrapper_tag, [fun.(form), hidden_input], wrapper_attrs)
end
defp fields_for_association(
%Phoenix.HTML.Form{params: %{"delete" => "true"}} = form,
_fun,
wrapper_tag,
wrapper_attrs,
_only_mark_deleted
) do
wrapper_attrs = Keyword.update(wrapper_attrs, :class, "", &(&1 <> " deleted-fields"))
wrapper_attrs = Keyword.put(wrapper_attrs, :style, "display: none;")
hidden_input = hidden_input(form, :delete)
content_tag(wrapper_tag, [hidden_input], wrapper_attrs)
end
defp fields_for_association(form, fun, wrapper_tag, wrapper_attrs, _only_mark_deleted) do
content_tag(wrapper_tag, [fun.(form)], wrapper_attrs)
end
@doc """
Creates a button to add more nested fields to the fields generated with `dynamic_inputs_for/5`.
"""
def dynamic_add_button(association, content)
when is_atom(association) or is_binary(association) do
dynamic_add_button(association, content, [])
end
def dynamic_add_button(association, attrs, do: block)
when (is_atom(association) or is_binary(association)) and is_list(attrs) do
dynamic_add_button(association, block, attrs)
end
def dynamic_add_button(association, content, attrs)
when (is_atom(association) or is_binary(association)) and is_list(attrs) do
content_tag(
:button,
content,
Keyword.merge(attrs, type: "button", data_assoc: association, data_assoc_add: "")
)
end
@doc """
Creates a button to mark association for deletion. When the button is pressed, a hidden input
called `delete` is created and set to `"true"`. For this button to work, it must be called within
the function that is passed to `dynamic_inputs_for/5`.
"""
def dynamic_delete_button(content) do
dynamic_delete_button(content, [])
end
def dynamic_delete_button(attrs, do: block) when is_list(attrs) do
dynamic_delete_button(block, attrs)
end
def dynamic_delete_button(content, attrs) when is_list(attrs) do
content_tag(
:button,
content,
Keyword.merge(attrs, type: "button", data_assoc_delete: "")
)
end
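# Sketch of typical template usage (the :phones association, %Phone{} template
# struct, and :number field are hypothetical):
#
#     <%= dynamic_inputs_for f, :phones, %Phone{}, fn fp -> %>
#       <%= text_input fp, :number %>
#       <%= dynamic_delete_button "Remove" %>
#     <% end %>
#     <%= dynamic_add_button :phones, "Add phone" %>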
end
|
lib/dynamic_inputs_for.ex
| 0.86031
| 0.497803
|
dynamic_inputs_for.ex
|
starcoder
|
defmodule Screens.Headways do
@moduledoc false
alias Screens.Schedules.Schedule
@dayparts [
{:late_night, ~T[00:00:00], :close},
{:early_morning, :open, ~T[06:30:00]},
{:am_peak, ~T[06:30:00], ~T[09:00:00]},
{:midday, ~T[09:00:00], ~T[15:30:00]},
{:pm_peak, ~T[15:30:00], ~T[18:30:00]},
{:evening, ~T[18:30:00], ~T[20:00:00]},
{:late_night, ~T[20:00:00], :midnight}
]
def by_route_id(route_id, stop_id, direction_id, service_level, time \\ DateTime.utc_now()) do
current_schedule = schedule_with_override(time, service_level)
current_daypart = daypart(time, stop_id, direction_id)
headway(route_id, current_schedule, current_daypart)
end
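# Sketch: a lookup at normal service (stop id and direction are hypothetical;
# the result depends on the schedule and daypart at the time of the call,
# and is nil overnight):
#
#     Screens.Headways.by_route_id("Green-B", "71151", 0, 1)
#     #=> 6 (for example, during the weekday AM peak)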
defp headway(_, _, :overnight), do: nil
# Weekday
defp headway("Green-B", :weekday, :early_morning), do: 10
defp headway("Green-B", :weekday, :am_peak), do: 6
defp headway("Green-B", :weekday, :midday), do: 8
defp headway("Green-B", :weekday, :pm_peak), do: 6
defp headway("Green-B", :weekday, :evening), do: 7
defp headway("Green-B", :weekday, :late_night), do: 9
defp headway("Green-C", :weekday, :early_morning), do: 10
defp headway("Green-C", :weekday, :am_peak), do: 6
defp headway("Green-C", :weekday, :midday), do: 9
defp headway("Green-C", :weekday, :pm_peak), do: 7
defp headway("Green-C", :weekday, :evening), do: 7
defp headway("Green-C", :weekday, :late_night), do: 10
defp headway("Green-D", :weekday, :early_morning), do: 11
defp headway("Green-D", :weekday, :am_peak), do: 6
defp headway("Green-D", :weekday, :midday), do: 8
defp headway("Green-D", :weekday, :pm_peak), do: 6
defp headway("Green-D", :weekday, :evening), do: 8
defp headway("Green-D", :weekday, :late_night), do: 11
defp headway("Green-E", :weekday, :early_morning), do: 10
defp headway("Green-E", :weekday, :am_peak), do: 6
defp headway("Green-E", :weekday, :midday), do: 8
defp headway("Green-E", :weekday, :pm_peak), do: 7
defp headway("Green-E", :weekday, :evening), do: 9
defp headway("Green-E", :weekday, :late_night), do: 9
# Saturday
defp headway("Green-B", :saturday, :early_morning), do: 14
defp headway("Green-B", :saturday, _), do: 9
defp headway("Green-C", :saturday, :early_morning), do: 14
defp headway("Green-C", :saturday, _), do: 9
defp headway("Green-D", :saturday, :early_morning), do: 14
defp headway("Green-D", :saturday, :am_peak), do: 13
defp headway("Green-D", :saturday, _), do: 9
defp headway("Green-E", :saturday, :early_morning), do: 13
defp headway("Green-E", :saturday, _), do: 10
# Sunday
defp headway("Green-B", :sunday, :early_morning), do: 12
defp headway("Green-B", :sunday, :am_peak), do: 12
defp headway("Green-B", :sunday, _), do: 9
defp headway("Green-C", :sunday, :early_morning), do: 12
defp headway("Green-C", :sunday, _), do: 10
defp headway("Green-D", :sunday, :early_morning), do: 14
defp headway("Green-D", :sunday, :am_peak), do: 13
defp headway("Green-D", :sunday, _), do: 11
defp headway("Green-E", :sunday, :early_morning), do: 15
defp headway("Green-E", :sunday, _), do: 12
defp schedule_with_override(time, service_level) do
# Level 3 turns weekday into Saturday schedule
# Level 4 is always Sunday schedule
# Otherwise, use normal schedule
case {service_level, schedule(time)} do
{3, :weekday} -> :saturday
{4, _} -> :sunday
{_, schedule} -> schedule
end
end
defp schedule(utc_time) do
# Note: This is a hack.
# Split the service day at 3am by shifting to Pacific Time.
# Midnight at Pacific Time is always 3am here.
{:ok, pacific_time} = DateTime.shift_zone(utc_time, "America/Los_Angeles")
service_date = DateTime.to_date(pacific_time)
case Date.day_of_week(service_date) do
7 -> :sunday
6 -> :saturday
_ -> :weekday
end
end
defp daypart(utc_time, stop_id, direction_id) do
{:ok, local_time} = DateTime.shift_zone(utc_time, "America/New_York")
local_time = DateTime.to_time(local_time)
{daypart, _, _} =
Enum.find(
@dayparts,
{:overnight, nil, nil},
&match(&1, local_time, {stop_id, direction_id})
)
daypart
end
defp match({daypart, :open, t_end}, local_time, {stop_id, direction_id}) do
t_start = service_start(stop_id, direction_id)
case t_start do
%Time{} -> match({daypart, t_start, t_end}, local_time, nil)
nil -> false
end
end
defp match({daypart, t_start, :close}, local_time, {stop_id, direction_id}) do
t_end = service_end(stop_id, direction_id)
case t_end do
%Time{} -> match({daypart, t_start, t_end}, local_time, nil)
nil -> false
end
end
defp match({_daypart, t_start, :midnight}, local_time, _) do
Time.compare(t_start, local_time) == :lt
end
defp match({_daypart, t_start, t_end}, local_time, _) do
Enum.member?([:lt, :eq], Time.compare(t_start, local_time)) and
Time.compare(local_time, t_end) == :lt
end
defp service_start_or_end(stop_id, direction_id, min_or_max_fn) do
with {:ok, schedules} <- Schedule.fetch(%{stop_ids: [stop_id], direction_id: direction_id}),
[_ | _] = arrival_times <- get_arrival_times(schedules) do
{:ok, local_dt} =
arrival_times
|> min_or_max_fn.()
|> DateTime.shift_zone("America/New_York")
DateTime.to_time(local_dt)
else
_ -> nil
end
end
defp get_arrival_times(schedules) do
schedules
|> Enum.map(& &1.arrival_time)
|> Enum.reject(&is_nil(&1))
end
# Time to stop showing Good Night screen if there are no predictions
defp service_start(stop_id, direction_id) do
service_start_or_end(stop_id, direction_id, &Enum.min/1)
end
# Time to begin showing Good Night screen if there are no predictions
defp service_end(stop_id, direction_id) do
service_start_or_end(stop_id, direction_id, &Enum.max/1)
end
end
|
lib/screens/headways.ex
| 0.68437
| 0.590897
|
headways.ex
|
starcoder
|
defmodule ExNcurses do
alias ExNcurses.{Getstr, Server}
@moduledoc """
ExNcurses lets Elixir programs create text-based user interfaces using ncurses.
Aside from keyboard input, ExNcurses looks almost like a straight translation of the C-based
ncurses API. ExNcurses sends key events via messages. See `listen/0` for this.
Ncurses documentation can be found at:
* [The ncurses project page](https://www.gnu.org/software/ncurses/)
* [opengroup.org](http://pubs.opengroup.org/onlinepubs/7908799/xcurses/curses.h.html)
"""
@type pair :: non_neg_integer()
@type color_name :: :black | :red | :green | :yellow | :blue | :magenta | :cyan | :white
@type color :: 0..7 | color_name()
@type window :: reference()
def fun(:F1), do: 265
def fun(:F2), do: 266
def fun(:F3), do: 267
def fun(:F4), do: 268
def fun(:F5), do: 269
def fun(:F6), do: 270
def fun(:F7), do: 271
def fun(:F8), do: 272
def fun(:F9), do: 273
def fun(:F10), do: 274
def fun(:F11), do: 275
def fun(:F12), do: 276
@doc """
Initialize ncurses on a terminal. This should be called before any of the
other functions.
By default, ncurses uses the current terminal. If you're debugging or want to
have IEx available while in ncurses-mode you can also have it use a different
window. One way of doing this is to open up another terminal session. At the
prompt, run `tty`. Then pass the path that it returns to this function.
Currently input doesn't work in this mode.
TODO: Return stdscr (a window)
"""
@spec initscr(String.t()) :: :ok
defdelegate initscr(termname \\ ""), to: Server
@doc """
Stop using ncurses and clean the terminal back up.
"""
@spec endwin() :: :ok
defdelegate endwin(), to: Server
@doc """
Print the specified string and advance the cursor.
Unlike the ncurses printw, this version doesn't support format
specification. It is really the same as `addstr/1`.
"""
@spec printw(String.t()) :: :ok
def printw(s), do: Server.invoke(:printw, {s})
@spec addstr(String.t()) :: :ok
def addstr(s), do: Server.invoke(:addstr, {s})
@spec mvprintw(non_neg_integer(), non_neg_integer(), String.t()) :: :ok
def mvprintw(y, x, s), do: Server.invoke(:mvprintw, {y, x, s})
@spec mvaddstr(non_neg_integer(), non_neg_integer(), String.t()) :: :ok
def mvaddstr(y, x, s), do: Server.invoke(:mvaddstr, {y, x, s})
@doc """
Draw a border around the current window.
"""
@spec border() :: :ok
def border(), do: Server.invoke(:border, {})
@doc """
Draw a wborder around a specific window.
"""
@spec wborder(window()) :: :ok
def wborder(w), do: Server.invoke(:wborder, {w})
@doc """
Move the cursor to the new location.
"""
@spec mvcur(non_neg_integer(), non_neg_integer(), non_neg_integer(), non_neg_integer()) :: :ok
def mvcur(oldrow, oldcol, newrow, newcol),
do: Server.invoke(:mvcur, {oldrow, oldcol, newrow, newcol})
@doc """
Refresh the display. This needs to be called after any of the print or
addstr functions to render their changes.
"""
@spec refresh() :: :ok
def refresh(), do: Server.invoke(:refresh)
@spec wrefresh(window()) :: :ok
def wrefresh(w), do: Server.invoke(:wrefresh, {w})
@doc """
Clear the screen
"""
@spec clear() :: :ok
def clear(), do: Server.invoke(:clear)
@spec wclear(window()) :: :ok
def wclear(w), do: Server.invoke(:wclear, {w})
@spec raw() :: :ok
def raw(), do: Server.invoke(:raw)
@spec cbreak() :: :ok
def cbreak(), do: Server.invoke(:cbreak)
@spec nocbreak() :: :ok
def nocbreak(), do: Server.invoke(:nocbreak)
@spec noecho() :: :ok
def noecho(), do: Server.invoke(:noecho)
@spec beep() :: :ok
def beep(), do: Server.invoke(:beep)
@doc """
Set the cursor mode
* 0 = Invisible
* 1 = Terminal-specific normal mode
* 2 = Terminal-specific high visibility mode
"""
@spec curs_set(0..2) :: :ok
def curs_set(visibility), do: Server.invoke(:curs_set, {visibility})
@doc """
Return the cursor's column.
"""
@spec getx() :: non_neg_integer()
def getx(), do: Server.invoke(:getx)
@doc """
Return the cursor's row.
"""
@spec gety() :: non_neg_integer()
def gety(), do: Server.invoke(:gety)
@spec flushinp() :: :ok
def flushinp(), do: Server.invoke(:flushinp)
@doc """
Enable the terminal's keypad to capture function keys as single characters.
"""
@spec keypad() :: :ok
def keypad(), do: Server.invoke(:keypad)
@doc """
Enable scrolling on `stdscr`.
"""
@spec scrollok() :: :ok
def scrollok(), do: Server.invoke(:scrollok)
@doc """
Enable the use of colors.
"""
@spec start_color() :: :ok
def start_color(), do: Server.invoke(:start_color)
@doc """
Return whether the display supports color
"""
@spec has_colors() :: boolean()
def has_colors(), do: Server.invoke(:has_colors)
@doc """
Initialize a foreground/background color pair
"""
@spec init_pair(pair(), color(), color()) :: :ok
def init_pair(pair, f, b),
do: Server.invoke(:init_pair, {pair, color_to_number(f), color_to_number(b)})
@doc """
Turn on the bit-masked attribute values passed in on the current screen.
"""
@spec attron(pair()) :: :ok
def attron(pair), do: Server.invoke(:attron, {pair})
@doc """
Turn off the bit-masked attribute values passed in on the current screen.
"""
@spec attroff(pair()) :: :ok
def attroff(pair), do: Server.invoke(:attroff, {pair})
@doc """
Set a scrollable region on the `stdscr`
"""
@spec setscrreg(non_neg_integer(), non_neg_integer()) :: :ok
def setscrreg(top, bottom), do: Server.invoke(:setscrreg, {top, bottom})
@doc """
Create a new window with number of nlines, number columns, starting y position, and
starting x position.
"""
@spec newwin(non_neg_integer(), non_neg_integer(), non_neg_integer(), non_neg_integer()) ::
window()
def newwin(nlines, ncols, begin_y, begin_x),
do: Server.invoke(:newwin, {nlines, ncols, begin_y, begin_x})
@doc """
Delete a window `w`. This cleans up all memory resources associated with it. The application
must delete subwindows before deleting the main window.
"""
@spec delwin(window()) :: :ok
def delwin(w), do: Server.invoke(:delwin, {w})
@doc """
Add a string to a window `win`. This function will advance the cursor position,
perform special character processing, and perform wrapping.
"""
@spec waddstr(window(), String.t()) :: :ok
def waddstr(win, str), do: Server.invoke(:waddstr, {win, str})
@doc """
Move the cursor associated with the specified window to (y, x) relative to the window's origin.
"""
@spec wmove(window(), non_neg_integer(), non_neg_integer()) :: :ok
def wmove(win, y, x), do: Server.invoke(:wmove, {win, y, x})
@doc """
Move the cursor for the current window to (y, x) relative to the window's origin.
"""
@spec move(non_neg_integer(), non_neg_integer()) :: :ok
def move(y, x), do: Server.invoke(:move, {y, x})
@doc """
Return the number of visible columns
"""
@spec cols() :: non_neg_integer()
def cols(), do: Server.invoke(:cols)
@doc """
Return the number of visible lines
"""
@spec lines() :: non_neg_integer()
def lines(), do: Server.invoke(:lines)
@doc """
Poll for a key press.
See `listen/0` for a better way of getting keyboard input.
"""
def getch() do
listen()
c =
receive do
{:ex_ncurses, :key, key} -> key
end
stop_listening()
c
end
@doc """
Poll for a string.
"""
def getstr() do
listen()
noecho()
str = getstr_loop(Getstr.init(gety(), getx(), 60))
stop_listening()
str
end
defp getstr_loop(state) do
receive do
{:ex_ncurses, :key, key} ->
case Getstr.process(state, key) do
{:done, str} ->
str
{:not_done, new_state} ->
getstr_loop(new_state)
end
end
end
# Common initialization
def n_begin() do
initscr()
raw()
cbreak()
end
def n_end() do
nocbreak()
endwin()
end
@doc """
Listen for events.
Events will be sent as messages of the form:
`{ex_ncurses, :key, key}`
"""
defdelegate listen(), to: Server
@doc """
Stop listening for events
"""
defdelegate stop_listening(), to: Server
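# A minimal end-to-end sketch (requires a real terminal):
#
#     ExNcurses.n_begin()
#     ExNcurses.noecho()
#     ExNcurses.mvprintw(0, 0, "Press any key to exit")
#     ExNcurses.refresh()
#     ExNcurses.getch()
#     ExNcurses.n_end()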
defp color_to_number(x) when is_integer(x), do: x
defp color_to_number(:black), do: 0
defp color_to_number(:red), do: 1
defp color_to_number(:green), do: 2
defp color_to_number(:yellow), do: 3
defp color_to_number(:blue), do: 4
defp color_to_number(:magenta), do: 5
defp color_to_number(:cyan), do: 6
defp color_to_number(:white), do: 7
end
|
lib/ex_ncurses.ex
| 0.812979
| 0.491578
|
ex_ncurses.ex
|
starcoder
|
defmodule Ecto.Adapters.SQL.Sandbox do
@moduledoc ~S"""
A pool for concurrent transactional tests.
The sandbox pool is implemented on top of an ownership mechanism.
When started, the pool is in automatic mode, which means the
repository will automatically check connections out as with any
other pool.
The `mode/2` function can be used to change the pool mode to
manual or shared. In both modes, the connection must be explicitly
checked out before use. When explicit checkouts are made, the
sandbox will wrap the connection in a transaction by default and
control who has access to it. This means developers have a safe
mechanism for running concurrent tests against the database.
## Database support
While both PostgreSQL and MySQL support SQL Sandbox, only PostgreSQL
supports concurrent tests while running the SQL Sandbox. Therefore, do
not run concurrent tests with MySQL as you may run into deadlocks due to
its transaction implementation.
## Example
The first step is to configure your database to use the
`Ecto.Adapters.SQL.Sandbox` pool. You set those options in your
`config/config.exs` (or preferably `config/test.exs`) if you
haven't yet:
config :my_app, Repo,
pool: Ecto.Adapters.SQL.Sandbox
Now with the test database properly configured, you can write
transactional tests:
# At the end of your test_helper.exs
# Set the pool mode to manual for explicit checkouts
Ecto.Adapters.SQL.Sandbox.mode(TestRepo, :manual)
defmodule PostTest do
# Once the mode is manual, tests can also be async
use ExUnit.Case, async: true
setup do
# Explicitly get a connection before each test
:ok = Ecto.Adapters.SQL.Sandbox.checkout(TestRepo)
end
test "create post" do
# Use the repository as usual
assert %Post{} = TestRepo.insert!(%Post{})
end
end
## Collaborating processes
The example above is straightforward because we have only
a single process using the database connection. However,
sometimes a test may need to interact with multiple processes,
all using the same connection so they all belong to the same
transaction.
Before we discuss solutions, let's see what happens if we try
to use a connection from a new process without explicitly
checking it out first:
setup do
# Explicitly get a connection before each test
:ok = Ecto.Adapters.SQL.Sandbox.checkout(TestRepo)
end
test "create two posts, one sync, another async" do
task = Task.async(fn ->
TestRepo.insert!(%Post{title: "async"})
end)
assert %Post{} = TestRepo.insert!(%Post{title: "sync"})
assert %Post{} = Task.await(task)
end
The test above will fail with an error similar to:
** (RuntimeError) cannot find ownership process for #PID<0.35.0>
That's because the `setup` block is checking out the connection only
for the test process. Once we spawn a Task, there is no connection
assigned to it and it will fail.
The sandbox module provides two ways of doing so, via allowances or
by running in shared mode.
### Allowances
The idea behind allowances is that you can explicitly tell a process
which checked out connection it should use, allowing multiple processes
to collaborate over the same connection. Let's give it a try:
test "create two posts, one sync, another async" do
parent = self()
task = Task.async(fn ->
Ecto.Adapters.SQL.Sandbox.allow(TestRepo, parent, self())
TestRepo.insert!(%Post{title: "async"})
end)
assert %Post{} = TestRepo.insert!(%Post{title: "sync"})
assert %Post{} = Task.await(task)
end
And that's it, by calling `allow/3`, we are explicitly assigning
the parent's connection (i.e. the test process' connection) to
the task.
Because allowances use an explicit mechanism, their advantage
is that you can still run your tests in async mode. The downside
is that you need to explicitly control and allow every single
process. This is not always possible. In such cases, you will
want to use shared mode.
### Shared mode
Shared mode allows a process to share its connection with any other
process automatically, without relying on explicit allowances.
Let's change the example above to use shared mode:
setup do
# Explicitly get a connection before each test
:ok = Ecto.Adapters.SQL.Sandbox.checkout(TestRepo)
# Setting the shared mode must be done only after checkout
Ecto.Adapters.SQL.Sandbox.mode(TestRepo, {:shared, self()})
end
test "create two posts, one sync, another async" do
task = Task.async(fn ->
TestRepo.insert!(%Post{title: "async"})
end)
assert %Post{} = TestRepo.insert!(%Post{title: "sync"})
assert %Post{} = Task.await(task)
end
By calling `mode({:shared, self()})`, any process that needs
to talk to the database will now use the same connection as the
one checked out by the test process during the `setup` block.
Make sure to always check a connection out before setting the mode
to `{:shared, self()}`.
The advantage of shared mode is that by calling a single function,
you will ensure all upcoming processes and operations will use that
shared connection, without a need to explicitly allow them. The
downside is that tests can no longer run concurrently in shared mode.
### Summing up
There are two mechanisms for explicit ownerships:
* Using allowances - requires explicit allowances via `allow/3`.
Tests may run concurrently.
* Using shared mode - does not require explicit allowances.
Tests cannot run concurrently.
"""
defmodule Connection do
@moduledoc false
@behaviour DBConnection
def connect({conn_mod, state}) do
case conn_mod.init(state) do
{:ok, state} -> {:ok, {conn_mod, state}}
{:error, _} = err -> err
end
end
def disconnect(err, {conn_mod, state}) do
conn_mod.disconnect(err, state)
end
def checkout(state), do: proxy(:checkout, state, [])
def checkin(state), do: proxy(:checkin, state, [])
def ping(state), do: proxy(:ping, state, [])
def handle_begin(opts, state) do
opts = [mode: :savepoint] ++ opts
proxy(:handle_begin, state, [opts])
end
def handle_commit(opts, state) do
opts = [mode: :savepoint] ++ opts
proxy(:handle_commit, state, [opts])
end
def handle_rollback(opts, state) do
opts = [mode: :savepoint] ++ opts
proxy(:handle_rollback, state, [opts])
end
def handle_prepare(query, opts, state),
do: proxy(:handle_prepare, state, [query, opts])
def handle_execute(query, params, opts, state),
do: proxy(:handle_execute, state, [query, params, opts])
def handle_execute_close(query, params, opts, state),
do: proxy(:handle_execute_close, state, [query, params, opts])
def handle_close(query, opts, state),
do: proxy(:handle_close, state, [query, opts])
def handle_info(msg, state),
do: proxy(:handle_info, state, [msg])
defp proxy(fun, {conn_mod, state}, args) do
result = apply(conn_mod, fun, args ++ [state])
pos = :erlang.tuple_size(result)
:erlang.setelement(pos, result, {conn_mod, :erlang.element(pos, result)})
end
end
defmodule Pool do
@moduledoc false
@behaviour DBConnection.Pool
def start_link(_module, _opts) do
raise "should never be invoked"
end
def child_spec(_module, _opts, _child_opts) do
raise "should never be invoked"
end
def checkout(pool, opts) do
pool_mod = opts[:sandbox_pool]
case pool_mod.checkout(pool, opts) do
{:ok, pool_ref, conn_mod, conn_state} ->
case conn_mod.handle_begin([mode: :transaction]++opts, conn_state) do
{:ok, _, conn_state} ->
{:ok, pool_ref, Connection, {conn_mod, conn_state}}
{_error_or_disconnect, err, conn_state} ->
pool_mod.disconnect(pool_ref, err, conn_state, opts)
end
error ->
error
end
end
def checkin(pool_ref, {conn_mod, conn_state}, opts) do
pool_mod = opts[:sandbox_pool]
case conn_mod.handle_rollback([mode: :transaction]++opts, conn_state) do
{:ok, _, conn_state} ->
pool_mod.checkin(pool_ref, conn_state, opts)
{_error_or_disconnect, err, conn_state} ->
pool_mod.disconnect(pool_ref, err, conn_state, opts)
end
end
def disconnect(owner, exception, {_conn_mod, conn_state}, opts) do
opts[:sandbox_pool].disconnect(owner, exception, conn_state, opts)
end
def stop(owner, reason, {_conn_mod, conn_state}, opts) do
opts[:sandbox_pool].stop(owner, reason, conn_state, opts)
end
end
@doc """
Sets the mode for the `repo` pool.
The mode can be `:auto`, `:manual` or `:shared`.
"""
def mode(repo, mode)
when mode in [:auto, :manual]
when elem(mode, 0) == :shared and is_pid(elem(mode, 1)) do
{name, opts} = repo.__pool__
if opts[:pool] != DBConnection.Ownership do
raise """
cannot configure sandbox with pool #{inspect opts[:pool]}.
To use the SQL Sandbox, configure your repository pool as:
pool: #{inspect __MODULE__}
"""
end
# If the mode is set to anything but shared, let's
# automatically checkin the current connection to
# force it to act according to the chosen mode.
if mode in [:auto, :manual] do
checkin(repo, [])
end
DBConnection.Ownership.ownership_mode(name, mode, opts)
end
@doc """
Checks a connection out for the given `repo`.
The process calling `checkout/2` will own the connection
until it calls `checkin/2` or until it crashes, at which point
the connection will be automatically reclaimed by the pool.
## Options
* `:sandbox` - when true the connection is wrapped in
a transaction. Defaults to true.
"""
def checkout(repo, opts \\ []) do
{name, opts} =
if Keyword.get(opts, :sandbox, true) do
proxy_pool(repo)
else
repo.__pool__
end
DBConnection.Ownership.ownership_checkout(name, opts)
end
@doc """
Checks in the connection back into the sandbox pool.
"""
def checkin(repo, _opts \\ []) do
{name, opts} = repo.__pool__
DBConnection.Ownership.ownership_checkin(name, opts)
end
@doc """
Allows the `allow` process to use the same connection as `parent`.
"""
def allow(repo, parent, allow, _opts \\ []) do
{name, opts} = repo.__pool__
DBConnection.Ownership.ownership_allow(name, parent, allow, opts)
end
defp proxy_pool(repo) do
{name, opts} = repo.__pool__
{pool, opts} = Keyword.pop(opts, :ownership_pool, DBConnection.Poolboy)
{name, [repo: repo, sandbox_pool: pool, ownership_pool: Pool] ++ opts}
end
end
|
lib/ecto/adapters/sql/sandbox.ex
| 0.885798
| 0.66556
|
sandbox.ex
|
starcoder
|
defmodule Tesla.Middleware.Normalize do
@moduledoc false
def call(env, next, _opts) do
env
|> normalize
|> Tesla.run(next)
|> normalize
end
def normalize({:error, reason}) do
raise %Tesla.Error{message: "adapter error: #{inspect(reason)}", reason: reason}
end
def normalize(env) do
env
|> Map.update!(:status, &normalize_status/1)
|> Map.update!(:headers, &normalize_headers/1)
|> Map.update!(:body, &normalize_body/1)
end
def normalize_status(nil), do: nil
def normalize_status(status) when is_integer(status), do: status
def normalize_status(status) when is_binary(status), do: status |> String.to_integer()
def normalize_status(status) when is_list(status),
do: status |> to_string |> String.to_integer()
def normalize_headers(headers) when is_map(headers) or is_list(headers) do
Enum.into(headers, %{}, fn {k, v} ->
{k |> to_string |> String.downcase(), v |> to_string}
end)
end
def normalize_body(data) when is_list(data), do: IO.iodata_to_binary(data)
def normalize_body(data), do: data
end
defmodule Tesla.Middleware.BaseUrl do
@behaviour Tesla.Middleware
@moduledoc """
Set base URL for all requests.
The base URL will be prepended to request path/url only
if it does not include http(s).
### Example usage
```
defmodule MyClient do
use Tesla
plug Tesla.Middleware.BaseUrl, "https://api.github.com"
end
MyClient.get("/path") # equals to GET https://api.github.com/path
MyClient.get("http://example.com/path") # equals to GET http://example.com/path
```
"""
def call(env, next, base) do
env
|> apply_base(base)
|> Tesla.run(next)
end
defp apply_base(env, base) do
if Regex.match?(~r/^https?:\/\//, env.url) do
# skip if url is already with scheme
env
else
%{env | url: join(base, env.url)}
end
end
defp join(base, url) do
case {String.last(to_string(base)), url} do
{nil, url} -> url
{"/", "/" <> rest} -> base <> rest
{"/", rest} -> base <> rest
{_, "/" <> rest} -> base <> "/" <> rest
{_, rest} -> base <> "/" <> rest
end
end
end
defmodule Tesla.Middleware.Headers do
@behaviour Tesla.Middleware
@moduledoc """
Set default headers for all requests
### Example usage
```
defmodule Myclient do
use Tesla
plug Tesla.Middleware.Headers, %{"User-Agent" => "Tesla"}
end
```
"""
def call(env, next, headers) do
env
|> merge(headers)
|> Tesla.run(next)
end
defp merge(env, nil), do: env
defp merge(env, headers) do
Map.update!(env, :headers, &Map.merge(&1, headers))
end
end
defmodule Tesla.Middleware.Query do
@behaviour Tesla.Middleware
@moduledoc """
Set default query params for all requests
### Example usage
```
defmodule Myclient do
use Tesla
plug Tesla.Middleware.Query, [token: "some-token"]
end
```
"""
def call(env, next, query) do
env
|> merge(query)
|> Tesla.run(next)
end
defp merge(env, nil), do: env
defp merge(env, query) do
Map.update!(env, :query, &(&1 ++ query))
end
end
defmodule Tesla.Middleware.Opts do
@behaviour Tesla.Middleware
@moduledoc """
Set default opts for all requests
### Example usage
```
defmodule Myclient do
use Tesla
plug Tesla.Middleware.Opts, [some: "option"]
end
```
"""
def call(env, next, opts) do
Tesla.run(%{env | opts: env.opts ++ opts}, next)
end
end
defmodule Tesla.Middleware.BaseUrlFromConfig do
def call(env, next, opts) do
base = config(opts)[:base_url]
Tesla.Middleware.BaseUrl.call(env, next, base)
end
defp config(opts) do
Application.get_env(Keyword.fetch!(opts, :otp_app), Keyword.fetch!(opts, :module))
end
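# Sketch: resolving the base URL from application config (:my_app and
# MyClient are hypothetical):
#
#     # config/config.exs
#     config :my_app, MyClient, base_url: "https://api.example.com"
#
#     defmodule MyClient do
#       use Tesla
#       plug Tesla.Middleware.BaseUrlFromConfig, otp_app: :my_app, module: MyClient
#     end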
end
|
lib/tesla/middleware/core.ex
| 0.816333
| 0.59131
|
core.ex
|
starcoder
|
defmodule Galaxy.DNS do
@moduledoc """
This topology strategy works by loading all your Erlang nodes (within Pods) in the current [DNS
namespace](https://kubernetes.io/docs/concepts/service-networking/dns-pod-service/).
It will fetch the targets of all pods under a shared headless service and attempt to connect.
It will continually monitor and update its connections every 5s.
It assumes that all Erlang nodes were launched under a base name, are using longnames, and are unique
based on their FQDN, rather than the base hostname. In other words, in the following
longname, `<basename>@<ip>`, `basename` would be the value configured through
`application_name`.
"""
use GenServer
require Logger
@default_initial_delay 0
@default_polling_interval 5000
@default_epmd_port 4369
def start_link(options) do
{sup_opts, opts} = Keyword.split(options, [:name])
GenServer.start_link(__MODULE__, opts, sup_opts)
end
@impl true
def init(options) do
case Keyword.get(options, :hosts) do
[] ->
:ignore
hosts ->
unless topology = options[:topology] do
raise ArgumentError, "expected :topology option to be given"
end
initial_delay = Keyword.get(options, :initial_delay, @default_initial_delay)
polling_interval = Keyword.get(options, :polling_interval, @default_polling_interval)
epmd_port = Keyword.get(options, :epmd_port, @default_epmd_port)
state = %{
topology: topology,
hosts: hosts,
epmd_port: epmd_port,
polling_interval: polling_interval
}
Process.send_after(self(), :poll, initial_delay)
{:ok, state}
end
end
@impl true
def handle_info(:poll, state) do
knowns_hosts = [node() | state.topology.members()]
discovered_hosts = poll_services_hosts(state.hosts, state.epmd_port)
new_hosts = discovered_hosts -- knowns_hosts
{nodes, _} = state.topology.connect_nodes(new_hosts)
Enum.each(nodes, &Logger.debug(["DNS connected ", &1 |> to_string(), " node"]))
Process.send_after(self(), :poll, state.polling_interval)
{:noreply, state}
end
defp poll_services_hosts(hosts, port) do
hosts
|> Enum.flat_map(&resolve_service_nodes/1)
|> Enum.filter(&filter_epmd_hosts(&1, port))
|> Enum.map(&normalize_node_hosts/1)
|> :net_adm.world_list()
end
defp resolve_service_nodes(service) do
case :inet_res.getbyname(service |> to_charlist(), :srv) do
{:ok, {:hostent, _, _, _, _, hosts}} ->
hosts
{:error, :nxdomain} ->
Logger.error(["Can't resolve DNS for ", service])
[]
{:error, :timeout} ->
Logger.error(["DNS timeout for ", service])
[]
_ ->
[]
end
end
defp filter_epmd_hosts(host, port),
do: match?({_, _, ^port, _}, host)
defp normalize_node_hosts({_, _, _, host}),
do: host |> List.to_atom()
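# Sketch: supervising this strategy (the service name and topology module are
# hypothetical; the topology module is expected to expose members/0 and
# connect_nodes/1 as used in handle_info/2 above):
#
#     children = [
#       {Galaxy.DNS,
#        topology: MyApp.Topology,
#        hosts: ["myapp-headless.default.svc.cluster.local"]}
#     ]
#
#     Supervisor.start_link(children, strategy: :one_for_one)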
end
|
lib/galaxy/dns.ex
| 0.79858
| 0.544256
|
dns.ex
|
starcoder
|
defmodule AWS.SecretsManager do
@moduledoc """
AWS Secrets Manager API Reference
AWS Secrets Manager provides a service to enable you to store, manage, and
retrieve secrets.
This guide provides descriptions of the Secrets Manager API. For more
information about using this service, see the [AWS Secrets Manager User Guide](https://docs.aws.amazon.com/secretsmanager/latest/userguide/introduction.html).
## API Version
This version of the Secrets Manager API Reference documents the Secrets Manager
API version 2017-10-17.
As an alternative to using the API, you can use one of the AWS SDKs, which
consist of libraries and sample code for various programming languages and
platforms such as Java, Ruby, .NET, iOS, and Android. The SDKs provide a
convenient way to create programmatic access to AWS Secrets Manager. For
example, the SDKs provide cryptographically signing requests, managing errors,
and retrying requests automatically. For more information about the AWS SDKs,
including downloading and installing them, see [Tools for Amazon Web Services](http://aws.amazon.com/tools/).
We recommend you use the AWS SDKs to make programmatic API calls to Secrets
Manager. However, you also can use the Secrets Manager HTTP Query API to make
direct calls to the Secrets Manager web service. To learn more about the Secrets
Manager HTTP Query API, see [Making Query Requests](https://docs.aws.amazon.com/secretsmanager/latest/userguide/query-requests.html)
in the *AWS Secrets Manager User Guide*.
Secrets Manager API supports GET and POST requests for all actions, and doesn't
require you to use GET for some actions and POST for others. However, GET
requests are subject to the size limitation of a URL. Therefore, for operations
that require larger sizes, use a POST request.
## Support and Feedback for AWS Secrets Manager
We welcome your feedback. Send your comments to
[<EMAIL>](mailto:<EMAIL>), or post your feedback and questions in the [AWS Secrets Manager Discussion
Forum](http://forums.aws.amazon.com/forum.jspa?forumID=296). For more
information about the AWS Discussion Forums, see [Forums Help](http://forums.aws.amazon.com/help.jspa).
## How examples are presented
The JSON that AWS Secrets Manager expects as your request parameters and the
service returns as a response to HTTP query requests contain single, long
strings without line breaks or white space formatting. The JSON shown in the
examples displays the code formatted with both line breaks and white space to
improve readability. When example input parameters would also result in long strings
that extend beyond the screen, line breaks are inserted to enhance readability.
You should always submit the input as a single JSON text string.
## Logging API Requests
AWS Secrets Manager supports AWS CloudTrail, a service that records AWS API
calls for your AWS account and delivers log files to an Amazon S3 bucket. By
using information that's collected by AWS CloudTrail, you can determine the
requests successfully made to Secrets Manager, who made the request, when it was
made, and so on. For more about AWS Secrets Manager and support for AWS
CloudTrail, see [Logging AWS Secrets Manager Events with AWS CloudTrail](http://docs.aws.amazon.com/secretsmanager/latest/userguide/monitoring.html#monitoring_cloudtrail)
in the *AWS Secrets Manager User Guide*. To learn more about CloudTrail,
including how to enable it and find your log files, see the [AWS CloudTrail User Guide](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2017-10-17",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "secretsmanager",
global?: false,
protocol: "json",
service_id: "Secrets Manager",
signature_version: "v4",
signing_name: "secretsmanager",
target_prefix: "secretsmanager"
}
end
@doc """
Disables automatic scheduled rotation and cancels the rotation of a secret if
currently in progress.
To re-enable scheduled rotation, call `RotateSecret` with
`AutomaticallyRotateAfterDays` set to a value greater than 0. This immediately
rotates your secret and then enables the automatic schedule.
If you cancel a rotation while in progress, it can leave the `VersionStage`
labels in an unexpected state. Depending on the step of the rotation in
progress, you might need to remove the staging label `AWSPENDING` from the
partially created version, specified by the `VersionId` response value. You
should also evaluate the partially rotated new version to see if it should be
deleted, which you can do by removing all staging labels from the new version's
`VersionStage` field.
To successfully start a rotation, the staging label `AWSPENDING` must be in one
of the following states:
* Not attached to any version at all
* Attached to the same version as the staging label `AWSCURRENT`
If the staging label `AWSPENDING` is attached to a different version than the
version with `AWSCURRENT`, then the attempt to rotate fails.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:CancelRotateSecret
## Related operations
* To configure rotation for a secret or to manually trigger a
rotation, use `RotateSecret`.
* To get the rotation configuration details for a secret, use
`DescribeSecret`.
* To list all of the currently available secrets, use `ListSecrets`.
* To list all of the versions currently associated with a secret,
use `ListSecretVersionIds`.
"""
def cancel_rotate_secret(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CancelRotateSecret", input, options)
end
@doc """
Creates a new secret.
A secret in Secrets Manager consists of both the protected secret data and the
important information needed to manage the secret.
Secrets Manager stores the encrypted secret data in one of a collection of
"versions" associated with the secret. Each version contains a copy of the
encrypted secret data. Each version is associated with one or more "staging
labels" that identify where the version is in the rotation cycle. The
`SecretVersionsToStages` field of the secret contains the mapping of staging
labels to the active versions of the secret. Versions without a staging label
are considered deprecated and not included in the list.
You provide the secret data to be encrypted by putting text in either the
`SecretString` parameter or binary data in the `SecretBinary` parameter, but not
both. If you include `SecretString` or `SecretBinary` then Secrets Manager also
creates an initial secret version and automatically attaches the staging label
`AWSCURRENT` to the new version.
If you call an operation to encrypt or decrypt the `SecretString`
or `SecretBinary` for a secret in the same account as the calling user and that
secret doesn't specify an AWS KMS encryption key, Secrets Manager uses the
account's default AWS managed customer master key (CMK) with the alias
`aws/secretsmanager`. If this key doesn't already exist in your account then
Secrets Manager creates it for you automatically. All users and roles in the
same AWS account automatically have access to use the default CMK. Note that if
a Secrets Manager API call results in AWS creating the account's AWS-managed
CMK, it can result in a one-time significant delay in returning the result.
If the secret resides in a different AWS account from the
credentials calling an API that requires encryption or decryption of the secret
value then you must create and use a custom AWS KMS CMK because you can't access
the default CMK for the account using credentials from a different AWS account.
Store the ARN of the CMK in the secret when you create the secret or when you
update it by including it in the `KMSKeyId`. If you call an API that must
encrypt or decrypt `SecretString` or `SecretBinary` using credentials from a
different account then the AWS KMS key policy must grant cross-account access to
that other account's user or role for both the kms:GenerateDataKey and
kms:Decrypt operations.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:CreateSecret
* kms:GenerateDataKey - needed only if you use a customer-managed
AWS KMS key to encrypt the secret. You do not need this permission to use the
account default AWS managed CMK for Secrets Manager.
* kms:Decrypt - needed only if you use a customer-managed AWS KMS
key to encrypt the secret. You do not need this permission to use the account
default AWS managed CMK for Secrets Manager.
* secretsmanager:TagResource - needed only if you include the `Tags`
parameter.
## Related operations
* To delete a secret, use `DeleteSecret`.
* To modify an existing secret, use `UpdateSecret`.
* To create a new version of a secret, use `PutSecretValue`.
* To retrieve the encrypted secure string and secure binary values,
use `GetSecretValue`.
* To retrieve all other details for a secret, use `DescribeSecret`.
This does not include the encrypted secure string and secure binary values.
* To retrieve the list of secret versions associated with the
current secret, use `DescribeSecret` and examine the `SecretVersionsToStages`
response value.
"""
def create_secret(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateSecret", input, options)
end
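# Illustrative usage sketch (assumes aws-elixir's `AWS.Client.create/3`
# constructor and its `{:ok, result, response}` success shape; the
# credentials, secret name, and value below are hypothetical):
#
#     client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
#
#     {:ok, _result, _response} =
#       AWS.SecretsManager.create_secret(client, %{
#         "Name" => "prod/my-app/db",
#         "SecretString" => ~s({"username":"admin","password":"example"})
#       })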
@doc """
Deletes the resource-based permission policy attached to the secret.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:DeleteResourcePolicy
## Related operations
* To attach a resource policy to a secret, use `PutResourcePolicy`.
* To retrieve the current resource-based policy attached to a
secret, use `GetResourcePolicy`.
* To list all of the currently available secrets, use `ListSecrets`.
"""
def delete_resource_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteResourcePolicy", input, options)
end
@doc """
Deletes an entire secret and all of the versions.
You can optionally include a recovery window during which you can restore the
secret. If you don't specify a recovery window value, the operation defaults to
30 days. Secrets Manager attaches a `DeletionDate` stamp to the secret that
specifies the end of the recovery window. At the end of the recovery window,
Secrets Manager deletes the secret permanently.
At any time before the recovery window ends, you can use `RestoreSecret` to remove
the `DeletionDate` and cancel the deletion of the secret.
You cannot access the encrypted secret information in any secret scheduled for
deletion. If you need to access that information, you must cancel the deletion
with `RestoreSecret` and then retrieve the information.
There is no explicit operation to delete a version of a secret.
Instead, remove all staging labels from the `VersionStage` field of a version.
That marks the version as deprecated and allows Secrets Manager to delete it as
needed. Versions without any staging labels do not show up in
`ListSecretVersionIds` unless you specify `IncludeDeprecated`.
The permanent secret deletion at the end of the waiting period is
performed as a background task with low priority. There is no guarantee of a
specific time after the recovery window for the actual delete operation to
occur.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:DeleteSecret
## Related operations
* To create a secret, use `CreateSecret`.
* To cancel deletion of a version of a secret before the recovery
window has expired, use `RestoreSecret`.
"""
def delete_secret(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteSecret", input, options)
end
@doc """
Retrieves the details of a secret.
It does not include the encrypted fields. Secrets Manager only returns fields
populated with a value in the response.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:DescribeSecret
## Related operations
* To create a secret, use `CreateSecret`.
* To modify a secret, use `UpdateSecret`.
* To retrieve the encrypted secret information in a version of the
secret, use `GetSecretValue`.
* To list all of the secrets in the AWS account, use `ListSecrets`.
"""
def describe_secret(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeSecret", input, options)
end
@doc """
Generates a random password of the specified complexity.
This operation is intended for use in the Lambda rotation function. Per best
practice, we recommend that you specify the maximum length and include every
character type that the system you are generating a password for can support.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:GetRandomPassword
"""
def get_random_password(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetRandomPassword", input, options)
end
@doc """
Retrieves the JSON text of the resource-based policy document attached to the
specified secret.
The JSON request string input and response output display formatted code with
white space and line breaks for better readability. Submit your input as a
single-line JSON string.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:GetResourcePolicy
## Related operations
* To attach a resource policy to a secret, use `PutResourcePolicy`.
* To delete the resource-based policy attached to a secret, use
`DeleteResourcePolicy`.
* To list all of the currently available secrets, use `ListSecrets`.
"""
def get_resource_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetResourcePolicy", input, options)
end
@doc """
Retrieves the contents of the encrypted fields `SecretString` or `SecretBinary`
from the specified version of a secret, whichever contains content.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:GetSecretValue
* kms:Decrypt - required only if you use a customer-managed AWS KMS
key to encrypt the secret. You do not need this permission to use the account's
default AWS managed CMK for Secrets Manager.
## Related operations
* To create a new version of the secret with different encrypted
information, use `PutSecretValue`.
* To retrieve the non-encrypted details for the secret, use
`DescribeSecret`.
"""
def get_secret_value(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetSecretValue", input, options)
end
@doc """
Lists all of the versions attached to the specified secret.
The output does not include the `SecretString` or `SecretBinary` fields. By
default, the list includes only versions that have at least one staging label in
`VersionStage` attached.
Always check the `NextToken` response parameter when calling any of the `List*`
operations. These operations can occasionally return an empty or shorter than
expected list of results even when more results are available. When
this happens, the `NextToken` response parameter contains a value to pass to the
next call to the same API to request the next part of the list.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:ListSecretVersionIds
## Related operations
* To list the secrets in an account, use `ListSecrets`.
"""
def list_secret_version_ids(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListSecretVersionIds", input, options)
end
@doc """
Lists all of the secrets that are stored by Secrets Manager in the AWS account.
To list the versions currently stored for a specific secret, use
`ListSecretVersionIds`. The encrypted fields `SecretString` and `SecretBinary`
are not included in the output. To get that information, call the
`GetSecretValue` operation.
Always check the `NextToken` response parameter when calling any of the `List*`
operations. These operations can occasionally return an empty or shorter than
expected list of results even when more results are available. When
this happens, the `NextToken` response parameter contains a value to pass to the
next call to the same API to request the next part of the list.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:ListSecrets
## Related operations
* To list the versions attached to a secret, use
`ListSecretVersionIds`.
"""
def list_secrets(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListSecrets", input, options)
end
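# Pagination sketch (hedged: assumes the `{:ok, result, response}` return
# shape used by aws-elixir; the helper name is hypothetical). Keep calling
# with `NextToken` until the response no longer includes one:
#
#     defp all_secrets(client, token \\ nil, acc \\ []) do
#       input = if token, do: %{"NextToken" => token}, else: %{}
#       {:ok, %{"SecretList" => page} = result, _response} =
#         AWS.SecretsManager.list_secrets(client, input)
#       case result["NextToken"] do
#         nil -> acc ++ page
#         next -> all_secrets(client, next, acc ++ page)
#       end
#     end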
@doc """
Attaches the contents of the specified resource-based permission policy to a
secret.
A resource-based policy is optional. Alternatively, you can use IAM
identity-based policies that specify the secret's Amazon Resource Name (ARN) in
the policy statement's `Resources` element. You can also use a combination of
both identity-based and resource-based policies. The affected users and roles
receive the permissions that are permitted by all of the relevant policies. For
more information, see [Using Resource-Based Policies for AWS Secrets Manager](http://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_resource-based-policies.html).
For the complete description of the AWS policy syntax and grammar, see [IAM JSON Policy
Reference](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html)
in the *IAM User Guide*.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:PutResourcePolicy
## Related operations
* To retrieve the resource policy attached to a secret, use
`GetResourcePolicy`.
* To delete the resource-based policy attached to a secret, use
`DeleteResourcePolicy`.
* To list all of the currently available secrets, use `ListSecrets`.
"""
def put_resource_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutResourcePolicy", input, options)
end
@doc """
Stores a new encrypted secret value in the specified secret.
To do this, the operation creates a new version and attaches it to the secret.
The version can contain a new `SecretString` value or a new `SecretBinary`
value. You can also specify the staging labels that are initially attached to
the new version.
The Secrets Manager console uses only the `SecretString` field. To add binary
data to a secret with the `SecretBinary` field you must use the AWS CLI or one
of the AWS SDKs.
* If this operation creates the first version for the secret then
Secrets Manager automatically attaches the staging label `AWSCURRENT` to the new
version.
* If you do not specify a value for VersionStages then Secrets
Manager automatically moves the staging label `AWSCURRENT` to this new version.
* If this operation moves the staging label `AWSCURRENT` from
another version to this version, then Secrets Manager also automatically moves
the staging label `AWSPREVIOUS` to the version that `AWSCURRENT` was removed
from.
* This operation is idempotent. If a version with a `VersionId` with
the same value as the `ClientRequestToken` parameter already exists and you
specify the same secret data, the operation succeeds but does nothing. However,
if the secret data is different, then the operation fails because you cannot
modify an existing version; you can only create new ones.
If you call an operation to encrypt or decrypt the `SecretString`
or `SecretBinary` for a secret in the same account as the calling user and that
secret doesn't specify an AWS KMS encryption key, Secrets Manager uses the
account's default AWS managed customer master key (CMK) with the alias
`aws/secretsmanager`. If this key doesn't already exist in your account then
Secrets Manager creates it for you automatically. All users and roles in the
same AWS account automatically have access to use the default CMK. Note that if
a Secrets Manager API call results in AWS creating the account's AWS-managed
CMK, it can result in a one-time significant delay in returning the result.
If the secret resides in a different AWS account from the
credentials calling an API that requires encryption or decryption of the secret
value then you must create and use a custom AWS KMS CMK because you can't access
the default CMK for the account using credentials from a different AWS account.
Store the ARN of the CMK in the secret when you create the secret or when you
update it by including it in the `KMSKeyId`. If you call an API that must
encrypt or decrypt `SecretString` or `SecretBinary` using credentials from a
different account then the AWS KMS key policy must grant cross-account access to
that other account's user or role for both the kms:GenerateDataKey and
kms:Decrypt operations.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:PutSecretValue
* kms:GenerateDataKey - needed only if you use a customer-managed
AWS KMS key to encrypt the secret. You do not need this permission to use the
account's default AWS managed CMK for Secrets Manager.
## Related operations
* To retrieve the encrypted value you store in the version of a
secret, use `GetSecretValue`.
* To create a secret, use `CreateSecret`.
* To get the details for a secret, use `DescribeSecret`.
* To list the versions attached to a secret, use
`ListSecretVersionIds`.
"""
def put_secret_value(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutSecretValue", input, options)
end
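# Illustrative sketch (names and values are hypothetical): add a new
# version holding a rotated value; per the doc above, Secrets Manager
# moves `AWSCURRENT` to it automatically when `VersionStages` is omitted.
#
#     AWS.SecretsManager.put_secret_value(client, %{
#       "SecretId" => "prod/my-app/db",
#       "SecretString" => ~s({"username":"admin","password":"rotated"})
#     })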
@doc """
Remove regions from replication.
"""
def remove_regions_from_replication(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RemoveRegionsFromReplication", input, options)
end
@doc """
Converts an existing secret to a multi-Region secret and begins replicating the
secret to a list of new Regions.
"""
def replicate_secret_to_regions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ReplicateSecretToRegions", input, options)
end
@doc """
Cancels the scheduled deletion of a secret by removing the `DeletedDate` time
stamp.
This makes the secret accessible to query once again.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:RestoreSecret
## Related operations
* To delete a secret, use `DeleteSecret`.
"""
def restore_secret(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RestoreSecret", input, options)
end
@doc """
Configures and starts the asynchronous process of rotating this secret.
If you include the configuration parameters, the operation sets those values for
the secret and then immediately starts a rotation. If you do not include the
configuration parameters, the operation starts a rotation with the values
already stored in the secret. After the rotation completes, the protected
service and its clients all use the new version of the secret.
This required configuration information includes the ARN of an AWS Lambda
function and the time between scheduled rotations. The Lambda rotation function
creates a new version of the secret and creates or updates the credentials on
the protected service to match. After testing the new credentials, the function
marks the new secret with the staging label `AWSCURRENT` so that your clients
all immediately begin to use the new version. For more information about
rotating secrets and how to configure a Lambda function to rotate the secrets
for your protected service, see [Rotating Secrets in AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html)
in the *AWS Secrets Manager User Guide*.
Secrets Manager schedules the next rotation when the previous one completes.
Secrets Manager schedules the date by adding the rotation interval (number of
days) to the actual date of the last rotation. The service chooses the hour
within that 24-hour date window randomly. The minute is also chosen somewhat
randomly, but weighted towards the top of the hour and influenced by a variety
of factors that help distribute load.
The rotation function must end with the versions of the secret in one of two
states:
* The `AWSPENDING` and `AWSCURRENT` staging labels are attached to
the same version of the secret, or
* The `AWSPENDING` staging label is not attached to any version of
the secret.
If the `AWSPENDING` staging label is present but not attached to the same
version as `AWSCURRENT` then any later invocation of `RotateSecret` assumes that
a previous rotation request is still in progress and returns an error.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:RotateSecret
* lambda:InvokeFunction (on the function specified in the secret's
metadata)
## Related operations
* To list the secrets in your account, use `ListSecrets`.
* To get the details for a version of a secret, use
`DescribeSecret`.
* To create a new version of a secret, use `CreateSecret`.
* To attach staging labels to or remove staging labels from a
version of a secret, use `UpdateSecretVersionStage`.
"""
def rotate_secret(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RotateSecret", input, options)
end
@doc """
Removes the secret from replication and promotes the secret to a regional secret
in the replica Region.
"""
def stop_replication_to_replica(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StopReplicationToReplica", input, options)
end
@doc """
Attaches one or more tags, each consisting of a key name and a value, to the
specified secret.
Tags are part of the secret's overall metadata, and are not associated with any
specific version of the secret. This operation only appends tags to the existing
list of tags. To remove tags, you must use `UntagResource`.
The following basic restrictions apply to tags:
* Maximum number of tags per secret: 50
* Maximum key length: 127 Unicode characters in UTF-8
* Maximum value length: 255 Unicode characters in UTF-8
* Tag keys and values are case sensitive.
* Do not use the `aws:` prefix in your tag names or values because
AWS reserves it for AWS use. You can't edit or delete tag names or values with
this prefix. Tags with this prefix do not count against your tags per secret
limit.
* If you use your tagging schema across multiple services and
resources, remember other services might have restrictions on allowed
characters. Generally allowed characters: letters, spaces, and numbers
representable in UTF-8, plus the following special characters: + - = . _ : / @.
If you use tags as part of your security strategy, then adding or removing a tag
can change permissions. If successfully completing this operation would result
in you losing your permissions for this secret, then the operation is blocked
and returns an Access Denied error.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:TagResource
## Related operations
* To remove one or more tags from the collection attached to a
secret, use `UntagResource`.
* To view the list of tags attached to a secret, use
`DescribeSecret`.
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
@doc """
Removes one or more tags from the specified secret.
This operation is idempotent. If a requested tag is not attached to the secret,
no error is returned and the secret metadata is unchanged.
If you use tags as part of your security strategy, then removing a tag can
change permissions. If successfully completing this operation would result in
you losing your permissions for this secret, then the operation is blocked and
returns an Access Denied error.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:UntagResource
## Related operations
* To add one or more tags to the collection attached to a secret,
use `TagResource`.
* To view the list of tags attached to a secret, use
`DescribeSecret`.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
@doc """
Modifies many of the details of the specified secret.
If you include a `ClientRequestToken` and *either* `SecretString` or
`SecretBinary` then it also creates a new version attached to the secret.
To modify the rotation configuration of a secret, use `RotateSecret` instead.
The Secrets Manager console uses only the `SecretString` parameter and therefore
limits you to encrypting and storing only a text string. To encrypt and store
binary data as part of the version of a secret, you must use either the AWS CLI
or one of the AWS SDKs.
* If a version with a `VersionId` with the same value as the
`ClientRequestToken` parameter already exists, the operation results in an
error. You cannot modify an existing version, you can only create a new version.
* If you include `SecretString` or `SecretBinary` to create a new
secret version, Secrets Manager automatically attaches the staging label
`AWSCURRENT` to the new version.
If you call an operation to encrypt or decrypt the `SecretString`
or `SecretBinary` for a secret in the same account as the calling user and that
secret doesn't specify an AWS KMS encryption key, Secrets Manager uses the
account's default AWS managed customer master key (CMK) with the alias
`aws/secretsmanager`. If this key doesn't already exist in your account then
Secrets Manager creates it for you automatically. All users and roles in the
same AWS account automatically have access to use the default CMK. Note that if
a Secrets Manager API call results in AWS creating the account's AWS-managed
CMK, it can result in a one-time significant delay in returning the result.
If the secret resides in a different AWS account from the
credentials calling an API that requires encryption or decryption of the secret
value then you must create and use a custom AWS KMS CMK because you can't access
the default CMK for the account using credentials from a different AWS account.
Store the ARN of the CMK in the secret when you create the secret or when you
update it by including it in the `KMSKeyId`. If you call an API that must
encrypt or decrypt `SecretString` or `SecretBinary` using credentials from a
different account then the AWS KMS key policy must grant cross-account access to
that other account's user or role for both the kms:GenerateDataKey and
kms:Decrypt operations.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:UpdateSecret
* kms:GenerateDataKey - needed only if you use a custom AWS KMS key
to encrypt the secret. You do not need this permission to use the account's AWS
managed CMK for Secrets Manager.
* kms:Decrypt - needed only if you use a custom AWS KMS key to
encrypt the secret. You do not need this permission to use the account's AWS
managed CMK for Secrets Manager.
## Related operations
* To create a new secret, use `CreateSecret`.
* To add only a new version to an existing secret, use
`PutSecretValue`.
* To get the details for a secret, use `DescribeSecret`.
* To list the versions contained in a secret, use
`ListSecretVersionIds`.
"""
def update_secret(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateSecret", input, options)
end
@doc """
Modifies the staging labels attached to a version of a secret.
Staging labels are used to track a version as it progresses through the secret
rotation process. You can attach a staging label to only one version of a secret
at a time. If a staging label to be added is already attached to another
version, then it is moved: it is removed from the other version first and then
attached to this one. For more information about staging labels, see [Staging Labels](https://docs.aws.amazon.com/secretsmanager/latest/userguide/terms-concepts.html#term_staging-label)
in the *AWS Secrets Manager User Guide*.
The staging labels that you specify in the `VersionStage` parameter are added to
the existing list of staging labels; they don't replace it.
You can move the `AWSCURRENT` staging label to this version by including it in
this call.
Whenever you move `AWSCURRENT`, Secrets Manager automatically moves the label
`AWSPREVIOUS` to the version that `AWSCURRENT` was removed from.
If this action results in the last label being removed from a version, then the
version is considered to be 'deprecated' and can be deleted by Secrets Manager.
## Minimum permissions
To run this command, you must have the following permissions:
* secretsmanager:UpdateSecretVersionStage
## Related operations
* To get the list of staging labels that are currently associated
with a version of a secret, use `DescribeSecret` and examine the
`SecretVersionsToStages` response value.
"""
def update_secret_version_stage(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateSecretVersionStage", input, options)
end
@doc """
Validates that the resource policy does not grant a wide range of IAM principals
access to your secret.
The JSON request string input and response output display formatted code with
white space and line breaks for better readability. Submit your input as a
single-line JSON string. A resource-based policy is optional for secrets.
The API performs three checks when validating the secret:
* Sends a call to
[Zelkova](https://aws.amazon.com/blogs/security/protect-sensitive-data-in-the-cloud-with-automated-reasoning-zelkova/),
an automated reasoning engine, to ensure your Resource Policy does not allow
broad access to your secret.
* Checks for correct syntax in a policy.
* Verifies the policy does not lock out a caller.
## Minimum Permissions
You must have the permissions required to access the following APIs:
* `secretsmanager:PutResourcePolicy`
* `secretsmanager:ValidateResourcePolicy`
"""
def validate_resource_policy(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ValidateResourcePolicy", input, options)
end
end
# end of file: lib/aws/generated/secrets_manager.ex
defmodule Grizzly.Report do
@moduledoc """
Reports from Z-Wave commands
When you send a command in Z-Wave you will receive a report back.
## When Things Go Well
There are two primary reports that are returned when sending a command.
The first is a `:command` report and the second is an `:ack_response` report.
These both will have a status of `:complete`.
Normally, an `:ack_response` report is returned when you set a value on a
device. This means the device received the command and is processing it,
not that the device has already processed it. You might have to read the
value back after setting it if you want to confirm the device ran the
set-based command.
A `:command` report type is often returned after reading a value from a
device. This report will have its `:command` field filled with a Z-Wave
command.
```elixir
case Grizzly.send_command(node_id, command, command_args, command_opts) do
{:ok, %Grizzly.Report{status: :complete, type: :command} = report} ->
# do something with report.command
{:ok, %Grizzly.Report{status: :complete, type: :ack_response}} ->
# do whatever
end
```
## Queued Commands
When sending a command to a device that is sleeping, normally a battery-powered
device, the command will be queued internally. The command will still be
considered `:inflight` as it has not reached the device yet. You know
a command has been queued when the report's `:status` field is `:inflight`
and the `:type` field is `:queued_delay`. Fields to help you manage queued
commands are `:command_ref`, `:queued_delay`, and `:node_id`.
During the command's queued lifetime the system sends pings back to the
caller to ensure that the low level connection is still established. This
also provides an updated delay time before the device wakes up.
```elixir
case Grizzly.send_command(node_id, command, command_args, command_opts) do
{:ok, %Grizzly.Report{status: :inflight, type: :queued_delay}} ->
# the command was just queued
end
```
Once the command has been queued the calling process will receive messages
about the queued command like so:
```elixir
{:grizzly, :report, %Report{}}
```
This report can take two forms: one for a completed queued command and one
for a queued ping.
```elixir
def handle_info({:grizzly, :report, report}, state) do
case report do
%Grizzly.Report{status: :inflight, type: :queued_ping} ->
# handle the ping if you want
# an updated queue delay will be found in the :queued_delay
# field of the report
%Grizzly.Report{status: :complete, type: :command, queued: true} ->
# if the :queued field is true and the report is complete, the
# command made it to the sleeping device and the device has
# received it
%Grizzly.Report{status: :complete, type: :timeout, queued: true} ->
# The device woke up and the controller sent the command, but for
# some reason the command's processing timed out
end
end
```
## Timeouts
If sending the command times out you will get a command with the `:type` of
`:timeout`
```elixir
case Grizzly.send_command(node_id, command, command_args, command_opts) do
{:ok, %Grizzly.Report{status: :complete, type: :timeout}} ->
# handle the timeout
end
```
The reason this is considered okay is that the command that was sent
was valid and we were able to establish a connection to the desired device,
but it just did not provide any report back.
## Full Example
The below example shows the various ways one might match after calling
`Grizzly.send_command/4`.
```elixir
case Grizzly.send_command(node_id, command, command_args, command_opts) do
{:ok, %Grizzly.Report{status: :complete, type: :command} = report} ->
handle_complete_report(report)
{:ok, %Grizzly.Report{status: :complete, type: :ack_response} = report} ->
handle_complete_report(report)
{:ok, %Grizzly.Report{status: :complete, type: :timeout} = report} ->
handle_timeout(report)
{:ok, %Grizzly.Report{status: :inflight, type: :queued_delay} = report} ->
handle_queued_command(report)
end
```
note: the `handle_*` functions will need to be implemented and are only used in
the example for illustration purposes
"""
alias Grizzly.ZWave
alias Grizzly.ZWave.Command
@typedoc """
All the data for the status and type of a report.
Fields
- `:status` - this indicates if the report is complete, inflight, or
timed out
- `:type` - this indicates if the report contains a command or information
about being queued.
- `:command` - if the status is `:complete` and the type is `:command` then
this field will contain a Z-Wave command in the report.
- `:transmission_stats` - provides transmission stats for the command that
was sent
- `:queued_delay` - the delay time remaining if the report type is
`:queued_delay` or `:queued_ping`
- `:command_ref` - a reference to the command. This is mostly useful for
tracking queued commands
- `:node_id` - the node the report is responding from
- `:queued` - this flag marks if the command was ever queued before
completing
"""
@type t() :: %__MODULE__{
status: status(),
type: type(),
command: Command.t() | nil,
transmission_stats: [transmission_stat()],
queued_delay: non_neg_integer(),
command_ref: reference() | nil,
node_id: ZWave.node_id(),
queued: boolean()
}
@type type() ::
:ack_response
| :command
| :queued_ping
| :unsolicited
| :queued_delay
| :timeout
@type status() :: :inflight | :complete
@type opt() ::
{:transmission_stats, [transmission_stat()]}
| {:queued_delay, non_neg_integer()}
| {:command, Command.t()}
| {:command_ref, reference()}
| {:queued, boolean()}
@typedoc """
The RSSI value between each device the command had to route through to get
to the destination node.
If the value is `:not_available`, that means data for that hop is not available
or a hop did not take place. To see the nodes that the command was routed
through, see the `:last_working_route` field of the transmission stats.
"""
@type rssi_value() :: integer() | :not_available
@type transmit_speed() :: float() | non_neg_integer()
@typedoc """
The various transmission stats that are provided by the Z-Wave network when
sending a command.
- `:transmit_channel` - the RF channel the command was transmitted on
- `:ack_channel` - the RF channel the acknowledgement report was
transmitted on
- `:rssi` - a 5 tuple that contains RSSI values for each hop in the Z-Wave
network
- `:last_working_route` - this contains a 4 tuple that shows what nodes the
Z-Wave command was routed through to the destination node. It also
contains the speed at which the Z-Wave command was transmitted to the
destination
- `:transmission_time` - the length of time until the reception of an
acknowledgement in milliseconds
- `:route_changed` - this indicates if the route was changed for the
current transmission
"""
@type transmission_stat() ::
{:transmit_channel, non_neg_integer()}
| {:ack_channel, non_neg_integer()}
| {:rssi_hops, [rssi_value()]}
| {:rssi_4bars, 0..4 | :unknown}
| {:rssi_dbm, rssi_value()}
| {:last_working_route, [ZWave.node_id()]}
| {:transmit_speed, transmit_speed()}
| {:transmission_time, non_neg_integer()}
| {:route_changed, boolean()}
@enforce_keys [:status, :type, :node_id]
defstruct status: nil,
command: nil,
transmission_stats: [],
queued_delay: 0,
command_ref: nil,
node_id: nil,
type: nil,
queued: false
@doc """
Make a new `Grizzly.Report`
"""
@spec new(status(), type(), ZWave.node_id(), [opt()]) :: t()
def new(status, type, node_id, opts \\ []) do
%__MODULE__{
status: status,
type: type,
node_id: node_id,
command_ref: Keyword.get(opts, :command_ref, nil),
transmission_stats: Keyword.get(opts, :transmission_stats, []),
queued_delay: Keyword.get(opts, :queued_delay, 0),
command: Keyword.get(opts, :command),
queued: Keyword.get(opts, :queued, false)
}
end
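# Example (illustrative; the node id and transmission stats below are
# hypothetical):
#
#     Grizzly.Report.new(:complete, :ack_response, 12,
#       transmission_stats: [transmit_channel: 0]
#     )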
end
# end of file: lib/grizzly/report.ex
defmodule Day22.RecursiveGame do
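@moduledoc """
Advent of Code 2020, day 22: plays the "Recursive Combat" card game.
Cards are streamed in via `build/2`, reversed into play order by
`prepare/1`, and `play/1` returns the winning score (negated when
player 2 wins).
"""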
defstruct p1_cards: [],
p1_next: [],
p1_total: 0,
p2_cards: [],
p2_next: [],
p2_total: 0,
building: 1,
seen: MapSet.new()
def build(%__MODULE__{building: 1} = game, "Player 1:"), do: game
def build(%__MODULE__{building: 1} = game, "Player 2:"), do: %__MODULE__{game | building: 2}
def build(game, ""), do: game
def build(%__MODULE__{building: 1} = game, n),
do: %__MODULE__{
game
| p1_cards: [String.to_integer(n) | game.p1_cards],
p1_total: game.p1_total + 1
}
def build(%__MODULE__{building: 2} = game, n),
do: %__MODULE__{
game
| p2_cards: [String.to_integer(n) | game.p2_cards],
p2_total: game.p2_total + 1
}
def prepare(%__MODULE__{} = game),
do: %__MODULE__{
game
| p1_cards: Enum.reverse(game.p1_cards),
p2_cards: Enum.reverse(game.p2_cards)
}
def play(%__MODULE__{p1_cards: [], p1_next: []} = game), do: -score(game.p2_cards, game.p2_next)
def play(%__MODULE__{p2_cards: [], p2_next: []} = game), do: score(game.p1_cards, game.p1_next)
def play(%__MODULE__{p1_cards: []} = game),
do: play(%__MODULE__{game | p1_cards: Enum.reverse(game.p1_next), p1_next: []})
def play(%__MODULE__{p2_cards: []} = game),
do: play(%__MODULE__{game | p2_cards: Enum.reverse(game.p2_next), p2_next: []})
def play(game) do
key =
{game.p1_cards ++ Enum.reverse(game.p1_next), game.p2_cards ++ Enum.reverse(game.p2_next)}
case MapSet.member?(game.seen, key) do
true ->
score(game.p1_cards, game.p1_next)
_ ->
do_play(%__MODULE__{game | seen: game.seen |> MapSet.put(key)})
end
end
def do_play(
%__MODULE__{p1_cards: [c1 | rest1], p1_total: p1, p2_cards: [c2 | rest2], p2_total: p2} =
game
)
when c1 < p1 and c2 < p2 do
subgame = %__MODULE__{
p1_cards: Enum.take(rest1 ++ Enum.reverse(game.p1_next), c1),
p1_total: c1,
p2_cards: Enum.take(rest2 ++ Enum.reverse(game.p2_next), c2),
p2_total: c2
}
case play(subgame) do
x when x < 0 ->
play(%__MODULE__{
game
| p1_cards: rest1,
p2_cards: rest2,
p2_next: [c1 | [c2 | game.p2_next]],
p1_total: game.p1_total - 1,
p2_total: game.p2_total + 1
})
_ ->
play(%__MODULE__{
game
| p1_cards: rest1,
p2_cards: rest2,
p1_next: [c2 | [c1 | game.p1_next]],
p1_total: game.p1_total + 1,
p2_total: game.p2_total - 1
})
end
end
def do_play(%__MODULE__{p1_cards: [c1 | rest1], p2_cards: [c2 | rest2]} = game) when c1 > c2 do
play(%__MODULE__{
game
| p1_cards: rest1,
p2_cards: rest2,
p1_next: [c2 | [c1 | game.p1_next]],
p1_total: game.p1_total + 1,
p2_total: game.p2_total - 1
})
end
def do_play(%__MODULE__{p1_cards: [c1 | rest1], p2_cards: [c2 | rest2]} = game) when c2 > c1 do
play(%__MODULE__{
game
| p1_cards: rest1,
p2_cards: rest2,
p2_next: [c1 | [c2 | game.p2_next]],
p1_total: game.p1_total - 1,
p2_total: game.p2_total + 1
})
end
def score(a, b) do
(b ++ Enum.reverse(a))
|> Enum.with_index(1)
|> Enum.map(fn {n, i} -> n * i end)
|> Enum.reduce(&Kernel.+/2)
end
end
# end of file: year_2020/lib/day_22/recursive_game.ex
defmodule BSV.Block do
@moduledoc """
A block is a data structure consisting of a `t:BSV.BlockHeader.t/0` and a list
of [`transactions`](`t:BSV.Tx.t/0`).
Transaction data is permanently recorded into blocks. Because each block
contains a reference to the previous block, over time blocks form a chain.
"""
alias BSV.{BlockHeader, Hash, Serializable, Tx, VarInt}
import BSV.Util, only: [decode: 2, encode: 2]
defstruct header: nil, txns: []
@typedoc "Block struct"
@type t() :: %__MODULE__{
header: BlockHeader.t(),
txns: list(Tx.t())
}
@typedoc """
Merkle root - the result of hashing all of the transactions contained in the
block into a tree-like structure known as a Merkle tree.
"""
@type merkle_root() :: <<_::256>>
@doc """
Calculates and returns the result of hashing all of the transactions contained
in the block into a tree-like structure known as a Merkle tree.
"""
@spec calc_merkle_root(t()) :: merkle_root()
def calc_merkle_root(%__MODULE__{txns: txns}) do
txns
|> Enum.map(&Tx.get_hash/1)
|> hash_nodes()
end
# Iterates over the list of tx hashes and further hashes them together until
# the merkle root is calculated
defp hash_nodes([hash]), do: hash
defp hash_nodes(nodes) when rem(length(nodes), 2) == 1,
do: hash_nodes(nodes ++ [List.last(nodes)])
defp hash_nodes(nodes) do
nodes
|> Enum.chunk_every(2)
|> Enum.map(fn [a, b] -> Hash.sha256_sha256(a <> b) end)
|> hash_nodes()
end
@doc """
Parses the given binary into a `t:BSV.Block.t/0`.
Returns the result in an `:ok` / `:error` tuple pair.
## Options
The accepted options are:
* `:encoding` - Optionally decode the binary with either the `:base64` or `:hex` encoding scheme.
"""
@spec from_binary(binary(), keyword()) :: {:ok, t()} | {:error, term()}
def from_binary(data, opts \\ []) when is_binary(data) do
encoding = Keyword.get(opts, :encoding)
with {:ok, data} <- decode(data, encoding),
{:ok, block, _rest} <- Serializable.parse(%__MODULE__{}, data)
do
{:ok, block}
end
end
@doc """
Parses the given binary into a `t:BSV.Block.t/0`.
As `from_binary/2` but returns the result or raises an exception.
"""
@spec from_binary!(binary(), keyword()) :: t()
def from_binary!(data, opts \\ []) when is_binary(data) do
case from_binary(data, opts) do
{:ok, block} ->
block
{:error, error} ->
raise BSV.DecodeError, error
end
end
@doc """
Serialises the given `t:BSV.Block.t/0` into a binary.
## Options
The accepted options are:
* `:encoding` - Optionally encode the binary with either the `:base64` or `:hex` encoding scheme.
"""
@spec to_binary(t(), keyword()) :: binary()
def to_binary(%__MODULE__{} = block, opts \\ []) do
encoding = Keyword.get(opts, :encoding)
block
|> Serializable.serialize()
|> encode(encoding)
end
@doc """
Calculates the `t:BSV.Block.merkle_root/0` of the given block and compares the
result to the value contained in the `t:BSV.BlockHeader.t/0`.
"""
@spec validate_merkle_root(t()) :: boolean()
def validate_merkle_root(%__MODULE__{header: header} = block),
do: calc_merkle_root(block) == (header && header.merkle_root)
defimpl Serializable do
@impl true
def parse(block, data) do
with {:ok, header, data} <- Serializable.parse(%BlockHeader{}, data),
{:ok, txns, rest} <- VarInt.parse_items(data, Tx)
do
{:ok, struct(block, [
header: header,
txns: txns,
]), rest}
end
end
@impl true
def serialize(%{header: header, txns: txns}) do
header_data = Serializable.serialize(header)
txns_data = Enum.reduce(txns, VarInt.encode(length(txns)), fn tx, data ->
data <> Serializable.serialize(tx)
end)
<<
header_data::binary,
txns_data::binary
>>
end
end
end
# end of file: lib/bsv/block.ex
defmodule Beeline.HealthChecker do
@moduledoc """
A GenServer which periodically polls a producer's stream positions and
process status.
This GenServer emits `:telemetry` measurements which serve as an interface
for exporting this health-check information to an external monitoring
service.
`Beeline.HealthChecker.Logger` is included as a reasonable default consumer
for this telemetry. You may wish to export this telemetry to another system
such as appsignal or grafana in order to create alerts when processors fall
behind.
## Telemetry
* `[:beeline, :health_check, :stop]` - dispatched by each HealthChecker
process after polling the producer's position and process information
* Measurement: `%{duration: native_time}` - the time taken to perform
the position and process checks
* Metadata, a map with the following keys:
* `:producer` (module) - the producer module being measured
* `:alive?` (boolean) - whether the producer process is alive
* `:stream_name` (string) - the EventStoreDB stream name from which
the producer reads
* `:hostname` (string) - the hostname of the machine on which
the health checker process is being run
* `:interval` (integer) - the milliseconds the health checker process
has waited (minus drift) since the last poll
* `:drift` (integer) - the milliseconds used for drifting the interval
for the last poll
* `:measurement_time` (UTC datetime) - the time when the poll started
* `:prior_position` (integer) - the `:current_position` from the last
poll
* `:current_position` (integer) - the current stream position of
the producer
* `:head_position` (integer) - the stream position of the head
(the latest event) of the EventStoreDB stream
"""
@behaviour GenServer
defstruct [
:producer,
:stream_name,
:interval_fn,
:drift_fn,
:get_stream_position,
:get_head_position,
:hostname,
interval: 0,
drift: 0,
current_position: -1
]
@doc false
def child_spec({config, key, producer}) do
%{
id: {__MODULE__, key},
start: {__MODULE__, :start_link, [{config, producer}]},
type: :worker,
restart: :permanent
}
end
@doc false
def start_link(opts) do
GenServer.start_link(__MODULE__, opts)
end
@impl GenServer
def init({config, producer}) do
state =
%__MODULE__{
producer: producer.name,
stream_name: producer.stream_name,
get_head_position: fn ->
Beeline.EventStoreDB.latest_event_number(
producer.adapter,
producer.connection,
producer.stream_name
)
end,
get_stream_position:
wrap_function(config.get_stream_position, producer.name),
interval_fn: wrap_function(config.health_check_interval),
drift_fn: wrap_function(config.health_check_drift),
hostname: hostname()
}
|> schedule_next_poll()
{:ok, state}
end
@impl GenServer
def handle_info(:poll, state) do
state =
state
|> poll_producer()
|> schedule_next_poll()
{:noreply, state}
end
defp schedule_next_poll(state) do
interval = state.interval_fn.()
drift = state.drift_fn.()
Process.send_after(self(), :poll, interval + drift)
%__MODULE__{
state
| drift: drift,
interval: interval
}
end
defp poll_producer(state) do
metadata = %{
producer: state.producer,
stream_name: state.stream_name,
hostname: state.hostname,
interval: state.interval,
drift: state.drift,
measurement_time: DateTime.utc_now(),
prior_position: state.current_position
}
:telemetry.span(
[:beeline, :health_check],
metadata,
fn ->
current_position = state.get_stream_position.()
metadata =
Map.merge(metadata, %{
current_position: current_position,
head_position: state.get_head_position.(),
alive?: alive?(state.producer)
})
state = put_in(state.current_position, current_position)
{state, metadata}
end
)
end
defp alive?(producer) do
case GenServer.whereis(producer) do
nil -> false
pid -> Process.alive?(pid)
end
end
defp hostname do
case :inet.gethostname() do
{:ok, hostname_charlist} ->
hostname_charlist |> to_string()
_ ->
nil
end
end
# coveralls-ignore-start
defp wrap_function(function) when is_function(function, 0), do: function
defp wrap_function({m, f, a}), do: fn -> apply(m, f, a) end
defp wrap_function(value), do: fn -> value end
defp wrap_function(function, producer_name) when is_function(function, 1) do
fn -> function.(producer_name) end
end
defp wrap_function({m, f, a}, producer_name) do
fn -> apply(m, f, [producer_name | a]) end
end
# coveralls-ignore-stop
end
# end of file: lib/beeline/health_checker.ex
defmodule Bfs do
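@moduledoc """
Breadth-first search over a graph represented as a map from vertex to a
`MapSet` of neighboring vertices.
"""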
defp neighbors(graph, v) do
{Map.get(graph, v, MapSet.new()), v}
end
defp find_path(_, from, to, path) when from == to, do: path
defp find_path(prev, from, to, path) when from != to, do: find_path(prev, from, Map.get(prev, to), [Map.get(prev, to) | path])
@doc """
Given graph and 2 points, return the distance and shortest path.
## Example
iex> graph = %{"0" => MapSet.new(["2"]),"1" => MapSet.new(["1"]),"2" => MapSet.new(["0", "3", "4"]),"3" => MapSet.new(["2", "4"]),"4" => MapSet.new(["2", "3", "6"]),"5" => MapSet.new(["6"]),"6" => MapSet.new(["4", "5"])}
iex> Bfs.shortest_path!(graph, "0", "0")
{0, ["0"]}
iex> Bfs.shortest_path!(graph, "0", "6")
{3, ["0", "2", "4", "6"]}
iex> Bfs.shortest_path!(graph, "3", "6")
{2, ["3", "4", "6"]}
iex> Bfs.shortest_path!(graph, "6", "3")
{2, ["6", "4", "3"]}
"""
def shortest_path!(graph, from, to) when from != to, do: shortest_path(graph, from, [from], MapSet.new(), to, 0, %{})
def shortest_path!(_, from, to) when from == to, do: {0, [from]}
defp shortest_path(_, _, [], _, _, _, _) do
throw "NOT FOUND"
end
defp shortest_path(graph, from, frontier, seen, to, dist, prev) do
result = for v <- frontier do
neighbors(graph, v)
end
unseen_neighbors = result
|> Enum.map(fn x -> x |> elem(0) end)
|> Enum.reduce(MapSet.new, &MapSet.union/2)
|> MapSet.difference(seen)
newdist = dist + 1
newprev = result
|> Enum.reduce(prev, fn x, acc -> Map.merge(
Enum.reduce(x |> elem(0), %{}, fn (y, acc) ->
cond do
MapSet.member?(unseen_neighbors, y) -> Map.put(acc, y, x |> elem(1))
true -> acc
end
end
), acc) end)
case MapSet.member?(unseen_neighbors, to) do
true -> {newdist, find_path(newprev, from, to, [to])}
false -> shortest_path(
graph,
from,
MapSet.to_list(unseen_neighbors),
MapSet.union(seen, unseen_neighbors),
to,
newdist,
newprev
)
end
end
end
"""
graph = %{
"0" => MapSet.new(["2"]),
"1" => MapSet.new(["1"]),
"2" => MapSet.new(["0", "3", "4"]),
"3" => MapSet.new(["2", "4"]),
"4" => MapSet.new(["2", "3", "6"]),
"5" => MapSet.new(["6"]),
"6" => MapSet.new(["4", "5"])
}
IO.inspect "Graph"
IO.inspect graph
IO.inspect "0 -> 0"
IO.inspect Bfs.shortest_path!(graph, "0", "0")
IO.inspect "0 -> 6"
IO.inspect Bfs.shortest_path!(graph, "0", "6")
IO.inspect "3 -> 6"
IO.inspect Bfs.shortest_path!(graph, "3", "6")
IO.inspect "6 -> 3"
IO.inspect Bfs.shortest_path!(graph, "6", "3")
IO.inspect "7 -> 5"
IO.inspect Bfs.shortest_path!(graph, "7", "5")
IO.inspect "5 -> 7"
IO.inspect Bfs.shortest_path!(graph, "5", "7")
"""
# end of file: bfs_elixir/lib/bfs.ex
defmodule GGity.Labels do
@moduledoc """
Common functions for transforming axis tick labels.
Break (i.e. axis tick or legend item) labels are formatted based on a
scale's `:labels` option. This option can be provided in several forms:
- `nil` - No labels are drawn
- `:waivers` - drawn using the default formatting (`Kernel.to_string/1`)
- a function that takes a single argument representing the break value and returns a binary
- an atom, representing the name of a built-in formatting function (e.g., `commas/1`)
Note that the built-in formatting functions are not intended to be robust. One option for
finely-tuned formatting would be to pass functions from the [Cldr family of packages](https://hexdocs.pm/ex_cldr) (must be
added as a separate dependency).
## Examples
```
data
|> Plot.new(%{x: "x", y: "y"})
|> Plot.geom_point()
|> Plot.scale_x_continuous(labels: nil)
# value 1000 is printed as an empty string
```
```
data
|> Plot.new(%{x: "x", y: "y"})
|> Plot.geom_point()
|> Plot.scale_x_continuous() # This is equivalent to Plot.scale_x_continuous(labels: :waivers)
# value 1000 (integer) is printed as "1000"
# value 1000.0 (float) is printed as "1.0e3"
```
```
data
|> Plot.new(%{x: "x", y: "y", c: "color"})
|> Plot.geom_point()
|> Plot.scale_color_viridis(labels: fn value -> value <> "!" end)
# value "First Item" is printed as "First Item!"
```
```
data
|> Plot.new(%{x: "x", y: "y"})
|> Plot.geom_point()
|> Plot.scale_x_continuous(labels: :commas)
# value 1000 (integer) is printed as "1,000"
# value 1000.0 (float) is printed as "1,000"
```
Date scales (e.g., `GGity.Scale.X.Date`) are a special case.
For those scales, if a value for `:date_labels` has been specified, that
pattern overrides any value for the `:labels` option. See
`GGity.Plot.scale_x_date/2` for more information regarding
date labels.
"""
alias GGity.{Labels, Scale}
@type tick_value() :: %Date{} | %DateTime{} | number()
@doc false
@spec format(map(), tick_value()) :: String.t()
def format(%scale_type{date_labels: {pattern, options}}, value)
when scale_type in [Scale.X.Date, Scale.X.DateTime] do
NimbleStrftime.format(value, pattern, options)
end
def format(%scale_type{date_labels: pattern}, value)
when scale_type in [Scale.X.Date, Scale.X.DateTime] and is_binary(pattern) do
NimbleStrftime.format(value, pattern)
end
def format(%{labels: :waivers}, value), do: to_string(value)
def format(%{labels: nil}, _value), do: ""
def format(%{labels: built_in_function}, value) when is_atom(built_in_function) do
apply(Labels, built_in_function, [value])
end
def format(%{labels: formatter}, value) when is_function(formatter) do
formatter.(value)
end
@doc """
Applies a comma separator to a number and converts it to a string.
If the number is a float, it is first rounded to the nearest integer using `Kernel.round/1`.
Note that simple floating point arithmetic is used; the various issues/errors
associated with floating point values apply.
iex> GGity.Labels.commas(5000.0)
"5,000"
iex> GGity.Labels.commas(1000.6)
"1,001"
iex> GGity.Labels.commas(100.0)
"100"
iex> GGity.Labels.commas(10_000_000)
"10,000,000"
"""
@spec commas(number()) :: String.t()
def commas(value) when is_number(value) do
value
|> round()
|> to_charlist()
|> Enum.reverse()
|> comma_separate([])
|> to_string()
end
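# Walks the reversed digit charlist three digits at a time, inserting a
# comma between each completed group of three; the trailing clauses handle
# the leftover one, two, or three digits at the front of the number.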
defp comma_separate([first, second, third | []], acc) do
[third, second, first | acc]
end
defp comma_separate([first, second, third | tail], acc) do
acc = [',', third, second, first | acc]
comma_separate(tail, acc)
end
defp comma_separate([first | [second]], acc), do: [second, first | acc]
defp comma_separate([first], acc), do: [first | acc]
defp comma_separate([], acc), do: acc
@doc """
Formats a number in U.S. dollars and cents.
If the value is greater than or equal to 100,000 rounds to the nearest
dollar and does not display cents.
Note that simple floating point arithmetic is used; the various issues/errors
associated with floating point values apply.
iex> GGity.Labels.dollar(5000.0)
"$5,000.00"
iex> GGity.Labels.dollar(1000.6)
"$1,000.60"
iex> GGity.Labels.dollar(100.0)
"$100.00"
iex> GGity.Labels.dollar(10_000_000)
"$10,000,000"
"""
@spec dollar(number()) :: String.t()
def dollar(value) when is_float(value) do
cents =
((Float.round(value, 2) - floor(value)) * 100)
|> round()
|> Integer.digits()
|> Enum.take(2)
|> Enum.join()
|> String.pad_trailing(2, "0")
case value do
value when value >= 100_000 ->
"$" <> commas(floor(value))
value ->
"$#{commas(floor(value))}.#{cents}"
end
end
def dollar(value) when is_integer(value) do
dollar(value * 1.0)
end
@doc """
Formats a number as a percent.
Accepts a `:precision` option specifying the number of decimal places
to be displayed.
Note that simple floating point arithmetic is used; the various issues/errors
associated with floating point values apply.
iex> GGity.Labels.percent(0.5)
"50%"
iex> GGity.Labels.percent(0.111)
"11%"
iex> GGity.Labels.percent(0.015, precision: 1)
"1.5%"
iex> GGity.Labels.percent(10)
"1000%"
"""
@spec percent(number(), keyword()) :: String.t()
def percent(value, options \\ [precision: 0])
def percent(value, precision: precision) when is_float(value) do
percent_value =
(value * 100)
|> Float.round(precision)
rounded_value =
case precision do
0 -> round(percent_value)
_other -> percent_value
end
to_string(rounded_value) <> "%"
end
def percent(value, _options) when is_integer(value) do
to_string(value * 100) <> "%"
end
end
|
lib/ggity/labels.ex
| 0.930993
| 0.921003
|
labels.ex
|
starcoder
|
defmodule Brain.PIDController do
use GenServer
require Logger
alias Brain.{ BlackBox, Memory }
def init(_) do
{:ok, configuration} = load_configuration()
{:ok, Map.merge(configuration,
%{
last_input: 0,
integrative_term: 0,
setpoint: 0,
process_name: Process.info(self())[:registered_name],
last_timestamp: nil
})
}
end
def start_link(opts) do
Logger.debug "Starting #{__MODULE__}..."
GenServer.start_link(__MODULE__, nil, opts)
end
def handle_call(:snapshot, _from, state) do
snapshot = %{
name: state[:process_name] |> Module.split |> List.last,
kp: state[:kp],
ki: state[:ki],
kd: state[:kd],
minimum_output: state[:minimum_output],
maximum_output: state[:maximum_output]
}
{:reply, {:ok, snapshot}, state}
end
def handle_call({:compute, input, setpoint, sample_rate}, _from, state) do
sample_rate_in_seconds = sample_rate / 1000
error = setpoint - input
proportional_term = state[:kp] * error
integrative_term = state[:integrative_term] + (state[:ki] * sample_rate_in_seconds) * error
derivative_term = (state[:kd] / sample_rate_in_seconds) * (input - state[:last_input])
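# The derivative is taken on the measured input rather than on the error,
# which avoids "derivative kick" when the setpoint changes; the term is
# therefore subtracted from the output below.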
# integrative_term = min(integrative_term, state[:maximum_output])
# integrative_term = max(integrative_term, state[:minimum_output])
output = proportional_term + integrative_term - derivative_term
output = min(output, state[:maximum_output])
output = max(output, state[:minimum_output])
trace(state, error, output, proportional_term, integrative_term, derivative_term, setpoint)
{:reply, {:ok, output}, Map.merge(state, %{
integrative_term: integrative_term,
last_input: input
})}
end
def handle_cast({:tune, %{kp: kp, ki: ki, kd: kd}}, state) do
state = Map.merge(state, %{
kp: kp,
ki: ki,
kd: kd
})
Logger.info "#{__MODULE__} (#{state[:process_name]}) tuned to kp: #{kp}, ki: #{ki}, kd: #{kd}..."
:ok = save_configuration(state)
{:noreply, state}
end
def handle_cast({:tune, %{kp: kp}}, state) do
state = Map.merge(state, %{
kp: kp
})
Logger.info "#{__MODULE__} (#{state[:process_name]}) tuned to kp: #{kp}..."
:ok = save_configuration(state)
{:noreply, state}
end
def handle_cast({:tune, %{ki: ki}}, state) do
state = Map.merge(state, %{
ki: ki
})
Logger.info "#{__MODULE__} (#{state[:process_name]}) tuned to ki: #{ki}..."
:ok = save_configuration(state)
{:noreply, state}
end
def handle_cast({:tune, %{kd: kd}}, state) do
state = Map.merge(state, %{
kd: kd
})
Logger.info "#{__MODULE__} (#{state[:process_name]}) tuned to kd: #{kd}..."
:ok = save_configuration(state)
{:noreply, state}
end
def handle_cast(:reset, state) do
new_state = %{
integrative_term: 0
}
Logger.info "#{__MODULE__} (#{state[:process_name]}) reinitialized..."
{:noreply, Map.merge(state, new_state)}
end
defp trace(state, error, output, proportional_term, integrative_term, derivative_term, setpoint) do
data = %{
kp: state[:kp],
ki: state[:ki],
kd: state[:kd],
proportional_term: proportional_term,
integrative_term: integrative_term,
derivative_term: derivative_term,
setpoint: setpoint,
error: error,
output: output
}
BlackBox.trace(__MODULE__, Process.info(self())[:registered_name], data)
end
def to_csv(data) do
{:ok, data |> Map.values |> Enum.join(",")}
end
def csv_headers(data) do
{:ok, data |> Map.keys |> Enum.join(",")}
end
def compute(pid, input, setpoint, sample_rate) do
GenServer.call(pid, {:compute, input, setpoint, sample_rate})
end
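# Example (RollRatePid is a hypothetical registered controller name;
# the sample rate is in milliseconds):
#   {:ok, output} = Brain.PIDController.compute(RollRatePid, 12.5, 0.0, 10)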
def configure(pid, configuration) do
GenServer.call(pid, Tuple.insert_at(configuration, 0, :configure))
end
def snapshot(pid) do
GenServer.call(pid, :snapshot)
end
defp load_configuration do
process_name = Process.info(self())[:registered_name]
configuration = case Memory.retreive do
{:ok, nil} ->
Logger.debug("#{process_name} loaded default configuration.")
Application.get_env(:brain, process_name)
{:ok, saved_configuration} ->
Logger.debug("#{process_name} loaded saved configuration.")
saved_configuration
end
{:ok,
%{
kp: (configuration[:kp] || configuration["kp"]),
ki: (configuration[:ki] || configuration["ki"]),
kd: (configuration[:kd] || configuration["kd"]),
minimum_output: configuration[:minimum_output] || configuration["minimum_output"],
maximum_output: configuration[:maximum_output] || configuration["maximum_output"]
}
}
end
defp save_configuration(state) do
Memory.store(%{
kp: state[:kp],
ki: state[:ki],
kd: state[:kd],
minimum_output: state[:minimum_output],
maximum_output: state[:maximum_output]
})
end
end
|
apps/brain/lib/pid_controller.ex
| 0.781622
| 0.408896
|
pid_controller.ex
|
starcoder
|
defmodule LogWatcher.FileWatcher do
@moduledoc """
A GenServer that monitors a directory for file system changes,
and broadcasts progress over Elixir PubSub. The directory, called
the "session_log_path" is identified by a "session_id" that defines
the well-known PubSub topic, `session:session_id`.
Once started, clients of the server add individual files to be
watched for file modifications using the Elixir `FileSystem`
module, which uses POSIX inotify tools.
The files being watched are expected to produce output as JSON lines.
An Elixir `File.Stream` is opened on each file being watched.
If a file modification is detected, the server reads the new output
from the stream, parses each JSON line, and broadcasts the parsed
object on the PubSub session topic. A decoded line in the log file
is expected to be an Elixir map that includes a `:status` item.
The `:status` value is expected to be a string with one of these values:
"initializing" "created", "reading", "validating", "running",
"cancelled" or "completed". If the `:status` item is missing,
it will be set to "undefined".
A `:task_updated` message is sent for each line successfully parsed
from the file being watched.
A `:task_started` message is sent for the first line that has a
status of "running", "cancelled" or "completed".
A `:task_completed` message is sent for each line that has a status
of "cancelled" or "completed", but this is expected to happen
at most one time.
The payload for each of these messages is the file name (without
the path) that produced the change, and the map that was parsed,
containing at a minimum the `:session_id` and `:status` items.
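For example (an illustrative line, not taken from a real session): if a
watched file `run-log.jsonl` gains the line
`{"session_id": "S123", "status": "running"}`, subscribers on the
`session:S123` topic receive `{:task_updated, "run-log.jsonl", info}`, plus
a one-time `{:task_started, "run-log.jsonl", info}` for the first started
status seen.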
"""
use GenServer
require Logger
alias LogWatcher.Tasks.Session
defmodule WatchedFile do
@enforce_keys [:stream]
defstruct stream: nil,
position: 0,
size: 0,
last_modified: 0,
start_sent: false
@type t :: %__MODULE__{
stream: File.Stream.t(),
position: integer(),
size: integer(),
last_modified: integer(),
start_sent: boolean()
}
@doc """
Construct a `LogWatcher.FileWatcher.WatchedFile` struct for a directory and file name.
"""
@spec new(String.t(), String.t()) :: t()
def new(dir, file_name) do
path = Path.join(dir, file_name)
%__MODULE__{stream: File.stream!(path)}
end
end
defimpl String.Chars, for: LogWatcher.FileWatcher.WatchedFile do
def to_string(%WatchedFile{stream: stream, position: position, start_sent: start_sent}) do
"%LogWatcher.FileWatcher.WatchedFile{stream: #{stream.path}, position: #{position}, start_sent: #{
start_sent
}}"
end
end
@enforce_keys [:fs_pid, :session_id, :session_log_path]
defstruct fs_pid: nil,
session_id: nil,
session_log_path: nil,
files: %{}
@type state() :: %__MODULE__{
fs_pid: pid(),
session_id: String.t(),
session_log_path: String.t(),
files: map()
}
@type gproc_key :: {:n, :l, {:session_id, String.t()}}
@task_started_status ["running", "cancelled", "completed"]
@task_completed_status ["cancelled", "completed"]
@doc """
Public interface. Start the GenServer for a session.
"""
@spec start_link(Keyword.t()) :: GenServer.on_start()
def start_link(opts) do
session_id = Keyword.fetch!(opts, :session_id)
session_log_path = Keyword.fetch!(opts, :session_log_path)
_ = Logger.info("FileWatcher start_link #{session_id} #{session_log_path}")
GenServer.start_link(__MODULE__, [session_id, session_log_path], name: via_tuple(session_id))
end
@doc """
Public interface. Sends a call to kill the GenServer.
"""
@spec stop(String.t()) :: :ok
def stop(session_id) do
GenServer.call(via_tuple(session_id), :kill)
end
@doc """
Public interface. Add a file to the watch list.
"""
@spec add_watch(String.t(), String.t()) :: {:ok, String.t()}
def add_watch(session_id, file_name) do
_ = Logger.info("FileWatcher add_watch #{session_id} #{file_name}")
GenServer.call(via_tuple(session_id), {:add_watch, file_name})
end
@doc """
Public interface. Remove a file from the watch list.
"""
@spec remove_watch(String.t(), String.t()) :: {:ok, String.t()}
def remove_watch(session_id, file_name) do
GenServer.call(via_tuple(session_id), {:remove_watch, file_name})
end
@doc """
Public interface. Return the :via tuple for this server.
"""
@spec via_tuple(String.t()) :: {:via, :gproc, gproc_key()}
def via_tuple(session_id) do
{:via, :gproc, registry_key(session_id)}
end
@doc """
Public interface. Return the pid for a server.
"""
@spec registered(String.t()) :: pid() | :undefined
def registered(session_id) do
:gproc.where(registry_key(session_id))
end
@doc """
Public interface. Return the key used to register a server.
"""
@spec registry_key(String.t()) :: gproc_key()
def registry_key(session_id) do
{:n, :l, {:session_id, session_id}}
end
@doc false
@impl true
@spec init(term()) :: {:ok, state()}
def init([session_id, session_log_path]) do
_ = Logger.info("FileWatcher init #{session_id} #{session_log_path}")
args = [dirs: [session_log_path], recursive: false]
_ = Logger.info("FileWatcher start FileSystem link with #{inspect(args)}")
{:ok, fs_pid} = FileSystem.start_link(args)
FileSystem.subscribe(fs_pid)
initial_state = %__MODULE__{
fs_pid: fs_pid,
session_id: session_id,
session_log_path: session_log_path
}
{:ok, initial_state}
end
# events: {#PID<0.319.0>,
# "priv/mock_task/output/T10-create-003-log.jsonl",
# [:created]}
# events: {#PID<0.319.0>,
# "priv/mock_task/output/T10-create-003-log.jsonl",
# [:modified]}
# events: {#PID<0.319.0>,
# "priv/mock_task/output/T10-create-003-log.jsonl",
# [:modified, :closed]}
@doc false
@impl true
@spec handle_call(term(), GenServer.from(), state()) ::
{:reply, term(), state()} | {:stop, :normal, state()}
def handle_call(:kill, _from, state) do
# Handles the :kill call.
# Checks for any final lines before stopping the GenServer.
next_state = %__MODULE__{state | files: check_all_files(state)}
{:stop, :normal, :ok, next_state}
end
end
def handle_call(
{:add_watch, file_name},
_from,
%__MODULE__{session_id: session_id, session_log_path: session_log_path, files: files} =
state
) do
file = WatchedFile.new(session_log_path, file_name)
next_file = check_for_lines(session_id, file)
_ = Logger.info("FileWatcher watch added for #{file_name}")
next_state = %__MODULE__{state | files: Map.put_new(files, file_name, next_file)}
{:reply, {:ok, file_name}, next_state}
end
@doc false
def handle_call(
{:remove_watch, file_name},
_from,
%__MODULE__{session_id: session_id, files: files} = state
) do
next_state =
case Map.get(files, file_name) do
%WatchedFile{} = file ->
_ = check_for_lines(session_id, file)
%__MODULE__{state | files: Map.delete(files, file_name)}
_ ->
state
end
{:reply, {:ok, file_name}, next_state}
end
@doc false
@impl true
@spec handle_info(term(), state()) :: {:noreply, state()} | {:stop, :normal, state()}
def handle_info(
{:file_event, fs_pid, {path, events}},
%__MODULE__{fs_pid: fs_pid, session_id: session_id, files: files} = state
) do
# Logger.info("FileWatcher #{inspect(fs_pid)} #{path}: #{inspect(events)}")
file_name = Path.basename(path)
case Map.get(files, file_name) do
nil ->
{:noreply, state}
%WatchedFile{} = file ->
next_state =
if Enum.member?(events, :modified) do
next_file = check_for_lines(session_id, file)
%__MODULE__{state | files: Map.put(files, file_name, next_file)}
else
state
end
{:noreply, next_state}
end
end
def handle_info(
{:file_event, fs_pid, :stop},
%__MODULE__{fs_pid: fs_pid} = state
) do
_ = Logger.info("FileWatcher #{inspect(fs_pid)} :stop")
{:stop, :normal, state}
end
@spec check_all_files(state()) :: map()
defp check_all_files(%__MODULE__{session_id: session_id, files: files}) do
Enum.map(files, fn {file_name, file} ->
next_file = check_for_lines(session_id, file)
{file_name, next_file}
end)
|> Enum.into(%{})
end
@spec check_for_lines(String.t(), WatchedFile.t()) :: WatchedFile.t()
defp check_for_lines(
session_id,
%WatchedFile{
stream: stream,
position: position,
# last_modified: last_modified,
size: size
} = file
) do
# Don't check change in :mtime! Things can happen fast!
with {:exists, true} <- {:exists, File.exists?(stream.path)},
{:ok, stat} <- File.stat(stream.path),
# {:mtime, true} <- {:mtime, stat.mtime != last_modified},
{:size, true} <- {:size, stat.size >= size} do
lines =
stream
|> Stream.drop(position)
|> Enum.into([])
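# `position` counts lines already consumed; dropping them and collecting
# the rest implements a simple line-oriented tail of the file.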
next_file = %WatchedFile{
file
| position: position + length(lines),
size: stat.size,
last_modified: stat.mtime
}
handle_lines(session_id, next_file, lines)
else
{:exists, _} ->
# Logger.error("FileWatcher #{stream.path} does not exist")
%WatchedFile{file | stream: File.stream!(stream.path), position: 0, size: 0}
{:size, _} ->
_ = Logger.error("FileWatcher no increase in size")
%WatchedFile{file | stream: File.stream!(stream.path), position: 0, size: 0}
# {:mtime, _} ->
# Logger.error("FileWatcher no change in mtime")
# file
{:error, reason} ->
_ = Logger.error("FileWatcher cannot stat #{stream.path}: #{inspect(reason)}")
%WatchedFile{file | stream: File.stream!(stream.path), position: 0, size: 0}
end
end
@spec handle_lines(String.t(), WatchedFile.t(), [String.t()]) :: WatchedFile.t()
defp handle_lines(_session_id, %WatchedFile{} = file, []), do: file
defp handle_lines(session_id, %WatchedFile{stream: stream} = file, lines) do
file_name = Path.basename(stream.path)
_ = Logger.info("FileWatcher got #{Enum.count(lines)} line(s) from #{file_name}")
Enum.reduce(lines, file, fn line, acc ->
broadcast_changes(session_id, file_name, line, acc)
end)
end
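# Decodes a single JSON line and broadcasts the task lifecycle messages
# described in the moduledoc. Note that `keys: :atoms` creates atoms from
# JSON keys, which is acceptable only because the log files are trusted input.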
@spec broadcast_changes(String.t(), String.t(), String.t(), WatchedFile.t()) :: WatchedFile.t()
defp broadcast_changes(session_id, file_name, line, %WatchedFile{start_sent: start_sent} = file) do
case Jason.decode(line, keys: :atoms) do
{:ok, data} when is_map(data) ->
info =
data
|> Map.put_new(:session_id, session_id)
|> Map.put_new(:status, "undefined")
topic = Session.events_topic(info.session_id)
Session.broadcast(topic, {:task_updated, file_name, info})
next_file =
if !start_sent && Enum.member?(@task_started_status, info.status) do
Session.broadcast(topic, {:task_started, file_name, info})
%WatchedFile{file | start_sent: true}
else
file
end
if Enum.member?(@task_completed_status, info.status) do
Session.broadcast(topic, {:task_completed, file_name, info})
end
next_file
_ ->
_ = Logger.error("FileWatcher, ignoring non-JSON: #{line}")
file
end
end
end
|
apps/log_watcher/lib/log_watcher/file_watcher.ex
| 0.742608
| 0.436442
|
file_watcher.ex
|
starcoder
|
defmodule Day23 do
def solveA(filename) do
instructions =
filename
|> parse
rec_solve(instructions, %{}, 0, 0, tuple_size(instructions) - 1)
|> elem(0)
end
def solveB(filename) do
# Looking at the instructions we can see that h will be incremented when f = 0,
# f = 0 when b = d * e for some d and e, and b will be incremented until it
# reaches c. Since d and e both start at 2 and are incremented by 1, the only
# values of b with no matching product d * e are the prime numbers.
# So the number of times h is incremented is the number of composite values
# of b, starting at the initial value of b, stepping by the increment given in
# the instructions, and stopping when b reaches c.
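# For example (hypothetical numbers): with b starting at 106_500, c = 123_500
# and a step of 17, there are (123_500 - 106_500) / 17 + 1 = 1_001 values of b
# to test for primality.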
all_instructions =
filename
|> parse
|> Tuple.to_list
init_instructions =
all_instructions
|> Enum.take(8)
|> List.to_tuple
[{_, _, _, _, neg_increment}, _] =
all_instructions
|> Enum.drop(30)
increment = - neg_increment
{_, %{"b" => start, "c" => stop}} =
init_instructions
|> rec_solve(%{"a" => 1}, 0, 0, tuple_size(init_instructions) - 1)
last =
(stop - start) / increment
|> round
(for n <- 0..last, do: start + n * increment)
|> Enum.reject(&is_prime/1)
|> length
end
def parse(filename) do
filename
|> File.stream!([:utf8], :line)
|> Enum.map(&parse_line/1)
|> List.to_tuple
end
def parse_line(line) do
[instruction, x, y] =
line
|> String.trim
|> String.split(" ")
{typex, x} = try_reg_val x
{typey, y} = try_reg_val y
{instruction, typex, x, typey, y}
end
def try_reg_val(x) do
try do
{:val, String.to_integer x}
rescue
ArgumentError ->
{:reg, x}
end
end
def rec_solve(_instructions, registers, pos, count, max)
when pos < 0 or pos > max do
{count, registers}
end
def rec_solve(instructions, registers, pos, count, max) do
{instr, typex, x, typey, y} = elem(instructions, pos)
valx =
case typex do
:reg -> Map.get registers, x, 0
:val -> x
end
valy =
case typey do
:reg -> Map.get registers, y, 0
:val -> y
end
{registers, pos, count} =
case instr do
"set" ->
registers = Map.put registers, x, valy
{registers, pos + 1, count}
"sub" ->
registers = Map.put registers, x, valx - valy
{registers, pos + 1, count}
"mul" ->
registers = Map.put registers, x, valx * valy
{registers, pos + 1, count + 1}
"jnz" ->
pos =
if valx != 0 do
pos + valy
else
pos + 1
end
{registers, pos, count}
end
rec_solve(instructions, registers, pos, count, max)
end
def is_prime(n) when n < 4, do: n > 1
def is_prime(n) do
top = :math.sqrt(n) |> round
2..top
|> Enum.all?(fn i -> rem(n, i) != 0 end)
end
end
|
2017/elixir/day23/lib/day23.ex
| 0.560253
| 0.548492
|
day23.ex
|
starcoder
|
defmodule Honeybee.Handler do
@moduledoc """
`Honeybee.Handler` provides a simple way to invoke methods from a pluggable module.
When a handler is called, it expects to find the method to invoke in the `:action` option.
For instance, a handler invoked using `plug Handler, action: :create`, will invoke the create method.
The arguments passed to the action method is the conn, and the options for the invocation.
The options for the invocation are expected to be in the `:opts` option.
An example of a handler plug declaration looks like this
```
plug Handler, action: :create, opts: []
```
A handler can itself contain a plug pipeline, which is invoked prior to invocation of an action.
These plugs behave very similarly to the plugs of the `Plug.Builder`, and support guards just like the `Honeybee` Router.
However, in addition to the `method` variable available in guards, handler plugs can also guard on the `action` variable.
Here is an example of a Honeybee Handler implementation
```
defmodule Example.Handler do
use Honeybee.Handler
plug Authorization when action in [:create]
def create(conn, _opts) do
body = conn.body_params
user = Models.User.create(body)
Plug.Conn.send_resp(conn, 200, user)
end
end
```
"""
@doc false
defmacro __using__(_opts \\ []) do
quote do
@behaviour Plug
def init(opts), do: Keyword.split(opts, [:action])
def call(%Elixir.Plug.Conn{} = conn, {[action: action], opts}) when is_atom(action) do
honeybee_handler_call(conn, action, Keyword.get(opts, :opts, []))
end
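# honeybee_action_call/2 is appended as the final plug of the compiled
# pipeline (see __before_compile__/1) and dispatches to the action function
# defined on the using module.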
def honeybee_action_call(conn, {action, opts}), do: apply(__MODULE__, action, [conn, opts])
defoverridable init: 1, call: 2
import Honeybee.Handler
Module.register_attribute(__MODULE__, :plugs, accumulate: true)
@before_compile Honeybee.Handler
end
end
@doc false
defmacro __before_compile__(env) do
opts = quote do: opts
method = {:method, [generated: true], nil}
action = {:action, [generated: true], nil}
plugs = [
{:honeybee_action_call, {:unquote, [], [{action, opts}]}, true}
| Module.get_attribute(env.module, :plugs)
]
{conn, body} = Plug.Builder.compile(env, plugs, [])
quote generated: true do
defp honeybee_handler_call(
%Elixir.Plug.Conn{method: unquote(method)} = unquote(conn), unquote(action), unquote(opts)
), do: unquote(body)
end
end
@doc """
Declares a plug.
The `plug/2` macro can be used to declare a plug in the plug pipeline.
`Honeybee.Handler` supports plugs similar to the `Plug.Builder`, however there are a couple of caveats.
`plug/2` has guard support, which allows us to guard for the `method` and the `action` of the plugged call.
This allows you to write plugs which only apply to certain http-verbs of requests, and only certain actions.
```
plug Authorization when method in ["POST", "PUT", "PATCH"] and action in [:create, :update]
```
For more information on the plug pattern see `Plug`
"""
defmacro plug(plug, opts \\ [])
defmacro plug({:when, _, [plug, guards]}, opts), do: gen_plug(__CALLER__, plug, opts, guards)
defmacro plug(plug, {:when, _, [opts, guards]}), do: gen_plug(__CALLER__, plug, opts, guards)
defmacro plug(plug, opts), do: gen_plug(__CALLER__, plug, opts, true)
defp gen_plug(env, plug, opts, guards) do
plug = Macro.expand(plug, %{env | function: {:init, 1}})
quote do: @plugs {unquote(plug), unquote(opts), unquote(Macro.escape(guards))}
end
end
|
lib/honeybee/handler.ex
| 0.913165
| 0.874346
|
handler.ex
|
starcoder
|
defmodule AshPostgres.DataLayer do
@manage_tenant %Ash.Dsl.Section{
name: :manage_tenant,
describe: """
Configuration for the behavior of a resource that manages a tenant
""",
examples: [
"""
manage_tenant do
template ["organization_", :id]
create? true
update? false
end
"""
],
schema: [
template: [
type: {:custom, __MODULE__, :tenant_template, []},
required: true,
doc: """
A template that will cause the resource to create/manage the specified schema.
Use this if you have a resource that, when created, should create a new tenant
for you. For example, if you have a `customer` resource and you want to create
a schema for each customer based on their id, e.g. `customer_10`, set this option
to `["customer_", :id]`. Then, when a customer is created, a schema named from
that template (e.g. `customer_10`) is created, and your tenant migrations are run
on it. If you were later to change that customer's id to `20`, the schema would be
renamed to `customer_20`. Generally speaking, you should avoid changing the tenant id.
"""
],
create?: [
type: :boolean,
default: true,
doc: "Whether or not to automatically create a tenant when a record is created"
],
update?: [
type: :boolean,
default: true,
doc: "Whether or not to automatically update the tenant name if the record is udpated"
]
]
}
@index %Ash.Dsl.Entity{
name: :index,
describe: """
Add an index to be managed by the migration generator.
""",
examples: [
"index [\"column\", \"column2\"], unique: true, where: \"thing = TRUE\""
],
target: AshPostgres.CustomIndex,
schema: AshPostgres.CustomIndex.schema(),
args: [:fields]
}
@custom_indexes %Ash.Dsl.Section{
name: :custom_indexes,
describe: """
A section for configuring indexes to be created by the migration generator.
In general, prefer to use `identities` for simple unique constraints. This is a tool to allow
for declaring more complex indexes.
""",
examples: [
"""
custom_indexes do
index [:column1, :column2], unique: true, where: "thing = TRUE"
end
"""
],
entities: [
@index
]
}
@reference %Ash.Dsl.Entity{
name: :reference,
describe: """
Configures the reference for a relationship in resource migrations.
Keep in mind that multiple relationships can theoretically involve the same destination and foreign keys.
In those cases, you only need to configure the `reference` behavior for one of them. Any conflicts will result
in an error, across this resource and any other resources that share a table with this one. For this reason,
instead of adding a reference configuration for `:nothing`, it's best to just leave the configuration out, as that
is the default behavior if *no* relationship anywhere has configured the behavior of that reference.
""",
examples: [
"reference :post, on_delete: :delete, on_update: :update, name: \"comments_to_posts_fkey\""
],
args: [:relationship],
target: AshPostgres.Reference,
schema: AshPostgres.Reference.schema()
}
@references %Ash.Dsl.Section{
name: :references,
describe: """
A section for configuring the references (foreign keys) in resource migrations.
This section is only relevant if you are using the migration generator with this resource.
Otherwise, it has no effect.
""",
examples: [
"""
references do
reference :post, on_delete: :delete, on_update: :update, name: "comments_to_posts_fkey"
end
"""
],
entities: [@reference],
schema: [
polymorphic_on_delete: [
type: {:one_of, [:delete, :nilify, :nothing, :restrict]},
doc:
"For polymorphic resources, configures the on_delete behavior of the automatically generated foreign keys to source tables."
],
polymorphic_on_update: [
type: {:one_of, [:update, :nilify, :nothing, :restrict]},
doc:
"For polymorphic resources, configures the on_update behavior of the automatically generated foreign keys to source tables."
],
polymorphic_name: [
type: :string,
doc:
"For polymorphic resources, configures the name of the automatically generated foreign keys to source tables."
]
]
}
@check_constraint %Ash.Dsl.Entity{
name: :check_constraint,
describe: """
Add a check constraint to be validated.
If a check constraint on the table is violated but is not listed in this section, a runtime error will be raised rather than a changeset error.
Provide a list of attributes instead of a single attribute to add the message to multiple attributes.
By adding the `check` option, the migration generator will include it when generating migrations.
""",
examples: [
"""
check_constraint :price, "price_must_be_positive", check: "price > 0", message: "price must be positive"
"""
],
args: [:attribute, :name],
target: AshPostgres.CheckConstraint,
schema: AshPostgres.CheckConstraint.schema()
}
@check_constraints %Ash.Dsl.Section{
name: :check_constraints,
describe: """
A section for configuring the check constraints for a given table.
This can be used to automatically create those check constraints, or just to provide messages when they are violated
""",
examples: [
"""
check_constraints do
check_constraint :price, "price_must_be_positive", check: "price > 0", message: "price must be positive"
end
"""
],
entities: [@check_constraint]
}
@postgres %Ash.Dsl.Section{
name: :postgres,
describe: """
Postgres data layer configuration
""",
sections: [
@custom_indexes,
@manage_tenant,
@references,
@check_constraints
],
modules: [
:repo
],
examples: [
"""
postgres do
repo MyApp.Repo
table "organizations"
end
"""
],
schema: [
repo: [
type: :atom,
required: true,
doc:
"The repo that will be used to fetch your data. See the `AshPostgres.Repo` documentation for more"
],
migrate?: [
type: :boolean,
default: true,
doc:
"Whether or not to include this resource in the generated migrations with `mix ash.generate_migrations`"
],
migration_types: [
type: :keyword_list,
default: [],
doc:
"A keyword list of attribute names to the ecto migration type that should be used for that attribute. Only necessary if you need to override the defaults."
],
base_filter_sql: [
type: :string,
doc:
"A raw sql version of the base_filter, e.g `representative = true`. Required if trying to create a unique constraint on a resource with a base_filter"
],
skip_unique_indexes: [
type: {:custom, __MODULE__, :validate_skip_unique_indexes, []},
default: false,
doc: "Skip generating unique indexes when generating migrations"
],
unique_index_names: [
type: :any,
default: [],
doc: """
A list of unique index names that could raise errors, or an mfa to a function that takes a changeset
and returns the list. Must be in the format `{[:affected, :keys], "name_of_constraint"}` or `{[:affected, :keys], "name_of_constraint", "custom error message"}`
Note that this is *not* used to rename the unique indexes created from `identities`.
Use `identity_index_names` for that. This is used to tell ash_postgres about unique indexes that
exist in the database that it didn't create.
"""
],
identity_index_names: [
type: :any,
default: [],
doc: """
A keyword list of identity names to the unique index name that they should use when being managed by the migration
generator.
"""
],
foreign_key_names: [
type: :any,
default: [],
doc: """
A list of foreign keys that could raise errors, or an mfa to a function that takes a changeset and returns the list.
Must be in the format `{:key, "name_of_constraint"}` or `{:key, "name_of_constraint", "custom error message"}`
"""
],
table: [
type: :string,
doc:
"The table to store and read the resource from. Required unless `polymorphic?` is true."
],
polymorphic?: [
type: :boolean,
default: false,
doc: """
Declares this resource as polymorphic.
Polymorphic resources cannot be read or updated unless the table is provided in the query/changeset context.
For example:
PolymorphicResource
|> Ash.Query.set_context(%{data_layer: %{table: "table"}})
|> MyApi.read!()
When relating to polymorphic resources, you'll need to use the `context` option on relationships,
e.g.
belongs_to :polymorphic_association, PolymorphicResource,
context: %{data_layer: %{table: "table"}}
"""
]
]
}
alias Ash.Filter
alias Ash.Query.{BooleanExpression, Not, Ref}
alias Ash.Query.Function.{Ago, Contains, If}
alias Ash.Query.Operator.IsNil
alias AshPostgres.Functions.{Fragment, TrigramSimilarity, Type}
import AshPostgres, only: [repo: 1]
@behaviour Ash.DataLayer
@sections [@postgres]
# This creates the atoms 0..500, which are used for calculations
# If you know of a way to get around the fact that subquery `parent_as` must be
# an atom, let me know.
@atoms Enum.into(0..500, %{}, fn i ->
{i, String.to_atom(to_string(i))}
end)
@moduledoc """
A postgres data layer that leverages Ecto's postgres capabilities.
# Table of Contents
#{Ash.Dsl.Extension.doc_index(@sections)}
#{Ash.Dsl.Extension.doc(@sections)}
"""
use Ash.Dsl.Extension,
sections: @sections,
transformers: [
AshPostgres.Transformers.VerifyRepo,
AshPostgres.Transformers.EnsureTableOrPolymorphic
]
@doc false
def tenant_template(value) do
value = List.wrap(value)
if Enum.all?(value, &(is_binary(&1) || is_atom(&1))) do
{:ok, value}
else
{:error, "Expected all values for `manages_tenant` to be strings or atoms"}
end
end
@doc false
def validate_skip_unique_indexes(indexes) do
indexes = List.wrap(indexes)
if Enum.all?(indexes, &is_atom/1) do
{:ok, indexes}
else
{:error, "All indexes to skip must be atoms"}
end
end
import Ecto.Query, only: [from: 2, subquery: 1]
@impl true
def can?(_, :async_engine), do: true
def can?(_, :transact), do: true
def can?(_, :composite_primary_key), do: true
def can?(_, :upsert), do: true
def can?(resource, {:join, other_resource}) do
data_layer = Ash.DataLayer.data_layer(resource)
other_data_layer = Ash.DataLayer.data_layer(other_resource)
data_layer == other_data_layer and repo(resource) == repo(other_resource)
end
def can?(resource, {:lateral_join, resources}) do
repo = repo(resource)
data_layer = Ash.DataLayer.data_layer(resource)
data_layer == __MODULE__ &&
Enum.all?(resources, fn resource ->
Ash.DataLayer.data_layer(resource) == data_layer && repo(resource) == repo
end)
end
def can?(_, :boolean_filter), do: true
def can?(_, {:aggregate, :count}), do: true
def can?(_, {:aggregate, :sum}), do: true
def can?(_, {:aggregate, :first}), do: true
def can?(_, {:aggregate, :list}), do: true
def can?(_, :aggregate_filter), do: true
def can?(_, :aggregate_sort), do: true
def can?(_, :expression_calculation), do: true
def can?(_, :expression_calculation_sort), do: true
def can?(_, :create), do: true
def can?(_, :select), do: true
def can?(_, :read), do: true
def can?(_, :update), do: true
def can?(_, :destroy), do: true
def can?(_, :filter), do: true
def can?(_, :limit), do: true
def can?(_, :offset), do: true
def can?(_, :multitenancy), do: true
def can?(_, {:filter_expr, _}), do: true
def can?(_, :nested_expressions), do: true
def can?(_, {:query_aggregate, :count}), do: true
def can?(_, :sort), do: true
def can?(_, :distinct), do: true
def can?(_, {:sort, _}), do: true
def can?(_, _), do: false
@impl true
def in_transaction?(resource) do
repo(resource).in_transaction?()
end
@impl true
def limit(query, nil, _), do: {:ok, query}
def limit(query, limit, _resource) do
{:ok, from(row in query, limit: ^limit)}
end
@impl true
def source(resource) do
AshPostgres.table(resource) || ""
end
@impl true
def set_context(resource, data_layer_query, context) do
data_layer_query =
if context[:data_layer][:table] do
%{
data_layer_query
| from: %{data_layer_query.from | source: {context[:data_layer][:table], resource}}
}
else
data_layer_query
end
data_layer_query =
data_layer_query
|> default_bindings(resource, context)
{:ok, data_layer_query}
end
@impl true
def offset(query, nil, _), do: query
def offset(%{offset: old_offset} = query, 0, _resource) when old_offset in [0, nil] do
{:ok, query}
end
def offset(query, offset, _resource) do
{:ok, from(row in query, offset: ^offset)}
end
@impl true
def run_query(query, resource) do
if AshPostgres.polymorphic?(resource) && no_table?(query) do
raise_table_error!(resource, :read)
else
{:ok, repo(resource).all(query, repo_opts(query))}
end
end
defp no_table?(%{from: %{source: {"", _}}}), do: true
defp no_table?(_), do: false
defp repo_opts(%{tenant: tenant, resource: resource}) when not is_nil(tenant) do
if Ash.Resource.Info.multitenancy_strategy(resource) == :context do
[prefix: tenant]
else
[]
end
end
defp repo_opts(_), do: []
defp lateral_join_repo_opts(%{tenant: tenant}, resource) when not is_nil(tenant) do
if Ash.Resource.Info.multitenancy_strategy(resource) == :context do
[prefix: tenant]
else
[]
end
end
defp lateral_join_repo_opts(_, _), do: []
@impl true
def functions(resource) do
config = repo(resource).config()
functions = [AshPostgres.Functions.Type, AshPostgres.Functions.Fragment]
if "pg_trgm" in (config[:installed_extensions] || []) do
functions ++
[
AshPostgres.Functions.TrigramSimilarity
]
else
functions
end
end
@impl true
def run_aggregate_query(query, aggregates, resource) do
subquery = from(row in subquery(query), select: %{})
query =
Enum.reduce(
aggregates,
subquery,
&add_subquery_aggregate_select(&2, &1, resource)
)
{:ok, repo(resource).one(query, repo_opts(query))}
end
@impl true
def set_tenant(_resource, query, tenant) do
{:ok, Ecto.Query.put_query_prefix(query, to_string(tenant))}
end
@impl true
def run_aggregate_query_with_lateral_join(
query,
aggregates,
root_data,
destination_resource,
path
) do
case lateral_join_query(
query,
root_data,
path
) do
{:ok, lateral_join_query} ->
source_resource =
path
|> Enum.at(0)
|> elem(0)
|> Map.get(:resource)
subquery = from(row in subquery(lateral_join_query), select: %{})
query =
Enum.reduce(
aggregates,
subquery,
&add_subquery_aggregate_select(&2, &1, destination_resource)
)
{:ok, repo(source_resource).one(query, lateral_join_repo_opts(query, source_resource))}
{:error, error} ->
{:error, error}
end
end
@impl true
def run_query_with_lateral_join(
query,
root_data,
_destination_resource,
path
) do
source_query =
path
|> Enum.at(0)
|> elem(0)
case lateral_join_query(
query,
root_data,
path
) do
{:ok, query} ->
source_resource =
path
|> Enum.at(0)
|> elem(0)
|> Map.get(:resource)
{:ok,
repo(source_resource).all(query, lateral_join_repo_opts(source_query, source_resource))}
{:error, error} ->
{:error, error}
end
end
defp lateral_join_query(
query,
root_data,
[{source_query, source_field, destination_field, relationship}]
) do
source_values = Enum.map(root_data, &Map.get(&1, source_field))
source_query = Ash.Query.new(source_query)
subquery =
if query.windows[:order] do
subquery(
from(destination in query,
select_merge: %{__order__: over(row_number(), :order)},
where:
field(destination, ^destination_field) ==
field(parent_as(:source_record), ^source_field)
)
|> set_subquery_prefix(source_query, relationship.destination)
)
else
subquery(
from(destination in query,
where:
field(destination, ^destination_field) ==
field(parent_as(:source_record), ^source_field)
)
|> set_subquery_prefix(source_query, relationship.destination)
)
end
source_query.resource
|> Ash.Query.set_context(%{:data_layer => source_query.context[:data_layer]})
|> Ash.Query.set_tenant(source_query.tenant)
|> set_lateral_join_prefix(query)
|> case do
%{valid?: true} = query ->
Ash.Query.data_layer_query(query)
query ->
{:error, query}
end
|> case do
{:ok, data_layer_query} ->
if query.windows[:order] do
{:ok,
from(source in data_layer_query,
as: :source_record,
where: field(source, ^source_field) in ^source_values,
inner_lateral_join: destination in ^subquery,
on: field(source, ^source_field) == field(destination, ^destination_field),
order_by: destination.__order__,
select: destination,
distinct: true
)}
else
{:ok,
from(source in data_layer_query,
as: :source_record,
where: field(source, ^source_field) in ^source_values,
inner_lateral_join: destination in ^subquery,
on: field(source, ^source_field) == field(destination, ^destination_field),
select: destination,
distinct: true
)}
end
{:error, error} ->
{:error, error}
end
end
defp lateral_join_query(
query,
root_data,
[
{source_query, source_field, source_field_on_join_table, relationship},
{through_resource, destination_field_on_join_table, destination_field,
through_relationship}
]
) do
source_query = Ash.Query.new(source_query)
source_values = Enum.map(root_data, &Map.get(&1, source_field))
through_resource
|> Ash.Query.new()
|> Ash.Query.set_context(through_relationship.context)
|> Ash.Query.do_filter(through_relationship.filter)
|> Ash.Query.sort(through_relationship.sort)
|> Ash.Query.set_tenant(source_query.tenant)
|> set_lateral_join_prefix(query)
|> case do
%{valid?: true} = query ->
Ash.Query.data_layer_query(query)
query ->
{:error, query}
end
|> case do
{:ok, through_query} ->
source_query.resource
|> Ash.Query.new()
|> Ash.Query.set_context(relationship.context)
|> Ash.Query.set_context(%{:data_layer => source_query.context[:data_layer]})
|> set_lateral_join_prefix(query)
|> Ash.Query.do_filter(relationship.filter)
|> case do
%{valid?: true} = query ->
Ash.Query.data_layer_query(query)
query ->
{:error, query}
end
|> case do
{:ok, data_layer_query} ->
if query.windows[:order] do
subquery =
subquery(
from(
destination in query,
select_merge: %{__order__: over(row_number(), :order)},
join:
through in ^set_subquery_prefix(
through_query,
source_query,
relationship.through
),
on:
field(through, ^destination_field_on_join_table) ==
field(destination, ^destination_field),
where:
field(through, ^source_field_on_join_table) ==
field(parent_as(:source_record), ^source_field)
)
|> set_subquery_prefix(
source_query,
relationship.destination
)
)
{:ok,
from(source in data_layer_query,
as: :source_record,
where: field(source, ^source_field) in ^source_values,
inner_lateral_join: destination in ^subquery,
select: destination,
order_by: destination.__order__,
distinct: true
)}
else
subquery =
subquery(
from(
destination in query,
join:
through in ^set_subquery_prefix(
through_query,
source_query,
relationship.through
),
on:
field(through, ^destination_field_on_join_table) ==
field(destination, ^destination_field),
where:
field(through, ^source_field_on_join_table) ==
field(parent_as(:source_record), ^source_field)
)
|> set_subquery_prefix(
source_query,
relationship.destination
)
)
{:ok,
from(source in data_layer_query,
as: :source_record,
where: field(source, ^source_field) in ^source_values,
inner_lateral_join: destination in ^subquery,
select: destination,
distinct: true
)}
end
{:error, error} ->
{:error, error}
end
{:error, error} ->
{:error, error}
end
end
defp set_subquery_prefix(data_layer_query, source_query, resource) do
config = repo(resource).config()
if Ash.Resource.Info.multitenancy_strategy(resource) == :context do
%{
data_layer_query
| prefix:
to_string(
source_query.tenant || config[:default_prefix] ||
"public"
)
}
else
%{
data_layer_query
| prefix:
to_string(
config[:default_prefix] ||
"public"
)
}
end
end
defp set_lateral_join_prefix(ash_query, query) do
if Ash.Resource.Info.multitenancy_strategy(ash_query.resource) == :context do
Ash.Query.set_tenant(ash_query, query.prefix)
else
ash_query
end
end
@impl true
def resource_to_query(resource, _),
do: Ecto.Queryable.to_query({AshPostgres.table(resource) || "", resource})
@impl true
def create(resource, changeset) do
changeset.data
|> Map.update!(:__meta__, &Map.put(&1, :source, table(resource, changeset)))
|> ecto_changeset(changeset, :create)
|> repo(resource).insert(repo_opts(changeset))
|> handle_errors()
|> case do
{:ok, result} ->
maybe_create_tenant!(resource, result)
{:ok, result}
{:error, error} ->
{:error, error}
end
end
defp maybe_create_tenant!(resource, result) do
if AshPostgres.manage_tenant_create?(resource) do
tenant_name = tenant_name(resource, result)
AshPostgres.MultiTenancy.create_tenant!(tenant_name, repo(resource))
else
:ok
end
end
defp maybe_update_tenant(resource, changeset, result) do
if AshPostgres.manage_tenant_update?(resource) do
changing_tenant_name? =
resource
|> AshPostgres.manage_tenant_template()
|> Enum.filter(&is_atom/1)
|> Enum.any?(&Ash.Changeset.changing_attribute?(changeset, &1))
if changing_tenant_name? do
old_tenant_name = tenant_name(resource, changeset.data)
new_tenant_name = tenant_name(resource, result)
AshPostgres.MultiTenancy.rename_tenant(repo(resource), old_tenant_name, new_tenant_name)
end
end
:ok
end
defp tenant_name(resource, result) do
resource
|> AshPostgres.manage_tenant_template()
|> Enum.map_join(fn item ->
if is_binary(item) do
item
else
result
|> Map.get(item)
|> to_string()
end
end)
end
defp handle_errors({:error, %Ecto.Changeset{errors: errors}}) do
{:error, Enum.map(errors, &to_ash_error/1)}
end
defp handle_errors({:ok, val}), do: {:ok, val}
defp to_ash_error({field, {message, vars}}) do
Ash.Error.Changes.InvalidAttribute.exception(
field: field,
message: message,
private_vars: vars
)
end
defp ecto_changeset(record, changeset, type) do
ecto_changeset =
record
|> set_table(changeset, type)
|> Ecto.Changeset.change(changeset.attributes)
|> add_configured_foreign_key_constraints(record.__struct__)
|> add_unique_indexes(record.__struct__, changeset)
|> add_check_constraints(record.__struct__)
case type do
:create ->
ecto_changeset
|> add_my_foreign_key_constraints(record.__struct__)
type when type in [:upsert, :update] ->
ecto_changeset
|> add_my_foreign_key_constraints(record.__struct__)
|> add_related_foreign_key_constraints(record.__struct__)
:delete ->
ecto_changeset
|> add_related_foreign_key_constraints(record.__struct__)
end
end
defp set_table(record, changeset, operation) do
if AshPostgres.polymorphic?(record.__struct__) do
table = changeset.context[:data_layer][:table] || AshPostgres.table(record.__struct__)
if table do
Ecto.put_meta(record, source: table)
else
raise_table_error!(changeset.resource, operation)
end
else
record
end
end
defp add_check_constraints(changeset, resource) do
resource
|> AshPostgres.check_constraints()
|> Enum.reduce(changeset, fn constraint, changeset ->
constraint.attribute
|> List.wrap()
|> Enum.reduce(changeset, fn attribute, changeset ->
Ecto.Changeset.check_constraint(changeset, attribute,
name: constraint.name,
message: constraint.message || "is invalid"
)
end)
end)
end
defp add_related_foreign_key_constraints(changeset, resource) do
# TODO: this doesn't guarantee us to get all of them, because if something is related to this
# schema and there is no back-relation, then this won't catch its foreign key constraints
resource
|> Ash.Resource.Info.relationships()
|> Enum.map(& &1.destination)
|> Enum.uniq()
|> Enum.flat_map(fn related ->
related
|> Ash.Resource.Info.relationships()
|> Enum.filter(&(&1.destination == resource))
|> Enum.map(&Map.take(&1, [:source, :source_field, :destination_field]))
end)
|> Enum.uniq()
|> Enum.reduce(changeset, fn %{
source: source,
source_field: source_field,
destination_field: destination_field
},
changeset ->
Ecto.Changeset.foreign_key_constraint(changeset, destination_field,
name: "#{AshPostgres.table(source)}_#{source_field}_fkey",
message: "would leave records behind"
)
end)
end
defp add_my_foreign_key_constraints(changeset, resource) do
resource
|> Ash.Resource.Info.relationships()
|> Enum.reduce(changeset, &Ecto.Changeset.foreign_key_constraint(&2, &1.source_field))
end
defp add_configured_foreign_key_constraints(changeset, resource) do
resource
|> AshPostgres.foreign_key_names()
|> case do
{m, f, a} -> List.wrap(apply(m, f, [changeset | a]))
value -> List.wrap(value)
end
|> Enum.reduce(changeset, fn
{key, name}, changeset ->
Ecto.Changeset.foreign_key_constraint(changeset, key, name: name)
{key, name, message}, changeset ->
Ecto.Changeset.foreign_key_constraint(changeset, key, name: name, message: message)
end)
end
defp add_unique_indexes(changeset, resource, ash_changeset) do
changeset =
resource
|> Ash.Resource.Info.identities()
|> Enum.reduce(changeset, fn identity, changeset ->
name =
AshPostgres.identity_index_names(resource)[identity.name] ||
"#{table(resource, ash_changeset)}_#{identity.name}_index"
opts =
if Map.get(identity, :message) do
[name: name, message: identity.message]
else
[name: name]
end
Ecto.Changeset.unique_constraint(changeset, identity.keys, opts)
end)
names =
resource
|> AshPostgres.unique_index_names()
|> case do
{m, f, a} -> List.wrap(apply(m, f, [changeset | a]))
value -> List.wrap(value)
end
names = [
{Ash.Resource.Info.primary_key(resource), table(resource, ash_changeset) <> "_pkey"} | names
]
Enum.reduce(names, changeset, fn
{keys, name}, changeset ->
Ecto.Changeset.unique_constraint(changeset, List.wrap(keys), name: name)
{keys, name, message}, changeset ->
Ecto.Changeset.unique_constraint(changeset, List.wrap(keys), name: name, message: message)
end)
end
@impl true
def upsert(resource, changeset, keys \\ nil) do
keys = keys || Ash.Resource.Info.primary_key(resource)
# `--` is right-associative, so parentheses are needed to subtract both lists
attributes = (Map.keys(changeset.attributes) -- Map.get(changeset, :defaults, [])) -- keys
repo_opts =
changeset
|> repo_opts()
|> Keyword.put(:on_conflict, {:replace, attributes})
|> Keyword.put(:conflict_target, keys)
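# `on_conflict: {:replace, attributes}` makes the insert an upsert: when the
# `conflict_target` keys collide with an existing row, only the listed
# attribute columns are overwritten.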
if AshPostgres.manage_tenant_update?(resource) do
{:error, "Cannot currently upsert a resource that owns a tenant"}
else
changeset.data
|> Map.update!(:__meta__, &Map.put(&1, :source, table(resource, changeset)))
|> ecto_changeset(changeset, :upsert)
|> repo(resource).insert(Keyword.put(repo_opts, :returning, true))
|> handle_errors()
end
end
@impl true
def update(resource, changeset) do
changeset.data
|> Map.update!(:__meta__, &Map.put(&1, :source, table(resource, changeset)))
|> ecto_changeset(changeset, :update)
|> repo(resource).update(repo_opts(changeset))
|> handle_errors()
|> case do
{:ok, result} ->
maybe_update_tenant(resource, changeset, result)
{:ok, result}
{:error, error} ->
{:error, error}
end
end
@impl true
def destroy(resource, %{data: record} = changeset) do
record
|> ecto_changeset(changeset, :delete)
|> repo(resource).delete(repo_opts(changeset))
|> case do
{:ok, _record} ->
:ok
{:error, error} ->
handle_errors({:error, error})
end
end
@impl true
def sort(query, sort, resource) do
query = default_bindings(query, resource)
sort
|> sanitize_sort()
|> Enum.reduce_while({:ok, %Ecto.Query.QueryExpr{expr: [], params: []}}, fn
{order, %Ash.Query.Calculation{} = calc}, {:ok, query_expr} ->
type =
if calc.type do
parameterized_type(calc.type, [])
else
nil
end
calc.opts
|> calc.module.expression(calc.context)
|> Ash.Filter.hydrate_refs(%{
resource: resource,
aggregates: query.__ash_bindings__.aggregate_defs,
calculations: %{},
public?: false
})
|> case do
{:ok, expr} ->
{params, expr} =
do_filter_to_expr(expr, query.__ash_bindings__, query_expr.params, false, type)
{:cont,
{:ok, %{query_expr | expr: query_expr.expr ++ [{order, expr}], params: params}}}
{:error, error} ->
{:halt, {:error, error}}
end
{order, sort}, {:ok, query_expr} ->
expr =
case Map.fetch(query.__ash_bindings__.aggregates, sort) do
{:ok, binding} ->
aggregate =
Ash.Resource.Info.aggregate(resource, sort) ||
raise "No such aggregate for query aggregate #{inspect(sort)}"
{:ok, field_type} =
if aggregate.field do
related = Ash.Resource.Info.related(resource, aggregate.relationship_path)
attr = Ash.Resource.Info.attribute(related, aggregate.field)
if attr && related do
{:ok, parameterized_type(attr.type, attr.constraints)}
else
{:ok, nil}
end
else
{:ok, nil}
end
default_value =
aggregate.default || Ash.Query.Aggregate.default_value(aggregate.kind)
if is_nil(default_value) do
{{:., [], [{:&, [], [binding]}, sort]}, [], []}
else
if field_type do
{:coalesce, [],
[
{{:., [], [{:&, [], [binding]}, sort]}, [], []},
{:type, [],
[
default_value,
field_type
]}
]}
else
{:coalesce, [],
[
{{:., [], [{:&, [], [binding]}, sort]}, [], []},
default_value
]}
end
end
:error ->
{{:., [], [{:&, [], [0]}, sort]}, [], []}
end
{:cont, {:ok, %{query_expr | expr: query_expr.expr ++ [{order, expr}]}}}
end)
|> case do
{:ok, %{expr: []}} ->
{:ok, query}
{:ok, sort_expr} ->
new_query =
query
|> Map.update!(:order_bys, fn order_bys ->
order_bys = order_bys || []
order_bys ++ [sort_expr]
end)
|> Map.update!(:windows, fn windows ->
order_by_expr = %{sort_expr | expr: [order_by: sort_expr.expr]}
Keyword.put(windows, :order, order_by_expr)
end)
{:ok, new_query}
{:error, error} ->
{:error, error}
end
end
@impl true
def select(query, select, resource) do
query = default_bindings(query, resource)
{:ok,
from(row in query,
select: struct(row, ^select)
)}
end
@impl true
def distinct(query, distinct_on, resource) do
query = default_bindings(query, resource)
query =
query
|> default_bindings(resource)
|> Map.update!(:distinct, fn distinct ->
distinct =
distinct ||
%Ecto.Query.QueryExpr{
expr: []
}
expr =
Enum.map(distinct_on, fn distinct_on_field ->
binding =
case Map.fetch(query.__ash_bindings__.aggregates, distinct_on_field) do
{:ok, binding} ->
binding
:error ->
0
end
{:asc, {{:., [], [{:&, [], [binding]}, distinct_on_field]}, [], []}}
end)
%{distinct | expr: distinct.expr ++ expr}
end)
{:ok, query}
end
defp sanitize_sort(sort) do
sort
|> List.wrap()
|> Enum.map(fn
{sort, {order, context}} ->
{ash_to_ecto_order(order), {sort, context}}
{sort, order} ->
{ash_to_ecto_order(order), sort}
sort ->
sort
end)
end
defp ash_to_ecto_order(:asc_nils_last), do: :asc_nulls_last
defp ash_to_ecto_order(:asc_nils_first), do: :asc_nulls_first
defp ash_to_ecto_order(:desc_nils_last), do: :desc_nulls_last
defp ash_to_ecto_order(:desc_nils_first), do: :desc_nulls_first
defp ash_to_ecto_order(other), do: other
@impl true
def filter(query, %{expression: false}, _resource) do
impossible_query = from(row in query, where: false)
{:ok, Map.put(impossible_query, :__impossible__, true)}
end
def filter(query, filter, _resource) do
relationship_paths =
filter
|> Filter.relationship_paths()
|> Enum.map(fn path ->
if can_inner_join?(path, filter) do
{:inner, relationship_path_to_relationships(filter.resource, path)}
else
{:left, relationship_path_to_relationships(filter.resource, path)}
end
end)
query
|> join_all_relationships(relationship_paths, filter)
|> case do
{:ok, query} ->
{:ok, add_filter_expression(query, filter)}
{:error, error} ->
{:error, error}
end
end
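# Seeds the query with an `__ash_bindings__` map that tracks which Ecto
# binding index corresponds to which relationship path, so later joins,
# filters, sorts and aggregates can reference the right table by index.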
defp default_bindings(query, resource, context \\ %{}) do
Map.put_new(query, :__ash_bindings__, %{
current: Enum.count(query.joins) + 1,
calculations: %{},
aggregates: %{},
aggregate_defs: %{},
context: context,
bindings: %{0 => %{path: [], type: :root, source: resource}}
})
end
@known_inner_join_operators [
Eq,
GreaterThan,
GreaterThanOrEqual,
In,
LessThanOrEqual,
LessThan,
NotEq
]
|> Enum.map(&Module.concat(Ash.Query.Operator, &1))
@known_inner_join_functions [
Ago,
Contains
]
|> Enum.map(&Module.concat(Ash.Query.Function, &1))
@known_inner_join_predicates @known_inner_join_functions ++ @known_inner_join_operators
defp can_inner_join?(path, expr, seen_an_or? \\ false)
defp can_inner_join?(path, %{expression: expr}, seen_an_or?),
do: can_inner_join?(path, expr, seen_an_or?)
defp can_inner_join?(_path, expr, _seen_an_or?) when expr in [nil, true, false], do: true
defp can_inner_join?(path, %BooleanExpression{op: :and, left: left, right: right}, seen_an_or?) do
can_inner_join?(path, left, seen_an_or?) || can_inner_join?(path, right, seen_an_or?)
end
defp can_inner_join?(path, %BooleanExpression{op: :or, left: left, right: right}, _) do
can_inner_join?(path, left, true) && can_inner_join?(path, right, true)
end
defp can_inner_join?(
_,
%Not{},
_
) do
false
end
defp can_inner_join?(
search_path,
%struct{__operator__?: true, left: %Ref{relationship_path: relationship_path}},
seen_an_or?
)
when search_path == relationship_path and struct in @known_inner_join_predicates do
not seen_an_or?
end
defp can_inner_join?(
search_path,
%struct{__operator__?: true, right: %Ref{relationship_path: relationship_path}},
seen_an_or?
)
when search_path == relationship_path and struct in @known_inner_join_predicates do
not seen_an_or?
end
defp can_inner_join?(
search_path,
%struct{__function__?: true, arguments: arguments},
seen_an_or?
)
when struct in @known_inner_join_predicates do
if Enum.any?(arguments, &match?(%Ref{relationship_path: ^search_path}, &1)) do
not seen_an_or?
else
true
end
end
defp can_inner_join?(_, _, _), do: false
@impl true
def add_aggregate(query, aggregate, _resource, add_base? \\ true) do
resource = aggregate.resource
query = default_bindings(query, resource)
query_and_binding =
case get_binding(resource, aggregate.relationship_path, query, :aggregate) do
nil ->
relationship = Ash.Resource.Info.relationship(resource, aggregate.relationship_path)
if relationship.type == :many_to_many do
subquery = aggregate_subquery(relationship, aggregate, query)
case join_all_relationships(
query,
[
{{:aggregate, aggregate.name, subquery},
relationship_path_to_relationships(resource, aggregate.relationship_path)}
],
nil
) do
{:ok, new_query} ->
{:ok,
{new_query,
get_binding(resource, aggregate.relationship_path, new_query, :aggregate)}}
{:error, error} ->
{:error, error}
end
else
subquery = aggregate_subquery(relationship, aggregate, query)
case join_all_relationships(
query,
[
{{:aggregate, aggregate.name, subquery},
relationship_path_to_relationships(resource, aggregate.relationship_path)}
],
nil
) do
{:ok, new_query} ->
{:ok,
{new_query,
get_binding(resource, aggregate.relationship_path, new_query, :aggregate)}}
{:error, error} ->
{:error, error}
end
end
binding ->
{:ok, {query, binding}}
end
case query_and_binding do
{:ok, {query, binding}} ->
query_with_aggregate_binding =
put_in(
query.__ash_bindings__.aggregates,
Map.put(query.__ash_bindings__.aggregates, aggregate.name, binding)
)
query_with_aggregate_defs =
put_in(
query_with_aggregate_binding.__ash_bindings__.aggregate_defs,
Map.put(
query_with_aggregate_binding.__ash_bindings__.aggregate_defs,
aggregate.name,
aggregate
)
)
new_query =
query_with_aggregate_defs
|> add_aggregate_to_subquery(resource, aggregate, binding)
|> select_aggregate(resource, aggregate, add_base?)
{:ok, new_query}
{:error, error} ->
{:error, error}
end
end
@impl true
def add_calculation(query, calculation, expression, resource) do
query = default_bindings(query, resource)
query =
if query.select do
query
else
from(row in query,
select: row,
select_merge: %{aggregates: %{}, calculations: %{}}
)
end
{params, expr} =
do_filter_to_expr(
expression,
query.__ash_bindings__,
query.select.params
)
{:ok,
query
|> Map.update!(:select, &add_to_calculation_select(&1, expr, List.wrap(params), calculation))}
end
defp select_aggregate(query, resource, aggregate, add_base?) do
binding = get_binding(resource, aggregate.relationship_path, query, :aggregate)
query =
if query.select do
query
else
if add_base? do
from(row in query,
select: row,
select_merge: %{aggregates: %{}, calculations: %{}}
)
else
from(row in query, select: row)
end
end
%{query | select: add_to_aggregate_select(query.select, binding, aggregate)}
end
defp add_to_calculation_select(
%{
expr:
{:merge, _,
[
first,
{:%{}, _,
[{:aggregates, {:%{}, [], agg_fields}}, {:calculations, {:%{}, [], fields}}]}
]}
} = select,
expr,
params,
%{load: nil} = calculation
) do
field =
{:type, [],
[
expr,
parameterized_type(calculation.type, [])
]}
name =
if calculation.sequence == 0 do
calculation.name
else
String.to_existing_atom("#{calculation.sequence}")
end
new_fields = [
{name, field}
| fields
]
%{
select
| expr:
{:merge, [],
[
first,
{:%{}, [],
[{:aggregates, {:%{}, [], agg_fields}}, {:calculations, {:%{}, [], new_fields}}]}
]},
params: params
}
end
defp add_to_calculation_select(
%{expr: select_expr} = select,
expr,
params,
%{load: load_as} = calculation
) do
field =
{:type, [],
[
expr,
parameterized_type(calculation.type, [])
]}
load_as =
if calculation.sequence == 0 do
load_as
else
"#{load_as}_#{calculation.sequence}"
end
%{
select
| expr: {:merge, [], [select_expr, {:%{}, [], [{load_as, field}]}]},
params: params
}
end
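  # Resolve an Ash type (or raw Ecto type) into the form Ecto expects in query
  # expressions, recursing into array element types and producing
  # `{:parameterized, type, constraints}` for custom parameterized types.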
defp parameterized_type({:array, type}, constraints) do
{:array, parameterized_type(type, constraints[:items] || [])}
end
defp parameterized_type(type, constraints) do
if Ash.Type.ash_type?(type) do
parameterized_type(Ash.Type.ecto_type(type), constraints)
else
if is_atom(type) && :erlang.function_exported(type, :type, 1) do
{:parameterized, type, constraints}
else
type
end
end
end
defp add_to_aggregate_select(
%{
expr:
{:merge, _,
[
first,
{:%{}, _,
[{:aggregates, {:%{}, [], fields}}, {:calculations, {:%{}, [], calc_fields}}]}
]}
} = select,
binding,
%{load: nil} = aggregate
) do
accessed = {{:., [], [{:&, [], [binding]}, aggregate.name]}, [], []}
field =
{:type, [],
[
accessed,
parameterized_type(aggregate.type, [])
]}
field_with_default =
if is_nil(aggregate.default_value) do
field
else
{:coalesce, [],
[
field,
{:type, [],
[
aggregate.default_value,
parameterized_type(aggregate.type, [])
]}
]}
end
new_fields = [
{aggregate.name, field_with_default}
| fields
]
%{
select
| expr:
{:merge, [],
[
first,
{:%{}, [],
[{:aggregates, {:%{}, [], new_fields}}, {:calculations, {:%{}, [], calc_fields}}]}
]}
}
end
defp add_to_aggregate_select(
%{expr: expr} = select,
binding,
%{load: load_as} = aggregate
) do
accessed = {{:., [], [{:&, [], [binding]}, aggregate.name]}, [], []}
field =
{:type, [],
[
accessed,
parameterized_type(aggregate.type, [])
]}
field_with_default =
if is_nil(aggregate.default_value) do
field
else
{:coalesce, [],
[
field,
{:type, [],
[
aggregate.default_value,
parameterized_type(aggregate.type, [])
]}
]}
end
%{select | expr: {:merge, [], [expr, {:%{}, [], [{load_as, field_with_default}]}]}}
end
defp add_aggregate_to_subquery(query, resource, aggregate, binding) do
new_joins =
List.update_at(query.joins, binding - 1, fn join ->
aggregate_query =
if aggregate.authorization_filter do
{:ok, filter} =
filter(
join.source.from.source.query,
aggregate.authorization_filter,
Ash.Resource.Info.related(resource, aggregate.relationship_path)
)
filter
else
join.source.from.source.query
end
new_aggregate_query = add_subquery_aggregate_select(aggregate_query, aggregate, resource)
put_in(join.source.from.source.query, new_aggregate_query)
end)
%{
query
| joins: new_joins
}
end
defp aggregate_subquery(%{type: :many_to_many} = relationship, aggregate, root_query) do
destination =
case maybe_get_resource_query(relationship.destination, relationship, root_query) do
{:ok, query} ->
query
_ ->
relationship.destination
end
join_relationship =
Ash.Resource.Info.relationship(relationship.source, relationship.join_relationship)
through =
case maybe_get_resource_query(relationship.through, join_relationship, root_query) do
{:ok, query} ->
query
_ ->
relationship.through
end
query =
from(destination in destination,
join: through in ^through,
on:
field(through, ^relationship.destination_field_on_join_table) ==
field(destination, ^relationship.destination_field),
group_by: field(through, ^relationship.source_field_on_join_table),
select: %{__source_field: field(through, ^relationship.source_field_on_join_table)}
)
query_tenant = aggregate.query && aggregate.query.tenant
root_tenant = root_query.prefix
if Ash.Resource.Info.multitenancy_strategy(relationship.destination) &&
(root_tenant ||
query_tenant) do
Ecto.Query.put_query_prefix(query, query_tenant || root_tenant)
else
%{query | prefix: repo(relationship.destination).config()[:default_prefix] || "public"}
end
end
defp aggregate_subquery(relationship, aggregate, root_query) do
destination =
case maybe_get_resource_query(relationship.destination, relationship, root_query) do
{:ok, query} ->
query
_ ->
relationship.destination
end
query =
from(row in destination,
group_by: ^relationship.destination_field,
select: field(row, ^relationship.destination_field)
)
query_tenant = aggregate.query && aggregate.query.tenant
root_tenant = root_query.prefix
if Ash.Resource.Info.multitenancy_strategy(relationship.destination) &&
(root_tenant ||
query_tenant) do
Ecto.Query.put_query_prefix(query, query_tenant || root_tenant)
else
%{
query
| prefix: repo(relationship.destination).config()[:default_prefix] || "public"
}
end
end
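  # Map Ash sort directions onto raw SQL `ORDER BY` suffixes. Plain `:asc` is
  # the database default, so it needs no suffix and maps to `nil`.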
defp order_to_postgres_order(dir) do
case dir do
:asc -> nil
:asc_nils_last -> " ASC NULLS LAST"
:asc_nils_first -> " ASC NULLS FIRST"
:desc -> " DESC"
:desc_nils_last -> " DESC NULLS LAST"
:desc_nils_first -> " DESC NULLS FIRST"
end
end
defp add_subquery_aggregate_select(query, %{kind: :first} = aggregate, _resource) do
query = default_bindings(query, aggregate.resource)
key = aggregate.field
type = parameterized_type(aggregate.type, [])
field =
if aggregate.query && aggregate.query.sort && aggregate.query.sort != [] do
sort_expr =
aggregate.query.sort
|> Enum.map(fn {sort, order} ->
case order_to_postgres_order(order) do
nil ->
[expr: {{:., [], [{:&, [], [0]}, sort]}, [], []}]
order ->
[expr: {{:., [], [{:&, [], [0]}, sort]}, [], []}, raw: order]
end
end)
|> Enum.intersperse(raw: ", ")
|> List.flatten()
{:fragment, [],
[
raw: "array_agg(",
expr: {{:., [], [{:&, [], [0]}, key]}, [], []},
raw: " ORDER BY "
] ++
close_paren(sort_expr)}
else
{:fragment, [],
[
raw: "array_agg(",
expr: {{:., [], [{:&, [], [0]}, key]}, [], []},
raw: ")"
]}
end
{params, filtered} =
if aggregate.query && aggregate.query.filter &&
not match?(%Ash.Filter{expression: nil}, aggregate.query.filter) do
{params, expr} =
filter_to_expr(
aggregate.query.filter,
query.__ash_bindings__,
query.select.params
)
{params, {:filter, [], [field, expr]}}
else
{[], field}
end
value =
{:fragment, [],
[
raw: "(",
expr: filtered,
raw: ")[1]"
]}
with_default =
if aggregate.default_value do
{:coalesce, [], [value, {:type, [], [aggregate.default_value, type]}]}
else
value
end
casted =
{:type, [],
[
with_default,
type
]}
new_expr = {:merge, [], [query.select.expr, {:%{}, [], [{aggregate.name, casted}]}]}
%{query | select: %{query.select | expr: new_expr, params: params}}
end
defp add_subquery_aggregate_select(query, %{kind: :list} = aggregate, _resource) do
query = default_bindings(query, aggregate.resource)
key = aggregate.field
type = parameterized_type(aggregate.type, [])
field =
if aggregate.query && aggregate.query.sort && aggregate.query.sort != [] do
sort_expr =
aggregate.query.sort
|> Enum.map(fn {sort, order} ->
case order_to_postgres_order(order) do
nil ->
[expr: {{:., [], [{:&, [], [0]}, sort]}, [], []}]
order ->
[expr: {{:., [], [{:&, [], [0]}, sort]}, [], []}, raw: order]
end
end)
|> Enum.intersperse(raw: ", ")
|> List.flatten()
{:fragment, [],
[
raw: "array_agg(",
expr: {{:., [], [{:&, [], [0]}, key]}, [], []},
raw: " ORDER BY "
] ++
close_paren(sort_expr)}
else
{:fragment, [],
[
raw: "array_agg(",
expr: {{:., [], [{:&, [], [0]}, key]}, [], []},
raw: ")"
]}
end
{params, filtered} =
if aggregate.query && aggregate.query.filter &&
not match?(%Ash.Filter{expression: nil}, aggregate.query.filter) do
{params, expr} =
filter_to_expr(
aggregate.query.filter,
query.__ash_bindings__,
query.select.params
)
{params, {:filter, [], [field, expr]}}
else
{[], field}
end
with_default =
if aggregate.default_value do
{:coalesce, [], [filtered, {:type, [], [aggregate.default_value, type]}]}
else
filtered
end
cast = {:type, [], [with_default, {:array, type}]}
new_expr = {:merge, [], [query.select.expr, {:%{}, [], [{aggregate.name, cast}]}]}
%{query | select: %{query.select | expr: new_expr, params: params}}
end
defp add_subquery_aggregate_select(query, %{kind: kind} = aggregate, resource)
when kind in [:count, :sum] do
query = default_bindings(query, aggregate.resource)
key = aggregate.field || List.first(Ash.Resource.Info.primary_key(resource))
type = parameterized_type(aggregate.type, [])
field = {kind, [], [{{:., [], [{:&, [], [0]}, key]}, [], []}]}
{params, filtered} =
if aggregate.query && aggregate.query.filter &&
not match?(%Ash.Filter{expression: nil}, aggregate.query.filter) do
{params, expr} =
filter_to_expr(
aggregate.query.filter,
query.__ash_bindings__,
query.select.params
)
{params, {:filter, [], [field, expr]}}
else
{[], field}
end
with_default =
if aggregate.default_value do
{:coalesce, [], [filtered, {:type, [], [aggregate.default_value, type]}]}
else
filtered
end
cast = {:type, [], [with_default, type]}
new_expr = {:merge, [], [query.select.expr, {:%{}, [], [{aggregate.name, cast}]}]}
%{query | select: %{query.select | expr: new_expr, params: params}}
end
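  # Close the `array_agg(... ORDER BY ...` fragment opened above: append ")"
  # to the trailing raw segment if there is one, otherwise add a new raw ")".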
defp close_paren(list) do
count = length(list)
case List.last(list) do
{:raw, _} ->
List.update_at(list, count - 1, fn {:raw, str} ->
{:raw, str <> ")"}
end)
_ ->
list ++ [{:raw, ")"}]
end
end
defp relationship_path_to_relationships(resource, path, acc \\ [])
defp relationship_path_to_relationships(_resource, [], acc), do: Enum.reverse(acc)
defp relationship_path_to_relationships(resource, [relationship | rest], acc) do
relationship = Ash.Resource.Info.relationship(resource, relationship)
relationship_path_to_relationships(relationship.destination, rest, [relationship | acc])
end
defp join_all_relationships(query, relationship_paths, filter, path \\ [], source \\ nil) do
query = default_bindings(query, source)
Enum.reduce_while(relationship_paths, {:ok, query}, fn
{_join_type, []}, {:ok, query} ->
{:cont, {:ok, query}}
{join_type, [relationship | rest_rels]}, {:ok, query} ->
source = source || relationship.source
current_path = path ++ [relationship]
current_join_type =
case join_type do
{:aggregate, _name, _agg} when rest_rels != [] ->
:left
other ->
other
end
if has_binding?(source, Enum.reverse(current_path), query, current_join_type) do
{:cont, {:ok, query}}
else
case join_relationship(
query,
relationship,
Enum.map(path, & &1.name),
current_join_type,
source,
filter
) do
{:ok, joined_query} ->
joined_query_with_distinct = add_distinct(relationship, join_type, joined_query)
case join_all_relationships(
joined_query_with_distinct,
[{join_type, rest_rels}],
filter,
current_path,
source
) do
{:ok, query} ->
{:cont, {:ok, query}}
{:error, error} ->
{:halt, {:error, error}}
end
{:error, error} ->
{:halt, {:error, error}}
end
end
end)
end
defp has_binding?(resource, path, query, {:aggregate, _, _}),
do: has_binding?(resource, path, query, :aggregate)
defp has_binding?(resource, candidate_path, %{__ash_bindings__: _} = query, type) do
Enum.any?(query.__ash_bindings__.bindings, fn
{_, %{path: path, source: source, type: ^type}} ->
Ash.SatSolver.synonymous_relationship_paths?(resource, path, candidate_path, source)
_ ->
false
end)
end
defp has_binding?(_, _, _, _), do: false
defp get_binding(resource, path, %{__ash_bindings__: _} = query, type) do
paths =
Enum.flat_map(query.__ash_bindings__.bindings, fn
{binding, %{path: path, type: ^type}} ->
[{binding, path}]
_ ->
[]
end)
Enum.find_value(paths, fn {binding, candidate_path} ->
Ash.SatSolver.synonymous_relationship_paths?(resource, candidate_path, path) && binding
end)
end
defp get_binding(_, _, _, _), do: nil
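  # A left join to a to-many relationship can duplicate source rows, so the
  # first such join adds a `distinct` on the destination's primary key
  # (skipped when a distinct is already set).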
defp add_distinct(relationship, join_type, joined_query) do
if relationship.cardinality == :many and join_type == :left && !joined_query.distinct do
from(row in joined_query,
distinct: ^Ash.Resource.Info.primary_key(relationship.destination)
)
else
joined_query
end
end
defp join_relationship(query, relationship, path, join_type, source, filter) do
case Map.get(query.__ash_bindings__.bindings, path) do
%{type: existing_join_type} when join_type != existing_join_type ->
raise "unreachable?"
nil ->
do_join_relationship(query, relationship, path, join_type, source, filter)
_ ->
{:ok, query}
end
end
defp do_join_relationship(
query,
%{type: :many_to_many} = relationship,
path,
kind,
source,
filter
) do
join_relationship = Ash.Resource.Info.relationship(source, relationship.join_relationship)
with {:ok, relationship_through} <-
maybe_get_resource_query(relationship.through, join_relationship, query),
{:ok, relationship_destination} <-
maybe_get_resource_query(relationship.destination, relationship, query) do
relationship_through =
relationship_through
|> Ecto.Queryable.to_query()
|> set_join_prefix(query, relationship.through)
relationship_destination =
relationship_destination
|> Ecto.Queryable.to_query()
|> set_join_prefix(query, relationship.destination)
binding_kind =
case kind do
{:aggregate, _, _} ->
:left
other ->
other
end
current_binding =
Enum.find_value(query.__ash_bindings__.bindings, 0, fn {binding, data} ->
if data.type == binding_kind && data.path == Enum.reverse(path) do
binding
end
end)
used_calculations =
Ash.Filter.used_calculations(
filter,
relationship.destination,
path ++ [relationship.name]
)
used_aggregates = used_aggregates(filter, relationship, used_calculations, path)
Enum.reduce_while(used_aggregates, {:ok, relationship_destination}, fn agg, {:ok, query} ->
agg = %{agg | load: agg.name}
case add_aggregate(query, agg, relationship.destination, false) do
{:ok, query} ->
{:cont, {:ok, query}}
{:error, error} ->
{:halt, {:error, error}}
end
end)
|> case do
{:ok, relationship_destination} ->
relationship_destination =
case used_aggregates do
[] ->
relationship_destination
_ ->
subquery(relationship_destination)
end
new_query =
case kind do
{:aggregate, _, subquery} ->
{subquery, alias_name} =
agg_subquery_for_lateral_join(current_binding, query, subquery, relationship)
from([{row, current_binding}] in query,
left_lateral_join: through in ^subquery
)
|> Map.update!(:aliases, &Map.put(&1, alias_name, current_binding))
:inner ->
from([{row, current_binding}] in query,
join: through in ^relationship_through,
on:
field(row, ^relationship.source_field) ==
field(through, ^relationship.source_field_on_join_table),
join: destination in ^relationship_destination,
on:
field(destination, ^relationship.destination_field) ==
field(through, ^relationship.destination_field_on_join_table)
)
_ ->
from([{row, current_binding}] in query,
left_join: through in ^relationship_through,
on:
field(row, ^relationship.source_field) ==
field(through, ^relationship.source_field_on_join_table),
left_join: destination in ^relationship_destination,
on:
field(destination, ^relationship.destination_field) ==
field(through, ^relationship.destination_field_on_join_table)
)
end
join_path =
Enum.reverse([
String.to_existing_atom(to_string(relationship.name) <> "_join_assoc") | path
])
full_path = Enum.reverse([relationship.name | path])
binding_data =
case kind do
{:aggregate, name, _agg} ->
%{type: :aggregate, name: name, path: full_path, source: source}
_ ->
%{type: kind, path: full_path, source: source}
end
case kind do
{:aggregate, _, _subquery} ->
{:ok,
new_query
|> add_binding(binding_data)}
_ ->
{:ok,
new_query
|> add_binding(%{path: join_path, type: :left, source: source})
|> add_binding(binding_data)}
end
{:error, error} ->
{:error, error}
end
end
end
defp do_join_relationship(query, relationship, path, kind, source, filter) do
case maybe_get_resource_query(relationship.destination, relationship, query) do
{:error, error} ->
{:error, error}
{:ok, relationship_destination} ->
relationship_destination =
relationship_destination
|> Ecto.Queryable.to_query()
|> set_join_prefix(query, relationship.destination)
binding_kind =
case kind do
{:aggregate, _, _} ->
:left
other ->
other
end
current_binding =
Enum.find_value(query.__ash_bindings__.bindings, 0, fn {binding, data} ->
if data.type == binding_kind && data.path == Enum.reverse(path) do
binding
end
end)
used_calculations =
Ash.Filter.used_calculations(
filter,
relationship.destination,
path ++ [relationship.name]
)
used_aggregates = used_aggregates(filter, relationship, used_calculations, path)
Enum.reduce_while(used_aggregates, {:ok, relationship_destination}, fn agg,
{:ok, query} ->
agg = %{agg | load: agg.name}
case add_aggregate(query, agg, relationship.destination, false) do
{:ok, query} ->
{:cont, {:ok, query}}
{:error, error} ->
{:halt, {:error, error}}
end
end)
|> case do
{:ok, relationship_destination} ->
relationship_destination =
case used_aggregates do
[] ->
relationship_destination
_ ->
subquery(relationship_destination)
end
new_query =
case kind do
{:aggregate, _, subquery} ->
{subquery, alias_name} =
agg_subquery_for_lateral_join(current_binding, query, subquery, relationship)
from([{row, current_binding}] in query,
left_lateral_join: destination in ^subquery,
on:
field(row, ^relationship.source_field) ==
field(destination, ^relationship.destination_field)
)
|> Map.update!(:aliases, &Map.put(&1, alias_name, current_binding))
:inner ->
from([{row, current_binding}] in query,
join: destination in ^relationship_destination,
on:
field(row, ^relationship.source_field) ==
field(destination, ^relationship.destination_field)
)
_ ->
from([{row, current_binding}] in query,
left_join: destination in ^relationship_destination,
on:
field(row, ^relationship.source_field) ==
field(destination, ^relationship.destination_field)
)
end
full_path = Enum.reverse([relationship.name | path])
binding_data =
case kind do
{:aggregate, name, _agg} ->
%{type: :aggregate, name: name, path: full_path, source: source}
_ ->
%{type: kind, path: full_path, source: source}
end
{:ok,
new_query
|> add_binding(binding_data)}
{:error, error} ->
{:error, error}
end
end
end
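  # Build the correlated subquery used to lateral-join an aggregate: the inner
  # query is filtered against the parent row via `parent_as`, then wrapped so
  # that only the join field is selected.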
defp agg_subquery_for_lateral_join(current_binding, query, subquery, relationship) do
alias_name = @atoms[current_binding]
inner_sub = from(destination in subquery, [])
{dest_binding, dest_field} =
case relationship.type do
:many_to_many ->
{1, relationship.source_field_on_join_table}
_ ->
{0, relationship.destination_field}
end
inner_sub_with_where =
Map.put(inner_sub, :wheres, [
%Ecto.Query.BooleanExpr{
expr:
{:==, [],
[
{{:., [], [{:&, [], [dest_binding]}, dest_field]}, [], []},
{{:., [], [{:parent_as, [], [alias_name]}, relationship.source_field]}, [], []}
]},
op: :and
}
])
subquery =
from(
sub in subquery(inner_sub_with_where),
select: field(sub, ^dest_field)
)
|> set_join_prefix(query, relationship.destination)
{subquery, alias_name}
end
defp used_aggregates(filter, relationship, used_calculations, path) do
Ash.Filter.used_aggregates(filter, path ++ [relationship.name]) ++
Enum.flat_map(
used_calculations,
fn calculation ->
case Ash.Filter.hydrate_refs(
calculation.module.expression(calculation.opts, calculation.context),
%{
resource: relationship.destination,
aggregates: %{},
calculations: %{},
public?: false
}
) do
{:ok, hydrated} ->
Ash.Filter.used_aggregates(hydrated)
_ ->
[]
end
end
)
end
defp set_join_prefix(join_query, query, resource) do
if Ash.Resource.Info.multitenancy_strategy(resource) == :context do
%{join_query | prefix: query.prefix || "public"}
else
%{
join_query
| prefix: repo(resource).config()[:default_prefix] || "public"
}
end
end
defp add_filter_expression(query, filter) do
wheres =
filter
|> split_and_statements()
|> Enum.map(fn filter ->
{params, expr} = filter_to_expr(filter, query.__ash_bindings__, [])
%Ecto.Query.BooleanExpr{
expr: expr,
op: :and,
params: params
}
end)
%{query | wheres: query.wheres ++ wheres}
end
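  # Split a filter into its top-level AND-ed clauses so that each becomes its
  # own `where`. Double negations are dropped, and `not (a or b)` is rewritten
  # via De Morgan's law as `not a and not b` so it can be split further.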
defp split_and_statements(%Filter{expression: expression}) do
split_and_statements(expression)
end
defp split_and_statements(%BooleanExpression{op: :and, left: left, right: right}) do
split_and_statements(left) ++ split_and_statements(right)
end
defp split_and_statements(%Not{expression: %Not{expression: expression}}) do
split_and_statements(expression)
end
defp split_and_statements(%Not{
expression: %BooleanExpression{op: :or, left: left, right: right}
}) do
split_and_statements(%BooleanExpression{
op: :and,
left: %Not{expression: left},
right: %Not{expression: right}
})
end
defp split_and_statements(other), do: [other]
defp filter_to_expr(expr, bindings, params, embedded? \\ false, type \\ nil)
defp filter_to_expr(%Filter{expression: expression}, bindings, params, embedded?, type) do
filter_to_expr(expression, bindings, params, embedded?, type)
end
# A nil filter means "everything"
defp filter_to_expr(nil, _, _, _, _), do: {[], true}
# A true filter means "everything"
defp filter_to_expr(true, _, _, _, _), do: {[], true}
# A false filter means "nothing"
defp filter_to_expr(false, _, _, _, _), do: {[], false}
defp filter_to_expr(expression, bindings, params, embedded?, type) do
do_filter_to_expr(expression, bindings, params, embedded?, type)
end
defp do_filter_to_expr(expr, bindings, params, embedded? \\ false, type \\ nil)
defp do_filter_to_expr(
%BooleanExpression{op: op, left: left, right: right},
bindings,
params,
embedded?,
_type
) do
{params, left_expr} = do_filter_to_expr(left, bindings, params, embedded?)
{params, right_expr} = do_filter_to_expr(right, bindings, params, embedded?)
{params, {op, [], [left_expr, right_expr]}}
end
defp do_filter_to_expr(%Not{expression: expression}, bindings, params, embedded?, _type) do
{params, new_expression} = do_filter_to_expr(expression, bindings, params, embedded?)
{params, {:not, [], [new_expression]}}
end
defp do_filter_to_expr(
%TrigramSimilarity{arguments: [arg1, arg2], embedded?: pred_embedded?},
bindings,
params,
embedded?,
_type
) do
{params, arg1} = do_filter_to_expr(arg1, bindings, params, pred_embedded? || embedded?)
{params, arg2} = do_filter_to_expr(arg2, bindings, params, pred_embedded? || embedded?)
{params, {:fragment, [], [raw: "similarity(", expr: arg1, raw: ", ", expr: arg2, raw: ")"]}}
end
defp do_filter_to_expr(
%Type{arguments: [arg1, arg2], embedded?: pred_embedded?},
bindings,
params,
embedded?,
_type
) do
{params, arg1} = do_filter_to_expr(arg1, bindings, params, false)
{params, arg2} = do_filter_to_expr(arg2, bindings, params, pred_embedded? || embedded?)
{params, {:type, [], [arg1, parameterized_type(arg2, [])]}}
end
defp do_filter_to_expr(
%Type{arguments: [arg1, arg2, constraints], embedded?: pred_embedded?},
bindings,
params,
embedded?,
_type
) do
{params, arg1} = do_filter_to_expr(arg1, bindings, params, pred_embedded? || embedded?)
{params, arg2} = do_filter_to_expr(arg2, bindings, params, pred_embedded? || embedded?)
{params, {:type, [], [arg1, parameterized_type(arg2, constraints)]}}
end
defp do_filter_to_expr(
%Fragment{arguments: arguments, embedded?: pred_embedded?},
bindings,
params,
embedded?,
_type
) do
arguments =
case arguments do
[{:raw, _} | _] ->
arguments
arguments ->
[{:raw, ""} | arguments]
end
arguments =
case List.last(arguments) do
nil ->
arguments
{:raw, _} ->
arguments
_ ->
arguments ++ [{:raw, ""}]
end
{params, fragment_data} =
Enum.reduce(arguments, {params, []}, fn
{:raw, str}, {params, fragment_data} ->
{params, fragment_data ++ [{:raw, str}]}
{:casted_expr, expr}, {params, fragment_data} ->
{params, fragment_data ++ [{:expr, expr}]}
{:expr, expr}, {params, fragment_data} ->
{params, expr} = do_filter_to_expr(expr, bindings, params, pred_embedded? || embedded?)
{params, fragment_data ++ [{:expr, expr}]}
end)
{params, {:fragment, [], fragment_data}}
end
defp do_filter_to_expr(
%IsNil{left: left, right: right, embedded?: pred_embedded?},
bindings,
params,
embedded?,
_type
) do
{params, left_expr} = do_filter_to_expr(left, bindings, params, pred_embedded? || embedded?)
{params, right_expr} = do_filter_to_expr(right, bindings, params, pred_embedded? || embedded?)
{params,
{:==, [],
[
{:is_nil, [], [left_expr]},
right_expr
]}}
end
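  # `ago(interval, unit)` becomes Ecto's `datetime_add/3` applied to the
  # current UTC time, which is appended to the params; negating the interval
  # turns the addition into a subtraction.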
defp do_filter_to_expr(
%Ago{arguments: [left, right], embedded?: _pred_embedded?},
_bindings,
params,
_embedded?,
_type
)
when is_integer(left) and (is_binary(right) or is_atom(right)) do
{params ++ [{DateTime.utc_now(), {:param, :any_datetime}}],
{:datetime_add, [], [{:^, [], [Enum.count(params)]}, left * -1, to_string(right)]}}
end
defp do_filter_to_expr(
%Contains{arguments: [left, %Ash.CiString{} = right], embedded?: pred_embedded?},
bindings,
params,
embedded?,
type
) do
do_filter_to_expr(
%Fragment{
embedded?: pred_embedded?,
arguments: [
raw: "strpos(",
expr: left,
raw: "::citext, ",
expr: right,
raw: ") > 0"
]
},
bindings,
params,
embedded?,
type
)
end
defp do_filter_to_expr(
%Contains{arguments: [left, right], embedded?: pred_embedded?},
bindings,
params,
embedded?,
type
) do
do_filter_to_expr(
%Fragment{
embedded?: pred_embedded?,
arguments: [
raw: "strpos(",
expr: left,
raw: ", ",
expr: right,
raw: ") > 0"
]
},
bindings,
params,
embedded?,
type
)
end
defp do_filter_to_expr(
%If{arguments: [condition, when_true, when_false], embedded?: pred_embedded?},
bindings,
params,
embedded?,
type
) do
[condition_type, when_true_type, when_false_type] =
case determine_types(If, [condition, when_true, when_false]) do
[condition_type, when_true] ->
[condition_type, when_true, nil]
[condition_type, when_true, when_false] ->
[condition_type, when_true, when_false]
end
{params, condition} =
do_filter_to_expr(condition, bindings, params, pred_embedded? || embedded?, condition_type)
{params, when_true} =
do_filter_to_expr(when_true, bindings, params, pred_embedded? || embedded?, when_true_type)
{params, when_false} =
do_filter_to_expr(
when_false,
bindings,
params,
pred_embedded? || embedded?,
when_false_type
)
do_filter_to_expr(
%Fragment{
embedded?: pred_embedded?,
arguments: [
raw: "CASE WHEN ",
casted_expr: condition,
raw: " THEN ",
casted_expr: when_true,
raw: " ELSE ",
casted_expr: when_false,
raw: " END"
]
},
bindings,
params,
embedded?,
type
)
end
defp do_filter_to_expr(
%mod{
__predicate__?: _,
left: left,
right: right,
embedded?: pred_embedded?,
operator: :<>
},
bindings,
params,
embedded?,
type
) do
[left_type, right_type] = determine_types(mod, [left, right])
{params, left_expr} =
do_filter_to_expr(left, bindings, params, pred_embedded? || embedded?, left_type)
{params, right_expr} =
do_filter_to_expr(right, bindings, params, pred_embedded? || embedded?, right_type)
do_filter_to_expr(
%Fragment{
embedded?: pred_embedded?,
arguments: [
casted_expr: left_expr,
raw: " || ",
casted_expr: right_expr
]
},
bindings,
params,
embedded?,
type
)
end
defp do_filter_to_expr(
%mod{
__predicate__?: _,
left: left,
right: right,
embedded?: pred_embedded?,
operator: op
},
bindings,
params,
embedded?,
_type
) do
[left_type, right_type] = determine_types(mod, [left, right])
{params, left_expr} =
do_filter_to_expr(left, bindings, params, pred_embedded? || embedded?, left_type)
{params, right_expr} =
do_filter_to_expr(right, bindings, params, pred_embedded? || embedded?, right_type)
{params,
{op, [],
[
left_expr,
right_expr
]}}
end
defp do_filter_to_expr(
%Ref{
attribute: %Ash.Query.Calculation{} = calculation,
relationship_path: [],
resource: resource
},
bindings,
params,
embedded?,
type
) do
calculation = %{calculation | load: calculation.name}
case Ash.Filter.hydrate_refs(
calculation.module.expression(calculation.opts, calculation.context),
%{
resource: resource,
aggregates: %{},
calculations: %{},
public?: false
}
) do
{:ok, expression} ->
do_filter_to_expr(
expression,
bindings,
params,
embedded?,
type
)
{:error, _error} ->
{params, nil}
end
end
defp do_filter_to_expr(
%Ref{
attribute: %Ash.Query.Calculation{} = calculation,
relationship_path: relationship_path
} = ref,
bindings,
params,
embedded?,
type
) do
binding_to_replace =
Enum.find_value(bindings.bindings, fn {i, binding} ->
if binding.path == relationship_path do
i
end
end)
temp_bindings =
bindings.bindings
|> Map.delete(0)
|> Map.update!(binding_to_replace, &Map.merge(&1, %{path: [], type: :root}))
case Ash.Filter.hydrate_refs(
calculation.module.expression(calculation.opts, calculation.context),
%{
resource: ref.resource,
aggregates: %{},
calculations: %{},
public?: false
}
) do
{:ok, hydrated} ->
hydrated
|> Ash.Filter.update_aggregates(fn aggregate, _ ->
%{aggregate | relationship_path: []}
end)
|> do_filter_to_expr(
%{bindings | bindings: temp_bindings},
params,
embedded?,
type
)
_ ->
{params, nil}
end
end
defp do_filter_to_expr(
%Ref{attribute: %Ash.Query.Aggregate{} = aggregate} = ref,
bindings,
params,
_embedded?,
_type
) do
expr = {{:., [], [{:&, [], [ref_binding(ref, bindings)]}, aggregate.name]}, [], []}
type = parameterized_type(aggregate.type, [])
type =
if aggregate.kind == :list do
{:array, type}
else
type
end
with_default =
if aggregate.default_value do
{:coalesce, [], [expr, {:type, [], [aggregate.default_value, type]}]}
else
expr
end
{params, with_default}
end
defp do_filter_to_expr(
%Ref{attribute: %{name: name}} = ref,
bindings,
params,
_embedded?,
_type
) do
{params, {{:., [], [{:&, [], [ref_binding(ref, bindings)]}, name]}, [], []}}
end
defp do_filter_to_expr({:embed, other}, _bindings, params, _true, _type) do
{params, other}
end
defp do_filter_to_expr(%Ash.CiString{string: string}, bindings, params, embedded?, type) do
{params, string} = do_filter_to_expr(string, bindings, params, embedded?)
do_filter_to_expr(
%Fragment{
embedded?: embedded?,
arguments: [
raw: "",
casted_expr: string,
raw: "::citext"
]
},
bindings,
params,
embedded?,
type
)
end
defp do_filter_to_expr(%MapSet{} = mapset, bindings, params, embedded?, type) do
do_filter_to_expr(Enum.to_list(mapset), bindings, params, embedded?, type)
end
defp do_filter_to_expr(other, _bindings, params, true, _type) do
{params, other}
end
defp do_filter_to_expr(value, _bindings, params, false, type) do
type = type || :any
value = last_ditch_cast(value, type)
{params ++ [{value, type}], {:^, [], [Enum.count(params)]}}
end
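  # Best-effort casts for values about to become query parameters: booleans
  # pass through, other atoms become strings, `{:in, type}` lists are cast
  # element-wise, and anything else is left untouched.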
defp last_ditch_cast(value, {:in, type}) when is_list(value) do
Enum.map(value, &last_ditch_cast(&1, type))
end
defp last_ditch_cast(value, _) when is_boolean(value) do
value
end
defp last_ditch_cast(value, _) when is_atom(value) do
to_string(value)
end
defp last_ditch_cast(value, _type) do
value
end
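  # Determine argument types for an operator/function module. Each candidate
  # signature (from `types/0` or `args/0`) is scored by vagueness (`:same` = 2,
  # `:any` = 1, concrete = 0) after filling in types known from refs, and the
  # least vague signature wins.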
defp determine_types(mod, values) do
Code.ensure_compiled(mod)
cond do
:erlang.function_exported(mod, :types, 0) ->
mod.types()
:erlang.function_exported(mod, :args, 0) ->
mod.args()
true ->
[:any]
end
|> Enum.map(fn types ->
case types do
:same ->
types =
for _ <- values do
:same
end
closest_fitting_type(types, values)
:any ->
for _ <- values do
:any
end
types ->
closest_fitting_type(types, values)
end
end)
|> Enum.min_by(fn types ->
types
|> Enum.map(&vagueness/1)
|> Enum.sum()
end)
end
defp closest_fitting_type(types, values) do
types_with_values = Enum.zip(types, values)
types_with_values
|> fill_in_known_types()
|> clarify_types()
end
defp clarify_types(types) do
basis =
types
|> Enum.map(&elem(&1, 0))
|> Enum.min_by(&vagueness(&1))
Enum.map(types, fn {type, _value} ->
replace_same(type, basis)
end)
end
defp replace_same({:in, type}, basis) do
{:in, replace_same(type, basis)}
end
defp replace_same(:same, :same) do
:any
end
defp replace_same(:same, {:in, :same}) do
{:in, :any}
end
defp replace_same(:same, basis) do
basis
end
defp replace_same(other, _basis) do
other
end
defp fill_in_known_types(types) do
Enum.map(types, &fill_in_known_type/1)
end
defp fill_in_known_type(
{vague_type, %Ref{attribute: %{type: type, constraints: constraints}}} = ref
)
when vague_type in [:any, :same] do
if Ash.Type.ash_type?(type) do
type = type |> Ash.Type.ecto_type() |> parameterized_type(constraints) |> array_to_in()
{type, ref}
else
type =
if is_atom(type) && :erlang.function_exported(type, :type, 1) do
{:parameterized, type, []} |> array_to_in()
else
type |> array_to_in()
end
{type, ref}
end
end
defp fill_in_known_type(
{{:array, type}, %Ref{attribute: %{type: {:array, type}} = attribute} = ref}
) do
{:in, fill_in_known_type({type, %{ref | attribute: %{attribute | type: type}}})}
end
defp fill_in_known_type({type, value}), do: {array_to_in(type), value}
defp array_to_in({:array, v}), do: {:in, array_to_in(v)}
defp array_to_in({:parameterized, type, constraints}),
do: {:parameterized, array_to_in(type), constraints}
defp array_to_in(v), do: v
defp vagueness({:in, type}), do: vagueness(type)
defp vagueness(:same), do: 2
defp vagueness(:any), do: 1
defp vagueness(_), do: 0
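  # Find the binding a ref should read from. Aggregate refs prefer their
  # dedicated :aggregate binding, falling back to an :inner/:left/:root join
  # binding on the same relationship path.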
defp ref_binding(
%{attribute: %Ash.Query.Aggregate{} = aggregate, relationship_path: []},
bindings
) do
Enum.find_value(bindings.bindings, fn {binding, data} ->
data.path == aggregate.relationship_path && data.type == :aggregate && binding
end) ||
Enum.find_value(bindings.bindings, fn {binding, data} ->
data.path == aggregate.relationship_path && data.type in [:inner, :left, :root] && binding
end)
end
defp ref_binding(
%{attribute: %Ash.Query.Calculation{}} = ref,
bindings
) do
Enum.find_value(bindings.bindings, fn {binding, data} ->
data.path == ref.relationship_path && data.type in [:inner, :left, :root] && binding
end)
end
defp ref_binding(%{attribute: %Ash.Resource.Attribute{}} = ref, bindings) do
Enum.find_value(bindings.bindings, fn {binding, data} ->
data.path == ref.relationship_path && data.type in [:inner, :left, :root] && binding
end)
end
defp ref_binding(%{attribute: %Ash.Query.Aggregate{}} = ref, bindings) do
Enum.find_value(bindings.bindings, fn {binding, data} ->
data.path == ref.relationship_path && data.type in [:inner, :left, :root] && binding
end)
end
defp add_binding(query, data) do
current = query.__ash_bindings__.current
bindings = query.__ash_bindings__.bindings
new_ash_bindings = %{
query.__ash_bindings__
| bindings: Map.put(bindings, current, data),
current: current + 1
}
%{query | __ash_bindings__: new_ash_bindings}
end
@impl true
def transaction(resource, func) do
repo(resource).transaction(func)
end
@impl true
def rollback(resource, term) do
repo(resource).rollback(term)
end
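  # Build the data-layer query for a related resource, carrying over the root
  # query's context and prefix and applying the relationship's filter and
  # sort; an invalid Ash query is returned as an error.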
defp maybe_get_resource_query(resource, relationship, root_query) do
resource
|> Ash.Query.new()
|> Map.put(:context, root_query.__ash_bindings__.context)
|> Ash.Query.set_context(relationship.context)
|> Ash.Query.do_filter(relationship.filter)
|> Ash.Query.sort(Map.get(relationship, :sort))
|> case do
%{valid?: true} = query ->
initial_query = %{resource_to_query(resource, nil) | prefix: Map.get(root_query, :prefix)}
Ash.Query.data_layer_query(query,
only_validate_filter?: false,
initial_query: initial_query
)
query ->
{:error, query}
end
end
defp table(resource, changeset) do
changeset.context[:data_layer][:table] || AshPostgres.table(resource)
end
defp raise_table_error!(resource, operation) do
if AshPostgres.polymorphic?(resource) do
raise """
Could not determine table for #{operation} on #{inspect(resource)}.
Polymorphic resources require that the `data_layer[:table]` context is provided.
See the guide on polymorphic resources for more information.
"""
else
raise """
Could not determine table for #{operation} on #{inspect(resource)}.
"""
end
end
end
# source: lib/data_layer.ex
defmodule YukiHelper.Download do
@moduledoc """
Provides functions related to downloading testcases.
"""
alias YukiHelper.{Config, Problem, Api.Yukicoder}
alias YukiHelper.Exceptions.DownloadError
@typedoc """
Two types of testcase file, input file and output file.
"""
@type filetype() :: :in | :out
@typedoc """
Filename of the testcase.
"""
@type filename() :: String.t()
@typedoc """
A list of filename of the testcase.
"""
@type filename_list() :: [filename()]
@typedoc """
Data of the response body.
"""
@type data() :: String.t()
@doc """
Gets a list of testcases for the specified problem.
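
## Example

A minimal sketch; assumes `config` is a loaded `YukiHelper.Config` value and
that problem 10 exists:

    {:ok, files} = YukiHelper.Download.get_testcases(config, 10)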
"""
@spec get_testcases(Config.t(), Problem.no(), keyword()) :: {:ok, filename_list()} | {:error, term()}
def get_testcases(config, no, opts \\ []) do
path = if Keyword.get(opts, :problem_id),
do: "/problems/#{no}/file/in",
else: "/problems/no/#{no}/file/in"
headers = Config.headers!(config)
options = Config.options!(config)
with res <- Yukicoder.get!(path, headers, options),
200 <- Map.get(res, :status_code),
body <- Map.get(res, :body) do
{:ok, body}
else
404 ->
{
:error,
%DownloadError{
path: path,
status: 404,
description: "a target was not found"
}
}
code ->
{
:error,
%DownloadError{
path: path,
status: code,
description: "an unexpected error has occurred"
}
}
end
end
@spec get_testcases!(Config.t(), Problem.no(), keyword()) :: filename_list()
def get_testcases!(config, no, opts \\ []) do
case get_testcases(config, no, opts) do
{:ok, body} -> body
{:error, err} -> Mix.raise err
end
end
@doc """
Downloads the specified testcase for the problem.
"""
@spec get_testcase(Config.t(), Problem.no(), filename(), filetype(), keyword()) :: {:ok, data()} | {:error, term()}
def get_testcase(config, no, filename, type, opts \\ []) do
path = if Keyword.get(opts, :problem_id),
do: "/problems/#{no}/file/#{type}/#{filename}",
else: "/problems/no/#{no}/file/#{type}/#{filename}"
headers = Config.headers!(config)
options = Config.options!(config)
with res <- Yukicoder.get!(path, headers, options),
200 <- Map.get(res, :status_code),
body <- Map.get(res, :body) do
body = if is_number(body), do: "#{body}\n", else: body
{:ok, body}
else
404 ->
{
:error,
%DownloadError{
path: path,
status: 404,
description: "a target was not found"
}
}
code ->
{
:error,
%DownloadError{
path: path,
status: code,
description: "an unexpected error has occurred"
}
}
end
end
@spec get_testcase!(Config.t(), Problem.no(), filename(), filetype(), keyword()) :: data()
def get_testcase!(config, no, filename, type, opts \\ []) do
case get_testcase(config, no, filename, type, opts) do
{:ok, body} -> body
{:error, err} -> Mix.raise err
end
end
@doc """
Returns whether all testcases have already been downloaded.
"""
@spec download_tastcases?(filename_list(), Config.t(), Problem.no()) :: boolean()
def download_tastcases?(testcase_list, config, no) do
root = Path.expand(Problem.problem_path(config, no))
Enum.reduce(testcase_list, true, fn file, download? ->
Enum.reduce([:in, :out], download?, fn filetype, download? ->
download? && File.exists?(Path.join([root, "#{filetype}", file]))
end)
end)
end
end
# source: lib/yuki_helper/donwload.ex
defmodule Grid do
defstruct max_x: 0, max_y: 0, nodes: %{}
def all_possible_moves(grid) do
grid.nodes
|> Map.values
|> Enum.flat_map(fn(node) ->
possible_moves(node, grid)
|> Enum.map(fn(new_position) -> %{node: node, new_position: new_position} end)
end
)
|> MapSet.new
end
def move(node, new_position, grid) do
%Grid{
max_x: grid.max_x,
max_y: grid.max_y,
nodes: move_node(node, new_position, grid.nodes)
}
end
defp move_node(from_node, new_position, nodes) do
{new_x, new_y} = new_position
nodes
|> Map.values
|> Enum.map(fn(n) ->
cond do
n.x == from_node.x && n.y == from_node.y -> %GridNode{size: n.size, x: n.x, y: n.y, used: 0, tag: {-1, -1}}
n.x == new_x && n.y == new_y -> %GridNode{size: n.size, x: n.x, y: n.y, used: from_node.used, tag: from_node.tag}
n -> n
end
end
)
|> Enum.map(fn(node) -> {{node.x, node.y}, node} end)
|> Enum.into(%{})
end
defp possible_moves(node, grid) do
node
|> possible_directions(grid.max_x, grid.max_y)
|> Enum.filter(fn(pos) -> GridNode.would_fit_on(node, grid.nodes[pos]) end)
end
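  # Enumerate the in-bounds orthogonal neighbours of a position, with explicit
  # clauses for the four corners, the four edges, and the interior.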
defp possible_directions(node, max_x, max_y) do
case {node.x, node.y} do
{0, 0} -> [{0, 1}, {1, 0}]
{0, y} when y === max_y -> [{0, max_y - 1}, {1, max_y}]
{x, 0} when x === max_x -> [{max_x - 1, 0}, {max_x, 1}]
{x, y} when x === max_x and y === max_y -> [{x - 1, y}, {x, y - 1}]
{x, y} when x === max_x -> [{x - 1, y}, {x, y - 1}, {x, y + 1}]
{x, y} when y === max_y -> [{x, y - 1}, {x - 1, y}, {x + 1, y}]
{0, y} -> [{1, y}, {0, y - 1}, {0, y + 1}]
{x, 0} -> [{x, 1}, {x - 1, 0}, {x + 1, 0}]
{x, y} -> [{x - 1, y}, {x + 1, y}, {x, y - 1}, {x, y + 1}]
end
end
end
defimpl String.Chars, for: Grid do
def to_string(grid) do
0..grid.max_y
|> Enum.map(fn(y) -> 0..grid.max_x |> Enum.map(fn(x) -> GridNode.to_s(grid.nodes[{x, y}], grid) end) end)
|> Enum.join("\r\n")
end
end
# source: elixir-22/lib/grid.ex
defmodule StarkInfra.PixKey do
alias __MODULE__, as: PixKey
alias StarkInfra.Utils.Rest
alias StarkInfra.Utils.Check
alias StarkInfra.User.Project
alias StarkInfra.User.Organization
alias StarkInfra.Error
@moduledoc """
Groups PixKey related functions
"""
@doc """
PixKeys link bank account information to key ids.
Key ids are a convenient way to search and pass bank account information.
When you initialize a Pix Key, the entity will not be automatically
created in the Stark Infra API. The 'create' function sends the structs
to the Stark Infra API and returns the created struct.
## Parameters (required):
- `:account_created` [Date, DateTime or string]: opening Date or DateTime for the linked account. ex: "2022-01-01".
- `:account_number` [string]: number of the linked account. ex: "76543".
- `:account_type` [string]: type of the linked account. Options: "checking", "savings", "salary" or "payment".
- `:branch_code` [string]: branch code of the linked account. ex: 1234.
- `:name` [string]: holder's name of the linked account. ex: "<NAME>".
- `:tax_id` [string]: holder's taxId (CPF/CNPJ) of the linked account. ex: "012.345.678-90".
## Parameters (optional):
- `:id` [string, default nil]: id of the registered PixKey. Allowed types are: CPF, CNPJ, phone number or email. If this parameter is not passed, an EVP will be created. ex: "+5511989898989";
- `:tags` [list of strings, default nil]: list of strings for reference when searching for PixKeys. ex: ["employees", "monthly"]
## Attributes (return-only):
- `:owned` [DateTime]: datetime when the key was owned by the holder. ex: ~U[2020-3-10 10:30:0:0]
- `:owner_type` [string]: type of the owner of the PixKey. Options: "business" or "individual".
- `:status` [string]: current PixKey status. Options: "created", "registered", "canceled", "failed"
- `:bank_code` [string]: bank_code of the account linked to the Pix Key. ex: "20018183".
- `:bank_name` [string]: name of the bank that holds the account linked to the PixKey. ex: "StarkBank"
- `:type` [string]: type of the PixKey. Options: "cpf", "cnpj", "phone", "email" and "evp",
- `:created` [DateTime]: creation datetime for the PixKey. ex: ~U[2020-03-10 10:30:0:0]
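## Example

A minimal construction sketch (all field values below are placeholders):

    key = %StarkInfra.PixKey{
      account_created: "2022-01-01",
      account_number: "76543",
      account_type: "checking",
      branch_code: "1234",
      name: "Jane Doe",
      tax_id: "012.345.678-90"
    }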
"""
@enforce_keys [
:account_created,
:account_number,
:account_type,
:branch_code,
:name,
:tax_id
]
defstruct [
:account_created,
:account_number,
:account_type,
:branch_code,
:name,
:tax_id,
:id,
:tags,
:owned,
:owner_type,
:status,
:bank_code,
:bank_name,
:type,
:created
]
@type t() :: %__MODULE__{}
@doc """
Create a PixKey linked to a specific account in the Stark Infra API
## Parameters (required):
- `:key` [PixKey struct]: PixKey struct to be created in the API.
## Options:
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- PixKey struct with updated attributes.
"""
@spec create(
PixKey.t() | map(),
user: Project.t() | Organization.t() | nil
) ::
{:ok, PixKey.t()} |
{:error, [error: Error.t()]}
def create(keys, options \\ []) do
Rest.post_single(
resource(),
keys,
options
)
end
@doc """
Same as create(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec create!(
PixKey.t() | map(),
user: Project.t() | Organization.t() | nil
) :: any
def create!(keys, options \\ []) do
Rest.post_single!(
resource(),
keys,
options
)
end
@doc """
Retrieve the PixKey struct linked to your Workspace in the Stark Infra API by its id.
## Parameters (required):
- `:id` [string]: struct unique id. ex: "5656565656565656".
- `:payer_id` [string]: tax id (CPF/CNPJ) of the individual or business requesting the PixKey information. This id is used by the Central Bank to limit request rates. ex: "20.018.183/0001-80".
## Options:
- `:end_to_end_id` [string, default nil]: central bank's unique transaction id. If the request results in the creation of a PixRequest, the same endToEndId should be used. If this parameter is not passed, one endToEndId will be automatically created. Example: "E00002649202201172211u34srod19le"
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- PixKey struct that corresponds to the given id.
"""
@spec get(
binary,
payer_id: binary,
end_to_end_id: binary | nil,
user: Project.t() | Organization.t() | nil
) ::
{:ok, PixKey.t()} |
{:error, [error: Error.t()]}
def get(id, payer_id, options \\ []) do
options = [payer_id: payer_id] ++ options
Rest.get_id(resource(), id, options)
end
@doc """
Same as get(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec get!(
binary,
payer_id: binary,
end_to_end_id: binary | nil,
user: Project.t() | Organization.t() | nil
) :: any
def get!(id, payer_id, options \\ []) do
options = [payer_id: payer_id] ++ options
Rest.get_id!(resource(), id, options)
end
@doc """
Receive a stream of PixKey structs previously created in the Stark Infra API
## Options:
- `:limit` [integer, default nil]: maximum number of structs to be retrieved. Unlimited if nil. ex: 35
- `:after` [Date or string, default nil]: date filter for structs created after a specified date. ex: ~D[2020, 3, 10]
- `:before` [Date or string, default nil]: date filter for structs created before a specified date. ex: ~D[2020, 3, 10]
- `:status` [list of strings, default nil]: filter for status of retrieved structs. Options: "created", "registered", "canceled", "failed".
- `:tags` [list of strings, default nil]: tags to filter retrieved structs. ex: ["tony", "stark"]
- `:ids` [list of strings, default nil]: list of ids to filter retrieved structs. ex: ["5656565656565656", "4545454545454545"]
- `:type` [list of strings, default nil]: filter for the type of retrieved PixKeys. Options: "cpf", "cnpj", "phone", "email", "evp".
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- stream of PixKey structs with updated attributes
"""
@spec query(
limit: integer,
after: Date.t(),
before: Date.t(),
status: binary,
tags: [binary],
ids: [binary],
user: Project.t() | Organization.t() | nil
) ::
({:cont, [PixKey.t()]} |
{:error, [Error.t()]},
any -> any)
def query(options \\ []) do
Rest.get_list(resource(), options)
end
@doc """
Same as query(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec query!(
limit: integer,
after: Date.t(),
before: Date.t(),
status: binary,
tags: [binary],
ids: [binary],
user: Project.t() | Organization.t() | nil
) :: any
def query!(options \\ []) do
Rest.get_list!(resource(), options)
end
@doc """
Receive a page of PixKey structs previously created in the Stark Infra API
## Options:
- `:cursor` [string, default nil]: cursor returned on the previous page function call.
- `:limit` [integer, default 100]: maximum number of structs to be retrieved. Max = 100. ex: 35
- `:after` [Date or string, default nil]: date filter for structs created after a specified date. ex: ~D[2020, 3, 10]
- `:before` [Date or string, default nil]: date filter for structs created before a specified date. ex: ~D[2020, 3, 10]
- `:status` [list of strings, default nil]: filter for status of retrieved structs. Options: "created", "failed", "delivered", "confirmed", "success", "canceled"
- `:tags` [list of strings, default nil]: tags to filter retrieved structs. ex: ["tony", "stark"]
- `:ids` [list of strings, default nil]: list of ids to filter retrieved structs. ex: ["5656565656565656", "4545454545454545"]
- `:type` [list of strings, default nil]: filter for the type of retrieved PixKeys. Options: "cpf", "cnpj", "phone", "email", "evp".
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- cursor to retrieve the next page of PixKey structs
- list of PixKey structs with updated attributes
"""
@spec page(
cursor: binary,
limit: integer,
after: Date.t() | binary,
before: Date.t() | binary,
status: binary,
tags: [binary],
ids: [binary],
user: Project.t() | Organization.t() | nil
) ::
{:ok, {binary, [PixKey.t()]}} |
{:error, [error: Error.t()]}
def page(options \\ []) do
Rest.get_page(resource(), options)
end
@doc """
Same as page(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec page!(
cursor: binary,
limit: integer,
after: Date.t() | binary,
before: Date.t() | binary,
status: binary,
tags: [binary],
ids: [binary],
user: Project.t() | Organization.t() | nil
) :: any
def page!(options \\ []) do
Rest.get_page!(resource(), options)
end
@doc """
Update a PixKey parameters by passing id.
## Parameters (required):
- `:id` [string]: PixKey id. ex: '5656565656565656'
- `:reason` [string]: reason why the PixKey is being patched. Options: "branchTransfer", "reconciliation" or "userRequested".
## Parameters (optional):
- `:account_created` [Date, DateTime or string, default nil]: opening Date or DateTime for the account to be linked. ex: "2022-01-01.
- `:account_number` [string, default nil]: number of the account to be linked. ex: "76543".
- `:account_type` [string, default nil]: type of the account to be linked. Options: "checking", "savings", "salary" or "payment".
- `:branch_code` [string, default nil]: branch code of the account to be linked. ex: 1234".
- `:name` [string, default nil]: holder's name of the account to be linked. ex: "<NAME>".
## Return:
- PixKey with updated attributes
"""
@spec update(
binary,
reason: binary,
account_created: Date.t(),
account_number: binary,
account_type: binary,
branch_code: binary,
name: binary,
user: Project.t() | Organization.t() | nil
) ::
{:ok, PixKey.t()} |
{:error, [error: Error.t()]}
def update(id, reason, parameters \\ []) do
parameters = [reason: reason] ++ parameters
Rest.patch_id(
resource(),
id,
parameters
)
end
@doc """
Same as update(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec update!(
binary,
reason: binary,
account_created: Date.t(),
account_number: binary,
account_type: binary,
branch_code: binary,
name: binary,
user: Project.t() | Organization.t() | nil
) :: any
def update!(id, reason, parameters \\ []) do
parameters = [reason: reason] ++ parameters
Rest.patch_id!(
resource(),
id,
parameters
)
end
@doc """
Delete a pixKey entity previously created in the Stark Infra API
## Parameters (required):
- `:id` [string]: struct unique id. ex: "5656565656565656"
## Options:
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- canceled pixKey struct
"""
@spec cancel(
id: binary,
user: Project.t() | Organization.t() | nil
) ::
{:ok, [PixKey.t()]} |
{:error, [error: Error.t()]}
def cancel(id, options \\ []) do
Rest.delete_id(
resource(),
id,
options
)
end
@doc """
Same as cancel(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec cancel!(
id: binary,
user: Project.t() | Organization.t() | nil
) :: any
def cancel!(id, options \\ []) do
Rest.delete_id!(
resource(),
id,
options
)
end
@doc false
def resource() do
{
"PixKey",
&resource_maker/1
}
end
@doc false
def resource_maker(json) do
%PixKey{
account_created: json[:account_created] |> Check.datetime(),
account_number: json[:account_number],
account_type: json[:account_type],
branch_code: json[:branch_code],
name: json[:name],
tax_id: json[:tax_id],
id: json[:id],
tags: json[:tags],
owned: json[:owned] |> Check.datetime(),
owner_type: json[:owner_type],
status: json[:status],
bank_code: json[:bank_code],
bank_name: json[:bank_name],
type: json[:type],
created: json[:created] |> Check.datetime()
}
end
end
# source: lib/pix_key/pix_key.ex
defmodule Want.Map do
@moduledoc """
Manages conversions to and from maps.
"""
@type input :: Keyword.t() | map()
@type schema :: map()
@type key :: binary() | atom()
@type opts :: Keyword.t()
@type result :: {:ok, result :: map()} | {:error, reason :: binary()}
defimpl Want.Dump, for: Map do
@doc """
Dump a map value to a keyword list. All values inside the map will
be dumped using the associated `Want` module `dump/1` clauses.
"""
def dump(input, _opts) do
input
|> Map.to_list()
|> Enum.reduce_while([], fn({k, v}, out) ->
case Want.dump(v) do
{:ok, v} ->
{:cont, [{k, v} | out]}
{:error, reason} ->
{:halt, "Failed to dump value for key #{k}: #{inspect reason}"}
end
end)
|> case do
{:error, reason} ->
{:error, reason}
kv ->
{:ok, Enum.reverse(kv)}
end
end
end
defimpl Want.Update, for: Map do
@doc """
Update a Map type. For every key specified in the new value, corresponding
keys in the old value will be updated using the `Want.Update` protocol. Any
keys in :new that do not exist in :old will be added.
"""
def update(old, new) when is_map(new) or is_list(new) do
{:ok, new
|> Enum.reduce(old, fn({key, value}, out) ->
Map.update(out, key, value, fn v ->
case Want.Update.update(v, value) do
{:ok, new} ->
new
{:error, _reason} ->
v
end
end)
end)}
end
end
@doc """
Cast an incoming keyword list or map to an output map using the
provided schema to control conversion rules and validations.
## Options
* `:merge` - Provide a map matching the given schema that contains default values to be
used if the input value does not contain a particular field. Useful when updating a map
with new inputs without overwriting all fields.
## Examples
iex> Want.Map.cast(%{"id" => 1}, %{id: [type: :integer]})
{:ok, %{id: 1}}
iex> Want.Map.cast(%{}, %{id: [type: :integer, default: 1]})
{:ok, %{id: 1}}
iex> Want.Map.cast(%{"id" => "bananas"}, %{id: [type: :integer, default: 1]})
{:ok, %{id: 1}}
iex> Want.Map.cast(%{"hello" => "world", "foo" => "bar"}, %{hello: [], foo: [type: :atom]})
{:ok, %{hello: "world", foo: :bar}}
iex> Want.Map.cast(%{"date" => %Date{year: 2022, month: 01, day: 02}}, %{date: [type: :date]})
{:ok, %{date: %Date{year: 2022, month: 01, day: 02}}}
iex> Want.Map.cast(%{"date" => "2022-01-02"}, %{date: [type: :date]})
{:ok, %{date: %Date{year: 2022, month: 01, day: 02}}}
iex> Want.Map.cast(%{"hello" => "world"}, %{hello: [], foo: [required: true]})
{:error, "Failed to cast key foo (key :foo not found) and no default value provided."}
iex> Want.Map.cast(%{"hello" => "world"}, %{hello: [type: :enum, valid: [:world]]})
{:ok, %{hello: :world}}
iex> Want.Map.cast(%{"hello" => "world"}, %{hello: [], foo: []})
{:ok, %{hello: "world"}}
iex> Want.Map.cast(%{"hello" => %{"foo" => "bar"}}, %{hello: %{foo: [type: :atom]}})
{:ok, %{hello: %{foo: :bar}}}
iex> Want.Map.cast(%{"id" => "bananas"}, %{id: [type: :integer, default: 1]}, merge: %{id: 2})
{:ok, %{id: 2}}
"""
@spec cast(value :: input(), schema :: schema()) :: result()
def cast(input, schema),
do: cast(input, schema, [])
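  # cast/3 walks the schema: each field's value is looked up under
  # `opts[:from] || key`; on failure a `:merge` value or a `:default` is used,
  # nested map schemas propagate their own errors, and only a `:required`
  # field with no fallback aborts the whole cast.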
@spec cast(value :: input(), schema :: schema(), opts :: Keyword.t()) :: result()
def cast(input, schema, opts) when is_map(schema) and (is_list(input) or is_map(input)) do
schema
|> Enum.reduce_while(%{}, fn({key, field_opts}, out) ->
with {:error, reason} <- cast(input, field_opts[:from] || key, field_opts),
{false, _reason} <- {is_map(field_opts), reason},
{{:ok, default}, _} <- {merge_or_default(key, field_opts, opts), reason} do
{:cont, Map.put(out, key, default)}
else
{:ok, value} ->
{:cont, Map.put(out, key, value)}
{true, reason} ->
{:halt, {:error, "Failed to cast key #{key} to map: #{reason}"}}
{{:error, :no_default}, reason} ->
if field_opts[:required] do
{:halt, {:error, "Failed to cast key #{key} (#{reason}) and no default value provided."}}
else
{:cont, out}
end
end
end)
|> case do
result when is_map(result) ->
{:ok, result}
{:error, reason} ->
{:error, reason}
end
end
@spec cast(input :: any(), key :: key(), opts :: opts() | map()) :: {:ok, result :: any()} | {:error, reason :: binary()}
def cast(input, key, opts) when (is_list(input) or is_map(input)) and is_binary(key) and not is_nil(key) and not is_struct(input) do
input
|> Enum.find(fn
{k, _v} when is_atom(k) -> Atom.to_string(k) == key
{k, _v} when is_binary(k) -> k == key
_ -> false
end)
|> case do
{_, v} -> cast(v, type(opts), opts)
nil -> {:error, "key #{inspect key} not found"}
end
end
def cast(input, key, opts) when (is_list(input) or is_map(input)) and is_atom(key) and not is_nil(key) and not is_struct(input) do
input
|> Enum.find(fn
{k, _v} when is_atom(k) -> k == key
{k, _v} when is_binary(k) -> k == Atom.to_string(key)
_ -> false
end)
|> case do
{_, v} -> cast(v, type(opts), opts)
nil -> {:error, "key #{inspect key} not found"}
end
end
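#
# Type dispatch: route the raw value to the matching Want.<Type>.cast/2
# implementation for the type named in the field options.
#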
def cast(input, :integer, opts),
do: Want.Integer.cast(input, opts)
def cast(input, :string, opts),
do: Want.String.cast(input, opts)
def cast(input, :float, opts),
do: Want.Float.cast(input, opts)
def cast(input, :atom, opts),
do: Want.Atom.cast(input, opts)
def cast(input, :sort, opts),
do: Want.Sort.cast(input, opts)
def cast(input, :enum, opts),
do: Want.Enum.cast(input, opts)
def cast(input, :datetime, opts),
do: Want.DateTime.cast(input, opts)
def cast(input, :date, opts),
do: Want.Date.cast(input, opts)
def cast(input, nil, opts) when is_map(opts),
do: cast(input, opts)
def cast(_input, type, _opts),
do: {:error, "unknown cast type #{inspect type} specified"}
#
# Attempt to generate a value for a given map key, either using the cast options'
# `merge` map or a default value from the field options.
#
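# For example, a value in the cast options' :merge map takes precedence over
# a field default (hypothetical call):
#   merge_or_default(:id, [default: 1], merge: %{id: 2}) #=> {:ok, 2}
#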
defp merge_or_default(key, field_opts, cast_opts) do
cond do
Map.has_key?(Keyword.get(cast_opts, :merge, %{}), key) ->
{:ok, Map.get(Keyword.get(cast_opts, :merge, %{}), key)}
Keyword.has_key?(field_opts, :default) ->
{:ok, field_opts[:default]}
true ->
{:error, :no_default}
end
end
#
# Pull the type specified in a set of field options. Map-valued field options
# denote a nested schema and therefore have no scalar type.
#
defp type(opts) when is_list(opts),
do: Keyword.get(opts, :type, :string)
defp type(opts) when is_map(opts),
do: nil
end
defmodule Multichain do
@moduledoc """
This library is a thin wrapper around the Multichain JSON-RPC API. Instead of manually crafting an HTTP call with all of its params, you can put the connection params in your config and call a single function as the interface to the whole Multichain API.
Everything in this module maps to a low-level Multichain API call. Any Multichain API that takes no arguments can be called directly as `Multichain.<method_name>/0`.
How to use
1. Add dependency
In your mix.exs:
```
defp deps do
[
{:multichain, "~> 0.0.1"}
]
end
```
2. Add config
Add your node information to your config.exs:
```
config :multichain,
protocol: "http",
port: "1234",
host: "127.0.0.1",
username: "multichainrpc",
password: "<PASSWORD>",
chain: "chain1"
```
3. Done. You can now call any Multichain API using `api/2`.
For example, to call the `getinfo` API you only need to pass the method name and a params list:
```
iex(1)> Multichain.api("getinfo", [])
{:ok,
%{
"error" => nil,
"id" => nil,
"result" => %{
"balance" => 0.0,
"blocks" => 1001,
"burnaddress" => "1XXXXXXWjEXXXXXXxiXXXXXXMvXXXXXXUd2fZG",
"chainname" => "getchain",
"connections" => 0,
"description" => "MultiChain awesome",
"difficulty" => 6.0e-8,
"errors" => "",
"incomingpaused" => false,
"keypoololdest" => 1526642153,
"keypoolsize" => 2,
"miningpaused" => false,
"nodeaddress" => "getchain@192.168.127.12.:1234",
"nodeversion" => 10004901,
"paytxfee" => 0.0,
"port" => 9243,
"protocol" => "multichain",
"protocolversion" => 10010,
"proxy" => "",
"reindex" => false,
"relayfee" => 0.0,
"setupblocks" => 60,
"testnet" => false,
"timeoffset" => 0,
"version" => "1.0.4",
"walletdbversion" => 2,
"walletversion" => 60000
}
}}
```
The full list of APIs and the params each one takes can be found in the official [Multichain API Documentation](https://www.multichain.com/developers/json-rpc-api/). Basically, you pass the method name and its params straight through to the node.
"""
alias Multichain.Http
@doc """
This is the function through which you can call any individual Multichain API. Pass the method name as a `String` and the params as a `List`.
Some examples can be seen below:
```
Multichain.api("listaddresses", ["*", true, 3, -3])
Multichain.api("getinfo", [])
Multichain.api("help", [])
```
```
iex(1)> Multichain.api("validateaddress", ["<KEY>"])
{:ok,
%{
"error" => nil,
"id" => nil,
"result" => %{
"account" => "",
"address" => "1KFjut7GpLN2DSvRrh6UATxYxy5nxYaY7EGhys",
"ismine" => false,
"isscript" => false,
"isvalid" => true,
"iswatchonly" => true,
"synchronized" => false
}
}}
```
"""
def api(method, params) do
Http.jsonrpccall(method, params)
end
@doc """
This function returns the list of asset balances held by the given address. If the address holds no assets, it returns an empty list.
```
iex(1)> Multichain.balances("1DEd7MqSxLgpDs9uUipcmfXqWxxpzwiJ8SojmY")
{:ok,
%{
"error" => nil,
"id" => nil,
"result" => [
%{"assetref" => "176-266-23437", "name" => "MMK", "qty" => 1.0e3}
]
}}
```
"""
def balances(address) do
Http.jsonrpccall("getaddressbalances", [address, 1, true])
end
@doc """
This function returns the balance of a specific asset for the given address. It always returns a number.
Any problem, such as an invalid address or even a connection failure, is translated to zero (0).
```
iex(1)> Multichain.balance("1DEd7MqSxLgpDs9uUipcmfXqWxxpzwiJ8SojmY", "176-266-23437")
1.0e3
```
"""
def balance(address, assetcode) do
# TODO: handle locked unspent outputs; read the API docs and include locked unspent amounts.
case Http.jsonrpccall("getaddressbalances", [address, 1, true]) do
{:ok, result} -> find_asset(result["result"], assetcode)
_ -> 0
end
end
@doc """
This function returns the balance of a specific asset as a tuple of a status atom and a result.
While `balance/2` always returns a number, `balance!/2` tells you when an error has happened.
```
iex(1)> Multichain.balance!("1DEd7MqSxLgpDs9uUipcmfXqWxxpzwiJ8SojcmY", "176-266-23437")
{:error,
"Error code: 500. Reason: Invalid address: 1DEd7MqSxLgpDs9uUipcmfXqWxxpzwiJ8SojcmY"
iex(2)> Multichain.balance!("1DEd7MqSxLgpDs9uUipcmfXqWxxpzwiJ8SojmY", "176-266-23437")
{:ok, 1.0e3}
```
"""
def balance!(address, assetcode) do
case Http.jsonrpccall("getaddressbalances", [address, 1, true]) do
{:ok, result} -> find_asset!(result["result"], assetcode)
other -> other
end
end
@doc """
This function returns a list of all assets owned by the node's wallet (those where `ismine` is true).
```
iex(1)> Multichain.nodebalance
{:ok,
[
%{"assetref" => "6196-266-29085", "name" => "MMKP", "qty" => 9.0e7},
%{"assetref" => "176-266-23437", "name" => "MMK", "qty" => 1.74e5},
%{"assetref" => "60-266-6738", "name" => "GET", "qty" => 9970.0}
]}
```
"""
def nodebalance do
case Http.jsonrpccall("gettotalbalances", []) do
{:ok, result} -> {:ok, result["result"]}
error -> error
end
end
@doc """
This function returns the runtime configuration parameters of the connected Multichain node.
"""
def getruntimeparams do
case Http.jsonrpccall("getruntimeparams", []) do
{:ok, result} -> {:ok, result["result"]}
error -> error
end
end
@doc """
Get the list of available Multichain API commands.
"""
def help do
case Http.jsonrpccall("help", []) do
{:ok, result} -> {:ok, result["result"]}
error -> error
end
end
@doc """
This function returns general information about the connected Multichain node.
It is commonly used to check the connection.
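The shape of a successful reply (values are illustrative and trimmed):
```
iex(1)> Multichain.getinfo
{:ok, %{"chainname" => "chain1", "nodeversion" => 10004901, ...}}
```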
"""
def getinfo do
case Http.jsonrpccall("getinfo", []) do
{:ok, result} -> {:ok, result["result"]}
error -> error
end
end
@doc """
This function returns a list of all addresses owned by the node's wallet, including the asset list for each address, along with a `"total"` summary.
```
iex(1)> Multichain.allbalances
{:ok,
%{
"1MRUjzje91QBpnBqkhAdrnCDKHikXFhsPQ4rA2" => [
%{"assetref" => "6196-266-29085", "name" => "MMKP", "qty" => 9.0e7},
%{"assetref" => "176-266-23437", "name" => "MMK", "qty" => 1.74e5},
%{"assetref" => "60-266-6738", "name" => "GET", "qty" => 9970.0}
],
"total" => [
%{"assetref" => "6196-266-29085", "name" => "MMKP", "qty" => 9.0e7},
%{"assetref" => "176-266-23437", "name" => "MMK", "qty" => 1.74e5},
%{"assetref" => "60-266-6738", "name" => "GET", "qty" => 9970.0}
]
}}
```
"""
def allbalances do
case Http.jsonrpccall("getmultibalances", []) do
{:ok, result} -> {:ok, result["result"]}
error -> error
end
end
# ------------------------------------------------Private Area ----------------------------------
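# find_asset/2 and find_asset!/2 scan the balance list returned by the node
# for a matching asset reference. For example (values illustrative):
#   find_asset([%{"assetref" => "176-266-23437", "qty" => 1.0e3}], "176-266-23437")
#   #=> 1.0e3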
defp find_asset(list, assetcode) do
case Enum.filter(list, fn x -> x["assetref"] == assetcode end) do
[] -> 0
[ada] -> ada["qty"]
_ -> 0
end
end
defp find_asset!(list, assetcode) do
case Enum.filter(list, fn x -> x["assetref"] == assetcode end) do
[] -> {:ok, 0}
[ada] -> {:ok, ada["qty"]}
_ -> {:error, "Unknown Error"}
end
end
end
defmodule Transmap do
@default_key :_default
@spread_key :_spread
@doc """
Transforms the given map.
`rule_map` is a map of `key => rule` elements, interpreted as follows:
* `{:_default, rule}` - Changes the default rule for the map from `false` to the given rule
* `{:_spread, targets}` - Pops the maps at the given key paths and merges their entries into the result map
* `{key, true}` - Puts the value for the key into the result map
* `{key, false}` - Doesn't put the value for the key into the result map
* `{key, rule_map}` when `rule_map` is a map - Transforms the value for the key with the given nested rule
* `{key, {new_key, rule}}` - Renames the key to `new_key` and applies `rule`
* `{key, new_key}` - Shorthand for `{new_key, true}`
If the `:diff` option is `true`, empty maps are removed from the result.
## Examples
Basic transformation:
iex> map = %{a: 1, b: 2, c: 3}
iex> rule = %{a: true}
iex> Transmap.transform(map, rule)
%{a: 1}
iex> map = %{a: %{b: 1, c: 2}, d: %{e: 3, f: 4}, g: %{h: 5}}
iex> rule = %{a: %{c: true}, d: true, g: %{i: true}}
iex> Transmap.transform(map, rule)
%{a: %{c: 2}, d: %{e: 3, f: 4}, g: %{}}
iex> Transmap.transform(map, rule, diff: true)
%{a: %{c: 2}, d: %{e: 3, f: 4}}
Transformation with `_default`:
iex> map = %{a: 1, b: 2, c: %{d: 3, e: 4}}
iex> rule = %{_default: true, b: false, c: %{_default: true}}
iex> Transmap.transform(map, rule)
%{a: 1, c: %{d: 3, e: 4}}
Transformation with `_spread`:
iex> map = %{a: %{b: 1, c: 2}, d: %{e: %{f: 3, g: 4}, h: 5}}
iex> rule = %{_spread: [[:a], [:d, :e]], a: true, d: %{e: %{f: true}}}
iex> Transmap.transform(map, rule)
%{b: 1, c: 2, d: %{}, f: 3}
iex> Transmap.transform(map, rule, diff: true)
%{b: 1, c: 2, f: 3}
iex> map = %{a: %{b: %{c: 1, d: 2}}}
iex> rule = %{_spread: [[:a, :b]], a: %{b: %{_default: true, c: :C}}}
iex> Transmap.transform(map, rule)
%{C: 1, d: 2, a: %{}}
iex> Transmap.transform(map, rule, diff: true)
%{C: 1, d: 2}
Transformation with renaming:
iex> map = %{a: 1, b: 2, c: %{d: 3, e: 4}, f: %{g: 5}}
iex> rule = %{_spread: [[:f]], a: {"A", true}, b: "B", c: {:C, %{d: 6}}, f: %{g: "G"}}
iex> Transmap.transform(map, rule)
%{"A" => 1, "B" => 2, :C => %{6 => 3}, "G" => 5}
iex> map = %{a: 1, b: %{a: 2}, c: %{a: 3}}
iex> rule = %{_spread: [[:b], [:c]], a: true, b: %{a: :b}, c: %{a: {:c, true}}}
iex> Transmap.transform(map, rule)
%{a: 1, b: 2, c: 3}
"""
@spec transform(data :: any, rule :: any, opts :: Keyword.t()) :: any
def transform(data, rule, opts \\ [])
def transform(data, _, _opts) when not is_map(data), do: data
def transform(data, true, _opts), do: data
def transform(data, rule, opts) when is_map(data) do
{default, rule} = Map.pop(rule, @default_key, false)
{spread, rule} = Map.pop(rule, @spread_key, [])
{data, rule} = apply_spread(data, rule, spread)
diff = Keyword.get(opts, :diff, false)
Enum.reduce(data, %{}, fn {key, value}, result ->
{key, rule_value} = with_key(key, Map.get(rule, key, default))
if rule_value != false do
filtered = transform(value, rule_value, opts)
case {filtered, diff} do
{map, true} when is_map(map) and map_size(map) == 0 -> result
{other, _} -> Map.put(result, key, other)
end
else
result
end
end)
end
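#
# Pops each key path listed under :_spread out of both the data and the rule,
# then merges the popped map's entries into the parent level. When the popped
# rule is a map, its :_default is expanded over the popped data's keys and any
# renames are applied so spread entries keep their renamed keys.
#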
defp apply_spread(data, rule, spread) do
Enum.reduce(spread, {data, rule}, fn keys, {data, rule} ->
{data_map, data} = pop_in(data, keys)
{rule_map, rule} = pop_in(rule, keys)
if is_map(data_map) do
if is_map(rule_map) do
{default, rule_map} = Map.pop(rule_map, @default_key, false)
rule_map = Enum.reduce(data_map, rule_map, fn {key, _}, rule_map ->
Map.put_new(rule_map, key, default)
end)
{data_map, rule_map} = apply_rename(data_map, rule_map)
data = Map.merge(data, data_map)
rule = Map.merge(rule, rule_map)
{data, rule}
else
rule_map = for {key, _} <- data_map, into: %{}, do: {key, true}
data = Map.merge(data, data_map)
rule = Map.merge(rule, rule_map)
{data, rule}
end
else
{data, rule}
end
end)
end
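#
# Applies the {new_key, rule} rename shorthand within a popped map: each
# renamed key is moved in the rule map and, when present, in the data map.
#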
defp apply_rename(data_map, rule_map) do
reducer = fn {key, rule}, {data_map, rule_map} ->
{new_key, rule} = with_key(key, rule)
if key == new_key do
{data_map, rule_map}
else
rule_map = rule_map |> Map.delete(key) |> Map.put(new_key, rule)
data_map = if Map.has_key?(data_map, key) do
{data, data_map} = Map.pop(data_map, key)
Map.put(data_map, new_key, data)
else
data_map
end
{data_map, rule_map}
end
end
Enum.reduce(rule_map, {data_map, rule_map}, reducer)
end
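#
# Normalizes a rule into a {key, rule} pair, resolving the rename shorthands
# ({new_key, rule} tuples and bare new_key values) described in the doc.
#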
defp with_key(key, rule) do
case rule do
boolean when is_boolean(boolean) -> {key, boolean}
{new_key, rule_value} -> {new_key, rule_value}
rule_value when is_map(rule_value) -> {key, rule_value}
new_key -> {new_key, true}
end
end
end