hexsha stringlengths 40 40 | size int64 2 991k | ext stringclasses 2 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 208 | max_stars_repo_name stringlengths 6 106 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses list | max_stars_count int64 1 33.5k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 208 | max_issues_repo_name stringlengths 6 106 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses list | max_issues_count int64 1 16.3k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 208 | max_forks_repo_name stringlengths 6 106 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses list | max_forks_count int64 1 6.91k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 991k | avg_line_length float64 1 36k | max_line_length int64 1 977k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0830eb11ce654801cef47b0ee2f864af272e32a3 | 1,908 | ex | Elixir | clients/compute/lib/google_api/compute/v1/model/target_pools_add_instance_request.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | 1 | 2018-12-03T23:43:10.000Z | 2018-12-03T23:43:10.000Z | clients/compute/lib/google_api/compute/v1/model/target_pools_add_instance_request.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | null | null | null | clients/compute/lib/google_api/compute/v1/model/target_pools_add_instance_request.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the elixir code generator program.
# Do not edit the class manually.
defmodule GoogleApi.Compute.V1.Model.TargetPoolsAddInstanceRequest do
@moduledoc """
## Attributes
* `instances` (*type:* `list(GoogleApi.Compute.V1.Model.InstanceReference.t)`, *default:* `nil`) - A full or partial URL to an instance to add to this target pool. This can be a full or partial URL. For example, the following are valid URLs:
- https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name
- projects/project-id/zones/zone/instances/instance-name
- zones/zone/instances/instance-name
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:instances => list(GoogleApi.Compute.V1.Model.InstanceReference.t())
}
field(:instances, as: GoogleApi.Compute.V1.Model.InstanceReference, type: :list)
end
defimpl Poison.Decoder, for: GoogleApi.Compute.V1.Model.TargetPoolsAddInstanceRequest do
def decode(value, options) do
GoogleApi.Compute.V1.Model.TargetPoolsAddInstanceRequest.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.Compute.V1.Model.TargetPoolsAddInstanceRequest do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 38.16 | 247 | 0.751048 |
08310c0b1b3997dc7241ae612b876e60f8aec5cf | 57 | ex | Elixir | lib/habex_web/views/page_view.ex | richardpanda/habex | ba7869f11d8c736db3cbedfb326c754a2d79bec8 | [
"MIT"
] | null | null | null | lib/habex_web/views/page_view.ex | richardpanda/habex | ba7869f11d8c736db3cbedfb326c754a2d79bec8 | [
"MIT"
] | null | null | null | lib/habex_web/views/page_view.ex | richardpanda/habex | ba7869f11d8c736db3cbedfb326c754a2d79bec8 | [
"MIT"
] | null | null | null | defmodule HabexWeb.PageView do
use HabexWeb, :view
end
| 14.25 | 30 | 0.789474 |
083110e4a8137045c70876f23aa7983f68dd4cda | 19,480 | ex | Elixir | lib/phoenix_live_view/upload_config.ex | jonatanklosko/phoenix_live_view | 95d5c7ccd0ac66e04b15c7b6128d44b60767e682 | [
"MIT"
] | 1 | 2021-04-22T16:40:32.000Z | 2021-04-22T16:40:32.000Z | lib/phoenix_live_view/upload_config.ex | jonatanklosko/phoenix_live_view | 95d5c7ccd0ac66e04b15c7b6128d44b60767e682 | [
"MIT"
] | null | null | null | lib/phoenix_live_view/upload_config.ex | jonatanklosko/phoenix_live_view | 95d5c7ccd0ac66e04b15c7b6128d44b60767e682 | [
"MIT"
] | 1 | 2021-01-14T12:58:22.000Z | 2021-01-14T12:58:22.000Z | defmodule Phoenix.LiveView.UploadEntry do
@moduledoc """
The struct representing an upload entry.
"""
alias Phoenix.LiveView.UploadEntry
defstruct progress: 0,
preflighted?: false,
upload_config: nil,
upload_ref: nil,
ref: nil,
uuid: nil,
valid?: false,
done?: false,
cancelled?: false,
client_name: nil,
client_size: nil,
client_type: nil,
client_last_modified: nil
@type t :: %__MODULE__{
progress: integer(),
upload_config: String.t() | :atom,
upload_ref: String.t(),
ref: String.t() | nil,
uuid: String.t() | nil,
valid?: boolean(),
done?: boolean(),
cancelled?: boolean(),
client_name: String.t() | nil,
client_size: integer() | nil,
client_type: String.t() | nil,
client_last_modified: integer() | nil
}
@doc false
def put_progress(%UploadEntry{} = entry, 100) do
%UploadEntry{entry | progress: 100, done?: true}
end
def put_progress(%UploadEntry{} = entry, progress) do
%UploadEntry{entry | progress: progress}
end
end
defmodule Phoenix.LiveView.UploadConfig do
@moduledoc """
The struct representing an upload.
"""
alias Phoenix.LiveView.UploadConfig
alias Phoenix.LiveView.UploadEntry
@default_max_file_size 8_000_000
@default_chunk_size 64_000
@default_chunk_timeout 10_000
@unregistered :unregistered
@invalid :invalid
@too_many_files :too_many_files
if Version.match?(System.version(), ">= 1.8.0") do
@derive {Inspect,
only: [
:name,
:ref,
:entries,
:max_entries,
:max_file_size,
:accept,
:errors,
:auto_upload?,
:progress_event
]}
end
defstruct name: nil,
cid: :unregistered,
client_key: nil,
max_entries: 1,
max_file_size: @default_max_file_size,
chunk_size: @default_chunk_size,
chunk_timeout: @default_chunk_timeout,
entries: [],
entry_refs_to_pids: %{},
entry_refs_to_metas: %{},
accept: [],
acceptable_types: MapSet.new(),
acceptable_exts: MapSet.new(),
external: false,
allowed?: false,
ref: nil,
errors: [],
auto_upload?: false,
progress_event: nil
@type t :: %__MODULE__{
name: atom() | String.t(),
# a nil cid represents a LiveView socket
cid: :unregistered | nil | integer(),
client_key: String.t(),
max_entries: pos_integer(),
max_file_size: pos_integer(),
entries: list(),
entry_refs_to_pids: %{String.t() => pid() | :unregistered | :done},
entry_refs_to_metas: %{String.t() => map()},
accept: list() | :any,
acceptable_types: MapSet.t(),
acceptable_exts: MapSet.t(),
external:
(UploadEntry.t(), Phoenix.LiveView.Socket.t() ->
{:ok | :error, meta :: %{uploader: String.t()}, Phoenix.LiveView.Socket.t()})
| false,
allowed?: boolean,
errors: list(),
ref: String.t(),
auto_upload?: boolean(),
progress_event:
(name :: atom() | String.t(), UploadEntry.t(), Phoenix.LiveView.Socket.t() ->
{:noreply, Phoenix.LiveView.Socket.t()})
| nil
}
@doc false
# we require a random_ref in order to ensure unique calls to `allow_upload`
# invalidate old uploads on the client and expire old tokens for the same
# upload name
def build(name, random_ref, [_ | _] = opts) when is_atom(name) do
{html_accept, acceptable_types, acceptable_exts} =
case Keyword.fetch(opts, :accept) do
{:ok, [_ | _] = accept} ->
{types, exts} = validate_accept_option(accept)
{Enum.join(accept, ","), types, exts}
{:ok, :any} ->
{:any, MapSet.new(), MapSet.new()}
{:ok, other} ->
raise ArgumentError, """
invalid accept filter provided to allow_upload.
A list of the following unique file type specifiers are supported:
* A valid case-insensitive filename extension, starting with a period (".") character.
For example: .jpg, .pdf, or .doc.
* A valid MIME type string, with no extensions.
Alternately, you can provide the atom :any to allow any kind of file. Got:
#{inspect(other)}
"""
:error ->
raise ArgumentError, """
the :accept option is required when allowing uploads.
Provide a list of unique file type specifiers or the atom :any to allow any kind of file.
"""
end
external =
case Keyword.fetch(opts, :external) do
{:ok, func} when is_function(func, 2) ->
func
{:ok, other} ->
raise ArgumentError, """
invalid :external value provided to allow_upload.
Only an anymous function receiving the socket as an argument is supported. Got:
#{inspect(other)}
"""
:error ->
false
end
max_file_size =
case Keyword.fetch(opts, :max_file_size) do
{:ok, pos_integer} when is_integer(pos_integer) and pos_integer > 0 ->
pos_integer
{:ok, other} ->
raise ArgumentError, """
invalid :max_file_size value provided to allow_upload.
Only a positive integer is supported (Defaults to #{@default_max_file_size} bytes). Got:
#{inspect(other)}
"""
:error ->
@default_max_file_size
end
chunk_size =
case Keyword.fetch(opts, :chunk_size) do
{:ok, pos_integer} when is_integer(pos_integer) and pos_integer > 0 ->
pos_integer
{:ok, other} ->
raise ArgumentError, """
invalid :chunk_size value provided to allow_upload.
Only a positive integer is supported (Defaults to #{@default_chunk_size} bytes). Got:
#{inspect(other)}
"""
:error ->
@default_chunk_size
end
chunk_timeout =
case Keyword.fetch(opts, :chunk_timeout) do
{:ok, pos_integer} when is_integer(pos_integer) and pos_integer > 0 ->
pos_integer
{:ok, other} ->
raise ArgumentError, """
invalid :chunk_timeout value provided to allow_upload.
Only a positive integer in milliseconds is supported (Defaults to #{
@default_chunk_timeout
} ms). Got:
#{inspect(other)}
"""
:error ->
@default_chunk_timeout
end
progress_event =
case Keyword.fetch(opts, :progress) do
{:ok, func} when is_function(func, 3) ->
func
{:ok, other} ->
raise ArgumentError, """
invalid :progress value provided to allow_upload.
Only 3-arity anonymous function is supported. Got:
#{inspect(other)}
"""
:error ->
nil
end
%UploadConfig{
ref: random_ref,
name: name,
max_entries: opts[:max_entries] || 1,
max_file_size: max_file_size,
entry_refs_to_pids: %{},
entry_refs_to_metas: %{},
accept: html_accept,
acceptable_types: acceptable_types,
acceptable_exts: acceptable_exts,
external: external,
chunk_size: chunk_size,
chunk_timeout: chunk_timeout,
progress_event: progress_event,
auto_upload?: Keyword.get(opts, :auto_upload, false),
allowed?: true
}
end
@doc false
def entry_pid(%UploadConfig{} = conf, %UploadEntry{} = entry) do
case Map.fetch(conf.entry_refs_to_pids, entry.ref) do
{:ok, pid} when is_pid(pid) -> pid
{:ok, status} when status in [@unregistered, @invalid] -> nil
end
end
@doc false
def get_entry_by_pid(%UploadConfig{} = conf, channel_pid) when is_pid(channel_pid) do
Enum.find_value(conf.entry_refs_to_pids, fn {ref, pid} ->
if channel_pid == pid do
get_entry_by_ref(conf, ref)
end
end)
end
@doc false
def get_entry_by_ref(%UploadConfig{} = conf, ref) do
Enum.find(conf.entries, fn %UploadEntry{} = entry -> entry.ref === ref end)
end
@doc false
def unregister_completed_external_entry(%UploadConfig{} = conf, entry_ref) do
%UploadEntry{} = entry = get_entry_by_ref(conf, entry_ref)
drop_entry(conf, entry)
end
@doc false
def unregister_completed_entry(%UploadConfig{} = conf, entry_ref) do
%UploadEntry{} = entry = get_entry_by_ref(conf, entry_ref)
drop_entry(conf, entry)
end
@doc false
def registered?(%UploadConfig{} = conf) do
Enum.find(conf.entry_refs_to_pids, fn {_ref, maybe_pid} -> is_pid(maybe_pid) end)
end
@doc false
def mark_preflighted(%UploadConfig{} = conf) do
%UploadConfig{
conf
| entries: for(entry <- conf.entries, do: %UploadEntry{entry | preflighted?: true})
}
end
@doc false
def entries_awaiting_preflight(%UploadConfig{} = conf) do
for entry <- conf.entries, not entry.preflighted?, do: entry
end
@doc false
def register_entry_upload(%UploadConfig{} = conf, channel_pid, entry_ref)
when is_pid(channel_pid) do
case Map.fetch(conf.entry_refs_to_pids, entry_ref) do
{:ok, @unregistered} ->
{:ok,
%UploadConfig{
conf
| entry_refs_to_pids: Map.put(conf.entry_refs_to_pids, entry_ref, channel_pid)
}}
{:ok, existing_pid} when is_pid(existing_pid) ->
{:error, :already_registered}
:error ->
{:error, :disallowed}
end
end
# specifics on the `accept` attribute are illuminated in the spec:
# https://html.spec.whatwg.org/multipage/input.html#attr-input-accept
@accept_wildcards ~w(audio/* image/* video/*)
defp validate_accept_option(accept) do
{types, exts} =
Enum.reduce(accept, {[], []}, fn opt, {types_acc, exts_acc} ->
{type, exts} = accept_option!(opt)
{[type | types_acc], exts ++ exts_acc}
end)
{MapSet.new(types), MapSet.new(exts)}
end
# wildcards for media files
defp accept_option!(key) when key in @accept_wildcards, do: {key, []}
defp accept_option!(<<"." <> extname::binary>> = ext) do
if MIME.has_type?(extname) do
{MIME.type(extname), [ext]}
else
raise ArgumentError, """
invalid accept filter provided to allow_upload.
Expected a file extension with a known MIME type.
MIME types can be extended in your application configuration as follows:
config :mime, :types, %{
"application/vnd.api+json" => ["json-api"]
}
Got:
#{inspect(extname)}
"""
end
end
defp accept_option!(filter) when is_binary(filter) do
if MIME.valid?(filter) do
{filter, []}
else
raise ArgumentError, """
invalid accept filter provided to allow_upload.
Expected a known MIME type without parameters.
MIME types can be extended in your application configuration as follows:
config :mime, :types, %{
"application/vnd.api+json" => ["json-api"]
}
Got:
#{inspect(filter)}
"""
end
end
@doc false
def disallow(%UploadConfig{} = conf), do: %UploadConfig{conf | allowed?: false}
@doc false
def uploaded_entries(%UploadConfig{} = conf) do
Enum.filter(conf.entries, fn %UploadEntry{} = entry -> entry.progress == 100 end)
end
@doc false
def update_entry(%UploadConfig{} = conf, entry_ref, func) do
new_entries =
Enum.map(conf.entries, fn
%UploadEntry{ref: ^entry_ref} = entry -> func.(entry)
%UploadEntry{ref: _ef} = entry -> entry
end)
recalculate_computed_fields(%UploadConfig{conf | entries: new_entries})
end
@doc false
def update_progress(%UploadConfig{} = conf, entry_ref, progress)
when is_integer(progress) and progress >= 0 and progress <= 100 do
update_entry(conf, entry_ref, fn entry -> UploadEntry.put_progress(entry, progress) end)
end
@doc false
def update_entry_meta(%UploadConfig{} = conf, entry_ref, %{} = meta) do
case Map.fetch(meta, :uploader) do
{:ok, _} ->
:noop
:error ->
raise ArgumentError,
"external uploader metadata requires an :uploader key. Got: #{inspect(meta)}"
end
new_metas = Map.put(conf.entry_refs_to_metas, entry_ref, meta)
%UploadConfig{conf | entry_refs_to_metas: new_metas}
end
@doc false
def put_entries(%UploadConfig{} = conf, entries) do
new_entries =
for entry <- entries, !get_entry_by_ref(conf, Map.fetch!(entry, "ref")), do: entry
new_conf =
Enum.reduce(new_entries, conf, fn client_entry, acc ->
case cast_and_validate_entry(acc, client_entry) do
{:ok, new_conf} -> new_conf
{:error, new_conf} -> new_conf
end
end)
if too_many_files?(new_conf) do
{:error, put_error(new_conf, new_conf.ref, @too_many_files)}
else
case new_conf do
%UploadConfig{errors: []} = new_conf ->
{:ok, new_conf}
%UploadConfig{errors: [_ | _]} = new_conf ->
{:error, new_conf}
end
end
end
defp too_many_files?(%UploadConfig{entries: entries, max_entries: max}) do
length(entries) > max
end
defp cast_and_validate_entry(%UploadConfig{} = conf, %{"ref" => ref} = client_entry) do
:error = Map.fetch(conf.entry_refs_to_pids, ref)
entry = %UploadEntry{
ref: ref,
upload_ref: conf.ref,
upload_config: conf.name,
client_name: Map.fetch!(client_entry, "name"),
client_size: Map.fetch!(client_entry, "size"),
client_type: Map.fetch!(client_entry, "type"),
client_last_modified: Map.get(client_entry, "last_modified")
}
{:ok, entry}
|> validate_max_file_size(conf)
|> validate_accepted(conf)
|> case do
{:ok, entry} ->
{:ok, put_valid_entry(conf, entry)}
{:error, reason} ->
{:error, put_invalid_entry(conf, entry, reason)}
end
end
defp put_valid_entry(conf, entry) do
entry = %UploadEntry{entry | valid?: true, uuid: generate_uuid()}
new_pids = Map.put(conf.entry_refs_to_pids, entry.ref, @unregistered)
new_metas = Map.put(conf.entry_refs_to_metas, entry.ref, %{})
%UploadConfig{
conf
| entries: conf.entries ++ [entry],
entry_refs_to_pids: new_pids,
entry_refs_to_metas: new_metas
}
end
defp put_invalid_entry(conf, entry, reason) do
entry = %UploadEntry{entry | valid?: false}
new_pids = Map.put(conf.entry_refs_to_pids, entry.ref, @invalid)
new_metas = Map.put(conf.entry_refs_to_metas, entry.ref, %{})
new_conf = %UploadConfig{
conf
| entries: conf.entries ++ [entry],
entry_refs_to_pids: new_pids,
entry_refs_to_metas: new_metas
}
put_error(new_conf, entry.ref, reason)
end
defp validate_max_file_size({:ok, %UploadEntry{client_size: size}}, %UploadConfig{
max_file_size: max
})
when size > max or not is_integer(size),
do: {:error, :too_large}
defp validate_max_file_size({:ok, entry}, _conf), do: {:ok, entry}
defp validate_accepted({:ok, %UploadEntry{} = entry}, conf) do
if accepted?(conf, entry) do
{:ok, entry}
else
{:error, :not_accepted}
end
end
defp validate_accepted({:error, _} = error, _conf), do: error
defp accepted?(%UploadConfig{accept: :any}, %UploadEntry{}), do: true
defp accepted?(
%UploadConfig{acceptable_types: acceptable_types} = conf,
%UploadEntry{client_type: client_type} = entry
) do
cond do
# wildcard
String.starts_with?(client_type, "image/") and "image/*" in acceptable_types -> true
String.starts_with?(client_type, "audio/") and "audio/*" in acceptable_types -> true
String.starts_with?(client_type, "video/") and "video/*" in acceptable_types -> true
# strict
client_type in acceptable_types -> true
Path.extname(entry.client_name) in conf.acceptable_exts -> true
true -> false
end
end
defp recalculate_computed_fields(%UploadConfig{} = conf) do
recalculate_errors(conf)
end
defp recalculate_errors(%UploadConfig{ref: ref} = conf) do
if too_many_files?(conf) do
conf
else
new_errors =
Enum.filter(conf.errors, fn
{^ref, @too_many_files} -> false
_ -> true
end)
%UploadConfig{conf | errors: new_errors}
end
end
@doc false
def put_error(%UploadConfig{} = conf, _entry_ref, @too_many_files = reason) do
%UploadConfig{conf | errors: Enum.uniq(conf.errors ++ [{conf.ref, reason}])}
end
def put_error(%UploadConfig{} = conf, entry_ref, reason) do
%UploadConfig{conf | errors: conf.errors ++ [{entry_ref, reason}]}
end
@doc false
def cancel_entry(%UploadConfig{} = conf, %UploadEntry{} = entry) do
case entry_pid(conf, entry) do
channel_pid when is_pid(channel_pid) ->
Phoenix.LiveView.UploadChannel.cancel(channel_pid)
update_entry(conf, entry.ref, fn entry -> %UploadEntry{entry | cancelled?: true} end)
_ ->
drop_entry(conf, entry)
end
end
@doc false
def drop_entry(%UploadConfig{} = conf, %UploadEntry{ref: ref}) do
new_entries = for entry <- conf.entries, entry.ref != ref, do: entry
new_errors = Enum.filter(conf.errors, fn {error_ref, _} -> error_ref != ref end)
new_refs = Map.delete(conf.entry_refs_to_pids, ref)
new_metas = Map.delete(conf.entry_refs_to_metas, ref)
new_conf = %UploadConfig{
conf
| entries: new_entries,
errors: new_errors,
entry_refs_to_pids: new_refs,
entry_refs_to_metas: new_metas
}
recalculate_computed_fields(new_conf)
end
@doc false
def register_cid(%UploadConfig{} = conf, cid) do
%UploadConfig{conf | cid: cid}
end
# UUID generation
# Copyright (c) 2013 Plataformatec
# Copyright (c) 2020 Dashbit
# https://github.com/elixir-ecto/ecto/blob/99dff4c4403c258ea939fe9bdfb4e339baf05e13/lib/ecto/uuid.ex
defp generate_uuid do
<<u0::48, _::4, u1::12, _::2, u2::62>> = :crypto.strong_rand_bytes(16)
bin = <<u0::48, 4::4, u1::12, 2::2, u2::62>>
<<a1::4, a2::4, a3::4, a4::4, a5::4, a6::4, a7::4, a8::4, b1::4, b2::4, b3::4, b4::4, c1::4,
c2::4, c3::4, c4::4, d1::4, d2::4, d3::4, d4::4, e1::4, e2::4, e3::4, e4::4, e5::4, e6::4,
e7::4, e8::4, e9::4, e10::4, e11::4, e12::4>> = bin
<<e(a1), e(a2), e(a3), e(a4), e(a5), e(a6), e(a7), e(a8), ?-, e(b1), e(b2), e(b3), e(b4), ?-,
e(c1), e(c2), e(c3), e(c4), ?-, e(d1), e(d2), e(d3), e(d4), ?-, e(e1), e(e2), e(e3), e(e4),
e(e5), e(e6), e(e7), e(e8), e(e9), e(e10), e(e11), e(e12)>>
end
@compile {:inline, e: 1}
defp e(0), do: ?0
defp e(1), do: ?1
defp e(2), do: ?2
defp e(3), do: ?3
defp e(4), do: ?4
defp e(5), do: ?5
defp e(6), do: ?6
defp e(7), do: ?7
defp e(8), do: ?8
defp e(9), do: ?9
defp e(10), do: ?a
defp e(11), do: ?b
defp e(12), do: ?c
defp e(13), do: ?d
defp e(14), do: ?e
defp e(15), do: ?f
end
| 29.249249 | 102 | 0.600359 |
08312ab6b19eeee1ee80607d3eb22956b2023bb1 | 1,950 | ex | Elixir | lib/estated/property/tax.ex | jdav-dev/estated | a8476b803eff425b5b73517e7ea180bb7f8cc30b | [
"Apache-2.0"
] | null | null | null | lib/estated/property/tax.ex | jdav-dev/estated | a8476b803eff425b5b73517e7ea180bb7f8cc30b | [
"Apache-2.0"
] | null | null | null | lib/estated/property/tax.ex | jdav-dev/estated | a8476b803eff425b5b73517e7ea180bb7f8cc30b | [
"Apache-2.0"
] | null | null | null | defmodule Estated.Property.Tax do
@moduledoc "Tax record as provided by the assessor."
@moduledoc since: "0.2.0"
defstruct year: nil,
amount: nil,
exemptions: [],
rate_code_area: nil
@typedoc "Tax record as provided by the assessor."
@typedoc since: "0.2.0"
@type t :: %__MODULE__{
year: year() | nil,
amount: amount() | nil,
exemptions: exemptions(),
rate_code_area: rate_code_area() | nil
}
@typedoc """
The year the tax was levied.
Eg. **2017**
"""
@typedoc since: "0.2.0"
@type year :: pos_integer()
@typedoc """
The amount of tax on the property in dollars.
Eg. **2247**
"""
@typedoc since: "0.2.0"
@type amount :: integer()
@typedoc """
List of exemptions.
Eg. [**Agricultural, Low Income**](https://estated.com/developers/docs/v4/property/enum-overview#exemptions)
"""
@typedoc since: "0.2.0"
@type exemptions :: [String.t()]
@typedoc """
Represents separate tax jurisdictions within the county as provided on the county tax/assessment
roll.
Eg. **01H**
"""
@typedoc since: "0.2.0"
@type rate_code_area :: String.t()
@doc false
@doc since: "0.2.0"
@spec cast_list([map()]) :: [t()]
def cast_list(taxes) when is_list(taxes) do
Enum.map(taxes, &cast/1)
end
@spec cast_list(nil) :: nil
def cast_list(nil) do
[]
end
defp cast(%{} = taxes) do
Enum.reduce(taxes, %__MODULE__{}, &cast_field/2)
end
defp cast_field({"year", year}, acc) do
%__MODULE__{acc | year: year}
end
defp cast_field({"amount", amount}, acc) do
%__MODULE__{acc | amount: amount}
end
defp cast_field({"exemptions", exemptions}, acc) do
%__MODULE__{acc | exemptions: exemptions}
end
defp cast_field({"rate_code_area", rate_code_area}, acc) do
%__MODULE__{acc | rate_code_area: rate_code_area}
end
defp cast_field(_map_entry, acc) do
acc
end
end
| 22.159091 | 110 | 0.623077 |
0831317a613a4b5b5dc41a26f78c040ea9262993 | 4,554 | ex | Elixir | lib/make/session.ex | ne-sachirou/docker-elixir | 5b2c04f66229632c72e34668ccb98fe93a3916b3 | [
"Zlib"
] | 1 | 2018-11-02T11:03:57.000Z | 2018-11-02T11:03:57.000Z | lib/make/session.ex | ne-sachirou/docker-elixir | 5b2c04f66229632c72e34668ccb98fe93a3916b3 | [
"Zlib"
] | 36 | 2019-06-14T03:37:31.000Z | 2021-12-20T15:39:47.000Z | lib/make/session.ex | ne-sachirou/docker-elixir | 5b2c04f66229632c72e34668ccb98fe93a3916b3 | [
"Zlib"
] | null | null | null | defmodule Make.Session do
@moduledoc false
alias Make.Target
require Logger
use GenServer
@type name :: atom
@type status :: :none | :creating | :created | :failed
@type target_ref :: {atom, binary}
@type target :: term
@type t :: %__MODULE__{name: name, targets: %{target_ref => target}}
defstruct name: nil, targets: %{}
def start_link(args), do: GenServer.start_link(__MODULE__, args, name: args[:name])
@impl GenServer
def init(name: name, targets: targets) do
targets =
for target_or_ref <- targets,
reduce: %{},
do: (targets -> put_target_or_ref(targets, target_or_ref))
targets =
for {{type, name}, target} <- targets, into: %{} do
target =
if is_nil(target),
do: struct(Enum.find(Make.target_modules(), &(&1.type() == type)), name: name),
else: target
{{type, name}, target}
end
{:ok, %__MODULE__{name: name, targets: targets}}
end
@spec create(name, target_ref) :: status
def create(name, target_ref), do: GenServer.call(name, {:create, target_ref}, Make.timeout())
@doc false
@spec put_target(name, target_ref, target) :: any
def put_target(name, target_ref, target),
do: GenServer.cast(name, {:put_target, target_ref, target})
@spec target_status(name, target_ref) :: status
def target_status(name, target_ref), do: GenServer.call(name, {:target_status, target_ref})
@spec targets(name) :: %{target_ref => target}
def targets(name), do: GenServer.call(name, :targets)
@impl GenServer
def handle_call({:create, target_ref}, from, session) do
{target, session} =
case session.targets[target_ref] do
nil ->
raise("Unknown target: #{inspect(target_ref)}")
target ->
case target.status do
:none -> {target, put_in(session.targets[target_ref].status, :creating)}
status when status in [:creating, :created, :failed] -> {target, session}
end
end
case target.status do
:none ->
Logger.info("Start: #{inspect(target_ref)}")
{:ok, _} =
Task.start_link(fn ->
try do
target.depends
|> Task.async_stream(&create(session.name, &1), timeout: Make.timeout())
|> Stream.run()
target = target |> Target.create() |> put_in([Access.key!(:status)], :created)
put_target(session.name, target_ref, target)
GenServer.reply(from, __MODULE__.target_status(session.name, target_ref))
Logger.info("Created: #{inspect(target_ref)}")
rescue
err ->
Logger.error("Failed: #{inspect(target_ref)}")
put_target(session.name, target_ref, put_in(target.status, :failed))
GenServer.reply(from, __MODULE__.target_status(session.name, target_ref))
reraise err, __STACKTRACE__
end
end)
{:noreply, session}
:creating ->
{:ok, _} =
Task.start_link(fn ->
100
|> Stream.interval()
|> Stream.take_while(fn _ ->
__MODULE__.target_status(session.name, target_ref) == :creating
end)
|> Stream.run()
GenServer.reply(from, __MODULE__.target_status(session.name, target_ref))
end)
{:noreply, session}
status when status in [:created, :failed] ->
{:reply, status, session}
end
end
def handle_call({:target_status, target_ref}, _, session),
do: {:reply, Map.fetch!(session.targets, target_ref).status, session}
def handle_call(:targets, _, session), do: {:reply, session.targets, session}
@impl GenServer
def handle_cast({:put_target, target_ref, target}, session),
do: {:noreply, put_in(session.targets[target_ref], target)}
@spec put_target_or_ref(%{target_ref => target | nil}, target | target_ref) :: %{
target_ref => target | nil
}
defp put_target_or_ref(targets, {type, name}),
do: update_in(targets[{type, name}], &(&1 || nil))
defp put_target_or_ref(targets, target) do
targets =
for target_or_ref <- target.depends,
reduce: targets,
do: (targets -> put_target_or_ref(targets, target_or_ref))
target =
update_in(
target,
[Access.key!(:depends), Access.filter(&is_map/1)],
&{&1.__struct__.type(), &1.name}
)
put_in(targets[{target.__struct__.type(), target.name}], target)
end
end
| 30.563758 | 95 | 0.599693 |
0831482be3c6aaabb3fc4a6842d2b3e6114ec3a9 | 1,222 | ex | Elixir | lib/code_corps/stripe_service/adapters/stripe_file_upload.ex | fikape/code-corps-api | c21674b0b2a19fa26945c94268db8894420ca181 | [
"MIT"
] | 275 | 2015-06-23T00:20:51.000Z | 2021-08-19T16:17:37.000Z | lib/code_corps/stripe_service/adapters/stripe_file_upload.ex | fikape/code-corps-api | c21674b0b2a19fa26945c94268db8894420ca181 | [
"MIT"
] | 1,304 | 2015-06-26T02:11:54.000Z | 2019-12-12T21:08:00.000Z | lib/code_corps/stripe_service/adapters/stripe_file_upload.ex | fikape/code-corps-api | c21674b0b2a19fa26945c94268db8894420ca181 | [
"MIT"
] | 140 | 2016-01-01T18:19:47.000Z | 2020-11-22T06:24:47.000Z | defmodule CodeCorps.StripeService.Adapters.StripeFileUploadAdapter do
@moduledoc """
Used for conversion between stripe api payload maps and maps
usable for creation of `StripeFileUpload` records locally
"""
import CodeCorps.MapUtils, only: [rename: 3, keys_to_string: 1]
@stripe_attributes [:created, :id, :purpose, :size, :type]
@doc """
Converts a struct received from the Stripe API into a map that can be used
to create a `CodeCorps.StripeFileUpload` record
"""
def to_params(%Stripe.FileUpload{} = stripe_file_upload, %{} = attributes) do
result =
stripe_file_upload
|> Map.from_struct
|> Map.take(@stripe_attributes)
|> rename(:id, :id_from_stripe)
|> keys_to_string
|> add_non_stripe_attributes(attributes)
{:ok, result}
end
@non_stripe_attributes ["stripe_connect_account_id"]
defp add_non_stripe_attributes(%{} = params, %{} = attributes) do
attributes
|> get_non_stripe_attributes
|> add_to(params)
end
defp get_non_stripe_attributes(%{} = attributes) do
attributes
|> Map.take(@non_stripe_attributes)
end
defp add_to(%{} = attributes, %{} = params) do
params
|> Map.merge(attributes)
end
end
| 27.155556 | 79 | 0.696399 |
0831506d64a8d47d83cb8c60571a00b7c3eb2670 | 255 | ex | Elixir | lib/glasswing.ex | msaminsky/GlassWing | 1280ad7fe18ed7fd48cb5ff9b8aef35a0be55f1a | [
"MIT"
] | null | null | null | lib/glasswing.ex | msaminsky/GlassWing | 1280ad7fe18ed7fd48cb5ff9b8aef35a0be55f1a | [
"MIT"
] | null | null | null | lib/glasswing.ex | msaminsky/GlassWing | 1280ad7fe18ed7fd48cb5ff9b8aef35a0be55f1a | [
"MIT"
] | null | null | null | defmodule Glasswing do
@moduledoc """
Glasswing keeps the contexts that define your domain
and business logic.
Contexts are also responsible for managing your data, regardless
if it comes from the database, an external API or others.
"""
end
| 25.5 | 66 | 0.756863 |
08316463a84988b6ae4be74a59206bd08d34a1eb | 68 | exs | Elixir | config/releases.exs | yuchunc/mix-deploy-example | 19072e7f4aca62d782d2dba82e11cdc1ef7d8fd4 | [
"Apache-2.0"
] | null | null | null | config/releases.exs | yuchunc/mix-deploy-example | 19072e7f4aca62d782d2dba82e11cdc1ef7d8fd4 | [
"Apache-2.0"
] | null | null | null | config/releases.exs | yuchunc/mix-deploy-example | 19072e7f4aca62d782d2dba82e11cdc1ef7d8fd4 | [
"Apache-2.0"
] | null | null | null | import Config
# import_config "/etc/mix-deploy-example/config.exs"
| 17 | 52 | 0.779412 |
08317b1de2383cd53888fd49cbc6d2a47805dd32 | 357 | ex | Elixir | farmbot_ext/lib/farmbot_ext.ex | adamswsk/farmbot_os | d177d3b74888c1e7bcbf8f8595818708ee97f73b | [
"MIT"
] | 1 | 2021-08-23T13:36:14.000Z | 2021-08-23T13:36:14.000Z | farmbot_ext/lib/farmbot_ext.ex | adamswsk/farmbot_os | d177d3b74888c1e7bcbf8f8595818708ee97f73b | [
"MIT"
] | null | null | null | farmbot_ext/lib/farmbot_ext.ex | adamswsk/farmbot_os | d177d3b74888c1e7bcbf8f8595818708ee97f73b | [
"MIT"
] | null | null | null | defmodule FarmbotExt do
@moduledoc false
use Application
def start(_type, _args) do
Supervisor.start_link(children(), opts())
end
def opts, do: [strategy: :one_for_one, name: __MODULE__]
def children do
config = Application.get_env(:farmbot_ext, __MODULE__) || []
Keyword.get(config, :children, [FarmbotExt.Bootstrap])
end
end
| 21 | 64 | 0.708683 |
08317d3f1dbbc6f34a4237fe3be73d7721219a38 | 12,195 | exs | Elixir | test/hexpm/web/controllers/api/auth_controller_test.exs | lau/hexpm | beee80f5358a356530debfea35ee65c3a0aa9b25 | [
"Apache-2.0"
] | null | null | null | test/hexpm/web/controllers/api/auth_controller_test.exs | lau/hexpm | beee80f5358a356530debfea35ee65c3a0aa9b25 | [
"Apache-2.0"
] | null | null | null | test/hexpm/web/controllers/api/auth_controller_test.exs | lau/hexpm | beee80f5358a356530debfea35ee65c3a0aa9b25 | [
"Apache-2.0"
defmodule Hexpm.Web.API.AuthControllerTest do
  use Hexpm.ConnCase, async: true

  # Fixture matrix: one user belonging to `owned_org` (but not `unowned_org`),
  # plus keys of every permission combination — full (api + repository),
  # api-only, repository-only, all-repositories, and a repository key pointing
  # at an org the user is NOT a member of. Keys exist both for the user and
  # for the organization itself.
  setup do
    owned_org = insert(:organization, public: false)
    unowned_org = insert(:organization, public: false)
    user = insert(:user)
    insert(:organization_user, organization: owned_org, user: user)

    user_full_key =
      insert(
        :key,
        user: user,
        permissions: [
          build(:key_permission, domain: "api"),
          build(:key_permission, domain: "repository", resource: owned_org.name)
        ]
      )

    organization_full_key =
      insert(
        :key,
        organization: owned_org,
        permissions: [
          build(:key_permission, domain: "api"),
          build(:key_permission, domain: "repository", resource: owned_org.name)
        ]
      )

    user_api_key = insert(:key, user: user, permissions: [build(:key_permission, domain: "api")])

    organization_api_key =
      insert(:key, organization: owned_org, permissions: [build(:key_permission, domain: "api")])

    user_repo_key =
      insert(
        :key,
        user: user,
        permissions: [build(:key_permission, domain: "repository", resource: owned_org.name)]
      )

    organization_repo_key =
      insert(
        :key,
        organization: owned_org,
        permissions: [build(:key_permission, domain: "repository", resource: owned_org.name)]
      )

    user_all_repos_key =
      insert(:key, user: user, permissions: [build(:key_permission, domain: "repositories")])

    unowned_user_repo_key =
      insert(
        :key,
        user: user,
        permissions: [build(:key_permission, domain: "repository", resource: unowned_org.name)]
      )

    {:ok,
     [
       owned_org: owned_org,
       unowned_org: unowned_org,
       user: user,
       user_full_key: user_full_key,
       user_api_key: user_api_key,
       user_repo_key: user_repo_key,
       user_all_repos_key: user_all_repos_key,
       unowned_user_repo_key: unowned_user_repo_key,
       organization_full_key: organization_full_key,
       organization_api_key: organization_api_key,
       organization_repo_key: organization_repo_key
     ]}
  end

  describe "GET /api/auth" do
    # Missing credentials -> 401.
    test "without key" do
      build_conn()
      |> get("api/auth", domain: "api")
      |> response(401)
    end

    # A secret that matches no key -> 401.
    test "with invalid key" do
      build_conn()
      |> put_req_header("authorization", "ABC")
      |> get("api/auth", domain: "api")
      |> response(401)
    end

    # The `domain` query parameter is mandatory.
    test "without domain returns 400", %{user_full_key: key} do
      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth")
      |> response(400)
    end

    # NOTE(review): `revoked_at` and `revoke_at` look like distinct schema
    # fields — from the assertions: `revoked_at` marks an already-revoked key
    # (401), while `revoke_at` schedules revocation, so a past timestamp
    # yields 401 and a future one still authenticates (204). Confirm against
    # the Key schema.
    test "with revoked key", %{user: user} do
      key =
        insert(:key,
          user: user,
          permissions: [build(:key_permission, domain: "api")],
          revoked_at: ~N[2018-01-01 00:00:00]
        )

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api")
      |> response(401)

      key =
        insert(:key,
          user: user,
          permissions: [build(:key_permission, domain: "api")],
          revoke_at: ~N[2018-01-01 00:00:00]
        )

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api")
      |> response(401)

      key =
        insert(:key,
          user: user,
          permissions: [build(:key_permission, domain: "api")],
          revoke_at: ~N[2030-01-01 00:00:00]
        )

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api")
      |> response(204)
    end

    # Full key grants api plus the one owned repository, nothing else.
    test "authenticate full user key", %{
      user_full_key: key,
      owned_org: owned_org,
      unowned_org: unowned_org
    } do
      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api")
      |> response(204)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "repository", resource: owned_org.name)
      |> response(204)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "repository", resource: unowned_org.name)
      |> response(401)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "repository", resource: "BADREPO")
      |> response(401)

      # repository domain without a resource is rejected
      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "repository")
      |> response(401)
    end

    # Organization-owned full key behaves like the user's full key.
    test "authenticate full organization key", %{
      organization_full_key: key,
      owned_org: owned_org,
      unowned_org: unowned_org
    } do
      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api")
      |> response(204)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "repository", resource: owned_org.name)
      |> response(204)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "repository", resource: unowned_org.name)
      |> response(401)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "repository", resource: "BADREPO")
      |> response(401)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "repository")
      |> response(401)
    end

    # A bare "api" permission implies both read and write, but no repository
    # access at all.
    test "authenticate user api key", %{user_api_key: key} do
      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api")
      |> response(204)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api", resource: "read")
      |> response(204)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api", resource: "write")
      |> response(204)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "repository", resource: "myrepo")
      |> response(401)
    end

    test "authenticate organization api key", %{organization_api_key: key} do
      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api")
      |> response(204)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api", resource: "read")
      |> response(204)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api", resource: "write")
      |> response(204)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "repository", resource: "myrepo")
      |> response(401)
    end

    # "api:read" does NOT grant write…
    test "authenticate user read api key", %{user: user} do
      permission = build(:key_permission, domain: "api", resource: "read")
      key = insert(:key, user: user, permissions: [permission])

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api", resource: "read")
      |> response(204)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api", resource: "write")
      |> response(401)
    end

    # …and "api:write" does not satisfy an explicit "read" check either.
    test "authenticate user write api key", %{user: user} do
      permission = build(:key_permission, domain: "api", resource: "write")
      key = insert(:key, user: user, permissions: [permission])

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api", resource: "write")
      |> response(204)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api", resource: "read")
      |> response(401)
    end

    test "authenticate organization read api key", %{owned_org: owned_org} do
      permission = build(:key_permission, domain: "api", resource: "read")
      key = insert(:key, organization: owned_org, permissions: [permission])

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api", resource: "read")
      |> response(204)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api", resource: "write")
      |> response(401)
    end

    test "authenticate organization write api key", %{owned_org: owned_org} do
      permission = build(:key_permission, domain: "api", resource: "write")
      key = insert(:key, organization: owned_org, permissions: [permission])

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api", resource: "write")
      |> response(204)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api", resource: "read")
      |> response(401)
    end

    # "repositories" (plural) grants any repo the user is a member of;
    # note unauthorized orgs return 403 here (authenticated but forbidden),
    # unlike the single-repository key which returns 401.
    test "authenticate user repo key with all repositories", %{
      user_all_repos_key: key,
      owned_org: owned_org,
      unowned_org: unowned_org
    } do
      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "api")
      |> response(401)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "repositories")
      |> response(204)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "repository", resource: owned_org.name)
      |> response(204)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "repository", resource: unowned_org.name)
      |> response(403)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "repository", resource: "BADREPO")
      |> response(403)
    end

    # docs keys are scoped to a single organization.
    test "authenticate docs key", %{user: user, owned_org: owned_org} do
      permission = build(:key_permission, domain: "docs", resource: owned_org.name)
      key = insert(:key, user: user, permissions: [permission])

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "docs", resource: owned_org.name)
      |> response(204)

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "docs", resource: "not_my_org")
      |> response(401)
    end

    # Key permission names an org the user doesn't belong to -> forbidden.
    test "authenticate repository key against repository without access permissions", %{
      unowned_user_repo_key: key,
      unowned_org: unowned_org
    } do
      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "repository", resource: unowned_org.name)
      |> response(403)
    end

    # Even valid membership + key is refused when the org's billing is inactive.
    test "authenticate user repository key without active billing", %{user: user} do
      organization = insert(:organization, billing_active: false)
      insert(:organization_user, organization: organization, user: user)

      key =
        insert(
          :key,
          user: user,
          permissions: [build(:key_permission, domain: "repository", resource: organization.name)]
        )

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "repository", resource: organization.name)
      |> response(403)
    end

    test "authenticate organization repository key without active billing" do
      organization = insert(:organization, billing_active: false)

      key =
        insert(
          :key,
          organization: organization,
          permissions: [build(:key_permission, domain: "repository", resource: organization.name)]
        )

      build_conn()
      |> put_req_header("authorization", key.user_secret)
      |> get("api/auth", domain: "repository", resource: organization.name)
      |> response(403)
    end
  end
end
| 31.269231 | 98 | 0.61312 |
083181cbc189fd10d7d438be8fb5e8b87e81137b | 148 | ex | Elixir | lib/policy/exception.ex | system76/policy | 2d029725b549bb8b7e935abaf77b46cf6253854d | [
"MIT"
] | 4 | 2016-09-03T10:25:32.000Z | 2017-11-15T19:32:16.000Z | lib/policy/exception.ex | system76/policy | 2d029725b549bb8b7e935abaf77b46cf6253854d | [
"MIT"
] | null | null | null | lib/policy/exception.ex | system76/policy | 2d029725b549bb8b7e935abaf77b46cf6253854d | [
"MIT"
defmodule Policy.Exception do
  @moduledoc """
  Exception raised by policy checks; carries only a `:message`.
  """

  defexception [:message]
end
defimpl Plug.Exception, for: Policy.Exception do
def status(_exception), do: 403
end
| 18.5 | 48 | 0.777027 |
0831876eea2ccd6dcca26cce6ac4eaf62ab84978 | 17,972 | ex | Elixir | apps/omg_watcher/lib/omg_watcher/exit_processor.ex | kendricktan/elixir-omg | 834c103fd5c4b9e063c1d32b9b4e5728abb64009 | [
"Apache-2.0"
] | null | null | null | apps/omg_watcher/lib/omg_watcher/exit_processor.ex | kendricktan/elixir-omg | 834c103fd5c4b9e063c1d32b9b4e5728abb64009 | [
"Apache-2.0"
] | null | null | null | apps/omg_watcher/lib/omg_watcher/exit_processor.ex | kendricktan/elixir-omg | 834c103fd5c4b9e063c1d32b9b4e5728abb64009 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 OmiseGO Pte Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
defmodule OMG.Watcher.ExitProcessor do
  @moduledoc """
  Imperative shell here, for functional core and more info see `OMG.Watcher.ExitProcessor.Core`

  NOTE: Note that all calls return `db_updates` and rely on the caller to do persistence.
  """

  alias OMG.Block
  alias OMG.DB
  alias OMG.Eth
  alias OMG.State
  alias OMG.State.Transaction
  alias OMG.Utxo
  alias OMG.Watcher.ExitProcessor
  alias OMG.Watcher.ExitProcessor.Core
  alias OMG.Watcher.ExitProcessor.InFlightExitInfo
  alias OMG.Watcher.ExitProcessor.StandardExitChallenge
  alias OMG.Watcher.Recorder

  use OMG.Utils.LoggerExt
  require Utxo

  ### Client

  def start_link(_args) do
    GenServer.start_link(__MODULE__, :ok, name: __MODULE__)
  end

  @doc """
  Accepts events and processes them in the state - new exits are tracked.
  Returns `db_updates`
  """
  def new_exits(exits) do
    GenServer.call(__MODULE__, {:new_exits, exits})
  end

  @doc """
  Accepts events and processes them in the state - new in flight exits are tracked.
  Returns `db_updates`
  """
  def new_in_flight_exits(in_flight_exit_started_events) do
    GenServer.call(__MODULE__, {:new_in_flight_exits, in_flight_exit_started_events})
  end

  @doc """
  Accepts events and processes them in the state - finalized exits are untracked _if valid_ otherwise raises alert
  Returns `db_updates`
  """
  def finalize_exits(finalizations) do
    GenServer.call(__MODULE__, {:finalize_exits, finalizations})
  end

  @doc """
  Accepts events and processes them in the state - new piggybacks are tracked, if invalid raises an alert
  Returns `db_updates`
  """
  def piggyback_exits(piggybacks) do
    GenServer.call(__MODULE__, {:piggyback_exits, piggybacks})
  end

  @doc """
  Accepts events and processes them in the state - challenged exits are untracked
  Returns `db_updates`
  """
  def challenge_exits(challenges) do
    GenServer.call(__MODULE__, {:challenge_exits, challenges})
  end

  @doc """
  Accepts events and processes them in the state.
  Competitors are stored for future use(i.e. to challenge an in flight exit).
  Returns `db_updates`
  """
  def new_ife_challenges(challenges) do
    GenServer.call(__MODULE__, {:new_ife_challenges, challenges})
  end

  @doc """
  Accepts events and processes them in state.
  Returns `db_updates`
  """
  def respond_to_in_flight_exits_challenges(responds) do
    GenServer.call(__MODULE__, {:respond_to_in_flight_exits_challenges, responds})
  end

  @doc """
  Accepts events and processes them in state.
  Challenged piggybacks are forgotten.
  Returns `db_updates`
  """
  def challenge_piggybacks(challenges) do
    GenServer.call(__MODULE__, {:challenge_piggybacks, challenges})
  end

  @doc """
  Accepts events and processes them in state - finalized outputs are applied to the state.
  Returns `db_updates`
  """
  def finalize_in_flight_exits(finalizations) do
    GenServer.call(__MODULE__, {:finalize_in_flight_exits, finalizations})
  end

  @doc """
  Checks validity and causes event emission to `OMG.Watcher.Eventer`. Works with `OMG.State` to discern validity
  """
  def check_validity do
    GenServer.call(__MODULE__, :check_validity)
  end

  @doc """
  Returns a map of requested in flight exits, where keys are IFE hashes and values are IFES
  If given empty list of hashes, all IFEs are returned.
  """
  @spec get_active_in_flight_exits() :: {:ok, %{binary() => InFlightExitInfo.t()}}
  def get_active_in_flight_exits do
    GenServer.call(__MODULE__, :get_active_in_flight_exits)
  end

  @doc """
  Returns all information required to produce a transaction to the root chain contract to present a competitor for
  a non-canonical in-flight exit
  """
  @spec get_competitor_for_ife(binary()) :: {:ok, Core.competitor_data_t()} | {:error, :competitor_not_found}
  def get_competitor_for_ife(txbytes) do
    GenServer.call(__MODULE__, {:get_competitor_for_ife, txbytes})
  end

  @doc """
  Returns all information required to produce a transaction to the root chain contract to present a proof of canonicity
  for a challenged in-flight exit
  """
  @spec prove_canonical_for_ife(binary()) :: {:ok, Core.prove_canonical_data_t()} | {:error, :canonical_not_found}
  def prove_canonical_for_ife(txbytes) do
    GenServer.call(__MODULE__, {:prove_canonical_for_ife, txbytes})
  end

  @spec get_input_challenge_data(Transaction.Signed.tx_bytes(), Transaction.input_index_t()) ::
          {:ok, Core.input_challenge_data()} | {:error, Core.piggyback_challenge_data_error()}
  def get_input_challenge_data(txbytes, input_index) do
    GenServer.call(__MODULE__, {:get_input_challenge_data, txbytes, input_index})
  end

  @spec get_output_challenge_data(Transaction.Signed.tx_bytes(), Transaction.input_index_t()) ::
          {:ok, Core.output_challenge_data()} | {:error, Core.piggyback_challenge_data_error()}
  def get_output_challenge_data(txbytes, output_index) do
    GenServer.call(__MODULE__, {:get_output_challenge_data, txbytes, output_index})
  end

  @doc """
  Returns challenge for an exit
  """
  @spec create_challenge(Utxo.Position.t()) ::
          {:ok, StandardExitChallenge.t()} | {:error, :utxo_not_spent | :exit_not_found}
  def create_challenge(exiting_utxo_pos) do
    GenServer.call(__MODULE__, {:create_challenge, exiting_utxo_pos})
  end

  ### Server

  use GenServer

  def init(:ok) do
    {:ok, db_exits} = DB.exit_infos()
    {:ok, db_ifes} = DB.in_flight_exits_info()
    {:ok, db_competitors} = DB.competitors_info()

    sla_margin = Application.fetch_env!(:omg_watcher, :exit_processor_sla_margin)

    # NOTE: Core.init/4 returns the `{:ok, state}` shape that `init/1` must
    # return, so it is passed through directly after logging.
    processor = Core.init(db_exits, db_ifes, db_competitors, sla_margin)

    {:ok, _} = Recorder.start_link(%Recorder{name: __MODULE__.Recorder, parent: self()})

    _ = Logger.info("Initializing with: #{inspect(processor)}")
    processor
  end

  def handle_call({:new_exits, exits}, _from, state) do
    _ = if not Enum.empty?(exits), do: Logger.info("Recognized exits: #{inspect(exits)}")

    exit_contract_statuses =
      Enum.map(exits, fn %{exit_id: exit_id} ->
        {:ok, result} = Eth.RootChain.get_standard_exit(exit_id)
        result
      end)

    {new_state, db_updates} = Core.new_exits(state, exits, exit_contract_statuses)
    {:reply, {:ok, db_updates}, new_state}
  end

  def handle_call({:new_in_flight_exits, events}, _from, state) do
    _ = if not Enum.empty?(events), do: Logger.info("Recognized in-flight exits: #{inspect(events)}")

    ife_contract_statuses =
      Enum.map(
        events,
        fn %{call_data: %{in_flight_tx: bytes}} ->
          {:ok, contract_ife_id} = Eth.RootChain.get_in_flight_exit_id(bytes)
          {:ok, {timestamp, _, _, _, _}} = Eth.RootChain.get_in_flight_exit(contract_ife_id)
          {timestamp, contract_ife_id}
        end
      )

    {new_state, db_updates} = Core.new_in_flight_exits(state, events, ife_contract_statuses)
    {:reply, {:ok, db_updates}, new_state}
  end

  def handle_call({:finalize_exits, exits}, _from, state) do
    _ = if not Enum.empty?(exits), do: Logger.info("Recognized finalizations: #{inspect(exits)}")

    exits =
      exits
      |> Enum.map(fn %{exit_id: exit_id} ->
        {:ok, {_, _, _, utxo_pos}} = Eth.RootChain.get_standard_exit(exit_id)
        Utxo.Position.decode(utxo_pos)
      end)

    {:ok, db_updates_from_state, validities} = State.exit_utxos(exits)

    {new_state, event_triggers, db_updates} = Core.finalize_exits(state, validities)

    :ok = OMG.InternalEventBus.broadcast("events", {:emit_events, event_triggers})

    {:reply, {:ok, db_updates ++ db_updates_from_state}, new_state}
  end

  def handle_call({:piggyback_exits, exits}, _from, state) do
    _ = if not Enum.empty?(exits), do: Logger.info("Recognized piggybacks: #{inspect(exits)}")
    {new_state, db_updates} = Core.new_piggybacks(state, exits)
    {:reply, {:ok, db_updates}, new_state}
  end

  def handle_call({:challenge_exits, exits}, _from, state) do
    _ = if not Enum.empty?(exits), do: Logger.info("Recognized challenges: #{inspect(exits)}")
    {new_state, db_updates} = Core.challenge_exits(state, exits)
    {:reply, {:ok, db_updates}, new_state}
  end

  def handle_call({:new_ife_challenges, challenges}, _from, state) do
    _ = if not Enum.empty?(challenges), do: Logger.info("Recognized ife challenges: #{inspect(challenges)}")
    {new_state, db_updates} = Core.new_ife_challenges(state, challenges)
    {:reply, {:ok, db_updates}, new_state}
  end

  def handle_call({:challenge_piggybacks, challenges}, _from, state) do
    _ = if not Enum.empty?(challenges), do: Logger.info("Recognized piggyback challenges: #{inspect(challenges)}")
    {new_state, db_updates} = Core.challenge_piggybacks(state, challenges)
    {:reply, {:ok, db_updates}, new_state}
  end

  def handle_call({:respond_to_in_flight_exits_challenges, responds}, _from, state) do
    _ = if not Enum.empty?(responds), do: Logger.info("Recognized response to IFE challenge: #{inspect(responds)}")
    {new_state, db_updates} = Core.respond_to_in_flight_exits_challenges(state, responds)
    {:reply, {:ok, db_updates}, new_state}
  end

  def handle_call({:finalize_in_flight_exits, finalizations}, _from, state) do
    _ = if not Enum.empty?(finalizations), do: Logger.info("Recognized ife finalizations: #{inspect(finalizations)}")

    case Core.finalize_in_flight_exits(state, finalizations) do
      {:ok, state, db_updates} ->
        {:reply, {:ok, db_updates}, state}

      {:unknown_piggybacks, unknown_piggybacks} ->
        _ = Logger.error("Outputs not piggybacked: #{inspect(unknown_piggybacks)}")
        {:stop, :unknown_piggybacks, state}

      {:unknown_in_flight_exit, unknown_ifes} ->
        _ = Logger.error("Unknown in-flight exits: #{inspect(unknown_ifes)}")
        # FIX: was `{:stop, :unknown_in_flight_exit, Elixir.Agent.Server, state}` —
        # the 4-tuple `{:stop, reason, reply, new_state}` form, which would have
        # sent the unrelated module atom `Agent.Server` as the reply to the caller
        # before stopping. Use the no-reply 3-tuple form, consistent with the
        # `:unknown_piggybacks` branch above.
        {:stop, :unknown_in_flight_exit, state}
    end
  end

  @doc """
  Combine data from `ExitProcessor` and `OMG.State` to figure out what to do about exits
  """
  def handle_call(:check_validity, _from, state) do
    {state1, request} = prepare_validity_check(state)
    {chain_status, events} = Core.invalid_exits(request, state1)
    {:reply, {chain_status, events}, state}
  end

  def handle_call(:get_active_in_flight_exits, _from, state),
    do: {:reply, {:ok, Core.get_active_in_flight_exits(state)}, state}

  def handle_call({:get_competitor_for_ife, txbytes}, _from, state) do
    # NOTE: future of using `ExitProcessor.Request` struct not certain, see that module for details
    competitor_result =
      %ExitProcessor.Request{}
      # TODO: run_status_gets and getting all non-existent UTXO positions imaginable can be optimized out heavily
      #       only the UTXO positions being inputs to `txbytes` must be looked at, but it becomes problematic as
      #       txbytes can be invalid so we'd need a with here...
      |> run_status_gets()
      |> Core.determine_utxo_existence_to_get(state)
      |> run_utxo_exists()
      |> Core.determine_spends_to_get(state)
      |> run_spend_getting()
      |> Core.determine_blocks_to_get()
      |> run_block_getting()
      |> Core.get_competitor_for_ife(state, txbytes)

    {:reply, competitor_result, state}
  end

  def handle_call({:prove_canonical_for_ife, txbytes}, _from, state) do
    # NOTE: future of using `ExitProcessor.Request` struct not certain, see that module for details
    canonicity_result =
      %ExitProcessor.Request{}
      # TODO: same comment as above in get_competitor_for_ife
      |> run_status_gets()
      |> Core.determine_utxo_existence_to_get(state)
      |> run_utxo_exists()
      |> Core.determine_spends_to_get(state)
      |> run_spend_getting()
      |> Core.determine_blocks_to_get()
      |> run_block_getting()
      |> Core.prove_canonical_for_ife(txbytes)

    {:reply, canonicity_result, state}
  end

  def handle_call({:get_input_challenge_data, txbytes, input_index}, _from, state) do
    response =
      %ExitProcessor.Request{}
      |> run_status_gets()
      |> Core.determine_utxo_existence_to_get(state)
      |> run_utxo_exists()
      |> Core.determine_spends_to_get(state)
      |> run_spend_getting()
      |> Core.determine_blocks_to_get()
      |> run_block_getting()
      |> Core.get_input_challenge_data(state, txbytes, input_index)

    {:reply, response, state}
  end

  def handle_call({:get_output_challenge_data, txbytes, output_index}, _from, state) do
    {state1, request} = prepare_validity_check(state)
    response = Core.get_output_challenge_data(request, state1, txbytes, output_index)
    {:reply, response, state}
  end

  def handle_call({:create_challenge, Utxo.position(blknum, _txindex, _oindex) = exiting_utxo_pos}, _from, state) do
    with spending_blknum_response <- exiting_utxo_pos |> Utxo.Position.to_db_key() |> OMG.DB.spent_blknum(),
         {:ok, hashes} <- OMG.DB.block_hashes([blknum]),
         {:ok, [block]} <- OMG.DB.blocks(hashes),
         {:ok, raw_spending_proof, exit_info, exit_txbytes} <-
           Core.get_challenge_data(spending_blknum_response, exiting_utxo_pos, block, state),
         encoded_utxo_pos <- Utxo.Position.encode(exiting_utxo_pos),
         {:ok, exit_id} <- OMG.Eth.RootChain.get_standard_exit_id(exit_txbytes, encoded_utxo_pos) do
      # TODO: we're violating the shell/core pattern here, refactor!
      # `raw_spending_proof` is either a block number (the spend is in a plasma
      # block, which must be fetched) or an already-complete signed transaction.
      spending_proof =
        case raw_spending_proof do
          raw_blknum when is_number(raw_blknum) ->
            {:ok, hashes} = OMG.DB.block_hashes([raw_blknum])
            {:ok, [spending_block]} = OMG.DB.blocks(hashes)
            Block.from_db_value(spending_block)

          signed_tx ->
            signed_tx
        end

      {:reply, {:ok, Core.create_challenge(exit_info, spending_proof, exiting_utxo_pos, exit_id)}, state}
    else
      error -> {:reply, error, state}
    end
  end

  # Gathers all data required by `Core.invalid_exits/2` and friends: ethereum/
  # child chain status, IFE inclusion information, UTXO existence, spends and
  # the blocks holding them. Returns the (possibly updated) state and the
  # fully-populated request.
  defp prepare_validity_check(state) do
    # NOTE: future of using `ExitProcessor.Request` struct not certain, see that module for details
    {request, state} =
      %ExitProcessor.Request{}
      |> run_status_gets()
      # To find if IFE was included, see first if its inputs were spent.
      |> Core.determine_ife_input_utxos_existence_to_get(state)
      |> run_ife_input_utxo_existance()
      # Next, check by what transactions they were spent.
      |> Core.determine_ife_spends_to_get(state)
      |> run_ife_spend_getting()
      # Find tx bodies.
      |> Core.determine_ife_blocks_to_get()
      |> run_ife_block_getting()
      # Compare found txes with ife.tx.
      # If equal, persist information about position.
      |> Core.find_ifes_in_blocks(state)

    request =
      request
      |> Core.determine_utxo_existence_to_get(state)
      |> run_utxo_exists()
      |> Core.determine_spends_to_get(state)
      |> run_spend_getting()
      |> Core.determine_blocks_to_get()
      |> run_block_getting()

    {state, request}
  end

  # Snapshots current ethereum height and child chain block height into the request.
  defp run_status_gets(%ExitProcessor.Request{} = request) do
    {:ok, eth_height_now} = Eth.get_ethereum_height()
    {blknum_now, _} = State.get_status()

    _ = Logger.debug("eth_height_now: #{inspect(eth_height_now)}, blknum_now: #{inspect(blknum_now)}")

    %{request | eth_height_now: eth_height_now, blknum_now: blknum_now}
  end

  # Asks `OMG.State` whether each requested UTXO position still exists.
  defp run_utxo_exists(%ExitProcessor.Request{utxos_to_check: positions} = request) do
    result = positions |> Enum.map(&State.utxo_exists?/1)
    _ = Logger.debug("utxos_to_check: #{inspect(positions)}, utxo_exists_result: #{inspect(result)}")
    %{request | utxo_exists_result: result}
  end

  # Same as `run_utxo_exists/1`, but for piggybacked IFE input positions.
  defp run_ife_input_utxo_existance(%ExitProcessor.Request{piggybacked_utxos_to_check: positions} = request) do
    result = positions |> Enum.map(&State.utxo_exists?/1)

    _ =
      Logger.debug(
        "piggybacked_utxos_to_check: #{inspect(positions)}, piggybacked_utxo_exists_result: #{inspect(result)}"
      )

    %{request | piggybacked_utxo_exists_result: result}
  end

  # Looks up the block number in which each requested position was spent.
  defp run_spend_getting(%ExitProcessor.Request{spends_to_get: positions} = request) do
    result = positions |> Enum.map(&single_spend_getting/1)
    _ = Logger.debug("spends_to_get: #{inspect(positions)}, spent_blknum_result: #{inspect(result)}")
    %{request | spent_blknum_result: result}
  end

  # Same as `run_spend_getting/1`, but for piggybacked IFE positions.
  defp run_ife_spend_getting(%ExitProcessor.Request{piggybacked_spends_to_get: positions} = request) do
    result = positions |> Enum.map(&single_spend_getting/1)

    _ =
      Logger.debug(
        "piggybacked_spends_to_get: #{inspect(positions)}, piggybacked_spent_blknum_result: #{inspect(result)}"
      )

    %{request | piggybacked_spent_blknum_result: result}
  end

  defp single_spend_getting(position) do
    {:ok, spend_blknum} =
      position
      |> Utxo.Position.to_db_key()
      |> OMG.DB.spent_blknum()

    spend_blknum
  end

  defp run_block_getting(%ExitProcessor.Request{blknums_to_get: blknums} = request),
    do: %{request | blocks_result: do_block_getting(blknums)}

  defp run_ife_block_getting(%ExitProcessor.Request{piggybacked_blknums_to_get: blknums} = request),
    do: %{request | piggybacked_blocks_result: do_block_getting(blknums)}

  # Fetches full blocks from the DB for the given block numbers.
  defp do_block_getting(blknums) do
    _ = Logger.debug("blknums_to_get: #{inspect(blknums)}")
    {:ok, hashes} = OMG.DB.block_hashes(blknums)
    _ = Logger.debug("hashes: #{inspect(hashes)}")
    {:ok, blocks} = OMG.DB.blocks(hashes)
    _ = Logger.debug("blocks_result: #{inspect(blocks)}")

    blocks |> Enum.map(&Block.from_db_value/1)
  end
end
| 37.598326 | 119 | 0.709771 |
0831a6e95133513126af8d56f0576d8023be8549 | 2,048 | ex | Elixir | clients/health_care/lib/google_api/health_care/v1beta1/model/result.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | null | null | null | clients/health_care/lib/google_api/health_care/v1beta1/model/result.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | 1 | 2020-12-18T09:25:12.000Z | 2020-12-18T09:25:12.000Z | clients/health_care/lib/google_api/health_care/v1beta1/model/result.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | 1 | 2020-10-04T10:12:44.000Z | 2020-10-04T10:12:44.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.HealthCare.V1beta1.Model.Result do
  @moduledoc """
  The consent evaluation result for a single `data_id`.

  ## Attributes

  *   `consentDetails` (*type:* `%{optional(String.t) => GoogleApi.HealthCare.V1beta1.Model.ConsentEvaluation.t}`, *default:* `nil`) - The resource names of all evaluated Consents mapped to their evaluation.
  *   `consented` (*type:* `boolean()`, *default:* `nil`) - Whether the requested data is consented for the given use.
  *   `dataId` (*type:* `String.t`, *default:* `nil`) - The unique identifier of the data the consents were checked for.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :consentDetails => %{
            optional(String.t()) => GoogleApi.HealthCare.V1beta1.Model.ConsentEvaluation.t()
          },
          :consented => boolean(),
          :dataId => String.t()
        }

  # `field/1,2` comes from GoogleApi.Gax.ModelBase and wires JSON keys to
  # struct fields; :consentDetails decodes map values as ConsentEvaluation structs.
  field(:consentDetails, as: GoogleApi.HealthCare.V1beta1.Model.ConsentEvaluation, type: :map)
  field(:consented)
  field(:dataId)
end
defimpl Poison.Decoder, for: GoogleApi.HealthCare.V1beta1.Model.Result do
  alias GoogleApi.HealthCare.V1beta1.Model.Result

  # Delegate JSON decoding to the generated model implementation.
  def decode(value, options), do: Result.decode(value, options)
end
defimpl Poison.Encoder, for: GoogleApi.HealthCare.V1beta1.Model.Result do
  alias GoogleApi.Gax.ModelBase

  # Encoding is shared across all generated models via ModelBase.
  def encode(value, options), do: ModelBase.encode(value, options)
end
| 37.236364 | 207 | 0.716797 |
0831f1d56f5ac960d403d302188353a240049b9b | 540 | exs | Elixir | apps/cron/test/cron/task/version_scan_test.exs | michaeljguarino/forge | 50ee583ecb4aad5dee4ef08fce29a8eaed1a0824 | [
"Apache-2.0"
] | 59 | 2021-09-16T19:29:39.000Z | 2022-03-31T20:44:24.000Z | apps/cron/test/cron/task/version_scan_test.exs | svilenkov/plural | ac6c6cc15ac4b66a3b5e32ed4a7bee4d46d1f026 | [
"Apache-2.0"
] | 111 | 2021-08-15T09:56:37.000Z | 2022-03-31T23:59:32.000Z | apps/cron/test/cron/task/version_scan_test.exs | svilenkov/plural | ac6c6cc15ac4b66a3b5e32ed4a7bee4d46d1f026 | [
"Apache-2.0"
] | 4 | 2021-12-13T09:43:01.000Z | 2022-03-29T18:08:44.000Z | defmodule Cron.Task.VersionScanTest do
use Core.SchemaCase, async: false
alias Cron.Task.VersionScan
alias Core.Schema.Version
use Mimic
setup :set_mimic_global
describe "#run/0" do
test "it will scan old images" do
versions = insert_list(3, :version)
me = self()
expect(Core.Conduit.Broker, :publish, 3, fn %{body: vsn}, :scan -> send(me, {:scan, vsn}) end)
3 = VersionScan.run()
for %{id: id} <- versions do
assert_receive {:scan, %Version{id: ^id}}
end
end
end
end
| 21.6 | 100 | 0.631481 |
08322a7fb214bbc7faa974ee3a47b27983c3ef10 | 1,959 | ex | Elixir | lib/phoenix_toggl/reports/reporter.ex | behrendtio/phoenix-toggl | dcaa2409ed8b53d3b56dc7ce02cdc4252dd19647 | [
"MIT"
] | 207 | 2016-02-24T06:43:04.000Z | 2021-03-05T02:26:47.000Z | lib/phoenix_toggl/reports/reporter.ex | behrendtio/phoenix-toggl | dcaa2409ed8b53d3b56dc7ce02cdc4252dd19647 | [
"MIT"
] | 6 | 2016-02-26T00:13:06.000Z | 2017-06-15T07:49:02.000Z | lib/phoenix_toggl/reports/reporter.ex | behrendtio/phoenix-toggl | dcaa2409ed8b53d3b56dc7ce02cdc4252dd19647 | [
"MIT"
] | 24 | 2016-02-24T18:02:21.000Z | 2019-11-11T12:47:06.000Z | defmodule PhoenixToggl.Reports.Reporter do
@moduledoc """
Retrieves necessary data from the database
"""
alias PhoenixToggl.{Repo, TimeEntry}
alias PhoenixToggl.Reports.{Data, Day}
alias Timex.{Date, Time, DateFormat}
@format_string "%Y-%m-%d"
# Generates a weekly activity report for a user.
#
# Accepts a map carrying at least `:user` and `:number_of_weeks`;
# anything else (including no argument) yields
# `{:error, :invalid_params}`.
def generate(), do: {:error, :invalid_params}

def generate(%{user: _, number_of_weeks: _} = params) do
  generate_data(params)
end

def generate(_invalid), do: {:error, :invalid_params}
# Builds the %Data{} report for `user`, covering `number_of_weeks`
# Monday-based calendar weeks and ending with the current week.
defp generate_data(%{user: user, number_of_weeks: number_of_weeks}) do
now = Date.now
# Walk back (number_of_weeks - 1) weeks from today and snap to that
# week's Monday. NOTE(review): this is the legacy Timex Date/Time API.
start_date = now
|> Date.subtract(Time.to_timestamp(number_of_weeks - 1, :weeks))
|> Date.beginning_of_week(:mon)
end_date = Date.end_of_week(now, :mon)
# Day count between the range bounds; one %Day{} is produced per day,
# computed concurrently in process_days/3.
days = Date.diff(start_date, end_date, :days)
days_data = process_days(user, days, start_date)
total_duration = calculate_total_duration(days_data)
%Data{
user_id: user.id,
start_date: format_date(start_date),
end_date: format_date(end_date),
total_duration: total_duration,
days: days_data
}
end
# Builds one %Day{} per day offset in 0..days, fanning the work out to
# one Task per day and returning the results ordered by day id.
defp process_days(user, days, start_date) do
  tasks =
    for offset <- 0..days do
      Task.async(fn -> calculate_day(user, start_date, offset) end)
    end

  tasks
  |> Enum.map(&Task.await/1)
  |> Enum.sort_by(& &1.id)
end
# Computes the total tracked duration for a single day.
# `day_number` is the zero-based offset from `start_date` and doubles
# as the day's id in the report.
defp calculate_day(user, start_date, day_number) do
date = start_date
|> Date.add(Time.to_timestamp(day_number, :days))
|> format_date
# Sum the user's time entries for that date. The aggregate query
# yields nil when there are no entries; normalize that to 0.
total_duration = user
|> Ecto.assoc(:time_entries)
|> TimeEntry.total_duration_for_date(date)
|> Repo.one!
|> case do
nil -> 0
duration -> duration
end
%Day{
id: day_number,
date: date,
duration: total_duration
}
end
# Sums the duration across all per-day reports.
defp calculate_total_duration(days_data) do
  days_data
  |> Enum.map(& &1.duration)
  |> Enum.sum()
end

# Renders a Timex date with the module's strftime pattern ("%Y-%m-%d").
defp format_date(date) do
  DateFormat.format!(date, @format_string, :strftime)
end
end
| 28.391304 | 109 | 0.663093 |
08323107a4c9515585ae2b0d5752476ba8a8f152 | 1,780 | exs | Elixir | playground/elixir-live-doom-fire/rel/config.exs | allmonty/doom-fire-algorithm | dac483b85973020c5f86962d3d9344018711043b | [
"MIT"
] | null | null | null | playground/elixir-live-doom-fire/rel/config.exs | allmonty/doom-fire-algorithm | dac483b85973020c5f86962d3d9344018711043b | [
"MIT"
] | null | null | null | playground/elixir-live-doom-fire/rel/config.exs | allmonty/doom-fire-algorithm | dac483b85973020c5f86962d3d9344018711043b | [
"MIT"
] | null | null | null | # Import all plugins from `rel/plugins`
# They can then be used by adding `plugin MyPlugin` to
# either an environment, or release definition, where
# `MyPlugin` is the name of the plugin module.
# Evaluate every plugin script under rel/plugins so its modules are
# available to the release configuration below.
Path.join(["rel", "plugins", "*.exs"])
|> Path.wildcard()
|> Enum.map(&Code.eval_file(&1))
use Mix.Releases.Config,
# This sets the default release built by `mix release`
default_release: :default,
# This sets the default environment used by `mix release`
default_environment: Mix.env()
# For a full list of config options for both releases
# and environments, visit https://hexdocs.pm/distillery/configuration.html
# You may define one or more environments in this file,
# an environment's settings will override those of a release
# when building in that environment, this combination of release
# and environment configuration is called a profile
environment :dev do
# If you are running Phoenix, you should make sure that
# server: true is set and the code reloader is disabled,
# even in dev mode.
# It is recommended that you build with MIX_ENV=prod and pass
# the --env flag to Distillery explicitly if you want to use
# dev mode.
set dev_mode: true
set include_erts: false
# NOTE(review): Erlang distribution cookies are committed here in
# plaintext; consider generating them at build/deploy time instead.
set cookie: :"ZsX9H!)N|rj~BiaaS9s9mi&e!%$O0S[[frTa,[1Ndp7UDsqk2Re(Aa!eIVUubusA"
end
environment :prod do
# Production releases bundle ERTS and strip source code.
set include_erts: true
set include_src: false
set cookie: :"~~RBj9ZXWK|8u&6!s%qx~I|.L6PFP^<Bh)J%K//4D1%gB*OHnrcsih|jm:[8B6Po"
end
# You may define one or more releases in this file.
# If you have not set a default release, or selected one
# when running `mix release`, the first release in the file
# will be used by default
release :live_doom_fire do
set version: current_version(:live_doom_fire)
set applications: [
:runtime_tools
]
end
| 32.962963 | 81 | 0.736517 |
083234bb845b2edf01ce7d67601ef8eeb5a60df3 | 26,195 | exs | Elixir | test/bitwise_ip/mask_test.exs | ajvondrak/bitwise_ip | e8be9f172618361373bcdd4a9083cb0ec9380ffd | [
"MIT"
] | 4 | 2021-07-05T20:43:02.000Z | 2022-01-07T04:06:29.000Z | test/bitwise_ip/mask_test.exs | ajvondrak/bitwise_ip | e8be9f172618361373bcdd4a9083cb0ec9380ffd | [
"MIT"
] | 1 | 2021-08-22T01:34:16.000Z | 2021-08-25T03:34:28.000Z | test/bitwise_ip/mask_test.exs | ajvondrak/bitwise_ip | e8be9f172618361373bcdd4a9083cb0ec9380ffd | [
"MIT"
] | null | null | null | defmodule BitwiseIp.MaskTest do
use ExUnit.Case, async: true
doctest BitwiseIp.Mask
alias BitwiseIp.Mask
describe "IPv4" do
  test "encode/1" do
    # An n-bit prefix mask is n one bits followed by (32 - n) zero
    # bits; build that value from a binary digit string instead of
    # spelling out all 33 literals.
    for prefix <- 0..32 do
      bits = String.duplicate("1", prefix) <> String.duplicate("0", 32 - prefix)
      expected = String.to_integer(bits, 2)

      assert Mask.encode(:v4, prefix) == expected
    end
  end

  test "decode/1" do
    # decode/2 inverts encode/2 for every valid prefix length.
    Enum.each(0..32, fn prefix ->
      assert prefix == Mask.decode(:v4, Mask.encode(:v4, prefix))
    end)
  end

  test "parse/1" do
    assert Mask.parse(:v4, "0") == {:ok, 0x00000000}
    assert Mask.parse(:v4, "16") == {:ok, 0xFFFF0000}
    assert Mask.parse(:v4, "32") == {:ok, 0xFFFFFFFF}

    # Out-of-range and non-numeric inputs are rejected.
    for bad <- ["-1", "not a mask", "33"] do
      assert {:error, _} = Mask.parse(:v4, bad)
    end
  end

  test "parse!/1" do
    # The bang variant unwraps the ok tuple...
    {:ok, success} = Mask.parse(:v4, "24")
    assert Mask.parse!(:v4, "24") == success

    # ...and raises the error message as an ArgumentError.
    {:error, message} = Mask.parse(:v4, "error")
    assert_raise ArgumentError, message, fn -> Mask.parse!(:v4, "error") end
  end
end
describe "IPv6" do
  test "encode/1" do
    # An n-bit prefix mask is n one bits followed by (128 - n) zero
    # bits; build that value from a binary digit string instead of
    # spelling out all 129 literals.
    for prefix <- 0..128 do
      bits = String.duplicate("1", prefix) <> String.duplicate("0", 128 - prefix)
      expected = String.to_integer(bits, 2)

      assert Mask.encode(:v6, prefix) == expected
    end
  end

  test "decode/1" do
    # decode/2 inverts encode/2 for every valid prefix length.
    Enum.each(0..128, fn prefix ->
      assert prefix == Mask.decode(:v6, Mask.encode(:v6, prefix))
    end)
  end

  test "parse/1" do
    assert Mask.parse(:v6, "0") == {:ok, 0}
    assert Mask.parse(:v6, "64") == {:ok, 0xFFFFFFFFFFFFFFFF0000000000000000}
    assert Mask.parse(:v6, "128") == {:ok, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF}

    # Out-of-range and non-numeric inputs are rejected.
    for bad <- ["-1", "not a mask", "129"] do
      assert {:error, _} = Mask.parse(:v6, bad)
    end
  end

  test "parse!/1" do
    # The bang variant unwraps the ok tuple...
    {:ok, success} = Mask.parse(:v6, "92")
    assert Mask.parse!(:v6, "92") == success

    # ...and raises the error message as an ArgumentError.
    {:error, message} = Mask.parse(:v6, "error")
    assert_raise ArgumentError, message, fn -> Mask.parse!(:v6, "error") end
  end
end
end
| 115.90708 | 175 | 0.859897 |
083236ebf8c89a93bd96549ffac36244d9dd8b0f | 720 | exs | Elixir | config/config.exs | tiberiuc/admint | 6d1bd5462f49053c7c3999c35cc301c7b8f08a70 | [
"MIT"
] | null | null | null | config/config.exs | tiberiuc/admint | 6d1bd5462f49053c7c3999c35cc301c7b8f08a70 | [
"MIT"
] | null | null | null | config/config.exs | tiberiuc/admint | 6d1bd5462f49053c7c3999c35cc301c7b8f08a70 | [
"MIT"
] | null | null | null | # This file is responsible for configuring your application
# and its dependencies with the aid of the Mix.Config module.
#
# This configuration file is loaded before any dependency and
# is restricted to this project.
# General application configuration
# NOTE(review): `use Mix.Config` is deprecated in recent Elixir in
# favour of `import Config`; kept as-is to preserve behavior.
use Mix.Config
# esbuild bundles assets/js/admint.js into priv/static/assets.
config :esbuild,
version: "0.12.18",
default: [
args: ~w(js/admint.js --bundle --target=es2016 --outdir=../priv/static/assets),
cd: Path.expand("../assets", __DIR__),
env: %{"NODE_PATH" => Path.expand("../deps", __DIR__)}
]
# Phoenix uses Jason to encode/decode JSON payloads.
config :phoenix, :json_library, Jason
# Import environment specific config. This must remain at the bottom
# of this file so it overrides the configuration defined above.
import_config "#{Mix.env()}.exs"
| 31.304348 | 83 | 0.723611 |
08327e58f851870abc2d1aec74e35b726d4c7d08 | 1,163 | exs | Elixir | clients/redis/mix.exs | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | null | null | null | clients/redis/mix.exs | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | null | null | null | clients/redis/mix.exs | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | null | null | null | defmodule GoogleApi.Redis.V1beta1.Mixfile do
use Mix.Project
@version "0.3.0"
# Mix project definition for the generated Redis API client.
def project do
  [
    app: :google_api_redis,
    version: @version,
    elixir: "~> 1.4",
    build_embedded: Mix.env == :prod,
    start_permanent: Mix.env == :prod,
    description: description(),
    package: package(),
    deps: deps(),
    # Bug fix: the generator left a "clients/FIXME" placeholder here;
    # this client lives under clients/redis in the monorepo.
    source_url:
      "https://github.com/GoogleCloudPlatform/elixir-google-api/tree/master/clients/redis"
  ]
end
# OTP application settings: no supervision tree, just ensure :logger
# is started alongside the application.
def application(), do: [extra_applications: [:logger]]
# Library dependencies:
#  - google_gax: shared Google API client plumbing (runtime)
#  - ex_doc / dialyxir: documentation and static analysis (dev only)
defp deps() do
[
{:google_gax, "~> 0.1"},
{:ex_doc, "~> 0.16", only: :dev},
{:dialyxir, "~> 0.5", only: [:dev], runtime: false}
]
end
# One-line Hex package description. The trailing newline matches the
# value the original heredoc produced.
defp description() do
  "Creates and manages Redis instances on the Google Cloud Platform.\n"
end
# Hex package metadata for :google_api_redis.
defp package() do
  [
    files: ["lib", "mix.exs", "README*", "LICENSE"],
    maintainers: ["Jeff Ching"],
    licenses: ["Apache 2.0"],
    links: %{
      # Bug fix: replaced the generated "clients/FIXME" placeholder
      # with the client's actual directory in the monorepo.
      "GitHub" =>
        "https://github.com/GoogleCloudPlatform/elixir-google-api/tree/master/clients/redis",
      "Homepage" => "https://cloud.google.com/memorystore/docs/redis/"
    }
  ]
end
end
| 23.734694 | 105 | 0.592433 |
0832b6c3e36a0a0c5f19d574114fb4217788cbbc | 10,017 | exs | Elixir | test/ecto/datetime_test.exs | mschae/ecto | 00f85444c4f61080617179232c0d528381de5ec3 | [
"Apache-2.0"
] | null | null | null | test/ecto/datetime_test.exs | mschae/ecto | 00f85444c4f61080617179232c0d528381de5ec3 | [
"Apache-2.0"
] | null | null | null | test/ecto/datetime_test.exs | mschae/ecto | 00f85444c4f61080617179232c0d528381de5ec3 | [
"Apache-2.0"
defmodule Ecto.DateTest do
  use ExUnit.Case, async: true

  # Canonical date value exercised by every assertion below.
  @subject %Ecto.Date{year: 2015, month: 12, day: 31}

  test "cast itself" do
    assert Ecto.Date.cast(@subject) == {:ok, @subject}
  end

  test "cast strings" do
    # Valid ISO 8601 dates (optionally carrying a time part) cast successfully;
    # out-of-range month/day fields and malformed separators are rejected.
    assert Ecto.Date.cast("2015-12-31") == {:ok, @subject}
    assert Ecto.Date.cast("2015-00-23") == :error
    assert Ecto.Date.cast("2015-13-23") == :error
    assert Ecto.Date.cast("2015-01-00") == :error
    assert Ecto.Date.cast("2015-01-32") == :error
    assert Ecto.Date.cast("2015-12-31 23:50:07") == {:ok, @subject}
    assert Ecto.Date.cast("2015-12-31T23:50:07") == {:ok, @subject}
    assert Ecto.Date.cast("2015-12-31T23:50:07Z") == {:ok, @subject}
    assert Ecto.Date.cast("2015-12-31T23:50:07.000Z") == {:ok, @subject}
    assert Ecto.Date.cast("2015-12-31P23:50:07") == :error
    assert Ecto.Date.cast("2015-12-31T23:50:07.008") == {:ok, @subject}
    assert Ecto.Date.cast("2015-12-31T23:50:07.008Z") == {:ok, @subject}
  end

  test "cast maps" do
    # Both string- and atom-keyed maps are accepted; blank or nil components fail.
    assert Ecto.Date.cast(%{"year" => "2015", "month" => "12", "day" => "31"}) == {:ok, @subject}
    assert Ecto.Date.cast(%{year: 2015, month: 12, day: 31}) == {:ok, @subject}
    assert Ecto.Date.cast(%{"year" => "2015", "month" => "", "day" => "31"}) == :error
    assert Ecto.Date.cast(%{"year" => "2015", "month" => nil, "day" => "31"}) == :error
    assert Ecto.Date.cast(%{"year" => "2015", "month" => nil}) == :error
  end

  test "cast erl date" do
    assert Ecto.Date.cast({2015, 12, 31}) == {:ok, @subject}
    assert Ecto.Date.cast({2015, 13, 31}) == :error
  end

  test "dump itself into a date triplet" do
    assert Ecto.Date.dump(@subject) == {:ok, {2015, 12, 31}}
    assert Ecto.Date.dump({2015, 12, 31}) == :error
  end

  test "load a date triplet" do
    assert Ecto.Date.load({2015, 12, 31}) == {:ok, @subject}
    assert Ecto.Date.load(@subject) == :error
  end

  test "to_string" do
    assert to_string(@subject) == "2015-12-31"
    assert Ecto.Date.to_string(@subject) == "2015-12-31"
  end

  test "to_iso8601" do
    assert Ecto.Date.to_iso8601(@subject) == "2015-12-31"
  end

  test "to_erl and from_erl" do
    assert @subject |> Ecto.Date.to_erl() |> Ecto.Date.from_erl() == @subject
  end

  test "inspect protocol" do
    assert inspect(@subject) == "#Ecto.Date<2015-12-31>"
  end
end
defmodule Ecto.TimeTest do
  use ExUnit.Case, async: true

  # Fixtures: a plain time, a zero-seconds time, and a microsecond-precision time.
  @time %Ecto.Time{hour: 23, min: 50, sec: 07, usec: 0}
  @time_zero %Ecto.Time{hour: 23, min: 50, sec: 0, usec: 0}
  @time_usec %Ecto.Time{hour: 12, min: 40, sec: 33, usec: 30000}

  test "cast itself" do
    assert Ecto.Time.cast(@time) == {:ok, @time}
    assert Ecto.Time.cast(@time_zero) == {:ok, @time_zero}
  end

  test "cast strings" do
    # Fractional seconds normalize to microseconds; out-of-range or malformed
    # components are rejected.
    assert Ecto.Time.cast("23:50:07") == {:ok, @time}
    assert Ecto.Time.cast("23:50:07Z") == {:ok, @time}

    assert Ecto.Time.cast("23:50:07.030")
           == {:ok, %{@time | usec: 30000}}

    assert Ecto.Time.cast("23:50:07.123456")
           == {:ok, %{@time | usec: 123456}}

    assert Ecto.Time.cast("23:50:07.123456Z")
           == {:ok, %{@time | usec: 123456}}

    assert Ecto.Time.cast("23:50:07.000123Z")
           == {:ok, %{@time | usec: 123}}

    assert Ecto.Time.cast("24:01:01") == :error
    assert Ecto.Time.cast("00:61:00") == :error
    assert Ecto.Time.cast("00:00:61") == :error
    assert Ecto.Time.cast("00:00:009") == :error
    assert Ecto.Time.cast("00:00:00.A00") == :error
  end

  test "cast maps" do
    # String- and atom-keyed maps are both accepted; a missing sec defaults to
    # zero while blank/nil components are invalid.
    assert Ecto.Time.cast(%{"hour" => "23", "min" => "50", "sec" => "07"}) ==
           {:ok, @time}

    assert Ecto.Time.cast(%{hour: 23, min: 50, sec: 07}) ==
           {:ok, @time}

    assert Ecto.Time.cast(%{"hour" => "23", "min" => "50"}) ==
           {:ok, @time_zero}

    assert Ecto.Time.cast(%{hour: 23, min: 50}) ==
           {:ok, @time_zero}

    assert Ecto.Time.cast(%{hour: 12, min: 40, sec: 33, usec: 30_000}) ==
           {:ok, @time_usec}

    assert Ecto.Time.cast(%{"hour" => 12, "min" => 40, "sec" => 33, "usec" => 30_000}) ==
           {:ok, @time_usec}

    assert Ecto.Time.cast(%{"hour" => "", "min" => "50"}) ==
           :error

    assert Ecto.Time.cast(%{hour: 23, min: nil}) ==
           :error
  end

  test "cast tuple" do
    assert Ecto.Time.cast({23, 50, 07}) == {:ok, @time}
    assert Ecto.Time.cast({12, 40, 33, 30000}) == {:ok, @time_usec}
    assert Ecto.Time.cast({00, 61, 33}) == :error
  end

  test "dump itself into a time tuple" do
    # Dumping always produces a 4-tuple including microseconds.
    assert Ecto.Time.dump(@time) == {:ok, {23, 50, 7, 0}}
    assert Ecto.Time.dump(@time_usec) == {:ok, {12, 40, 33, 30000}}
    assert Ecto.Time.dump({23, 50, 07}) == :error
  end

  test "load tuple" do
    assert Ecto.Time.load({23, 50, 07}) == {:ok, @time}
    assert Ecto.Time.load({12, 40, 33, 30000}) == {:ok, @time_usec}
    assert Ecto.Time.load(@time) == :error
  end

  test "to_string" do
    # Microseconds are only rendered when non-zero, zero-padded to six digits.
    assert to_string(@time) == "23:50:07"
    assert Ecto.Time.to_string(@time) == "23:50:07"

    assert to_string(@time_usec) == "12:40:33.030000"
    assert Ecto.Time.to_string(@time_usec) == "12:40:33.030000"

    assert to_string(%Ecto.Time{hour: 1, min: 2, sec: 3, usec: 4})
           == "01:02:03.000004"

    assert Ecto.Time.to_string(%Ecto.Time{hour: 1, min: 2, sec: 3, usec: 4})
           == "01:02:03.000004"
  end

  test "to_iso8601" do
    assert Ecto.Time.to_iso8601(@time) == "23:50:07"
    assert Ecto.Time.to_iso8601(@time_usec) == "12:40:33.030000"
  end

  test "to_erl and from_erl" do
    assert @time |> Ecto.Time.to_erl |> Ecto.Time.from_erl == @time
  end

  test "inspect protocol" do
    assert inspect(@time) == "#Ecto.Time<23:50:07>"
    assert inspect(@time_usec) == "#Ecto.Time<12:40:33.030000>"
  end
end
defmodule Ecto.DateTimeTest do
  use ExUnit.Case, async: true

  # Fixtures: plain datetime, zero-seconds, microsecond precision, and a
  # date-only value (time at midnight).
  @datetime %Ecto.DateTime{year: 2015, month: 1, day: 23, hour: 23, min: 50, sec: 07, usec: 0}
  @datetime_zero %Ecto.DateTime{year: 2015, month: 1, day: 23, hour: 23, min: 50, sec: 0, usec: 0}
  @datetime_usec %Ecto.DateTime{year: 2015, month: 1, day: 23, hour: 23, min: 50, sec: 07, usec: 8000}
  @datetime_notime %Ecto.DateTime{year: 2015, month: 1, day: 23, hour: 0, min: 0, sec: 0, usec: 0}

  test "cast itself" do
    assert Ecto.DateTime.cast(@datetime) == {:ok, @datetime}
    assert Ecto.DateTime.cast(@datetime_usec) == {:ok, @datetime_usec}
  end

  test "cast strings" do
    # Both space and "T" separators are accepted; fractional seconds beyond
    # microsecond precision are truncated.
    assert Ecto.DateTime.cast("2015-01-23 23:50:07") == {:ok, @datetime}
    assert Ecto.DateTime.cast("2015-01-23T23:50:07") == {:ok, @datetime}
    assert Ecto.DateTime.cast("2015-01-23T23:50:07Z") == {:ok, @datetime}
    assert Ecto.DateTime.cast("2015-01-23T23:50:07.000Z") == {:ok, @datetime}
    assert Ecto.DateTime.cast("2015-01-23P23:50:07") == :error
    assert Ecto.DateTime.cast("2015-01-23T23:50:07.008") == {:ok, @datetime_usec}
    assert Ecto.DateTime.cast("2015-01-23T23:50:07.008Z") == {:ok, @datetime_usec}
    assert Ecto.DateTime.cast("2015-01-23T23:50:07.008000789") == {:ok, @datetime_usec}
  end

  test "cast maps" do
    # String- and atom-keyed maps are accepted; sec defaults to zero when
    # missing, but blank/nil components are invalid.
    assert Ecto.DateTime.cast(%{"year" => "2015", "month" => "1", "day" => "23",
                                "hour" => "23", "min" => "50", "sec" => "07"}) ==
           {:ok, @datetime}

    assert Ecto.DateTime.cast(%{year: 2015, month: 1, day: 23, hour: 23, min: 50, sec: 07}) ==
           {:ok, @datetime}

    assert Ecto.DateTime.cast(%{"year" => "2015", "month" => "1", "day" => "23",
                                "hour" => "23", "min" => "50"}) ==
           {:ok, @datetime_zero}

    assert Ecto.DateTime.cast(%{year: 2015, month: 1, day: 23, hour: 23, min: 50}) ==
           {:ok, @datetime_zero}

    assert Ecto.DateTime.cast(%{year: 2015, month: 1, day: 23, hour: 23,
                                min: 50, sec: 07, usec: 8_000}) ==
           {:ok, @datetime_usec}

    assert Ecto.DateTime.cast(%{"year" => 2015, "month" => 1, "day" => 23,
                                "hour" => 23, "min" => 50, "sec" => 07,
                                "usec" => 8_000}) ==
           {:ok, @datetime_usec}

    assert Ecto.DateTime.cast(%{"year" => "2015", "month" => "1", "day" => "23",
                                "hour" => "", "min" => "50"}) ==
           :error

    assert Ecto.DateTime.cast(%{year: 2015, month: 1, day: 23, hour: 23, min: nil}) ==
           :error
  end

  test "cast tuple" do
    assert Ecto.DateTime.cast({{2015, 1, 23}, {23, 50, 07}}) == {:ok, @datetime}
    assert Ecto.DateTime.cast({{2015, 1, 23}, {23, 50, 07, 8000}}) == {:ok, @datetime_usec}
    assert Ecto.DateTime.cast({{2015, 1, 23}, {25, 50, 07, 8000}}) == :error
  end

  test "dump itself to a tuple" do
    # Dumping always yields {{y, m, d}, {h, min, s, usec}}.
    assert Ecto.DateTime.dump(@datetime) == {:ok, {{2015, 1, 23}, {23, 50, 07, 0}}}
    assert Ecto.DateTime.dump(@datetime_usec) == {:ok, {{2015, 1, 23}, {23, 50, 07, 8000}}}
    assert Ecto.DateTime.dump({{2015, 1, 23}, {23, 50, 07}}) == :error
  end

  test "load tuple" do
    assert Ecto.DateTime.load({{2015, 1, 23}, {23, 50, 07}}) == {:ok, @datetime}
    assert Ecto.DateTime.load({{2015, 1, 23}, {23, 50, 07, 8000}}) == {:ok, @datetime_usec}
    assert Ecto.DateTime.load(@datetime) == :error
  end

  test "from_date" do
    assert Ecto.DateTime.from_date(%Ecto.Date{year: 2015, month: 1, day: 23}) == @datetime_notime
  end

  test "to_string" do
    assert to_string(@datetime) == "2015-01-23 23:50:07"
    assert Ecto.DateTime.to_string(@datetime) == "2015-01-23 23:50:07"

    assert to_string(@datetime_usec) == "2015-01-23 23:50:07.008000"
    assert Ecto.DateTime.to_string(@datetime_usec) == "2015-01-23 23:50:07.008000"
  end

  test "to_iso8601" do
    assert Ecto.DateTime.to_iso8601(@datetime) == "2015-01-23T23:50:07Z"
    assert Ecto.DateTime.to_iso8601(@datetime_usec) == "2015-01-23T23:50:07.008000Z"
  end

  test "to_erl and from_erl" do
    assert @datetime |> Ecto.DateTime.to_erl |> Ecto.DateTime.from_erl == @datetime
  end

  test "inspect protocol" do
    assert inspect(@datetime) == "#Ecto.DateTime<2015-01-23T23:50:07Z>"
    assert inspect(@datetime_usec) == "#Ecto.DateTime<2015-01-23T23:50:07.008000Z>"
  end
end
| 36.9631 | 102 | 0.577418 |
0832bc83283d9c023101ae154bde9cb3275bc0e1 | 288 | exs | Elixir | priv/repo/migrations/20170819223520_create_accounts.exs | nicksergeant/leather | 15b1c9403999737f7a6ee9a1c0349e047805bbe6 | [
"MIT"
] | 67 | 2016-10-24T04:11:40.000Z | 2021-11-25T16:46:51.000Z | priv/repo/migrations/20170819223520_create_accounts.exs | nicksergeant/leather | 15b1c9403999737f7a6ee9a1c0349e047805bbe6 | [
"MIT"
] | 6 | 2017-08-17T21:43:50.000Z | 2021-11-03T13:13:49.000Z | priv/repo/migrations/20170819223520_create_accounts.exs | nicksergeant/leather | 15b1c9403999737f7a6ee9a1c0349e047805bbe6 | [
"MIT"
defmodule Leather.Repo.Migrations.CreateAccounts do
  use Ecto.Migration

  # Creates the accounts table (name + owning user with timestamps) and an
  # index on the user foreign key for per-user lookups.
  def change do
    create table(:accounts) do
      add :name, :string
      add :user_id, references(:users, on_delete: :nothing)

      timestamps()
    end

    create index(:accounts, [:user_id])
  end
end
| 19.2 | 60 | 0.670139 |
0832c8c6ec6bcec669a09559377b86b4c5f33648 | 2,110 | exs | Elixir | test/dynamo_test.exs | azukiapp/dynamo | cad008fe7314e7ba7ef4990908956fbca60e980c | [
"Apache-2.0"
] | 1 | 2015-06-13T03:35:26.000Z | 2015-06-13T03:35:26.000Z | test/dynamo_test.exs | azukiapp/dynamo | cad008fe7314e7ba7ef4990908956fbca60e980c | [
"Apache-2.0"
] | null | null | null | test/dynamo_test.exs | azukiapp/dynamo | cad008fe7314e7ba7ef4990908956fbca60e980c | [
"Apache-2.0"
] | null | null | null | Code.require_file "../test_helper.exs", __FILE__
defmodule DynamoTest do
  use ExUnit.Case, async: true

  # App: fully explicit configuration — fixed root under test/fixtures, static
  # files served at /public, no compile-on-demand or module reloading.
  defmodule App do
    use Dynamo

    config :dynamo,
      root: Path.expand("../fixtures", __FILE__),
      endpoint: DynamoTest,
      static_root: Path.expand("../fixtures/public", __FILE__),
      static_route: "/public",
      compile_on_demand: false,
      reload_modules: false,
      source_paths: [Path.expand("../fixtures/*", __FILE__)],
      templates_paths: [Path.expand("../fixtures/templates", __FILE__)]

    # Test session compilation as well
    config :dynamo,
      session_store: Session.CookieStore,
      session_options:
        [key: "_foo_session",
         secret: "8RR6p7LJ6YN+vn8br/qZ6R0A1UXWIYRLNuvqvmJw7eLf6/ZTDXljAdNpHQhIzIRF"]
  end

  # ReloadApp: root derived from the :dynamo OTP app, static filter disabled,
  # compile-on-demand and module reloading turned on.
  defmodule ReloadApp do
    use Dynamo

    config :dynamo,
      otp_app: :dynamo,
      endpoint: DynamoTest,
      static_root: false,
      compile_on_demand: true,
      reload_modules: true,
      templates_paths: ["test/fixtures/templates"]
  end

  ## Config

  test "defines root based on otp app" do
    assert ReloadApp.root == Path.expand("../..", __FILE__)
  end

  test "defines root based on user config" do
    assert App.root == Path.expand("../fixtures", __FILE__)
  end

  ## Filters

  test "adds public filter" do
    file = Path.expand("../fixtures/public", __FILE__)
    assert Enum.first(App.__filters__) == Dynamo.Filters.Static.new("/public", file)
  end

  test "does not add public filter if disabled" do
    refute Enum.any? ReloadApp.__filters__, match?({ Dynamo.Filters.Static, _, _ }, &1)
  end

  test "adds reloader filter" do
    assert Dynamo.Filters.Loader.new(true, true) in ReloadApp.__filters__
  end

  test "does not add reloader filter if disabled" do
    refute Enum.any? App.__filters__, match?({ Dynamo.Filters.Loader, _, _ }, &1)
  end

  ## View paths

  test "defines templates paths" do
    # App uses precompiled templates; ReloadApp keeps the raw path for reloading.
    assert App.templates_paths == [DynamoTest.App.CompiledTemplates]

    templates = Path.expand("../fixtures/templates", __FILE__)
    assert ReloadApp.templates_paths == [templates]
  end
end
0832d579e56cb5929cc36621f5a6d2ce4482ef0c | 92,952 | exs | Elixir | test/broadway_test.exs | kianmeng/broadway | 8bc8b53d4659bad16b22ccca1bf482aa7c839051 | [
"Apache-2.0"
] | null | null | null | test/broadway_test.exs | kianmeng/broadway | 8bc8b53d4659bad16b22ccca1bf482aa7c839051 | [
"Apache-2.0"
] | null | null | null | test/broadway_test.exs | kianmeng/broadway | 8bc8b53d4659bad16b22ccca1bf482aa7c839051 | [
"Apache-2.0"
] | null | null | null | defmodule BroadwayTest do
use ExUnit.Case
import Integer
import ExUnit.CaptureLog
alias Broadway.{Message, BatchInfo, CallerAcknowledger}
defmodule ManualProducer do
@behaviour Broadway.Producer
use GenStage
@impl true
def init(%{test_pid: test_pid}) do
name = Process.info(self())[:registered_name]
send(test_pid, {:producer_initialized, name})
{:producer, %{test_pid: test_pid}, []}
end
def init(_args) do
{:producer, %{}}
end
@impl true
def handle_demand(demand, state) do
if test_pid = state[:test_pid] do
send(test_pid, {:handle_demand_called, demand, System.monotonic_time()})
end
{:noreply, [], state}
end
@impl true
def handle_info({:push_messages, messages}, state) do
{:noreply, messages, state}
end
@impl true
def handle_cast({:push_messages, messages}, state) do
{:noreply, messages, state}
end
@impl true
def handle_call({:push_messages, messages}, _from, state) do
{:reply, :reply, messages, state}
end
@impl true
def prepare_for_draining(%{test_pid: test_pid} = state) do
message = wrap_message(:message_during_cancel, test_pid)
send(self(), {:push_messages, [message]})
message = wrap_message(:message_after_cancel, test_pid)
Process.send_after(self(), {:push_messages, [message]}, 1)
{:noreply, [], state}
end
@impl true
def prepare_for_draining(state) do
{:noreply, [], state}
end
defp wrap_message(message, test_pid) do
ack = {CallerAcknowledger, {test_pid, make_ref()}, :ok}
%Message{data: message, acknowledger: ack}
end
end
defmodule Forwarder do
use Broadway
def handle_message(:default, message, %{test_pid: test_pid}) do
send(test_pid, {:message_handled, message.data})
message
end
def handle_batch(batcher, messages, _, %{test_pid: test_pid}) do
send(test_pid, {:batch_handled, batcher, messages})
messages
end
end
defmodule CustomHandlers do
use Broadway
def handle_message(_, message, %{handle_message: handler} = context) do
handler.(message, context)
end
def handle_batch(batcher, messages, batch_info, %{handle_batch: handler} = context) do
handler.(batcher, messages, batch_info, context)
end
end
defmodule CustomHandlersWithoutHandleBatch do
use Broadway
def handle_message(_, message, %{handle_message: handler} = context) do
handler.(message, context)
end
end
defmodule CustomHandlersWithHandleFailed do
use Broadway
def handle_message(_, message, %{handle_message: handler} = context) do
handler.(message, context)
end
def handle_batch(batcher, messages, batch_info, %{handle_batch: handler} = context) do
handler.(batcher, messages, batch_info, context)
end
def handle_failed(messages, %{handle_failed: handler} = context) do
handler.(messages, context)
end
end
defmodule CustomHandlersWithPreparedMessages do
use Broadway
def prepare_messages(messages, %{prepare_messages: prepare} = context) do
prepare.(messages, context)
end
def handle_message(_, message, %{handle_message: handler} = context) do
handler.(message, context)
end
end
defmodule Transformer do
def transform(event, test_pid: test_pid) do
if event == :kill_producer do
raise "Error raised"
end
%Message{
data: "#{event} transformed",
acknowledger: {Broadway.CallerAcknowledger, {test_pid, :ref}, :unused}
}
end
end
defmodule UsesRegistry do
use Broadway
def handle_message(_, message, _), do: message
def handle_batch(_, messages, _, _), do: messages
def process_name({:via, Registry, {registry, id}}, base_name) do
{:via, Registry, {registry, {id, base_name}}}
end
end
  # Verifies the child_spec/1 generated by `use Broadway`, including the
  # overridable :id option.
  describe "use Broadway" do
    test "generates child_spec/1" do
      defmodule MyBroadway do
        use Broadway
        def handle_message(_, _, _), do: nil
        def handle_batch(_, _, _, _), do: nil
      end

      assert MyBroadway.child_spec(:arg) == %{
               id: BroadwayTest.MyBroadway,
               start: {BroadwayTest.MyBroadway, :start_link, [:arg]},
               shutdown: :infinity
             }
    end

    # NOTE(review): "overriden" in the test name is a typo for "overridden";
    # kept as-is since test names are runtime strings.
    test "generates child_spec/1 with overriden options" do
      defmodule MyBroadwayWithCustomOptions do
        use Broadway, id: :some_id

        def handle_message(_, _, _), do: nil
        def handle_batch(_, _, _, _), do: nil
      end

      assert MyBroadwayWithCustomOptions.child_spec(:arg).id == :some_id
    end
  end
  # Covers topology configuration: option validation, stage counts, default
  # context, prepare_for_start, producer option injection and spawn_opt.
  describe "broadway configuration" do
    test "invalid configuration options" do
      assert_raise ArgumentError,
                   "invalid configuration given to Broadway.start_link/2, expected :name to be an atom or a {:via, module, term} tuple, got: 1",
                   fn -> Broadway.start_link(Forwarder, name: 1) end
    end

    test "invalid nested configuration options" do
      opts = [
        name: MyBroadway,
        producer: [module: {ManualProducer, []}],
        processors: [default: []],
        batchers: [
          sqs: [concurrency: 2, batch_sizy: 10]
        ]
      ]

      message = """
      invalid configuration given to Broadway.start_link/2 for key [:batchers, :sqs], \
      unknown options [:batch_sizy], valid options are: \
      [:concurrency, :batch_size, :batch_timeout, :partition_by, :spawn_opt, :hibernate_after]\
      """

      assert_raise ArgumentError, message, fn ->
        Broadway.start_link(Forwarder, opts)
      end
    end

    test "default number of producers is 1" do
      broadway = new_unique_name()

      Broadway.start_link(Forwarder,
        name: broadway,
        context: %{test_pid: self()},
        producer: [module: {ManualProducer, []}],
        processors: [default: []],
        batchers: [default: []]
      )

      assert get_n_producers(broadway) == 1
      assert length(Broadway.producer_names(broadway)) == 1
    end

    test "set number of producers", %{test: test} do
      broadway = new_unique_name()
      self = self()

      # Forward the topology init telemetry event so we can inspect its config.
      :telemetry.attach(
        "#{test}",
        [:broadway, :topology, :init],
        fn name, measurements, metadata, _ ->
          send(self, {:telemetry_event, name, measurements, metadata})
        end,
        nil
      )

      Broadway.start_link(Forwarder,
        name: broadway,
        context: %{test_pid: self()},
        producer: [module: {ManualProducer, []}, concurrency: 3],
        processors: [default: []],
        batchers: [default: []]
      )

      assert get_n_producers(broadway) == 3
      assert length(Broadway.producer_names(broadway)) == 3

      assert_receive {:telemetry_event, [:broadway, :topology, :init], %{system_time: _},
                      %{config: config, supervisor_pid: supervisor_pid}}
                     when is_pid(supervisor_pid)

      Enum.each(
        [
          :name,
          :producer,
          :processors
        ],
        fn key ->
          assert Keyword.has_key?(config, key)
        end
      )
    end

    test "default number of processors is schedulers_online * 2" do
      broadway = new_unique_name()

      Broadway.start_link(Forwarder,
        name: broadway,
        producer: [module: {ManualProducer, []}],
        processors: [default: []],
        batchers: [default: []]
      )

      assert get_n_processors(broadway) == System.schedulers_online() * 2
    end

    test "set number of processors" do
      broadway = new_unique_name()

      Broadway.start_link(Forwarder,
        name: broadway,
        producer: [module: {ManualProducer, []}],
        processors: [default: [concurrency: 13]],
        batchers: [default: []]
      )

      assert get_n_processors(broadway) == 13
    end

    test "default number of consumers is 1 per batcher" do
      broadway = new_unique_name()

      Broadway.start_link(Forwarder,
        name: broadway,
        context: %{test_pid: self()},
        producer: [module: {ManualProducer, []}],
        processors: [default: []],
        batchers: [p1: [], p2: []]
      )

      assert get_n_consumers(broadway, :p1) == 1
      assert get_n_consumers(broadway, :p2) == 1
    end

    test "set number of consumers" do
      broadway = new_unique_name()

      Broadway.start_link(Forwarder,
        name: broadway,
        context: %{test_pid: self()},
        producer: [module: {ManualProducer, []}],
        processors: [default: []],
        batchers: [
          p1: [concurrency: 2],
          p2: [concurrency: 3]
        ]
      )

      assert get_n_consumers(broadway, :p1) == 2
      assert get_n_consumers(broadway, :p2) == 3
    end

    test "default context is :context_not_set" do
      broadway = new_unique_name()

      Broadway.start_link(Forwarder,
        name: broadway,
        producer: [module: {ManualProducer, []}],
        processors: [default: []],
        batchers: [default: []]
      )

      pid = get_processor(broadway, :default)
      assert :sys.get_state(pid).state.context == :context_not_set
    end

    test "invokes prepare_for_start callback on producers" do
      # prepare_for_start swaps in ManualProducer and registers an extra Agent
      # child under the topology supervisor.
      defmodule PrepareProducer do
        @behaviour Broadway.Producer

        def prepare_for_start(Forwarder, opts) do
          name = opts[:name]

          child_spec = %{
            id: Agent,
            start: {Agent, :start_link, [fn -> %{} end, [name: Module.concat(name, "Agent")]]}
          }

          opts = put_in(opts[:producer][:module], {ManualProducer, []})
          {[child_spec], opts}
        end
      end

      broadway = new_unique_name()

      Broadway.start_link(Forwarder,
        name: broadway,
        producer: [module: {PrepareProducer, []}],
        processors: [default: []],
        batchers: [default: [batch_size: 2]],
        context: %{test_pid: self()}
      )

      # This is a message handled by ManualProducer,
      # meaning the prepare_for_start rewrite worked.
      send(
        get_producer(broadway),
        {:push_messages,
         [
           %Message{data: 1, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}},
           %Message{data: 3, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}}
         ]}
      )

      assert_receive {:message_handled, 1}
      assert_receive {:message_handled, 3}
      assert_receive {:batch_handled, :default, [%{data: 1}, %{data: 3}]}
      assert_receive {:ack, :ref, [_, _], []}

      # Now show that the agent was started
      assert Agent.get(Module.concat(broadway, "Agent"), & &1) == %{}

      # An that is a child of the main supervisor
      assert [_, _, _, _, _, {Agent, _, _, _}] =
               Supervisor.which_children(Module.concat(broadway, "Broadway.Supervisor"))
    end

    test "the return value of the prepare_for_start callback is validated" do
      defmodule BadPrepareProducer do
        @behaviour Broadway.Producer

        def prepare_for_start(Forwarder, opts) do
          opts[:context][:prepare_for_start_return_value]
        end
      end

      broadway = new_unique_name()
      # Trap exits so the failed start_link returns {:error, _} instead of
      # crashing the test process.
      Process.flag(:trap_exit, true)

      assert {:error, {%ArgumentError{} = error, _stacktrace}} =
               Broadway.start_link(Forwarder,
                 name: broadway,
                 producer: [module: {BadPrepareProducer, []}],
                 processors: [default: []],
                 context: %{prepare_for_start_return_value: :bad_return_value}
               )

      assert Exception.message(error) ==
               "expected BroadwayTest.BadPrepareProducer.prepare_for_start/2 to " <>
                 "return {child_specs, options}, got: :bad_return_value"
    end

    test "prepare_for_start callback validates options again" do
      defmodule BadPrepareProducerOptions do
        @behaviour Broadway.Producer

        def prepare_for_start(Forwarder, opts) do
          opts[:context][:prepare_for_start_return_value]
        end
      end

      broadway = new_unique_name()
      Process.flag(:trap_exit, true)

      # Returns an empty options list from prepare_for_start, which fails
      # re-validation because :name is required.
      assert {:error, {%NimbleOptions.ValidationError{} = error, _stacktrace}} =
               Broadway.start_link(Forwarder,
                 name: broadway,
                 producer: [module: {BadPrepareProducerOptions, []}],
                 processors: [default: []],
                 context: %{prepare_for_start_return_value: {[], _bad_opts = []}}
               )

      assert Exception.message(error) == "required option :name not found, received options: []"
    end

    test "injects the :broadway option when the producer config is a kw list" do
      defmodule ProducerWithTopologyIndex do
        @behaviour Broadway.Producer

        def init(opts) do
          send(opts[:test_pid], {:init_called, opts})
          {:producer, opts}
        end

        def handle_demand(_demand, state) do
          {:noreply, [], state}
        end
      end

      name = new_unique_name()

      Broadway.start_link(Forwarder,
        name: name,
        producer: [module: {ProducerWithTopologyIndex, [test_pid: self()]}],
        processors: [default: []]
      )

      assert_receive {:init_called, opts}
      assert opts[:broadway][:name] == name
      assert opts[:broadway][:index] == 0
      assert length(opts[:broadway][:processors]) == 1

      # If the producer config is not a kw list, the index is not injected.
      Broadway.start_link(Forwarder,
        name: new_unique_name(),
        producer: [module: {ProducerWithTopologyIndex, %{test_pid: self()}}],
        processors: [default: []]
      )

      assert_receive {:init_called, map}
      refute Map.has_key?(map, :broadway)
    end

    test "supports spawn_opt" do
      # At a given level
      Broadway.start_link(Forwarder,
        name: broadway_name = new_unique_name(),
        producer: [module: {ManualProducer, []}, spawn_opt: [priority: :high]],
        processors: [default: []]
      )

      assert named_priority(get_producer(broadway_name)) == {:priority, :high}
      assert named_priority(get_processor(broadway_name, :default)) == {:priority, :normal}

      # At the root
      Broadway.start_link(Forwarder,
        name: broadway_name = new_unique_name(),
        producer: [module: {ManualProducer, []}],
        processors: [default: []],
        spawn_opt: [priority: :high]
      )

      assert named_priority(get_producer(broadway_name)) == {:priority, :high}
      assert named_priority(get_processor(broadway_name, :default)) == {:priority, :high}
    end

    # Returns the {:priority, _} info tuple of the process registered as `name`.
    defp named_priority(name), do: Process.info(Process.whereis(name), :priority)
  end
  # Messages pushed directly into the topology flow through processor and
  # batcher and are acknowledged as one batch.
  test "push_messages/2" do
    {:ok, _broadway} =
      Broadway.start_link(Forwarder,
        name: broadway_name = new_unique_name(),
        context: %{test_pid: self()},
        producer: [module: {ManualProducer, []}],
        processors: [default: []],
        batchers: [default: [batch_size: 2]]
      )

    Broadway.push_messages(broadway_name, [
      %Message{data: 1, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}},
      %Message{data: 3, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}}
    ])

    assert_receive {:message_handled, 1}
    assert_receive {:message_handled, 3}
    assert_receive {:batch_handled, :default, [%{data: 1}, %{data: 3}]}
    assert_receive {:ack, :ref, [_, _], []}
  end
  # Covers the :metadata and :acknowledger options of Broadway.test_message/3
  # and their defaults.
  describe "test_messages/3" do
    setup do
      handle_message = fn message, _ ->
        message
      end

      context = %{
        handle_message: handle_message
      }

      broadway_name = new_unique_name()

      {:ok, _broadway} =
        Broadway.start_link(CustomHandlers,
          name: broadway_name,
          context: context,
          producer: [module: {ManualProducer, []}],
          processors: [default: [concurrency: 1, min_demand: 1, max_demand: 2]]
        )

      %{broadway: broadway_name}
    end

    test "metadata field is added to message if specified",
         %{broadway: broadway} do
      ref = Broadway.test_message(broadway, :message, metadata: %{field: :value})
      assert_receive {:ack, ^ref, [%Message{data: :message, metadata: %{field: :value}}], []}
    end

    test "metadata field in message defaults to %{} if not specified",
         %{broadway: broadway} do
      ref = Broadway.test_message(broadway, :message)
      assert_receive {:ack, ^ref, [%Message{data: :message, metadata: %{}}], []}
    end

    test "ack field is added to message if specified",
         %{broadway: broadway} do
      defmodule AckMod do
        defdelegate ack(conf, successful, failed), to: Broadway.CallerAcknowledger
      end

      opts = [
        acknowledger: fn _data, ack_ref -> {AckMod, ack_ref, :data} end
      ]

      ref = Broadway.test_message(broadway, :message, opts)
      assert_receive {:ack, ^ref, [%Message{data: :message, acknowledger: ack}], []}
      assert {AckMod, _, :data} = ack
    end

    test "ack field in message defaults to Broadway.CallerAcknowledger if not specified",
         %{broadway: broadway} do
      ref = Broadway.test_message(broadway, :message)
      assert_receive {:ack, ^ref, [%Message{data: :message, acknowledger: ack}], []}
      assert {Broadway.CallerAcknowledger, {_, _}, :ok} = ack
    end
  end
  # Exercises prepare_messages/2: errors fail the whole batch, dropping
  # messages is an error, and prepared messages continue through the pipeline.
  describe "prepare_messages" do
    setup do
      # :raise crashes preparation; :filter drops the message; anything else
      # passes through (order preserved by the reduce + reverse).
      prepare_messages = fn messages, _ ->
        messages
        |> Enum.reduce([], fn message, acc ->
          case message.data do
            :raise ->
              raise "Error raised in preparing message"

            :filter ->
              acc

            _ ->
              [message | acc]
          end
        end)
        |> Enum.reverse()
      end

      handle_message = fn
        message, _ -> message
      end

      context = %{
        prepare_messages: prepare_messages,
        handle_message: handle_message
      }

      broadway_name = new_unique_name()

      {:ok, _broadway} =
        Broadway.start_link(CustomHandlersWithPreparedMessages,
          name: broadway_name,
          context: context,
          producer: [module: {ManualProducer, []}],
          processors: [default: []]
        )

      processor = get_processor(broadway_name, :default)
      # Link so a processor crash would surface as an :EXIT in these tests.
      Process.link(Process.whereis(processor))
      %{broadway: broadway_name, processor: processor}
    end

    test "raises if fewer messages are returned from prepare_messages", %{
      broadway: broadway,
      processor: processor
    } do
      assert capture_log(fn ->
               ref = Broadway.test_batch(broadway, [1, :filter, 3, 4], batch_mode: :flush)
               assert_receive {:ack, ^ref, [], [%{}, %{}, %{}, %{}]}
             end) =~
               "[error] ** (RuntimeError) expected all messages to be returned from prepared_messages/2"

      refute_received {:EXIT, _, ^processor}
    end

    test "all messages in the prepare_messages are marked as {kind, reason, stack} when an error is raised",
         %{broadway: broadway, processor: processor} do
      ref = Broadway.test_batch(broadway, [1, :raise, 3, 4], batch_mode: :flush)
      assert_receive {:ack, ^ref, [], [%{data: 1}, %{data: :raise}, %{data: 3}, %{data: 4}]}
      refute_received {:EXIT, _, ^processor}
    end

    test "all prepared messages are passed", %{broadway: broadway} do
      ref = Broadway.test_batch(broadway, [1, 2], batch_mode: :bulk)
      assert_receive {:ack, ^ref, [%{data: 1}, %{data: 2}], []}
    end
  end
describe "processor" do
    setup do
      test_pid = self()

      # Message data acts as a command: :fail marks failed (with a bogus
      # batcher), :raise/:bad_return/:bad_batcher/:broken_link trigger the
      # corresponding error paths; anything else passes through.
      handle_message = fn message, _ ->
        case message.data do
          :fail -> Message.failed(%{message | batcher: :unknown}, "Failed message")
          :raise -> raise "Error raised"
          :bad_return -> :oops
          :bad_batcher -> %{message | batcher: :unknown}
          :broken_link -> Task.async(fn -> raise "oops" end) |> Task.await()
          _ -> message
        end
      end

      # The batcher reports the batch, drops :discard_in_batcher messages and
      # fails :fail_batcher ones.
      handle_batch = fn _, batch, _, _ ->
        send(test_pid, {:batch_handled, batch})

        for message <- batch, message.data != :discard_in_batcher do
          if message.data == :fail_batcher do
            Message.failed(message, "Failed batcher")
          else
            message
          end
        end
      end

      context = %{
        handle_message: handle_message,
        handle_batch: handle_batch
      }

      broadway_name = new_unique_name()

      {:ok, _broadway} =
        Broadway.start_link(CustomHandlers,
          name: broadway_name,
          context: context,
          producer: [module: {ManualProducer, []}],
          processors: [default: [concurrency: 1, min_demand: 1, max_demand: 2]],
          batchers: [default: [batch_size: 2]]
        )

      processor = get_processor(broadway_name, :default)
      # Link so a processor crash would surface as an :EXIT in these tests.
      Process.link(Process.whereis(processor))
      %{broadway: broadway_name, processor: processor}
    end
    # Each test below asserts message status/acknowledgement and that the
    # linked processor survived (no :EXIT received).
    test "successful messages are marked as :ok", %{broadway: broadway} do
      ref = Broadway.test_batch(broadway, [1, 2], batch_mode: :bulk)
      assert_receive {:batch_handled, [%{status: :ok}, %{status: :ok}]}
      assert_receive {:ack, ^ref, [%{status: :ok}, %{status: :ok}], []}
    end

    test "failed messages are marked as {:failed, reason}", %{broadway: broadway} do
      ref = Broadway.test_message(broadway, :fail)
      assert_receive {:ack, ^ref, _, [%{status: {:failed, "Failed message"}}]}
    end

    test "failed messages are not forwarded to the batcher", %{broadway: broadway} do
      ref = Broadway.test_batch(broadway, [1, :fail, :fail, 4])

      assert_receive {:batch_handled, [%{data: 1}, %{data: 4}]}
      assert_receive {:ack, ^ref, [%{status: :ok}, %{status: :ok}], []}
      assert_receive {:ack, ^ref, _, [%{status: {:failed, "Failed message"}}]}
      assert_receive {:ack, ^ref, _, [%{status: {:failed, "Failed message"}}]}
    end

    test "messages are marked as {kind, reason, stack} when an error is raised while processing",
         %{broadway: broadway, processor: processor} do
      assert capture_log(fn ->
               ref = Broadway.test_batch(broadway, [1, :raise, 4])
               assert_receive {:ack, ^ref, [], [%{status: {:error, _, _}}]}
               assert_receive {:ack, ^ref, [%{data: 1}, %{data: 4}], []}
             end) =~ "[error] ** (RuntimeError) Error raised"

      refute_received {:EXIT, _, ^processor}
    end

    test "messages are marked as {kind, reason, stack} on bad return",
         %{broadway: broadway, processor: processor} do
      assert capture_log(fn ->
               ref = Broadway.test_batch(broadway, [1, :bad_return, 4])
               assert_receive {:ack, ^ref, [], [%{status: {:error, _, _}}]}
               assert_receive {:ack, ^ref, [%{data: 1}, %{data: 4}], []}
             end) =~
               "[error] ** (RuntimeError) expected a Broadway.Message from handle_message/3, got :oops"

      refute_received {:EXIT, _, ^processor}
    end

    test "messages are marked as {kind, reason, stack} on bad batcher",
         %{broadway: broadway, processor: processor} do
      assert capture_log(fn ->
               ref = Broadway.test_batch(broadway, [1, :bad_batcher, 4])
               assert_receive {:ack, ^ref, [], [%{status: {:error, _, _}}]}
               assert_receive {:ack, ^ref, [%{data: 1}, %{data: 4}], []}
             end) =~
               "[error] ** (RuntimeError) message was set to unknown batcher :unknown. The known batchers are [:default]"

      refute_received {:EXIT, _, ^processor}
    end

    test "messages are logged on incorrect batcher count",
         %{broadway: broadway, processor: processor} do
      assert capture_log(fn ->
               ref = Broadway.test_batch(broadway, [1, :discard_in_batcher, 4])
               assert_receive {:ack, ^ref, [%{data: 1}], []}
               assert_receive {:ack, ^ref, [%{data: 4}], []}
             end) =~
               "BroadwayTest.CustomHandlers.handle_batch/4 received 2 messages and returned only 1"

      refute_received {:EXIT, _, ^processor}
    end

    test "processors do not crash on bad acknowledger",
         %{broadway: broadway, processor: processor} do
      # Swap in an acknowledger module that does not exist to force an
      # UndefinedFunctionError during ack.
      [one, raise, four] = wrap_messages([1, :raise, 4])
      raise = %{raise | acknowledger: {Unknown, :ack_ref, :ok}}

      assert capture_log(fn ->
               Broadway.push_messages(broadway, [one, raise, four])
               assert_receive {:ack, _, [%{data: 1}, %{data: 4}], []}
             end) =~ "[error] ** (UndefinedFunctionError) function Unknown.ack/3 is undefined"

      refute_received {:EXIT, _, ^processor}
    end

    test "processors do not crash on broken link", %{broadway: broadway, processor: processor} do
      assert capture_log(fn ->
               ref = Broadway.test_batch(broadway, [1, :broken_link, 4])
               assert_receive {:ack, ^ref, [], [%{status: {:exit, _, _}}]}
               assert_receive {:ack, ^ref, [%{data: 1}, %{data: 4}], []}
             end) =~ "** (RuntimeError) oops"

      refute_received {:EXIT, _, ^processor}
    end
    test "telemetry", %{broadway: broadway, test: test} do
      self = self()

      # Attach one handler to every telemetry event Broadway emits; the handler
      # validates the metadata inline and forwards each event to the test
      # process so the sequence can be asserted below.
      :ok =
        :telemetry.attach_many(
          "#{test}",
          [
            [:broadway, :processor, :start],
            [:broadway, :processor, :stop],
            [:broadway, :batcher, :start],
            [:broadway, :batcher, :stop],
            [:broadway, :batch_processor, :start],
            [:broadway, :batch_processor, :stop],
            [:broadway, :processor, :message, :start],
            [:broadway, :processor, :message, :stop],
            [:broadway, :processor, :message, :exception]
          ],
          fn name, measurements, metadata, _ ->
            # Metadata common to all events.
            assert metadata.name
            assert metadata.topology_name == broadway
            assert %{} = metadata.context

            # Stage-specific metadata.
            case name do
              [:broadway, stage, _] when stage in [:processor, :batch_processor] ->
                assert is_integer(metadata.index)

                if stage == :batch_processor do
                  assert metadata.batch_info.batch_key
                else
                  assert metadata.processor_key
                end

              [:broadway, :processor, :message, _] ->
                assert is_integer(metadata.index)
                assert metadata.processor_key

              [:broadway, :batcher, _] ->
                assert metadata.batcher_key

              _ ->
                :ok
            end

            send(self, {:telemetry_event, name, measurements, metadata})
          end,
          nil
        )

      # Five messages: 1 and 2 succeed, :fail fails in the processor,
      # :fail_batcher fails in the batch handler, :raise raises (exception event).
      ref = Broadway.test_batch(broadway, [1, 2, :fail, :fail_batcher, :raise])

      assert_receive {:batch_handled, [%{data: 1, status: :ok}, %{data: 2, status: :ok}]}
      assert_receive {:batch_handled, [%{data: :fail_batcher, status: :ok}]}
      assert_receive {:ack, ^ref, [%{status: :ok}, %{status: :ok}], []}
      assert_receive {:ack, ^ref, [], [%{status: {:failed, "Failed message"}}]}
      assert_receive {:ack, ^ref, [], [%{status: {:failed, "Failed batcher"}}]}

      # First processor cycle: no failures.
      assert_receive {:telemetry_event, [:broadway, :processor, :start], %{system_time: _}, %{}}

      assert_receive {:telemetry_event, [:broadway, :processor, :message, :start],
                      %{system_time: _}, %{}}

      assert_receive {:telemetry_event, [:broadway, :processor, :message, :stop], %{duration: _},
                      %{}}

      assert_receive {:telemetry_event, [:broadway, :processor, :stop], %{duration: _}, metadata}
      assert [] = metadata.failed_messages

      # Second processor cycle: failed_messages matched directly in the pattern.
      assert_receive {:telemetry_event, [:broadway, :processor, :start], %{system_time: _}, %{}}

      assert_receive {:telemetry_event, [:broadway, :processor, :message, :start],
                      %{system_time: _}, %{}}

      assert_receive {:telemetry_event, [:broadway, :processor, :message, :stop], %{duration: _},
                      %{}}

      assert_receive {:telemetry_event, [:broadway, :processor, :stop], %{duration: _},
                      %{failed_messages: []}}

      # NOTE(review): `metadata` here is still the binding from the previous
      # :stop event above, so this re-assert is redundant — confirm intent.
      assert [] = metadata.failed_messages

      # Third processor cycle: the :fail message shows up as failed.
      assert_receive {:telemetry_event, [:broadway, :processor, :start], %{system_time: _}, %{}}

      assert_receive {:telemetry_event, [:broadway, :processor, :message, :start],
                      %{system_time: _}, %{}}

      assert_receive {:telemetry_event, [:broadway, :processor, :message, :stop], %{duration: _},
                      %{}}

      assert_receive {:telemetry_event, [:broadway, :processor, :stop], %{duration: _}, metadata}
      assert [%{data: :fail}] = metadata.failed_messages

      # Fourth processor cycle: no failures again.
      assert_receive {:telemetry_event, [:broadway, :processor, :start], %{system_time: _}, %{}}

      assert_receive {:telemetry_event, [:broadway, :processor, :message, :start],
                      %{system_time: _}, %{}}

      assert_receive {:telemetry_event, [:broadway, :processor, :message, :stop], %{duration: _},
                      %{}}

      assert_receive {:telemetry_event, [:broadway, :processor, :stop], %{duration: _}, metadata}
      assert [] = metadata.failed_messages

      # Three batcher cycles, each delivering one message downstream.
      assert_receive {:telemetry_event, [:broadway, :batcher, :start], %{system_time: _},
                      %{messages: [_]}}

      assert_receive {:telemetry_event, [:broadway, :batcher, :stop], %{duration: _}, %{}}

      assert_receive {:telemetry_event, [:broadway, :batcher, :start], %{system_time: _},
                      %{messages: [_]}}

      assert_receive {:telemetry_event, [:broadway, :batcher, :stop], %{duration: _}, %{}}

      assert_receive {:telemetry_event, [:broadway, :batcher, :start], %{system_time: _},
                      %{messages: [_]}}

      assert_receive {:telemetry_event, [:broadway, :batcher, :stop], %{duration: _}, %{}}

      # Batch-processor cycles: the successful batch, then the failing one.
      assert_receive {:telemetry_event, [:broadway, :batch_processor, :start], %{system_time: _},
                      %{messages: [%{data: 1}, %{data: 2}], batch_info: %BatchInfo{}}}

      assert_receive {:telemetry_event, [:broadway, :batch_processor, :stop], %{duration: _},
                      %{failed_messages: [], batch_info: %BatchInfo{}}}

      assert_receive {:telemetry_event, [:broadway, :batch_processor, :start], %{system_time: _},
                      %{messages: [%{data: :fail_batcher}], batch_info: %BatchInfo{}}}

      assert_receive {:telemetry_event, [:broadway, :batch_processor, :stop], %{duration: _},
                      %{failed_messages: [%{data: :fail_batcher}], batch_info: %BatchInfo{}}}

      # The :raise message produces an :exception event instead of a message :stop.
      assert_receive {:telemetry_event, [:broadway, :processor, :start], %{system_time: _}, %{}}

      assert_receive {:telemetry_event, [:broadway, :processor, :message, :start],
                      %{system_time: _}, %{}}

      assert_receive {:telemetry_event, [:broadway, :processor, :message, :exception],
                      %{duration: _}, %{}}

      assert_receive {:telemetry_event, [:broadway, :processor, :stop], %{duration: _}, %{}}
      assert_receive {:ack, ^ref, [], [%{status: {:error, _, _}}]}

      :telemetry.detach("#{test}")
    end
end
describe "pipeline without batchers" do
setup do
handle_message = fn message, _ ->
case message.data do
:fail -> Message.failed(message, "Failed message")
_ -> message
end
end
context = %{
handle_message: handle_message
}
broadway_name = new_unique_name()
{:ok, _broadway} =
Broadway.start_link(CustomHandlersWithoutHandleBatch,
name: broadway_name,
context: context,
producer: [module: {ManualProducer, []}],
processors: [default: [concurrency: 1, min_demand: 1, max_demand: 2]]
)
producer = get_producer(broadway_name)
%{broadway: broadway_name, producer: producer}
end
test "no batcher supervisor is initialized", %{broadway: broadway} do
assert Process.whereis(:"#{broadway}.Broadway.BatcherPartitionSupervisor") == nil
end
test "successful messages are marked as :ok", %{broadway: broadway} do
ref = Broadway.test_batch(broadway, [1, 2], batch_mode: :flush)
assert_receive {:ack, ^ref, [%{status: :ok}], []}
assert_receive {:ack, ^ref, [%{status: :ok}], []}
end
test "failed messages are marked as {:failed, reason}", %{broadway: broadway} do
ref = Broadway.test_message(broadway, :fail)
assert_receive {:ack, ^ref, _, [%{status: {:failed, "Failed message"}}]}
end
test "shutting down broadway waits until all events are processed",
%{broadway: broadway, producer: producer} do
# We suspend the producer to make sure that it doesn't process the messages early on
:sys.suspend(producer)
async_push_messages(producer, [1, 2, 3, 4])
Process.exit(Process.whereis(broadway), :shutdown)
:sys.resume(producer)
assert_receive {:ack, _, [%{data: 1}], []}
assert_receive {:ack, _, [%{data: 2}], []}
assert_receive {:ack, _, [%{data: 3}], []}
assert_receive {:ack, _, [%{data: 4}], []}
end
end
describe "put_batcher" do
setup do
broadway_name = new_unique_name()
test_pid = self()
handle_message = fn message, _ ->
if is_odd(message.data) do
Message.put_batcher(message, :odd)
else
Message.put_batcher(message, :even)
end
end
context = %{
handle_message: handle_message,
handle_batch: fn batcher, batch, batch_info, _ ->
send(test_pid, {:batch_handled, batcher, batch, batch_info})
batch
end
}
{:ok, _broadway} =
Broadway.start_link(CustomHandlers,
name: broadway_name,
context: context,
producer: [module: {ManualProducer, []}],
processors: [default: []],
batchers: [
odd: [batch_size: 10, batch_timeout: 20],
even: [batch_size: 5, batch_timeout: 20]
]
)
%{broadway: broadway_name}
end
test "generate batches based on :batch_size", %{broadway: broadway} do
Broadway.test_batch(broadway, Enum.to_list(1..40))
assert_receive {:batch_handled, :odd, messages,
%BatchInfo{batcher: :odd, size: 10, trigger: :size}}
when length(messages) == 10
assert_receive {:batch_handled, :odd, messages,
%BatchInfo{batcher: :odd, size: 10, trigger: :size}}
when length(messages) == 10
assert_receive {:batch_handled, :even, messages,
%BatchInfo{batcher: :even, size: 5, trigger: :size}}
when length(messages) == 5
assert_receive {:batch_handled, :even, messages,
%BatchInfo{batcher: :even, size: 5, trigger: :size}}
when length(messages) == 5
assert_receive {:batch_handled, :even, messages,
%BatchInfo{batcher: :even, size: 5, trigger: :size}}
when length(messages) == 5
assert_receive {:batch_handled, :even, messages,
%BatchInfo{batcher: :even, size: 5, trigger: :size}}
when length(messages) == 5
refute_received {:batch_handled, _, _}
end
test "generate batches with the remaining messages after :batch_timeout is reached",
%{broadway: broadway} do
Broadway.test_batch(broadway, [1, 2, 3, 4, 5], batch_mode: :flush)
assert_receive {:batch_handled, :odd, messages, %BatchInfo{trigger: :flush}}
when length(messages) == 3
assert_receive {:batch_handled, :even, messages, %BatchInfo{trigger: :flush}}
when length(messages) == 2
refute_received {:batch_handled, _, _, _}
end
end
describe "put_batch_key" do
setup do
test_pid = self()
broadway_name = new_unique_name()
handle_message = fn message, _ ->
if is_odd(message.data) do
Message.put_batch_key(message, :odd)
else
Message.put_batch_key(message, :even)
end
end
context = %{
handle_message: handle_message,
handle_batch: fn batcher, batch, batch_info, _ ->
send(test_pid, {:batch_handled, batcher, batch_info})
batch
end
}
{:ok, _broadway} =
Broadway.start_link(CustomHandlers,
name: broadway_name,
context: context,
producer: [module: {ManualProducer, []}],
processors: [default: []],
batchers: [default: [batch_size: 2, batch_timeout: 20]]
)
%{broadway: broadway_name}
end
test "generate batches based on :batch_size", %{broadway: broadway} do
ref = Broadway.test_batch(broadway, Enum.to_list(1..10))
assert_receive {:ack, ^ref, [%{data: 1, batch_key: :odd}, %{data: 3, batch_key: :odd}], []}
assert_receive {:ack, ^ref, [%{data: 2, batch_key: :even}, %{data: 4, batch_key: :even}],
[]}
assert_receive {:batch_handled, :default,
%BatchInfo{batch_key: :odd, size: 2, trigger: :size}}
assert_receive {:batch_handled, :default,
%BatchInfo{batch_key: :even, size: 2, trigger: :size}}
end
test "generate batches with the remaining messages after :batch_timeout is reached",
%{broadway: broadway} do
ref = Broadway.test_batch(broadway, [1, 2], batch_mode: :flush)
assert_receive {:ack, ^ref, [%{data: 1, batch_key: :odd}], []}
assert_receive {:ack, ^ref, [%{data: 2, batch_key: :even}], []}
end
end
  describe "partition_by" do
    # Default tags; individual tests override them via @tag.
    @describetag processors_options: [], batchers_options: []

    defmodule CustomHandlersForPartition do
      use Broadway

      # Builds a two-processor / two-batch-processor topology whose options
      # (extra processor/batcher opts, batch_size) come from the test tags, and
      # whose partitioning function is injected via opts[:partition_by].
      def start_link(opts) do
        Broadway.start_link(__MODULE__,
          name: opts[:broadway_name],
          context: opts[:context],
          producer: [module: {ManualProducer, []}],
          processors: [
            default:
              [
                concurrency: 2
              ] ++ opts[:tags].processors_options
          ],
          batchers: [
            default:
              [
                concurrency: 2,
                batch_size: Map.get(opts[:tags], :batch_size, 2),
                batch_timeout: 80
              ] ++ opts[:tags].batchers_options
          ],
          partition_by: opts[:partition_by]
        )
      end

      # Delegate both callbacks to closures carried in the context so each
      # test can define its own behavior.
      def handle_message(_, message, %{handle_message: handler} = context) do
        handler.(message, context)
      end

      def handle_batch(batcher, messages, batch_info, %{handle_batch: handler} = context) do
        handler.(batcher, messages, batch_info, context)
      end
    end

    setup tags do
      test_pid = self()
      broadway_name = new_unique_name()

      context = %{
        handle_message: fn message, _ ->
          message =
            if message.data >= 20 do
              Message.put_batch_key(message, :over_twenty)
            else
              message
            end

          # Random sleep so out-of-order processing would surface if
          # partitioning did not serialize messages per partition.
          Process.sleep(round(:rand.uniform() * 20))
          send(test_pid, {:message_handled, message.data, self()})
          message
        end,
        handle_batch: fn _batcher, batch, _batch_info, _ ->
          Process.sleep(round(:rand.uniform() * 20))
          send(test_pid, {:batch_handled, Enum.map(batch, & &1.data), self()})
          batch
        end
      }

      # Partition on the raw integer payload; with concurrency 2 the
      # assertions below show messages split by parity of data.
      partition_by = fn msg -> msg.data end

      {:ok, _broadway} =
        start_supervised(
          {CustomHandlersForPartition,
           [
             broadway_name: broadway_name,
             context: context,
             tags: tags,
             partition_by: partition_by
           ]}
        )

      %{broadway: broadway_name}
    end

    # Alternative partitioner used by the last test: two buckets split at 6.
    def shuffle_data(msg), do: if(msg.data <= 6, do: 0, else: 1)

    test "messages of the same partition are processed in order by the same processor",
         %{broadway: broadway} do
      Broadway.test_batch(broadway, Enum.to_list(1..8))

      # Odd payloads stay on one processor, in order...
      assert_receive {:message_handled, 1, processor_1}
      assert_receive {:message_handled, data, ^processor_1}
      assert data == 3
      assert_receive {:message_handled, data, ^processor_1}
      assert data == 5
      assert_receive {:message_handled, data, ^processor_1}
      assert data == 7

      # ...and even payloads on the other, also in order.
      assert_receive {:message_handled, 2, processor_2}
      assert_receive {:message_handled, data, ^processor_2}
      assert data == 4
      assert_receive {:message_handled, data, ^processor_2}
      assert data == 6
      assert_receive {:message_handled, data, ^processor_2}
      assert data == 8

      assert processor_1 != processor_2
    end

    test "messages of the same partition are processed in order by the same batch processor",
         %{broadway: broadway} do
      Broadway.test_batch(broadway, Enum.to_list(1..12))

      assert_receive {:batch_handled, [1, 3], batch_processor_1}
      assert_receive {:batch_handled, data, ^batch_processor_1}
      assert data == [5, 7]
      assert_receive {:batch_handled, data, ^batch_processor_1}
      assert data == [9, 11]

      assert_receive {:batch_handled, [2, 4], batch_processor_2}
      assert_receive {:batch_handled, data, ^batch_processor_2}
      assert data == [6, 8]
      assert_receive {:batch_handled, data, ^batch_processor_2}
      assert data == [10, 12]

      assert batch_processor_1 != batch_processor_2
    end

    @tag batch_size: 4
    test "messages of the same partition are processed in order with batch key",
         %{broadway: broadway} do
      # All payloads are >= 20, so handle_message tags them :over_twenty; the
      # batch key further splits each partition's batch of 4 into two of 2.
      Broadway.test_batch(broadway, Enum.to_list(16..23))

      # Without the batch key, we would have two batches of 4 elements
      assert_receive {:batch_handled, [16, 18], _}
      assert_receive {:batch_handled, [17, 19], _}
      assert_receive {:batch_handled, [20, 22], _}
      assert_receive {:batch_handled, [21, 23], _}
    end

    @tag processors_options: [partition_by: &__MODULE__.shuffle_data/1],
         batchers_options: [partition_by: &__MODULE__.shuffle_data/1]
    test "messages with processors and batchers level partitioning",
         %{broadway: broadway} do
      # Stage-level partition_by (shuffle_data) overrides the topology-level
      # one: bucket 0 holds data <= 6, bucket 1 holds data > 6.
      Broadway.test_batch(broadway, Enum.to_list(1..12))

      assert_receive {:message_handled, 1, processor_1}
      assert_receive {:message_handled, data, ^processor_1}
      assert data == 2

      assert_receive {:message_handled, 7, processor_2}
      assert_receive {:message_handled, data, ^processor_2}
      assert data == 8

      assert_receive {:batch_handled, [1, 2], batch_processor_1}
      assert_receive {:batch_handled, data, ^batch_processor_1}
      assert data == [3, 4]

      assert_receive {:batch_handled, [7, 8], batch_processor_2}
      assert_receive {:batch_handled, data, ^batch_processor_2}
      assert data == [9, 10]

      assert processor_1 != processor_2
      assert batch_processor_1 != batch_processor_2
    end
  end
describe "put_batch_mode" do
setup do
test_pid = self()
broadway_name = new_unique_name()
handle_message = fn message, _ ->
message
end
context = %{
handle_message: handle_message,
handle_batch: fn batcher, batch, batch_info, _ ->
send(test_pid, {:batch_handled, batcher, batch_info})
batch
end
}
{:ok, _broadway} =
Broadway.start_link(CustomHandlers,
name: broadway_name,
context: context,
producer: [module: {ManualProducer, []}],
processors: [default: []],
batchers: [default: [batch_size: 2, batch_timeout: 20]]
)
%{broadway: broadway_name}
end
test "flush by default", %{broadway: broadway} do
ref = Broadway.test_batch(broadway, [1, 2, 3, 4, 5], batch_mode: :flush)
assert_receive {:batch_handled, :default, _}
assert_receive {:ack, ^ref, [%{data: 1}], []}
assert_receive {:batch_handled, :default, _}
assert_receive {:ack, ^ref, [%{data: 2}], []}
assert_receive {:batch_handled, :default, _}
assert_receive {:ack, ^ref, [%{data: 3}], []}
assert_receive {:batch_handled, :default, _}
assert_receive {:ack, ^ref, [%{data: 4}], []}
assert_receive {:batch_handled, :default, _}
assert_receive {:ack, ^ref, [%{data: 5}], []}
end
test "bulk", %{broadway: broadway} do
ref = Broadway.test_batch(broadway, [1, 2, 3, 4, 5])
assert_receive {:batch_handled, :default, _}
assert_receive {:ack, ^ref, [%{data: 1}, %{data: 2}], []}
assert_receive {:batch_handled, :default, _}
assert_receive {:ack, ^ref, [%{data: 3}, %{data: 4}], []}
assert_receive {:batch_handled, :default, _}
assert_receive {:ack, ^ref, [%{data: 5}], []}
end
end
describe "ack_immediately" do
test "acks a single message" do
broadway_name = new_unique_name()
test_pid = self()
handle_message = fn message, _ ->
message = Message.ack_immediately(message)
send(test_pid, :manually_acked)
message
end
{:ok, _broadway} =
Broadway.start_link(CustomHandlers,
name: broadway_name,
context: %{handle_message: handle_message},
producer: [module: {ManualProducer, []}],
processors: [default: []]
)
ref = Broadway.test_message(broadway_name, 1)
assert_receive {:ack, ^ref, [%Message{data: 1}], []}
assert_receive :manually_acked
refute_received {:ack, ^ref, _successful, _failed}
end
test "acks multiple messages" do
broadway_name = new_unique_name()
test_pid = self()
handle_message = fn message, _ ->
Message.put_batcher(message, :default)
end
handle_batch = fn :default, batch, _batch_info, _ ->
batch =
Enum.map(batch, fn
%Message{data: :ok} = message -> message
%Message{data: :fail} = message -> Message.failed(message, :manually_failed)
end)
batch = Message.ack_immediately(batch)
send(test_pid, :manually_acked)
batch
end
{:ok, _broadway} =
Broadway.start_link(CustomHandlers,
name: broadway_name,
context: %{handle_message: handle_message, handle_batch: handle_batch},
producer: [module: {ManualProducer, []}],
processors: [default: []],
batchers: [default: [batch_size: 4, batch_timeout: 1000]]
)
ref = Broadway.test_batch(broadway_name, [:ok, :ok, :fail, :ok])
assert_receive {:ack, ^ref, [_, _, _], [_]}
assert_receive :manually_acked
refute_received {:ack, ^ref, _successful, _failed}
end
end
describe "configure_ack" do
defmodule NonConfigurableAcker do
@behaviour Broadway.Acknowledger
def ack(ack_ref, successful, failed) do
send(ack_ref, {:ack, successful, failed})
end
end
test "raises if the acknowledger doesn't implement the configure/3 callback" do
broadway_name = new_unique_name()
handle_message = fn message, _ ->
Message.configure_ack(message, test_pid: self())
end
{:ok, _broadway} =
Broadway.start_link(CustomHandlers,
name: broadway_name,
context: %{handle_message: handle_message},
producer: [module: {ManualProducer, []}],
processors: [default: []]
)
log =
capture_log(fn ->
Broadway.push_messages(broadway_name, [
%Message{data: 1, acknowledger: {NonConfigurableAcker, self(), :unused}}
])
assert_receive {:ack, _successful = [], [%{status: {:error, _, _}}]}
end)
assert log =~
"the configure/3 callback is not defined by acknowledger BroadwayTest.NonConfigurableAcker"
end
test "configures the acknowledger" do
broadway_name = new_unique_name()
configure_options = [some_unique_term: make_ref()]
handle_message = fn message, _ ->
Message.configure_ack(message, configure_options)
end
{:ok, _broadway} =
Broadway.start_link(CustomHandlers,
name: broadway_name,
context: %{handle_message: handle_message},
producer: [module: {ManualProducer, []}],
processors: [default: []]
)
ref = Broadway.test_message(broadway_name, 1)
assert_receive {:configure, ^ref, ^configure_options}
assert_receive {:ack, ^ref, [success], _failed = []}
assert success.data == 1
end
end
  describe "transformer" do
    setup do
      broadway_name = new_unique_name()

      # The transformer wraps raw producer events into Broadway messages;
      # Transformer.transform appends " transformed" to the payload (see the
      # assertions below) and raises for :kill_producer.
      {:ok, _pid} =
        Broadway.start_link(Forwarder,
          name: broadway_name,
          resubscribe_interval: 0,
          context: %{test_pid: self()},
          producer: [
            module: {ManualProducer, []},
            transformer: {Transformer, :transform, test_pid: self()}
          ],
          processors: [default: [concurrency: 1, min_demand: 1, max_demand: 2]],
          batchers: [default: [batch_size: 2]]
        )

      producer = get_producer(broadway_name)
      %{producer: producer}
    end

    test "transform all events on info", %{producer: producer} do
      send(producer, {:push_messages, [1, 2, 3]})
      assert_receive {:message_handled, "1 transformed"}
      assert_receive {:message_handled, "2 transformed"}
      assert_receive {:message_handled, "3 transformed"}
    end

    test "transform all events on call", %{producer: producer} do
      assert GenStage.call(producer, {:push_messages, [1, 2, 3]}) == :reply
      assert_receive {:message_handled, "1 transformed"}
      assert_receive {:message_handled, "2 transformed"}
      assert_receive {:message_handled, "3 transformed"}
    end

    test "transform all events on cast", %{producer: producer} do
      assert GenStage.cast(producer, {:push_messages, [1, 2, 3]}) == :ok
      assert_receive {:message_handled, "1 transformed"}
      assert_receive {:message_handled, "2 transformed"}
      assert_receive {:message_handled, "3 transformed"}
    end

    test "restart the producer if the transformation raises an error", %{producer: producer} do
      send(producer, {:push_messages, [1, 2]})
      send(producer, {:push_messages, [:kill_producer, 3]})
      ref_producer = Process.monitor(producer)

      # The first two messages are handled before the crash...
      assert_receive {:message_handled, "1 transformed"}
      assert_receive {:message_handled, "2 transformed"}
      assert_receive {:DOWN, ^ref_producer, _, _, _}
      # ...and message 3, queued behind :kill_producer, dies with the producer.
      refute_received {:message_handled, "3 transformed"}
    end
  end
describe "handle_failed in the processor" do
test "is called when a message is failed by the user" do
broadway_name = new_unique_name()
test_pid = self()
handle_message = fn
%{data: :fail} = message, _ -> Message.failed(message, :failed)
message, _ -> message
end
handle_failed = fn [message], _context ->
send(test_pid, {:handle_failed_called, message})
[Message.update_data(message, fn _ -> :updated end)]
end
{:ok, _broadway} =
Broadway.start_link(CustomHandlersWithHandleFailed,
name: broadway_name,
context: %{handle_message: handle_message, handle_failed: handle_failed},
producer: [module: {ManualProducer, []}],
processors: [default: []]
)
ref = Broadway.test_batch(broadway_name, [1, :fail], batch_mode: :flush)
assert_receive {:handle_failed_called, %Message{data: :fail}}
assert_receive {:ack, ^ref, [successful], [failed]}
assert successful.data == 1
assert failed.data == :updated
end
test "is called when the processor raises when handling the message" do
broadway_name = new_unique_name()
test_pid = self()
handle_message = fn
%{data: :fail}, _ -> raise "forced failure"
message, _ -> message
end
handle_failed = fn [message], _context ->
send(test_pid, {:handle_failed_called, message})
[Message.update_data(message, fn _ -> :updated end)]
end
{:ok, _broadway} =
Broadway.start_link(CustomHandlersWithHandleFailed,
name: broadway_name,
context: %{handle_message: handle_message, handle_failed: handle_failed},
producer: [module: {ManualProducer, []}],
processors: [default: []]
)
assert capture_log(fn ->
ref = Broadway.test_batch(broadway_name, [1, :fail], batch_mode: :flush)
assert_receive {:handle_failed_called, %Message{data: :fail}}
assert_receive {:ack, ^ref, [successful], [failed]}
assert successful.data == 1
assert failed.data == :updated
end) =~ "(RuntimeError) forced failure"
end
test "is wrapped in try/catch to contain failures" do
broadway_name = new_unique_name()
handle_message = fn
%{data: :fail} = message, _ -> Message.failed(message, :some_reason)
end
handle_failed = fn [_message], _context ->
raise "error in handle_failed"
end
{:ok, _broadway} =
Broadway.start_link(CustomHandlersWithHandleFailed,
name: broadway_name,
context: %{handle_message: handle_message, handle_failed: handle_failed},
producer: [module: {ManualProducer, []}],
processors: [default: []]
)
assert capture_log(fn ->
ref = Broadway.test_message(broadway_name, :fail)
assert_receive {:ack, ^ref, [], [%{data: :fail, status: {:failed, _}}]}
end) =~ "(RuntimeError) error in handle_failed"
end
end
  describe "handle_failed in the batcher" do
    test "is called for messages that are failed by the user" do
      broadway_name = new_unique_name()
      test_pid = self()

      # Fail every :fail message inside the batch handler.
      handle_batch = fn _, messages, _, _ ->
        Enum.map(messages, fn
          %{data: :fail} = message -> Message.failed(message, :failed)
          message -> message
        end)
      end

      handle_failed = fn messages, _context ->
        send(test_pid, {:handle_failed_called, messages})
        Enum.map(messages, &Message.put_data(&1, :updated))
      end

      {:ok, _broadway} =
        Broadway.start_link(CustomHandlersWithHandleFailed,
          name: broadway_name,
          context: %{
            handle_message: fn message, _context -> message end,
            handle_batch: handle_batch,
            handle_failed: handle_failed
          },
          producer: [module: {ManualProducer, []}],
          processors: [default: []],
          batchers: [default: []]
        )

      # capture_log == "" asserts nothing is logged: user-initiated failures
      # handled by handle_failed are not error conditions.
      assert capture_log(fn ->
               ref = Broadway.test_batch(broadway_name, [1, :fail, :fail, 2], batch_mode: :flush)

               assert_receive {:handle_failed_called, messages}
               assert [%Message{data: :fail}, %Message{data: :fail}] = messages

               assert_receive {:ack, ^ref, successful, failed}
               assert [%{data: 1}, %{data: 2}] = successful
               assert [%{data: :updated}, %{data: :updated}] = failed
             end) == ""
    end

    test "is called for the whole batch if handle_batch crashes" do
      broadway_name = new_unique_name()
      test_pid = self()

      handle_batch = fn _, _, _, _ ->
        raise "handle_batch failed"
      end

      handle_failed = fn messages, _context ->
        send(test_pid, {:handle_failed_called, messages})
        Enum.map(messages, &Message.put_data(&1, :updated))
      end

      assert capture_log(fn ->
               {:ok, _broadway} =
                 Broadway.start_link(CustomHandlersWithHandleFailed,
                   name: broadway_name,
                   context: %{
                     handle_message: fn message, _context -> message end,
                     handle_batch: handle_batch,
                     handle_failed: handle_failed
                   },
                   producer: [module: {ManualProducer, []}],
                   processors: [default: []],
                   batchers: [default: []]
                 )

               ref = Broadway.test_batch(broadway_name, [1, 2], batch_mode: :flush)

               # Both messages of the crashed batch go through handle_failed.
               assert_receive {:handle_failed_called, [_, _]}
               assert_receive {:ack, ^ref, _successful = [], failed}
               assert [%{data: :updated}, %{data: :updated}] = failed
             end) =~ "handle_batch failed"
    end

    test "is wrapped in try/catch to contain failures" do
      broadway_name = new_unique_name()

      handle_batch = fn _, messages, _, _ ->
        Enum.map(messages, &Message.failed(&1, :failed))
      end

      assert capture_log(fn ->
               {:ok, _broadway} =
                 Broadway.start_link(CustomHandlersWithHandleFailed,
                   name: broadway_name,
                   context: %{
                     handle_message: fn message, _context -> message end,
                     handle_batch: handle_batch,
                     # Crashing handle_failed must not crash the pipeline; the
                     # messages keep their original data and failed status.
                     handle_failed: fn _, _ -> raise "handle_failed failed" end
                   },
                   producer: [module: {ManualProducer, []}],
                   processors: [default: []],
                   batchers: [default: []]
                 )

               ref = Broadway.test_batch(broadway_name, [1, 2], batch_mode: :flush)

               assert_receive {:ack, ^ref, _successful = [], failed}
               assert [%{data: 1}, %{data: 2}] = failed
             end) =~ "(RuntimeError) handle_failed failed"
    end
  end
  describe "handle producer crash" do
    setup do
      test_pid = self()

      handle_message = fn message, _ ->
        send(test_pid, {:message_handled, message})
        message
      end

      context = %{
        handle_message: handle_message,
        handle_batch: fn _, batch, _, _ -> batch end
      }

      broadway_name = new_unique_name()

      # resubscribe_interval: 0 makes processors resubscribe immediately after
      # the producer restarts; ManualProducer reports back to the test pid via
      # {:producer_initialized, pid} so the restart can be awaited.
      {:ok, _broadway} =
        Broadway.start_link(CustomHandlers,
          name: broadway_name,
          resubscribe_interval: 0,
          context: context,
          producer: [
            module: {ManualProducer, %{test_pid: self()}}
          ],
          processors: [default: [concurrency: 1, min_demand: 1, max_demand: 2]],
          batchers: [default: [batch_size: 2]]
        )

      # Registered names of each stage, so tests can monitor/kill them.
      producer = get_producer(broadway_name)
      processor = get_processor(broadway_name, :default)
      batcher = get_batcher(broadway_name, :default)
      consumer = get_batch_processor(broadway_name, :default)

      %{
        broadway: broadway_name,
        producer: producer,
        processor: processor,
        batcher: batcher,
        consumer: consumer
      }
    end

    test "only the producer will be restarted",
         %{producer: producer, processor: processor, batcher: batcher, consumer: consumer} do
      ref_producer = Process.monitor(producer)
      ref_batcher = Process.monitor(batcher)
      ref_processor = Process.monitor(processor)
      ref_consumer = Process.monitor(consumer)

      GenStage.stop(producer)

      # The producer goes down and comes back; no other stage exits.
      assert_receive {:DOWN, ^ref_producer, _, _, _}
      assert_receive {:producer_initialized, ^producer}
      refute_received {:DOWN, ^ref_processor, _, _, _}
      refute_received {:DOWN, ^ref_batcher, _, _, _}
      refute_received {:DOWN, ^ref_consumer, _, _, _}
    end

    test "processors resubscribe to the restarted producers and keep processing messages",
         %{broadway: broadway, producer: producer} do
      # Wait for the initial subscription before pushing the first message.
      assert_receive {:producer_initialized, ^producer}
      Broadway.test_message(broadway, 1)
      assert_receive {:message_handled, %{data: 1}}

      GenStage.stop(producer)

      # After the restart the pipeline must process messages again.
      assert_receive {:producer_initialized, ^producer}
      Broadway.test_message(broadway, 2)
      assert_receive {:message_handled, %{data: 2}}
    end
  end
  describe "handle processor crash" do
    setup do
      test_pid = self()

      # The :kill_processor payload brutally kills the processor mid-handling,
      # before the message_handled notification is sent.
      handle_message = fn message, _ ->
        if message.data == :kill_processor do
          Process.exit(self(), :kill)
        end

        send(test_pid, {:message_handled, message})
        message
      end

      handle_batch = fn _, batch, _, _ ->
        send(test_pid, {:batch_handled, batch})
        batch
      end

      context = %{
        handle_message: handle_message,
        handle_batch: handle_batch
      }

      broadway_name = new_unique_name()

      {:ok, _broadway} =
        Broadway.start_link(CustomHandlers,
          name: broadway_name,
          context: context,
          producer: [module: {ManualProducer, []}],
          processors: [default: [concurrency: 1, min_demand: 1, max_demand: 2]],
          batchers: [default: [batch_size: 2]]
        )

      producer = get_producer(broadway_name)
      processor = get_processor(broadway_name, :default)
      batcher = get_batcher(broadway_name, :default)
      consumer = get_batch_processor(broadway_name, :default)

      %{
        broadway: broadway_name,
        producer: producer,
        processor: processor,
        batcher: batcher,
        consumer: consumer
      }
    end

    test "processor will be restarted in order to handle other messages",
         %{broadway: broadway, processor: processor} do
      Broadway.test_message(broadway, 1)
      assert_receive {:message_handled, %{data: 1}}

      pid = Process.whereis(processor)
      ref = Process.monitor(processor)
      Broadway.test_message(broadway, :kill_processor)
      assert_receive {:DOWN, ^ref, _, _, _}

      # A fresh process is registered under the same name and keeps working.
      Broadway.test_message(broadway, 2)
      assert_receive {:message_handled, %{data: 2}}
      assert Process.whereis(processor) != pid
    end

    test "processor crashes cascade down (but not up)", context do
      %{
        broadway: broadway,
        producer: producer,
        processor: processor,
        batcher: batcher,
        consumer: consumer
      } = context

      ref_batcher = Process.monitor(batcher)
      ref_processor = Process.monitor(processor)
      ref_producer = Process.monitor(producer)
      ref_consumer = Process.monitor(consumer)

      Broadway.test_message(broadway, :kill_processor)

      # Downstream stages (batcher, batch processor) die with the processor;
      # the upstream producer stays alive.
      assert_receive {:DOWN, ^ref_processor, _, _, _}
      assert_receive {:DOWN, ^ref_batcher, _, _, _}
      assert_receive {:DOWN, ^ref_consumer, _, _, _}
      refute_received {:DOWN, ^ref_producer, _, _, _}
    end

    test "only the messages in the crashing processor are lost", %{broadway: broadway} do
      Broadway.test_batch(broadway, [1, 2, :kill_processor, 3, 4, 5])

      # With max_demand: 2 the processor presumably takes [:kill_processor, 3]
      # together, so 3 is lost along with the kill — matches the refutes below.
      assert_receive {:message_handled, %{data: 1}}
      assert_receive {:message_handled, %{data: 2}}
      assert_receive {:message_handled, %{data: 4}}
      assert_receive {:message_handled, %{data: 5}}

      refute_received {:message_handled, %{data: :kill_processor}}
      refute_received {:message_handled, %{data: 3}}
    end

    test "batches are created normally (without the lost messages)", %{broadway: broadway} do
      Broadway.test_batch(broadway, [1, 2, :kill_processor, 3, 4, 5])

      assert_receive {:batch_handled, messages}
      values = messages |> Enum.map(& &1.data)
      assert values == [1, 2]

      assert_receive {:batch_handled, messages}
      values = messages |> Enum.map(& &1.data)
      assert values == [4, 5]
    end
  end
  describe "handle batcher crash" do
    setup do
      test_pid = self()
      broadway_name = new_unique_name()

      # Runs in the batch processor: looks up the *current* batcher pid at call
      # time (it may have been restarted), kills it when a :kill_batcher
      # payload is present, and reports the batch together with that pid.
      handle_batch = fn _, messages, _, _ ->
        pid = Process.whereis(get_batcher(broadway_name, :default))

        if Enum.any?(messages, fn msg -> msg.data == :kill_batcher end) do
          Process.exit(pid, :kill)
        end

        send(test_pid, {:batch_handled, messages, pid})
        messages
      end

      context = %{
        handle_batch: handle_batch,
        handle_message: fn message, _ -> message end
      }

      {:ok, _broadway} =
        Broadway.start_link(CustomHandlers,
          name: broadway_name,
          context: context,
          producer: [module: {ManualProducer, []}],
          processors: [default: [concurrency: 1, min_demand: 1, max_demand: 2]],
          batchers: [default: [batch_size: 2]]
        )

      producer = get_producer(broadway_name)
      processor = get_processor(broadway_name, :default)
      batcher = get_batcher(broadway_name, :default)
      consumer = get_batch_processor(broadway_name, :default)

      %{
        broadway: broadway_name,
        producer: producer,
        processor: processor,
        batcher: batcher,
        consumer: consumer
      }
    end

    test "batcher will be restarted in order to handle other messages", %{broadway: broadway} do
      Broadway.test_batch(broadway, [1, 2])
      assert_receive {:batch_handled, _, batcher1} when is_pid(batcher1)
      ref = Process.monitor(batcher1)

      # The kill happens while the batch is handled, so the batch itself still
      # arrives from the old batcher before the DOWN is observed.
      Broadway.test_batch(broadway, [:kill_batcher, 3])
      assert_receive {:batch_handled, _, ^batcher1}
      assert_receive {:DOWN, ^ref, _, _, _}

      Broadway.test_batch(broadway, [4, 5])
      assert_receive {:batch_handled, _, batcher2} when is_pid(batcher2)
      assert batcher1 != batcher2
    end

    test "batcher crashes cascade down (but not up)", context do
      %{
        broadway: broadway,
        producer: producer,
        processor: processor,
        batcher: batcher,
        consumer: consumer
      } = context

      ref_batcher = Process.monitor(batcher)
      ref_processor = Process.monitor(processor)
      ref_producer = Process.monitor(producer)
      ref_consumer = Process.monitor(consumer)

      Broadway.test_batch(broadway, [:kill_batcher, 3], batch_mode: :flush)

      # Only the batcher and its downstream batch processor go down.
      assert_receive {:DOWN, ^ref_batcher, _, _, _}
      assert_receive {:DOWN, ^ref_consumer, _, _, _}
      refute_received {:DOWN, ^ref_producer, _, _, _}
      refute_received {:DOWN, ^ref_processor, _, _, _}
    end

    test "only the messages in the crashing batcher are lost", %{broadway: broadway} do
      ref = Broadway.test_batch(broadway, [1, 2, :kill_batcher, 3, 4, 5, 6, 7])

      assert_receive {:ack, ^ref, successful, _failed}
      values = Enum.map(successful, & &1.data)
      assert values == [1, 2]

      assert_receive {:ack, ^ref, successful, _failed}
      values = Enum.map(successful, & &1.data)
      assert values == [:kill_batcher, 3]

      # 4 and 5 were buffered in the killed batcher and are never acked;
      # 6 and 7 reach the restarted batcher.
      assert_receive {:ack, ^ref, successful, _failed}
      values = Enum.map(successful, & &1.data)
      assert values == [6, 7]

      refute_received {:ack, _, _successful, _failed}
    end
  end
  # Error handling inside handle_batch/4: failures must be reported per ack_ref,
  # and raises/bad returns/linked crashes must fail the whole batch without
  # crashing the batch processor itself.
  describe "batcher" do
    # Pipeline whose batch handler reacts to sentinel data values: :fail marks a
    # message failed, :raise raises, :bad_return returns a non-list, and
    # :broken_link crashes a linked Task.
    setup do
      test_pid = self()
      handle_batch = fn _, batch, _, _ ->
        send(test_pid, {:batch_handled, batch})
        Enum.map(batch, fn message ->
          case message.data do
            :fail -> Message.failed(message, "Failed message")
            :raise -> raise "Error raised"
            :bad_return -> :oops
            :broken_link -> Task.async(fn -> raise "oops" end) |> Task.await()
            _ -> message
          end
        end)
      end
      context = %{
        handle_batch: handle_batch,
        handle_message: fn message, _ -> message end
      }
      broadway_name = new_unique_name()
      {:ok, _broadway} =
        Broadway.start_link(CustomHandlers,
          name: broadway_name,
          context: context,
          producer: [module: {ManualProducer, []}],
          processors: [default: [concurrency: 1, min_demand: 1, max_demand: 4]],
          batchers: [default: [batch_size: 4]]
        )
      consumer = get_batch_processor(broadway_name, :default)
      # Linking lets refute_received {:EXIT, ...} detect a consumer crash.
      Process.link(Process.whereis(consumer))
      %{broadway: broadway_name, consumer: consumer}
    end
    test "messages are grouped by ack_ref + status (successful or failed) before sent for acknowledgement",
         %{broadway: broadway} do
      Broadway.push_messages(broadway, [
        %Message{data: 1, acknowledger: {CallerAcknowledger, {self(), :ack_ref_1}, :ok}},
        %Message{data: :fail, acknowledger: {CallerAcknowledger, {self(), :ack_ref_2}, :ok}},
        %Message{data: :fail, acknowledger: {CallerAcknowledger, {self(), :ack_ref_1}, :ok}},
        %Message{data: 4, acknowledger: {CallerAcknowledger, {self(), :ack_ref_2}, :ok}}
      ])
      assert_receive {:ack, :ack_ref_1, [%{data: 1}], [%{data: :fail}]}
      assert_receive {:ack, :ack_ref_2, [%{data: 4}], [%{data: :fail}]}
    end
    test "all messages in the batch are marked as {kind, reason, stack} when an error is raised",
         %{broadway: broadway, consumer: consumer} do
      assert capture_log(fn ->
               ref = Broadway.test_batch(broadway, [1, 2, :raise, 3])
               assert_receive {:ack, ^ref, [],
                               [
                                 %{data: 1, status: {:error, _, _}},
                                 %{data: 2, status: {:error, _, _}},
                                 %{data: :raise, status: {:error, _, _}},
                                 %{data: 3, status: {:error, _, _}}
                               ]}
             end) =~ "[error] ** (RuntimeError) Error raised"
      refute_received {:EXIT, _, ^consumer}
    end
    test "all messages in the batch are marked as {kind, reason, stack} on bad return",
         %{broadway: broadway, consumer: consumer} do
      assert capture_log(fn ->
               ref = Broadway.test_batch(broadway, [1, 2, :bad_return, 3])
               assert_receive {:ack, ^ref, [],
                               [
                                 %{data: 1, status: {:error, _, _}},
                                 %{data: 2, status: {:error, _, _}},
                                 %{data: :bad_return, status: {:error, _, _}},
                                 %{data: 3, status: {:error, _, _}}
                               ]}
             end) =~ "[error]"
      refute_received {:EXIT, _, ^consumer}
    end
    test "consumers do not crash on bad acknowledger",
         %{broadway: broadway, consumer: consumer} do
      [one, two, raise, three] = wrap_messages([1, 2, :raise, 3])
      # Replace one acknowledger with a module that does not exist.
      raise = %{raise | acknowledger: {Unknown, :ack_ref, :ok}}
      assert capture_log(fn ->
               Broadway.push_messages(broadway, [one, two, raise, three])
               ref = Broadway.test_batch(broadway, [1, 2, 3, 4])
               assert_receive {:ack, ^ref, [%{data: 1}, %{data: 2}, %{data: 3}, %{data: 4}], []}
             end) =~ "[error] ** (UndefinedFunctionError) function Unknown.ack/3 is undefined"
      refute_received {:EXIT, _, ^consumer}
    end
    test "consumers do not crash on broken link",
         %{broadway: broadway, consumer: consumer} do
      assert capture_log(fn ->
               ref = Broadway.test_batch(broadway, [1, 2, :broken_link, 3])
               assert_receive {:ack, ^ref, [],
                               [
                                 %{data: 1, status: {:exit, _, _}},
                                 %{data: 2, status: {:exit, _, _}},
                                 %{data: :broken_link, status: {:exit, _, _}},
                                 %{data: 3, status: {:exit, _, _}}
                               ]}
             end) =~ "** (RuntimeError) oops"
      refute_received {:EXIT, _, ^consumer}
    end
  end
  # Graceful-shutdown semantics: stopping a pipeline must drain in-flight
  # messages (bounded by the :shutdown timeout) before the supervision tree
  # goes down.
  describe "shutdown" do
    # Traps exits so tests can assert on {:EXIT, pid, reason}; a :sleep message
    # blocks a processor forever to exercise the shutdown timeout.
    setup tags do
      Process.flag(:trap_exit, true)
      broadway_name = new_unique_name()
      handle_message = fn
        %{data: :sleep}, _ -> Process.sleep(:infinity)
        message, _ -> message
      end
      context = %{
        handle_message: handle_message,
        handle_batch: fn _, batch, _, _ -> batch end
      }
      {:ok, _} =
        Broadway.start_link(CustomHandlers,
          name: broadway_name,
          producer: [module: {ManualProducer, %{test_pid: self()}}],
          processors: [default: [concurrency: 1, min_demand: 1, max_demand: 4]],
          batchers: [default: [batch_size: 4]],
          context: context,
          shutdown: Map.get(tags, :shutdown, 5000)
        )
      producer = get_producer(broadway_name)
      %{broadway: broadway_name, producer: producer}
    end
    test "killing the supervisor brings down the broadway GenServer",
         %{broadway: broadway} do
      pid = Process.whereis(broadway)
      %{supervisor_pid: supervisor_pid} = :sys.get_state(pid)
      Process.exit(supervisor_pid, :kill)
      assert_receive {:EXIT, ^pid, :killed}
    end
    test "shutting down broadway waits until the Broadway.Supervisor is down",
         %{broadway: broadway} do
      %{supervisor_pid: supervisor_pid} = :sys.get_state(broadway)
      pid = Process.whereis(broadway)
      Process.exit(pid, :shutdown)
      assert_receive {:EXIT, ^pid, :shutdown}
      refute Process.alive?(supervisor_pid)
    end
    test "shutting down broadway waits until all events are processed",
         %{broadway: broadway, producer: producer} do
      # We suspend the producer to make sure that it doesn't process the messages early on
      :sys.suspend(producer)
      async_push_messages(producer, [1, 2, 3, 4])
      Process.exit(Process.whereis(broadway), :shutdown)
      :sys.resume(producer)
      assert_receive {:ack, _, [%{data: 1}, %{data: 2}, %{data: 3}, %{data: 4}], []}
    end
    test "shutting down broadway waits until all events are processed even on incomplete batches",
         %{broadway: broadway} do
      # Only 2 messages against batch_size: 4 — the partial batch must still flush.
      ref = Broadway.test_batch(broadway, [1, 2])
      Process.exit(Process.whereis(broadway), :shutdown)
      assert_receive {:ack, ^ref, [%{data: 1}, %{data: 2}], []}
    end
    test "shutting down broadway cancels producers and waits for messages sent during cancellation",
         %{broadway: broadway} do
      pid = Process.whereis(broadway)
      Process.exit(pid, :shutdown)
      # ManualProducer emits :message_during_cancel while being cancelled and
      # :message_after_cancel afterwards; only the former may be processed.
      assert_receive {:ack, _, [%{data: :message_during_cancel}], []}
      assert_receive {:EXIT, ^pid, :shutdown}
      refute_received {:ack, _, [%{data: :message_after_cancel}], []}
    end
    @tag shutdown: 1
    test "shutting down broadway respects shutdown value", %{broadway: broadway} do
      # The :sleep message blocks forever; the 1ms shutdown must force exit anyway.
      Broadway.test_batch(broadway, [:sleep, 1, 2, 3], batch_mode: :flush)
      pid = Process.whereis(broadway)
      Process.exit(pid, :shutdown)
      assert_receive {:EXIT, ^pid, :shutdown}
    end
  end
  # Producer rate limiting: allowed_messages per interval must cap throughput,
  # buffer demand while limited, and be inspectable/updatable at runtime.
  describe "rate limiting" do
    test "with an interval and exact allowed messages in that interval" do
      broadway_name = new_unique_name()
      {:ok, _broadway} =
        Broadway.start_link(Forwarder,
          name: broadway_name,
          producer: [
            module: {ManualProducer, []},
            concurrency: 1,
            rate_limiting: [allowed_messages: 1, interval: 10000]
          ],
          processors: [default: []],
          context: %{test_pid: self()}
        )
      producer = get_producer(broadway_name)
      send(
        producer,
        {:push_messages,
         [
           %Message{data: 1, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}}
         ]}
      )
      assert_receive {:ack, :ref, [_], []}
      # We "cheat" and manually tell the rate limiter to reset the limit.
      send(get_rate_limiter(broadway_name), :reset_limit)
      # Give a chance for other processes to run
      :erlang.yield()
      send(
        producer,
        {:push_messages,
         [
           %Message{data: 1, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}}
         ]}
      )
      assert_receive {:ack, :ref, [_], []}
    end
    test "with an interval and more than allowed messages in that interval" do
      broadway_name = new_unique_name()
      test_pid = self()
      handle_message = fn message, _ ->
        send(test_pid, {:handle_message_called, message, System.monotonic_time()})
        message
      end
      {:ok, _broadway} =
        Broadway.start_link(CustomHandlers,
          name: broadway_name,
          producer: [
            module: {ManualProducer, []},
            concurrency: 2,
            rate_limiting: [allowed_messages: 2, interval: 50]
          ],
          processors: [default: []],
          context: %{handle_message: handle_message}
        )
      # 5 messages with a limit of 2 per interval -> delivered in 3 waves.
      send(
        get_producer(broadway_name),
        {:push_messages,
         [
           %Message{data: 1, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}},
           %Message{data: 2, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}},
           %Message{data: 3, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}},
           %Message{data: 4, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}},
           %Message{data: 5, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}}
         ]}
      )
      assert_receive {:handle_message_called, %Message{data: 1}, timestamp1}
      assert_receive {:handle_message_called, %Message{data: 2}, _timestamp2}
      assert_receive {:handle_message_called, %Message{data: 3}, timestamp3}
      assert_receive {:handle_message_called, %Message{data: 4}, _timestamp4}
      assert_receive {:handle_message_called, %Message{data: 5}, timestamp5}
      assert_receive {:ack, :ref, [_, _], []}
      assert_receive {:ack, :ref, [_, _], []}
      assert_receive {:ack, :ref, [_], []}
      # To be safe, we're saying that these messages have to be at least 10ms apart
      # even if the interval is 50ms. This is because the time when the processor
      # receives the message is not the time when the producer produced it.
      assert System.convert_time_unit(timestamp3 - timestamp1, :native, :millisecond) >= 10
      assert System.convert_time_unit(timestamp5 - timestamp3, :native, :millisecond) >= 10
    end
    test "buffers demand and flushes it when the rate limiting is lifted" do
      broadway_name = new_unique_name()
      test_pid = self()
      handle_message = fn message, _ ->
        send(test_pid, {:handle_message_called, message, System.monotonic_time()})
        message
      end
      {:ok, _broadway} =
        Broadway.start_link(CustomHandlers,
          name: broadway_name,
          producer: [
            module: {ManualProducer, %{test_pid: test_pid}},
            concurrency: 1,
            # We use a long rate limiting interval so that we can trigger the rate limiting
            # manually from this test.
            rate_limiting: [allowed_messages: 2, interval: 10000]
          ],
          processors: [default: [concurrency: 1, max_demand: 2]],
          context: %{handle_message: handle_message}
        )
      send(
        get_producer(broadway_name),
        {:push_messages,
         [
           %Message{data: 1, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}},
           %Message{data: 2, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}},
           %Message{data: 3, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}}
         ]}
      )
      # First, we assert that handle_demand is called and that two events are emitted.
      assert_receive {:handle_demand_called, _demand, demand_timestamp1}
      assert_receive {:handle_message_called, %Message{data: 1}, timestamp1}
      assert_receive {:handle_message_called, %Message{data: 2}, _timestamp2}
      assert demand_timestamp1 < timestamp1
      # Now, since the rate limiting interval is long, we can assert that
      # handle_demand is not called and that the third message is not emitted yet.
      refute_received {:handle_demand_called, _demand, _timestamp}
      refute_received {:handle_message_called, %Message{data: 3}, _timestamp3}
      # We "cheat" and manually tell the rate limiter to reset the limit.
      send(get_rate_limiter(broadway_name), :reset_limit)
      assert_receive {:handle_demand_called, _demand, demand_timestamp2}
      assert_receive {:handle_message_called, %Message{data: 3}, timestamp3}
      assert demand_timestamp2 < timestamp3
      assert demand_timestamp2 > timestamp1
    end
    test "works when stopping a pipeline and draining producers" do
      Process.flag(:trap_exit, true)
      broadway_name = new_unique_name()
      test_pid = self()
      handle_message = fn message, _ ->
        send(test_pid, {:handle_message_called, message, System.monotonic_time()})
        message
      end
      {:ok, broadway} =
        Broadway.start_link(CustomHandlers,
          name: broadway_name,
          producer: [
            module: {ManualProducer, %{test_pid: test_pid}},
            concurrency: 1,
            # We use a long rate limiting interval so that we can trigger the rate limiting
            # manually from this test.
            rate_limiting: [allowed_messages: 2, interval: 10000]
          ],
          processors: [default: [concurrency: 1, max_demand: 2]],
          context: %{handle_message: handle_message}
        )
      # First, we use all the rate limiting we have available.
      send(
        get_producer(broadway_name),
        {:push_messages,
         [
           %Message{data: 1, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}},
           %Message{data: 2, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}},
           %Message{data: 3, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}}
         ]}
      )
      assert_receive {:handle_message_called, %Broadway.Message{data: 1}, timestamp1}
      assert_receive {:handle_message_called, %Broadway.Message{data: 2}, _timestamp2}
      # Then we stop the pipeline, triggering the :message_during_cancel message of
      # the ManualProducer.
      Process.exit(broadway, :shutdown)
      refute_received {:handle_message_called, %Broadway.Message{data: :message_during_cancel},
                       _timestamp}
      send(get_rate_limiter(broadway_name), :reset_limit)
      # Draining must still respect ordering: buffered data before cancel message.
      assert_receive {:handle_message_called, %Broadway.Message{data: 3}, timestamp3}, 4_000
      assert_receive {:handle_message_called, %Broadway.Message{data: :message_during_cancel},
                      timestamp_cancel},
                     4_000
      assert timestamp_cancel > timestamp1
      assert timestamp3 < timestamp_cancel
    end
    test "getting and updating the rate limit at runtime" do
      broadway_name = new_unique_name()
      test_pid = self()
      handle_message = fn message, _ ->
        send(test_pid, {:handle_message_called, message, System.monotonic_time()})
        message
      end
      {:ok, _broadway} =
        Broadway.start_link(CustomHandlers,
          name: broadway_name,
          producer: [
            module: {ManualProducer, []},
            concurrency: 2,
            rate_limiting: [allowed_messages: 1, interval: 5000]
          ],
          processors: [default: []],
          context: %{handle_message: handle_message}
        )
      assert Broadway.get_rate_limiting(broadway_name) ==
               {:ok, %{allowed_messages: 1, interval: 5000}}
      send(
        get_producer(broadway_name),
        {:push_messages,
         [
           %Message{data: 1, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}},
           %Message{data: 2, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}},
           %Message{data: 3, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}},
           %Message{data: 4, acknowledger: {CallerAcknowledger, {self(), :ref}, :unused}}
         ]}
      )
      assert_receive {:handle_message_called, %Message{data: 1}, _timestamp}
      refute_received {:handle_message_called, _message, _timestamp}
      # Raising the limit to 3 lets the remaining three through after a reset.
      assert :ok = Broadway.update_rate_limiting(broadway_name, allowed_messages: 3)
      assert Broadway.get_rate_limiting(broadway_name) ==
               {:ok, %{allowed_messages: 3, interval: 5000}}
      send(get_rate_limiter(broadway_name), :reset_limit)
      assert_receive {:handle_message_called, %Message{data: 2}, _timestamp}
      assert_receive {:handle_message_called, %Message{data: 3}, _timestamp}
      assert_receive {:handle_message_called, %Message{data: 4}, _timestamp}
    end
    test "invalid options" do
      {:ok, _broadway} =
        Broadway.start_link(CustomHandlers,
          name: broadway_name = new_unique_name(),
          producer: [module: {ManualProducer, []}],
          processors: [default: []]
        )
      assert_raise ArgumentError,
                   "invalid options, unknown options [:invalid_option], valid options are: [:allowed_messages, :interval]",
                   fn -> Broadway.update_rate_limiting(broadway_name, invalid_option: 3) end
    end
  end
  # Broadway.topology/1 must describe the running stages: producers,
  # processors and batchers, with their registered names and concurrency.
  describe "topology/1" do
    test "default config" do
      broadway = new_unique_name()
      Broadway.start_link(Forwarder,
        name: broadway,
        context: %{test_pid: self()},
        producer: [module: {ManualProducer, []}],
        processors: [default: []],
        batchers: [default: []]
      )
      topology = Broadway.topology(broadway)
      assert topology[:producers] == [%{name: :"#{broadway}.Broadway.Producer", concurrency: 1}]
      assert [
               %{
                 name: default_processor_name,
                 concurrency: processors_concurrency,
                 processor_key: :default
               }
             ] = topology[:processors]
      # Default processor concurrency is scheduler-dependent, so compare against
      # the actual supervisor child count instead of a constant.
      assert get_n_processors(broadway) == processors_concurrency
      assert default_processor_name == :"#{broadway}.Broadway.Processor_default"
      assert topology[:batchers] == [
               %{
                 name: :"#{broadway}.Broadway.BatchProcessor_default",
                 batcher_name: :"#{broadway}.Broadway.Batcher_default",
                 batcher_key: :default,
                 concurrency: 1
               }
             ]
    end
    test "without batchers" do
      broadway = new_unique_name()
      Broadway.start_link(Forwarder,
        name: broadway,
        context: %{test_pid: self()},
        producer: [module: {ManualProducer, []}],
        processors: [default: []]
      )
      topology = Broadway.topology(broadway)
      assert topology[:batchers] == []
    end
    test "with multiple batchers" do
      broadway = new_unique_name()
      Broadway.start_link(Forwarder,
        name: broadway,
        context: %{test_pid: self()},
        producer: [module: {ManualProducer, []}],
        processors: [default: [concurrency: 20]],
        batchers: [default: [concurrency: 12], b1: [concurrency: 13]]
      )
      topology = Broadway.topology(broadway)
      assert topology[:batchers] == [
               %{
                 batcher_name: :"#{broadway}.Broadway.Batcher_default",
                 name: :"#{broadway}.Broadway.BatchProcessor_default",
                 batcher_key: :default,
                 concurrency: 12
               },
               %{
                 batcher_name: :"#{broadway}.Broadway.Batcher_b1",
                 name: :"#{broadway}.Broadway.BatchProcessor_b1",
                 batcher_key: :b1,
                 concurrency: 13
               }
             ]
    end
  end
  # Broadway.all_running/0 must track pipelines as they start and stop.
  describe "all_running/0" do
    test "return running pipeline names" do
      broadway = new_unique_name()
      Broadway.start_link(Forwarder,
        name: broadway,
        context: %{test_pid: self()},
        producer: [module: {ManualProducer, []}],
        processors: [default: [concurrency: 20]],
        batchers: [default: [concurrency: 12], b1: [concurrency: 13]]
      )
      assert Broadway.all_running() == [broadway]
      broadway2 = new_unique_name()
      Broadway.start_link(Forwarder,
        name: broadway2,
        context: %{test_pid: self()},
        producer: [module: {ManualProducer, []}],
        processors: [default: [concurrency: 20]],
        batchers: [default: [concurrency: 12], b1: [concurrency: 13]]
      )
      # Registration order is not guaranteed, hence the sort.
      assert Enum.sort(Broadway.all_running()) == [broadway, broadway2]
      Broadway.stop(broadway2)
      assert Broadway.all_running() == [broadway]
      Broadway.stop(broadway)
      assert Broadway.all_running() == []
    end
  end
  # Broadway.stop/3 must accept either a pid or a registered name.
  describe "stop/3" do
    setup do
      {:ok, pid} =
        Broadway.start_link(Forwarder,
          name: broadway_name = new_unique_name(),
          context: %{test_pid: self()},
          producer: [module: {ManualProducer, []}],
          processors: [default: []]
        )
      %{pid: pid, broadway_name: broadway_name}
    end
    test "accepts a pid and shuts down broadway", %{pid: pid} do
      Broadway.stop(pid)
      refute Process.alive?(pid)
    end
    test "accepts a name and shuts down broadway", %{pid: pid, broadway_name: broadway_name} do
      Broadway.stop(broadway_name)
      refute Process.alive?(pid)
    end
  end
  # Pipelines may be named with {:via, Registry, ...} tuples as long as the
  # Broadway module implements process_name/2; all public APIs must then accept
  # the via tuple.
  describe "starting under a registry using via tuple" do
    setup do
      {:ok, _registry} = start_supervised({Registry, keys: :unique, name: MyRegistry})
      name = via_tuple(:broadway)
      {:ok, _broadway} =
        Broadway.start_link(UsesRegistry,
          name: name,
          context: %{test_pid: self()},
          producer: [
            module: {ManualProducer, []},
            rate_limiting: [allowed_messages: 1, interval: 5000]
          ],
          processors: [default: []],
          batchers: [default: []]
        )
      %{name: name}
    end
    test "names processes in topology using process_name/2", %{name: name} do
      assert is_pid(GenServer.whereis(name))
      assert is_pid(GenServer.whereis(via_tuple({:broadway, "Producer_0"})))
      assert is_pid(GenServer.whereis(via_tuple({:broadway, "Processor_default_0"})))
      assert is_pid(GenServer.whereis(via_tuple({:broadway, "Terminator"})))
    end
    test "Broadway.get_rate_limiting/1", %{name: name} do
      assert Broadway.get_rate_limiting(name) ==
               {:ok, %{allowed_messages: 1, interval: 5000}}
    end
    test "Broadway.all_running/1", %{name: name} do
      assert name in Broadway.all_running()
    end
    test "Broadway.producer_names/1", %{name: name} do
      assert Broadway.producer_names(name) == [via_tuple({:broadway, "Producer_0"})]
    end
    test "Broadway.topology/1", %{name: name} do
      producer_name = via_tuple({:broadway, "Producer"})
      processor_name = via_tuple({:broadway, "Processor_default"})
      batcher_name = via_tuple({:broadway, "Batcher_default"})
      batch_processor_name = via_tuple({:broadway, "BatchProcessor_default"})
      assert [
               {:producers, [%{name: ^producer_name}]},
               {:processors, [%{name: ^processor_name}]},
               {:batchers, [%{batcher_name: ^batcher_name, name: ^batch_processor_name}]}
             ] = Broadway.topology(name)
    end
    test "Broadway.test_message/2", %{name: name} do
      ref = Broadway.test_message(name, :message)
      assert_receive {:ack, ^ref, [%Message{data: :message}], []}
    end
    test "raises error if started with via tuple but process_name/2 is not defined" do
      Process.flag(:trap_exit, true)
      assert {:error, {%ArgumentError{} = error, _stacktrace}} =
               Broadway.start_link(Forwarder,
                 # Use a Broadway module that does not have process_name/2 defined
                 name: via_tuple({Forwarder, "1"}),
                 producer: [module: {ManualProducer, []}],
                 processors: [default: []],
                 batchers: [default: []]
               )
      assert Exception.message(error) =~
               "you must define process_name/2 in the module which uses Broadway"
    end
    # Helper local to this describe block: wraps a key for the MyRegistry registry.
    defp via_tuple(name), do: {:via, Registry, {MyRegistry, name}}
  end
defp new_unique_name() do
:"Elixir.Broadway#{System.unique_integer([:positive, :monotonic])}"
end
defp get_producer(broadway_name, index \\ 0) do
:"#{broadway_name}.Broadway.Producer_#{index}"
end
defp get_processor(broadway_name, key, index \\ 0) do
:"#{broadway_name}.Broadway.Processor_#{key}_#{index}"
end
defp get_batcher(broadway_name, key) do
:"#{broadway_name}.Broadway.Batcher_#{key}"
end
defp get_batch_processor(broadway_name, key, index \\ 0) do
:"#{broadway_name}.Broadway.BatchProcessor_#{key}_#{index}"
end
defp get_rate_limiter(broadway_name) do
:"#{broadway_name}.Broadway.RateLimiter"
end
defp get_n_producers(broadway_name) do
Supervisor.count_children(:"#{broadway_name}.Broadway.ProducerSupervisor").workers
end
defp get_n_processors(broadway_name) do
Supervisor.count_children(:"#{broadway_name}.Broadway.ProcessorSupervisor").workers
end
defp get_n_consumers(broadway_name, key) do
Supervisor.count_children(:"#{broadway_name}.Broadway.BatchProcessorSupervisor_#{key}").workers
end
defp async_push_messages(producer, list) do
send(
producer,
{:"$gen_call", {self(), make_ref()},
{Broadway.Topology.ProducerStage, :push_messages, wrap_messages(list)}}
)
end
defp wrap_messages(list) do
ack = {CallerAcknowledger, {self(), make_ref()}, :ok}
Enum.map(list, &%Message{data: &1, acknowledger: ack})
end
end
| 33.837641 | 144 | 0.597319 |
0832dfe80d62a2630cd06817b6be794502ecc131 | 2,956 | ex | Elixir | apps/fz_http/lib/fz_http_web.ex | mdp/firezone | 53d8f0803a7ef005fdca3ae8c6fa9c3483ae5cbc | [
"Apache-2.0"
] | null | null | null | apps/fz_http/lib/fz_http_web.ex | mdp/firezone | 53d8f0803a7ef005fdca3ae8c6fa9c3483ae5cbc | [
"Apache-2.0"
] | 1 | 2022-03-30T03:57:41.000Z | 2022-03-30T03:57:41.000Z | apps/fz_http/lib/fz_http_web.ex | amishakov/firezone | cd85b0847ac1792ca00aedab99fbf0f7a520f1a6 | [
"Apache-2.0"
] | null | null | null | defmodule FzHttpWeb do
@moduledoc """
The entrypoint for defining your web interface, such
as controllers, views, channels and so on.
This can be used in your application as:
use FzHttpWeb, :controller
use FzHttpWeb, :view
The definitions below will be executed for every view,
controller, etc, so keep them short and clean, focused
on imports, uses and aliases.
Do NOT define functions inside the quoted expressions
below. Instead, define any helper function in modules
and import those modules here.
"""
def controller do
quote do
use Phoenix.Controller, namespace: FzHttpWeb
import Plug.Conn
import FzHttpWeb.Gettext
import Phoenix.LiveView.Controller
import FzHttpWeb.ControllerHelpers
alias FzHttpWeb.Router.Helpers, as: Routes
end
end
def view do
quote do
use Phoenix.View,
root: "lib/fz_http_web/templates",
namespace: FzHttpWeb
# Import convenience functions from controllers
import Phoenix.Controller, only: [get_flash: 1, get_flash: 2, view_module: 1]
# Use all HTML functionality (forms, tags, etc)
use Phoenix.HTML
import FzHttpWeb.ErrorHelpers
import FzHttpWeb.AuthorizationHelpers
import FzHttpWeb.Gettext
import Phoenix.LiveView.Helpers
alias FzHttpWeb.Router.Helpers, as: Routes
def render_common(template, assigns \\ []) do
render(FzHttpWeb.CommonView, template, assigns)
end
end
end
def live_view do
quote do
use Phoenix.LiveView, layout: {FzHttpWeb.LayoutView, "live.html"}
import FzHttpWeb.LiveHelpers
alias Phoenix.LiveView.JS
@events_module Application.compile_env!(:fz_http, :events_module)
unquote(view_helpers())
end
end
def live_component do
quote do
use Phoenix.LiveComponent
@events_module Application.compile_env!(:fz_http, :events_module)
unquote(view_helpers())
end
end
def router do
quote do
use Phoenix.Router
import Plug.Conn
import Phoenix.Controller
import Phoenix.LiveView.Router
end
end
def channel do
quote do
use Phoenix.Channel
import FzHttpWeb.Gettext
end
end
defp view_helpers do
quote do
# Use all HTML functionality (forms, tags, etc)
use Phoenix.HTML
# Import LiveView helpers (live_render, live_component, live_patch, etc)
import Phoenix.LiveView.Helpers
# Import basic rendering functionality (render, render_layout, etc)
import Phoenix.View
# Authorization Helpers
import FzHttpWeb.AuthorizationHelpers
import FzHttpWeb.ErrorHelpers
import FzHttpWeb.Gettext
alias FzHttpWeb.Router.Helpers, as: Routes
end
end
@doc """
When used, dispatch to the appropriate controller/view/etc.
"""
defmacro __using__(which) when is_atom(which) do
apply(__MODULE__, which, [])
end
end
| 24.429752 | 83 | 0.692828 |
0832e818c79c125d05188cd805521fef7e30706d | 1,168 | ex | Elixir | lib/tmdb/people/people.ex | DonHansDampf/elixir-tmdb | 7a79350dfb0eeb5a37384be4ca9fac0570a3020e | [
"MIT"
] | null | null | null | lib/tmdb/people/people.ex | DonHansDampf/elixir-tmdb | 7a79350dfb0eeb5a37384be4ca9fac0570a3020e | [
"MIT"
] | null | null | null | lib/tmdb/people/people.ex | DonHansDampf/elixir-tmdb | 7a79350dfb0eeb5a37384be4ca9fac0570a3020e | [
"MIT"
] | null | null | null | defmodule Tmdb.People do
use Tmdb.Base
def find(id, params \\ %{}) do
get!("person/#{id}?#{URI.encode_query(params)}").body
end
def popular(params \\ %{}) do
get!("person/popular?#{URI.encode_query(params)}").body
end
def latest(params \\ %{}) do
get!("person/latest?#{URI.encode_query(params)}").body
end
def movie_credits(id, params \\ %{}) do
get!("person/#{id}/movie_credits?#{URI.encode_query(params)}").body
end
def tv_credits(id, params \\ %{}) do
get!("person/#{id}/tv_credits?#{URI.encode_query(params)}").body
end
def combined_credits(id, params \\ %{}) do
get!("person/#{id}/combined_credits?#{URI.encode_query(params)}").body
end
def external_ids(id, params \\ %{}) do
get!("person/#{id}/external_ids?#{URI.encode_query(params)}").body
end
def images(id, params \\ %{}) do
get!("person/#{id}/images?#{URI.encode_query(params)}").body
end
def tagged_images(id, params \\ %{}) do
get!("person/#{id}/tagged_images?#{URI.encode_query(params)}").body
end
def translations(id, params \\ %{}) do
get!("person/#{id}/translations?#{URI.encode_query(params)}").body
end
end
| 26.545455 | 74 | 0.634418 |
08337659763e7d4c90cf52297adfcca60027502a | 15,935 | exs | Elixir | test/mix_doctor_test.exs | kianmeng/doctor | 2b1eb4bc0698b96169f2ad0049010d5428b48c2e | [
"MIT"
] | null | null | null | test/mix_doctor_test.exs | kianmeng/doctor | 2b1eb4bc0698b96169f2ad0049010d5428b48c2e | [
"MIT"
] | null | null | null | test/mix_doctor_test.exs | kianmeng/doctor | 2b1eb4bc0698b96169f2ad0049010d5428b48c2e | [
"MIT"
] | null | null | null | defmodule Mix.Tasks.DoctorTest do
use ExUnit.Case
setup_all do
original_shell = Mix.shell()
Mix.shell(Mix.Shell.Process)
on_exit(fn ->
Mix.shell(original_shell)
end)
end
  # End-to-end runs of `mix doctor` against this very project, asserting on the
  # exact shell output of each reporter (full/short/summary). The expected
  # strings are whitespace-sensitive; do not reformat them.
  describe "mix doctor" do
    test "should output the full report when no params are provided" do
      Mix.Tasks.Doctor.run([])
      remove_at_exit_hook()
      doctor_output = get_shell_output()
      assert doctor_output == [
               ["Doctor file found. Loading configuration."],
               [
                 "---------------------------------------------------------------------------------------------------------------------------------------------------------------------------"
               ],
               [
                 "Doc Cov Spec Cov Module File Functions No Docs No Specs Module Doc Struct Spec"
               ],
               [
                 "100% 0% Doctor.CLI lib/cli/cli.ex 3 0 3 Yes N/A "
               ],
               [
                 "100% 0% Doctor.Config lib/config.ex 3 0 3 Yes Yes "
               ],
               [
                 "100% 0% Doctor.Docs lib/docs.ex 1 0 1 Yes Yes "
               ],
               [
                 "N/A N/A Doctor lib/doctor.ex 0 0 0 Yes N/A "
               ],
               [
                 "100% 100% Mix.Tasks.Doctor lib/mix/tasks/doctor.ex 1 0 0 Yes N/A "
               ],
               [
                 "100% 100% Mix.Tasks.Doctor.Explain lib/mix/tasks/doctor.explain.ex 1 0 0 Yes N/A "
               ],
               [
                 "100% 100% Mix.Tasks.Doctor.Gen.Config lib/mix/tasks/doctor.gen.config.ex 1 0 0 Yes N/A "
               ],
               [
                 "100% 0% Doctor.ModuleInformation lib/module_information.ex 4 0 4 Yes Yes "
               ],
               [
                 "100% 0% Doctor.ModuleReport lib/module_report.ex 1 0 1 Yes Yes "
               ],
               [
                 "100% 0% Doctor.ReportUtils lib/report_utils.ex 9 0 9 Yes N/A "
               ],
               [
                 "N/A N/A Doctor.Reporter lib/reporter.ex 0 0 0 Yes N/A "
               ],
               [
                 "100% 100% Doctor.Reporters.Full lib/reporters/full.ex 1 0 0 Yes N/A "
               ],
               [
                 "100% 0% Doctor.Reporters.ModuleExplain lib/reporters/module_explain.ex 1 0 1 Yes N/A "
               ],
               [
                 "100% 0% Doctor.Reporters.OutputUtils lib/reporters/output_utils.ex 6 0 6 Yes N/A "
               ],
               [
                 "100% 100% Doctor.Reporters.Short lib/reporters/short.ex 1 0 0 Yes N/A "
               ],
               [
                 "100% 100% Doctor.Reporters.Summary lib/reporters/summary.ex 1 0 0 Yes N/A "
               ],
               [
                 "100% 0% Doctor.Specs lib/specs.ex 1 0 1 Yes Yes "
               ],
               [
                 "---------------------------------------------------------------------------------------------------------------------------------------------------------------------------"
               ],
               ["Summary:\n"],
               ["Passed Modules: 17"],
               ["Failed Modules: 0"],
               ["Total Doc Coverage: 100.0%"],
               ["Total Spec Coverage: 17.1%\n"],
               ["Doctor validation has passed!"]
             ]
    end
    test "should output the summary report along with an error when an invalid doctor file path is provided" do
      Mix.Tasks.Doctor.run(["--summary", "--config-file", "./not_a_real_file.exs"])
      remove_at_exit_hook()
      # The first shell line includes a dynamic path, so match it separately.
      [[first_line] | rest_doctor_output] = get_shell_output()
      assert first_line =~ "Doctor file not found at path"
      assert first_line =~ "not_a_real_file.exs"
      assert rest_doctor_output == [
               ["---------------------------------------------"],
               ["Summary:\n"],
               ["Passed Modules: 19"],
               ["Failed Modules: 6"],
               ["Total Doc Coverage: 83.3%"],
               ["Total Spec Coverage: 35.0%\n"],
               ["\e[31mDoctor validation has failed!\e[0m"]
             ]
    end
    test "should output the summary report when a doctor file path is provided" do
      Mix.Tasks.Doctor.run(["--summary", "--config-file", "./.doctor.exs"])
      remove_at_exit_hook()
      doctor_output = get_shell_output()
      assert doctor_output == [
               ["Doctor file found. Loading configuration."],
               ["---------------------------------------------"],
               ["Summary:\n"],
               ["Passed Modules: 17"],
               ["Failed Modules: 0"],
               ["Total Doc Coverage: 100.0%"],
               ["Total Spec Coverage: 17.1%\n"],
               ["Doctor validation has passed!"]
             ]
    end
    test "should output the summary report with the correct output if given the --summary flag" do
      Mix.Tasks.Doctor.run(["--summary"])
      remove_at_exit_hook()
      doctor_output = get_shell_output()
      assert doctor_output == [
               ["Doctor file found. Loading configuration."],
               ["---------------------------------------------"],
               ["Summary:\n"],
               ["Passed Modules: 17"],
               ["Failed Modules: 0"],
               ["Total Doc Coverage: 100.0%"],
               ["Total Spec Coverage: 17.1%\n"],
               ["Doctor validation has passed!"]
             ]
    end
    test "should output the short report with the correct output if given the --short flag" do
      Mix.Tasks.Doctor.run(["--short"])
      remove_at_exit_hook()
      doctor_output = get_shell_output()
      assert doctor_output == [
               ["Doctor file found. Loading configuration."],
               ["----------------------------------------------------------------------------------------------"],
               ["Doc Cov Spec Cov Functions Module Module Doc Struct Spec"],
               ["100% 0% 3 Doctor.CLI Yes N/A "],
               ["100% 0% 3 Doctor.Config Yes Yes "],
               ["100% 0% 1 Doctor.Docs Yes Yes "],
               ["N/A N/A 0 Doctor Yes N/A "],
               ["100% 100% 1 Mix.Tasks.Doctor Yes N/A "],
               ["100% 100% 1 Mix.Tasks.Doctor.Explain Yes N/A "],
               ["100% 100% 1 Mix.Tasks.Doctor.Gen.Config Yes N/A "],
               ["100% 0% 4 Doctor.ModuleInformation Yes Yes "],
               ["100% 0% 1 Doctor.ModuleReport Yes Yes "],
               ["100% 0% 9 Doctor.ReportUtils Yes N/A "],
               ["N/A N/A 0 Doctor.Reporter Yes N/A "],
               ["100% 100% 1 Doctor.Reporters.Full Yes N/A "],
               ["100% 0% 1 Doctor.Reporters.ModuleExplain Yes N/A "],
               ["100% 0% 6 Doctor.Reporters.OutputUtils Yes N/A "],
               ["100% 100% 1 Doctor.Reporters.Short Yes N/A "],
               ["100% 100% 1 Doctor.Reporters.Summary Yes N/A "],
               ["100% 0% 1 Doctor.Specs Yes Yes "],
               ["----------------------------------------------------------------------------------------------"],
               ["Summary:\n"],
               ["Passed Modules: 17"],
               ["Failed Modules: 0"],
               ["Total Doc Coverage: 100.0%"],
               ["Total Spec Coverage: 17.1%\n"],
               ["Doctor validation has passed!"]
             ]
    end
    test "should output the full report with the correct output if given the --full flag" do
      Mix.Tasks.Doctor.run(["--full"])
      remove_at_exit_hook()
      doctor_output = get_shell_output()
      assert doctor_output == [
               ["Doctor file found. Loading configuration."],
               [
                 "---------------------------------------------------------------------------------------------------------------------------------------------------------------------------"
               ],
               [
                 "Doc Cov Spec Cov Module File Functions No Docs No Specs Module Doc Struct Spec"
               ],
               [
                 "100% 0% Doctor.CLI lib/cli/cli.ex 3 0 3 Yes N/A "
               ],
               [
                 "100% 0% Doctor.Config lib/config.ex 3 0 3 Yes Yes "
               ],
               [
                 "100% 0% Doctor.Docs lib/docs.ex 1 0 1 Yes Yes "
               ],
               [
                 "N/A N/A Doctor lib/doctor.ex 0 0 0 Yes N/A "
               ],
               [
                 "100% 100% Mix.Tasks.Doctor lib/mix/tasks/doctor.ex 1 0 0 Yes N/A "
               ],
               [
                 "100% 100% Mix.Tasks.Doctor.Explain lib/mix/tasks/doctor.explain.ex 1 0 0 Yes N/A "
               ],
               [
                 "100% 100% Mix.Tasks.Doctor.Gen.Config lib/mix/tasks/doctor.gen.config.ex 1 0 0 Yes N/A "
               ],
               [
                 "100% 0% Doctor.ModuleInformation lib/module_information.ex 4 0 4 Yes Yes "
               ],
               [
                 "100% 0% Doctor.ModuleReport lib/module_report.ex 1 0 1 Yes Yes "
               ],
               [
                 "100% 0% Doctor.ReportUtils lib/report_utils.ex 9 0 9 Yes N/A "
               ],
               [
                 "N/A N/A Doctor.Reporter lib/reporter.ex 0 0 0 Yes N/A "
               ],
               [
                 "100% 100% Doctor.Reporters.Full lib/reporters/full.ex 1 0 0 Yes N/A "
               ],
               [
                 "100% 0% Doctor.Reporters.ModuleExplain lib/reporters/module_explain.ex 1 0 1 Yes N/A "
               ],
               [
                 "100% 0% Doctor.Reporters.OutputUtils lib/reporters/output_utils.ex 6 0 6 Yes N/A "
               ],
               [
                 "100% 100% Doctor.Reporters.Short lib/reporters/short.ex 1 0 0 Yes N/A "
               ],
               [
                 "100% 100% Doctor.Reporters.Summary lib/reporters/summary.ex 1 0 0 Yes N/A "
               ],
               [
                 "100% 0% Doctor.Specs lib/specs.ex 1 0 1 Yes Yes "
               ],
               [
                 "---------------------------------------------------------------------------------------------------------------------------------------------------------------------------"
               ],
               ["Summary:\n"],
               ["Passed Modules: 17"],
               ["Failed Modules: 0"],
               ["Total Doc Coverage: 100.0%"],
               ["Total Spec Coverage: 17.1%\n"],
               ["Doctor validation has passed!"]
             ]
    end
  end
defp get_shell_output() do
{:messages, message_mailbox} = Process.info(self(), :messages)
Enum.map(message_mailbox, fn
{:mix_shell, :info, message} -> message
{:mix_shell, :error, message} -> message
end)
end
  # Removes the System.at_exit hook that the Doctor mix task registers so it
  # does not fire when this test VM shuts down.
  # NOTE(review): relies on the private :elixir_config API — verify it still
  # exposes :at_exit when upgrading Elixir.
  defp remove_at_exit_hook() do
    at_exit_hooks = :elixir_config.get(:at_exit)
    filtered_hooks =
      Enum.reject(at_exit_hooks, fn hook ->
        function_info = Function.info(hook)
        # Keep every hook except the ones defined in Mix.Tasks.Doctor.
        Keyword.get(function_info, :module) == Mix.Tasks.Doctor
      end)
    :elixir_config.put(:at_exit, filtered_hooks)
  end
end
| 57.320144 | 190 | 0.297584 |
0833787b39f7738c70deceb29c1dded9c4836c8c | 5,273 | exs | Elixir | test/payload_test.exs | scottming/absinthe_error_payload | 500f7fa2ad1c17eeba644e7922dd0275994f6f94 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | test/payload_test.exs | scottming/absinthe_error_payload | 500f7fa2ad1c17eeba644e7922dd0275994f6f94 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | test/payload_test.exs | scottming/absinthe_error_payload | 500f7fa2ad1c17eeba644e7922dd0275994f6f94 | [
"MIT",
"BSD-3-Clause"
defmodule AbsintheErrorPayload.PayloadTest do
  @moduledoc """
  Test conversion of changeset errors to ValidationMessage structs
  """
  use ExUnit.Case

  import Ecto.Changeset
  import AbsintheErrorPayload.Payload

  alias Absinthe.Resolution
  alias AbsintheErrorPayload.Payload
  alias AbsintheErrorPayload.ValidationMessage

  # Builds a minimal Absinthe.Resolution whose resolver result is `value`;
  # the remaining required struct fields are placeholder strings.
  def resolution(value) do
    %Resolution{
      value: value,
      adapter: "",
      context: "",
      root_value: "",
      schema: "",
      source: ""
    }
  end

  # Like resolution/1, but places `error` into the resolution's `:errors`
  # field instead of `:value`.
  defp resolution_error(error) do
    %Resolution{
      errors: error,
      adapter: "",
      context: "",
      root_value: "",
      schema: "",
      source: ""
    }
  end

  # Convenience constructor for the expected Payload struct used in assertions.
  def payload(successful, messages \\ [], result \\ nil) do
    %Payload{
      successful: successful,
      messages: messages,
      result: result
    }
  end

  # Asserts that `result` carries a failed Payload with the resolution's
  # errors cleared, and that every expected message (after field-name
  # conversion) appears among the payload's messages.
  def assert_error_payload(messages, result) do
    assert %{value: value, errors: []} = result
    expected = payload(false, messages)
    assert expected.successful == value.successful
    assert expected.result == value.result

    for message <- messages do
      # convert_field_name/1 is imported from AbsintheErrorPayload.Payload;
      # presumably it camelizes the :field/:key atoms — NOTE(review): confirm.
      message = convert_field_name(message)

      assert Enum.find_value(value.messages, &(message == &1)),
             "Expected to find \n#{inspect(message)}\n in \n#{inspect(value.messages)}"
    end
  end

  describe "build_payload/2" do
    test "validation message tuple" do
      message = %ValidationMessage{code: :required}
      resolution = resolution(message)
      result = build_payload(resolution, nil)
      assert_error_payload([message], result)
    end

    test "error, validation message tuple" do
      message = %ValidationMessage{code: :required}
      resolution = resolution({:error, message})
      result = build_payload(resolution, nil)
      assert_error_payload([message], result)
    end

    test "error, string message tuple" do
      resolution = resolution({:error, "an error"})
      result = build_payload(resolution, nil)
      # Bare string errors become :unknown-coded messages.
      message = %ValidationMessage{code: :unknown, message: "an error", template: "an error"}
      assert_error_payload([message], result)
    end

    test "error list" do
      messages = [%ValidationMessage{code: :required}, %ValidationMessage{code: :max}]
      resolution = resolution({:error, messages})
      result = build_payload(resolution, nil)

      assert %{value: value} = result
      expected = payload(false, messages)
      assert expected == value
    end

    test "error changeset" do
      # Schemaless changeset; :title_lang exercises snake_case -> camelCase
      # conversion of error field names.
      changeset =
        {%{}, %{title: :string, title_lang: :string}}
        |> Ecto.Changeset.cast(%{}, [:title, :title_lang])
        |> add_error(:title, "error 1")
        |> add_error(:title, "error 2")
        |> add_error(:title_lang, "error 3")

      resolution = resolution(changeset)
      result = build_payload(resolution, nil)

      messages = [
        %ValidationMessage{code: :unknown, message: "error 1", template: "error 1", field: :title, key: :title},
        %ValidationMessage{code: :unknown, message: "error 2", template: "error 2", field: :title, key: :title},
        %ValidationMessage{code: :unknown, message: "error 3", template: "error 3", field: :titleLang, key: :titleLang}
      ]

      assert_error_payload(messages, result)
    end

    test "valid changeset" do
      changeset =
        {%{}, %{title: :string, body: :string}}
        |> Ecto.Changeset.cast(%{}, [:title, :body])

      resolution = resolution(changeset)
      result = build_payload(resolution, nil)

      assert %{value: value} = result
      assert value.successful == true
      assert value.messages == []
      assert value.result == changeset
    end

    test "map" do
      # Any plain map resolves as a successful payload wrapping the map.
      map = %{something: "something"}
      resolution = resolution(map)
      result = build_payload(resolution, nil)

      assert %{value: value} = result
      assert value.successful == true
      assert value.messages == []
      assert value.result == map
    end

    test "error from resolution, validation message" do
      message = %ValidationMessage{code: :required}
      resolution = resolution_error([message])
      result = build_payload(resolution, nil)
      assert_error_payload([message], result)
    end

    test "error from resolution, string message" do
      resolution = resolution_error(["an error"])
      result = build_payload(resolution, nil)
      message = %ValidationMessage{code: :unknown, message: "an error", template: "an error"}
      assert_error_payload([message], result)
    end

    test "error from resolution, string message list" do
      resolution = resolution_error(["an error", "another error"])
      result = build_payload(resolution, nil)

      messages = [
        %ValidationMessage{code: :unknown, message: "an error", template: "an error"},
        %ValidationMessage{code: :unknown, message: "another error", template: "another error"}
      ]

      assert_error_payload(messages, result)
    end

    test "error from resolution, error list" do
      messages = [%ValidationMessage{code: :required}, %ValidationMessage{code: :max}]
      resolution = resolution_error(messages)
      result = build_payload(resolution, nil)
      assert %{value: _value} = result
      assert_error_payload(messages, result)
    end
  end
end
| 28.349462 | 119 | 0.644225 |
08337a173d185f0e0a732cb2f41cd087b52b75d5 | 1,220 | ex | Elixir | server/lib/idai_field_server_web/controllers/user_confirmation_controller.ex | felixwolter/idai-field | 146ab8dbdedb23035a4ba19eac95f02a1fa2329f | [
"Apache-2.0"
] | null | null | null | server/lib/idai_field_server_web/controllers/user_confirmation_controller.ex | felixwolter/idai-field | 146ab8dbdedb23035a4ba19eac95f02a1fa2329f | [
"Apache-2.0"
] | null | null | null | server/lib/idai_field_server_web/controllers/user_confirmation_controller.ex | felixwolter/idai-field | 146ab8dbdedb23035a4ba19eac95f02a1fa2329f | [
"Apache-2.0"
] | null | null | null | defmodule IdaiFieldServerWeb.UserConfirmationController do
use IdaiFieldServerWeb, :controller
alias IdaiFieldServer.Accounts
def new(conn, _params) do
render(conn, "new.html")
end
def create(conn, %{"user" => %{"email" => email}}) do
if user = Accounts.get_user_by_email(email) do
Accounts.deliver_user_confirmation_instructions(
user,
&Routes.user_confirmation_url(conn, :confirm, &1)
)
end
# Regardless of the outcome, show an impartial success/error message.
conn
|> put_flash(
:info,
"If your e-mail is in our system and it has not been confirmed yet, " <>
"you will receive an e-mail with instructions shortly."
)
|> redirect(to: "/")
end
# Do not log in the user after confirmation to avoid a
# leaked token giving the user access to the account.
def confirm(conn, %{"token" => token}) do
case Accounts.confirm_user(token) do
{:ok, _} ->
conn
|> put_flash(:info, "Account confirmed successfully.")
|> redirect(to: "/")
:error ->
conn
|> put_flash(:error, "Confirmation link is invalid or it has expired.")
|> redirect(to: "/")
end
end
end
| 27.727273 | 79 | 0.633607 |
0833a87ade993a754776dd9564b729e126310de2 | 16,470 | ex | Elixir | clients/compute/lib/google_api/compute/v1/api/region_notification_endpoints.ex | jamesvl/elixir-google-api | 6c87fb31d996f08fb42ce6066317e9d652a87acc | [
"Apache-2.0"
] | null | null | null | clients/compute/lib/google_api/compute/v1/api/region_notification_endpoints.ex | jamesvl/elixir-google-api | 6c87fb31d996f08fb42ce6066317e9d652a87acc | [
"Apache-2.0"
] | 1 | 2020-12-18T09:25:12.000Z | 2020-12-18T09:25:12.000Z | clients/compute/lib/google_api/compute/v1/api/region_notification_endpoints.ex | myskoach/elixir-google-api | 4f8cbc2fc38f70ffc120fd7ec48e27e46807b563 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Compute.V1.Api.RegionNotificationEndpoints do
  @moduledoc """
  API calls for all endpoints tagged `RegionNotificationEndpoints`.
  """

  # NOTE: Auto-generated by the elixir code generator (see the file header);
  # regenerate rather than hand-editing this module.

  alias GoogleApi.Compute.V1.Connection
  alias GoogleApi.Gax.{Request, Response}

  # Resolved once at compile time; falls back to "" when the mix project
  # declares no :version.
  @library_version Mix.Project.config() |> Keyword.get(:version, "")

  @doc """
  Deletes the specified NotificationEndpoint in the given region

  ## Parameters

  *   `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
  *   `project` (*type:* `String.t`) - Project ID for this request.
  *   `region` (*type:* `String.t`) - Name of the region scoping this request.
  *   `notification_endpoint` (*type:* `String.t`) - Name of the NotificationEndpoint resource to delete.
  *   `optional_params` (*type:* `keyword()`) - Optional parameters
      *   `:alt` (*type:* `String.t`) - Data format for the response.
      *   `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
      *   `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
      *   `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
      *   `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
      *   `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
      *   `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
      *   `:requestId` (*type:* `String.t`) - An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.
          For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.
          The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
  *   `opts` (*type:* `keyword()`) - Call options

  ## Returns

  *   `{:ok, %GoogleApi.Compute.V1.Model.Operation{}}` on success
  *   `{:error, info}` on failure
  """
  @spec compute_region_notification_endpoints_delete(
          Tesla.Env.client(),
          String.t(),
          String.t(),
          String.t(),
          keyword(),
          keyword()
        ) ::
          {:ok, GoogleApi.Compute.V1.Model.Operation.t()} | {:ok, Tesla.Env.t()} | {:error, any()}
  def compute_region_notification_endpoints_delete(
        connection,
        project,
        region,
        notification_endpoint,
        optional_params \\ [],
        opts \\ []
      ) do
    optional_params_config = %{
      :alt => :query,
      :fields => :query,
      :key => :query,
      :oauth_token => :query,
      :prettyPrint => :query,
      :quotaUser => :query,
      :userIp => :query,
      :requestId => :query
    }

    request =
      Request.new()
      |> Request.method(:delete)
      |> Request.url(
        "/projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}",
        %{
          "project" => URI.encode(project, &URI.char_unreserved?/1),
          "region" => URI.encode(region, &URI.char_unreserved?/1),
          # Unlike the other path params, "/" is intentionally left unescaped
          # so callers may pass a multi-segment resource name.
          "notificationEndpoint" =>
            URI.encode(notification_endpoint, &(URI.char_unreserved?(&1) || &1 == ?/))
        }
      )
      |> Request.add_optional_params(optional_params_config, optional_params)
      |> Request.library_version(@library_version)

    connection
    |> Connection.execute(request)
    |> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.Operation{}])
  end

  @doc """
  Returns the specified NotificationEndpoint resource in the given region.

  ## Parameters

  *   `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
  *   `project` (*type:* `String.t`) - Project ID for this request.
  *   `region` (*type:* `String.t`) - Name of the region scoping this request.
  *   `notification_endpoint` (*type:* `String.t`) - Name of the NotificationEndpoint resource to return.
  *   `optional_params` (*type:* `keyword()`) - Optional parameters
      *   `:alt` (*type:* `String.t`) - Data format for the response.
      *   `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
      *   `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
      *   `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
      *   `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
      *   `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
      *   `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
  *   `opts` (*type:* `keyword()`) - Call options

  ## Returns

  *   `{:ok, %GoogleApi.Compute.V1.Model.NotificationEndpoint{}}` on success
  *   `{:error, info}` on failure
  """
  @spec compute_region_notification_endpoints_get(
          Tesla.Env.client(),
          String.t(),
          String.t(),
          String.t(),
          keyword(),
          keyword()
        ) ::
          {:ok, GoogleApi.Compute.V1.Model.NotificationEndpoint.t()}
          | {:ok, Tesla.Env.t()}
          | {:error, any()}
  def compute_region_notification_endpoints_get(
        connection,
        project,
        region,
        notification_endpoint,
        optional_params \\ [],
        opts \\ []
      ) do
    optional_params_config = %{
      :alt => :query,
      :fields => :query,
      :key => :query,
      :oauth_token => :query,
      :prettyPrint => :query,
      :quotaUser => :query,
      :userIp => :query
    }

    request =
      Request.new()
      |> Request.method(:get)
      |> Request.url(
        "/projects/{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}",
        %{
          "project" => URI.encode(project, &URI.char_unreserved?/1),
          "region" => URI.encode(region, &URI.char_unreserved?/1),
          # "/" left unescaped — same multi-segment allowance as in delete/6.
          "notificationEndpoint" =>
            URI.encode(notification_endpoint, &(URI.char_unreserved?(&1) || &1 == ?/))
        }
      )
      |> Request.add_optional_params(optional_params_config, optional_params)
      |> Request.library_version(@library_version)

    connection
    |> Connection.execute(request)
    |> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.NotificationEndpoint{}])
  end

  @doc """
  Create a NotificationEndpoint in the specified project in the given region using the parameters that are included in the request.

  ## Parameters

  *   `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
  *   `project` (*type:* `String.t`) - Project ID for this request.
  *   `region` (*type:* `String.t`) - Name of the region scoping this request.
  *   `optional_params` (*type:* `keyword()`) - Optional parameters
      *   `:alt` (*type:* `String.t`) - Data format for the response.
      *   `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
      *   `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
      *   `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
      *   `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
      *   `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
      *   `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
      *   `:requestId` (*type:* `String.t`) - An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.
          For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.
          The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
      *   `:body` (*type:* `GoogleApi.Compute.V1.Model.NotificationEndpoint.t`) -
  *   `opts` (*type:* `keyword()`) - Call options

  ## Returns

  *   `{:ok, %GoogleApi.Compute.V1.Model.Operation{}}` on success
  *   `{:error, info}` on failure
  """
  @spec compute_region_notification_endpoints_insert(
          Tesla.Env.client(),
          String.t(),
          String.t(),
          keyword(),
          keyword()
        ) ::
          {:ok, GoogleApi.Compute.V1.Model.Operation.t()} | {:ok, Tesla.Env.t()} | {:error, any()}
  def compute_region_notification_endpoints_insert(
        connection,
        project,
        region,
        optional_params \\ [],
        opts \\ []
      ) do
    optional_params_config = %{
      :alt => :query,
      :fields => :query,
      :key => :query,
      :oauth_token => :query,
      :prettyPrint => :query,
      :quotaUser => :query,
      :userIp => :query,
      :requestId => :query,
      :body => :body
    }

    request =
      Request.new()
      |> Request.method(:post)
      |> Request.url("/projects/{project}/regions/{region}/notificationEndpoints", %{
        "project" => URI.encode(project, &URI.char_unreserved?/1),
        "region" => URI.encode(region, &URI.char_unreserved?/1)
      })
      |> Request.add_optional_params(optional_params_config, optional_params)
      |> Request.library_version(@library_version)

    connection
    |> Connection.execute(request)
    |> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.Operation{}])
  end

  @doc """
  Lists the NotificationEndpoints for a project in the given region.

  ## Parameters

  *   `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
  *   `project` (*type:* `String.t`) - Project ID for this request.
  *   `region` (*type:* `String.t`) - Name of the region scoping this request.
  *   `optional_params` (*type:* `keyword()`) - Optional parameters
      *   `:alt` (*type:* `String.t`) - Data format for the response.
      *   `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
      *   `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
      *   `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
      *   `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
      *   `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
      *   `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
      *   `:filter` (*type:* `String.t`) - A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `>`, or `<`.
          For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.
          You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.
          To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true) ```
      *   `:maxResults` (*type:* `integer()`) - The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)
      *   `:orderBy` (*type:* `String.t`) - Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.
          You can also sort results in descending order based on the creation timestamp using `orderBy="creationTimestamp desc"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.
          Currently, only sorting by `name` or `creationTimestamp desc` is supported.
      *   `:pageToken` (*type:* `String.t`) - Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.
      *   `:returnPartialSuccess` (*type:* `boolean()`) - Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.
  *   `opts` (*type:* `keyword()`) - Call options

  ## Returns

  *   `{:ok, %GoogleApi.Compute.V1.Model.NotificationEndpointList{}}` on success
  *   `{:error, info}` on failure
  """
  @spec compute_region_notification_endpoints_list(
          Tesla.Env.client(),
          String.t(),
          String.t(),
          keyword(),
          keyword()
        ) ::
          {:ok, GoogleApi.Compute.V1.Model.NotificationEndpointList.t()}
          | {:ok, Tesla.Env.t()}
          | {:error, any()}
  def compute_region_notification_endpoints_list(
        connection,
        project,
        region,
        optional_params \\ [],
        opts \\ []
      ) do
    optional_params_config = %{
      :alt => :query,
      :fields => :query,
      :key => :query,
      :oauth_token => :query,
      :prettyPrint => :query,
      :quotaUser => :query,
      :userIp => :query,
      :filter => :query,
      :maxResults => :query,
      :orderBy => :query,
      :pageToken => :query,
      :returnPartialSuccess => :query
    }

    request =
      Request.new()
      |> Request.method(:get)
      |> Request.url("/projects/{project}/regions/{region}/notificationEndpoints", %{
        "project" => URI.encode(project, &URI.char_unreserved?/1),
        "region" => URI.encode(region, &URI.char_unreserved?/1)
      })
      |> Request.add_optional_params(optional_params_config, optional_params)
      |> Request.library_version(@library_version)

    connection
    |> Connection.execute(request)
    |> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.NotificationEndpointList{}])
  end
end
| 49.311377 | 434 | 0.651609 |
0833b7552276a446aa5e15db8eeed4fde65681e5 | 425 | ex | Elixir | apps/teaching_management_web/lib/teaching_management_web/resolvers/student_resolver.ex | danielscosta/teaching_management | c703374a27174763d5309b9144ba09488eeb95c4 | [
"MIT"
] | 2 | 2020-07-28T14:10:41.000Z | 2020-10-20T20:32:27.000Z | apps/teaching_management_web/lib/teaching_management_web/resolvers/student_resolver.ex | danielscosta/teaching_management | c703374a27174763d5309b9144ba09488eeb95c4 | [
"MIT"
] | null | null | null | apps/teaching_management_web/lib/teaching_management_web/resolvers/student_resolver.ex | danielscosta/teaching_management | c703374a27174763d5309b9144ba09488eeb95c4 | [
"MIT"
] | null | null | null | defmodule TeachingManagementWeb.Resolvers.StudentResolver do
alias TeachingManagement.Accounts
alias TeachingManagement.Records
def list_students_by_group(_, args, _) do
{:ok, Accounts.list_students_by_group(args[:group_id])}
end
def create_student(_, args, _) do
groups = if is_nil(args[:group_id]), do: [], else: [Records.get_group!(args[:group_id])]
Accounts.create_student(args, groups)
end
end
| 30.357143 | 92 | 0.752941 |
0833d97e1f01e76fb6302ed6f50d32f7cf1cdcb6 | 2,970 | ex | Elixir | clients/secret_manager/lib/google_api/secret_manager/v1/model/audit_config.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | null | null | null | clients/secret_manager/lib/google_api/secret_manager/v1/model/audit_config.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | 1 | 2020-12-18T09:25:12.000Z | 2020-12-18T09:25:12.000Z | clients/secret_manager/lib/google_api/secret_manager/v1/model/audit_config.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | 1 | 2020-10-04T10:12:44.000Z | 2020-10-04T10:12:44.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.SecretManager.V1.Model.AuditConfig do
  @moduledoc """
  Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.

  ## Attributes

  *   `auditLogConfigs` (*type:* `list(GoogleApi.SecretManager.V1.Model.AuditLogConfig.t)`, *default:* `nil`) - The configuration for logging of each type of permission.
  *   `service` (*type:* `String.t`, *default:* `nil`) - Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :auditLogConfigs => list(GoogleApi.SecretManager.V1.Model.AuditLogConfig.t()),
          :service => String.t()
        }

  # field/1,2,3 are macros from GoogleApi.Gax.ModelBase: they register each
  # attribute's JSON (de)serialization mapping; :auditLogConfigs decodes into
  # a list of nested AuditLogConfig structs.
  field(:auditLogConfigs, as: GoogleApi.SecretManager.V1.Model.AuditLogConfig, type: :list)
  field(:service)
end
defimpl Poison.Decoder, for: GoogleApi.SecretManager.V1.Model.AuditConfig do
  # Delegates to the generated model's own decode/2, which rehydrates nested
  # AuditLogConfig structs.
  def decode(value, options),
    do: GoogleApi.SecretManager.V1.Model.AuditConfig.decode(value, options)
end
defimpl Poison.Encoder, for: GoogleApi.SecretManager.V1.Model.AuditConfig do
  # Encoding is shared across all generated models via ModelBase.
  def encode(value, options), do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 59.4 | 1,106 | 0.748485 |
0833daf33214349af2c9e004430aa84d690947ce | 1,685 | ex | Elixir | lib/metrics/socket_instrumenter.ex | SwiftAusterity/gossip | d79c53acd02fcb9905acb9730e59065efdd5a589 | [
"MIT"
] | null | null | null | lib/metrics/socket_instrumenter.ex | SwiftAusterity/gossip | d79c53acd02fcb9905acb9730e59065efdd5a589 | [
"MIT"
] | null | null | null | lib/metrics/socket_instrumenter.ex | SwiftAusterity/gossip | d79c53acd02fcb9905acb9730e59065efdd5a589 | [
"MIT"
] | null | null | null | defmodule Metrics.SocketInstrumenter do
@moduledoc """
Instrumentation for sockets
"""
use Prometheus.Metric
@doc false
def setup() do
Gauge.declare(
name: :gossip_socket_count,
help: "Number of sockets connected to gossip"
)
Counter.declare(
name: :gossip_socket_heartbeat_count,
help: "Total count of heartbeats"
)
Counter.declare(
name: :gossip_socket_heartbeat_disconnect_count,
help: "Total count of disconnects due to heartbeat misses"
)
Counter.declare(
name: :gossip_socket_connect_count,
help: "Total count of sockets connecting"
)
Counter.declare(
name: :gossip_socket_connect_success_count,
help: "Total count of successful sockets connecting"
)
Counter.declare(
name: :gossip_socket_connect_failure_count,
help: "Total count of failed sockets connecting"
)
Counter.declare(
name: :gossip_socket_unknown_event_count,
help: "Total count of unknown events sent to the socket"
)
end
def set_sockets(count) do
Gauge.set([name: :gossip_socket_count], count)
end
def heartbeat() do
Counter.inc(name: :gossip_socket_heartbeat_count)
end
def heartbeat_disconnect() do
Counter.inc(name: :gossip_socket_heartbeat_disconnect_count)
end
def connect() do
Counter.inc(name: :gossip_socket_connect_count)
end
def connect_success() do
Counter.inc(name: :gossip_socket_connect_success_count)
end
def connect_failure() do
Counter.inc(name: :gossip_socket_connect_failure_count)
end
def unknown_event() do
Counter.inc(name: :gossip_socket_unknown_event_count)
end
end
| 22.77027 | 64 | 0.708012 |
0833fc1dbd11cc67bb173052338fc7ce8adefce2 | 289 | ex | Elixir | lib/elm_phoenix_web_socket_example.ex | phollyer/elm-phoenix-websocket-example | 147da038b5ca4f9304924124c546284f12ecfaa8 | [
"BSD-3-Clause"
] | null | null | null | lib/elm_phoenix_web_socket_example.ex | phollyer/elm-phoenix-websocket-example | 147da038b5ca4f9304924124c546284f12ecfaa8 | [
"BSD-3-Clause"
] | 2 | 2020-12-29T15:13:39.000Z | 2020-12-30T01:01:02.000Z | lib/elm_phoenix_web_socket_example.ex | phollyer/elm-phoenix-websocket-example | 147da038b5ca4f9304924124c546284f12ecfaa8 | [
"BSD-3-Clause"
] | null | null | null | defmodule ElmPhoenixWebSocketExample do
@moduledoc """
ElmPhoenixWebSocketExample keeps the contexts that define your domain
and business logic.
Contexts are also responsible for managing your data, regardless
if it comes from the database, an external API or others.
"""
end
| 28.9 | 71 | 0.785467 |
08341682f69f800ce79cdabfb5d59d647bf2ce27 | 862 | ex | Elixir | Microsoft.Azure.Management.Network/lib/microsoft/azure/management/network/model/operation_list_result.ex | chgeuer/ex_microsoft_azure_management | 99cd9f7f2ff1fdbe69ca5bac55b6e2af91ba3603 | [
"Apache-2.0"
] | 4 | 2018-09-29T03:43:15.000Z | 2021-04-01T18:30:46.000Z | Microsoft.Azure.Management.Network/lib/microsoft/azure/management/network/model/operation_list_result.ex | chgeuer/ex_microsoft_azure_management | 99cd9f7f2ff1fdbe69ca5bac55b6e2af91ba3603 | [
"Apache-2.0"
] | null | null | null | Microsoft.Azure.Management.Network/lib/microsoft/azure/management/network/model/operation_list_result.ex | chgeuer/ex_microsoft_azure_management | 99cd9f7f2ff1fdbe69ca5bac55b6e2af91ba3603 | [
"Apache-2.0"
] | null | null | null | # NOTE: This class is auto generated by the swagger code generator program.
# https://github.com/swagger-api/swagger-codegen.git
# Do not edit the class manually.
defmodule Microsoft.Azure.Management.Network.Model.OperationListResult do
@moduledoc """
Result of the request to list Network operations. It contains a list of operations and a URL link to get the next set of results.
"""
@derive [Poison.Encoder]
defstruct [
:"value",
:"nextLink"
]
@type t :: %__MODULE__{
:"value" => [Operation],
:"nextLink" => String.t
}
end
defimpl Poison.Decoder, for: Microsoft.Azure.Management.Network.Model.OperationListResult do
import Microsoft.Azure.Management.Network.Deserializer
def decode(value, options) do
value
|> deserialize(:"value", :list, Microsoft.Azure.Management.Network.Model.Operation, options)
end
end
| 28.733333 | 131 | 0.727378 |
0834411231d2ee6c40897452027bc01833475ad7 | 2,019 | exs | Elixir | test/plug/cowboy/translator_test.exs | darrenclark/plug_cowboy | e3bc44285ed0db0e017d2a030c2c05e9baa849fe | [
"Apache-2.0"
] | null | null | null | test/plug/cowboy/translator_test.exs | darrenclark/plug_cowboy | e3bc44285ed0db0e017d2a030c2c05e9baa849fe | [
"Apache-2.0"
] | null | null | null | test/plug/cowboy/translator_test.exs | darrenclark/plug_cowboy | e3bc44285ed0db0e017d2a030c2c05e9baa849fe | [
"Apache-2.0"
] | null | null | null | defmodule Plug.Cowboy.TranslatorTest do
use ExUnit.Case
import ExUnit.CaptureLog
def init(opts) do
opts
end
def call(%{path_info: ["warn"]}, _opts) do
raise Plug.Parsers.UnsupportedMediaTypeError, media_type: "foo/bar"
end
def call(%{path_info: ["error"]}, _opts) do
raise "oops"
end
def call(%{path_info: ["linked"]}, _opts) do
fn -> GenServer.call(:i_dont_exist, :ok) end |> Task.async() |> Task.await()
end
test "ranch/cowboy 500 logs" do
{:ok, _pid} = Plug.Cowboy.http(__MODULE__, [], port: 9001)
output =
capture_log(fn ->
:hackney.get("http://127.0.0.1:9001/error", [], "", [])
Plug.Cowboy.shutdown(__MODULE__.HTTP)
end)
assert output =~ ~r"#PID<0\.\d+\.0> running Plug\.Cowboy\.TranslatorTest \(.*\) terminated"
assert output =~ "Server: 127.0.0.1:9001 (http)"
assert output =~ "Request: GET /"
assert output =~ "** (exit) an exception was raised:"
assert output =~ "** (RuntimeError) oops"
end
test "ranch/cowboy non-500 skips" do
{:ok, _pid} = Plug.Cowboy.http(__MODULE__, [], port: 9002)
output =
capture_log(fn ->
:hackney.get("http://127.0.0.1:9002/warn", [], "", [])
Plug.Cowboy.shutdown(__MODULE__.HTTP)
end)
refute output =~ ~r"#PID<0\.\d+\.0> running Plug\.Cowboy\.TranslatorTest \(.*\) terminated"
refute output =~ "Server: 127.0.0.1:9002 (http)"
refute output =~ "Request: GET /"
refute output =~ "** (exit) an exception was raised:"
end
test "ranch/cowboy linked logs" do
{:ok, _pid} = Plug.Cowboy.http(__MODULE__, [], port: 9003)
output =
capture_log(fn ->
:hackney.get("http://127.0.0.1:9003/linked", [], "", [])
Plug.Cowboy.shutdown(__MODULE__.HTTP)
end)
assert output =~
~r"Ranch protocol #PID<0\.\d+\.0> of listener Plug\.Cowboy\.TranslatorTest\.HTTP \(.*\) terminated"
assert output =~ "exited in: GenServer.call"
assert output =~ "** (EXIT) no process"
end
end
| 29.26087 | 112 | 0.603764 |
083447cc9dcecf2b874f3291a0b9485491ae14bf | 7,214 | ex | Elixir | lib/buckaroo.ex | IanLuites/buckaro | 41e60343f4cf11c3402bcf4ce843ece879f79959 | [
"MIT"
] | null | null | null | lib/buckaroo.ex | IanLuites/buckaro | 41e60343f4cf11c3402bcf4ce843ece879f79959 | [
"MIT"
] | null | null | null | lib/buckaroo.ex | IanLuites/buckaro | 41e60343f4cf11c3402bcf4ce843ece879f79959 | [
"MIT"
] | null | null | null | defmodule Buckaroo do
  @moduledoc ~S"""
  Simple `:cowboy` (v2) webserver with support for websockets.
  """
  alias Plug.Cowboy
  require Logger

  @doc ~S"""
  Setup a simple webserver handling HTTP requests including websockets.
  """
  @spec child_spec(Keyword.t()) :: Supervisor.child_spec()
  def child_spec(opts \\ []) do
    # The plain HTTP handler: `:plug` is required; `:opts` are its init options.
    # `router` is bound to the whole tuple while `plug` captures the module.
    router =
      {plug, _} =
      {opts[:plug] || raise("Need to set plug: ... to the plug router."), opts[:opts] || []}
    # Routers exporting `__sse__/0` serve Server-Sent Events; those connections
    # are long-lived, so warn unless the cowboy idle timeout is disabled.
    if {:__sse__, 0} in plug.__info__(:functions) and plug.__sse__() do
      p = opts[:protocol_options] || []
      unless p[:idle_timeout] == :infinity do
        Logger.warn(fn ->
          """
          Buckaroo: serving SSE events, but `:idle_timeout` is not set to `:infinity`.
          This causes SSE EventSources to disconnect after the timeout passes.
          Please pass `protocol_options: [idle_timeout: :infinity]`
          to prevent disconnect after idle timeout.
          Example:
          Buckaroo.child_spec(plug: MyRouter, protocol_options: [idle_timeout: :infinity])
          """
        end)
      end
    end
    # Optional websocket handler; when absent, the plug router handles upgrades.
    socket = if s = opts[:socket], do: {s, opts[:socket_opts] || opts[:opts] || []}
    options =
      [
        port: 3000,
        compress: true,
        # All requests are dispatched to this module, which picks between plain
        # plug handling, websocket upgrade, and SSE (cowboy loop handler).
        dispatch: [{:_, [{:_, __MODULE__, {socket || router, router}}]}]
      ]
      # NOTE(review): only :socket/:plug/:opts are dropped here, so :socket_opts
      # is forwarded to Plug.Cowboy as well — confirm that is intended.
      |> Keyword.merge(Keyword.drop(opts, ~w(socket plug opts)a))
      |> Keyword.update!(:port, &if(is_binary(&1), do: String.to_integer(&1), else: &1))
    Cowboy.child_spec([scheme: :http, plug: elem(router, 0)] ++ options)
  end

  ### Handler ###
  @connection Plug.Cowboy.Conn
  @already_sent {:plug_conn, :sent}
  @behaviour :cowboy_websocket

  # Entry point for every request. Websocket upgrade requests are routed to the
  # socket plug (method rewritten to "WEBSOCket"-style marker "WEBSOCKET" so
  # routers can match on it); everything else goes to the regular plug router.
  @impl :cowboy_websocket
  def init(req, {{socket, socket_opts}, {plug, plug_opts}}) do
    {conn, plug, opts} =
      if :cowboy_websocket.is_upgrade_request(req) do
        {%{@connection.conn(req) | method: "WEBSOCKET"}, socket, socket_opts}
      else
        {@connection.conn(req), plug, plug_opts}
      end
    try do
      conn = plug.call(conn, opts)
      # The plug signals websocket/SSE intent via conn.private[:websocket].
      case Map.get(conn.private, :websocket) do
        nil ->
          # Plain HTTP: response must have been set/sent by the plug.
          %{adapter: {@connection, req}} = maybe_send(conn, plug)
          {:ok, req, {plug, opts}}
        {:sse, {socket, opts}} ->
          sse_init(conn, socket, opts)
        {:sse, socket} ->
          sse_init(conn, socket)
        {socket, opts} ->
          {:cowboy_websocket, req, {socket, {conn, opts}}}
        socket ->
          {:cowboy_websocket, req, {socket, {conn, []}}}
      end
    catch
      kind, reason ->
        exit_on_error(kind, reason, __STACKTRACE__, {plug, :call, [conn, opts]})
    after
      # Drain the adapter's "already sent" notification so it does not leak
      # into the process mailbox.
      receive do
        @already_sent -> :ok
      after
        0 -> :ok
      end
    end
  end

  # Ensures the conn produced by the plug has been sent; mirrors Plug.Cowboy's
  # handler behaviour.
  defp maybe_send(%Plug.Conn{state: :unset}, _plug), do: raise(Plug.Conn.NotSentError)
  defp maybe_send(%Plug.Conn{state: :set} = conn, _plug), do: Plug.Conn.send_resp(conn)
  defp maybe_send(%Plug.Conn{} = conn, _plug), do: conn
  defp maybe_send(other, plug) do
    raise "Cowboy2 adapter expected #{inspect(plug)} to return Plug.Conn but got: " <>
            inspect(other)
  end

  # Re-raises plug failures as exits in the shape cowboy/ranch loggers expect.
  defp exit_on_error(
         :error,
         %Plug.Conn.WrapperError{kind: kind, reason: reason, stack: stack},
         _stack,
         call
       ) do
    exit_on_error(kind, reason, stack, call)
  end
  defp exit_on_error(:error, value, stack, call) do
    exception = Exception.normalize(:error, value, stack)
    :erlang.raise(:exit, {{exception, stack}, call}, [])
  end
  defp exit_on_error(:throw, value, stack, call) do
    :erlang.raise(:exit, {{{:nocatch, value}, stack}, call}, [])
  end
  defp exit_on_error(:exit, value, _stack, call) do
    :erlang.raise(:exit, {value, call}, [])
  end

  ### Handling Socket ###
  # Websocket callbacks delegate to the user's socket module; its results are
  # re-wrapped so the socket module stays in the handler state.
  @impl :cowboy_websocket
  def websocket_init({socket, {conn, state}}), do: conn |> socket.init(state) |> result(socket)
  @impl :cowboy_websocket
  def websocket_handle(frame, {socket, state}),
    do: frame |> socket.handle(state) |> result(socket)
  @impl :cowboy_websocket
  def websocket_info(info, {socket, state}),
    do: info |> socket.info(state) |> result(socket)

  # Note: terminate overlaps with loop handler (SSE) terminate
  @impl :cowboy_websocket
  def terminate(reason, req, state)
  def terminate(reason, req, {socket, state}) when is_atom(socket) do
    # Only forward when the socket module actually exports terminate/3.
    if :erlang.function_exported(socket, :terminate, 3),
      do: socket.terminate(reason, req, state),
      else: :ok
  end
  def terminate(_, _, _), do: :ok

  # Normalizes socket-module return tuples back into cowboy_websocket tuples.
  @spec result(tuple, module) :: tuple
  defp result({:stop, state}, socket), do: {:stop, {socket, state}}
  defp result({:ok, state}, socket), do: {:ok, {socket, state}}
  defp result({:ok, state, :hibernate}, socket), do: {:ok, {socket, state}, :hibernate}
  defp result({:reply, frame, state}, socket), do: {:reply, frame, {socket, state}}
  defp result({:reply, frame, state, :hibernate}, socket),
    do: {:reply, frame, {socket, state}, :hibernate}

  ## Loop Handler
  # Starts an SSE stream: initializes the handler, sends the event-stream
  # headers as a chunked response, and switches to cowboy's loop handler.
  @spec sse_init(term, module, term) :: tuple
  defp sse_init(c, handler, opts \\ []) do
    {:ok, conn, state} =
      case handler.init(c, opts) do
        {:ok, s} -> {:ok, c, s}
        {:ok, updated_c, s} -> {:ok, updated_c, s}
      end
    %{adapter: {_, req}} =
      conn
      |> Plug.Conn.put_resp_content_type("text/event-stream")
      |> Plug.Conn.put_resp_header("cache-control", "no-cache")
      |> Plug.Conn.send_chunked(200)
    {:cowboy_loop, req, {handler, state}, :hibernate}
  end

  # Loop-handler message callback: forwards process messages to the SSE handler.
  @spec info(term, map, {module, term}) :: tuple
  def info(:eof, req, s = {handler, state}) do
    handler.terminate(:eof, req, state)
    # NOTE(review): `s` is already `{handler, state}`, so this returns a
    # double-wrapped `{handler, {handler, state}}`; `terminate/3` will then
    # receive the wrapped state (and the handler's terminate runs twice).
    # Looks like this should be `{:stop, req, s}` — confirm before changing.
    {:stop, req, {handler, s}}
  end
  def info(msg, req, {handler, state}) do
    case handler.info(msg, state) do
      {:ok, s} -> {:ok, req, {handler, s}}
      {:ok, s, :hibernate} -> {:ok, req, {handler, s}, :hibernate}
      {:reply, events, s} -> {:ok, send_events(req, events), {handler, s}}
      {:reply, events, s, :hibernate} -> {:ok, send_events(req, events), {handler, s}, :hibernate}
      {:stop, s} -> {:stop, req, {handler, s}}
    end
  end

  # Streams one or more events to the client as non-final chunks.
  @spec send_events(map, [map] | map) :: map
  defp send_events(req, events) when is_list(events),
    do: Enum.reduce(events, req, &send_events(&2, &1))
  defp send_events(req, event) do
    :cowboy_req.stream_body(sse_event(event), :nofin, req)
    req
  end

  # Renders an event map (any combination of :id/:event/:retry around :data)
  # into the SSE wire format; bare terms become a plain data-only event.
  defp sse_event(%{id: id, event: event, retry: retry, data: data}),
    do: "id: #{id}\nevent: #{event}\nretry: #{retry}\ndata: #{data}\n\n"
  defp sse_event(%{id: id, event: event, data: data}),
    do: "id: #{id}\nevent: #{event}\ndata: #{data}\n\n"
  defp sse_event(%{event: event, retry: retry, data: data}),
    do: "event: #{event}\nretry: #{retry}\ndata: #{data}\n\n"
  defp sse_event(%{id: id, retry: retry, data: data}),
    do: "id: #{id}\nretry: #{retry}\ndata: #{data}\n\n"
  defp sse_event(%{id: id, data: data}), do: "id: #{id}\ndata: #{data}\n\n"
  defp sse_event(%{event: event, data: data}), do: "event: #{event}\ndata: #{data}\n\n"
  defp sse_event(%{retry: retry, data: data}), do: "retry: #{retry}\ndata: #{data}\n\n"
  defp sse_event(%{data: data}), do: "data: #{data}\n\n"
  defp sse_event(data), do: "data: #{data}\n\n"
end
| 32.062222 | 98 | 0.601331 |
0834724741c38ed2d19e53cd1ca358c6643e5a82 | 4,712 | exs | Elixir | test/test_helper.exs | olafz/mariaex | caa1ec6b251ddde395a610020629603d73aab8ea | [
"Apache-2.0"
] | null | null | null | test/test_helper.exs | olafz/mariaex | caa1ec6b251ddde395a610020629603d73aab8ea | [
"Apache-2.0"
] | null | null | null | test/test_helper.exs | olafz/mariaex | caa1ec6b251ddde395a610020629603d73aab8ea | [
"Apache-2.0"
] | null | null | null | mdb_socket = System.get_env("MDBSOCKET")
# Unix-socket tests require a Unix OS, OTP >= 19, and an existing socket file.
socket? =
  match?({:unix, _}, :os.type()) and List.to_integer(:erlang.system_info(:otp_release)) >= 19 and
    is_binary(mdb_socket) and File.exists?(mdb_socket)
# Exclude test groups whose prerequisites are not met in this environment.
ExUnit.configure(
  exclude: [
    ssl_tests: true,
    json: System.get_env("JSON_SUPPORT") != "true",
    socket: not socket?,
    geometry: System.get_env("GEOMETRY_SUPPORT") == "false"
  ]
)
ExUnit.start()
# Runs a shell command, accumulating its output in the process dictionary;
# returns {exit_status, output}.
run_cmd = fn cmd ->
  key = :ecto_setup_cmd_output
  Process.put(key, "")
  # TODO: Not relay on Mix.Shell.cmd
  # Mix.Shell.cmd changed arity across Elixir versions; pick whichever exists.
  cmd_fun =
    cond do
      function_exported?(Mix.Shell, :cmd, 2) -> &Mix.Shell.cmd/2
      function_exported?(Mix.Shell, :cmd, 3) -> &Mix.Shell.cmd(&1, [], &2)
    end
  status =
    cmd_fun.(cmd, fn data ->
      current = Process.get(key)
      Process.put(key, current <> data)
    end)
  output = Process.get(key)
  Process.put(key, "")
  {status, output}
end
# Optional root password switch for the mysql CLI.
mysql_pass_switch =
  if mysql_root_pass = System.get_env("MYSQL_ROOT_PASSWORD") do
    "-p#{mysql_root_pass}"
  else
    ""
  end
mysql_port = System.get_env("MDBPORT") || 3306
mysql_host = System.get_env("MDBHOST") || "localhost"
mysql_connect =
  "-u root #{mysql_pass_switch} --host=#{mysql_host} --port=#{mysql_port} --protocol=tcp"
# Smoke-test SQL run as the freshly created test user.
sql = """
CREATE TABLE test1 (id serial, title text);
INSERT INTO test1 VALUES(1, 'test');
INSERT INTO test1 VALUES(2, 'test2');
DROP TABLE test1;
"""
# MySQL 5.6 does not support CREATE USER IF NOT EXISTS.
create_user =
  case System.get_env("DB") do
    "mysql:5.6" -> "CREATE USER"
    _ -> "CREATE USER IF NOT EXISTS"
  end
# Recreate the test database and test user, then verify connectivity.
cmds = [
  ~s(mysql #{mysql_connect} -e "DROP DATABASE IF EXISTS mariaex_test;"),
  ~s(mysql #{mysql_connect} -e "CREATE DATABASE mariaex_test DEFAULT CHARACTER SET 'utf8' COLLATE 'utf8_general_ci';"),
  ~s(mysql #{mysql_connect} -e "#{create_user} 'mariaex_user'@'%' IDENTIFIED BY 'mariaex_pass';"),
  ~s(mysql #{mysql_connect} -e "GRANT ALL ON *.* TO 'mariaex_user'@'%' WITH GRANT OPTION"),
  ~s(mysql --host=#{mysql_host} --port=#{mysql_port} --protocol=tcp -u mariaex_user -pmariaex_pass mariaex_test -e "#{
    sql
  }")
]
Enum.each(cmds, fn cmd ->
  {status, output} = run_cmd.(cmd)
  IO.puts("--> #{output}")
  if status != 0 do
    IO.puts("""
    Command:
    #{cmd}
    error'd with:
    #{output}
    Please verify the user "root" exists and it has permissions to
    create databases and users.
    If the "root" user requires a password, set the environment
    variable MYSQL_ROOT_PASSWORD to its value.
    Beware that the password may be visible in the process list!
    """)
    System.halt(1)
  end
end)
defmodule Mariaex.TestHelper do
  @moduledoc """
  Macros used by the Mariaex test suite. Each macro expects a `context`
  variable in the caller's scope with a `:pid` key holding the connection.
  """

  # Runs a query; returns :ok for row-less results, the rows otherwise,
  # or the %Mariaex.Error{} on failure.
  defmacro query(stat, params, opts \\ []) do
    quote do
      pid = var!(context)[:pid]
      case Mariaex.Connection.query(pid, unquote(stat), unquote(params), unquote(opts)) do
        {:ok, %Mariaex.Result{rows: nil}} -> :ok
        {:ok, %Mariaex.Result{rows: rows}} -> rows
        {:error, %Mariaex.Error{} = err} -> err
      end
    end
  end

  # Like query/3, but forces the text protocol instead of prepared statements.
  defmacro execute_text(stat, params, opts \\ []) do
    quote do
      opts = [query_type: :text] ++ unquote(opts)
      case Mariaex.query(var!(context)[:pid], unquote(stat), unquote(params), opts) do
        {:ok, %Mariaex.Result{rows: nil}} -> :ok
        {:ok, %Mariaex.Result{rows: rows}} -> rows
        {:error, %Mariaex.Error{} = err} -> err
      end
    end
  end

  # Prepares and executes in one step, raising on failure.
  defmacro with_prepare!(name, stat, params, opts \\ []) do
    quote do
      conn = var!(context)[:pid]
      query = Mariaex.prepare!(conn, unquote(name), unquote(stat), unquote(opts))
      case Mariaex.execute!(conn, query, unquote(params)) do
        %Mariaex.Result{rows: nil} -> :ok
        %Mariaex.Result{rows: rows} -> rows
      end
    end
  end

  # Prepares a statement, returning the query or the error struct.
  defmacro prepare(stat, opts \\ []) do
    quote do
      case Mariaex.prepare(var!(context)[:pid], unquote(stat), unquote(opts)) do
        {:ok, %Mariaex.Query{} = query} -> query
        {:error, %Mariaex.Error{} = err} -> err
      end
    end
  end

  # Executes a prepared query; same result convention as query/3.
  defmacro execute(query, params, opts \\ []) do
    quote do
      case Mariaex.execute(var!(context)[:pid], unquote(query), unquote(params), unquote(opts)) do
        {:ok, %Mariaex.Result{rows: nil}} -> :ok
        {:ok, %Mariaex.Result{rows: rows}} -> rows
        {:error, %Mariaex.Error{} = err} -> err
      end
    end
  end

  # Closes a prepared query.
  defmacro close(query, opts \\ []) do
    quote do
      case Mariaex.close(var!(context)[:pid], unquote(query), unquote(opts)) do
        :ok -> :ok
        {:error, %Mariaex.Error{} = err} -> err
      end
    end
  end

  # Runs `fun` with the console logger detached to silence expected log noise.
  def capture_log(fun) do
    Logger.remove_backend(:console)
    fun.()
    Logger.add_backend(:console, flush: true)
  end

  # Encodes a row in the MySQL length-prefixed string wire format.
  # NOTE(review): uses String.length/1 (grapheme count) as the byte prefix —
  # only correct for ASCII values; confirm callers never pass multi-byte data.
  def length_encode_row(row) do
    Enum.map_join(row, &(<<String.length(&1)>> <> &1))
  end
end
0834a1cacabdb4b23c36b561eaa20728785ed533 | 1,549 | ex | Elixir | server/lib/connect_web/views/error_helpers.ex | rafbgarcia/conn | e23e09a8be900bc72c083780693d0fe5bd7cbd99 | [
"MIT"
] | null | null | null | server/lib/connect_web/views/error_helpers.ex | rafbgarcia/conn | e23e09a8be900bc72c083780693d0fe5bd7cbd99 | [
"MIT"
] | 3 | 2021-04-27T14:11:46.000Z | 2021-04-27T16:00:42.000Z | server/lib/connect_web/views/error_helpers.ex | rafbgarcia/conn | e23e09a8be900bc72c083780693d0fe5bd7cbd99 | [
"MIT"
defmodule ConnectWeb.ErrorHelpers do
  @moduledoc """
  Conveniences for translating and building error messages.
  """

  use Phoenix.HTML

  @doc """
  Generates tag for inlined form input errors.

  Returns one `<span class="invalid-feedback">` per error registered on
  `field` in the given form.
  """
  def error_tag(form, field) do
    for error <- Keyword.get_values(form.errors, field) do
      content_tag(:span, translate_error(error),
        class: "invalid-feedback",
        phx_feedback_for: input_name(form, field)
      )
    end
  end

  @doc """
  Translates an error message using gettext.

  Error messages come from Ecto as `{msg, opts}`; when `opts` carries a
  `:count`, the pluralized gettext variant is used so plural rules from the
  "errors" domain (errors.po) apply.
  """
  def translate_error({msg, opts}) do
    # When using gettext, we typically pass the strings we want
    # to translate as a static argument; because these messages are defined
    # inside Ecto, we translate them dynamically through our gettext backend.
    count = opts[:count]

    if count do
      Gettext.dngettext(ConnectWeb.Gettext, "errors", msg, msg, count, opts)
    else
      Gettext.dgettext(ConnectWeb.Gettext, "errors", msg, opts)
    end
  end
end
| 32.270833 | 76 | 0.665591 |
0834c1ad763b0dda55dc5fee9df3fe15bc305703 | 8,230 | ex | Elixir | lib/flickrex/flickr.ex | christopheradams/flickrex | 67e3a507e128c55969aeda3802f6dacd94bb03c5 | [
"MIT"
] | 13 | 2017-04-02T10:55:10.000Z | 2022-03-02T02:55:40.000Z | lib/flickrex/flickr.ex | christopheradams/flickrex | 67e3a507e128c55969aeda3802f6dacd94bb03c5 | [
"MIT"
] | null | null | null | lib/flickrex/flickr.ex | christopheradams/flickrex | 67e3a507e128c55969aeda3802f6dacd94bb03c5 | [
"MIT"
] | 1 | 2021-01-31T17:47:27.000Z | 2021-01-31T17:47:27.000Z | defmodule Flickrex.Flickr do
  @moduledoc """
  Flickr API Modules.

  These modules and functions map to the methods from the Flickr [API
  Documentation](https://www.flickr.com/services/api/).

  Arguments for the API functions should be strings, or integers (if the API
  accepts a number). Any additional `opts` will be set as params for the Rest
  operation.

  Each function returns an operation that can be executed with
  `Flickrex.request/2`.

  Some Flickr methods require user access tokens that were granted read, write,
  or delete permissions.

  ## Examples

  Get the five most recent public photos:

      get_recent = Flickrex.Flickr.Photos.get_recent(per_page: 5)
      {:ok, resp} = Flickrex.request(get_recent)
      %{"photos" => photos} = resp.body

  Test logging in as a user, by configuring the tokens for the request:

      config = [oauth_token: "...", oauth_token_secret: "..."]
      {:ok, resp} = Flickrex.Flickr.Test.login() |> Flickrex.request(config)
      %{"user" => user} = resp.body

  The API methods will return an error tuple if there was a problem with the
  request:

      {:error, resp} = Flickrex.Flickr.Photos.get_info(nil) |> Flickrex.request()
      resp.body == %{"code" => 1, "message" => "Photo not found", "stat" => "fail"}
  """

  # All code below runs at COMPILE time: it reads the vendored Flickr
  # reflection dump and generates one module per method namespace with one
  # function per API method.

  # Flickr directory
  flickr_dir = Path.join(File.cwd!(), "/lib/flickr")
  # A local copy of "flickr.reflection.getMethods"
  methods_file = Path.join(flickr_dir, "flickr.reflection.getMethods.json")
  # Directory holding a JSON info file for each method
  methods_dir = Path.join(flickr_dir, "getMethodInfo")
  methods_files = File.ls!(methods_dir)
  # Recompile this module when the source files change
  @external_resource methods_file
  for method_file <- methods_files do
    @external_resource Path.join(methods_dir, method_file)
  end
  # Group Flickr methods based on the module they will be included in
  methods_modules =
    methods_file
    |> File.read!()
    |> Jason.decode!()
    |> get_in(["methods", "method"])
    |> Enum.map(fn %{"_content" => m} -> m end)
    |> Enum.group_by(fn m -> m |> String.split(".") |> List.delete_at(-1) end)
  for {[_ | namespaces], methods} <- methods_modules do
    # Generate a module name for each method namespace, e.g.
    # `Flickr.Photos.People` for "flickr.photos.people"
    aliases = ["flickrex", "flickr"] ++ namespaces
    module =
      aliases
      |> Enum.map(&String.capitalize/1)
      |> Enum.map(&String.to_atom/1)
      |> Module.concat()
    defmodule module do
      alias Flickrex.Decoder
      @type arg :: String.Chars.t()
      @type opts :: Flickrex.Rest.args()
      @type operation :: Flickrex.Operation.Rest.t()
      for method <- methods do
        # Generate the function name from the method, e.g. `get_list` for
        # "flickr.photos.people.getList"
        function =
          method
          |> String.split(".")
          |> Enum.reverse()
          |> List.first()
          |> Macro.underscore()
          |> String.to_atom()
        method_file = "#{method}.json"
        # Per-method metadata from the local JSON dump; missing files yield %{}.
        method_info =
          case method_file in methods_files do
            true ->
              methods_dir
              |> Path.join(method_file)
              |> File.read!()
              |> Jason.decode!()
            false ->
              %{}
          end
        # Absolute-ify doc links and strip <br/> tags from the description.
        description =
          method_info
          |> get_in(["method", "description", "_content"])
          |> String.replace("\"/services/api/", "\"https://www.flickr.com/services/api/")
          |> String.replace(~r/<br\ ?\/>/, " ")
        needs_login = get_in(method_info, ["method", "needslogin"])
        requiredperms = get_in(method_info, ["method", "requiredperms"])
        # Normalize the sample response into a complete <rsp> XML document.
        method_response =
          case get_in(method_info, ["method", "response", "_content"]) do
            nil ->
              nil
            "<?xml" <> _rest = xml_doc ->
              xml_doc
            "<rsp" <> _rest = rsp_tag ->
              rsp_tag
            content_tag ->
              """
              <rsp stat="ok">
              #{content_tag}
              </rsp>
              """
          end
        # Parse the sample response (comments removed) for the docs.
        example_response =
          case method_response do
            nil ->
              nil
            response_content ->
              response_content
              |> String.replace(~r/<!-- .* -->/U, "")
              |> Decoder.XML.parse_data()
          end
        permission_code =
          if is_binary(requiredperms) do
            String.to_integer(requiredperms)
          else
            requiredperms
          end
        permission =
          case permission_code do
            0 -> "no"
            1 -> "read"
            2 -> "write"
            3 -> "delete"
          end
        # api_key is injected by the client, so it is not a user argument.
        arguments =
          method_info
          |> get_in(["arguments", "argument"])
          |> Enum.reject(fn a -> a["name"] == "api_key" end)
          |> Enum.map(fn a ->
            case is_binary(a["optional"]) do
              true -> Map.put(a, "optional", String.to_integer(a["optional"]))
              false -> a
            end
          end)
          |> Enum.map(fn argument ->
            content =
              argument
              |> Map.get("_content")
              |> String.replace("\n", " ")
              |> String.replace("\"/services/api/", "\"https://www.flickr.com/services/api/")
            Map.put(argument, "_content", content)
          end)
        {required_args, optional_args} =
          Enum.split_with(arguments, fn
            %{"optional" => 0} -> true
            _ -> false
          end)
        # AST variables for the generated function's required parameters.
        args_names = Enum.map(required_args, fn %{"name" => name} -> String.to_atom(name) end)
        args_vars = Enum.map(args_names, &Macro.var(&1, __MODULE__))
        args_specs = Enum.map(args_names, fn _ -> Macro.var(:arg, __MODULE__) end)
        args_params = Enum.zip(args_names, args_vars)
        # EEx template rendered into each generated function's @doc.
        doc_source = """
        <%= @description %>
        <%= if @needs_login == 1 do %>
        This method requires authentication with "<%= @permission %>" permission.
        <% else %>
        This method does not require authentication.
        <% end %>
        <%= if length(@required_args) > 0 do %>
        ## Arguments
        <% end %>
        <%= for arg <- @required_args do %>
        * `<%= arg["name"] %>` - <%= arg["_content"] %>
        <% end %>
        <%= if length(@optional_args) > 0 do %>
        ## Options
        <% end %>
        <%= for arg <- @optional_args do %>
        * `<%= arg["name"] %>` - <%= arg["_content"] %>
        <% end %>
        <%= unless is_nil(@example_response) do %>
        ## Example response
        ```elixir
        <%= inspect(@example_response, pretty: true) %>
        ```
        <% end %>
        """
        assigns = [
          description: description,
          required_args: required_args,
          optional_args: optional_args,
          example_response: example_response,
          needs_login: needs_login,
          permission: permission
        ]
        doc = EEx.eval_string(doc_source, assigns: assigns)
        # Read-only methods use GET; write/delete methods use POST.
        verb =
          case permission_code do
            0 -> :get
            1 -> :get
            2 -> :post
            3 -> :post
          end
        function_arity = length(required_args) + 1
        # Deprecated fallback clauses for methods with required arguments:
        # calling them with only an options list still works but warns.
        if function_arity > 1 do
          @doc false
          @spec unquote(function)() :: operation
          def unquote(function)() do
            unquote(function)([])
          end
          @doc false
          @spec unquote(function)(opts) :: operation
          def unquote(function)(opts) when is_list(opts) do
            IO.warn(
              "calling `#{unquote(function)}/1` with required arguments as options is " <>
                "deprecated. Use `#{unquote(function)}/#{unquote(function_arity)}` instead."
            )
            Flickrex.Rest.unquote(verb)(unquote(method), opts)
          end
        end
        @doc doc
        @spec unquote(function)(unquote_splicing(args_specs), opts) :: operation
        def unquote(function)(unquote_splicing(args_vars), opts \\ []) do
          Flickrex.Rest.unquote(verb)(unquote(method), unquote(args_params) ++ opts)
        end
      end
    end
  end
end
0834d9e4b5aac78ff45b89efd3ba75d99671da53 | 1,865 | ex | Elixir | lib/nebulex/helpers.ex | RudolfMan/nebulex | 9cf2a1b66d538ba66e8d0fd195485a32772fdf04 | [
"MIT"
] | 845 | 2017-02-14T14:16:11.000Z | 2022-03-30T04:13:08.000Z | lib/nebulex/helpers.ex | RudolfMan/nebulex | 9cf2a1b66d538ba66e8d0fd195485a32772fdf04 | [
"MIT"
] | 146 | 2017-04-29T16:11:14.000Z | 2022-03-29T08:49:05.000Z | lib/nebulex/helpers.ex | RudolfMan/nebulex | 9cf2a1b66d538ba66e8d0fd195485a32772fdf04 | [
"MIT"
defmodule Nebulex.Helpers do
  # Module for general purpose helpers.
  @moduledoc false

  ## API

  @doc """
  Fetches `key` from `opts` (falling back to `default`) and validates the
  value with `valid?`.

  Returns the value when `valid?.(value)` is truthy; otherwise raises an
  `ArgumentError` mentioning `expected` in the message.
  """
  @spec get_option(Keyword.t(), atom, String.t(), (any -> boolean), term) :: term
  def get_option(opts, key, expected, valid?, default \\ nil)
      when is_list(opts) and is_atom(key) do
    value = Keyword.get(opts, key, default)

    if valid?.(value) do
      value
    else
      raise ArgumentError, "expected #{key}: to be #{expected}, got: #{inspect(value)}"
    end
  end

  @doc """
  Fetches a boolean option from `opts`, defaulting to `default`.

  Raises `ArgumentError` when the value is not a boolean.
  """
  @spec get_boolean_option(Keyword.t(), atom, boolean) :: term
  def get_boolean_option(opts, key, default \\ false)
      when is_list(opts) and is_atom(key) and is_boolean(default) do
    # Delegates to get_option/5 so every option error shares the same
    # message format instead of duplicating the validation logic.
    get_option(opts, key, "boolean", &is_boolean/1, default)
  end

  @doc """
  Asserts `module` implements `behaviour`; returns `module` or raises
  `ArgumentError`.
  """
  @spec assert_behaviour(module, module, String.t()) :: module
  def assert_behaviour(module, behaviour, msg \\ "module") do
    if behaviour in module_behaviours(module, msg) do
      module
    else
      raise ArgumentError,
            "expected #{inspect(module)} to implement the behaviour #{inspect(behaviour)}"
    end
  end

  @doc """
  Returns the list of behaviours declared by `module`.

  Raises `ArgumentError` (using `msg` to describe the module's role) when the
  module cannot be compiled/loaded.
  """
  @spec module_behaviours(module, String.t()) :: [module]
  def module_behaviours(module, msg) do
    if Code.ensure_compiled(module) != {:module, module} do
      raise ArgumentError,
            "#{msg} #{inspect(module)} was not compiled, " <>
              "ensure it is correct and it is included as a project dependency"
    end

    for {:behaviour, behaviours} <- module.__info__(:attributes),
        behaviour <- behaviours,
        do: behaviour
  end

  @doc """
  Camelizes each segment and joins them into a module name, e.g.
  `[:my_app, "local_cache"]` becomes `MyApp.LocalCache`.
  """
  @spec normalize_module_name([atom | binary | number]) :: module
  def normalize_module_name(list) when is_list(list) do
    list
    |> Enum.map(&Macro.camelize("#{&1}"))
    |> Module.concat()
  end
end
| 30.57377 | 90 | 0.652547 |
0834e42679d806ca96608272d4c8b5949fe67b29 | 5,321 | exs | Elixir | examples/mnist_gan.exs | zeionara/axon | 8b82074b358043c0b685c7831458ec97f8edc378 | [
"Apache-2.0"
] | null | null | null | examples/mnist_gan.exs | zeionara/axon | 8b82074b358043c0b685c7831458ec97f8edc378 | [
"Apache-2.0"
] | null | null | null | examples/mnist_gan.exs | zeionara/axon | 8b82074b358043c0b685c7831458ec97f8edc378 | [
"Apache-2.0"
] | null | null | null | Mix.install([
  {:axon, github: "elixir-nx/axon"},
  {:exla, github: "elixir-nx/nx", sparse: "exla"},
  {:nx, github: "elixir-nx/nx", sparse: "nx", override: true},
  {:scidata, "~> 0.1.0"}
])
# Prefer accelerators when available, falling back to the CPU (:host).
EXLA.Client.set_preferred_platform(:default, [:tpu, :cuda, :rocm, :host])
defmodule MNISTGAN do
  @moduledoc """
  Trains a simple GAN on MNIST: a dense generator produces 28x28 images from
  100-dimensional noise, and a dense discriminator classifies real vs. fake.
  """

  require Axon
  alias Axon.Loop.State
  import Nx.Defn

  # Converts the raw MNIST blob into batches of shape {32, 1, 28, 28},
  # scaled to [0, 1].
  defp transform_images({bin, type, shape}) do
    bin
    |> Nx.from_binary(type)
    |> Nx.reshape({elem(shape, 0), 1, 28, 28})
    |> Nx.divide(255.0)
    |> Nx.to_batched_list(32)
  end

  # Generator: z_dim-dimensional noise -> {1, 28, 28} image in [-1, 1] (tanh).
  # NOTE(review): real images are scaled to [0, 1] above while tanh outputs
  # [-1, 1] — confirm the intended input range.
  defp build_generator(z_dim) do
    Axon.input({nil, z_dim})
    |> Axon.dense(256)
    |> Axon.relu()
    |> Axon.batch_norm()
    |> Axon.dense(512)
    |> Axon.relu()
    |> Axon.batch_norm()
    |> Axon.dense(1024)
    |> Axon.relu()
    |> Axon.batch_norm()
    |> Axon.dense(784)
    |> Axon.tanh()
    |> Axon.reshape({1, 28, 28})
  end

  # Discriminator: image -> 2-class softmax (fake vs. real).
  defp build_discriminator(input_shape) do
    Axon.input(input_shape)
    |> Axon.flatten()
    |> Axon.dense(512)
    |> Axon.relu()
    |> Axon.dense(256)
    |> Axon.relu()
    |> Axon.dense(2, activation: :softmax)
  end

  # Incremental mean: avg' = (avg * i + obs) / (i + 1).
  defnp running_average(avg, obs, i) do
    avg
    |> Nx.multiply(i)
    |> Nx.add(obs)
    |> Nx.divide(Nx.add(i, 1))
  end

  # Builds the initial training loop state: fresh model parameters and
  # optimizer states for both networks, plus running loss averages.
  defn init(d_model, g_model, init_optim_d, init_optim_g) do
    d_params = Axon.init(d_model)
    g_params = Axon.init(g_model)
    %{
      iteration: Nx.tensor(0),
      discriminator: %{
        model_state: d_params,
        optimizer_state: init_optim_d.(d_params),
        loss: Nx.tensor(0.0)
      },
      generator: %{
        model_state: g_params,
        optimizer_state: init_optim_g.(g_params),
        loss: Nx.tensor(0.0)
      }
    }
  end

  # One adversarial training step: updates the discriminator on a joint
  # real/fake batch, then updates the generator against the new discriminator.
  defn batch_step(d_model, g_model, optim_d, optim_g, real_images, state) do
    iter = state[:iteration]
    d_params = state[:discriminator][:model_state]
    g_params = state[:generator][:model_state]
    # Update D
    # One-hot labels: fake_labels selects column 1, real_labels column 0.
    fake_labels = Nx.iota({32, 2}, axis: 1)
    real_labels = Nx.reverse(fake_labels)
    noise = Nx.random_normal({32, 100})
    {d_loss, d_grads} = value_and_grad(d_params, fn params ->
      fake_images = Axon.predict(g_model, g_params, noise, mode: :train)
      d_fake_preds = Axon.predict(d_model, params, fake_images, mode: :train)
      d_real_preds = Axon.predict(d_model, params, real_images, mode: :train)
      joint_preds = Nx.concatenate([d_fake_preds, d_real_preds], axis: 0)
      joint_labels = Nx.concatenate([fake_labels, real_labels], axis: 0)
      Axon.Losses.categorical_cross_entropy(joint_labels, joint_preds, reduction: :mean)
    end)
    d_optimizer_state = state[:discriminator][:optimizer_state]
    {d_updates, d_optimizer_state} = optim_d.(d_grads, d_optimizer_state, d_params)
    d_params = Axon.Updates.apply_updates(d_params, d_updates)
    # Update G
    # The generator is rewarded when D labels its output as real.
    {g_loss, g_grads} = value_and_grad(g_params, fn params ->
      fake_images = Axon.predict(g_model, params, noise, mode: :train)
      d_preds = Axon.predict(d_model, d_params, fake_images)
      Axon.Losses.categorical_cross_entropy(real_labels, d_preds, reduction: :mean)
    end)
    g_optimizer_state = state[:generator][:optimizer_state]
    {g_updates, g_optimizer_state} = optim_g.(g_grads, g_optimizer_state, g_params)
    g_params = Axon.Updates.apply_updates(g_params, g_updates)
    %{
      iteration: iter + 1,
      discriminator: %{
        model_state: d_params,
        optimizer_state: d_optimizer_state,
        loss: running_average(state[:discriminator][:loss], d_loss, iter)
      },
      generator: %{
        model_state: g_params,
        optimizer_state: g_optimizer_state,
        loss: running_average(state[:generator][:loss], g_loss, iter)
      }
    }
  end

  # Wires the custom init/step into an Axon training loop; both networks use
  # Adam with beta1 = 0.5 (common for GAN training).
  defp train_loop(d_model, g_model) do
    {init_optim_d, optim_d} = Axon.Optimizers.adam(2.0e-3, b1: 0.5)
    {init_optim_g, optim_g} = Axon.Optimizers.adam(2.0e-3, b1: 0.5)
    step = &batch_step(d_model, g_model, optim_d, optim_g, &1, &2)
    init = fn -> init(d_model, g_model, init_optim_d, init_optim_g) end
    Axon.Loop.loop(step, init)
  end

  # Prints epoch/batch counters with the current running losses on one line.
  defp log_iteration(state) do
    %State{epoch: epoch, iteration: iter, step_state: pstate} = state
    g_loss = "G: #{:io_lib.format('~.5f', [Nx.to_scalar(pstate[:generator][:loss])])}"
    d_loss = "D: #{:io_lib.format('~.5f', [Nx.to_scalar(pstate[:discriminator][:loss])])}"
    IO.write("\rEpoch: #{Nx.to_scalar(epoch)}, batch: #{Nx.to_scalar(iter)} #{g_loss} #{d_loss}")
    {:continue, state}
  end

  # Samples `batch_size` images from the generator and renders them as
  # terminal heatmaps after each epoch.
  defp view_generated_images(model, batch_size, state) do
    %State{step_state: pstate} = state
    noise = Nx.random_normal({batch_size, 100})
    preds = Axon.predict(model, pstate[:generator][:model_state], noise, compiler: EXLA)
    preds
    |> Nx.reshape({batch_size, 28, 28})
    |> Nx.to_heatmap()
    |> IO.inspect()
    {:continue, state}
  end

  # Downloads MNIST, builds both networks, and runs 10 epochs of training.
  def run() do
    {images, _} = Scidata.MNIST.download(transform_images: &transform_images/1)
    generator = build_generator(100)
    discriminator = build_discriminator({nil, 1, 28, 28})
    discriminator
    |> train_loop(generator)
    |> Axon.Loop.handle(:iteration_completed, &log_iteration/1, every: 50)
    |> Axon.Loop.handle(:epoch_completed, &view_generated_images(generator, 3, &1))
    |> Axon.Loop.run(images, epochs: 10, compiler: EXLA)
  end
end
MNISTGAN.run() | 29.561111 | 97 | 0.650442 |
0834e4ea05aa1e97e7d4c398ccd7a950d8abc3e6 | 2,082 | ex | Elixir | lib/aws_auth/utils.ex | lulu-2021/aws_auth | 3645ace9fdb085c6627c8b7bfc2f6db2e8be058a | [
"Apache-2.0"
] | null | null | null | lib/aws_auth/utils.ex | lulu-2021/aws_auth | 3645ace9fdb085c6627c8b7bfc2f6db2e8be058a | [
"Apache-2.0"
] | null | null | null | lib/aws_auth/utils.ex | lulu-2021/aws_auth | 3645ace9fdb085c6627c8b7bfc2f6db2e8be058a | [
"Apache-2.0"
defmodule AWSAuth.Utils do
  @moduledoc false

  # Building blocks for AWS Signature Version 4 request signing.

  @doc """
  Builds the SigV4 canonical request string.

  `hashed_payload` is either the lowercase hex SHA-256 of the request body or
  the atom `:unsigned`, rendered as `UNSIGNED-PAYLOAD`.
  """
  def build_canonical_request(http_method, path, params, headers, hashed_payload) do
    # AWS requires spaces in the query string as %20, not "+".
    query_params = params |> URI.encode_query() |> String.replace("+", "%20")

    # Canonical headers: lowercase names, trimmed values, sorted by name.
    header_params =
      headers
      |> Enum.map(fn {key, value} -> "#{String.downcase(key)}:#{String.trim(value)}" end)
      |> Enum.sort(&(&1 < &2))
      |> Enum.join("\n")

    signed_header_params =
      headers
      |> Enum.map(fn {key, _} -> String.downcase(key) end)
      |> Enum.sort(&(&1 < &2))
      |> Enum.join(";")

    hashed_payload =
      if hashed_payload == :unsigned, do: "UNSIGNED-PAYLOAD", else: hashed_payload

    # Each path segment is encoded individually so the "/" separators survive.
    # NOTE(review): encode_www_form encodes spaces as "+", not "%20" as SigV4
    # specifies — preserved from the original; confirm before changing, since
    # it alters produced signatures.
    encoded_path =
      path
      |> String.split("/")
      |> Enum.map_join("/", &URI.encode_www_form/1)

    "#{http_method}\n#{encoded_path}\n#{query_params}\n#{header_params}\n\n#{signed_header_params}\n#{hashed_payload}"
  end

  @doc "Builds the SigV4 string to sign from the canonical request."
  def build_string_to_sign(canonical_request, timestamp, scope) do
    hashed_canonical_request = hash_sha256(canonical_request)
    "AWS4-HMAC-SHA256\n#{timestamp}\n#{scope}\n#{hashed_canonical_request}"
  end

  @doc "Derives the SigV4 signing key from the secret key, date, region and service."
  def build_signing_key(secret_key, date, region, service) do
    "AWS4#{secret_key}"
    |> hmac_sha256(date)
    |> hmac_sha256(region)
    |> hmac_sha256(service)
    |> hmac_sha256("aws4_request")
  end

  @doc "Hex-encodes the HMAC-SHA256 of the string to sign under the signing key."
  def build_signature(signing_key, string_to_sign) do
    signing_key
    |> hmac_sha256(string_to_sign)
    |> bytes_to_string()
  end

  @doc "Lowercase hex SHA-256 digest of `data`."
  def hash_sha256(data) do
    :sha256
    |> :crypto.hash(data)
    |> bytes_to_string()
  end

  @doc "Raw (binary) HMAC-SHA256 of `data` under `key`."
  def hmac_sha256(key, data) do
    # :crypto.hmac/3 was removed in OTP 24; :crypto.mac/4 is the replacement.
    :crypto.mac(:hmac, :sha256, key, data)
  end

  @doc "Encodes a binary digest as lowercase hex."
  def bytes_to_string(bytes) do
    Base.encode16(bytes, case: :lower)
  end

  @doc ~S"""
  Formats a NaiveDateTime as a basic ISO-8601 UTC timestamp,
  e.g. `20150830T123600Z` (fractional seconds dropped).
  """
  def format_time(time) do
    basic =
      time
      |> NaiveDateTime.truncate(:second)
      |> NaiveDateTime.to_iso8601()
      |> String.replace(["-", ":"], "")

    basic <> "Z"
  end

  @doc ~S"""
  Formats a NaiveDateTime's date part as `YYYYMMDD`.
  """
  def format_date(date) do
    date
    |> NaiveDateTime.to_date()
    |> Date.to_iso8601(:basic)
  end
end
0834f5758b2070f13186f9833700d78c22d60512 | 1,146 | ex | Elixir | lib/registries/entry_registry.ex | weareyipyip/CachedContentful | b967d46a65af529a453262c076d964f7ae6a57aa | [
"MIT"
] | 2 | 2018-06-08T02:35:18.000Z | 2018-06-08T02:35:33.000Z | lib/registries/entry_registry.ex | weareyipyip/CachedContentful | b967d46a65af529a453262c076d964f7ae6a57aa | [
"MIT"
] | null | null | null | lib/registries/entry_registry.ex | weareyipyip/CachedContentful | b967d46a65af529a453262c076d964f7ae6a57aa | [
"MIT"
] | null | null | null | defmodule CachedContentful.EntryRegistry do
use GenServer
require Logger
alias CachedContentful.RequestHandler
# Whether the registry should periodically refresh itself (default: off).
defp get_env_auto_update(), do: Application.get_env(:cached_contentful, :auto_update, false)
# Refresh interval in milliseconds. NOTE(review): the default
# `1 * 60 * 60 * 10000` is 36,000,000 ms (10 hours); `1 * 60 * 60 * 1000`
# (1 hour) looks like the intended value — TODO confirm.
defp get_env_update_interval(), do: Application.get_env(:cached_contentful, :update_interval, 1 * 60 * 60 * 10000)
# Starts the registry, registering the process under the caller-supplied `name`.
def start_link(name) do
GenServer.start_link(__MODULE__, :ok, name: name)
end
# Fetches the initial entry set synchronously; schedules the refresh timer
# when auto-update is enabled.
def init(:ok) do
if get_env_auto_update(), do: schedule_work()
entryData = RequestHandler.get_all_entries()
{:ok, entryData}
end
# Auto updater
# Arms a one-shot timer that delivers `:work` to this process after the
# configured interval; re-armed on each tick by handle_info/2.
defp schedule_work do
Process.send_after(self(), :work, get_env_update_interval())
end
# Timer tick: re-fetch entries and push them into the server via cast.
# NOTE(review): the cast targets `__MODULE__`, which only works when the
# process was started with `name: __MODULE__`; start_link/1 takes an
# arbitrary name, so these could disagree — verify against callers.
def handle_info(:work, state) do
entries = RequestHandler.get_all_entries()
GenServer.cast(__MODULE__, {:updateEntries, entries})
schedule_work()
{:noreply, state}
end
# Getters
# Replies with the cached entries; state is returned unchanged.
# The previous body wrapped the state in a `case` whose only live clause was
# a catch-all (the error branch was commented out), so it always matched and
# is collapsed here to a direct reply.
def handle_call(:getEntries, _from, entryData) do
  {:reply, entryData, entryData}
end
# Updates
# Replaces the entire cached entry set with `entries` (old state discarded).
def handle_cast({:updateEntries, entries}, _entryData) do
{:noreply, entries}
end
end | 24.382979 | 115 | 0.720768 |
083518780cef24deec2262ec5e122e17321276c6 | 1,790 | exs | Elixir | clients/vault/mix.exs | mcrumm/elixir-google-api | 544f22797cec52b3a23dfb6e39117f0018448610 | [
"Apache-2.0"
] | null | null | null | clients/vault/mix.exs | mcrumm/elixir-google-api | 544f22797cec52b3a23dfb6e39117f0018448610 | [
"Apache-2.0"
] | 1 | 2020-12-18T09:25:12.000Z | 2020-12-18T09:25:12.000Z | clients/vault/mix.exs | mcrumm/elixir-google-api | 544f22797cec52b3a23dfb6e39117f0018448610 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
# Mix project definition for the generated G Suite Vault API client.
# This file is produced by the elixir code generator (see header above);
# edits here will be overwritten on regeneration.
defmodule GoogleApi.Vault.Mixfile do
use Mix.Project
@version "0.18.0"
def project() do
[
app: :google_api_vault,
version: @version,
elixir: "~> 1.6",
build_embedded: Mix.env == :prod,
start_permanent: Mix.env == :prod,
description: description(),
package: package(),
deps: deps(),
source_url: "https://github.com/googleapis/elixir-google-api/tree/master/clients/vault"
]
end
def application() do
[extra_applications: [:logger]]
end
defp deps() do
[
{:google_gax, "~> 0.4"},
{:ex_doc, "~> 0.16", only: :dev}
]
end
# Hex package description.
defp description() do
"""
G Suite Vault API client library. Archiving and eDiscovery for G Suite.
"""
end
# Hex package metadata (files, maintainers, license, links).
defp package() do
[
files: ["lib", "mix.exs", "README*", "LICENSE"],
maintainers: ["Jeff Ching", "Daniel Azuma"],
licenses: ["Apache 2.0"],
links: %{
"GitHub" => "https://github.com/googleapis/elixir-google-api/tree/master/clients/vault",
"Homepage" => "https://developers.google.com/vault"
}
]
end
end
| 26.716418 | 96 | 0.648603 |
083521451f51409430f77fa5aecb6580cfd63c18 | 2,723 | ex | Elixir | lib/bunch/list.ex | geometerio/bunch | 2b8fe7f44881ef5e4a035080cd7dd3a59e86dbb9 | [
"Apache-2.0"
] | 12 | 2018-09-10T09:59:22.000Z | 2021-08-29T00:37:02.000Z | lib/bunch/list.ex | geometerio/bunch | 2b8fe7f44881ef5e4a035080cd7dd3a59e86dbb9 | [
"Apache-2.0"
] | 8 | 2018-09-03T08:29:47.000Z | 2022-03-27T16:28:08.000Z | lib/bunch/list.ex | geometerio/bunch | 2b8fe7f44881ef5e4a035080cd7dd3a59e86dbb9 | [
"Apache-2.0"
] | 2 | 2019-07-23T04:21:26.000Z | 2021-10-15T23:45:57.000Z | defmodule Bunch.List do
@moduledoc """
A bunch of helper functions for list manipulation.
"""
@doc """
Generates a list using generator function and provided inital accumulator.
Successive list elements are generated by calling `f` with the previous accumulator.
The enumeration finishes when it returns `:halt` or `{:halt, accumulator}`.
## Examples
iex> #{inspect(__MODULE__)}.unfoldr(10, fn 0 -> :halt; n -> {:cont, n-1} end)
Enum.to_list(9..0)
iex> f = fn
...> <<size, content::binary-size(size), rest::binary>> -> {:cont, content, rest}
...> binary -> {:halt, binary}
...> end
iex> #{inspect(__MODULE__)}.unfoldr(<<2, "ab", 3, "cde", 4, "fghi">>, f)
{~w(ab cde fghi), <<>>}
iex> #{inspect(__MODULE__)}.unfoldr(<<2, "ab", 3, "cde", 4, "fg">>, f)
{~w(ab cde), <<4, "fg">>}
"""
@spec unfoldr(acc, (acc -> {:cont, a, acc} | {:cont, acc} | :halt | {:halt, acc})) ::
[a] | {[a], acc}
when acc: any, a: any
def unfoldr(acc, f) do
  do_unfoldr(acc, f, [])
end

# Tail-recursive worker: collects generated elements in reverse and
# reverses once on termination. The generator drives the loop:
#   {:cont, element, seed} - emit element, continue with seed
#   {:cont, seed}          - emit seed itself, continue with it
#   {:halt, seed}          - stop, returning {elements, seed}
#   :halt                  - stop, returning just the elements
defp do_unfoldr(seed, generator, collected) do
  case generator.(seed) do
    {:cont, element, next_seed} -> do_unfoldr(next_seed, generator, [element | collected])
    {:cont, next_seed} -> do_unfoldr(next_seed, generator, [next_seed | collected])
    {:halt, final_seed} -> {Enum.reverse(collected), final_seed}
    :halt -> Enum.reverse(collected)
  end
end
@doc """
The version of `unfoldr/2` that res_accepts `:ok` and `:error` return tuples.
Behaves as `unfoldr/2` as long as `:ok` tuples are returned. Upon `:error`
the processing is stopped and error is returned.
## Examples
iex> f = fn
iex> <<a, b, rest::binary>> ->
iex> sum = a + b
iex> if rem(sum, 2) == 1, do: {:ok, {:cont, sum, rest}}, else: {:error, :even_sum}
iex> acc -> {:ok, {:halt, acc}}
iex> end
iex> #{inspect(__MODULE__)}.try_unfoldr(<<1,2,3,4,5>>, f)
{:ok, {[3, 7], <<5>>}}
iex> #{inspect(__MODULE__)}.try_unfoldr(<<2,4,6,8>>, f)
{:error, :even_sum}
"""
@spec try_unfoldr(
acc,
(acc ->
{:ok, {:cont, a, acc} | {:cont, acc} | :halt | {:halt, acc}} | {:error, reason})
) ::
{:ok, [a] | {[a], acc}} | {:error, reason}
when acc: any, a: any, reason: any
def try_unfoldr(acc, f) do
  do_try_unfoldr(acc, f, [])
end

# Worker for the ok/error-aware unfold: applies the generator and
# dispatches on its result via dedicated clauses. Any {:error, reason}
# aborts the whole unfold immediately.
defp do_try_unfoldr(seed, generator, collected) do
  seed
  |> generator.()
  |> step_try_unfoldr(generator, collected)
end

defp step_try_unfoldr({:ok, {:cont, element, seed}}, generator, collected),
  do: do_try_unfoldr(seed, generator, [element | collected])

defp step_try_unfoldr({:ok, {:cont, seed}}, generator, collected),
  do: do_try_unfoldr(seed, generator, [seed | collected])

defp step_try_unfoldr({:ok, {:halt, seed}}, _generator, collected),
  do: {:ok, {Enum.reverse(collected), seed}}

defp step_try_unfoldr({:ok, :halt}, _generator, collected),
  do: {:ok, Enum.reverse(collected)}

defp step_try_unfoldr({:error, reason}, _generator, _collected),
  do: {:error, reason}
end
| 32.807229 | 93 | 0.536173 |
083542084d13021f84e9b56135401dfaddd52df3 | 2,003 | exs | Elixir | apps/flair/mix.exs | PillarTechnology/smartcitiesdata | 9420a26820e38267513cd1bfa82c7f5583222bb1 | [
"Apache-2.0"
] | null | null | null | apps/flair/mix.exs | PillarTechnology/smartcitiesdata | 9420a26820e38267513cd1bfa82c7f5583222bb1 | [
"Apache-2.0"
] | null | null | null | apps/flair/mix.exs | PillarTechnology/smartcitiesdata | 9420a26820e38267513cd1bfa82c7f5583222bb1 | [
"Apache-2.0"
] | null | null | null | defmodule Flair.MixProject do
use Mix.Project
# Umbrella-app project definition: build artifacts, config, deps and
# lockfile are shared at the umbrella root (../../).
def project do
[
app: :flair,
version: "0.3.1",
elixir: "~> 1.8",
build_path: "../../_build",
config_path: "../../config/config.exs",
deps_path: "../../deps",
lockfile: "../../mix.lock",
start_permanent: Mix.env() == :prod,
test_coverage: [tool: ExCoveralls],
preferred_cli_env: [
coveralls: :test,
"coveralls.detail": :test,
"coveralls.post": :test,
"coveralls.html": :test
]
,
deps: deps(),
aliases: aliases(),
elixirc_paths: elixirc_paths(Mix.env()),
test_paths: test_paths(Mix.env())
]
end
# OTP application callback module and extra runtime applications.
def application do
[
extra_applications: [:logger],
mod: {Flair.Application, []}
]
end
defp deps do
[
{:elsa, "~> 0.10.0"},
{:flow, "~> 0.14"},
{:gen_stage, "~> 0.14"},
{:jason, "~> 1.1"},
{:prestige, "~> 0.3"},
{:retry, "~> 0.13.0"},
{:smart_city, "~> 3.0", override: true},
{:statistics, "~> 0.6"},
{:credo, "~> 1.1", only: :dev, runtime: false},
{:excoveralls, "~> 0.11", only: :dev},
{:ex_doc, "~> 0.21"},
{:divo, "~> 1.1", only: [:dev, :integration]},
{:divo_kafka, "~> 0.1", only: [:dev, :integration]},
{:placebo, "~> 1.2", only: [:dev, :test, :integration]},
{:faker, "~> 0.12", only: [:test, :integration], override: true},
{:mox, "~> 0.5.1", only: [:dev, :test, :integration]},
{:smart_city_test, "~> 0.8", only: [:test, :integration]},
{:distillery, "~> 2.1"},
{:pipeline, in_umbrella: true},
{:tasks, in_umbrella: true, only: :dev}
]
end
# `mix verify` runs the formatter check plus credo.
defp aliases do
[
verify: ["format --check-formatted", "credo"]
]
end
# Compiles `test/support` helpers alongside `lib` in test-like environments;
# all other environments compile `lib` only.
defp elixirc_paths(env) do
  case env do
    e when e in [:test, :integration] -> ["lib", "test/support"]
    _ -> ["lib"]
  end
end
# Integration tests live under test/integration; every other environment
# runs the unit suite.
defp test_paths(env) do
  if env == :integration, do: ["test/integration"], else: ["test/unit"]
end
end
| 27.819444 | 88 | 0.517224 |
08354381c389cb657beb3c56e6a8cf84013c494f | 13,213 | ex | Elixir | apps/api_web/lib/api_web/controllers/vehicle_controller.ex | fjlanasa/api | c39bc393aea572bfb81754b2ea1adf9dda9ce24a | [
"MIT"
] | null | null | null | apps/api_web/lib/api_web/controllers/vehicle_controller.ex | fjlanasa/api | c39bc393aea572bfb81754b2ea1adf9dda9ce24a | [
"MIT"
] | null | null | null | apps/api_web/lib/api_web/controllers/vehicle_controller.ex | fjlanasa/api | c39bc393aea572bfb81754b2ea1adf9dda9ce24a | [
"MIT"
] | null | null | null | defmodule ApiWeb.VehicleController do
@moduledoc """
Controller for Vehicles. Can be filtered by:
* trip
* route (optionally direction)
"""
use ApiWeb.Web, :api_controller
alias State.Vehicle
@filters ~w(trip route direction_id id label route_type)s
@pagination_opts ~w(offset limit order_by)a
@includes ~w(trip stop route)
# The State module backing this controller (used by shared controller plumbing).
def state_module, do: State.Vehicle
# Swagger/OpenAPI documentation for GET /vehicles/{id} (PhoenixSwagger DSL).
swagger_path :show do
get(path(__MODULE__, :show))
description("""
Single vehicle (bus, ferry, or train)
#{swagger_path_description("/data")}
""")
parameter(:id, :path, :string, "Unique identifier for a vehicle")
common_show_parameters(:vehicle)
include_parameters()
consumes("application/vnd.api+json")
produces("application/vnd.api+json")
response(200, "OK", Schema.ref(:Vehicle))
response(403, "Forbidden", Schema.ref(:Forbidden))
response(404, "Not Found", Schema.ref(:NotFound))
response(406, "Not Acceptable", Schema.ref(:NotAcceptable))
response(429, "Too Many Requests", Schema.ref(:TooManyRequests))
end
@spec show_data(Plug.Conn.t(), %{String.t() => String.t()}) :: Model.Vehicle.t() | nil
# Looks up a single vehicle by id once the requested includes validate.
# A failed validation returns its {:error, _, _} tuple unchanged, which
# `with` passes through automatically when the clause does not match.
def show_data(conn, %{"id" => id} = params) do
  with {:ok, _includes} <- Params.validate_includes(params, @includes, conn) do
    State.Vehicle.by_id(id)
  end
end
# Swagger/OpenAPI documentation for GET /vehicles (PhoenixSwagger DSL).
swagger_path :index do
get(path(__MODULE__, :index))
description("""
List of vehicles (buses, ferries, and trains)
#{swagger_path_description("/data/{index}")}
""")
common_index_parameters(__MODULE__, :vehicle)
include_parameters()
parameter(
"filter[id]",
:query,
:string,
"Filter by multiple IDs. Multiple IDs #{comma_separated_list()}. Cannot be combined with any other filter.",
example: "1,2"
)
parameter(
"filter[trip]",
:query,
:string,
"Filter by `/data/{index}/relationships/trip/data/id`. Multiple `/data/{index}/relationships/trip/data/id` #{
comma_separated_list()
}. Cannot be combined with any other filter."
)
parameter("filter[label]", :query, :string, """
Filter by label. Multiple `label` #{comma_separated_list()}.
""")
parameter("filter[route]", :query, :string, """
Filter by route. If the vehicle is on a \
[multi-route trip](https://groups.google.com/forum/#!msg/massdotdevelopers/1egrhNjT9eA/iy6NFymcCgAJ), it will be \
returned for any of the routes. Multiple `route_id` #{comma_separated_list()}.
""")
filter_param(:direction_id, desc: "Only used if `filter[route]` is also present.")
filter_param(:route_type)
consumes("application/vnd.api+json")
produces("application/vnd.api+json")
response(200, "OK", Schema.ref(:Vehicles))
response(400, "Bad Request", Schema.ref(:BadRequest))
response(403, "Forbidden", Schema.ref(:Forbidden))
response(429, "Too Many Requests", Schema.ref(:TooManyRequests))
end
@spec index_data(Plug.Conn.t(), %{}) :: [Model.Vehicle.t()]
# Lists vehicles: normalizes legacy sort params for old API versions,
# validates filters/includes, then applies the filters with pagination.
# Validation failures fall into `else` and are returned as-is.
def index_data(conn, params) do
params = backwards_compatible_params(conn.assigns.api_version, params)
with {:ok, filtered} <- Params.filter_params(params, @filters, conn),
{:ok, _includes} <- Params.validate_includes(params, @includes, conn) do
filtered
|> apply_filters()
|> State.all(Params.filter_opts(params, @pagination_opts, conn))
else
{:error, _, _} = error -> error
end
end
# API version 2017-11-28 exposed the sort key as `last_updated`; newer
# versions call the field `updated_at`. These clauses translate the legacy
# key (and its descending `-` form); anything else passes through untouched.
defp backwards_compatible_params("2017-11-28", %{"sort" => "last_updated" <> _} = params),
  do: Map.put(params, "sort", "updated_at")

defp backwards_compatible_params("2017-11-28", %{"sort" => "-last_updated" <> _} = params),
  do: Map.put(params, "sort", "-updated_at")

defp backwards_compatible_params(_version, params), do: params
# Filter dispatch. Clause order is significant: an `id` filter wins over
# everything, then `trip`, then the remaining filters combined, then
# (empty filter map) all vehicles.
defp apply_filters(%{"id" => id}) do
id
|> Params.split_on_comma()
|> Vehicle.by_ids()
end
defp apply_filters(%{"trip" => trip}) do
trip
|> Params.split_on_comma()
|> Vehicle.by_trip_ids()
end
# If no id or trip present, evaluate all remaining filters together
defp apply_filters(%{} = filters) when map_size(filters) > 0 do
filters
|> Stream.flat_map(&do_format_filter(&1))
|> Enum.into(%{})
|> Vehicle.filter_by()
end
defp apply_filters(_filters) do
Vehicle.all()
end
# Converts one {param, raw_value} pair into a Vehicle.filter_by/1 map entry,
# or [] when the value parses to nothing (so flat_map drops it).
# String.to_existing_atom/1 is safe here: `key` is guard-restricted to
# "label"/"route", so only :labels / :routes are produced.
defp do_format_filter({key, string}) when key in ["label", "route"] do
case Params.split_on_comma(string) do
[] ->
[]
values ->
%{String.to_existing_atom("#{key}s") => values}
end
end
defp do_format_filter({"route_type", route_type}) do
case Params.integer_values(route_type) do
[] ->
[]
route_types ->
%{route_types: route_types}
end
end
defp do_format_filter({"direction_id", direction_id}) do
case Params.direction_id(%{"direction_id" => direction_id}) do
nil ->
[]
parsed_direction_id ->
%{direction_id: parsed_direction_id}
end
end
# Unknown filter keys are ignored.
defp do_format_filter(_), do: []
# Swagger schema definitions for the Vehicle resource (PhoenixSwagger
# JsonApi DSL). Pure data — no runtime behavior beyond building the map.
def swagger_definitions do
import PhoenixSwagger.JsonApi, except: [page: 1]
%{
VehicleResource:
resource do
description("Current state of a vehicle on a trip.")
attributes do
bearing(
:integer,
"Bearing, in degrees, clockwise from True North, i.e., 0 is North and 90 is East. This can be the compass bearing, or the direction towards the next stop or intermediate location. See [GTFS-realtime Position bearing](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#message-position).",
example: 174
)
current_status(
:string,
"""
Status of vehicle relative to the stops. See [GTFS-realtime VehicleStopStatus](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#enum-vehiclestopstatus).
| _**Value**_ | _**Description**_ |
|-------------------|------------------------------------------------------------------------------------------------------------|
| **INCOMING_AT** | The vehicle is just about to arrive at the stop (on a stop display, the vehicle symbol typically flashes). |
| **STOPPED_AT** | The vehicle is standing at the stop. |
| **IN_TRANSIT_TO** | The vehicle has departed the previous stop and is in transit. |
""",
example: "IN_TRANSIT_TO"
)
current_stop_sequence(
:integer,
"Index of current stop along trip. See [GTFS-realtime VehiclePosition current_stop_sequence](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#message-vehicleposition)",
example: 8
)
label(
:string,
"User visible label, such as the one of on the signage on the vehicle. See [GTFS-realtime VehicleDescriptor label](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#message-vehicledescriptor).",
example: "1817"
)
updated_at(
%Schema{type: :string, format: :"date-time"},
"Time at which vehicle information was last updated. Format is ISO8601.",
example: "2017-08-14T16:04:44-04:00"
)
latitude(
:number,
"Latitude of the vehicle's current position. Degrees North, in the [WGS-84](https://en.wikipedia.org/wiki/World_Geodetic_System#A_new_World_Geodetic_System:_WGS.C2.A084) coordinate system. See [GTFS-realtime Position latitude](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#message-position).",
example: -71.27239990234375
)
longitude(
:number,
"Longitude of the vehicle's current position. Degrees East, in the [WGS-84](https://en.wikipedia.org/wiki/World_Geodetic_System#Longitudes_on_WGS.C2.A084) coordinate system. See [GTFS-realtime Position longitude](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#message-position).",
example: 42.32941818237305
)
speed(
:number,
"Speed that the vehicle is traveling in meters per second. See [GTFS-realtime Position speed](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#message-position).",
example: 16
)
occupancy_status(
:string,
"""
The degree of passenger occupancy for the vehicle. See [GTFS-realtime OccupancyStatus](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#enum-vehiclestopstatus).
| _**Value**_ | _**Description**_ |
|--------------------------------|-----------------------------------------------------------------------------------------------------|
| **MANY_SEATS_AVAILABLE** | Not crowded: the vehicle has a large percentage of seats available. |
| **FEW_SEATS_AVAILABLE** | Some crowding: the vehicle has a small percentage of seats available. |
| **FULL** | Crowded: the vehicle is considered full by most measures, but may still be allowing passengers to board. |
""",
"x-nullable": true,
example: "FEW_SEATS_AVAILABLE"
)
end
direction_id_attribute()
relationship(:trip)
relationship(:stop)
relationship(:route)
end,
Vehicle: single(:VehicleResource),
Vehicles: page(:VehicleResource)
}
end
# Registers the shared `include` query parameter (trip/stop/route) on a
# swagger schema, with a description table for each relationship.
defp include_parameters(schema) do
ApiWeb.SwaggerHelpers.include_parameters(
schema,
~w(trip stop route),
description: """
| include | Description |
|---------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `trip` | The trip which the vehicle is currently operating. |
| `stop` | The vehicle's current (when `current_status` is **STOPPED_AT**) or *next* stop. |
| `route` | The one route that is designated for that trip, as in GTFS `trips.txt`. A trip might also provide service on other routes, identified by the MBTA's `multi_route_trips.txt` GTFS extension. `filter[route]` does consider the multi_route_trips GTFS extension, so it is possible to filter for one route and get a different route included in the response. |
"""
)
end
# Builds the shared Markdown body used in both the :show and :index swagger
# descriptions; `parent_pointer` is the JSON-API path prefix being documented.
defp swagger_path_description(parent_pointer) do
"""
## Direction
### World
To figure out which way the vehicle is pointing at the location, use `#{parent_pointer}/attributes/bearing`. This \
can be the compass bearing, or the direction towards the next stop or intermediate location.
### Trip
To get the direction around the stops in the trip use `#{parent_pointer}/attributes/direction_id`.
## Location
### World
Use `#{parent_pointer}/attributes/latitude` and `#{parent_pointer}/attributes/longitude` to get the location of a \
vehicle.
### Trip
Use `#{parent_pointer}/attributes/current_stop_sequence` to get the stop number along the trip. Useful for linear \
stop indicators. Position relative to the current stop is in `#{parent_pointer}/attributes/current_status`.
## Movement
### World
Use `#{parent_pointer}/attributes/speed` to get the speed of the vehicle in meters per second.
"""
end
end
| 40.907121 | 370 | 0.548551 |
08356fb6b521a1a8587e371628cfc52b3c086fde | 77 | ex | Elixir | lib/hook/producers/email.ex | samuelnygaard/accent | db753badab1d885397b48a42ac3fb43024345467 | [
"BSD-3-Clause"
] | 1 | 2020-07-01T16:08:34.000Z | 2020-07-01T16:08:34.000Z | lib/hook/producers/email.ex | samuelnygaard/accent | db753badab1d885397b48a42ac3fb43024345467 | [
"BSD-3-Clause"
] | 6 | 2021-03-11T07:37:48.000Z | 2022-02-13T21:10:33.000Z | lib/hook/producers/email.ex | doc-ai/accent | e337e16f3658cc0728364f952c0d9c13710ebb06 | [
"BSD-3-Clause"
] | 1 | 2020-05-29T21:47:35.000Z | 2020-05-29T21:47:35.000Z | defmodule Accent.Hook.Producers.Email do
use Accent.Hook.EventProducer
end
| 19.25 | 40 | 0.831169 |
083577b891cd22f40be60a07a6064e13529c86a3 | 1,737 | ex | Elixir | debian/manpage.1.ex | dreamhost/dpkg-ndn-perl-mod-perl | b22e73bc5bbff804cece98fd2b8138b14883efd3 | [
"Apache-2.0"
] | null | null | null | debian/manpage.1.ex | dreamhost/dpkg-ndn-perl-mod-perl | b22e73bc5bbff804cece98fd2b8138b14883efd3 | [
"Apache-2.0"
] | null | null | null | debian/manpage.1.ex | dreamhost/dpkg-ndn-perl-mod-perl | b22e73bc5bbff804cece98fd2b8138b14883efd3 | [
"Apache-2.0"
] | null | null | null | .\" Hey, EMACS: -*- nroff -*-
.\" (C) Copyright 2015 Chris Weyl <Chris Weyl <cweyl@alumni.drew.edu>>,
.\"
.\" First parameter, NAME, should be all caps
.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection
.\" other parameters are allowed: see man(7), man(1)
.TH NDN-PERL-APACHE2-MOD-PERL SECTION "March 18, 2015"
.\" Please adjust this date whenever revising the manpage.
.\"
.\" Some roff macros, for reference:
.\" .nh disable hyphenation
.\" .hy enable hyphenation
.\" .ad l left justify
.\" .ad b justify to both left and right margins
.\" .nf disable filling
.\" .fi enable filling
.\" .br insert line break
.\" .sp <n> insert n+1 empty lines
.\" for manpage-specific macros, see man(7)
.SH NAME
ndn-perl-apache2-mod-perl \- program to do something
.SH SYNOPSIS
.B ndn-perl-apache2-mod-perl
.RI [ options ] " files" ...
.br
.B bar
.RI [ options ] " files" ...
.SH DESCRIPTION
This manual page documents briefly the
.B ndn-perl-apache2-mod-perl
and
.B bar
commands.
.PP
.\" TeX users may be more comfortable with the \fB<whatever>\fP and
.\" \fI<whatever>\fP escape sequences to invoke bold face and italics,
.\" respectively.
\fBndn-perl-apache2-mod-perl\fP is a program that...
.SH OPTIONS
These programs follow the usual GNU command line syntax, with long
options starting with two dashes (`-').
A summary of options is included below.
For a complete description, see the Info files.
.TP
.B \-h, \-\-help
Show summary of options.
.TP
.B \-v, \-\-version
Show version of program.
.SH SEE ALSO
.BR bar (1),
.BR baz (1).
.br
The programs are documented fully by
.IR "The Rise and Fall of a Fooish Bar" ,
available via the Info system.
| 30.473684 | 71 | 0.668394 |
0835a4829054f9847d45c2e1993e3ebb8c3f33c7 | 208 | ex | Elixir | apps/kerostasia/lib/kerostasia.ex | achlysproject/nosos | 59070c8c32115ba6b0670385e596ecd6d7b2f69a | [
"Apache-2.0"
] | null | null | null | apps/kerostasia/lib/kerostasia.ex | achlysproject/nosos | 59070c8c32115ba6b0670385e596ecd6d7b2f69a | [
"Apache-2.0"
] | null | null | null | apps/kerostasia/lib/kerostasia.ex | achlysproject/nosos | 59070c8c32115ba6b0670385e596ecd6d7b2f69a | [
"Apache-2.0"
] | null | null | null | defmodule Kerostasia do
@moduledoc """
Documentation for Kerostasia.
"""
@doc """
Hello world.
## Examples
iex> Kerostasia.hello()
:world
"""
# One-line form of the placeholder greeting; see the doctest above.
def hello, do: :world
end
| 10.947368 | 31 | 0.581731 |
0835bc5fe87fc340baf26ed61adba61088bd15fd | 1,034 | exs | Elixir | config/config.exs | joaobalsini/mmo_game | 60d046c92ce0cf3038ba3cbd9bfd33aa79e22969 | [
"MIT"
] | null | null | null | config/config.exs | joaobalsini/mmo_game | 60d046c92ce0cf3038ba3cbd9bfd33aa79e22969 | [
"MIT"
] | 2 | 2021-03-10T11:57:02.000Z | 2021-05-11T07:36:34.000Z | config/config.exs | joaobalsini/mmo_game | 60d046c92ce0cf3038ba3cbd9bfd33aa79e22969 | [
"MIT"
] | null | null | null | # This file is responsible for configuring your application
# and its dependencies with the aid of the Mix.Config module.
#
# This configuration file is loaded before any dependency and
# is restricted to this project.
# General application configuration
# NOTE(review): Mix.Config is deprecated in favor of `import Config`
# (Elixir >= 1.9) — confirm the project's minimum Elixir version before switching.
use Mix.Config
# Configures the endpoint
# NOTE(review): secret_key_base is committed to source control here; for
# production this should come from the environment/release config.
config :mmo_game, MmoGameWeb.Endpoint,
url: [host: "localhost"],
secret_key_base: "I1IHwV0nPisBSomh2Vr1RT5+kloJ1FPOg8S/Z8b/I95ZLH1mwXuULZRTW7aqqph4",
render_errors: [view: MmoGameWeb.ErrorView, accepts: ~w(html json)],
pubsub: [name: MmoGame.PubSub, adapter: Phoenix.PubSub.PG2],
live_view: [
signing_salt: "zHvGjY09bOfbOgVA1y0pUZv4oiC7RiZF"
]
# Configures Elixir's Logger
config :logger, :console,
format: "$time $metadata[$level] $message\n",
metadata: [:request_id]
# Use Jason for JSON parsing in Phoenix
config :phoenix, :json_library, Jason
# Import environment specific config. This must remain at the bottom
# of this file so it overrides the configuration defined above.
import_config "#{Mix.env()}.exs"
| 33.354839 | 86 | 0.767892 |
0835ce56e98d1d89df9755686ae995a0edee2f2c | 1,545 | ex | Elixir | lib/asteroid/object_store/authentication_event.ex | tanguilp/asteroid | 8e03221d365da7f03f82df192c535d3ba2101f4d | [
"Apache-2.0"
] | 36 | 2019-07-23T20:01:05.000Z | 2021-08-05T00:52:34.000Z | lib/asteroid/object_store/authentication_event.ex | tanguilp/asteroid | 8e03221d365da7f03f82df192c535d3ba2101f4d | [
"Apache-2.0"
] | 19 | 2019-08-23T19:04:50.000Z | 2021-05-07T22:12:25.000Z | lib/asteroid/object_store/authentication_event.ex | tanguilp/asteroid | 8e03221d365da7f03f82df192c535d3ba2101f4d | [
"Apache-2.0"
] | 3 | 2019-09-06T10:47:20.000Z | 2020-09-09T03:43:31.000Z | defmodule Asteroid.ObjectStore.AuthenticationEvent do
@moduledoc """
Behaviour for authentication event store
"""
# Implementation-specific options passed through to every callback.
@type opts :: Keyword.t()
@doc """
Installs the authentication event store
"""
@callback install(opts()) :: :ok | {:error, any()}
@doc """
Starts the authentication event store (non supervised)
"""
@callback start(opts()) :: :ok | {:error, any()}
@doc """
Starts the authentication event store (supervised)
"""
@callback start_link(opts()) :: Supervisor.on_start()
@doc """
Returns an authentication event from its id
Returns `{:ok, %Asteroid.OIDC.AuthenticationEvent{}}` if the authentication event exists
and `{:ok, nil}` otherwise.
"""
@callback get(Asteroid.OIDC.AuthenticationEvent.id(), opts()) ::
{:ok, Asteroid.OIDC.AuthenticationEvent.t() | nil}
| {:error, any()}
@doc """
Returns all the *authentication event ids* of a subject
"""
# NOTE(review): the doc says "of a subject" and the spec takes
# Asteroid.Subject.id(), but the callback name says authenticated_session_id —
# confirm which identifier is actually intended.
@callback get_from_authenticated_session_id(Asteroid.Subject.id(), opts()) ::
{:ok, [Asteroid.OIDC.AuthenticationEvent.id()]} | {:error, any()}
@doc """
Stores an authentication event
If the authentication event already exists, all of its data should be erased.
"""
@callback put(Asteroid.OIDC.AuthenticationEvent.t(), opts()) :: :ok | {:error, any()}
@doc """
Removes an authentication event
"""
@callback delete(Asteroid.OIDC.AuthenticationEvent.id(), opts()) :: :ok | {:error, any()}
# start/1 and start_link/1 are alternatives; an implementation needs only one.
@optional_callbacks start: 1,
start_link: 1
end
| 25.327869 | 91 | 0.642071 |
0835d992e4aa11195d81b428340a166ba28abe5f | 1,056 | ex | Elixir | lib/star_web/channels/user_socket.ex | ModinfinityLLC/star | 6d56cea1c08f88b73984f7ad1197e4337139995f | [
"MIT"
] | 2 | 2019-05-27T22:56:48.000Z | 2019-06-17T14:42:53.000Z | lib/star_web/channels/user_socket.ex | ModinfinityLLC/star | 6d56cea1c08f88b73984f7ad1197e4337139995f | [
"MIT"
] | 3 | 2020-07-17T03:13:04.000Z | 2021-05-08T22:56:26.000Z | lib/star_web/channels/user_socket.ex | ModinfinityLLC/star | 6d56cea1c08f88b73984f7ad1197e4337139995f | [
"MIT"
] | null | null | null | defmodule StarWeb.UserSocket do
use Phoenix.Socket
## Channels
# channel "room:*", StarWeb.RoomChannel
# Socket params are passed from the client and can
# be used to verify and authenticate a user. After
# verification, you can put default assigns into
# the socket that will be set for all channels, ie
#
# {:ok, assign(socket, :user_id, verified_user_id)}
#
# To deny connection, return `:error`.
#
# See `Phoenix.Token` documentation for examples in
# performing token verification on connect.
# Accepts every connection unconditionally — no authentication is performed
# here; params and connect_info are ignored.
def connect(_params, socket, _connect_info) do
{:ok, socket}
end
# Socket id's are topics that allow you to identify all sockets for a given user:
#
# def id(socket), do: "user_socket:#{socket.assigns.user_id}"
#
# Would allow you to broadcast a "disconnect" event and terminate
# all active sockets and channels for a given user:
#
# StarWeb.Endpoint.broadcast("user_socket:#{user.id}", "disconnect", %{})
#
# Returning `nil` makes this socket anonymous.
def id(_socket), do: nil
end
| 31.058824 | 83 | 0.694129 |
0836079b44b707711c57456673572f616797f8f9 | 622 | ex | Elixir | lib/quick_fs_public_api/model/api_usage.ex | nrrso/ex_quickfs | 912ea7a5ebb34ceebd9460608758716952c60752 | [
"MIT"
] | null | null | null | lib/quick_fs_public_api/model/api_usage.ex | nrrso/ex_quickfs | 912ea7a5ebb34ceebd9460608758716952c60752 | [
"MIT"
] | null | null | null | lib/quick_fs_public_api/model/api_usage.ex | nrrso/ex_quickfs | 912ea7a5ebb34ceebd9460608758716952c60752 | [
"MIT"
] | null | null | null | # NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
# https://openapi-generator.tech
# Do not edit the class manually.
# Generated model wrapping the API-usage payload; the single :"usage" field
# holds a nested ApiUsageUsage struct.
defmodule QuickFSPublicAPI.Model.ApiUsage do
@moduledoc """
"""
@derive [Poison.Encoder]
defstruct [
:"usage"
]
@type t :: %__MODULE__{
:"usage" => QuickFSPublicAPI.Model.ApiUsageUsage.t
}
end
# Poison decode hook: deserializes the nested "usage" map into its struct.
defimpl Poison.Decoder, for: QuickFSPublicAPI.Model.ApiUsage do
import QuickFSPublicAPI.Deserializer
def decode(value, options) do
value
|> deserialize(:"usage", :struct, QuickFSPublicAPI.Model.ApiUsageUsage, options)
end
end
| 22.214286 | 91 | 0.717042 |
0836197646d82bb448095ebd188250ae2440371a | 873 | ex | Elixir | backend/lib/honeyland.ex | bejolithic/honeyland | 8c4a0d3b56543648d3acb96cc6906df86526743b | [
"Apache-2.0"
] | null | null | null | backend/lib/honeyland.ex | bejolithic/honeyland | 8c4a0d3b56543648d3acb96cc6906df86526743b | [
"Apache-2.0"
] | null | null | null | backend/lib/honeyland.ex | bejolithic/honeyland | 8c4a0d3b56543648d3acb96cc6906df86526743b | [
"Apache-2.0"
] | null | null | null | #
# This file is part of Honeyland.
#
# Copyright 2022 Nervive Studio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
defmodule Honeyland do
@moduledoc """
Honeyland keeps the contexts that define your domain
and business logic.
Contexts are also responsible for managing your data, regardless
if it comes from the database, an external API or others.
"""
# Intentionally empty: this module exists only as the application's
# top-level namespace and documentation anchor.
end
| 31.178571 | 74 | 0.756014 |
083661a48d9a77f54dc64b577fcf02304899d077 | 12,380 | exs | Elixir | test/lib/adapters/sendin_blue_adapter_v3_test.exs | maysam/bamboo_sendinblue | 882511499ce8088b46713dbae06eee1bd84c860a | [
"MIT"
] | null | null | null | test/lib/adapters/sendin_blue_adapter_v3_test.exs | maysam/bamboo_sendinblue | 882511499ce8088b46713dbae06eee1bd84c860a | [
"MIT"
] | null | null | null | test/lib/adapters/sendin_blue_adapter_v3_test.exs | maysam/bamboo_sendinblue | 882511499ce8088b46713dbae06eee1bd84c860a | [
"MIT"
] | null | null | null | defmodule Bamboo.SendinBlueAdapterV3Test do
use ExUnit.Case
alias Bamboo.Email
alias Bamboo.Attachment
alias Bamboo.SendinBlueAdapterV3
alias Bamboo.SendinBlueHelper
@config %{adapter: SendinBlueAdapterV3, api_key: "123_abc"}
@config_with_bad_key %{adapter: SendinBlueAdapterV3, api_key: nil}
defmodule FakeSendinBlue do
use Plug.Router
plug(Plug.Parsers,
# parsers: [:urlencoded, :multipart, :json],
parsers: [:multipart, :json],
pass: ["*/*"],
json_decoder: Poison
)
plug(:match)
plug(:dispatch)
def start_server(parent) do
Agent.start_link(fn -> Map.new() end, name: __MODULE__)
Agent.update(__MODULE__, &Map.put(&1, :parent, parent))
port = get_free_port()
Application.put_env(:bamboo, :sendinblue_base_uri, "http://localhost:#{port}")
Plug.Adapters.Cowboy.http(__MODULE__, [], port: port, ref: __MODULE__)
end
defp get_free_port do
{:ok, socket} = :ranch_tcp.listen(port: 0)
{:ok, port} = :inet.port(socket)
:erlang.port_close(socket)
port
end
def shutdown do
Plug.Adapters.Cowboy.shutdown(__MODULE__)
end
post "/v3/smtp/email" do
case Map.get(conn.params, "from") do
"INVALID_EMAIL" -> conn |> send_resp(500, "Error!!") |> send_to_parent
_ -> conn |> send_resp(200, "SENT") |> send_to_parent
end
end
defp send_to_parent(conn) do
parent = Agent.get(__MODULE__, fn set -> Map.get(set, :parent) end)
send(parent, {:fake_sendinblue, conn})
conn
end
end
setup do
FakeSendinBlue.start_server(self())
on_exit(fn ->
FakeSendinBlue.shutdown()
end)
:ok
end
defp new_email(attrs \\ []) do
Keyword.merge([from: "foo@bar.com", to: []], attrs)
|> Email.new_email()
|> Bamboo.Mailer.normalize_addresses()
end
test "raises if the api key is nil" do
assert_raise ArgumentError, ~r/no API key set/, fn ->
new_email(from: "foo@bar.com") |> SendinBlueAdapterV3.deliver(@config_with_bad_key)
end
assert_raise ArgumentError, ~r/no API key set/, fn ->
SendinBlueAdapterV3.handle_config(%{})
end
end
test "deliver/2 correctly formats reply-to from headers" do
email = new_email(headers: %{"reply-to-email" => "foo@bar.com", "reply-to-name" => "Foo"})
email |> SendinBlueAdapterV3.deliver(@config)
assert_receive {:fake_sendinblue, %{params: params}}
assert %{"email" => "foo@bar.com", "name" => "Foo"} = params["replyTo"]
end
test "deliver/2 sends the to the right url" do
new_email() |> SendinBlueAdapterV3.deliver(@config)
assert_receive {:fake_sendinblue, %{request_path: request_path}}
assert request_path == "/v3/smtp/email"
end
test "deliver/2 sends the to the right url for templates" do
new_email() |> SendinBlueHelper.template("hello") |> SendinBlueAdapterV3.deliver(@config)
assert_receive {:fake_sendinblue, %{request_path: request_path}}
assert request_path == "/v3/smtp/email"
end
test "deliver/2 sends from, html and text body, subject, and headers" do
email =
new_email(
from: {"From", "from@foo.com"},
subject: "My Subject",
text_body: "TEXT BODY",
html_body: "HTML BODY"
)
|> Email.put_header("reply-to-name", "Foo")
|> Email.put_header("reply-to-email", "foo@bar.com")
email |> SendinBlueAdapterV3.deliver(@config)
assert_receive {:fake_sendinblue, %{params: params, req_headers: headers}}
assert params["sender"]["email"] == email.from |> elem(1)
assert params["sender"]["name"] == email.from |> elem(0)
assert params["subject"] == email.subject
assert params["textContent"] == email.text_body
assert params["htmlContent"] == email.html_body
assert Enum.member?(headers, {"api-key", @config[:api_key]})
end
test "deliver/2 correctly formats recipients" do
email =
new_email(
to: [{"ToName", "to@bar.com"}, {nil, "noname@bar.com"}],
cc: [{"CC1", "cc1@bar.com"}, {"CC2", "cc2@bar.com"}],
bcc: [{"BCC1", "bcc1@bar.com"}, {"BCC2", "bcc2@bar.com"}]
)
email |> SendinBlueAdapterV3.deliver(@config)
assert_receive {:fake_sendinblue, %{params: params}}
assert params["to"] == [
%{"email" => "to@bar.com", "name" => "ToName"},
%{"email" => "noname@bar.com"}
]
assert params["cc"] == [
%{"email" => "cc1@bar.com", "name" => "CC1"},
%{"email" => "cc2@bar.com", "name" => "CC2"}
]
assert params["bcc"] == [
%{"email" => "bcc1@bar.com", "name" => "BCC1"},
%{"email" => "bcc2@bar.com", "name" => "BCC2"}
]
end
test "deliver/2 puts template name and empty content" do
email = SendinBlueHelper.template(new_email(), "hello")
SendinBlueAdapterV3.deliver(email, @config)
assert_receive {:fake_sendinblue, %{params: %{"templateId" => template_id,
"params" => template_model}}}
assert template_id == "hello"
assert template_model == %{}
end
test "deliver/2 puts template name and content" do
email = SendinBlueHelper.template(new_email(), "hello", [
%{name: "example name", content: "example content"}
])
SendinBlueAdapterV3.deliver(email, @config)
assert_receive {:fake_sendinblue, %{params: %{"templateId" => template_id,
"params" => template_model}}}
assert template_id == "hello"
assert template_model == [%{"content" => "example content",
"name" => "example name"}]
end
test "deliver/2 puts tag param" do
new_email()
|> SendinBlueHelper.tag("some_tag")
|> SendinBlueAdapterV3.deliver(@config)
assert_receive {:fake_sendinblue, %{params: %{"tags" => ["some_tag"]}}}
end
test "deliver/2 puts multiple tags param" do
email = new_email()
|> SendinBlueHelper.tag("tag2")
|> SendinBlueHelper.tag("tag1")
SendinBlueAdapterV3.deliver(email, @config)
assert_receive {:fake_sendinblue, %{params: %{"tags" => ["tag1", "tag2"]}}}
end
test "deliver/2 correctly formats DATA attachment from local path even if filename is missing" do
email =
new_email(
to: "noname@bar.com",
attachments: [
Attachment.new("./test/support/attachment.png")
]
)
email |> SendinBlueAdapterV3.deliver(@config)
assert_receive {:fake_sendinblue, %{params: params}}
assert params["to"] == [%{"email" => "noname@bar.com"}]
assert params["attachment"] == [
%{
"content" => File.read!("./test/support/attachment.png") |> Base.encode64(),
"name" => "attachment.png"
}
]
end
test "deliver/2 correctly formats DATA attachment from contents, path when filename is missing" do
email =
new_email(
to: "noname@bar.com",
attachments: [
%Attachment{
path: "./test/support/attachment.png",
data: File.read!("./test/support/attachment.png")
}
]
)
email |> SendinBlueAdapterV3.deliver(@config)
assert_receive {:fake_sendinblue, %{params: params}}
assert params["to"] == [%{"email" => "noname@bar.com"}]
assert params["attachment"] == [
%{
"content" => File.read!("./test/support/attachment.png") |> Base.encode64(),
"name" => "attachment.png"
}
]
end
test "deliver/2 correctly formats DATA attachment from contents, filename" do
email =
new_email(
to: "noname@bar.com",
attachments: [
%Attachment{
filename: "attachment1.png",
data: File.read!("./test/support/attachment.png")
}
]
)
email |> SendinBlueAdapterV3.deliver(@config)
assert_receive {:fake_sendinblue, %{params: params}}
assert params["to"] == [%{"email" => "noname@bar.com"}]
assert params["attachment"] == [
%{
"content" => File.read!("./test/support/attachment.png") |> Base.encode64(),
"name" => "attachment1.png"
}
]
end
test "deliver/2 correctly formats DATA attachment from path" do
email =
new_email(
to: "noname@bar.com",
attachments: [Attachment.new("./test/support/attachment.png")]
)
email |> SendinBlueAdapterV3.deliver(@config)
assert_receive {:fake_sendinblue, %{params: params}}
assert params["to"] == [%{"email" => "noname@bar.com"}]
assert params["attachment"] == [
%{
"content" => File.read!("./test/support/attachment.png") |> Base.encode64(),
"name" => "attachment.png"
}
]
end
test "deliver/2 correctly formats LINK attachment from path, filename" do
email =
new_email(
to: "noname@bar.com",
attachments: [
%Attachment{
path: "https://www.coders51.com/img/logo-alt.png",
filename: "attachment1.png"
}
]
)
email |> SendinBlueAdapterV3.deliver(@config)
assert_receive {:fake_sendinblue, %{params: params}}
assert params["to"] == [%{"email" => "noname@bar.com"}]
assert params["attachment"] == [
%{
"url" => "https://www.coders51.com/img/logo-alt.png",
"name" => "attachment1.png"
}
]
end
test "deliver/2 correctly formats LINK attachment from path" do
email =
new_email(
to: "noname@bar.com",
attachments: [
%Attachment{
path: "https://www.coders51.com/img/logo-alt.png"
}
]
)
email |> SendinBlueAdapterV3.deliver(@config)
assert_receive {:fake_sendinblue, %{params: params}}
assert params["to"] == [%{"email" => "noname@bar.com"}]
assert params["attachment"] == [
%{
"url" => "https://www.coders51.com/img/logo-alt.png",
"name" => "logo-alt.png"
}
]
end
test "deliver/2 correctly formats mixed DATA/LINK attachments" do
email =
new_email(
to: "noname@bar.com",
attachments: [
Attachment.new("./test/support/attachment.png", filename: "attachment1.png"),
%Attachment{path: "https://www.coders51.com/img/logo-alt.png"}
]
)
email |> SendinBlueAdapterV3.deliver(@config)
assert_receive {:fake_sendinblue, %{params: params}}
assert params["to"] == [%{"email" => "noname@bar.com"}]
assert params["attachment"] == [
%{
"content" => File.read!("./test/support/attachment.png") |> Base.encode64(),
"name" => "attachment1.png"
},
%{"name" => "logo-alt.png", "url" => "https://www.coders51.com/img/logo-alt.png"}
]
end
test "deliver/2 puts inline attachments" do
email = SendinBlueHelper.template(new_email(), "hello", [
%{name: "example name", content: "example content <img src=\"my-attachment\" />"}
])
|> Email.put_attachment(Path.join(__DIR__, "../../support/attachment.png"))
SendinBlueAdapterV3.deliver(email, @config)
assert_receive {
:fake_sendinblue,
%{
params: %{
"attachment" => [
%{
"content" => "iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAIAAAD/gAIDAAABg2lDQ1BJQ0MgcHJvZmlsZQAAKJF9kT1Iw0AcxV9Ti1JaHOwg4pChOlkQFXHUKhShQqgVWnUwufQLmjQkKS6OgmvBwY/FqoOLs64OroIg+AHi5uak6CIl/i8ptIjx4Lgf7+497t4BQrPKNKtnHNB028ykkmIuvyr2vkJACGEAUZlZxpwkpeE7vu4R4Otdgmf5n/tzRNWCxYCASDzLDNMm3iCe3rQNzvvEMVaWVeJz4jGTLkj8yHXF4zfOJZcFnhkzs5l54hixWOpipYtZ2dSIp4jjqqZTvpDzWOW8xVmr1ln7nvyFkYK+ssx1msNIYRFLkCBCQR0VVGEjQatOioUM7Sd9/EOuXyKXQq4KGDkWUIMG2fWD/8Hvbq3i5ISXFEkCoRfH+RgBeneBVsNxvo8dp3UCBJ+BK73jrzWBmU/SGx0tfgT0bwMX1x1N2QMud4DBJ0M2ZVcK0hSKReD9jL4pDwzcAuE1r7f2Pk4fgCx1lb4BDg6B0RJlr/u8u6+7t3/PtPv7AbeTclziFEqrAAAACXBIWXMAAA7EAAAOxAGVKw4bAAAAB3RJTUUH5QIFCgArZmj/VQAAAJ9JREFUeNrt0DEBAAAIAyC1f+dZwc8HItBJiptRIEuWLFmyZCmQJUuWLFmyFMiSJUuWLFkKZMmSJUuWLAWyZMmSJUuWAlmyZMmSJUuBLFmyZMmSpUCWLFmyZMlSIEuWLFmyZCmQJUuWLFmyFMiSJUuWLFkKZMmSJUuWLAWyZMmSJUuWAlmyZMmSJUuBLFmyZMmSpUCWLFmyZMlSIEvWtwXP8QPFWbLJBwAAAABJRU5ErkJggg==",
"name" => "attachment.png"
}
]
}
}
}
end
end
| 32.408377 | 906 | 0.608643 |
08369557afef315cd92d3aee744c314ffd209215 | 2,656 | ex | Elixir | lib/ace/http/worker.ex | CharlesOkwuagwu/Ace | e39bfbf5a99dde225d3adcf680885e0bd71a86a4 | [
"MIT"
] | null | null | null | lib/ace/http/worker.ex | CharlesOkwuagwu/Ace | e39bfbf5a99dde225d3adcf680885e0bd71a86a4 | [
"MIT"
] | null | null | null | lib/ace/http/worker.ex | CharlesOkwuagwu/Ace | e39bfbf5a99dde225d3adcf680885e0bd71a86a4 | [
"MIT"
] | null | null | null | defmodule Ace.HTTP.Worker do
@moduledoc false
use GenServer
def child_spec({module, config}) do
# DEBT is module previously checked to implement Raxx.Application or Raxx.Server
%{
id: __MODULE__,
start: {__MODULE__, :start_link, [{module, config}]},
type: :worker,
restart: :temporary,
shutdown: 500
}
end
# TODO decide whether a channel should be limited from startup to single channel (stream/pipeline)
def start_link({module, config}, channel \\ nil) do
GenServer.start_link(__MODULE__, {module, config, nil}, [])
end
## Server Callbacks
def handle_info({client, request = %Raxx.Request{}}, {mod, state, nil}) do
mod.handle_head(request, state)
|> normalise_reaction(state)
|> do_send({mod, state, client})
end
def handle_info({client, data = %Raxx.Data{}}, {mod, state, client}) do
mod.handle_data(data.data, state)
|> normalise_reaction(state)
|> do_send({mod, state, client})
end
def handle_info({client, tail = %Raxx.Tail{}}, {mod, state, client}) do
mod.handle_tail(tail.headers, state)
|> normalise_reaction(state)
|> do_send({mod, state, client})
end
def handle_info(other, {mod, state, client}) do
mod.handle_info(other, state)
|> normalise_reaction(state)
|> do_send({mod, state, client})
end
defp normalise_reaction(response = %Raxx.Response{}, state) do
case response.body do
false ->
{[response], state}
true ->
{[response], state}
_body ->
# {[%{response | body: true}, Raxx.data(response.body), Raxx.tail], state}
{[response], state}
end
end
defp normalise_reaction({parts, new_state}, _old_state) do
parts = Enum.map(parts, &fix_part/1)
{parts, new_state}
end
defp do_send({parts, new_state}, {mod, _old_state, client}) do
case parts do
[] ->
:ok
parts ->
case client do
ref = {:http1, pid, _count} ->
send(pid, {ref, parts})
stream = {:stream, _, _, _} ->
Enum.each(parts, fn part ->
Ace.HTTP2.send(stream, part)
end)
end
end
case List.last(parts) do
%{body: false} ->
{:stop, :normal, {mod, new_state, client}}
%Raxx.Tail{} ->
{:stop, :normal, {mod, new_state, client}}
_ ->
{:noreply, {mod, new_state, client}}
end
end
# TODO remove this special case
defp fix_part({:promise, request}) do
request =
request
|> Map.put(:scheme, request.scheme || :https)
{:promise, request}
end
defp fix_part(part) do
part
end
end
| 24.592593 | 100 | 0.600151 |
0836c4d61389883dc62680bb5ed5a2f42299864d | 3,774 | ex | Elixir | clients/domains_rdap/lib/google_api/domains_rdap/v1/api/entity.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/domains_rdap/lib/google_api/domains_rdap/v1/api/entity.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/domains_rdap/lib/google_api/domains_rdap/v1/api/entity.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.DomainsRDAP.V1.Api.Entity do
  @moduledoc """
  API calls for all endpoints tagged `Entity`.
  """

  alias GoogleApi.DomainsRDAP.V1.Connection
  alias GoogleApi.Gax.{Request, Response}

  @library_version Mix.Project.config() |> Keyword.get(:version, "")

  @doc """
  The RDAP API recognizes this command from the RDAP specification but
  does not support it. The response is a formatted 501 error.

  ## Parameters

  * `connection` (*type:* `GoogleApi.DomainsRDAP.V1.Connection.t`) - Connection to server
  * `entity_id` (*type:* `String.t`) -
  * `optional_params` (*type:* `keyword()`) - Optional parameters
      * `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
      * `:access_token` (*type:* `String.t`) - OAuth access token.
      * `:alt` (*type:* `String.t`) - Data format for response.
      * `:callback` (*type:* `String.t`) - JSONP
      * `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
      * `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
      * `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
      * `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
      * `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
      * `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
      * `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
  * `opts` (*type:* `keyword()`) - Call options

  ## Returns

  * `{:ok, %GoogleApi.DomainsRDAP.V1.Model.RdapResponse{}}` on success
  * `{:error, info}` on failure
  """
  @spec domainsrdap_entity_get(Tesla.Env.client(), String.t(), keyword(), keyword()) ::
          {:ok, GoogleApi.DomainsRDAP.V1.Model.RdapResponse.t()}
          | {:ok, Tesla.Env.t()}
          | {:error, Tesla.Env.t()}
  def domainsrdap_entity_get(connection, entity_id, optional_params \\ [], opts \\ []) do
    # Every supported optional parameter travels in the query string.
    param_spec = %{
      :"$.xgafv" => :query,
      :access_token => :query,
      :alt => :query,
      :callback => :query,
      :fields => :query,
      :key => :query,
      :oauth_token => :query,
      :prettyPrint => :query,
      :quotaUser => :query,
      :uploadType => :query,
      :upload_protocol => :query
    }

    path_params = %{"entityId" => URI.encode(entity_id, &URI.char_unreserved?/1)}

    request =
      Request.new()
      |> Request.method(:get)
      |> Request.url("/v1/entity/{entityId}", path_params)
      |> Request.add_optional_params(param_spec, optional_params)
      |> Request.library_version(@library_version)

    Connection.execute(connection, request)
    |> Response.decode(opts ++ [struct: %GoogleApi.DomainsRDAP.V1.Model.RdapResponse{}])
  end
end
| 42.886364 | 196 | 0.651828 |
0836fa47b7227b3e3c0d1b96c203e1e2bfcdf9b9 | 2,396 | ex | Elixir | clients/sheets/lib/google_api/sheets/v4/model/insert_dimension_request.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | null | null | null | clients/sheets/lib/google_api/sheets/v4/model/insert_dimension_request.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | 1 | 2020-12-18T09:25:12.000Z | 2020-12-18T09:25:12.000Z | clients/sheets/lib/google_api/sheets/v4/model/insert_dimension_request.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | 1 | 2020-10-04T10:12:44.000Z | 2020-10-04T10:12:44.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Sheets.V4.Model.InsertDimensionRequest do
  @moduledoc """
  Inserts rows or columns in a sheet at a particular index.

  ## Attributes

  *   `inheritFromBefore` (*type:* `boolean()`, *default:* `nil`) - Whether dimension properties should be extended from the dimensions before or after the newly inserted dimensions. True to inherit from the dimensions before (in which case the start index must be greater than 0), and false to inherit from the dimensions after. For example, if row index 0 has red background and row index 1 has a green background, then inserting 2 rows at index 1 can inherit either the green or red background. If `inheritFromBefore` is true, the two new rows will be red (because the row before the insertion point was red), whereas if `inheritFromBefore` is false, the two new rows will be green (because the row after the insertion point was green).
  *   `range` (*type:* `GoogleApi.Sheets.V4.Model.DimensionRange.t`, *default:* `nil`) - The dimensions to insert. Both the start and end indexes must be bounded.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :inheritFromBefore => boolean(),
          :range => GoogleApi.Sheets.V4.Model.DimensionRange.t()
        }

  # JSON field mappings used by ModelBase for (de)serialization.
  field(:inheritFromBefore)
  field(:range, as: GoogleApi.Sheets.V4.Model.DimensionRange)
end
defimpl Poison.Decoder, for: GoogleApi.Sheets.V4.Model.InsertDimensionRequest do
  # Delegate decoding to the generated model implementation.
  def decode(value, options),
    do: GoogleApi.Sheets.V4.Model.InsertDimensionRequest.decode(value, options)
end
defimpl Poison.Encoder, for: GoogleApi.Sheets.V4.Model.InsertDimensionRequest do
  # Encoding is handled generically by the Gax model base.
  def encode(value, options),
    do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 47.92 | 739 | 0.753756 |
083703db60f18466cdcf9497ff4656dbb9d7301a | 68 | exs | Elixir | 2020/liveview/dazzle/test/test_helper.exs | herminiotorres/programmer_passport | d1786518a3a5f82471457e0ace41c4c33343739a | [
"MIT"
] | null | null | null | 2020/liveview/dazzle/test/test_helper.exs | herminiotorres/programmer_passport | d1786518a3a5f82471457e0ace41c4c33343739a | [
"MIT"
] | null | null | null | 2020/liveview/dazzle/test/test_helper.exs | herminiotorres/programmer_passport | d1786518a3a5f82471457e0ace41c4c33343739a | [
"MIT"
] | null | null | null | ExUnit.start()
Ecto.Adapters.SQL.Sandbox.mode(Dazzle.Repo, :manual)
| 22.666667 | 52 | 0.779412 |
0837ca8a3183cf90b654d0ec66bb87b84a1634f7 | 160 | ex | Elixir | lib/cep/source.ex | douglascamata/cep | 042aa6d2bcb64b459ac6ac281add2f2f753a6877 | [
"MIT"
] | 11 | 2016-03-23T20:34:19.000Z | 2019-07-16T13:21:48.000Z | lib/cep/source.ex | douglascamata/cep | 042aa6d2bcb64b459ac6ac281add2f2f753a6877 | [
"MIT"
] | 5 | 2016-03-22T05:35:46.000Z | 2019-04-02T16:44:00.000Z | lib/cep/source.ex | douglascamata/cep | 042aa6d2bcb64b459ac6ac281add2f2f753a6877 | [
"MIT"
] | 3 | 2019-03-26T18:54:59.000Z | 2020-07-06T22:41:51.000Z | defmodule Cep.Source do
@callback get_address(cep :: String.t()) ::
{:ok, %Cep.Address{}} | {:not_found, String.t()} | {:error, String.t()}
end
| 32 | 85 | 0.575 |
0837ed46c3ab02a658c495be05625f3e547788c8 | 3,736 | exs | Elixir | test/vutuv_web/controllers/user_controller_test.exs | hendri-tobing/vutuv | 50a3095e236fe96739a79954157b74b4c4025921 | [
"MIT"
] | null | null | null | test/vutuv_web/controllers/user_controller_test.exs | hendri-tobing/vutuv | 50a3095e236fe96739a79954157b74b4c4025921 | [
"MIT"
] | null | null | null | test/vutuv_web/controllers/user_controller_test.exs | hendri-tobing/vutuv | 50a3095e236fe96739a79954157b74b4c4025921 | [
"MIT"
] | null | null | null | defmodule VutuvWeb.UserControllerTest do
use VutuvWeb.ConnCase
import VutuvWeb.AuthCase
alias Vutuv.Accounts
@create_attrs %{email: "bill@example.com", password: "hard2guess"}
@update_attrs %{email: "william@example.com"}
@invalid_attrs %{email: nil}
setup %{conn: conn} = config do
conn = conn |> bypass_through(VutuvWeb.Router, [:browser]) |> get("/")
if email = config[:login] do
user = add_user(email)
other = add_user("tony@example.com")
conn = conn |> add_session(user) |> send_resp(:ok, "/")
{:ok, %{conn: conn, user: user, other: other}}
else
{:ok, %{conn: conn}}
end
end
describe "index" do
@tag login: "reg@example.com"
test "lists all entries on index", %{conn: conn} do
conn = get(conn, Routes.user_path(conn, :index))
assert html_response(conn, 200) =~ "Listing Users"
end
test "renders /users error for unauthorized user", %{conn: conn} do
conn = get(conn, Routes.user_path(conn, :index))
assert redirected_to(conn) == Routes.session_path(conn, :new)
end
end
describe "renders forms" do
test "renders form for new users", %{conn: conn} do
conn = get(conn, Routes.user_path(conn, :new))
assert html_response(conn, 200) =~ "New User"
end
@tag login: "reg@example.com"
test "renders form for editing chosen user", %{conn: conn, user: user} do
conn = get(conn, Routes.user_path(conn, :edit, user))
assert html_response(conn, 200) =~ "Edit User"
end
end
describe "show user resource" do
@tag login: "reg@example.com"
test "show chosen user's page", %{conn: conn, user: user} do
conn = get(conn, Routes.user_path(conn, :show, user))
assert html_response(conn, 200) =~ "Show User"
end
end
describe "create user" do
test "creates user when data is valid", %{conn: conn} do
conn = post(conn, Routes.user_path(conn, :create), user: @create_attrs)
assert redirected_to(conn) == Routes.session_path(conn, :new)
end
test "does not create user and renders errors when data is invalid", %{conn: conn} do
conn = post(conn, Routes.user_path(conn, :create), user: @invalid_attrs)
assert html_response(conn, 200) =~ "New User"
end
end
describe "updates user" do
@tag login: "reg@example.com"
test "updates chosen user when data is valid", %{conn: conn, user: user} do
conn = put(conn, Routes.user_path(conn, :update, user), user: @update_attrs)
assert redirected_to(conn) == Routes.user_path(conn, :show, user)
updated_user = Accounts.get_user(user.id)
assert updated_user.email == "william@example.com"
conn = get(conn, Routes.user_path(conn, :show, user))
assert html_response(conn, 200) =~ "william@example.com"
end
@tag login: "reg@example.com"
test "does not update chosen user and renders errors when data is invalid", %{
conn: conn,
user: user
} do
conn = put(conn, Routes.user_path(conn, :update, user), user: @invalid_attrs)
assert html_response(conn, 200) =~ "Edit User"
end
end
describe "delete user" do
@tag login: "reg@example.com"
test "deletes chosen user", %{conn: conn, user: user} do
conn = delete(conn, Routes.user_path(conn, :delete, user))
assert redirected_to(conn) == Routes.session_path(conn, :new)
refute Accounts.get_user(user.id)
end
@tag login: "reg@example.com"
test "cannot delete other user", %{conn: conn, user: user, other: other} do
conn = delete(conn, Routes.user_path(conn, :delete, other))
assert redirected_to(conn) == Routes.user_path(conn, :show, user)
assert Accounts.get_user(other.id)
end
end
end
| 34.915888 | 89 | 0.650428 |
0837fa3817c821a446a9ef77bdea1c774ce3c09e | 1,298 | ex | Elixir | lib/tyx/traversal/preset.ex | am-kantox/tyx | da0847cabdf304fbf2f5c56797d75c9e8795994c | [
"MIT"
] | 2 | 2021-06-04T20:35:33.000Z | 2021-11-28T22:26:08.000Z | lib/tyx/traversal/preset.ex | am-kantox/tyx | da0847cabdf304fbf2f5c56797d75c9e8795994c | [
"MIT"
] | null | null | null | lib/tyx/traversal/preset.ex | am-kantox/tyx | da0847cabdf304fbf2f5c56797d75c9e8795994c | [
"MIT"
] | null | null | null | defmodule Tyx.Traversal.Preset do
@moduledoc false
use Boundary, deps: [Tyx.Traversal]
alias Code.Typespec
alias Tyx.Traversal.Typemap
@behaviour Tyx.Traversal
@impl Tyx.Traversal
def lookup(Tyx.Traversal.Binding, :wrap, [key, binding]) do
{:error, stub: [key, binding]}
end
def lookup(Map, :get, [t1, t2]) do
case Typemap.to_spec(t1, nil) do
{^t1, {{:., _, [mod, type]}, _, []}} ->
with ^mod <- Code.ensure_compiled!(mod),
{:ok, types} <- Typespec.fetch_types(mod),
{_, {^type, {:type, _, :map, fields}, _}} <-
Enum.find(types, &match?({_, {^type, _, _}}, &1)) do
case Enum.find(fields, &match?({:type, _, :map_field_exact, [{:atom, _, ^t2}, _]}, &1)) do
{:type, _, :map_field_exact, [{:atom, _, ^t2}, type]} ->
{:ok, %Tyx.Fn{<~: [arg1: t1, arg2: t2], ~>: Typemap.from_spec(mod, type)}}
nil ->
{:ok,
%Tyx.Fn{<~: [arg1: t1, arg2: t2], ~>: Typemap.from_spec(mod, {:atom, [], :any})}}
end
end
_ ->
{:ok, {:alias, {Map, :get, [t1, t2, Tyx.BuiltIn.Nil]}}}
end
end
def lookup(Map, :fetch!, [t1, t2]), do: lookup(Map, :get, [t1, t2])
def lookup(mod, fun, args), do: {:error, {mod, fun, args}}
end
| 32.45 | 100 | 0.51849 |
083808da26bb9d52b1c77fdfa04a69f13260a7ec | 374 | exs | Elixir | config/dev.exs | bancolombia/perf-analizer | 16a90dad045413ee168fd2a45f975f3c03099fb3 | [
"MIT"
] | 1 | 2020-03-03T18:17:37.000Z | 2020-03-03T18:17:37.000Z | config/dev.exs | bancolombia/perf-analizer | 16a90dad045413ee168fd2a45f975f3c03099fb3 | [
"MIT"
] | 3 | 2020-02-20T18:53:59.000Z | 2020-02-20T19:00:06.000Z | config/dev.exs | bancolombia/perf-analizer | 16a90dad045413ee168fd2a45f975f3c03099fb3 | [
"MIT"
] | null | null | null | import Config
config :perf_analyzer,
url: "http://127.0.0.1:8080/wait/1000",
request: %{
method: "GET",
headers: [{"Content-Type", "application/json"}],
body: ""
},
execution: %{
steps: 5,
increment: 1,
duration: 7000,
constant_load: true,
dataset: :none,
separator: ","
},
distributed: :none
config :logger,
level: :info
| 17 | 52 | 0.585561 |
083821e4f4803762e4ca619a06af727f1555b1ee | 1,820 | ex | Elixir | lib/dgraph_ex/query.ex | WolfDan/dgraph_ex | 4dad42983f2387f10febf9996ac8f2db20aea710 | [
"MIT"
] | 21 | 2017-08-20T06:19:37.000Z | 2021-02-04T23:22:10.000Z | lib/dgraph_ex/query.ex | WolfDan/dgraph_ex | 4dad42983f2387f10febf9996ac8f2db20aea710 | [
"MIT"
] | 43 | 2017-08-06T21:03:28.000Z | 2018-09-08T13:00:35.000Z | lib/dgraph_ex/query.ex | WolfDan/dgraph_ex | 4dad42983f2387f10febf9996ac8f2db20aea710 | [
"MIT"
] | 1 | 2017-10-12T02:20:13.000Z | 2017-10-12T02:20:13.000Z | defmodule DgraphEx.Query do
alias DgraphEx.{
# Field,
Query,
}
alias Query.{
As,
Var,
Block,
Groupby,
}
@bracketed [
As,
Var,
Groupby,
]
defstruct [
sequence: [],
]
defmacro __using__(_) do
quote do
alias DgraphEx.{Query, Kwargs}
def query do
%Query{}
end
def query(kwargs) when is_list(kwargs) do
Kwargs.query(kwargs)
end
def render(x) do
Query.render(x)
end
end
end
def merge(%Query{sequence: seq1}, %Query{sequence: seq2}) do
%Query{sequence: seq2 ++ seq1 }
end
def put_sequence(%__MODULE__{sequence: prev_sequence} = d, prefix) when is_list(prefix) do
%{ d | sequence: prefix ++ prev_sequence }
end
def put_sequence(%__MODULE__{sequence: sequence} = d, item) do
%{ d | sequence: [ item | sequence ] }
end
def render(%Query{sequence: backwards_sequence}) do
case backwards_sequence |> Enum.reverse do
[ %Block{keywords: [{:func, _ } | _ ]} | _ ] = sequence ->
sequence
|> render_sequence
|> with_brackets
[ %{__struct__: module} | _ ] = sequence when module in @bracketed ->
sequence
|> render_sequence
|> with_brackets
sequence when is_list(sequence) ->
sequence
|> render_sequence
%{__struct__: module} = model ->
module.render(model)
end
end
def render(block) when is_tuple(block) do
Block.render(block)
end
def render(%{__struct__: module} = model) do
module.render(model)
end
defp render_sequence(sequence) do
sequence
|> Enum.map(fn
%{__struct__: module} = model -> module.render(model)
end)
|> Enum.join(" ")
end
defp with_brackets(rendered) do
"{ " <> rendered <> " }"
end
end | 20 | 92 | 0.587912 |
0838471c95d8b60626e71418a7a40583b565dc83 | 1,870 | ex | Elixir | lib/dashwallet_web/endpoint.ex | benlime/dashwallet | 90754cf9cda72b289d5b802cd9fd7eb094f08acb | [
"MIT"
] | 2 | 2017-11-15T20:47:47.000Z | 2017-12-02T11:29:10.000Z | lib/dashwallet_web/endpoint.ex | benlime/dashwallet | 90754cf9cda72b289d5b802cd9fd7eb094f08acb | [
"MIT"
] | null | null | null | lib/dashwallet_web/endpoint.ex | benlime/dashwallet | 90754cf9cda72b289d5b802cd9fd7eb094f08acb | [
"MIT"
] | null | null | null | defmodule DashwalletWeb.Endpoint do
use Phoenix.Endpoint, otp_app: :dashwallet
socket "/socket", DashwalletWeb.UserSocket
# Serve at "/" the static files from "priv/static" directory.
#
# You should set gzip to true if you are running phoenix.digest
# when deploying your static files in production.
plug Plug.Static,
at: "/", from: :dashwallet, gzip: false,
only: ~w(css fonts images js favicon.ico robots.txt)
# Code reloading can be explicitly enabled under the
# :code_reloader configuration of your endpoint.
if code_reloading? do
socket "/phoenix/live_reload/socket", Phoenix.LiveReloader.Socket
plug Phoenix.LiveReloader
plug Phoenix.CodeReloader
end
plug Plug.RequestId
plug Plug.Logger
plug Plug.Parsers,
parsers: [:urlencoded, :multipart, :json],
pass: ["*/*"],
json_decoder: Poison
plug Plug.MethodOverride
plug Plug.Head
# The session will be stored in the cookie and signed,
# this means its contents can be read but not tampered with.
# Set :encryption_salt if you would also like to encrypt it.
plug Plug.Session,
store: :cookie,
key: "_dashwallet_key",
signing_salt: "3LD4Q9Z7"
plug DashwalletWeb.Router
@doc """
Callback invoked for dynamically configuring the endpoint.
It receives the endpoint configuration and checks if
configuration should be loaded from the system environment.
"""
def init(_key, config) do
if config[:load_from_system_env] do
port = System.get_env("PORT") || raise "expected the PORT environment variable to be set"
host = System.get_env("HOST") || raise "expected the HOST environment variable to be set"
config = Keyword.put(config, :http, [:inet6, port: port])
config = Keyword.put(config, :url, [host: host, port: port])
{:ok, config}
else
{:ok, config}
end
end
end
| 30.16129 | 95 | 0.704278 |
0838765f7ce99326773961f10304a63388cd60dd | 380 | ex | Elixir | web/views/error_view.ex | whenther/chopsticks | ec8d9f90cb4e9afc9e80322c734d9c6bfe5e14e1 | [
"MIT"
] | 2 | 2016-11-08T18:17:41.000Z | 2017-02-23T06:51:56.000Z | web/views/error_view.ex | will-wow/chopsticks | ec8d9f90cb4e9afc9e80322c734d9c6bfe5e14e1 | [
"MIT"
] | null | null | null | web/views/error_view.ex | will-wow/chopsticks | ec8d9f90cb4e9afc9e80322c734d9c6bfe5e14e1 | [
"MIT"
] | null | null | null | defmodule Chopsticks.ErrorView do
use Chopsticks.Web, :view
def render("404.html", _assigns) do
"Page not found"
end
def render("500.html", _assigns) do
"Internal server error"
end
# In case no render clause matches or no
# template is found, let's render it as 500
def template_not_found(_template, assigns) do
render "500.html", assigns
end
end
| 21.111111 | 47 | 0.702632 |
0838a238168a809a9516255b1eb238578d5590cf | 3,852 | ex | Elixir | lib/teslamate/locations/geocoder.ex | tacotran/teslamate | 23f7c9ce8d2610d59991e5aa158958b00b069778 | [
"MIT"
] | 1 | 2020-08-10T03:16:35.000Z | 2020-08-10T03:16:35.000Z | lib/teslamate/locations/geocoder.ex | tacotran/teslamate | 23f7c9ce8d2610d59991e5aa158958b00b069778 | [
"MIT"
] | null | null | null | lib/teslamate/locations/geocoder.ex | tacotran/teslamate | 23f7c9ce8d2610d59991e5aa158958b00b069778 | [
"MIT"
] | null | null | null | defmodule TeslaMate.Locations.Geocoder do
alias Finch.Response
alias TeslaMate.HTTP
alias TeslaMate.Locations.Address
# Resolves a lat/lon pair to a normalized address map via Nominatim's
# reverse-geocoding endpoint. Any non-{:ok, _} result from fetch/3 is
# passed through unchanged.
def reverse_lookup(lat, lon, lang \\ "en") do
  query = [
    format: :jsonv2,
    addressdetails: 1,
    extratags: 1,
    namedetails: 1,
    zoom: 19,
    lat: lat,
    lon: lon
  ]

  case fetch("https://nominatim.openstreetmap.org/reverse", lang, query) do
    {:ok, address_raw} -> {:ok, into_address(address_raw)}
    other -> other
  end
end
# Looks up full address details for the given addresses via Nominatim's
# bulk lookup endpoint, skipping entries that lack either an OSM id or
# an OSM type. Returns {:ok, [address_map]} on success; errors from
# fetch/3 pass through unchanged.
def details(addresses, lang) when is_list(addresses) do
  osm_ids =
    addresses
    |> Enum.reject(fn %Address{osm_id: id, osm_type: type} -> is_nil(id) or is_nil(type) end)
    # Enum.map_join/3 builds the comma-separated list in a single pass
    # instead of map |> join; Nominatim expects ids like "N123"/"W123"/"R123"
    # (uppercased first letter of the OSM type, then the id).
    |> Enum.map_join(",", fn %Address{osm_id: id, osm_type: type} ->
      "#{type |> String.at(0) |> String.upcase()}#{id}"
    end)

  with {:ok, raw_addresses} <-
         fetch("https://nominatim.openstreetmap.org/lookup", lang,
           osm_ids: osm_ids,
           format: :jsonv2,
           addressdetails: 1,
           extratags: 1,
           namedetails: 1,
           zoom: 19
         ) do
    {:ok, Enum.map(raw_addresses, &into_address/1)}
  end
end
# Performs the GET request and normalizes the three possible outcomes:
# the decoded body on HTTP 200, the API's "error" field on any other
# status, and the transport reason on connection failure.
defp fetch(url, lang, params) do
  request_url = assemble_url(url, params)

  case HTTP.get(request_url, headers(lang), receive_timeout: 15_000) do
    {:ok, %Response{status: 200, body: body}} ->
      {:ok, Jason.decode!(body)}

    {:ok, %Response{body: body}} ->
      {:error, body |> Jason.decode!() |> Map.get("error")}

    {:error, %{reason: reason}} ->
      {:error, reason}
  end
end
# Appends the given params as an encoded query string to the base URL,
# replacing any query the base URL already carried.
defp assemble_url(url, params) do
  parsed = URI.parse(url)
  URI.to_string(%{parsed | query: URI.encode_query(params)})
end
# Default request headers for the Nominatim API; `lang` sets
# Accept-Language so results come back localized.
defp headers(lang) do
[
{"User-Agent", "TeslaMate"},
{"Content-Type", "application/json"},
{"Accept-Language", lang},
# NOTE(review): nonstandard capitalization ("Application/json; Charset=...").
# Media type matching is case-insensitive, so this likely works, but
# consider normalizing — confirm the API accepts it as-is.
{"Accept", "Application/json; Charset=utf-8"}
]
end
# Address Formatting
# Source: https://github.com/OpenCageData/address-formatting/blob/master/conf/components.yaml
# Keys that may carry the street/road component in a Nominatim "address" map.
@road_aliases [
"road",
"footway",
"street",
"street_name",
"residential",
"path",
"pedestrian",
"road_reference",
"road_reference_intl",
"square",
"place"
]
# Keys that may carry the neighbourhood/district component.
@neighbourhood_aliases [
"neighbourhood",
"suburb",
"city_district",
"district",
"quarter",
"residential",
"commercial",
"houses",
"subdivision"
]
# Keys that may carry the city/settlement component.
@city_aliases [
"city",
"town",
"municipality",
"village",
"hamlet",
"locality",
"croft"
]
# Keys that may carry the county component.
@county_aliases [
"county",
"local_administrative_area",
"county_code"
]
# Normalizes a raw Nominatim result map into the flat shape used by the
# rest of the app. Each component is resolved via the alias lists above;
# the untouched payload is kept under :raw.
# NOTE(review): when the payload has no "address" key, `raw["address"]`
# is nil and get_first/2 would receive nil — confirm get_first/2 handles
# that (or that Nominatim always includes "address").
defp into_address(raw) do
%{
display_name: Map.get(raw, "display_name"),
osm_id: Map.get(raw, "osm_id"),
osm_type: Map.get(raw, "osm_type"),
latitude: Map.get(raw, "lat"),
longitude: Map.get(raw, "lon"),
name:
Map.get(raw, "name") || get_in(raw, ["namedetails", "name"]) ||
get_in(raw, ["namedetails", "alt_name"]),
house_number: raw["address"] |> get_first(["house_number", "street_number"]),
road: raw["address"] |> get_first(@road_aliases),
neighbourhood: raw["address"] |> get_first(@neighbourhood_aliases),
city: raw["address"] |> get_first(@city_aliases),
county: raw["address"] |> get_first(@county_aliases),
postcode: get_in(raw, ["address", "postcode"]),
state: raw["address"] |> get_first(["state", "province", "state_code"]),
state_district: get_in(raw, ["address", "state_district"]),
country: raw["address"] |> get_first(["country", "country_name"]),
raw: raw
}
end
# Returns the value of the first alias key present in `address`.
# Falls back to nil when no alias matches, when the alias list is
# exhausted, or when there is no address map at all (into_address/1
# pipes in `raw["address"]`, which may be nil for results without an
# "address" key — previously that raised BadMapError in Map.get/2).
defp get_first(nil, _aliases), do: nil
defp get_first(_address, []), do: nil

defp get_first(address, [key | aliases]) do
  with nil <- Map.get(address, key), do: get_first(address, aliases)
end
end
| 26.937063 | 95 | 0.579439 |
0838bf7422bcd7e9cbe270c0f224bec060b689ff | 1,888 | ex | Elixir | clients/api_gateway/lib/google_api/api_gateway/v1beta/model/apigateway_list_locations_response.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2021-12-20T03:40:53.000Z | 2021-12-20T03:40:53.000Z | clients/api_gateway/lib/google_api/api_gateway/v1beta/model/apigateway_list_locations_response.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2020-08-18T00:11:23.000Z | 2020-08-18T00:44:16.000Z | clients/api_gateway/lib/google_api/api_gateway/v1beta/model/apigateway_list_locations_response.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.APIGateway.V1beta.Model.ApigatewayListLocationsResponse do
@moduledoc """
The response message for Locations.ListLocations.
## Attributes
* `locations` (*type:* `list(GoogleApi.APIGateway.V1beta.Model.ApigatewayLocation.t)`, *default:* `nil`) - A list of locations that matches the specified filter in the request.
* `nextPageToken` (*type:* `String.t`, *default:* `nil`) - The standard List next-page token.
"""
# Auto-generated model (see file header): ModelBase supplies the struct
# definition and (de)serialization plumbing via the field/1,2 macros below.
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:locations => list(GoogleApi.APIGateway.V1beta.Model.ApigatewayLocation.t()) | nil,
:nextPageToken => String.t() | nil
}
field(:locations, as: GoogleApi.APIGateway.V1beta.Model.ApigatewayLocation, type: :list)
field(:nextPageToken)
end
defimpl Poison.Decoder, for: GoogleApi.APIGateway.V1beta.Model.ApigatewayListLocationsResponse do
  # Delegate decoding to the generated decode/2 on the model module.
  def decode(value, options),
    do: GoogleApi.APIGateway.V1beta.Model.ApigatewayListLocationsResponse.decode(value, options)
end
defimpl Poison.Encoder, for: GoogleApi.APIGateway.V1beta.Model.ApigatewayListLocationsResponse do
  # Encoding is shared across all generated models via Gax.ModelBase.
  def encode(value, options), do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 37.76 | 180 | 0.751059 |
0838e6a4cd4e0bf67931a47e68b240f395e14652 | 377 | ex | Elixir | lib/jaya_currency_converter_web/controllers/fallback_controller.ex | franknfjr/jaya_currency_converter | 56dfcf40b2ed2c9307fa39d7a5d1121cf4a1a37e | [
"MIT"
] | null | null | null | lib/jaya_currency_converter_web/controllers/fallback_controller.ex | franknfjr/jaya_currency_converter | 56dfcf40b2ed2c9307fa39d7a5d1121cf4a1a37e | [
"MIT"
] | null | null | null | lib/jaya_currency_converter_web/controllers/fallback_controller.ex | franknfjr/jaya_currency_converter | 56dfcf40b2ed2c9307fa39d7a5d1121cf4a1a37e | [
"MIT"
] | null | null | null | defmodule JayaCurrencyConverterWeb.FallbackController do
@moduledoc false
use JayaCurrencyConverterWeb, :controller
# Changeset validation failures render as 406 with the changeset details.
def call(conn, {:error, %Ecto.Changeset{} = changeset}) do
  error_response(conn, :not_acceptable, changeset)
end

# Any other tagged error renders as 404 with the raw reason.
def call(conn, {:error, reason}) do
  error_response(conn, :not_found, reason)
end

# Shared JSON error envelope used by both clauses above.
defp error_response(conn, status, error) do
  conn
  |> put_status(status)
  |> json(%{error: error})
end
end
| 22.176471 | 60 | 0.676393 |
083906a1a38db7eaf0ce2a865a2072d24f7f055c | 1,257 | ex | Elixir | lib/epi_contacts_web/plugs/content_security_policy_header.ex | RatioPBC/epi-contacts | 6c43eea52cbfe2097f48b02e3d0c8fce3b46f1ee | [
"Apache-2.0"
] | null | null | null | lib/epi_contacts_web/plugs/content_security_policy_header.ex | RatioPBC/epi-contacts | 6c43eea52cbfe2097f48b02e3d0c8fce3b46f1ee | [
"Apache-2.0"
] | 13 | 2021-06-29T04:35:41.000Z | 2022-02-09T04:25:39.000Z | lib/epi_contacts_web/plugs/content_security_policy_header.ex | RatioPBC/epi-contacts | 6c43eea52cbfe2097f48b02e3d0c8fce3b46f1ee | [
"Apache-2.0"
] | null | null | null | defmodule EpiContactsWeb.Plugs.ContentSecurityPolicyHeader do
@moduledoc """
Sets the content security policy so that it it sufficiently restrictive while also allowing websocket connections.
Using this plug will allow websocket connections to the configured endpoint.
"""
import Plug.Conn
alias Euclid.Extra.Random
# Plug callback: options are passed through unchanged to call/2.
def init(opts \\ []), do: opts
# With a :header_override option the supplied policy string is used
# verbatim; a nonce is still generated for templates that reference it.
def call(conn, header_override: override) do
  put_resp_header(add_nonce(conn), "content-security-policy", override)
end

# Default: generate a nonce, then build the standard policy from it.
def call(conn, _opts) do
  add_csp(add_nonce(conn))
end
# Writes the computed policy into the response headers.
defp add_csp(conn), do: put_resp_header(conn, "content-security-policy", csp(conn))

# Stores a fresh random nonce in conn.private for csp/1 and templates.
defp add_nonce(conn), do: put_private(conn, :nonce, Random.string())
# Builds the Content-Security-Policy header value. The trailing
# backslashes escape the line breaks so no newline ends up in the header;
# connect-src whitelists the endpoint's own ws/wss origins so websocket
# connections to the configured endpoint are allowed (see @moduledoc).
defp csp(conn) do
# Nonce placed in conn.private by add_nonce/1 earlier in the pipeline.
nonce = conn.private[:nonce]
"default-src 'self'; \
img-src 'self' 'nonce-#{nonce}' 'unsafe-inline'; \
style-src 'self' 'nonce-#{nonce}' 'unsafe-inline'; \
connect-src 'self' #{url_for(conn, "ws")} #{url_for(conn, "wss")}; \
frame-ancestors 'none'"
end
# Rebuilds the endpoint's own URL with the given scheme ("ws"/"wss")
# and no explicit port, for use inside the connect-src directive.
defp url_for(conn, scheme) do
  endpoint_url = Phoenix.Controller.endpoint_module(conn).struct_url
  URI.to_string(%{endpoint_url | scheme: scheme, port: nil})
end
end
| 24.647059 | 116 | 0.674622 |
0839167a9611e61b9f283d12efadec1b10d37ca0 | 643 | ex | Elixir | Tracer/camera.ex | evansaboo/elixir-programming | 57408424914003091003430500473546c94354d9 | [
"MIT"
] | null | null | null | Tracer/camera.ex | evansaboo/elixir-programming | 57408424914003091003430500473546c94354d9 | [
"MIT"
] | null | null | null | Tracer/camera.ex | evansaboo/elixir-programming | 57408424914003091003430500473546c94354d9 | [
"MIT"
] | null | null | null | defmodule Camera do
defstruct pos: nil, corner: nil, right: nil, down: nil, size: nil
def normal(size) do
{width, height} = size
d = width * 1.2
h = width / 2
v = height / 2
pos = {0,0,0}
corner = {-h, v, d}
right = {1, 0, 0}
down = {0, -1, 0}
%Camera{pos: pos, corner: corner, right: right, down: down, size: size}
end
def ray(camera=%Camera{}, x, y) do
pos = camera.pos
x_pos = Vector.smul(camera.right, x)
y_pos = Vector.smul(camera.down, y)
pixle = Vector.add(camera.corner, Vector.add(x_pos, y_pos))
dir = Vector.normalize(pixle)
%Ray{pos: pos, dir: dir}
end
end
| 22.964286 | 75 | 0.583204 |
08395f17c35aad8dea4a680c419054534783f2cc | 448 | ex | Elixir | lib/free_fall/game/point.ex | Goose-Vic/free_fall | 84d95e33cbd0d6e06dd9170576726929e97b25ac | [
"MIT"
] | null | null | null | lib/free_fall/game/point.ex | Goose-Vic/free_fall | 84d95e33cbd0d6e06dd9170576726929e97b25ac | [
"MIT"
] | null | null | null | lib/free_fall/game/point.ex | Goose-Vic/free_fall | 84d95e33cbd0d6e06dd9170576726929e97b25ac | [
"MIT"
] | null | null | null | defmodule FreeFall.Game.Point do
def new(x, y), do: {x, y}
def left({x, y}), do: {x - 1, y}
def right({x, y}), do: {x + 1, y}
def down({x, y}), do: {x, y + 1}
defp flip({x, y}), do: {x, 5 - y}
defp flop({x, y}), do: {5 - x, y}
def rotate90(point), do: point |> transpose |> flop
def rotate180(point), do: point |> flip |> flop
def rotate270(point), do: point |> transpose |> flip
defp transpose({x, y}), do: {y, x}
end
| 20.363636 | 54 | 0.535714 |
08396bfe028416bf9506c3bc5794b7cda9d6f630 | 1,299 | ex | Elixir | lib/hexpm/web/controllers/api/repository_controller.ex | lau/hexpm | beee80f5358a356530debfea35ee65c3a0aa9b25 | [
"Apache-2.0"
] | null | null | null | lib/hexpm/web/controllers/api/repository_controller.ex | lau/hexpm | beee80f5358a356530debfea35ee65c3a0aa9b25 | [
"Apache-2.0"
] | null | null | null | lib/hexpm/web/controllers/api/repository_controller.ex | lau/hexpm | beee80f5358a356530debfea35ee65c3a0aa9b25 | [
"Apache-2.0"
] | null | null | null | defmodule Hexpm.Web.API.RepositoryController do
use Hexpm.Web, :controller
plug :fetch_repository when action in [:show]
plug :maybe_authorize, [domain: "api", resource: "read"] when action in [:index]
plug :maybe_authorize,
[domain: "api", resource: "read", fun: &organization_access/2]
when action in [:show]
def index(conn, _params) do
organizations =
Organizations.all_public() ++
all_by_user(conn.assigns.current_user) ++
all_by_organization(conn.assigns.current_organization)
when_stale(conn, organizations, [modified: false], fn conn ->
conn
|> api_cache(:logged_in)
|> render(:index, organizations: organizations)
end)
end
def show(conn, _params) do
organization = conn.assigns.organization
when_stale(conn, organization, fn conn ->
conn
|> api_cache(show_cachability(organization))
|> render(:show, organization: organization)
end)
end
defp all_by_user(nil), do: []
defp all_by_user(user), do: Organizations.all_by_user(user)
defp all_by_organization(nil), do: []
defp all_by_organization(organization), do: [organization]
defp show_cachability(%Organization{public: true}), do: :public
defp show_cachability(%Organization{public: false}), do: :private
end
| 30.209302 | 82 | 0.69746 |
08396d8a52811a1b95bd0a6f5d7d34bdd33ade6d | 1,571 | ex | Elixir | apps/astarte_appengine_api/lib/astarte_appengine_api_web/socket_guardian.ex | matt-mazzucato/astarte | 34d84941a5019efc42321052f7f34b7d907a38f2 | [
"Apache-2.0"
] | 191 | 2018-03-30T13:23:08.000Z | 2022-03-02T12:05:32.000Z | apps/astarte_appengine_api/lib/astarte_appengine_api_web/socket_guardian.ex | matt-mazzucato/astarte | 34d84941a5019efc42321052f7f34b7d907a38f2 | [
"Apache-2.0"
] | 402 | 2018-03-30T13:37:00.000Z | 2022-03-31T16:47:10.000Z | apps/astarte_appengine_api/lib/astarte_appengine_api_web/socket_guardian.ex | matt-mazzucato/astarte | 34d84941a5019efc42321052f7f34b7d907a38f2 | [
"Apache-2.0"
] | 24 | 2018-03-30T13:29:48.000Z | 2022-02-28T11:10:26.000Z | #
# This file is part of Astarte.
#
# Copyright 2017 Ispirata Srl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
defmodule Astarte.AppEngine.APIWeb.SocketGuardian do
use Guardian, otp_app: :astarte_appengine_api
alias Astarte.AppEngine.API.Auth.RoomsUser
def subject_for_token(%RoomsUser{id: id}, _claims) do
{:ok, to_string(id)}
end
def resource_from_claims(claims) do
channels_authz = Map.get(claims, "a_ch", [])
join_authz =
channels_authz
|> extract_authorization_paths("JOIN")
watch_authz =
channels_authz
|> extract_authorization_paths("WATCH")
{:ok,
%RoomsUser{
id: claims["sub"],
join_authorizations: join_authz,
watch_authorizations: watch_authz
}}
end
defp extract_authorization_paths(authorizations, match_prefix) do
Enum.reduce(authorizations, [], fn authorization, acc ->
with [^match_prefix, _opts, auth_path] <- String.split(authorization, ":", parts: 3) do
[auth_path | acc]
else
_ ->
acc
end
end)
end
end
| 27.561404 | 93 | 0.698281 |
08397057272a58c43bb55bff57e5c03bcc69dcfa | 8,664 | exs | Elixir | test/tag_cloud/compiler/color/parse_color_test.exs | RobertDober/tag_cloud | e3f1fca47b32691c54870d6f8b66a7f66bbd53d6 | [
"Apache-2.0"
] | null | null | null | test/tag_cloud/compiler/color/parse_color_test.exs | RobertDober/tag_cloud | e3f1fca47b32691c54870d6f8b66a7f66bbd53d6 | [
"Apache-2.0"
] | 3 | 2021-10-03T16:48:49.000Z | 2022-02-06T10:26:36.000Z | test/tag_cloud/compiler/color/parse_color_test.exs | RobertDober/tag_cloud | e3f1fca47b32691c54870d6f8b66a7f66bbd53d6 | [
"Apache-2.0"
] | null | null | null | defmodule Test.TagCloud.Compiler.Color.ParseColorTest do
use ExUnit.Case
import TagCloud.Compiler.Color, only: [parse_color: 1]
describe "gray scales" do
  # A bare scale index parses as a gray scale on black (#000000).
  # Generated with `for`/`unquote` to replace 13 hand-copied test bodies;
  # test names and assertions are unchanged.
  for i <- 0..12 do
    test "#{i}" do
      result = parse_color("#{unquote(i)}")
      expected = {unquote(i), "000000"}
      assert result == expected
    end
  end
end
describe "red scales" do
  # "N/red" parses as scale N on pure red (#ff0000).
  # Generated with `for`/`unquote`; test names and assertions unchanged.
  for i <- 0..12 do
    test "#{i}" do
      result = parse_color("#{unquote(i)}/red")
      expected = {unquote(i), "ff0000"}
      assert result == expected
    end
  end
end
describe "lime scales" do
  # "N/lime" parses as scale N on pure green (#00ff00).
  # Generated with `for`/`unquote`; test names and assertions unchanged.
  for i <- 0..12 do
    test "#{i}" do
      result = parse_color("#{unquote(i)}/lime")
      expected = {unquote(i), "00ff00"}
      assert result == expected
    end
  end
end
describe "blue scales" do
  # "N/blue" parses as scale N on pure blue (#0000ff).
  # The commented-out EEx template left here previously is now realized
  # directly as compile-time test generation; names/assertions unchanged.
  for i <- 0..12 do
    test "#{i}" do
      result = parse_color("#{unquote(i)}/blue")
      expected = {unquote(i), "0000ff"}
      assert result == expected
    end
  end
end
describe "miscelaneous scales" do
  # "N/#rrggbb" parses as scale N on the given explicit hex color.
  # Generated from the same cases the old EEx template comment listed;
  # test names and assertions unchanged.
  for {i, hex} <- [{0, "dfec0a"}, {3, "010203"}, {12, "f0f0da"}] do
    test "#{i}" do
      result = parse_color("#{unquote(i)}/##{unquote(hex)}")
      expected = {unquote(i), unquote(hex)}
      assert result == expected
    end
  end
end
describe "illegal spex" do
  # Every malformed spec must be rejected with TagCloud.Error.
  # Generated from the same inputs the old EEx template comment listed;
  # test names and assertions unchanged.
  for illegal <- ["", "23/", "#abcdeg", "#aaaaa", "#bbbbbbb", "012345", "1/010203"] do
    test "#{illegal}" do
      assert_raise TagCloud.Error, fn ->
        parse_color(unquote(illegal))
      end
    end
  end
end
end
| 21.445545 | 97 | 0.53301 |
083978c1df8a730ce4a981ddac9db3c8101fc690 | 2,114 | exs | Elixir | config/releases.exs | xprazak2/papercups | 925e7c20ab868648e078a129e832856026c50424 | [
"MIT"
] | 1 | 2021-01-18T09:57:23.000Z | 2021-01-18T09:57:23.000Z | config/releases.exs | xprazak2/papercups | 925e7c20ab868648e078a129e832856026c50424 | [
"MIT"
] | 1 | 2021-01-17T10:42:34.000Z | 2021-01-17T10:42:34.000Z | config/releases.exs | xprazak2/papercups | 925e7c20ab868648e078a129e832856026c50424 | [
"MIT"
] | null | null | null | import Config
database_url =
System.get_env("DATABASE_URL") ||
raise """
environment variable DATABASE_URL is missing.
For example: ecto://USER:PASS@HOST/DATABASE
"""
secret_key_base =
System.get_env("SECRET_KEY_BASE") ||
raise """
environment variable SECRET_KEY_BASE is missing.
You can generate one by calling: mix phx.gen.secret
"""
backend_url =
System.get_env("BACKEND_URL") ||
raise """
environment variable BACKEND_URL is missing.
For example: myselfhostedwebsite.com or papercups.io
"""
# Configure your database
config :chat_api, ChatApi.Repo,
url: database_url,
show_sensitive_data_on_connection_error: false,
pool_size: 10
ssl_key_path = System.get_env("SSL_KEY_PATH")
ssl_cert_path = System.get_env("SSL_CERT_PATH")
https = (ssl_cert_path && ssl_key_path) != nil
if https do
config :chat_api, ChatApiWeb.Endpoint,
http: [
port: String.to_integer(System.get_env("PORT") || "4000"),
transport_options: [socket_opts: [:inet6]]
],
url: [host: backend_url],
pubsub_server: ChatApi.PubSub,
secret_key_base: secret_key_base,
https: [
port: 443,
cipher_suite: :strong,
otp_app: :hello,
keyfile: ssl_key_path,
certfile: ssl_cert_path
],
server: true
else
config :chat_api, ChatApiWeb.Endpoint,
http: [
port: String.to_integer(System.get_env("PORT") || "4000"),
transport_options: [socket_opts: [:inet6]]
],
url: [host: backend_url],
pubsub_server: ChatApi.PubSub,
secret_key_base: secret_key_base,
server: true
end
# https = (ssl_cert_path && ssl_key_path) != nil
# # ## SSL Support
# #
# # To get SSL working, you will need to add the `https` key
# # to the previous section and set your `:url` port to 443:
# if ssl_key_path && ssl_cert_path do
# config :chat_api, ChatApiWeb.Endpoint,
# url: [host: "example.com", port: 443],
# https: [
# port: 443,
# cipher_suite: :strong,
# keyfile: ssl_key_path,
# certfile: ssl_cert_path,
# transport_options: [socket_opts: [:inet6]]
# ]
# end
| 26.759494 | 64 | 0.668401 |
083990fe891ffbc6d07904f147e78d82124ffbac | 1,409 | ex | Elixir | clients/cloud_kms/lib/google_api/cloud_kms/v1/model/destroy_crypto_key_version_request.ex | leandrocp/elixir-google-api | a86e46907f396d40aeff8668c3bd81662f44c71e | [
"Apache-2.0"
] | null | null | null | clients/cloud_kms/lib/google_api/cloud_kms/v1/model/destroy_crypto_key_version_request.ex | leandrocp/elixir-google-api | a86e46907f396d40aeff8668c3bd81662f44c71e | [
"Apache-2.0"
] | null | null | null | clients/cloud_kms/lib/google_api/cloud_kms/v1/model/destroy_crypto_key_version_request.ex | leandrocp/elixir-google-api | a86e46907f396d40aeff8668c3bd81662f44c71e | [
"Apache-2.0"
] | 1 | 2020-11-10T16:58:27.000Z | 2020-11-10T16:58:27.000Z | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the swagger code generator program.
# https://github.com/swagger-api/swagger-codegen.git
# Do not edit the class manually.
defmodule GoogleApi.CloudKMS.V1.Model.DestroyCryptoKeyVersionRequest do
@moduledoc """
Request message for KeyManagementService.DestroyCryptoKeyVersion.
## Attributes
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{}
end
defimpl Poison.Decoder, for: GoogleApi.CloudKMS.V1.Model.DestroyCryptoKeyVersionRequest do
def decode(value, options) do
GoogleApi.CloudKMS.V1.Model.DestroyCryptoKeyVersionRequest.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.CloudKMS.V1.Model.DestroyCryptoKeyVersionRequest do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 32.767442 | 90 | 0.775727 |
08399697232eac2a36aec20280462b29bb723583 | 2,497 | ex | Elixir | lib/imdb.ex | yatender-oktalk/moviematch | c8de8752a96cfb63b30fc72e28d61501b977f982 | [
"MIT"
] | null | null | null | lib/imdb.ex | yatender-oktalk/moviematch | c8de8752a96cfb63b30fc72e28d61501b977f982 | [
"MIT"
] | null | null | null | lib/imdb.ex | yatender-oktalk/moviematch | c8de8752a96cfb63b30fc72e28d61501b977f982 | [
"MIT"
] | null | null | null | defmodule MovieMatch.Imdb do
@imdb_url Application.get_env(:moviematch, :imdb_url)
def build_url(imdb_user_id) do
String.replace(@imdb_url, "imdb_user_id", imdb_user_id)
end
defp make_request(url) do
# try using tesla library also, HTTPoison and HTTPotion has some limitations
#what if it's not :ok? try handling in case
case HTTPoison.get(url) do
{:ok, response} -> response
{:error, msg} -> msg
end
end
defp parse_html(response) do
response.body
|> Floki.find(".ab_widget")
|> Enum.fetch(0)
|> elem(1)
|> elem(2)
|> Enum.fetch(1)
|> elem(1)
|> elem(2)
|> Enum.fetch(0)
|> elem(1)
|> String.split(";\n")
|> Enum.fetch(4)
|> elem(1)
end
defp build_json(script_str) do
script_str
|> String.trim()
|> String.replace("IMDbReactInitialState.push(", "")
|> String.replace(")", "")
|> String.replace("false", "111")
|> String.replace("null", "111")
|> String.replace("\'", "111")
|> String.replace(")", "")
|> String.replace("'", "\"")
|> Poison.decode!()
end
defp id_title({k, v}) do
#handle error cases also with else syntax in `with` otherwise there
#is no proper use case of using it, we can directly use pipe if you are not handling else cases
# I have given example below, in case you are fully sure that this value is gonna be there then you can directly use v["primary"] or Aceess.get(v, "primary")
# usually when
with {:ok, primary} = Map.fetch(v, "primary"),
{:ok, movie_title} = Map.fetch(primary, "title"),
do: %{id: k, title: movie_title}
# else
# {:error} <- "something"
# _ <- "something"
# end
end
# what if this is not a map? where is other case?
defp movie_id_titles(map) when is_map(map) do
#don't use ok syntax unless you are handling error , you can directly use
# movie_ids = map["titles"] || "in case null you can give default like this"
{:ok, movie_ids} = Map.fetch(map, "titles")
items = Enum.map(movie_ids, &id_title/1)
Enum.reduce(items, %{}, fn e, acc -> Map.put(acc, e.id, e.title) end)
end
#handle here otherwise it'll say that pattern not found otherwise dont' check in function above that it's a map or not
defp movie_id_titles(_not_map_case) do
[]
end
def fetch_imdb_watchlist(imdb_user_id) do
imdb_user_id
|> build_url()
|> make_request()
|> parse_html()
|> build_json()
|> movie_id_titles()
end
end
| 29.72619 | 161 | 0.626752 |
0839a3aa88496ab43cbb05c4dbc2d94271b3e02f | 1,438 | exs | Elixir | test/ecto_fields_test.exs | tjamesking/ecto_fields | 17e93f28c0b6ab20f1b2f0ba2fff98eaa5b37559 | [
"MIT"
] | 24 | 2016-06-27T22:24:35.000Z | 2022-02-19T01:16:21.000Z | test/ecto_fields_test.exs | tjamesking/ecto_fields | 17e93f28c0b6ab20f1b2f0ba2fff98eaa5b37559 | [
"MIT"
] | 9 | 2017-07-29T18:41:40.000Z | 2021-11-22T05:37:45.000Z | test/ecto_fields_test.exs | tjamesking/ecto_fields | 17e93f28c0b6ab20f1b2f0ba2fff98eaa5b37559 | [
"MIT"
] | 9 | 2017-07-29T18:14:14.000Z | 2021-03-15T23:16:36.000Z | defmodule EctoFieldsTest do
use ExUnit.Case
use PropCheck
doctest(EctoFields)
doctest(EctoFields.Email)
doctest(EctoFields.IP)
doctest(EctoFields.IPv4)
doctest(EctoFields.IPv6)
doctest(EctoFields.PositiveInteger)
doctest(EctoFields.Slug)
doctest(EctoFields.URL)
doctest(EctoFields.Atom)
doctest(EctoFields.Static)
# Round-trip property: for arbitrary atoms (plain or UTF-8), cast/1 and
# load/1 of the atom's string form recover the atom, and dump/1 of the
# atom recovers the string.
property("EctoFields.Atom") do
forall(atom in oneof([atom(), atom_utf8()])) do
string = Atom.to_string(atom)
{:ok, atom} == EctoFields.Atom.cast(string) and {:ok, atom} == EctoFields.Atom.load(string) and
{:ok, string} == EctoFields.Atom.dump(atom)
end
end
# PropCheck custom (:wrapper) type generating UTF-8 atoms, wired to the
# generator/reverse-generator/membership helpers below. Size is capped
# at 255 — presumably the atom-length limit; confirm.
@doc false
defp atom_utf8() do
:proper_types.new_type(
[
{:generator, &atom_utf8_gen/1},
{:reverse_gen, &atom_utf8_rev/1},
{:size_transform, fn size -> :erlang.min(size, 255) end},
{:is_instance, &atom_utf8_is_instance/1}
],
:wrapper
)
end
# Generates an atom from a random UTF-8 binary sized by `size`, rejecting
# non-empty candidates that start with "$" (see the :"$used" tag in
# atom_utf8_rev/1 — "$"-prefixed names appear reserved by the framework).
@doc false
defp atom_utf8_gen(size) when is_integer(size) and size >= 0 do
let(string <- such_that(x <- :proper_unicode.utf8(size), when: x === <<>> or :binary.first(x) !== ?$)) do
:erlang.binary_to_atom(string, :utf8)
end
end
# Reverse generator: wraps an existing atom in the :"$used" tuple form
# (charlist representation plus the atom itself) expected by :proper.
@doc false
defp atom_utf8_rev(atom) when is_atom(atom) do
{:"$used", :erlang.atom_to_list(atom), atom}
end
# Membership test for the custom type: any atom except non-empty names
# beginning with "$" (mirrors the generator's such_that filter).
@doc false
defp atom_utf8_is_instance(x) do
is_atom(x) and (x === :"" or :erlang.hd(:erlang.atom_to_list(x)) !== ?$)
end
end
| 25.678571 | 109 | 0.652295 |
0839ae783126d8729dcad1d950e2bdb1c3d601f4 | 29,218 | ex | Elixir | lib/dataloader/ecto.ex | absinthe-graphql/dataloader | 84a5297e28b537b4409ce0987cddafe9d15df55e | [
"MIT"
] | 415 | 2017-10-18T22:27:17.000Z | 2022-03-31T06:28:37.000Z | lib/dataloader/ecto.ex | absinthe-graphql/dataloader | 84a5297e28b537b4409ce0987cddafe9d15df55e | [
"MIT"
] | 108 | 2017-10-24T07:17:06.000Z | 2022-03-20T00:19:57.000Z | lib/dataloader/ecto.ex | absinthe-graphql/dataloader | 84a5297e28b537b4409ce0987cddafe9d15df55e | [
"MIT"
] | 83 | 2017-11-15T06:15:38.000Z | 2022-03-20T00:26:44.000Z | if Code.ensure_loaded?(Ecto) do
defmodule Dataloader.Ecto do
@moduledoc """
Ecto source for Dataloader
This defines a schema and an implementation of the `Dataloader.Source` protocol
for handling Ecto related batching.
A simple Ecto source only needs to know about your application's Repo.
## Basic Usage
Querying by primary key (analogous to Ecto.Repo.get/3):
```elixir
source = Dataloader.Ecto.new(MyApp.Repo)
loader =
Dataloader.new
|> Dataloader.add_source(Accounts, source)
|> Dataloader.load(Accounts, User, 1)
|> Dataloader.load_many(Accounts, Organization, [4, 9])
|> Dataloader.run
organizations = Dataloader.get_many(loader, Accounts, Organization, [4,9])
```
Querying for associations. Here we look up the `:users` association on all
the organizations, and the `:organization` for a single user.
```elixir
loader =
loader
|> Dataloader.load(Accounts, :organization, user)
|> Dataloader.load_many(Accounts, :users, organizations)
|> Dataloader.run
```
Querying by a column other than the primary key:
```elixir
loader =
loader
|> Dataloader.load(Accounts, {:one, User}, name: "admin")
|> Dataloader.run
```
Here we pass a keyword list of length one. It is only possible to
query by one column here; for more complex queries, see "filtering" below.
Notice here that we need to also specify the cardinality in the batch_key
(`:many` or `:one`), which will decide whether to return a list or a single
value (or nil). This is because the column may not be a key and there may be
multiple matching records. Note also that even if we are returning `:many` values
here from multiple matching records, this is still a call to `Dataloader.load/4`
rather than `Dataloader.load_many/4` because there is only one value specified.
## Filtering / Ordering
`Dataloader.Ecto.new/2` can receive a 2 arity function that can be used to apply
broad ordering and filtering rules, as well as handle parameters
```elixir
source = Dataloader.Ecto.new(MyApp.Repo, query: &Accounts.query/2)
loader =
Dataloader.new
|> Dataloader.add_source(Accounts, source)
```
When we call `Dataloader.load/4` we can pass in a tuple as the batch key with a keyword list
of parameters in addition to the queryable or assoc_field
```elixir
# with a queryable
loader
|> Dataloader.load(Accounts, {User, order: :name}, 1)
# or an association
loader
|> Dataloader.load_many(Accounts, {:users, order: :name}, organizations)
# this is still supported
loader
|> Dataloader.load(Accounts, User, 1)
# as is this
loader
|> Dataloader.load(:accounts, :user, organization)
```
In all cases the `Accounts.query` function would be:
```elixir
def query(User, params) do
field = params[:order] || :id
from u in User, order_by: [asc: field(u, ^field)]
end
def query(queryable, _) do
queryable
end
```
If we query something that ends up using the `User` schema, whether directly
or via association, the `query/2` function will match on the first clause and
we can handle the params. If no params are supplied, the params arg defaults
to `source.default_params` which itself defaults to `%{}`.
`default_params` is an extremely useful place to store values like the current user:
```elixir
source = Dataloader.Ecto.new(MyApp.Repo, [
query: &Accounts.query/2,
default_params: %{current_user: current_user},
])
loader =
Dataloader.new
|> Dataloader.add_source(Accounts, source)
|> Dataloader.load_many(Accounts, Organization, ids)
|> Dataloader.run
# the query function
def query(Organization, %{current_user: user}) do
from o in Organization,
join: m in assoc(o, :memberships),
where: m.user_id == ^user.id
end
def query(queryable, _) do
queryable
end
```
In our query function we are pattern matching on the current user to make sure
that we are only able to lookup data in organizations that the user actually
has a membership in. Additional options you specify IE `{Organization, %{order: :asc}}`
are merged into the default.
## Custom batch queries
There are cases where you want to run the batch function yourself. To do this
we can add a custom `run_batch/5` callback to our source.
The `run_batch/5` function is executed with the query returned from the `query/2`
function.
For example, we want to get the post count for a set of users.
First we add a custom `run_batch/5` function.
```
def run_batch(_, query, :post_count, users, repo_opts) do
user_ids = Enum.map(users, & &1.id)
default_count = 0
result =
query
|> where([p], p.user_id in ^user_ids)
|> group_by([p], p.user_id)
|> select([p], {p.user_id, count("*")})
|> Repo.all(repo_opts)
|> Map.new()
for %{id: id} <- users do
[Map.get(result, id, default_count)]
end
end
# Fallback to original run_batch
def run_batch(queryable, query, col, inputs, repo_opts) do
Dataloader.Ecto.run_batch(Repo, queryable, query, col, inputs, repo_opts)
end
```
This function is supplied with a list of users, runs a query, and returns
the post count for each user. If the user id is not found in the result set,
because the user has no posts, we return a post count of 0.
Now we need to call `run_batch/5` from dataloader. First we add a few posts
to the database.
After that, the custom `run_batch/5` function is provided to the Dataloader
source. Now, we can load the post count for several users. When the dataloader
runs it will call the custom `run_batch/5` and we can retrieve the posts counts
for each individual user.
```
[user1, user2] = [%User{id: 1}, %User{id: 2}]
rows = [
%{user_id: user1.id, title: "foo", published: true},
%{user_id: user1.id, title: "baz", published: false}
]
_ = Repo.insert_all(Post, rows)
source =
Dataloader.Ecto.new(
Repo,
query: &query/2,
run_batch: &run_batch/5
)
loader =
Dataloader.new()
|> Dataloader.add_source(Posts, source)
loader =
loader
|> Dataloader.load(Posts, {:one, Post}, post_count: user1)
|> Dataloader.load(Posts, {:one, Post}, post_count: user2)
|> Dataloader.run()
# Returns 2
Dataloader.get(loader, Posts, {:one, Post}, post_count: user1)
# Returns 0
Dataloader.get(loader, Posts, {:one, Post}, post_count: user2)
```
Additional params for the `query/2` function can be passed to the load functions
with a 3-tuple.
For example, to limit the above example to only return published we can add a query
function to filter the published posts:
```
def query(Post, %{published: published}) do
from p in Post,
where: p.published == ^published
end
def query(queryable, _) do
queryable
end
```
And we can return the published posts with a 3-tuple on the loader:
```
loader =
loader
|> Dataloader.load(Posts, {:one, Post}, post_count: user1)
|> Dataloader.load(Posts, {:one, Post, %{published: true}}, post_count: user1)
|> Dataloader.run()
# Returns 2
Dataloader.get(loader, Posts, {:one, Post}, post_count: user1)
# Returns 1
Dataloader.get(loader, Posts, {:one, Post, %{published: true}}, post_count: user1)
```
"""
defstruct [
:repo,
:query,
:run_batch,
repo_opts: [],
batches: %{},
results: %{},
default_params: %{},
options: []
]
@type t :: %__MODULE__{
repo: Ecto.Repo.t(),
query: query_fun,
repo_opts: repo_opts,
batches: map,
results: map,
default_params: map,
run_batch: batch_fun,
options: Keyword.t()
}
@type query_fun :: (Ecto.Queryable.t(), any -> Ecto.Queryable.t())
@type repo_opts :: Keyword.t()
@type batch_fun :: (Ecto.Queryable.t(), Ecto.Query.t(), any, [any], repo_opts -> [any])
@type opt ::
{:query, query_fun}
| {:default_params, map()}
| {:repo_opts, repo_opts}
| {:timeout, pos_integer}
| {:run_batch, batch_fun()}
import Ecto.Query
@doc """
Create an Ecto Dataloader source.
This module handles retrieving data from Ecto for dataloader. It requires a
valid Ecto Repo. It also accepts a `repo_opts:` option which is handy for
applying options to any calls to Repo functions that this module makes.
For example, you can use this module in a multi-tenant context by using
the `prefix` option:
```
Dataloader.Ecto.new(MyApp.Repo, repo_opts: [prefix: "tenant"])
```
"""
@spec new(Ecto.Repo.t(), [opt]) :: t
def new(repo, opts \\ []) do
  # User-supplied options win over these defaults (merge semantics match
  # the previous Keyword.put_new behavior).
  defaults = [
    query: &query/2,
    run_batch: fn queryable, query, col, inputs, repo_opts ->
      run_batch(repo, queryable, query, col, inputs, repo_opts)
    end
  ]

  fields = Keyword.merge(defaults, opts)

  # Only :timeout is carried into the source's `options` field; any other
  # keys in `opts` set struct fields directly (unknown keys are ignored by
  # `struct/2`).
  struct(%__MODULE__{repo: repo, options: Keyword.take(opts, [:timeout])}, fields)
end
@doc """
Default implementation for loading a batch. Handles looking up records by
column
"""
@spec run_batch(
repo :: Ecto.Repo.t(),
queryable :: Ecto.Queryable.t(),
query :: Ecto.Query.t(),
col :: any,
inputs :: [any],
repo_opts :: repo_opts
) :: [any]
def run_batch(repo, queryable, query, col, inputs, repo_opts) do
  # Fetch all matching rows in one query, then hand each input its own
  # (reversed-back-into-fetch-order) list of rows; inputs with no matching
  # rows get an empty list.
  grouped =
    col
    |> load_rows(inputs, queryable, query, repo, repo_opts)
    |> group_results(col)

  Enum.map(inputs, fn input ->
    grouped
    |> Map.get(input, [])
    |> Enum.reverse()
  end)
end
# Fetches the rows for one batch. When the query carries a limit/offset and
# the batch column is not the primary key, a plain `WHERE col IN ^inputs`
# would apply the limit across the whole batch instead of per input value,
# so we fall back to the lateral-join strategy; otherwise a single IN query
# is sufficient.
defp load_rows(col, inputs, queryable, query, repo, repo_opts) do
  pk = queryable.__schema__(:primary_key)

  case query do
    %Ecto.Query{limit: limit, offset: offset}
    when pk != [col] and (not is_nil(limit) or not is_nil(offset)) ->
      load_rows_lateral(col, inputs, queryable, query, repo, repo_opts)

    _ ->
      query
      |> where([q], field(q, ^col) in ^inputs)
      |> repo.all(repo_opts)
  end
end
# Lateral-join variant of `load_rows`: effectively runs `query` once per
# distinct input value so the query's limit/offset applies per value rather
# than across the whole batch.
defp load_rows_lateral(col, inputs, queryable, query, repo, repo_opts) do
  # Approximate a postgres unnest with a subquery
  inputs_query =
    queryable
    |> where([q], field(q, ^col) in ^inputs)
    |> select(^[col])
    |> distinct(true)

  # Each lateral invocation filters on the current value from `inputs_query`
  # via the named binding `:input`.
  inner_query =
    query
    |> where([q], field(q, ^col) == field(parent_as(:input), ^col))
    |> exclude(:preload)

  results =
    from(input in subquery(inputs_query), as: :input)
    |> join(:inner_lateral, q in subquery(inner_query))
    |> select([_input, q], q)
    |> repo.all(repo_opts)

  case query.preloads do
    [] -> results
    # Preloads can't be used in a subquery, using Repo.preload instead
    preloads -> repo.preload(results, preloads, repo_opts)
  end
end
# Groups fetched rows by the value of the batch column. Each key maps to its
# rows in *reverse* fetch order; callers reverse the lists when reading.
defp group_results(results, col) do
  Enum.reduce(results, %{}, fn row, acc ->
    key = Map.get(row, col)
    Map.update(acc, key, [row], fn rows -> [row | rows] end)
  end)
end
# Default query function: use the schema as-is, ignoring params.
defp query(schema, _params), do: schema
defimpl Dataloader.Source do
# Executes all pending batches (safely, in a task) and merges the fresh
# results into any previously cached ones, then clears the batch queue.
def run(source) do
  fresh = Dataloader.async_safely(__MODULE__, :run_batches, [source])

  merged =
    Map.merge(source.results, fresh, fn _batch_key, {:ok, old}, {:ok, new} ->
      {:ok, Map.merge(old, new)}
    end)

  %{source | results: merged, batches: %{}}
end
# Looks up an already-loaded item: normalizes the batch key, then digs into
# the cached results for that batch.
def fetch(source, batch_key, item) do
  {batch_key, item_key, _item} =
    batch_key
    |> normalize_key(source.default_params)
    |> get_keys(item)

  case Map.fetch(source.results, batch_key) do
    {:ok, batch} ->
      fetch_item_from_batch(batch, item_key)

    :error ->
      {:error, "Unable to find batch #{inspect(batch_key)}"}
  end
end
# A batch that failed to load propagates its error unchanged.
defp fetch_item_from_batch({:error, _reason} = error, _item_key), do: error

# A successful batch either contains the item or yields a descriptive error.
defp fetch_item_from_batch({:ok, batch}, item_key) do
  with :error <- Map.fetch(batch, item_key) do
    {:error, "Unable to find item #{inspect(item_key)} in batch"}
  end
end
# Ignore attempts to cache a not-loaded association placeholder.
def put(source, _batch, _item, %Ecto.Association.NotLoaded{}), do: source

# Caches `result` under the normalized batch/item key, creating the batch's
# result map on first write.
def put(source, batch, item, result) do
  {batch_key, item_key, _item} =
    batch
    |> normalize_key(source.default_params)
    |> get_keys(item)

  updated =
    Map.update(source.results, batch_key, {:ok, %{item_key => result}}, fn {:ok, existing} ->
      {:ok, Map.put(existing, item_key, result)}
    end)

  %{source | results: updated}
end
# Queues `item` for loading unless its result is already cached.
def load(source, batch, item) do
  {batch_key, item_key, item} =
    batch
    |> normalize_key(source.default_params)
    |> get_keys(item)

  case fetched?(source.results, batch_key, item_key) do
    true ->
      source

    false ->
      entry = {item_key, item}

      batches =
        Map.update(source.batches, batch_key, MapSet.new([entry]), &MapSet.put(&1, entry))

      %{source | batches: batches}
  end
end
# True when a successful result for `batch_key`/`item_key` is already cached.
defp fetched?(results, batch_key, item_key) do
  match?(%{^batch_key => {:ok, %{^item_key => _}}}, results)
end
# A source has pending work when its batch queue map is non-empty.
def pending_batches?(%{batches: batches}), do: map_size(batches) > 0
# Per-source timeout, or nil when none was configured.
def timeout(%{options: options}), do: Keyword.get(options, :timeout)
# Resolves an association path down to the queryable (schema module) it
# finally points at, expanding has-through associations recursively.
defp chase_down_queryable([field], schema) do
  case schema.__schema__(:association, field) do
    # Direct associations expose the target schema; HasThrough has no
    # :queryable field, so this clause never swallows it.
    %{queryable: queryable} ->
      queryable

    %Ecto.Association.HasThrough{through: through} ->
      chase_down_queryable(through, schema)

    val ->
      raise """
      Valid association #{field} not found on schema #{inspect(schema)}
      Got: #{inspect(val)}
      """
  end
end

defp chase_down_queryable([field | fields], schema) do
  # NOTE(review): unlike the single-field clause above, an unknown name here
  # raises a CaseClauseError instead of the friendly message — presumably
  # unreachable in practice, but worth confirming/aligning.
  case schema.__schema__(:association, field) do
    %{queryable: queryable} ->
      chase_down_queryable(fields, queryable)

    %Ecto.Association.HasThrough{through: [through_field | through_fields]} ->
      # Splice the through steps in front of the remaining path and retry
      # from the same schema.
      [through_field | through_fields ++ fields]
      |> chase_down_queryable(schema)
  end
end
defp get_keys({assoc_field, opts}, %schema{} = record) when is_atom(assoc_field) do
validate_queryable(schema)
primary_keys = schema.__schema__(:primary_key)
id = Enum.map(primary_keys, &Map.get(record, &1))
queryable = chase_down_queryable([assoc_field], schema)
{{:assoc, schema, self(), assoc_field, queryable, opts}, id, record}
end
defp get_keys({{cardinality, queryable}, opts}, value) when is_atom(queryable) do
validate_queryable(queryable)
{_, col, value} = normalize_value(queryable, value)
{{:queryable, self(), queryable, cardinality, col, opts}, value, value}
end
defp get_keys({queryable, opts}, value) when is_atom(queryable) do
validate_queryable(queryable)
case normalize_value(queryable, value) do
{:primary, col, value} ->
{{:queryable, self(), queryable, :one, col, opts}, value, value}
{:not_primary, col, _value} ->
raise """
Cardinality required unless using primary key
The non-primary key column specified was: #{inspect(col)}
"""
end
end
defp get_keys(key, item) do
raise """
Invalid: #{inspect(key)}
#{inspect(item)}
The batch key must either be a schema module, or an association name.
"""
end
# Asserts that `queryable` is an Ecto schema module. A loaded module without
# `__schema__/1` raises a plain error; an atom that is not a module at all
# triggers the rescue clause (`__info__/1` is undefined) and raises a
# `Dataloader.GetError` with a more targeted hint.
defp validate_queryable(queryable) do
  unless {:__schema__, 1} in queryable.__info__(:functions) do
    raise "The given module - #{queryable} - is not an Ecto schema."
  end
rescue
  _ in UndefinedFunctionError ->
    raise Dataloader.GetError, """
    The given atom - #{inspect(queryable)} - is not a module.
    This can happen if you intend to pass an Ecto struct in your call to
    `dataloader/4` but pass something other than a struct.
    """
end
# Classifies a single `[column: value]` lookup as targeting the primary key
# or some other column.
defp normalize_value(queryable, [{col, value}]) do
  case queryable.__schema__(:primary_key) do
    [^col] ->
      {:primary, col, value}

    _ ->
      {:not_primary, col, value}
  end
end

# A bare value means a lookup by the (single) primary key; a composite
# primary key deliberately fails the `[primary_key]` match here.
defp normalize_value(queryable, value) do
  [primary_key] = queryable.__schema__(:primary_key)
  {:primary, primary_key, value}
end
# This code was totally OK until cardinalities showed up. Now it's ugly :(
# It is however correct, which is nice.
@cardinalities [:one, :many]

# Normalizes every accepted batch-key shape to `{key, params}`, where
# `params` is the user-supplied params merged over the source defaults.

# {:one, Schema} -> {{:one, Schema}, []}
defp normalize_key({cardinality, queryable}, default_params)
     when cardinality in @cardinalities do
  normalize_key({{cardinality, queryable}, []}, default_params)
end

# {:one, Schema, params} -> {{:one, Schema}, params}
defp normalize_key({cardinality, queryable, params}, default_params)
     when cardinality in @cardinalities do
  normalize_key({{cardinality, queryable}, params}, default_params)
end

# {key, params} -> merge the params over the defaults.
defp normalize_key({key, params}, default_params) do
  {key, Enum.into(params, default_params)}
end

# Bare key -> just the defaults.
defp normalize_key(key, default_params) do
  {key, default_params}
end
# Executes every pending batch concurrently (one task per batch) and returns
# a map from batch key to `{:ok, results} | {:error, reason}`. Each batch is
# wrapped in telemetry start/stop events; tasks exceeding the timeout are
# killed and surface through the `{:exit, reason}` branch below.
def run_batches(source) do
  options = [
    timeout: source.options[:timeout] || Dataloader.default_timeout(),
    on_timeout: :kill_task
  ]

  batches = Enum.to_list(source.batches)

  results =
    batches
    |> Task.async_stream(
      fn batch ->
        id = :erlang.unique_integer()
        system_time = System.system_time()
        start_time_mono = System.monotonic_time()

        emit_start_event(id, system_time, batch)
        batch_result = run_batch(batch, source)
        emit_stop_event(id, start_time_mono, batch)

        batch_result
      end,
      options
    )
    |> Enum.map(fn
      {:ok, {_key, result}} -> {:ok, result}
      {:exit, reason} -> {:error, reason}
    end)

  # Task.async_stream/3 preserves input order, so zipping the batch keys
  # with the outcomes re-associates each result with its batch.
  batches
  |> Enum.map(fn {key, _set} -> key end)
  |> Enum.zip(results)
  |> Map.new()
end
# Runs one `{:queryable, ...}` batch: casts the raw inputs to the batch
# column's Ecto type (so e.g. string ids match integer columns), delegates
# to the source's run_batch function, maps each result through the
# cardinality mapper, and keys the results by the *uncast* inputs so later
# lookups with the original values succeed.
defp run_batch(
       {{:queryable, pid, queryable, cardinality, col, opts} = key, entries},
       source
     ) do
  inputs = Enum.map(entries, &elem(&1, 0))

  query = source.query.(queryable, opts)

  # Attribute the query to the process that requested the load.
  repo_opts = Keyword.put(source.repo_opts, :caller, pid)

  cardinality_mapper = cardinality_mapper(cardinality, queryable)

  coerced_inputs =
    if type = queryable.__schema__(:type, col) do
      for input <- inputs do
        {:ok, input} = Ecto.Type.cast(type, input)
        input
      end
    else
      inputs
    end

  results =
    queryable
    |> source.run_batch.(query, col, coerced_inputs, repo_opts)
    |> Enum.map(cardinality_mapper)

  results =
    inputs
    |> Enum.zip(results)
    |> Map.new()

  {key, results}
end

# Runs one `{:assoc, ...}` batch: preloads `field` on every record and keys
# the loaded association data by each record's primary-key value(s). A query
# carrying a limit/offset uses the lateral-join preload so the limit applies
# per record rather than across the whole batch.
defp run_batch({{:assoc, schema, pid, field, queryable, opts} = key, records}, source) do
  {ids, records} = Enum.unzip(records)

  query = source.query.(queryable, opts) |> Ecto.Queryable.to_query()

  repo_opts = Keyword.put(source.repo_opts, :caller, pid)

  # Reset the association to its not-loaded placeholder so a stale preload
  # on an incoming record cannot leak into the results.
  empty = schema |> struct |> Map.fetch!(field)
  records = records |> Enum.map(&Map.put(&1, field, empty))

  results =
    if query.limit || query.offset do
      records
      |> preload_lateral(field, query, source.repo, repo_opts)
    else
      records
      |> source.repo.preload([{field, query}], repo_opts)
    end

  results = results |> Enum.map(&Map.get(&1, field))
  {key, Map.new(Enum.zip(ids, results))}
end
# An empty batch has nothing to preload. This clause must be arity 5 to
# match the call site (`preload_lateral(records, field, query, repo,
# repo_opts)`); the previous arity-4 head could never match, so an empty
# record list crashed with a FunctionClauseError.
def preload_lateral([], _assoc, _query, _repo, _repo_opts), do: []
# Preloads `assoc` on `structs` via an INNER LATERAL JOIN so the query's
# limit/offset applies per parent row instead of across the whole set.
# Loaded data is grouped back onto each struct: a :one association receives
# a single value (or nil), a :many association a list (or []).
def preload_lateral([%schema{} = struct | _] = structs, assoc, query, repo, repo_opts) do
  # Asserts a single-column primary key on the parent schema.
  [pk] = schema.__schema__(:primary_key)

  # Carry the database prefix across from already-loaded records if not already set
  repo_opts = Keyword.put_new(repo_opts, :prefix, struct.__meta__.prefix)

  assocs = expand_assocs(schema, [assoc])

  query_excluding_preloads = exclude(query, :preload)

  # Build the lateral subquery by walking the association chain backwards.
  inner_query =
    assocs
    |> Enum.reverse()
    |> build_preload_lateral_query(query_excluding_preloads, :join_first)
    |> maybe_distinct(assocs)

  results =
    from(x in schema,
      as: :parent,
      inner_lateral_join: y in subquery(inner_query),
      where: field(x, ^pk) in ^Enum.map(structs, &Map.get(&1, pk)),
      select: {field(x, ^pk), y}
    )
    |> repo.all(repo_opts)

  results =
    case query.preloads do
      [] ->
        results

      # Preloads can't be used in a subquery, using Repo.preload instead
      preloads ->
        {keys, vals} = Enum.unzip(results)
        vals = repo.preload(vals, preloads, repo_opts)
        Enum.zip(keys, vals)
    end

  # Index the loaded rows by parent pk; `default` is the value for parents
  # with no associated rows.
  {keyed, default} =
    case schema.__schema__(:association, assoc) do
      %{cardinality: :one} ->
        {results |> Map.new(), nil}

      %{cardinality: :many} ->
        {Enum.group_by(results, fn {k, _} -> k end, fn {_, v} -> v end), []}
    end

  structs
  |> Enum.map(&Map.put(&1, assoc, Map.get(keyed, Map.get(&1, pk), default)))
end
# Expands an association path into the flat list of association reflections
# it traverses, resolving has-through associations into their underlying
# steps along the way.
defp expand_assocs(_schema, []), do: []

defp expand_assocs(schema, [assoc | rest]) do
  case schema.__schema__(:association, assoc) do
    %Ecto.Association.HasThrough{through: through} ->
      # Splice the through steps into the path and retry from this schema.
      expand_assocs(schema, through ++ rest)

    a ->
      [a | expand_assocs(a.queryable, rest)]
  end
end
# Builds the inner query of the lateral-join preload by consuming the
# (reversed) association chain one reflection at a time. `:join_first` marks
# the very first step, whose filters can reference binding 0 directly; every
# subsequent step is `:join_last` and must address the most recently joined
# binding via `[..., x]`. The `binds_count - n` offsets select which binding
# each association's `where`/`join_where` filter is applied to after the
# joins above have been added.

# Terminal many-to-many step as the first join: join the join-through table
# onto binding 0 and correlate with the lateral parent.
defp build_preload_lateral_query(
       [%Ecto.Association.ManyToMany{} = assoc],
       query,
       :join_first
     ) do
  [{owner_join_key, owner_key}, {related_join_key, related_key}] = assoc.join_keys

  join_query =
    query
    |> join(:inner, [x], y in ^assoc.join_through,
      on: field(x, ^related_key) == field(y, ^related_join_key)
    )
    |> where([..., x], field(x, ^owner_join_key) == field(parent_as(:parent), ^owner_key))

  binds_count = Ecto.Query.Builder.count_binds(join_query)

  # assoc.where applies to the related schema (bind 0); join_where to the
  # join-through table (last bind).
  join_query
  |> Ecto.Association.combine_joins_query(assoc.where, 0)
  |> Ecto.Association.combine_joins_query(assoc.join_where, binds_count - 1)
end

# Terminal many-to-many step after earlier joins: same shape, but the
# related schema is the previous last binding rather than bind 0.
defp build_preload_lateral_query(
       [%Ecto.Association.ManyToMany{} = assoc],
       query,
       :join_last
     ) do
  [{owner_join_key, owner_key}, {related_join_key, related_key}] = assoc.join_keys

  join_query =
    query
    |> join(:inner, [..., x], y in ^assoc.join_through,
      on: field(x, ^related_key) == field(y, ^related_join_key)
    )
    |> where([..., x], field(x, ^owner_join_key) == field(parent_as(:parent), ^owner_key))

  binds_count = Ecto.Query.Builder.count_binds(join_query)

  join_query
  |> Ecto.Association.combine_joins_query(assoc.where, binds_count - 2)
  |> Ecto.Association.combine_joins_query(assoc.join_where, binds_count - 1)
end

# Terminal plain (has/belongs_to) step as the first join: no extra join
# needed, just correlate binding 0 with the lateral parent.
defp build_preload_lateral_query([assoc], query, :join_first) do
  query
  |> where([x], field(x, ^assoc.related_key) == field(parent_as(:parent), ^assoc.owner_key))
  |> Ecto.Association.combine_assoc_query(assoc.where)
end

# Terminal plain step after earlier joins: correlate the last binding with
# the lateral parent.
defp build_preload_lateral_query([assoc], query, :join_last) do
  join_query =
    query
    |> where(
      [..., x],
      field(x, ^assoc.related_key) == field(parent_as(:parent), ^assoc.owner_key)
    )

  binds_count = Ecto.Query.Builder.count_binds(join_query)

  join_query
  |> Ecto.Association.combine_joins_query(assoc.where, binds_count - 1)
end

# Intermediate many-to-many step as the first join: join the join-through
# table and then the owner schema, then continue down the chain.
defp build_preload_lateral_query(
       [%Ecto.Association.ManyToMany{} = assoc | rest],
       query,
       :join_first
     ) do
  [{owner_join_key, owner_key}, {related_join_key, related_key}] = assoc.join_keys

  join_query =
    query
    |> join(:inner, [x], y in ^assoc.join_through,
      on: field(x, ^related_key) == field(y, ^related_join_key)
    )
    |> join(:inner, [..., x], y in ^assoc.owner,
      on: field(x, ^owner_join_key) == field(y, ^owner_key)
    )

  binds_count = Ecto.Query.Builder.count_binds(join_query)

  query =
    join_query
    |> Ecto.Association.combine_joins_query(assoc.where, 0)
    |> Ecto.Association.combine_joins_query(assoc.join_where, binds_count - 2)

  build_preload_lateral_query(rest, query, :join_last)
end

# Intermediate many-to-many step after earlier joins.
defp build_preload_lateral_query(
       [%Ecto.Association.ManyToMany{} = assoc | rest],
       query,
       :join_last
     ) do
  [{owner_join_key, owner_key}, {related_join_key, related_key}] = assoc.join_keys

  join_query =
    query
    |> join(:inner, [..., x], y in ^assoc.join_through,
      on: field(x, ^related_key) == field(y, ^related_join_key)
    )
    |> join(:inner, [..., x], y in ^assoc.owner,
      on: field(x, ^owner_join_key) == field(y, ^owner_key)
    )

  binds_count = Ecto.Query.Builder.count_binds(join_query)

  query =
    join_query
    |> Ecto.Association.combine_joins_query(assoc.where, binds_count - 3)
    |> Ecto.Association.combine_joins_query(assoc.join_where, binds_count - 2)

  build_preload_lateral_query(rest, query, :join_last)
end

# Intermediate plain step as the first join: join the owner schema onto
# binding 0 and continue.
defp build_preload_lateral_query(
       [assoc | rest],
       query,
       :join_first
     ) do
  query =
    query
    |> join(:inner, [x], y in ^assoc.owner,
      on: field(x, ^assoc.related_key) == field(y, ^assoc.owner_key)
    )
    |> Ecto.Association.combine_joins_query(assoc.where, 0)

  build_preload_lateral_query(rest, query, :join_last)
end

# Intermediate plain step after earlier joins: filter the current last
# binding *before* joining the owner schema on top of it.
defp build_preload_lateral_query(
       [assoc | rest],
       query,
       :join_last
     ) do
  binds_count = Ecto.Query.Builder.count_binds(query)

  join_query =
    query
    |> Ecto.Association.combine_joins_query(assoc.where, binds_count - 1)
    |> join(:inner, [..., x], y in ^assoc.owner,
      on: field(x, ^assoc.related_key) == field(y, ^assoc.owner_key)
    )

  build_preload_lateral_query(rest, join_query, :join_last)
end
# Walks the association chain and forces DISTINCT when a hop can fan out and
# fold back (a has -> belongs_to pair, or any many-to-many), which would
# otherwise produce duplicate rows.
defp maybe_distinct(query, assocs) do
  case assocs do
    [%Ecto.Association.Has{}, %Ecto.Association.BelongsTo{} | _] -> distinct(query, true)
    [%Ecto.Association.ManyToMany{} | _] -> distinct(query, true)
    [_assoc | rest] -> maybe_distinct(query, rest)
    [] -> query
  end
end
# Telemetry: a batch is about to run.
defp emit_start_event(id, system_time, batch) do
  measurements = %{system_time: system_time}
  metadata = %{id: id, batch: batch}

  :telemetry.execute([:dataloader, :source, :batch, :run, :start], measurements, metadata)
end
# Telemetry: a batch has finished; duration is measured on the monotonic clock.
defp emit_stop_event(id, start_time_mono, batch) do
  measurements = %{duration: System.monotonic_time() - start_time_mono}
  metadata = %{id: id, batch: batch}

  :telemetry.execute([:dataloader, :source, :batch, :run, :stop], measurements, metadata)
end
# Returns a function that shapes each raw batch result according to the
# requested cardinality: :many always yields a list; :one yields a single
# value, nil for no rows, and raises when multiple rows come back.
defp cardinality_mapper(:many, _queryable) do
  fn
    values when is_list(values) -> values
    value -> [value]
  end
end

defp cardinality_mapper(:one, queryable) do
  fn result ->
    case result do
      [] ->
        nil

      [single] ->
        single

      multiple when is_list(multiple) ->
        raise Ecto.MultipleResultsError, queryable: queryable, count: length(multiple)

      other ->
        other
    end
  end
end
end
end
end
| 30.918519 | 98 | 0.586385 |
0839c9e91313c11da8398eb8d52e0a9c565ed46c | 123 | ex | Elixir | apps/firestorm/lib/firestorm_web/views/admin/user_view.ex | firestormforum/firestorm_elixir | 80caba13daa21ef6087aa85f6cf0dd1f016e9aef | [
"MIT"
] | 14 | 2020-01-03T04:13:48.000Z | 2021-12-01T16:16:24.000Z | apps/firestorm/lib/firestorm_web/views/admin/user_view.ex | firestormforum/firestorm_elixir | 80caba13daa21ef6087aa85f6cf0dd1f016e9aef | [
"MIT"
] | 1 | 2019-10-07T23:31:52.000Z | 2019-10-07T23:31:52.000Z | apps/firestorm/lib/firestorm_web/views/admin/user_view.ex | firestormforum/firestorm_elixir | 80caba13daa21ef6087aa85f6cf0dd1f016e9aef | [
"MIT"
] | 4 | 2020-01-20T16:43:18.000Z | 2022-03-03T23:05:37.000Z | defmodule FirestormWeb.Admin.UserView do
use FirestormWeb, :view
import Torch.TableView
import Torch.FilterView
end
| 17.571429 | 40 | 0.804878 |
0839f049ebda0b14f5ab6796d36c404e27c8df6f | 97 | exs | Elixir | test/blueprint/application_test.exs | ScrimpyCat/Blueprint | 3a2e02318a2f09d80b4a18d54644e4567bf86156 | [
"BSD-2-Clause"
] | 1 | 2017-12-04T12:43:04.000Z | 2017-12-04T12:43:04.000Z | test/blueprint/application_test.exs | ScrimpyCat/Blueprint | 3a2e02318a2f09d80b4a18d54644e4567bf86156 | [
"BSD-2-Clause"
] | null | null | null | test/blueprint/application_test.exs | ScrimpyCat/Blueprint | 3a2e02318a2f09d80b4a18d54644e4567bf86156 | [
"BSD-2-Clause"
] | null | null | null | defmodule Blueprint.ApplicationTest do
use ExUnit.Case
doctest Blueprint.Application
end
| 19.4 | 38 | 0.804124 |
083a246f3528cb64648858c4701dfddbc2d97c82 | 1,792 | ex | Elixir | clients/dlp/lib/google_api/dlp/v2/model/google_privacy_dlp_v2_quasi_id_field.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/dlp/lib/google_api/dlp/v2/model/google_privacy_dlp_v2_quasi_id_field.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/dlp/lib/google_api/dlp/v2/model/google_privacy_dlp_v2_quasi_id_field.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.DLP.V2.Model.GooglePrivacyDlpV2QuasiIdField do
  @moduledoc """
  A quasi-identifier column has a custom_tag, used to know which column
  in the data corresponds to which column in the statistical model.

  ## Attributes

  * `customTag` (*type:* `String.t`, *default:* `nil`) - An auxiliary field.
  * `field` (*type:* `GoogleApi.DLP.V2.Model.GooglePrivacyDlpV2FieldId.t`, *default:* `nil`) - Identifies the column.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :customTag => String.t(),
          :field => GoogleApi.DLP.V2.Model.GooglePrivacyDlpV2FieldId.t()
        }

  field(:customTag)
  # Nested model field: `:as` names the module used to (de)serialize it.
  field(:field, as: GoogleApi.DLP.V2.Model.GooglePrivacyDlpV2FieldId)
end
# Poison decode hook: delegates to the generated model's `decode/2`.
defimpl Poison.Decoder, for: GoogleApi.DLP.V2.Model.GooglePrivacyDlpV2QuasiIdField do
  def decode(value, options) do
    GoogleApi.DLP.V2.Model.GooglePrivacyDlpV2QuasiIdField.decode(value, options)
  end
end

# Poison encode hook: delegates to the shared Gax model encoder.
defimpl Poison.Encoder, for: GoogleApi.DLP.V2.Model.GooglePrivacyDlpV2QuasiIdField do
  def encode(value, options) do
    GoogleApi.Gax.ModelBase.encode(value, options)
  end
end
| 35.137255 | 119 | 0.741629 |
083a6d627d0d566451fd6c0e3c646453b7ef6e52 | 5,456 | ex | Elixir | clients/android_management/lib/google_api/android_management/v1/model/application_policy.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | clients/android_management/lib/google_api/android_management/v1/model/application_policy.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | clients/android_management/lib/google_api/android_management/v1/model/application_policy.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the swagger code generator program.
# https://github.com/swagger-api/swagger-codegen.git
# Do not edit the class manually.
defmodule GoogleApi.AndroidManagement.V1.Model.ApplicationPolicy do
@moduledoc """
Policy for an individual app.
## Attributes
- defaultPermissionPolicy (String.t): The default policy for all permissions requested by the app. If specified, this overrides the policy-level default_permission_policy which applies to all apps. It does not override the permission_grants which applies to all apps. Defaults to: `null`.
- Enum - one of [PERMISSION_POLICY_UNSPECIFIED, PROMPT, GRANT, DENY]
- delegatedScopes ([String.t]): The scopes delegated to the app from Android Device Policy. Defaults to: `null`.
- Enum - one of
- disabled (boolean()): Whether the app is disabled. When disabled, the app data is still preserved. Defaults to: `null`.
- installType (String.t): The type of installation to perform. Defaults to: `null`.
- Enum - one of [INSTALL_TYPE_UNSPECIFIED, PREINSTALLED, FORCE_INSTALLED, BLOCKED, AVAILABLE]
- lockTaskAllowed (boolean()): Whether the app is allowed to lock itself in full-screen mode. Defaults to: `null`.
- managedConfiguration (%{optional(String.t) => String.t}): Managed configuration applied to the app. The format for the configuration is dictated by the ManagedProperty values supported by the app. Each field name in the managed configuration must match the key field of the ManagedProperty. The field value must be compatible with the type of the ManagedProperty: <table> <tr><td><i>type</i></td><td><i>JSON value</i></td></tr> <tr><td>BOOL</td><td>true or false</td></tr> <tr><td>STRING</td><td>string</td></tr> <tr><td>INTEGER</td><td>number</td></tr> <tr><td>CHOICE</td><td>string</td></tr> <tr><td>MULTISELECT</td><td>array of strings</td></tr> <tr><td>HIDDEN</td><td>string</td></tr> <tr><td>BUNDLE_ARRAY</td><td>array of objects</td></tr> </table> Defaults to: `null`.
- managedConfigurationTemplate (ManagedConfigurationTemplate): The managed configurations template for the app, saved from the managed configurations iframe. This field is ignored if managed_configuration is set. Defaults to: `null`.
- minimumVersionCode (integer()): The minimum version of the app that runs on the device. If set, the device attempts to update the app to at least this version code. If the app is not up-to-date, the device will contain a NonComplianceDetail with non_compliance_reason set to APP_NOT_UPDATED. The app must already be published to Google Play with a version code greater than or equal to this value. At most 20 apps may specify a minimum version code per policy. Defaults to: `null`.
- packageName (String.t): The package name of the app. For example, com.google.android.youtube for the YouTube app. Defaults to: `null`.
- permissionGrants ([PermissionGrant]): Explicit permission grants or denials for the app. These values override the default_permission_policy and permission_grants which apply to all apps. Defaults to: `null`.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:defaultPermissionPolicy => any(),
:delegatedScopes => list(any()),
:disabled => any(),
:installType => any(),
:lockTaskAllowed => any(),
:managedConfiguration => map(),
:managedConfigurationTemplate =>
GoogleApi.AndroidManagement.V1.Model.ManagedConfigurationTemplate.t(),
:minimumVersionCode => any(),
:packageName => any(),
:permissionGrants => list(GoogleApi.AndroidManagement.V1.Model.PermissionGrant.t())
}
field(:defaultPermissionPolicy)
field(:delegatedScopes, type: :list)
field(:disabled)
field(:installType)
field(:lockTaskAllowed)
field(:managedConfiguration, type: :map)
field(
:managedConfigurationTemplate,
as: GoogleApi.AndroidManagement.V1.Model.ManagedConfigurationTemplate
)
field(:minimumVersionCode)
field(:packageName)
field(:permissionGrants, as: GoogleApi.AndroidManagement.V1.Model.PermissionGrant, type: :list)
end
defimpl Poison.Decoder, for: GoogleApi.AndroidManagement.V1.Model.ApplicationPolicy do
  # Delegate decoding to the model module's own `decode/2`, which resolves
  # nested model structs field by field.
  def decode(value, options),
    do: GoogleApi.AndroidManagement.V1.Model.ApplicationPolicy.decode(value, options)
end
defimpl Poison.Encoder, for: GoogleApi.AndroidManagement.V1.Model.ApplicationPolicy do
  # All generated models share the same encoding logic from ModelBase.
  def encode(value, options), do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 64.952381 | 1,110 | 0.73717 |
083a7d70e58e22de8e4b642eb03355839c4679fd | 3,239 | exs | Elixir | 11/part1.exs | seantanly/elixir-advent_of_code | 1e39ac46bc01f5c8cffd2d2f79f9af0b71767291 | [
"MIT"
] | 3 | 2016-01-18T01:14:45.000Z | 2017-05-11T09:14:49.000Z | 11/part1.exs | seantanly/elixir-advent_of_code | 1e39ac46bc01f5c8cffd2d2f79f9af0b71767291 | [
"MIT"
] | null | null | null | 11/part1.exs | seantanly/elixir-advent_of_code | 1e39ac46bc01f5c8cffd2d2f79f9af0b71767291 | [
"MIT"
defmodule Password do
  @moduledoc """
  Advent of Code 2015, day 11: next valid corporate password.

  Passwords are charlists of lowercase letters. The next valid password is
  found by repeatedly incrementing (odometer-style over `a..z`) until the
  candidate:

    * contains none of the forbidden letters `i`, `o`, `l`
    * contains an increasing straight of at least three letters (e.g. `abc`)
    * contains at least two non-overlapping pairs of different letters
  """

  @invalid_chars [?i, ?o, ?l]
  @chars_start ?a
  @chars_end ?z
  @chars_base_length @chars_end - @chars_start + 1

  @doc """
  Increments `char` by `amt`, skipping forbidden letters.

  Returns `{new_char, carry}` where `carry` counts how many times the value
  wrapped past `z`.
  """
  def inc_char(char, amt) do
    base = char - @chars_start + amt
    val = rem(base, @chars_base_length) + @chars_start

    if val in @invalid_chars do
      # Landed on a forbidden letter — bump one further.
      inc_char(char, amt + 1)
    else
      carry = div(base, @chars_base_length)
      {val, carry}
    end
  end

  @doc """
  Normalizes the input: at the first forbidden letter, bumps it past the
  forbidden value and resets every later position to `a`, yielding the
  smallest clean candidate at or after the input.
  """
  def clean_input(chars), do: clean_input(chars, [], false) |> Enum.reverse()

  def clean_input([], acc, _), do: acc

  # Once a forbidden letter has been bumped, every later position resets to `a`.
  def clean_input([_char | chars], acc, _reset = true),
    do: clean_input(chars, [@chars_start | acc], true)

  def clean_input([char | chars], acc, false) do
    if char in @invalid_chars do
      {new_char, carry} = inc_char(char, 1)
      # inc_char only ever moves i->j, o->p, l->m here, so carry is always 0;
      # the else branch is kept defensively.
      acc = if carry == 0, do: acc, else: inc(acc, carry, []) |> Enum.reverse()
      clean_input(chars, [new_char | acc], true)
    else
      clean_input(chars, [char | acc], false)
    end
  end

  @doc "Increments a password by one, odometer-style (e.g. `az` -> `ba`)."
  def inc(chars) do
    inc(Enum.reverse(chars), 1, [])
  end

  def inc([], carry, acc) do
    if carry == 0 do
      acc
    else
      # Overflow past the leftmost position grows the password (zz -> aaa).
      {new_char, carry} = inc_char(@chars_start, carry - 1)
      inc([], carry, [new_char | acc])
    end
  end

  def inc([char | chars], carry, acc) do
    {new_char, carry} = inc_char(char, carry)
    inc(chars, carry, [new_char | acc])
  end

  @doc "True if the password contains a forbidden letter (`i`, `o`, or `l`)."
  def contains_invalid_chars?(chars) do
    # Direct membership test. The previous implementation compiled a fresh
    # regex on every call, which dominated the cost of the search loop.
    Enum.any?(chars, &(&1 in @invalid_chars))
  end

  @doc "True if the password contains three consecutive increasing letters."
  def contains_straight_of_three?([char | chars]),
    do: contains_straight_of_three?(chars, {char, 1})

  def contains_straight_of_three?(_, {_prev, count}) when count == 3, do: true
  def contains_straight_of_three?([], _), do: false

  def contains_straight_of_three?([char | chars], {prev, count}) do
    if char == prev + 1 do
      contains_straight_of_three?(chars, {char, count + 1})
    else
      contains_straight_of_three?(chars, {char, 1})
    end
  end

  @doc "True if the password has two non-overlapping pairs of different letters."
  def contains_two_different_pairs?(chars), do: contains_two_different_pairs?(chars, [], 0)

  def contains_two_different_pairs?(_, _, count) when count >= 2, do: true
  def contains_two_different_pairs?([], _, _), do: false

  def contains_two_different_pairs?(chars, found_pairs, count) do
    {pair, char_rest} = Enum.split(chars, 2)

    case pair do
      [x, x] ->
        if x in found_pairs do
          # The same letter pairing again doesn't count — skip past it.
          contains_two_different_pairs?(char_rest, found_pairs, count)
        else
          contains_two_different_pairs?(char_rest, [x | found_pairs], count + 1)
        end

      _ ->
        # Not a pair at this offset — slide forward by one character.
        [_ | char_rest] = chars
        contains_two_different_pairs?(char_rest, found_pairs, count)
    end
  end

  @doc "Returns the next valid password strictly after `cur`."
  def next_password(cur), do: cur |> clean_input |> do_next_password

  def do_next_password(cur) do
    new = inc(cur)

    if valid?(new) do
      new
    else
      next_password(new)
    end
  end

  # All three policy rules must hold for a candidate to be accepted.
  defp valid?(chars) do
    not contains_invalid_chars?(chars) and
      contains_straight_of_three?(chars) and
      contains_two_different_pairs?(chars)
  end
end
# Verify the worked examples from the puzzle description (prints each result).
['abcdffaa', 'ghjaabcc'] =
  ['abcdefgh', 'ghijklmn']
  |> Enum.map(&Password.next_password/1)
  |> IO.inspect()

# Solve the real puzzle input and assert the known answer.
'hepxxyzz' =
  'hepxcrrq'
  |> Password.next_password()
  |> IO.inspect()
| 27.922414 | 104 | 0.642174 |
083abdee0b556307ffd3ab977bb227ac236d7da9 | 1,924 | ex | Elixir | clients/container_analysis/lib/google_api/container_analysis/v1alpha1/model/volume.ex | corp-momenti/elixir-google-api | fe1580e305789ab2ca0741791b8ffe924bd3240c | [
"Apache-2.0"
] | null | null | null | clients/container_analysis/lib/google_api/container_analysis/v1alpha1/model/volume.ex | corp-momenti/elixir-google-api | fe1580e305789ab2ca0741791b8ffe924bd3240c | [
"Apache-2.0"
] | null | null | null | clients/container_analysis/lib/google_api/container_analysis/v1alpha1/model/volume.ex | corp-momenti/elixir-google-api | fe1580e305789ab2ca0741791b8ffe924bd3240c | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.ContainerAnalysis.V1alpha1.Model.Volume do
  @moduledoc """
  Volume describes a Docker container volume which is mounted into build steps in order to persist files across build step execution.

  ## Attributes

  *   `name` (*type:* `String.t`, *default:* `nil`) - Name of the volume to mount. Volume names must be unique per build step and must be valid names for Docker volumes. Each named volume must be used by at least two build steps.
  *   `path` (*type:* `String.t`, *default:* `nil`) - Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths.
  """

  # Pulls in the `field/1,2` macro and the (de)serialization plumbing shared
  # by all generated model modules.
  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :name => String.t() | nil,
          :path => String.t() | nil
        }

  # Docker volume name; see the attribute docs above for uniqueness rules.
  field(:name)
  # Absolute mount path of the volume inside the build step container.
  field(:path)
end
defimpl Poison.Decoder, for: GoogleApi.ContainerAnalysis.V1alpha1.Model.Volume do
  # Delegate decoding to the model module's own `decode/2`.
  def decode(value, options),
    do: GoogleApi.ContainerAnalysis.V1alpha1.Model.Volume.decode(value, options)
end
defimpl Poison.Encoder, for: GoogleApi.ContainerAnalysis.V1alpha1.Model.Volume do
  # All generated models share the same encoding logic from ModelBase.
  def encode(value, options), do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 38.48 | 229 | 0.735967 |
083abe664c3bc9a781ef105d13272ffefa786aa0 | 10,012 | ex | Elixir | lib/mix/tasks/git_ops.release.ex | garfenter/git_ops | f6e64cf9b798024e0be7cec06fc006a8372cffb3 | [
"MIT"
] | null | null | null | lib/mix/tasks/git_ops.release.ex | garfenter/git_ops | f6e64cf9b798024e0be7cec06fc006a8372cffb3 | [
"MIT"
] | null | null | null | lib/mix/tasks/git_ops.release.ex | garfenter/git_ops | f6e64cf9b798024e0be7cec06fc006a8372cffb3 | [
"MIT"
defmodule Mix.Tasks.GitOps.Release do
  use Mix.Task

  @shortdoc "Parses the commit log and writes any updates to the changelog"

  @moduledoc """
  Updates project changelog, and any other configured release capabilities.

      mix git_ops.release

  Logs an error for any commits that were not parseable.

  In the case that the prior version was a pre-release and this one is not,
  the version is only updated via removing the pre-release identifier.

  For more information on semantic versioning, including pre release and build identifiers,
  see the specification here: https://semver.org/

  ## Switches:

  * `--initial` - Creates the first changelog, and sets the version to whatever the
    configured mix project's version is.

  * `--pre-release` - Sets this release to be a pre release, using the configured
    string as the pre release identifier. This is a manual process, and results in
    an otherwise unchanged version. (Does not change the minor version).
    The version number will only change if a *higher* version number bump is required
    than what was originally changed in the creation of the RC. For instance, if patch
    was changed when creating the pre-release, and no fixes or features were added when
    requesting a new pre-release, then the version will not change. However, if the last
    pre-release had only a patch version bump, but a major change has since been added,
    the version will be changed accordingly.

  * `--rc` - Overrides the presence of `--pre-release`, and manages an incrementing
    identifier as the prerelease. This will look like `1.0.0-rc0` `1.0.0-rc1` and so
    forth. See the `--pre-release` flag for information on when the version will change
    for a pre-release. In the case that the version must change, the counter for
    the release candidate counter will be reset as well.

  * `--build` - Sets the release build metadata. Build information has no semantic
    meaning to the version itself, and so is simply attached to the end and is to
    be used to describe the build conditions for that release. You might build the
    same version many times, and this can be used to denote that in whatever way
    you choose.

  * `--force-patch` - In cases where this task is run, but the version should not
    change, this option will force the patch number to be incremented.

  * `--no-major` - Forces major version changes to instead only result in minor version
    changes. This would be a common option for libraries that are still in 0.x.x phases
    where 1.0.0 should only happen at some specified milestones. After that, it is important
    to *not* resist a 2.x.x change just because it doesn't seem like it deserves it.
    Semantic versioning uses this major version change to communicate, and it should not be
    reserved.

  * `--dry-run` - Allow users to run release process and view changes without committing and tagging

  * `--yes` - Don't prompt for confirmation, just perform release. Useful for your CI run.
  """

  alias GitOps.Changelog
  alias GitOps.Commit
  alias GitOps.Config
  alias GitOps.Git
  alias GitOps.VersionReplace

  @doc false
  def run(args) do
    opts = get_opts(args)

    Config.mix_project_check(opts)

    mix_project_module = Config.mix_project()
    mix_project = mix_project_module.project()

    changelog_file = Config.changelog_file()
    changelog_path = Path.expand(changelog_file)
    current_version = String.trim(mix_project[:version])

    repo = Git.init!()

    if opts[:initial] do
      Changelog.initialize(changelog_path, opts)
    end

    tags = Git.tags(repo)

    prefix = Config.prefix()
    config_types = Config.types()

    {commit_messages_for_version, commit_messages_for_changelog} =
      get_commit_messages(repo, prefix, tags, opts)

    # Unparseable commits are only logged while determining the version bump,
    # not again while rendering the changelog (avoids duplicate warnings).
    log_for_version? = !opts[:initial]

    commits_for_version =
      parse_commits(commit_messages_for_version, config_types, log_for_version?)

    commits_for_changelog = parse_commits(commit_messages_for_changelog, config_types, false)

    prefixed_new_version =
      if opts[:initial] do
        prefix <> mix_project[:version]
      else
        GitOps.Version.determine_new_version(
          current_version,
          prefix,
          commits_for_version,
          opts
        )
      end

    new_version =
      if prefix != "" do
        String.trim_leading(prefixed_new_version, prefix)
      else
        prefixed_new_version
      end

    changelog_changes =
      Changelog.write(
        changelog_path,
        commits_for_changelog,
        current_version,
        prefixed_new_version,
        opts
      )

    create_and_display_changes(current_version, new_version, changelog_changes, opts)

    cond do
      opts[:dry_run] ->
        :ok

      opts[:yes] ->
        tag(repo, changelog_file, prefixed_new_version, changelog_changes)
        :ok

      true ->
        confirm_and_tag(repo, changelog_file, prefixed_new_version, changelog_changes)
        :ok
    end
  end

  # Returns `{messages_for_version, messages_for_changelog}` — the commit
  # messages used to determine the version bump and those rendered into the
  # changelog. For `--initial`, both are every commit in the repository.
  # (Stray debug `IO.inspect` calls that polluted the output of every release
  # run were removed from this function and from `parse_commit/3`.)
  defp get_commit_messages(repo, prefix, tags, opts) do
    if opts[:initial] do
      commits = Git.get_initial_commits!(repo)
      {commits, commits}
    else
      # For `--rc` releases the bump is computed relative to the last tag of
      # any kind; otherwise relative to the last non-RC tag.
      tag =
        if opts[:rc] do
          GitOps.Version.last_valid_version(tags, prefix)
        else
          GitOps.Version.last_valid_non_rc_version(tags, prefix)
        end

      commits_for_version = Git.commit_messages_since_tag(repo, tag)
      last_version_after = GitOps.Version.last_version_greater_than(tags, tag, prefix)

      if last_version_after do
        # A newer tag exists after the comparison tag, so the changelog only
        # needs the commits since that newer tag.
        commit_messages_for_changelog = Git.commit_messages_since_tag(repo, last_version_after)

        {commits_for_version, commit_messages_for_changelog}
      else
        {commits_for_version, commits_for_version}
      end
    end
  end

  # Applies the version change to mix.exs and the README (when configured),
  # and for `--dry-run` prints the would-be contents of every changed file.
  defp create_and_display_changes(current_version, new_version, changelog_changes, opts) do
    changelog_file = Config.changelog_file()
    mix_project_module = Config.mix_project()
    readme = Config.manage_readme_version()

    Mix.shell().info("Your new version is: #{new_version}\n")

    mix_project_changes =
      if Config.manage_mix_version?() do
        VersionReplace.update_mix_project(
          mix_project_module,
          current_version,
          new_version,
          opts
        )
      end

    readme_changes =
      if readme do
        VersionReplace.update_readme(readme, current_version, new_version, opts)
      end

    if opts[:dry_run] do
      "Below are the contents of files that will change.\n"
      |> append_changes_to_message(changelog_file, changelog_changes)
      |> append_changes_to_message(readme, readme_changes)
      |> append_changes_to_message(mix_project_module, mix_project_changes)
      |> Mix.shell().info()
    end
  end

  # Commits the changelog (plus any other staged changes via `-am`) and
  # creates an annotated tag whose message is the release notes with markdown
  # heading markers stripped.
  defp tag(repo, changelog_file, new_version, new_message) do
    Git.add!(repo, "#{changelog_file}")
    Git.commit!(repo, ["-am", "chore: release version #{new_version}"])

    new_message =
      new_message
      |> String.replace(~r/^#+/m, "")
      |> String.split("\n")
      |> Enum.map(&String.trim/1)
      |> Enum.reject(&(&1 == ""))
      |> Enum.join("\n")

    Git.tag!(repo, ["-a", new_version, "-m", "release #{new_version}\n\n" <> new_message])

    Mix.shell().info("Don't forget to push with tags:\n\n    git push --follow-tags")
  end

  # Prompts the user before tagging; prints manual instructions on refusal.
  defp confirm_and_tag(repo, changelog_file, new_version, new_message) do
    message = """
    Shall we commit and tag?

    Instructions will be printed for committing and tagging if you choose no.
    """

    if Mix.shell().yes?(message) do
      tag(repo, changelog_file, new_version, new_message)
    else
      Mix.shell().info("""
      If you want to do it on your own, make sure you tag the release with:

      If you want to include your release notes in the tag message, use

          git commit -am "chore: release version #{new_version}"
          git tag -a #{new_version}

      And replace the contents with your release notes (make sure to escape any # with \#)

      Otherwise, use:

          git commit -am "chore: release version #{new_version}"
          git tag -a #{new_version} -m "release #{new_version}"

          git push --follow-tags
      """)
    end
  end

  defp parse_commits(messages, config_types, log?) do
    Enum.flat_map(messages, &parse_commit(&1, config_types, log?))
  end

  # Parses one raw commit message into zero or more conventional commits,
  # logging (when `log?`) anything that cannot be parsed.
  defp parse_commit(text, config_types, log?) do
    case Commit.parse(text) do
      {:ok, commits} ->
        commits_with_type(config_types, commits, text, log?)

      _ ->
        error_if_log("Unparseable commit: #{text}", log?)
        []
    end
  end

  # Keeps only commits whose type is configured; unknown types are dropped
  # (and optionally logged).
  defp commits_with_type(config_types, commits, text, log?) do
    Enum.flat_map(commits, fn commit ->
      if Map.has_key?(config_types, String.downcase(commit.type)) do
        [commit]
      else
        error_if_log("Commit with unknown type in: #{text}", log?)
        []
      end
    end)
  end

  # A failed replace is simply omitted from the dry-run report.
  defp append_changes_to_message(message, _, {:error, :bad_replace}), do: message

  defp append_changes_to_message(message, file, changes) do
    message <> "----- BEGIN #{file} -----\n\n#{changes}\n----- END #{file} -----\n\n"
  end

  defp error_if_log(error, _log? = true), do: Mix.shell().error(error)
  defp error_if_log(_, _), do: :ok

  defp get_opts(args) do
    {opts, _} =
      OptionParser.parse!(args,
        strict: [
          build: :string,
          force_patch: :boolean,
          initial: :boolean,
          no_major: :boolean,
          pre_release: :string,
          rc: :boolean,
          dry_run: :boolean,
          yes: :boolean
        ],
        aliases: [
          i: :initial,
          p: :pre_release,
          b: :build,
          f: :force_patch,
          n: :no_major,
          d: :dry_run,
          y: :yes
        ]
      )

    opts
  end
end
| 31.190031 | 100 | 0.668498 |
083ac66bc1928c0653a02bb036d20a644152293c | 256 | exs | Elixir | template/$PROJECT_NAME$/apps/web/test/web/views/layout_view_test.exs | mitjok/gen_template_litslink_umbrella | eadef26e83a1bb540a360f0bf5db0f519a100f8b | [
"Apache-2.0"
] | 19 | 2020-10-08T14:05:30.000Z | 2022-03-18T08:43:11.000Z | template/$PROJECT_NAME$/apps/web/test/web/views/layout_view_test.exs | mitjok/gen_template_litslink_umbrella | eadef26e83a1bb540a360f0bf5db0f519a100f8b | [
"Apache-2.0"
] | null | null | null | template/$PROJECT_NAME$/apps/web/test/web/views/layout_view_test.exs | mitjok/gen_template_litslink_umbrella | eadef26e83a1bb540a360f0bf5db0f519a100f8b | [
"Apache-2.0"
# Placeholder test module for the layout view. Marked `async: true`, which
# presumes these tests touch no shared state — confirm before adding
# DB-backed tests here.
defmodule Web.LayoutViewTest do
  use Web.ConnCase, async: true

  # When testing helpers, you may want to import Phoenix.HTML and
  # use functions such as safe_to_string() to convert the helper
  # result into an HTML string.
  # import Phoenix.HTML
end
| 28.444444 | 65 | 0.753906 |
083ad328f7a3b8d66eed58f0fff0d54ac7013b51 | 216 | ex | Elixir | lib/chess_app/chess/board/macros.ex | leobessa/exchess | 289819d183f3001dddf56810c36298fa669c3a06 | [
"MIT"
] | 3 | 2017-06-02T20:47:07.000Z | 2018-05-25T11:17:12.000Z | lib/chess_app/chess/board/macros.ex | leobessa/exchess | 289819d183f3001dddf56810c36298fa669c3a06 | [
"MIT"
] | null | null | null | lib/chess_app/chess/board/macros.ex | leobessa/exchess | 289819d183f3001dddf56810c36298fa669c3a06 | [
"MIT"
defmodule ChessApp.Chess.Board.Macros do
  @moduledoc """
  Guard-safe checks for chess board coordinates, expressed as strings.
  """

  # True when `file` is one of the eight board files, "a".."h".
  defmacro is_file(file) do
    quote do
      unquote(file) in ~w(a b c d e f g h)
    end
  end

  # True when `rank` is one of the eight board ranks, "1".."8".
  defmacro is_rank(rank) do
    quote do
      unquote(rank) in ~w(1 2 3 4 5 6 7 8)
    end
  end
end
| 21.6 | 50 | 0.666667 |
083ae605ee3471a1b88396ffabef6df9f5560831 | 2,184 | ex | Elixir | lib/river/stream_handler.ex | peburrows/river | e8968535d02a86e70a7942a690c8e461fed55913 | [
"MIT"
] | 86 | 2016-08-19T21:59:28.000Z | 2022-01-31T20:14:18.000Z | lib/river/stream_handler.ex | peburrows/river | e8968535d02a86e70a7942a690c8e461fed55913 | [
"MIT"
] | 7 | 2016-09-27T14:44:16.000Z | 2017-08-08T14:57:45.000Z | lib/river/stream_handler.ex | peburrows/river | e8968535d02a86e70a7942a690c8e461fed55913 | [
"MIT"
defmodule River.StreamHandler do
  @moduledoc """
  Per-stream state holder.

  Each stream is backed by an `Agent` whose state is a `{stream, response}`
  tuple: the `River.Stream` bookkeeping struct and the `River.Response`
  accumulated from received frames. Frames are folded into that state via
  `Agent.cast/2`, and the listener pid (when one is registered) is notified
  of frames as they arrive and of completion or error.
  """

  alias River.{Response, Frame, Stream, Conn}

  # Child spec used by River.StreamSupervisor; the per-stream arguments are
  # appended via `Supervisor.start_child/2` in `get_pid/3`.
  def child_spec(_) do
    %{
      id: __MODULE__,
      start: {__MODULE__, :start_link, []}
    }
  end

  # Starts (or reuses — see start_link/2) the handler for stream `id` on
  # `conn`, registered under a name derived from the host and stream id so
  # repeated calls for the same stream find the same process.
  def get_pid(%{host: host} = conn, id, parent \\ nil) do
    Supervisor.start_child(River.StreamSupervisor, [
      [name: :"stream-#{host}-#{id}"],
      %Stream{conn: conn, id: id, listener: parent, recv_window: Conn.initial_window_size()}
    ])
  end

  def start_link(opts, %Stream{} = stream) do
    # An already-registered handler counts as success, so callers can call
    # get_pid/3 without checking whether the stream exists yet.
    case Agent.start_link(fn -> {stream, %Response{}} end, opts) do
      {:ok, pid} ->
        {:ok, pid}

      {:error, {:already_started, pid}} ->
        {:ok, pid}
    end
  end

  # Folds a received frame into both the stream bookkeeping and the response.
  # When `Response.add_frame/2` marks the response closed, the listener gets
  # `{:ok, response}` or `{:error, response}` and the handler shuts itself
  # down; otherwise the listener just gets `{:frame, frame}`.
  def recv_frame(pid, %Frame{} = frame) do
    Agent.cast(pid, fn {%{listener: cpid} = stream, response} ->
      stream = Stream.recv_frame(stream, frame)

      case Response.add_frame(response, frame) do
        %Response{closed: true, __status: :error} = r ->
          message_and_close(pid, cpid, {:error, r})
          {stream, r}

        %Response{closed: true} = r ->
          message_and_close(pid, cpid, {:ok, r})
          {stream, r}

        r ->
          message(pid, cpid, {:frame, frame})
          {stream, r}
      end
    end)
  end

  # Records an outgoing frame in the stream bookkeeping; the response side of
  # the state is left untouched.
  def send_frame(pid, %Frame{} = frame) do
    Agent.cast(pid, fn {%{listener: _cpid} = stream, response} ->
      # how should we handle this here...?
      # this is why we need to handle
      stream = Stream.send_frame(stream, frame)
      {stream, response}
    end)
  end

  # Synchronous read of the accumulated response.
  def get_response(pid) do
    Agent.get(pid, fn {_, response} ->
      response
    end)
  end

  # Synchronous read of the stream bookkeeping struct.
  def get_stream(pid) do
    Agent.get(pid, fn {stream, _} ->
      stream
    end)
  end

  def stop(pid), do: Agent.stop(pid)

  # No listener registered — drop the notification.
  defp message(_pid, nil, _what), do: nil
  defp message(_pid, cpid, what), do: send(cpid, what)

  # Notifies the listener, then stops this handler from a separate process —
  # the Agent cannot stop itself from inside its own cast callback.
  defp message_and_close(pid, cpid, what) do
    message(pid, cpid, what)
    # I don't really know the best way to clean up after ourselves here
    # I need to send a response to the concerned pid, and then stop myself
    # maybe these should be handled with cast calls...?
    spawn(fn ->
      River.StreamHandler.stop(pid)
    end)
  end
end
| 26 | 92 | 0.595696 |
083b07d56af22c31e605f8d0e9298f4c8ac38684 | 6,915 | ex | Elixir | lib/lang_tags/registry.ex | milmazz/lang_tags | 4162648cb36957bc301c12434b0c82029e483885 | [
"Apache-2.0"
] | null | null | null | lib/lang_tags/registry.ex | milmazz/lang_tags | 4162648cb36957bc301c12434b0c82029e483885 | [
"Apache-2.0"
] | null | null | null | lib/lang_tags/registry.ex | milmazz/lang_tags | 4162648cb36957bc301c12434b0c82029e483885 | [
"Apache-2.0"
defmodule LangTags.Registry do
  @moduledoc false

  # This module is built almost entirely at compile time: the IANA
  # language-subtag registry file shipped in priv/ is parsed below, and every
  # record is unrolled into pattern-matched function clauses, so runtime
  # lookups are plain function-head dispatch with no file or table access.

  # For more information about the Registry format, please see:
  # https://tools.ietf.org/html/rfc5646#section-3.1

  # Source:
  # http://www.iana.org/assignments/language-subtag-registry/language-subtag-registry
  # @external_resource makes Mix recompile this module when the file changes.
  @external_resource path = Application.app_dir(:lang_tags, "priv/language-subtag-registry")

  # "Key: Value" separator used by the record-jar format.
  pattern = :binary.compile_pattern(": ")

  # Append a trailing "%%" so the final record is flushed by the reducer.
  registry = String.split(File.read!(path) <> "%%", ~r{\r?\n})

  # Single pass over the registry lines, accumulating:
  #   record         - the record currently being parsed (key/value map)
  #   lang_types     - subtag/tag => MapSet of record types it appears under
  #   subtag_records - all parsed "Subtag" records
  #   tag_records    - all parsed "Tag" records
  #   scope_records  - scope name => subtags carrying that scope
  #   macrolanguages - macrolanguage => [{subtag, type}] of its members
  {_record, lang_types, subtag_records, tag_records, scope_records, macrolanguages} =
    Enum.reduce registry, {%{}, %{}, [], [], %{}, %{}}, fn(line, {record, lang_types, subtag_records, tag_records, scope_records, macrolanguages}) ->
      case line |> String.trim() |> :binary.split(pattern) do
        # Records are separated by lines containing only the sequence "%%" (record-jar)
        ["%%"] ->
          # There are three types of records in the registry: "File-Date", "Subtag", and "Tag".
          {lang_types, subtag_records, tag_records, scope_records, macrolanguages} =
            case record do
              %{"Subtag" => subtag, "Type" => type} = record ->
                new_scope =
                  if type in ["language", "extlang"] && record["Scope"] do
                    Map.update(scope_records, record["Scope"], [subtag], &([subtag | &1]))
                  else
                    scope_records
                  end

                macrolanguages =
                  if record["Macrolanguage"] do
                    Map.update(macrolanguages, record["Macrolanguage"], [{subtag, type}], &([{subtag, type} | &1]))
                  else
                    macrolanguages
                  end

                lang_types = Map.update(lang_types, subtag, MapSet.new([type]), &(MapSet.put(&1, type)))
                {lang_types, [record | subtag_records], tag_records, new_scope, macrolanguages}

              %{"Tag" => tag, "Type" => type} = record ->
                lang_types = Map.update(lang_types, tag, MapSet.new([type]), &(MapSet.put(&1, type)))
                {lang_types, subtag_records, [record | tag_records], scope_records, macrolanguages}

              %{"File-Date" => file_date} ->
                # The File-Date record is compiled directly into `date/0`.
                def date() do
                  unquote(file_date)
                end

                {lang_types, subtag_records, tag_records, scope_records, macrolanguages}
            end

          # Reset the in-progress record for the next one.
          {%{}, lang_types, subtag_records, tag_records, scope_records, macrolanguages}

        ["Tag", v] ->
          # Lowercase for consistency (case is only a formatting convention, not a standard requirement).
          {Map.put(record, "Tag", String.downcase(v)), lang_types, subtag_records, tag_records, scope_records, macrolanguages}

        ["Subtag", v] ->
          # Lowercase for consistency (case is only a formatting convention, not a standard requirement).
          {Map.put(record, "Subtag", String.downcase(v)), lang_types, subtag_records, tag_records, scope_records, macrolanguages}

        ["Type", v] ->
          # Lowercase for consistency (case is only a formatting convention, not a standard requirement).
          {Map.put(record, "Type", String.downcase(v)), lang_types, subtag_records, tag_records, scope_records, macrolanguages}

        ["Comments", v] ->
          {Map.put(record, "Comments", [v]), lang_types, subtag_records, tag_records, scope_records, macrolanguages}

        ["Description", v] ->
          # A record may carry several Description lines; keep them in order.
          {Map.update(record, "Description", [v], &(&1 ++ [v])), lang_types, subtag_records, tag_records, scope_records, macrolanguages}

        [k, v] ->
          {Map.put(record, k, v), lang_types, subtag_records, tag_records, scope_records, macrolanguages}

        [comment] ->
          # Line without a ": " separator — folded into the comments.
          {Map.update(record, "Comments", [comment], &(&1 ++ [comment])), lang_types, subtag_records, tag_records, scope_records, macrolanguages}
      end
    end

  ## Macrolanguages

  # `{subtag, type}` members of the given macrolanguage; `[]` when unknown.
  @spec macrolanguages(String.t) :: [{String.t, String.t}] | []
  for {key, subtags} <- macrolanguages do
    def macrolanguages(unquote(key)) do
      unquote(subtags)
    end
  end

  def macrolanguages(_), do: []

  ## Types

  # All registry record types the given subtag/tag appears under.
  @spec types(String.t) :: [String.t] | []
  for {key, available_types} <- lang_types do
    def types(unquote(key)) do
      unquote(MapSet.to_list(available_types))
    end
  end

  def types(_), do: []

  # The predicates below all normalize to lowercase before dispatching,
  # matching the lowercasing applied while parsing the registry.
  @spec language?(String.t) :: boolean
  def language?(subtag), do: "language" in types(String.downcase(subtag))

  @spec extlang?(String.t) :: boolean
  def extlang?(subtag), do: "extlang" in types(String.downcase(subtag))

  @spec script?(String.t) :: boolean
  def script?(subtag), do: "script" in types(String.downcase(subtag))

  @spec region?(String.t) :: boolean
  def region?(subtag), do: "region" in types(String.downcase(subtag))

  @spec variant?(String.t) :: boolean
  def variant?(subtag), do: "variant" in types(String.downcase(subtag))

  @spec grandfathered?(String.t) :: boolean
  def grandfathered?(tag), do: "grandfathered" in types(String.downcase(tag))

  @spec redundant?(String.t) :: boolean
  def redundant?(tag), do: "redundant" in types(String.downcase(tag))

  ## Subtags

  # Full registry record for a subtag of the given type; raises ArgumentError
  # for unknown subtags or invalid types.
  @spec subtag(String.t, String.t) :: map | Exception.t
  for %{"Subtag" => key, "Type" => type} = subtag_record <- subtag_records do
    result = Macro.escape(subtag_record)

    def subtag(unquote(key), unquote(type)) do
      unquote(result)
    end
  end

  def subtag(subtag, type) when type in ["language", "extlang", "script", "region", "variant"] do
    raise(ArgumentError, "non-existent subtag '#{subtag}' of type '#{type}'.")
  end

  def subtag(_subtag, type) when type in ["grandfathered", "redundant"] do
    raise(ArgumentError, ~S{invalid type for subtag, expected: "language", "extlang", "script", "region" or "variant"})
  end

  def subtag(subtag, _type) do
    raise(ArgumentError, "non-existent subtag '#{subtag}'.")
  end

  ## Tags

  # Full registry record for a grandfathered/redundant tag; raises when unknown.
  @spec tag(String.t) :: map | Exception.t
  for %{"Tag" => key} = tag_record <- tag_records do
    result = Macro.escape(tag_record)

    def tag(unquote(key)) do
      unquote(result)
    end
  end

  def tag(tag) do
    raise(ArgumentError, "non-existent tag '#{tag}'.")
  end

  ## Scopes

  # One predicate per scope value found in the registry, each unrolled into
  # per-subtag clauses with a false fallback.
  @spec collection?(String.t) :: boolean
  for collection <- scope_records["collection"] do
    def collection?(unquote(collection)), do: true
  end

  def collection?(_), do: false

  @spec macrolanguage?(String.t) :: boolean
  for macrolanguage <- scope_records["macrolanguage"] do
    def macrolanguage?(unquote(macrolanguage)), do: true
  end

  def macrolanguage?(_), do: false

  @spec special?(String.t) :: boolean
  for special <- scope_records["special"] do
    def special?(unquote(special)), do: true
  end

  def special?(_), do: false

  @spec private_use?(String.t) :: boolean
  for private_use <- scope_records["private-use"] do
    def private_use?(unquote(private_use)), do: true
  end

  def private_use?(_), do: false
end
| 39.971098 | 149 | 0.632249 |
083b1331e705e20623565a033a497a7603f13f54 | 7,372 | ex | Elixir | clients/books/lib/google_api/books/v1/api/promooffer.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | clients/books/lib/google_api/books/v1/api/promooffer.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | clients/books/lib/google_api/books/v1/api/promooffer.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the swagger code generator program.
# https://github.com/swagger-api/swagger-codegen.git
# Do not edit the class manually.
defmodule GoogleApi.Books.V1.Api.Promooffer do
@moduledoc """
API calls for all endpoints tagged `Promooffer`.
"""
alias GoogleApi.Books.V1.Connection
alias GoogleApi.Gax.{Request, Response}
@doc """
## Parameters
- connection (GoogleApi.Books.V1.Connection): Connection to server
- optional_params (KeywordList): [optional] Optional parameters
- :alt (String.t): Data format for the response.
- :fields (String.t): Selector specifying which fields to include in a partial response.
- :key (String.t): API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
- :oauth_token (String.t): OAuth 2.0 token for the current user.
- :prettyPrint (boolean()): Returns response with indentations and line breaks.
- :quotaUser (String.t): An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
- :userIp (String.t): Deprecated. Please use quotaUser instead.
- :androidId (String.t): device android_id
- :device (String.t): device device
- :manufacturer (String.t): device manufacturer
- :model (String.t): device model
- :offerId (String.t):
- :product (String.t): device product
- :serial (String.t): device serial
- :volumeId (String.t): Volume id to exercise the offer
## Returns
{:ok, %{}} on success
{:error, info} on failure
"""
@spec books_promooffer_accept(Tesla.Env.client(), keyword()) ::
{:ok, nil} | {:error, Tesla.Env.t()}
def books_promooffer_accept(connection, optional_params \\ [], opts \\ []) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:androidId => :query,
:device => :query,
:manufacturer => :query,
:model => :query,
:offerId => :query,
:product => :query,
:serial => :query,
:volumeId => :query
}
request =
Request.new()
|> Request.method(:post)
|> Request.url("/promooffer/accept")
|> Request.add_optional_params(optional_params_config, optional_params)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [decode: false])
end
@doc """
## Parameters
- connection (GoogleApi.Books.V1.Connection): Connection to server
- optional_params (KeywordList): [optional] Optional parameters
- :alt (String.t): Data format for the response.
- :fields (String.t): Selector specifying which fields to include in a partial response.
- :key (String.t): API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
- :oauth_token (String.t): OAuth 2.0 token for the current user.
- :prettyPrint (boolean()): Returns response with indentations and line breaks.
- :quotaUser (String.t): An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
- :userIp (String.t): Deprecated. Please use quotaUser instead.
- :androidId (String.t): device android_id
- :device (String.t): device device
- :manufacturer (String.t): device manufacturer
- :model (String.t): device model
- :offerId (String.t): Offer to dimiss
- :product (String.t): device product
- :serial (String.t): device serial
## Returns
{:ok, %{}} on success
{:error, info} on failure
"""
@spec books_promooffer_dismiss(Tesla.Env.client(), keyword()) ::
{:ok, nil} | {:error, Tesla.Env.t()}
def books_promooffer_dismiss(connection, optional_params \\ [], opts \\ []) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:androidId => :query,
:device => :query,
:manufacturer => :query,
:model => :query,
:offerId => :query,
:product => :query,
:serial => :query
}
request =
Request.new()
|> Request.method(:post)
|> Request.url("/promooffer/dismiss")
|> Request.add_optional_params(optional_params_config, optional_params)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [decode: false])
end
@doc """
Returns a list of promo offers available to the user.

## Parameters

- connection (GoogleApi.Books.V1.Connection): Connection to server
- optional_params (KeywordList): [optional] Optional parameters
  - :alt (String.t): Data format for the response.
  - :fields (String.t): Selector specifying which fields to include in a partial response.
  - :key (String.t): API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
  - :oauth_token (String.t): OAuth 2.0 token for the current user.
  - :prettyPrint (boolean()): Returns response with indentations and line breaks.
  - :quotaUser (String.t): An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
  - :userIp (String.t): Deprecated. Please use quotaUser instead.
  - :androidId (String.t): device android_id
  - :device (String.t): device device
  - :manufacturer (String.t): device manufacturer
  - :model (String.t): device model
  - :product (String.t): device product
  - :serial (String.t): device serial

## Returns

{:ok, %GoogleApi.Books.V1.Model.Offers{}} on success
{:error, info} on failure
"""
@spec books_promooffer_get(Tesla.Env.client(), keyword()) ::
        {:ok, GoogleApi.Books.V1.Model.Offers.t()} | {:error, Tesla.Env.t()}
def books_promooffer_get(connection, optional_params \\ [], opts \\ []) do
  # Every supported optional parameter is sent in the query string.
  param_locations = %{
    alt: :query,
    fields: :query,
    key: :query,
    oauth_token: :query,
    prettyPrint: :query,
    quotaUser: :query,
    userIp: :query,
    androidId: :query,
    device: :query,
    manufacturer: :query,
    model: :query,
    product: :query,
    serial: :query
  }

  request =
    Request.new()
    |> Request.method(:get)
    |> Request.url("/promooffer/get")
    |> Request.add_optional_params(param_locations, optional_params)

  response = Connection.execute(connection, request)
  Response.decode(response, opts ++ [struct: %GoogleApi.Books.V1.Model.Offers{}])
end
end
| 36.676617 | 170 | 0.657623 |
083b35bffe6e3ee75cb5bd5bcceb6ba8ffa8e7b4 | 186 | exs | Elixir | test/test_helper.exs | andrewMacmurray/tic-tac-toe | 8fb698e6db30486be3c96b6e9a5f281004222ef4 | [
"MIT"
] | 2 | 2017-12-02T16:50:50.000Z | 2017-12-05T13:14:58.000Z | test/test_helper.exs | andrewMacmurray/tic-tac-toe | 8fb698e6db30486be3c96b6e9a5f281004222ef4 | [
"MIT"
] | 10 | 2017-12-05T22:18:24.000Z | 2017-12-18T15:33:48.000Z | test/test_helper.exs | andrewMacmurray/tic-tac-toe | 8fb698e6db30486be3c96b6e9a5f281004222ef4 | [
"MIT"
] | null | null | null | helper_files = [
"fake_io.exs",
"board_helper.exs",
"model_helper.exs",
"view_helper.exs"
]
# Load every shared test helper from test/support/ before the suite starts.
Enum.each(helper_files, fn file -> Code.require_file(file, "test/support/") end)

ExUnit.start()
| 14.307692 | 39 | 0.677419 |
083b3d1b8552088c48e0e3edd87fac2074d29990 | 904 | ex | Elixir | lib/nostrum/struct/event/voice_ready.ex | phereford/nostrum | 3d273671f51d839eedac4d6e52ba9cf70720ac01 | [
"MIT"
] | 637 | 2017-03-07T11:25:35.000Z | 2022-03-31T13:37:51.000Z | lib/nostrum/struct/event/voice_ready.ex | phereford/nostrum | 3d273671f51d839eedac4d6e52ba9cf70720ac01 | [
"MIT"
] | 372 | 2017-03-07T20:42:03.000Z | 2022-03-30T22:46:46.000Z | lib/nostrum/struct/event/voice_ready.ex | phereford/nostrum | 3d273671f51d839eedac4d6e52ba9cf70720ac01 | [
"MIT"
] | 149 | 2017-03-07T12:11:58.000Z | 2022-03-19T22:11:51.000Z | defmodule Nostrum.Struct.Event.VoiceReady do
@moduledoc since: "0.5.0"
@moduledoc """
Struct representing a Nostrum-generated Voice Ready event
Nostrum will generate this event when the bot joins a voice channel
and is ready to play audio.
Listening to this event may be used for bots that begin playing audio
directly after joining a voice channel as an alternative to waiting
until `Nostrum.Voice.ready?/1` returns `true`.
"""
defstruct [
:channel_id,
:guild_id
]
alias Nostrum.Struct.{Channel, Guild}
@typedoc """
Id of the channel that voice is ready in.
"""
@type channel_id :: Channel.id()
@typedoc """
Id of the guild that voice is ready in.
"""
@type guild_id :: Guild.id()
@type t :: %__MODULE__{
channel_id: channel_id,
guild_id: guild_id
}
# Builds a %VoiceReady{} from a plain map; per Kernel.struct/2, keys that are
# not struct fields are silently ignored. Hidden from generated docs.
@doc false
def to_struct(map), do: struct(__MODULE__, map)
end
| 23.179487 | 71 | 0.678097 |
083b3e216719bcd819129e5f6e2de46695955819 | 2,032 | exs | Elixir | apps/omg_watcher/test/omg_watcher/api/alarm_test.exs | karmonezz/elixir-omg | 3b26fc072fa553992277e1b9c4bad37b3d61ec6a | [
"Apache-2.0"
] | 1 | 2020-05-01T12:30:09.000Z | 2020-05-01T12:30:09.000Z | apps/omg_watcher/test/omg_watcher/api/alarm_test.exs | karmonezz/elixir-omg | 3b26fc072fa553992277e1b9c4bad37b3d61ec6a | [
"Apache-2.0"
] | null | null | null | apps/omg_watcher/test/omg_watcher/api/alarm_test.exs | karmonezz/elixir-omg | 3b26fc072fa553992277e1b9c4bad37b3d61ec6a | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 OmiseGO Pte Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
defmodule OMG.Watcher.API.AlarmTest do
  # Not async: tests mutate the global :alarm_handler state.
  use ExUnit.Case, async: false

  alias OMG.Watcher.API.Alarm

  setup %{} do
    {:ok, started_apps} = Application.ensure_all_started(:omg_status)

    # Stop the started applications in reverse order after each test.
    on_exit(fn ->
      started_apps
      |> Enum.reverse()
      |> Enum.each(&Application.stop/1)
    end)

    %{
      system_alarm: {:system_memory_high_watermark, []},
      system_disk_alarm: {{:disk_almost_full, "/dev/null"}, []},
      app_alarm: {:ethereum_client_connection, %{node: Node.self(), reporter: Reporter}}
    }
  end

  test "if alarms are returned when there are no alarms raised", _ do
    _ = OMG.Status.Alert.Alarm.clear_all()
    {:ok, []} = Alarm.get_alarms()
  end

  test "if alarms are returned when there are alarms raised", %{
    system_alarm: system_alarm,
    system_disk_alarm: system_disk_alarm,
    app_alarm: app_alarm
  } do
    :ok = :alarm_handler.set_alarm(system_alarm)
    :ok = :alarm_handler.set_alarm(app_alarm)
    :ok = :alarm_handler.set_alarm(system_disk_alarm)

    expected_alarms = [
      {{:disk_almost_full, "/dev/null"}, []},
      {:ethereum_client_connection, %{node: Node.self(), reporter: Reporter}},
      {:system_memory_high_watermark, []}
    ]

    {:ok, alarms} = Alarm.get_alarms()

    # Pin match: the raised alarms must appear exactly in this order.
    ^expected_alarms = Enum.filter(alarms, &(&1 in expected_alarms))
  end
end
| 31.75 | 93 | 0.688976 |
083b8d4da88bf64d1948c274c87c76202f8f99f5 | 437 | ex | Elixir | test/factories/competition/session_factory.ex | nimblehq/codewar-web | 5038bc09d4fc42f6be0737857a4c053ff42463f1 | [
"MIT"
] | 2 | 2021-06-29T02:22:28.000Z | 2022-02-15T06:32:15.000Z | test/factories/competition/session_factory.ex | nimblehq/codewar-web | 5038bc09d4fc42f6be0737857a4c053ff42463f1 | [
"MIT"
] | 14 | 2021-05-06T04:27:19.000Z | 2021-08-24T11:15:33.000Z | test/factories/competition/session_factory.ex | nimblehq/codewar-web | 5038bc09d4fc42f6be0737857a4c053ff42463f1 | [
"MIT"
] | 1 | 2021-08-20T07:50:19.000Z | 2021-08-20T07:50:19.000Z | defmodule Codewar.Competition.SessionFactory do
# Injects session factory definitions into the calling module via
# `use Codewar.Competition.SessionFactory` — presumably consumed by an
# ExMachina-style factory module (confirm against the factory setup).
defmacro __using__(_opts) do
  quote do
    alias Codewar.Competition.Schemas.Session

    # Base factory: a session with a random three-word name.
    def session_factory do
      %Session{
        name: Faker.Lorem.sentence(3)
      }
    end

    # A session in progress: started now, not yet completed.
    def active_session_factory do
      struct!(
        session_factory(),
        %{started_at: NaiveDateTime.utc_now(), completed_at: nil}
      )
    end
  end
end
end
| 20.809524 | 67 | 0.615561 |
083bae95a9954c11f07baefc3030e3a82a0a93c9 | 230 | ex | Elixir | lib/ido_keido/continent.ex | isabella232/ido_keido | 871fd5c8b294fdc89e31e8dba173f2a5464de399 | [
"Apache-2.0"
] | 6 | 2019-11-15T20:34:47.000Z | 2021-04-05T03:15:23.000Z | lib/ido_keido/continent.ex | FindHotel/ido_keido | 871fd5c8b294fdc89e31e8dba173f2a5464de399 | [
"Apache-2.0"
] | 1 | 2022-03-20T09:34:42.000Z | 2022-03-20T09:34:42.000Z | lib/ido_keido/continent.ex | isabella232/ido_keido | 871fd5c8b294fdc89e31e8dba173f2a5464de399 | [
"Apache-2.0"
] | 3 | 2021-06-09T05:58:09.000Z | 2022-03-20T07:05:13.000Z | defmodule IdoKeido.Continent do
@moduledoc """
Continent data.
"""
use TypedStruct
@typedoc """
Continent type.
"""
# Both fields are mandatory (`enforce: true`).
typedstruct(enforce: true) do
  # Continent code — assumed to be a short identifier; confirm with the data source.
  field :code, String.t()
  # Human-readable continent name.
  field :name, String.t()
end
end
| 14.375 | 31 | 0.634783 |
083bbf454342b41d07b9060eb182a0bcef30ebda | 3,528 | exs | Elixir | test/absinthe/execution/arguments/list_test.exs | zoldar/absinthe | 72ff9f91fcc0a261f9965cf8120c7c72ff6e4c7c | [
"MIT"
] | 4,101 | 2016-03-02T03:49:20.000Z | 2022-03-31T05:46:01.000Z | test/absinthe/execution/arguments/list_test.exs | zoldar/absinthe | 72ff9f91fcc0a261f9965cf8120c7c72ff6e4c7c | [
"MIT"
] | 889 | 2016-03-02T16:06:59.000Z | 2022-03-31T20:24:12.000Z | test/absinthe/execution/arguments/list_test.exs | zoldar/absinthe | 72ff9f91fcc0a261f9965cf8120c7c72ff6e4c7c | [
"MIT"
] | 564 | 2016-03-02T07:49:59.000Z | 2022-03-06T14:40:59.000Z | defmodule Absinthe.Execution.Arguments.ListTest do
use Absinthe.Case, async: true

# Fixture schema providing the argument fields exercised by these tests.
@schema Absinthe.Fixtures.ArgumentsSchema

@graphql """
query ($contacts: [ContactInput]) {
  contacts(contacts: $contacts)
}
"""
# A null list supplied to a non-null list argument must be rejected.
test "when missing for a non-null argument, should raise an error" do
  msg = ~s(In argument "contacts": Expected type "[ContactInput]!", found null.)
  assert_error_message(msg, run(@graphql, @schema))
end

@graphql """
query ($numbers: [Int!]!) {
  numbers(numbers: $numbers)
}
"""
test "using variables, works with basic scalars" do
  assert_data(%{"numbers" => [1, 2]}, run(@graphql, @schema, variables: %{"numbers" => [1, 2]}))
end

@graphql """
query ($names: [InputName!]!) {
  names(names: $names)
}
"""
test "works with custom scalars" do
  assert_data(
    %{"names" => ["Joe", "bob"]},
    run(@graphql, @schema, variables: %{"names" => ["Joe", "bob"]})
  )
end
@graphql """
query ($contacts: [ContactInput]) {
  contacts(contacts: $contacts)
}
"""
# A list of input objects passed as a variable is coerced element-by-element.
test "using variables, works with input objects" do
  assert_data(
    %{"contacts" => ["a@b.com", "c@d.com"]},
    run(
      @graphql,
      @schema,
      variables: %{
        "contacts" => [
          %{"email" => "a@b.com"},
          %{"email" => "c@d.com"}
        ]
      }
    )
  )
end

@graphql """
query ($contact: ContactInput) {
  contacts(contacts: [$contact, $contact])
}
"""
# The same variable may appear more than once inside a list literal.
test "with inner variables" do
  assert_data(
    %{"contacts" => ["a@b.com", "a@b.com"]},
    run(@graphql, @schema, variables: %{"contact" => %{"email" => "a@b.com"}})
  )
end

@graphql """
query ($contact: ContactInput) {
  contacts(contacts: [$contact, $contact])
}
"""
# Unbound inner variables are dropped, leaving an empty list.
test "with inner variables when no variables are given" do
  assert_data(%{"contacts" => []}, run(@graphql, @schema, variables: %{}))
end

@graphql """
query {
  names(names: ["Joe", "bob"])
}
"""
test "custom scalars literals can be included" do
  assert_data(%{"names" => ["Joe", "bob"]}, run(@graphql, @schema))
end
@graphql """
query {
  numbers(numbers: [1, 2])
}
"""
test "using literals, works with basic scalars" do
  assert_data(%{"numbers" => [1, 2]}, run(@graphql, @schema))
end

@graphql """
query {
  listOfLists(items: [["foo"], ["bar", "baz"]])
}
"""
test "works with nested lists" do
  assert_data(%{"listOfLists" => [["foo"], ["bar", "baz"]]}, run(@graphql, @schema))
end

@graphql """
query {
  numbers(numbers: 1)
}
"""
# A bare value supplied where a list is expected is wrapped in a
# single-element list.
test "it will coerce a non list item if it's of the right type" do
  # per https://facebook.github.io/graphql/#sec-Lists
  assert_data(%{"numbers" => [1]}, run(@graphql, @schema))
end

@graphql """
query {
  contacts(contacts: [{email: "a@b.com"}, {email: "c@d.com"}])
}
"""
test "using literals, works with input objects" do
  assert_data(%{"contacts" => ["a@b.com", "c@d.com"]}, run(@graphql, @schema))
end

@graphql """
query {
  contacts(contacts: [{email: "a@b.com"}, {foo: "c@d.com"}])
}
"""
# Validation errors inside list elements are reported per element and per field.
test "returns deeply nested errors" do
  assert_error_message_lines(
    [
      ~s(Argument "contacts" has invalid value [{email: "a@b.com"}, {foo: "c@d.com"}].),
      ~s(In element #2: Expected type "ContactInput", found {foo: "c@d.com"}.),
      ~s(In field "email": Expected type "String!", found null.),
      ~s(In field "foo": Unknown field.)
    ],
    run(@graphql, @schema)
  )
end
end
| 24.84507 | 98 | 0.562358 |
083bc12c62c1183b9c9d4c9d1b2c4bea021ba23c | 7,125 | ex | Elixir | lib/binance/futures/rest/http_client.ex | quantd2/binance_ex | 74e2a5b2d6d76174f0f8f4d4497ae14224daa93b | [
"MIT"
] | null | null | null | lib/binance/futures/rest/http_client.ex | quantd2/binance_ex | 74e2a5b2d6d76174f0f8f4d4497ae14224daa93b | [
"MIT"
] | null | null | null | lib/binance/futures/rest/http_client.ex | quantd2/binance_ex | 74e2a5b2d6d76174f0f8f4d4497ae14224daa93b | [
"MIT"
] | null | null | null | defmodule Binance.Futures.Rest.HTTPClient do
# Base URL of the Binance futures REST API.
@endpoint "https://fapi.binance.com"

# Response headers through which Binance reports rate-limit usage
# for the current one-minute window.
@used_order_limit "X-MBX-ORDER-COUNT-1M"
@used_weight_limit "X-MBX-USED-WEIGHT-1M"

alias Binance.{Config, Util}
# Performs a GET against the given API path and parses the response.
def get_binance(url, headers \\ []) when is_list(headers) do
  "#{@endpoint}#{url}"
  |> HTTPoison.get(headers)
  |> parse_response()
end
# Performs a DELETE against the given API path; the result includes
# the rate-limit usage extracted from the response headers.
def delete_binance(url, headers \\ []) when is_list(headers) do
  "#{@endpoint}#{url}"
  |> HTTPoison.delete(headers)
  |> parse_response(:rate_limit)
end
# Signs `params` with the credentials from `config` and issues a GET.
# Credential errors from prepare_request/5 fall through unchanged.
def get_binance(url, params, config) do
  with {:ok, signed_url, headers} <- prepare_request(:get, url, params, config, true) do
    get_binance(signed_url, headers)
  end
end
# Signs `params` with the credentials from `config` and issues a DELETE.
# Credential errors from prepare_request/5 fall through unchanged.
def delete_binance(url, params, config) do
  with {:ok, signed_url, headers} <- prepare_request(:delete, url, params, config, true) do
    delete_binance(signed_url, headers)
  end
end
# Sends a POST request, signed by default, and returns the decoded body
# together with the rate-limit usage read from the response headers.
#
# NOTE(review): HTTP-level results are 3-tuples ({:ok | :error, payload,
# rate_limit}) but prepare_request/5 failures are returned as plain 2-tuples
# ({:error, reason}) — callers must handle both shapes.
def post_binance(url, params, config, signed? \\ true) do
  case prepare_request(:post, url, params, config, signed?) do
    {:error, _} = error ->
      error

    {:ok, url, headers, body} ->
      case HTTPoison.post("#{@endpoint}#{url}", body, headers) do
        {:error, err} ->
          # Transport-level failure — no response headers available, so
          # parse_rate_limits/1 falls through to its empty-map clause.
          rate_limit = parse_rate_limits(err)
          {:error, {:http_error, err}, rate_limit}

        {:ok, %{status_code: status_code} = response} when status_code not in 200..299 ->
          rate_limit = parse_rate_limits(response)

          # Non-2xx responses are expected to carry a JSON body with
          # "code" and "msg" describing the Binance error.
          case Poison.decode(response.body) do
            {:ok, %{"code" => code, "msg" => msg}} ->
              {:error, {:binance_error, %{code: code, msg: msg}}, rate_limit}

            {:error, err} ->
              {:error, {:poison_decode_error, err}, rate_limit}
          end

        {:ok, response} ->
          rate_limit = parse_rate_limits(response)

          case Poison.decode(response.body) do
            {:ok, data} -> {:ok, data, rate_limit}
            {:error, err} -> {:error, {:poison_decode_error, err}, rate_limit}
          end
      end
  end
end
# Extracts Binance rate-limit usage from the response headers, keyed as
# :used_order_limit and :used_weight_limit. Header-name comparison is
# case-insensitive; if the same header repeats, the last value wins.
defp parse_rate_limits(%HTTPoison.Response{headers: headers}) do
  Enum.reduce(headers, %{}, fn {name, value}, limits ->
    case String.upcase(name) do
      @used_order_limit -> Map.put(limits, :used_order_limit, value)
      @used_weight_limit -> Map.put(limits, :used_weight_limit, value)
      _ -> limits
    end
  end)
end

# Anything without headers (e.g. a transport error) yields no limits.
defp parse_rate_limits(_no_headers), do: %{}
# Sends a PUT request, signed by default, and returns the decoded body.
# NOTE(review): unlike post_binance/4, rate-limit headers are not reported
# here — confirm whether that asymmetry is intentional.
def put_binance(url, params, config, signed? \\ true) do
  case prepare_request(:put, url, params, config, signed?) do
    {:error, _} = error ->
      error

    {:ok, url, headers, body} ->
      case HTTPoison.put("#{@endpoint}#{url}", body, headers) do
        {:error, err} ->
          {:error, {:http_error, err}}

        {:ok, %{status_code: status_code} = response} when status_code not in 200..299 ->
          case Poison.decode(response.body) do
            {:ok, %{"code" => code, "msg" => msg}} ->
              {:error, {:binance_error, %{code: code, msg: msg}}}

            {:error, err} ->
              {:error, {:poison_decode_error, err}}
          end

        {:ok, %{body: ""}} ->
          # Some endpoints reply with an empty body on success; skip decoding.
          {:ok, ""}

        {:ok, response} ->
          case Poison.decode(response.body) do
            {:ok, data} -> {:ok, data}
            {:error, err} -> {:error, {:poison_decode_error, err}}
          end
      end
  end
end
# Builds the signed components of a Binance API request.
#
# For :get/:delete returns {:ok, url_with_query_and_signature, headers};
# for :post/:put returns {:ok, url, headers, encoded_body}.
# Returns {:error, {:config_missing, _}} when credentials are absent.
def prepare_request(method, url, params, config, signed?) do
  case validate_credentials(config) do
    {:error, _} = error ->
      error

    {:ok, %Config{api_key: api_key, api_secret: api_secret}} ->
      cond do
        method in [:get, :delete] ->
          headers = [{"X-MBX-APIKEY", api_key}]

          # Every parameter (plus a fresh timestamp) travels in the query
          # string; the whole query is then signed.
          query =
            params
            |> Map.put(:timestamp, :os.system_time(:millisecond))
            |> Map.new(fn {key, value} -> {key, serialize_param(value)} end)
            |> URI.encode_query()

          signature = Util.sign_content(api_secret, query)

          {:ok, "#{url}?#{query}&signature=#{signature}", headers}

        method in [:post, :put] ->
          headers = [
            {"X-MBX-APIKEY", api_key},
            {"Content-Type", "application/x-www-form-urlencoded"}
          ]

          body = URI.encode_query(params)

          body =
            case signed? do
              true ->
                signature = Util.sign_content(api_secret, body)
                "#{body}&signature=#{signature}"

              false ->
                body
            end

          {:ok, url, headers, body}
      end
  end
end

# Lists are rendered as "[a,b,c]" before query-string encoding;
# every other value passes through untouched.
defp serialize_param(value) when is_list(value), do: "[#{Enum.join(value, ",")}]"
defp serialize_param(value), do: value
# Resolves the effective configuration and ensures both credentials
# are present as binaries.
defp validate_credentials(config) do
  case Config.get(config) do
    %Config{api_key: key, api_secret: secret} = resolved
    when is_binary(key) and is_binary(secret) ->
      {:ok, resolved}

    _ ->
      {:error, {:config_missing, "Secret or API key missing"}}
  end
end
# Variant of parse_response/1 that additionally reports rate-limit usage
# as the third element of the returned tuple.
defp parse_response({:error, err}, :rate_limit) do
  {:error, {:http_error, err}, parse_rate_limits(err)}
end

defp parse_response({:ok, %{status_code: status_code} = response}, :rate_limit)
     when status_code not in 200..299 do
  limits = parse_rate_limits(response)

  case Poison.decode(response.body) do
    {:ok, %{"code" => code, "msg" => msg}} ->
      {:error, {:binance_error, %{code: code, msg: msg}}, limits}

    {:error, error} ->
      {:error, {:poison_decode_error, error}, limits}
  end
end

defp parse_response({:ok, response}, :rate_limit) do
  limits = parse_rate_limits(response)

  case Poison.decode(response.body) do
    {:ok, data} -> {:ok, data, limits}
    {:error, error} -> {:error, {:poison_decode_error, error}, limits}
  end
end
# Normalizes an HTTPoison result into {:ok, decoded} / {:error, reason}.
defp parse_response({:error, err}) do
  {:error, {:http_error, err}}
end

defp parse_response({:ok, %{status_code: status_code} = response})
     when status_code not in 200..299 do
  case Poison.decode(response.body) do
    {:ok, %{"code" => code, "msg" => msg}} ->
      {:error, {:binance_error, %{code: code, msg: msg}}}

    {:error, error} ->
      {:error, {:poison_decode_error, error}}
  end
end

defp parse_response({:ok, response}) do
  case Poison.decode(response.body) do
    {:ok, data} -> {:ok, data}
    {:error, error} -> {:error, {:poison_decode_error, error}}
  end
end
| 28.963415 | 93 | 0.526316 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.