hexsha stringlengths 40 40 | size int64 2 991k | ext stringclasses 2 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 208 | max_stars_repo_name stringlengths 6 106 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses list | max_stars_count int64 1 33.5k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 208 | max_issues_repo_name stringlengths 6 106 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses list | max_issues_count int64 1 16.3k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 208 | max_forks_repo_name stringlengths 6 106 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses list | max_forks_count int64 1 6.91k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 991k | avg_line_length float64 1 36k | max_line_length int64 1 977k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e81c1f2878bcf892dfbd359ce65d685bc26a72c4 | 1,304 | exs | Elixir | test/unit/strong_migrations/classifiers/add_index_concurrently_in_transaction_test.exs | maximemenager/strong_migrations | b7e091d2cfed73098d3bf683c7ce5c8ceee3159b | [
"MIT"
] | 23 | 2021-10-29T19:58:35.000Z | 2021-11-13T21:42:45.000Z | test/unit/strong_migrations/classifiers/add_index_concurrently_in_transaction_test.exs | maximemenager/strong_migrations | b7e091d2cfed73098d3bf683c7ce5c8ceee3159b | [
"MIT"
] | 1 | 2021-10-31T03:57:47.000Z | 2021-10-31T14:33:45.000Z | test/unit/strong_migrations/classifiers/add_index_concurrently_in_transaction_test.exs | surgeventures/strong_migrations | 3c82e34a6e7a372c6de17ba7a0b07da7664baa26 | [
"MIT"
] | 3 | 2021-10-31T02:14:10.000Z | 2021-11-09T08:07:22.000Z | defmodule StrongMigrations.Classifiers.AddIndexConcurrentlyInTransactionTest do
use ExUnit.Case, async: true
alias StrongMigrations.Classifiers.AddIndexConcurrentlyInTransaction
alias StrongMigrations.Migration
test "it has failed when creating an index concurrently with disabled ddl transaction" do
migration = %{
Migration.new("test.exs")
| create_index_concurrently: true,
disable_migration_lock: true
}
assert {:error, :add_index_concurrently_in_transaction} ==
AddIndexConcurrentlyInTransaction.classify(migration)
end
test "it has failed when creating an index concurrently with disabled migration lock" do
migration = %{
Migration.new("test.exs")
| create_index_concurrently: true,
disable_ddl_transaction: true
}
assert {:error, :add_index_concurrently_in_transaction} ==
AddIndexConcurrentlyInTransaction.classify(migration)
end
test "it has passed when creating an index concurrently with enabled ddl and migration lock" do
migration = %{
Migration.new("test.exs")
| create_index_concurrently: true,
disable_ddl_transaction: true,
disable_migration_lock: true
}
assert :ok == AddIndexConcurrentlyInTransaction.classify(migration)
end
end
| 32.6 | 97 | 0.736196 |
e81c31f0b5398b777c43ab24e88b70b739f82625 | 1,001 | ex | Elixir | lib/membrane/core/element/pad_spec_handler.ex | treble37/membrane-core | 3f7c7200a80eef370092ef252b5f75dc9eb16cbd | [
"Apache-2.0"
] | null | null | null | lib/membrane/core/element/pad_spec_handler.ex | treble37/membrane-core | 3f7c7200a80eef370092ef252b5f75dc9eb16cbd | [
"Apache-2.0"
] | null | null | null | lib/membrane/core/element/pad_spec_handler.ex | treble37/membrane-core | 3f7c7200a80eef370092ef252b5f75dc9eb16cbd | [
"Apache-2.0"
] | null | null | null | defmodule Membrane.Core.Element.PadSpecHandler do
@moduledoc false
# Module parsing pads specifications in elements.
alias Membrane.{Core, Element}
alias Element.Pad
alias Core.Element.{PadModel, PadsSpecsParser, State}
require Pad
use Bunch
use Core.Element.Log
@doc """
Initializes pads info basing on element's pads specifications.
"""
@spec init_pads(State.t()) :: State.t()
def init_pads(%State{module: module} = state) do
pads = %{
data: %{},
info: module.membrane_pads() |> Bunch.TupleList.map_values(&init_pad_info/1) |> Map.new(),
dynamic_currently_linking: []
}
%State{state | pads: pads}
end
@spec init_pad_info(PadsSpecsParser.parsed_pad_specs_t()) :: PadModel.pad_info_t()
defp init_pad_info(specs) do
specs
|> Bunch.Map.move!(:caps, :accepted_caps)
|> Map.merge(
case specs.availability |> Pad.availability_mode() do
:dynamic -> %{current_id: 0}
:static -> %{}
end
)
end
end
| 26.342105 | 96 | 0.664336 |
e81c84fc584f7af34d6f8d076ea34424feeb2af1 | 620 | exs | Elixir | nerves.exs | guidoholz/nerves_custom_rpi0_opencv | 1f9860d3d6fd12b81cf9762a1382c31da25c4ae5 | [
"Apache-2.0"
] | null | null | null | nerves.exs | guidoholz/nerves_custom_rpi0_opencv | 1f9860d3d6fd12b81cf9762a1382c31da25c4ae5 | [
"Apache-2.0"
] | null | null | null | nerves.exs | guidoholz/nerves_custom_rpi0_opencv | 1f9860d3d6fd12b81cf9762a1382c31da25c4ae5 | [
"Apache-2.0"
] | null | null | null | use Mix.Config
version =
Path.join(__DIR__, "VERSION")
|> File.read!
|> String.trim
pkg = :nerves_custom_rpi0_opencv
config pkg, :nerves_env,
type: :system,
version: version,
compiler: :nerves_package,
# artifact_url: [
# "https://github.com/nerves-project/#{pkg}/releases/download/v#{version}/#{pkg}-v#{version}.tar.gz",
# ],
platform: Nerves.System.BR,
platform_config: [
defconfig: "nerves_defconfig"
],
checksum: [
"nerves_defconfig",
"rootfs_overlay",
"linux-4.4.defconfig",
"fwup.conf",
"cmdline.txt",
"config.txt",
"post-createfs.sh",
"VERSION"
]
| 20 | 104 | 0.641935 |
e81c893fc0b3e11c4cbba4098f5e5c0911af13b2 | 1,856 | exs | Elixir | mix.exs | marnen/contraq-elixir | 69798c1b92a08c76a63e200eadeeb19986d56d91 | [
"MIT"
] | null | null | null | mix.exs | marnen/contraq-elixir | 69798c1b92a08c76a63e200eadeeb19986d56d91 | [
"MIT"
] | 3 | 2016-11-25T20:35:54.000Z | 2016-11-26T19:45:35.000Z | mix.exs | marnen/contraq-elixir | 69798c1b92a08c76a63e200eadeeb19986d56d91 | [
"MIT"
] | null | null | null | defmodule Contraq.Mixfile do
use Mix.Project
def project do
[
app: :contraq,
version: "0.0.1",
elixir: "~> 1.2",
elixirc_paths: elixirc_paths(Mix.env),
compilers: [:phoenix, :gettext] ++ Mix.compilers,
build_embedded: Mix.env == :prod,
start_permanent: Mix.env == :prod,
aliases: aliases(),
deps: deps(),
preferred_cli_env: [espec: :test, "white_bread.run": :test]
]
end
# Configuration for the OTP application.
#
# Type `mix help compile.app` for more information.
def application do
[mod: {Contraq, []},
applications: [:phoenix, :phoenix_pubsub, :phoenix_html, :cowboy, :logger, :gettext,
:phoenix_ecto, :postgrex]]
end
# Specifies which paths to compile per environment.
defp elixirc_paths(:test), do: ["lib", "web", "test/support"]
defp elixirc_paths(_), do: ["lib", "web"]
# Specifies your project dependencies.
#
# Type `mix help deps` for examples and options.
defp deps do
[
{:phoenix, "~> 1.2.1"},
{:phoenix_pubsub, "~> 1.0"},
{:phoenix_ecto, "~> 3.0"},
{:postgrex, ">= 0.0.0"},
{:phoenix_html, "~> 2.6"},
{:phoenix_live_reload, "~> 1.0", only: :dev},
{:gettext, "~> 0.11"},
{:cowboy, "~> 1.0"},
{:espec_phoenix, "~> 0.6.4", only: :test},
{ :white_bread, "~> 2.5", only: [:dev, :test] }
]
end
# Aliases are shortcuts or tasks specific to the current project.
# For example, to create, migrate and run the seeds file at once:
#
# $ mix ecto.setup
#
# See the documentation for `Mix` for more info on aliases.
defp aliases do
["ecto.setup": ["ecto.create", "ecto.migrate", "run priv/repo/seeds.exs"],
"ecto.reset": ["ecto.drop", "ecto.setup"],
"test": ["ecto.create --quiet", "ecto.migrate", "test"]]
end
end
| 29.935484 | 89 | 0.583513 |
e81c95412fadf1a5741523f43b724a7917ad8ca1 | 6,947 | ex | Elixir | clients/dfa_reporting/lib/google_api/dfa_reporting/v34/api/postal_codes.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2021-12-20T03:40:53.000Z | 2021-12-20T03:40:53.000Z | clients/dfa_reporting/lib/google_api/dfa_reporting/v34/api/postal_codes.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2020-08-18T00:11:23.000Z | 2020-08-18T00:44:16.000Z | clients/dfa_reporting/lib/google_api/dfa_reporting/v34/api/postal_codes.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.DFAReporting.V34.Api.PostalCodes do
@moduledoc """
API calls for all endpoints tagged `PostalCodes`.
"""
alias GoogleApi.DFAReporting.V34.Connection
alias GoogleApi.Gax.{Request, Response}
@library_version Mix.Project.config() |> Keyword.get(:version, "")
@doc """
Gets one postal code by ID.
## Parameters
* `connection` (*type:* `GoogleApi.DFAReporting.V34.Connection.t`) - Connection to server
* `profile_id` (*type:* `String.t`) - User profile ID associated with this request.
* `code` (*type:* `String.t`) - Postal code ID.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.DFAReporting.V34.Model.PostalCode{}}` on success
* `{:error, info}` on failure
"""
@spec dfareporting_postal_codes_get(
Tesla.Env.client(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.DFAReporting.V34.Model.PostalCode.t()}
| {:ok, Tesla.Env.t()}
| {:ok, list()}
| {:error, any()}
def dfareporting_postal_codes_get(
connection,
profile_id,
code,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url("/dfareporting/v3.4/userprofiles/{profileId}/postalCodes/{code}", %{
"profileId" => URI.encode(profile_id, &URI.char_unreserved?/1),
"code" => URI.encode(code, &(URI.char_unreserved?(&1) || &1 == ?/))
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.DFAReporting.V34.Model.PostalCode{}])
end
@doc """
Retrieves a list of postal codes.
## Parameters
* `connection` (*type:* `GoogleApi.DFAReporting.V34.Connection.t`) - Connection to server
* `profile_id` (*type:* `String.t`) - User profile ID associated with this request.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.DFAReporting.V34.Model.PostalCodesListResponse{}}` on success
* `{:error, info}` on failure
"""
@spec dfareporting_postal_codes_list(Tesla.Env.client(), String.t(), keyword(), keyword()) ::
{:ok, GoogleApi.DFAReporting.V34.Model.PostalCodesListResponse.t()}
| {:ok, Tesla.Env.t()}
| {:ok, list()}
| {:error, any()}
def dfareporting_postal_codes_list(connection, profile_id, optional_params \\ [], opts \\ []) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url("/dfareporting/v3.4/userprofiles/{profileId}/postalCodes", %{
"profileId" => URI.encode(profile_id, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(
opts ++ [struct: %GoogleApi.DFAReporting.V34.Model.PostalCodesListResponse{}]
)
end
end
| 42.359756 | 196 | 0.625162 |
e81c9923f456b7a373c4ed9200d056bd13850fc6 | 4,230 | ex | Elixir | lib/livebook_web/live/session_live/mix_standalone_live.ex | doyobi/livebook | 136d5039c42b406dd0b31aea188deb4fce3b1328 | [
"Apache-2.0"
] | 1 | 2022-02-16T09:13:27.000Z | 2022-02-16T09:13:27.000Z | lib/livebook_web/live/session_live/mix_standalone_live.ex | doyobi/livebook | 136d5039c42b406dd0b31aea188deb4fce3b1328 | [
"Apache-2.0"
] | null | null | null | lib/livebook_web/live/session_live/mix_standalone_live.ex | doyobi/livebook | 136d5039c42b406dd0b31aea188deb4fce3b1328 | [
"Apache-2.0"
] | null | null | null | defmodule LivebookWeb.SessionLive.MixStandaloneLive do
use LivebookWeb, :live_view
alias Livebook.{Session, Runtime, Utils, FileSystem}
@type status :: :initial | :initializing | :finished
@impl true
def mount(_params, %{"session" => session, "current_runtime" => current_runtime}, socket) do
if connected?(socket) do
Phoenix.PubSub.subscribe(Livebook.PubSub, "sessions:#{session.id}")
end
{:ok,
assign(socket,
session: session,
status: :initial,
current_runtime: current_runtime,
file: initial_file(current_runtime),
outputs: [],
emitter: nil
), temporary_assigns: [outputs: []]}
end
@impl true
def render(assigns) do
~H"""
<div class="flex-col space-y-5">
<p class="text-gray-700">
Start a new local node in the context of a Mix project.
This way all your code and dependencies will be available
within the notebook.
</p>
<p class="text-gray-700">
<span class="font-semibold">Warning:</span>
Notebooks that use <code>Mix.install/1</code> do not work
inside a Mix project because the dependencies of the project
itself have been installed instead.
</p>
<%= if @status == :initial do %>
<div class="h-full h-52">
<.live_component module={LivebookWeb.FileSelectComponent}
id="mix-project-dir"
file={@file}
extnames={[]}
running_files={[]}
submit_event={if(disabled?(@file.path), do: nil, else: :init)}
file_system_select_disabled={true} />
</div>
<button class="button-base button-blue" phx-click="init" disabled={disabled?(@file.path)}>
<%= if(matching_runtime?(@current_runtime, @file.path), do: "Reconnect", else: "Connect") %>
</button>
<% end %>
<%= if @status != :initial do %>
<div class="markdown">
<pre><code class="max-h-40 overflow-y-auto tiny-scrollbar"
id="mix-standalone-init-output"
phx-update="append"
phx-hook="ScrollOnUpdate"
><%= for {output, i} <- @outputs do %><span id={"output-#{i}"}><%= ansi_string_to_html(output) %></span><% end %></code></pre>
</div>
<% end %>
</div>
"""
end
@impl true
def handle_event("init", _params, socket) do
handle_init(socket)
end
@impl true
def handle_info({:set_file, file, _info}, socket) do
{:noreply, assign(socket, :file, file)}
end
def handle_info(:init, socket) do
handle_init(socket)
end
def handle_info({:emitter, ref, message}, %{assigns: %{emitter: %{ref: ref}}} = socket) do
case message do
{:output, output} ->
{:noreply, add_output(socket, output)}
{:ok, runtime} ->
Session.connect_runtime(socket.assigns.session.pid, runtime)
{:noreply, socket |> assign(status: :finished) |> add_output("Connected successfully")}
{:error, error} ->
{:noreply, socket |> assign(status: :finished) |> add_output("Error: #{error}")}
end
end
def handle_info({:operation, {:set_runtime, _pid, runtime}}, socket) do
{:noreply, assign(socket, current_runtime: runtime)}
end
def handle_info(_, socket), do: {:noreply, socket}
defp handle_init(socket) do
emitter = Utils.Emitter.new(self())
Runtime.MixStandalone.init_async(socket.assigns.file.path, emitter)
{:noreply, assign(socket, status: :initializing, emitter: emitter)}
end
defp add_output(socket, output) do
assign(socket, outputs: socket.assigns.outputs ++ [{output, Utils.random_id()}])
end
defp initial_file(%Runtime.MixStandalone{} = current_runtime) do
FileSystem.File.local(current_runtime.project_path)
end
defp initial_file(_runtime) do
Livebook.Config.local_filesystem_home()
end
defp matching_runtime?(%Runtime.MixStandalone{} = runtime, path) do
Path.expand(runtime.project_path) == Path.expand(path)
end
defp matching_runtime?(_runtime, _path), do: false
defp disabled?(path) do
not mix_project_root?(path)
end
defp mix_project_root?(path) do
File.dir?(path) and File.exists?(Path.join(path, "mix.exs"))
end
end
| 31.804511 | 138 | 0.629078 |
e81cd8aef4a8331d9f04724aa534474134285ff1 | 1,350 | ex | Elixir | lib/sanbase_web/graphql/resolvers/report_resolver.ex | santiment/sanbase2 | 9ef6e2dd1e377744a6d2bba570ea6bd477a1db31 | [
"MIT"
] | 81 | 2017-11-20T01:20:22.000Z | 2022-03-05T12:04:25.000Z | lib/sanbase_web/graphql/resolvers/report_resolver.ex | rmoorman/sanbase2 | 226784ab43a24219e7332c49156b198d09a6dd85 | [
"MIT"
] | 359 | 2017-10-15T14:40:53.000Z | 2022-01-25T13:34:20.000Z | lib/sanbase_web/graphql/resolvers/report_resolver.ex | rmoorman/sanbase2 | 226784ab43a24219e7332c49156b198d09a6dd85 | [
"MIT"
] | 16 | 2017-11-19T13:57:40.000Z | 2022-02-07T08:13:02.000Z | defmodule SanbaseWeb.Graphql.Resolvers.ReportResolver do
require Logger
alias Sanbase.Report
alias Sanbase.Billing.{Subscription, Product}
@product_sanbase Product.product_sanbase()
def upload_report(_root, %{report: report} = args, _resolution) do
{params, _} = Map.split(args, [:name, :description])
case Report.save_report(report, params) do
{:ok, report} -> {:ok, report}
{:error, _reason} -> {:error, "Can't save report!"}
end
end
def get_reports(_root, _args, %{context: %{auth: %{current_user: user}}}) do
plan =
Subscription.current_subscription(user, @product_sanbase)
|> Subscription.plan_name()
reports = Report.get_published_reports(%{is_logged_in: true, plan_atom_name: plan})
{:ok, reports}
end
def get_reports(_root, _args, _resolution) do
{:ok, Report.get_published_reports(%{is_logged_in: false})}
end
def get_reports_by_tags(_root, %{tags: tags}, %{context: %{auth: %{current_user: user}}}) do
plan =
Subscription.current_subscription(user, @product_sanbase)
|> Subscription.plan_name()
reports = Report.get_by_tags(tags, %{is_logged_in: true, plan_atom_name: plan})
{:ok, reports}
end
def get_reports_by_tags(_root, %{tags: tags}, _resolution) do
{:ok, Report.get_by_tags(tags, %{is_logged_in: false})}
end
end
| 29.347826 | 94 | 0.69037 |
e81ce3f29f7988fc92b78318355188621531092e | 376 | ex | Elixir | kousa/lib/onion/telemetry.ex | movwf/dogehouse | c97c04164e98853c6221100a97cc4ff1af84de7b | [
"MIT"
] | 9 | 2021-03-17T03:56:18.000Z | 2021-09-24T22:45:14.000Z | kousa/lib/onion/telemetry.ex | movwf/dogehouse | c97c04164e98853c6221100a97cc4ff1af84de7b | [
"MIT"
] | 12 | 2021-07-06T12:51:13.000Z | 2022-03-16T12:38:18.000Z | kousa/lib/onion/telemetry.ex | movwf/dogehouse | c97c04164e98853c6221100a97cc4ff1af84de7b | [
"MIT"
] | 4 | 2021-07-15T20:33:50.000Z | 2022-03-27T12:46:47.000Z | defmodule Onion.Telemetry do
use GenServer
def start_link(_) do
GenServer.start_link(__MODULE__, [], name: __MODULE__)
end
def init(_opts) do
:timer.send_interval(10_000, self(), :collect_metrics)
{:ok, %{}}
end
def handle_info(:collect_metrics, state) do
Kousa.Metric.UserSessions.set(Onion.UserSession.count())
{:noreply, state}
end
end
| 20.888889 | 60 | 0.696809 |
e81ce8608930d096599cdb2695ff5b360cd92eed | 329 | ex | Elixir | lib/codes/codes_n62.ex | badubizzle/icd_code | 4c625733f92b7b1d616e272abc3009bb8b916c0c | [
"Apache-2.0"
] | null | null | null | lib/codes/codes_n62.ex | badubizzle/icd_code | 4c625733f92b7b1d616e272abc3009bb8b916c0c | [
"Apache-2.0"
] | null | null | null | lib/codes/codes_n62.ex | badubizzle/icd_code | 4c625733f92b7b1d616e272abc3009bb8b916c0c | [
"Apache-2.0"
] | null | null | null | defmodule IcdCode.ICDCode.Codes_N62 do
alias IcdCode.ICDCode
def _N62 do
%ICDCode{full_code: "N62",
category_code: "N62",
short_code: "",
full_name: "Hypertrophy of breast",
short_name: "Hypertrophy of breast",
category_name: "Hypertrophy of breast"
}
end
end
| 20.5625 | 48 | 0.613982 |
e81cf52c46920ef143f23e53f531e9d7bb7b586b | 1,951 | exs | Elixir | mix.exs | Drahcirius/crux_structs | 53ba17c6ab0adfadbf69e236ec936fed8015420e | [
"MIT"
] | null | null | null | mix.exs | Drahcirius/crux_structs | 53ba17c6ab0adfadbf69e236ec936fed8015420e | [
"MIT"
] | null | null | null | mix.exs | Drahcirius/crux_structs | 53ba17c6ab0adfadbf69e236ec936fed8015420e | [
"MIT"
] | null | null | null | defmodule Crux.Structs.MixProject do
use Mix.Project
@vsn "0.2.1"
@name :crux_structs
def project do
[
start_permanent: Mix.env() == :prod,
package: package(),
app: @name,
version: @vsn,
elixir: "~> 1.6",
description: "Library providing Discord API structs for crux.",
source_url: "https://github.com/SpaceEEC/crux_structs/",
homepage_url: "https://github.com/SpaceEEC/crux_structs/",
deps: deps(),
aliases: aliases()
]
end
def package do
[
name: @name,
licenses: ["MIT"],
maintainers: ["SpaceEEC"],
links: %{
"GitHub" => "https://github.com/SpaceEEC/#{@name}/",
"Changelog" => "https://github.com/SpaceEEC/#{@name}/releases/tag/#{@vsn}/",
"Documentation" => "https://hexdocs.pm/#{@name}/#{@vsn}/",
"Unified Development Documentation" => "https://crux.randomly.space/",
"Crux Libraries Overview" => "https://github.com/SpaceEEC/crux/"
}
]
end
def application, do: []
defp deps do
[
{:ex_doc,
git: "https://github.com/spaceeec/ex_doc",
branch: "feat/umbrella",
only: :dev,
runtime: false},
{:credo, "~> 1.1", only: [:dev, :test], runtime: false},
{:jason, ">= 0.0.0", only: [:dev, :test], runtime: false}
]
end
defp aliases() do
[
docs: ["docs", &generate_config/1]
]
end
def generate_config(_) do
{data, 0} = System.cmd("git", ["tag"])
config =
data
|> String.trim()
|> String.split("\n")
|> Enum.map(&%{"url" => "https://hexdocs.pm/#{@name}/" <> &1, "version" => &1})
|> Enum.reverse()
|> Jason.encode!()
config = "var versionNodes = " <> config
path =
__ENV__.file
|> Path.join(Path.join(["..", "doc", "docs_config.js"]))
|> Path.expand()
File.write!(path, config)
Mix.Shell.IO.info(~s{Generated "#{path}".})
end
end
| 24.3875 | 85 | 0.540748 |
e81cfcf45f27f19b4f0c46d86614f49ab81b3434 | 291 | ex | Elixir | elixir/advent_of_code/lib/day3.ex | karlwnw/adventofcode2019 | 7a01a0dd9c3f93ae3f9aa123a91641a37289eb7a | [
"MIT"
] | 2 | 2020-01-02T12:59:44.000Z | 2020-01-04T19:21:31.000Z | elixir/advent_of_code/lib/day3.ex | karlwnw/adventofcode2019 | 7a01a0dd9c3f93ae3f9aa123a91641a37289eb7a | [
"MIT"
] | null | null | null | elixir/advent_of_code/lib/day3.ex | karlwnw/adventofcode2019 | 7a01a0dd9c3f93ae3f9aa123a91641a37289eb7a | [
"MIT"
] | null | null | null | defmodule Day3 do
@moduledoc """
iex -S mix
Day3.part1()
todo
"""
def part1(input) do
input
|> parse
# |> IO.inspect(label: "part1")
end
def parse(input) do
input
|> String.split("\n", trim: true)
|> Enum.map(&String.to_integer/1)
end
end
| 13.227273 | 37 | 0.553265 |
e81d07562a62682bb633914c5e8c2343bc717235 | 4,384 | ex | Elixir | lib/cforum/badges.ex | multitain/cforum_ex | 95634a547893f5392345b173f3c264b149e2b124 | [
"MIT"
] | 16 | 2019-04-04T06:33:33.000Z | 2021-08-16T19:34:31.000Z | lib/cforum/badges.ex | multitain/cforum_ex | 95634a547893f5392345b173f3c264b149e2b124 | [
"MIT"
] | 294 | 2019-02-10T11:10:27.000Z | 2022-03-30T04:52:53.000Z | lib/cforum/badges.ex | multitain/cforum_ex | 95634a547893f5392345b173f3c264b149e2b124 | [
"MIT"
] | 10 | 2019-02-10T10:39:24.000Z | 2021-07-06T11:46:05.000Z | defmodule Cforum.Badges do
@moduledoc """
The boundary for the Accounts system.
"""
import Ecto.Query, warn: false
import CforumWeb.Gettext
alias Cforum.Repo
alias Cforum.Badges.{Badge, BadgeUser, BadgeGroup}
alias Cforum.Notifications
alias Cforum.System
alias Cforum.Users
@doc """
Returns the list of badges.
## Examples
iex> list_badges()
[%Badge{}, ...]
"""
def list_badges(query_params \\ []) do
query_params = Keyword.merge([order: nil, limit: nil, search: nil, preload: [badges_users: :user]], query_params)
from(badge in Badge, preload: ^query_params[:preload])
|> Cforum.PagingApi.set_limit(query_params[:limit])
|> Cforum.OrderApi.set_ordering(query_params[:order], asc: :order)
|> Repo.all()
end
def list_badge_groups() do
from(bg in BadgeGroup, order_by: [asc: :name], preload: [:badges])
|> Repo.all()
end
@doc """
Counts the number of badges.
## Examples
iex> count_badges()
1
"""
def count_badges() do
from(
badge in Badge,
select: count()
)
|> Repo.one()
end
@doc """
Gets a single badge.
Raises `Ecto.NoResultsError` if the Badge does not exist.
## Examples
iex> get_badge!(123)
%Badge{}
iex> get_badge!(456)
** (Ecto.NoResultsError)
"""
def get_badge!(id) do
Repo.get!(Badge, id)
|> Repo.preload(badges_users: :user)
end
def get_badge_by(clauses, opts \\ []) do
Badge
|> Repo.get_by(clauses)
|> Repo.maybe_preload(opts[:with])
end
def get_badge_by!(clauses, opts \\ []) do
Badge
|> Repo.get_by!(clauses)
|> Repo.maybe_preload(opts[:with])
end
@doc """
Creates a badge.
## Examples
iex> create_badge(%{field: value})
{:ok, %Badge{}}
iex> create_badge(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_badge(current_user, attrs \\ %{}) do
System.audited("create", current_user, fn ->
%Badge{}
|> Badge.changeset(attrs)
|> Repo.insert()
end)
end
@doc """
Updates a badge.
## Examples
iex> update_badge(badge, %{field: new_value})
{:ok, %Badge{}}
iex> update_badge(badge, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_badge(current_user, %Badge{} = badge, attrs) do
System.audited("update", current_user, fn ->
badge
|> Badge.changeset(attrs)
|> Repo.update()
end)
end
@doc """
Deletes a Badge.
## Examples
iex> delete_badge(badge)
{:ok, %Badge{}}
iex> delete_badge(badge)
{:error, %Ecto.Changeset{}}
"""
def delete_badge(current_user, %Badge{} = badge) do
System.audited("destroy", current_user, fn ->
Repo.delete(badge)
end)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking badge changes.
## Examples
iex> change_badge(badge)
%Ecto.Changeset{source: %Badge{}}
"""
def change_badge(%Badge{} = badge) do
Badge.changeset(badge, %{})
end
def unique_users(%Badge{} = badge) do
badge.badges_users
|> Enum.reduce(%{}, fn bu, acc ->
Map.update(acc, bu.user_id, %{user: bu.user, times: 1, created_at: bu.created_at, active: bu.active}, fn mp ->
%{mp | times: mp[:times] + 1}
end)
end)
|> Map.values()
end
def grant_badge(type, user) when is_binary(type),
do: grant_badge(get_badge_by(badge_type: type), user)
def grant_badge({key, value}, user),
do: grant_badge(get_badge_by([{key, value}]), user)
def grant_badge(badge, user) do
System.audited("badge-gained", user, fn ->
%BadgeUser{}
|> BadgeUser.changeset(%{user_id: user.user_id, badge_id: badge.badge_id})
|> Repo.insert()
end)
|> notify_user(user, badge)
|> Users.discard_user_cache()
end
def notify_user({:ok, badge_user}, user, badge) do
subject =
gettext("You have won the %{mtype} “%{name}”!",
mtype: CforumWeb.Views.ViewHelpers.l10n_medal_type(badge.badge_medal_type),
name: badge.name
)
Notifications.create_notification(%{
recipient_id: user.user_id,
subject: subject,
oid: badge.badge_id,
otype: "badge",
path: CforumWeb.Router.Helpers.badge_path(CforumWeb.Endpoint, :show, badge.slug)
})
{:ok, badge_user}
end
def notify_user(val, _, _), do: val
end
| 21.596059 | 117 | 0.611542 |
e81dcd3e6e7abfefdf2b5d13dd99994adad8f6af | 211 | exs | Elixir | priv/repo/migrations/20170626133913_create_listing.exs | diemesleno/backend | a55f9c846cc826b5269f3fd6ce19223f0c6a1682 | [
"MIT"
] | 1 | 2020-01-23T04:24:58.000Z | 2020-01-23T04:24:58.000Z | priv/repo/migrations/20170626133913_create_listing.exs | diemesleno/backend | a55f9c846cc826b5269f3fd6ce19223f0c6a1682 | [
"MIT"
] | null | null | null | priv/repo/migrations/20170626133913_create_listing.exs | diemesleno/backend | a55f9c846cc826b5269f3fd6ce19223f0c6a1682 | [
"MIT"
] | 1 | 2019-12-31T16:11:21.000Z | 2019-12-31T16:11:21.000Z | defmodule Re.Repo.Migrations.CreateListing do
use Ecto.Migration
def change do
create table(:listings) do
add :description, :string
add :name, :string
timestamps()
end
end
end
| 15.071429 | 45 | 0.663507 |
e81dd79acb95f197d7f9459ffd2c07b14bfd583c | 1,871 | ex | Elixir | clients/sheets/lib/google_api/sheets/v4/model/batch_get_values_response.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | 1 | 2018-12-03T23:43:10.000Z | 2018-12-03T23:43:10.000Z | clients/sheets/lib/google_api/sheets/v4/model/batch_get_values_response.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | null | null | null | clients/sheets/lib/google_api/sheets/v4/model/batch_get_values_response.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the elixir code generator program.
# Do not edit the class manually.
defmodule GoogleApi.Sheets.V4.Model.BatchGetValuesResponse do
  @moduledoc """
  The response when retrieving more than one range of values in a spreadsheet.

  ## Attributes

  * `spreadsheetId` (*type:* `String.t`, *default:* `nil`) - The ID of the spreadsheet the data was retrieved from.
  * `valueRanges` (*type:* `list(GoogleApi.Sheets.V4.Model.ValueRange.t)`, *default:* `nil`) - The requested values. The order of the ValueRanges is the same as the
      order of the requested ranges.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :spreadsheetId => String.t(),
          :valueRanges => list(GoogleApi.Sheets.V4.Model.ValueRange.t())
        }

  # Field definitions consumed by GoogleApi.Gax.ModelBase for (de)serialization.
  field :spreadsheetId
  field :valueRanges, as: GoogleApi.Sheets.V4.Model.ValueRange, type: :list
end
defimpl Poison.Decoder, for: GoogleApi.Sheets.V4.Model.BatchGetValuesResponse do
  # Delegate decoding to the model module's own `decode/2`.
  def decode(value, options),
    do: GoogleApi.Sheets.V4.Model.BatchGetValuesResponse.decode(value, options)
end

defimpl Poison.Encoder, for: GoogleApi.Sheets.V4.Model.BatchGetValuesResponse do
  # Encode through the shared Gax model encoder.
  def encode(value, options),
    do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 36.686275 | 166 | 0.737039 |
e81dda849668f2f499620f95120e553235f0e768 | 521 | ex | Elixir | apps/demarest_mays/lib/demarest_mays/application.ex | bravely/demarest_mays | 9786efa1c589b23e7f8aeb7182cdc140c451fe92 | [
"MIT"
] | null | null | null | apps/demarest_mays/lib/demarest_mays/application.ex | bravely/demarest_mays | 9786efa1c589b23e7f8aeb7182cdc140c451fe92 | [
"MIT"
] | null | null | null | apps/demarest_mays/lib/demarest_mays/application.ex | bravely/demarest_mays | 9786efa1c589b23e7f8aeb7182cdc140c451fe92 | [
"MIT"
defmodule DemarestMays.Application do
  @moduledoc """
  The DemarestMays Application Service.

  The demarest_mays system business domain lives in this application.
  Exposes API to clients such as the `DemarestMaysWeb` application
  for use in channels, controllers, and elsewhere.
  """
  use Application

  @doc """
  Starts the application's supervision tree.

  Boots the Ecto repo under a `:one_for_one` supervisor and returns
  `{:ok, pid}`, as required by the `Application` behaviour.
  """
  def start(_type, _args) do
    # `Supervisor.Spec` and `supervisor/2` are deprecated (Elixir >= 1.5);
    # the module-based child spec below is the equivalent modern form.
    children = [
      DemarestMays.Repo
    ]

    Supervisor.start_link(children, strategy: :one_for_one, name: DemarestMays.Supervisor)
  end
end
| 26.05 | 69 | 0.742802 |
e81e420286e7efb38e415c95cd12df2743f24741 | 1,376 | ex | Elixir | lib/cadet/assessments/library.ex | chownces/cadet | 0d8b264e4fad1c9aaab7ef3f037ac4e07a4c9b22 | [
"Apache-2.0"
] | 27 | 2018-01-20T05:56:24.000Z | 2021-05-24T03:21:55.000Z | lib/cadet/assessments/library.ex | chownces/cadet | 0d8b264e4fad1c9aaab7ef3f037ac4e07a4c9b22 | [
"Apache-2.0"
] | 731 | 2018-04-16T13:25:49.000Z | 2021-06-22T07:16:12.000Z | lib/cadet/assessments/library.ex | chownces/cadet | 0d8b264e4fad1c9aaab7ef3f037ac4e07a4c9b22 | [
"Apache-2.0"
defmodule Cadet.Assessments.Library do
  @moduledoc """
  The library entity represents a library to be used in a question.
  """
  use Cadet, :model

  alias Cadet.Assessments.Library.ExternalLibrary

  @primary_key false
  embedded_schema do
    field(:chapter, :integer, default: 1)
    field(:globals, :map, default: %{})

    embeds_one(:external, ExternalLibrary, on_replace: :update)
  end

  @required_fields ~w(chapter)a
  @optional_fields ~w(globals)a
  @required_embeds ~w(external)a

  @doc """
  Builds a changeset for a library, casting the chapter, globals and the
  embedded external library, and validating required fields.
  """
  def changeset(library, params \\ %{}) do
    library
    |> cast(params, @required_fields ++ @optional_fields)
    |> cast_embed(:external)
    |> put_default_external()
    |> validate_required(@required_fields ++ @required_embeds)
    |> validate_globals()
  end

  # Validates that, when :globals changed, every key and value is a binary.
  # No change to :globals leaves the changeset untouched.
  defp validate_globals(changeset) do
    case get_change(changeset, :globals) do
      nil ->
        changeset

      globals ->
        if Enum.all?(globals, fn {name, value} -> is_binary(name) and is_binary(value) end) do
          changeset
        else
          add_error(changeset, :globals, "invalid format")
        end
    end
  end

  @doc """
  Ensures the :external embed has a change, defaulting it to an empty map
  so the embed is always cast/validated.
  """
  def put_default_external(changeset) do
    if get_change(changeset, :external) do
      changeset
    else
      put_change(changeset, :external, %{})
    end
  end
end
| 25.962264 | 96 | 0.659157 |
e81e4a7e1577011261aa8738f05cd333a51b4b8a | 3,105 | exs | Elixir | mix.exs | univers-agency/brando | 69c3c52498a3f64518da3522cd9f27294a52cc68 | [
"Apache-2.0"
] | 1 | 2020-04-26T09:53:02.000Z | 2020-04-26T09:53:02.000Z | mix.exs | univers-agency/brando | 69c3c52498a3f64518da3522cd9f27294a52cc68 | [
"Apache-2.0"
] | 198 | 2019-08-20T16:16:07.000Z | 2020-07-03T15:42:07.000Z | mix.exs | univers-agency/brando | 69c3c52498a3f64518da3522cd9f27294a52cc68 | [
"Apache-2.0"
] | null | null | null | defmodule Brando.Mixfile do
use Mix.Project
@version "0.53.0-dev"
@description "Brando CMS"
  # Mix project configuration: app name, Elixir requirement, compilers,
  # per-environment compile paths, coverage tool and ExDoc settings.
  def project do
    [
      app: :brando,
      version: @version,
      elixir: "~> 1.10",
      deps: deps(),
      package: package(),
      compilers: [:gettext] ++ Mix.compilers(),
      elixirc_paths: elixirc_paths(Mix.env()),
      test_coverage: [tool: ExCoveralls],
      description: @description,
      aliases: aliases(),

      # Docs
      name: "Brando",
      docs: [
        source_ref: "v#{@version}",
        source_url: "https://github.com/brandocms/brando"
      ]
    ]
  end
  # Project dependencies, grouped by purpose. Entries restricted with
  # `only:` are compiled solely in the listed Mix environments.
  defp deps do
    [
      {:phoenix, "~> 1.6"},
      {:phoenix_ecto, "~> 4.0"},
      {:postgrex, "~> 0.14"},
      {:ecto, "~> 3.8"},
      {:ecto_sql, "~> 3.8"},
      # liveview
      {:phoenix_live_view, "~> 0.17.9"},
      {:phoenix_html, "~> 3.2"},
      # hashing/passwords
      {:bcrypt_elixir, "~> 3.0"},
      {:comeonin, "~> 5.0"},
      {:base62, "~> 1.2"},
      # monitoring
      {:sentry, "~> 8.0"},
      # cache
      {:cachex, "~> 3.2"},
      # cron
      {:oban, "~> 2.11"},
      # sitemaps
      {:sitemapper, "~> 0.5"},
      # images
      {:fastimage, "~> 1.0.0-rc4"},
      # AWS
      {:ex_aws, "~> 2.0"},
      {:ex_aws_s3, "~> 2.0"},
      {:hackney, "~> 1.9"},
      {:sweet_xml, "~> 0.6"},
      # Hashing
      {:hashids, "~> 2.0"},
      # Liquid templates
      {:liquex, "~> 0.7"},
      # Misc
      {:httpoison, "~> 1.0"},
      {:gettext, "~> 0.19"},
      {:earmark, "1.4.4"},
      {:jason, "~> 1.0"},
      {:slugger, "~> 0.2"},
      {:recase, "~> 0.2"},
      {:ecto_nested_changeset, "~> 0.2"},
      # Dev dependencies
      {:credo, "~> 1.5", only: :dev, runtime: false},
      {:dialyxir, "~> 1.0", only: :dev, runtime: false},
      {:mix_test_watch, "~> 1.0", only: :dev, runtime: false},
      # Test dependencies
      {:ex_machina, "~> 2.0", only: :test, runtime: false},
      {:excoveralls, "~> 0.6", only: :test, runtime: false},
      {:floki, "~> 0.32", only: :test},
      # Documentation dependencies
      {:ex_doc, "~> 0.11", only: :docs, runtime: false},
      {:inch_ex, "~> 2.1.0-rc", only: :docs, runtime: false}
    ]
  end
  # Hex package metadata: maintainers, license and the files shipped
  # in the published package.
  defp package do
    [
      maintainers: ["Univers TM"],
      licenses: ["MIT"],
      files: [
        "assets",
        "config",
        "lib",
        "priv",
        "test",
        "mix.exs",
        "README.md",
        "CHANGELOG.md",
        "UPGRADE.md"
      ]
    ]
  end
  # Specifies which paths to compile per environment.
  # The :test environment additionally compiles test support helpers.
  defp elixirc_paths(:test), do: ["lib", "web", "test/support"]
  defp elixirc_paths(_), do: ["lib", "web"]
  # Aliases are shortcuts or tasks specific to the current project.
  # For example, to create, migrate and run the seeds file at once:
  #
  #     $ mix ecto.setup
  #
  # See the documentation for `Mix` for more info on aliases.
  defp aliases do
    [
      "ecto.setup": ["ecto.create", "ecto.migrate", "run priv/repo/seeds.exs"],
      "ecto.reset": ["ecto.drop", "ecto.setup"],
      "ecto.seed": ["run priv/repo/seeds.exs"]
    ]
  end
end
| 23.171642 | 79 | 0.490821 |
e81e5a40d4cecaef18a3cdddb33118de74e98d78 | 2,210 | ex | Elixir | lib/local_uploader.ex | mathieuprog/uploader | bafb1810d316444118adbe02cec3d9ce0f9ebba9 | [
"Apache-2.0"
] | 7 | 2019-11-04T14:41:59.000Z | 2020-03-01T00:42:49.000Z | lib/local_uploader.ex | mathieuprog/uploader | bafb1810d316444118adbe02cec3d9ce0f9ebba9 | [
"Apache-2.0"
] | null | null | null | lib/local_uploader.ex | mathieuprog/uploader | bafb1810d316444118adbe02cec3d9ce0f9ebba9 | [
"Apache-2.0"
defmodule Uploader.LocalUploader do
  @moduledoc ~S"""
  Store file uploads locally. This module implements the `maybe_copy_file/4` callback
  function defined in the `Uploader` module.
  """

  import Uploader.FileHasher, only: [hash_file_content: 1]

  @behaviour Uploader

  @doc ~S"""
  Copies the uploaded file to a given new file path.

  The third optional argument `on_file_exists` controls what happens when the
  destination path already exists:

    * `nil` (default): the file is not copied and an error is returned.
    * `:overwrite`: the existing file is overwritten.
    * `:compare_hash`: the file is not copied; if the content hashes of the
      two files differ, an error is returned.

  Returns `:ok` on success, otherwise `{:error, {:file_path_exists, path}}`.
  """
  def maybe_copy_file(tmp_file_path, new_file_path, on_file_exists \\ nil) do
    if File.exists?(new_file_path) do
      handle_existing_file(tmp_file_path, new_file_path, on_file_exists)
    else
      copy_file(tmp_file_path, new_file_path)
    end
  end

  # Destination exists and we were asked to overwrite: just copy.
  defp handle_existing_file(tmp_file_path, new_file_path, :overwrite) do
    copy_file(tmp_file_path, new_file_path)
  end

  # Destination exists: accept silently only when both files share the
  # same content hash, otherwise report the conflict.
  defp handle_existing_file(tmp_file_path, new_file_path, :compare_hash) do
    if hash_file_content(tmp_file_path) == hash_file_content(new_file_path) do
      :ok
    else
      {:error, {:file_path_exists, new_file_path}}
    end
  end

  # Destination exists and no conflict policy was given: refuse to copy.
  defp handle_existing_file(_tmp_file_path, new_file_path, _on_file_exists) do
    {:error, {:file_path_exists, new_file_path}}
  end

  # Ensures the target directory exists, then copies; raises on I/O failure.
  defp copy_file(tmp_file_path, new_file_path) do
    new_file_path |> Path.dirname() |> File.mkdir_p!()
    File.cp!(tmp_file_path, new_file_path)
    :ok
  end
end
| 35.079365 | 89 | 0.747059 |
e81e72d1cf32884a8a69f57f0396998570ad0f41 | 3,724 | ex | Elixir | samples/client/petstore/elixir/lib/openapi_petstore/request_builder.ex | aaronclong/openapi-generator | 7437084cd35f5e084ffc684205241438cc2984f9 | [
"Apache-2.0"
] | 5 | 2019-12-03T13:50:09.000Z | 2021-11-14T12:59:48.000Z | samples/client/petstore/elixir/lib/openapi_petstore/request_builder.ex | cedricziel/openapi-generator | a6a1264f252abd7c55e58a6653cdf308df88826e | [
"Apache-2.0"
] | 4 | 2021-09-20T22:32:46.000Z | 2022-02-27T15:31:33.000Z | samples/client/petstore/elixir/lib/openapi_petstore/request_builder.ex | cedricziel/openapi-generator | a6a1264f252abd7c55e58a6653cdf308df88826e | [
"Apache-2.0"
] | 2 | 2020-03-05T14:10:41.000Z | 2022-03-19T23:52:35.000Z | # NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
# https://openapi-generator.tech
# Do not edit the class manually.
defmodule OpenapiPetstore.RequestBuilder do
  @moduledoc """
  Helper functions for building Tesla requests
  """

  @doc """
  Specify the request method when building a request

  ## Parameters

  - request (Map) - Collected request options
  - m (atom) - Request method

  ## Returns

  Map
  """
  @spec method(map(), atom) :: map()
  def method(request, m) do
    Map.put_new(request, :method, m)
  end

  @doc """
  Specify the request method when building a request

  ## Parameters

  - request (Map) - Collected request options
  - u (String) - Request URL

  ## Returns

  Map
  """
  @spec url(map(), String.t) :: map()
  def url(request, u) do
    Map.put_new(request, :url, u)
  end

  @doc """
  Add optional parameters to the request

  ## Parameters

  - request (Map) - Collected request options
  - definitions (Map) - Map of parameter name to parameter location.
  - options (KeywordList) - The provided optional parameters

  ## Returns

  Map
  """
  @spec add_optional_params(map(), %{optional(atom) => atom}, keyword()) :: map()
  def add_optional_params(request, _, []), do: request

  def add_optional_params(request, definitions, [{key, value} | tail]) do
    case definitions do
      # Only options declared in `definitions` are added; unknown keys are skipped.
      %{^key => location} ->
        request
        |> add_param(location, key, value)
        |> add_optional_params(definitions, tail)

      _ ->
        add_optional_params(request, definitions, tail)
    end
  end

  @doc """
  Add optional parameters to the request

  ## Parameters

  - request (Map) - Collected request options
  - location (atom) - Where to put the parameter
  - key (atom) - The name of the parameter
  - value (any) - The value of the parameter

  ## Returns

  Map
  """
  @spec add_param(map(), atom, atom, any()) :: map()
  # A raw `:body` value replaces any previously collected body wholesale.
  def add_param(request, :body, :body, value), do: Map.put(request, :body, value)

  # Named body parts are JSON-encoded and appended to a multipart body,
  # creating the multipart container lazily on first use.
  def add_param(request, :body, key, value) do
    request
    |> Map.put_new_lazy(:body, &Tesla.Multipart.new/0)
    |> Map.update!(:body, &(Tesla.Multipart.add_field(&1, key, Poison.encode!(value), headers: [{:"Content-Type", "application/json"}])))
  end

  def add_param(request, :headers, key, value) do
    request
    |> Map.update(:headers, %{key => value}, &(Map.put(&1, key, value)))
  end

  # File uploads also go into the (lazily created) multipart body.
  def add_param(request, :file, name, path) do
    request
    |> Map.put_new_lazy(:body, &Tesla.Multipart.new/0)
    |> Map.update!(:body, &(Tesla.Multipart.add_file(&1, path, name: name)))
  end

  def add_param(request, :form, name, value) do
    request
    |> Map.update(:body, %{name => value}, &(Map.put(&1, name, value)))
  end

  # Fallback: accumulate the parameter as a tuple list under `location`
  # (e.g. query parameters).
  def add_param(request, location, key, value) do
    Map.update(request, location, [{key, value}], &(&1 ++ [{key, value}]))
  end

  @doc """
  Handle the response for a Tesla request

  ## Parameters

  - arg1 (Tesla.Env.t | term) - The response object
  - arg2 (:false | struct | [struct]) - The shape of the struct to deserialize into

  ## Returns

  {:ok, struct} on success
  {:error, term} on failure
  """
  @spec decode(Tesla.Env.t | term()) :: {:ok, struct()} | {:error, Tesla.Env.t} | {:error, term()}
  def decode(%Tesla.Env{status: 200, body: body}), do: Poison.decode(body)
  def decode(response), do: {:error, response}

  @spec decode(Tesla.Env.t | term(), :false | struct() | [struct()]) :: {:ok, struct()} | {:error, Tesla.Env.t} | {:error, term()}
  # `false` means "don't deserialize" — return the whole env untouched.
  def decode(%Tesla.Env{status: 200} = env, false), do: {:ok, env}
  def decode(%Tesla.Env{status: 200, body: body}, struct), do: Poison.decode(body, as: struct)
  def decode(response, _struct), do: {:error, response}
end
| 28.646154 | 137 | 0.64232 |
e81ebe759939e91e084d9415e3b44c4120f4f34c | 7,584 | ex | Elixir | lib/mldht/search/worker.ex | cit/MLDHT | 3ada20ab01cf1eb312bf673f7ddd244f45319744 | [
"MIT"
] | 49 | 2015-11-24T12:12:48.000Z | 2017-06-14T05:18:51.000Z | lib/mldht/search/worker.ex | faried/MlDHT | 3ada20ab01cf1eb312bf673f7ddd244f45319744 | [
"MIT"
] | 4 | 2019-07-03T15:18:51.000Z | 2019-12-01T07:25:14.000Z | lib/mldht/search/worker.ex | faried/MlDHT | 3ada20ab01cf1eb312bf673f7ddd244f45319744 | [
"MIT"
] | 7 | 2017-07-14T03:19:00.000Z | 2022-01-25T23:02:01.000Z | defmodule MlDHT.Search.Worker do
@moduledoc false
@typedoc """
A transaction_id (tid) is a two bytes binary.
"""
@type transaction_id :: <<_::16>>
@typedoc """
A DHT search is divided in a :get_peers or a :find_node search.
"""
@type search_type :: :get_peers | :find_node
use GenServer, restart: :temporary
require Logger
alias MlDHT.RoutingTable.Distance
alias MlDHT.Search.Node
  ##############
  # Client API #
  ##############

  # NOTE(review): this module is `use GenServer` (see the module header) yet the
  # client API drives it through `GenStateMachine` calls/casts — confirm this is
  # intentional and that the two are interchangeable in this codebase.

  # @spec start_link() :: atom
  # Starts a search worker; expects `opts` to carry :socket, :node_id, :type,
  # :tid and :name (a registry via-tuple used to register the process).
  def start_link(opts) do
    args = [opts[:socket], opts[:node_id], opts[:type], opts[:tid], opts[:name]]
    GenStateMachine.start_link(__MODULE__, args, name: opts[:name])
  end

  # Kick off a get_peers / find_node search on an already started worker.
  def get_peers(pid, args), do: GenStateMachine.cast(pid, {:get_peers, args})
  def find_node(pid, args), do: GenStateMachine.cast(pid, {:find_node, args})

  @doc """
  Stops a search process.
  """
  @spec stop(pid) :: :ok
  def stop(pid), do: GenStateMachine.call(pid, :stop)

  @doc """
  Returns the type of the search process.
  """
  @spec type(pid) :: search_type
  def type(pid), do: GenStateMachine.call(pid, :type)

  # Returns the transaction id of the search process.
  def tid(pid), do: GenStateMachine.call(pid, :tid)

  # @spec handle_reply(pid, foo, list) :: :ok
  # Forwards a remote node's reply (and any nodes it returned) to the worker.
  def handle_reply(pid, remote, nodes) do
    GenStateMachine.cast(pid, {:handle_reply, remote, nodes})
  end
  ####################
  # Server Callbacks #
  ####################

  # Initializes worker state from the positional args built in start_link/1.
  def init([socket, node_id, type, tid, name]) do
    ## Extract the id from the via string
    # Assumes `name` is shaped like `{:via, mod, {registry, id}}` — the match
    # below only keeps the innermost id.
    {_, _, {_, id}} = name

    {:ok, %{
        :socket => socket,
        :node_id => node_id,
        :type => type,
        :tid => tid,
        :name => id
     }}
  end
  # One tick of the iterative search loop. Either finishes the search
  # (optionally announcing ourselves for get_peers) or queries up to the
  # three closest not-yet-responded nodes and re-arms the timer.
  def handle_info(:search_iterate, state) do
    if search_completed?(state.nodes, state.target) do
      Logger.debug "Search is complete"

      ## If the search is complete and it was a get_peers search, then we will
      ## send the closest peers an announce_peer message.
      if Map.has_key?(state, :announce) and state.announce == true do
        send_announce_msg(state)
      end

      MlDHT.Registry.unregister(state.name)

      {:stop, :normal, state}
    else
      ## Send queries to the 3 closest nodes that have not responded, have
      ## been asked fewer than 3 times, and were last asked over 5s ago.
      new_state = state.nodes
      |> Distance.closest_nodes(state.target)
      |> Enum.filter(fn(x) ->
        x.responded == false and
        x.requested < 3 and
        Node.last_time_requested(x) > 5
      end)
      |> Enum.slice(0..2)
      |> nodesinspector()
      |> send_queries(state)

      ## Restart Timer
      Process.send_after(self(), :search_iterate, 1_000)

      {:noreply, new_state}
    end
  end

  # Debug passthrough hook: returns the node list unchanged (logging is
  # commented out).
  def nodesinspector(nodes) do
    # Logger.error "#{inspect nodes}"
    nodes
  end
  # Stop the worker on request, removing it from the local registry first.
  def handle_call(:stop, _from, state) do
    MlDHT.Registry.unregister(state.name)
    {:stop, :normal, :ok, state}
  end

  def handle_call(:type, _from, state) do
    {:reply, state.type, state}
  end

  def handle_call(:tid, _from, state) do
    {:reply, state.tid, state}
  end

  # Start a get_peers search: merge the request args into state and arm the
  # first :search_iterate tick (via the closure).
  def handle_cast({:get_peers, args}, state) do
    new_state = start_search_closure(:get_peers, args, state).()
    {:noreply, new_state}
  end

  def handle_cast({:find_node, args}, state) do
    new_state = start_search_closure(:find_node, args, state).()
    {:noreply, new_state}
  end

  # Reply that carried no new nodes: just mark the sender as responded.
  def handle_cast({:handle_reply, remote, nil}, state) do
    old_nodes = update_responded_node(state.nodes, remote)

    ## If the reply contains values we need to inform the user of this
    ## information and call the callback function.
    if remote.values, do: Enum.each(remote.values, state.callback)

    {:noreply, %{state | nodes: old_nodes}}
  end

  # Reply with candidate nodes: mark the sender responded and append any
  # nodes we have not seen yet (duplicates map to nil and are filtered out).
  def handle_cast({:handle_reply, remote, nodes}, state) do
    old_nodes = update_responded_node(state.nodes, remote)

    new_nodes = Enum.map(nodes, fn(node) ->
      {id, {ip, port}} = node
      unless Enum.find(state.nodes, fn(x) -> x.id == id end) do
        %Node{id: id, ip: ip, port: port}
      end
    end)
    |> Enum.filter(fn(x) -> x != nil end)

    {:noreply, %{state | nodes: old_nodes ++ new_nodes}}
  end
  #####################
  # Private Functions #
  #####################

  # Sends an announce_peer message to the closest (up to 7) nodes that have
  # responded during the search. When state.port == 0 the implied_port flag
  # is set so peers use the UDP source port instead.
  def send_announce_msg(state) do
    state.nodes
    |> Distance.closest_nodes(state.target, 7)
    |> Enum.filter(fn(node) -> node.responded == true end)
    |> Enum.each(fn(node) ->
      Logger.debug "[#{Base.encode16 node.id}] << announce_peer"

      args = [node_id: state.node_id, info_hash: state.target,
              token: node.token, port: node.port]

      args = if state.port == 0, do: args ++ [implied_port: true], else: args

      payload = KRPCProtocol.encode(:announce_peer, args)
      :gen_udp.send(state.socket, node.ip, node.port, payload)
    end)
  end

  ## This function merges args (keyword list) with the state map and returns a
  ## function depending on the type (:get_peers, :find_node). Calling the
  ## returned closure also schedules the first :search_iterate tick (500ms).
  defp start_search_closure(type, args, state) do
    fn() ->
      Process.send_after(self(), :search_iterate, 500)

      ## Convert the keyword list to a map and merge it with state.
      args
      |> Enum.into(%{})
      |> Map.merge(state)
      |> Map.put(:type, type)
      |> Map.put(:nodes, nodes_to_search_nodes(args[:start_nodes]))
    end
  end
  # Sends a request message to every node in the list, incrementing each
  # node's :requested counter and stamping :request_sent. Returns the state
  # with the updated node list.
  defp send_queries([], state), do: state
  defp send_queries([node | rest], state) do
    node_id_enc = node.id |> Base.encode16()
    Logger.debug "[#{node_id_enc}] << #{state.type}"

    payload = gen_request_msg(state.type, state)
    :gen_udp.send(state.socket, node.ip, node.port, payload)

    new_nodes = state.nodes
    |> update_nodes(node.id, :requested, &(&1.requested + 1))
    |> update_nodes(node.id, :request_sent, fn(_) -> :os.system_time(:millisecond) end)

    send_queries(rest, %{state | nodes: new_nodes})
  end
  # Converts a list of start nodes ({id, ip, port} tuples or routing-table
  # pids) into %Search.Node{} structs.
  defp nodes_to_search_nodes(nodes) do
    Enum.map(nodes, fn(node) ->
      {id, ip, port} = extract_node_infos(node)
      %Node{id: id, ip: ip, port: port}
    end)
  end

  # Builds the KRPC request payload for the current search type.
  defp gen_request_msg(:find_node, state) do
    args = [tid: state.tid, node_id: state.node_id, target: state.target]
    KRPCProtocol.encode(:find_node, args)
  end

  defp gen_request_msg(:get_peers, state) do
    args = [tid: state.tid, node_id: state.node_id, info_hash: state.target]
    KRPCProtocol.encode(:get_peers, args)
  end

  ## We need to know which node in our node list has responded. This function
  ## goes through the node list and sets :responded of the responding node to
  ## true. If the reply from the remote node also contains a token, this
  ## function stores it too (needed later for announce_peer).
  defp update_responded_node(nodes, remote) do
    node_list = update_nodes(nodes, remote.node_id, :responded)

    if Map.has_key?(remote, :token) do
      update_nodes(node_list, remote.node_id, :token, fn(_) -> remote.token end)
    else
      node_list
    end
  end
## This function is a helper function to update the node list easily.
defp update_nodes(nodes, node_id, key) do
update_nodes(nodes, node_id, key, fn(_) -> true end)
end
defp update_nodes(nodes, node_id, key, func) do
Enum.map(nodes, fn(node) ->
if node.id == node_id do
Map.put(node, key, func.(node))
else
node
end
end)
end
defp extract_node_infos(node) when is_tuple(node), do: node
defp extract_node_infos(node) when is_pid(node) do
MlDHT.RoutingTable.Node.to_tuple(node)
end
## This function contains the condition when a search is completed.
defp search_completed?(nodes, target) do
nodes
|> Distance.closest_nodes(target, 7)
|> Enum.all?(fn(node) ->
node.responded == true or node.requested >= 3
end)
end
end
| 28.088889 | 87 | 0.635812 |
e81ec743175bb606b7e2346c35e09e6f7cc2a2aa | 2,267 | ex | Elixir | deps/credo/lib/credo/check/readability/predicate_function_names.ex | BandanaPandey/nary_tree | fb1eeb69e38e43c9f9ffb54297cef52dff5c928d | [
"MIT"
] | 13 | 2018-09-19T21:03:29.000Z | 2022-01-27T04:06:32.000Z | deps/credo/lib/credo/check/readability/predicate_function_names.ex | BandanaPandey/nary_tree | fb1eeb69e38e43c9f9ffb54297cef52dff5c928d | [
"MIT"
] | 1 | 2020-05-26T04:16:57.000Z | 2020-05-26T04:16:57.000Z | deps/credo/lib/credo/check/readability/predicate_function_names.ex | BandanaPandey/nary_tree | fb1eeb69e38e43c9f9ffb54297cef52dff5c928d | [
"MIT"
] | 3 | 2020-05-21T04:32:08.000Z | 2021-07-28T05:14:01.000Z | defmodule Credo.Check.Readability.PredicateFunctionNames do
@moduledoc """
Predicate functions/macros should be named accordingly:
* For functions, they should end in a question mark.
# preferred
defp user?(cookie) do
end
defp has_attachment?(mail) do
end
# NOT preferred
defp is_user?(cookie) do
end
defp is_user(cookie) do
end
* For guard-safe macros they should have the prefix `is_` and not end in a question mark.
Like all `Readability` issues, this one is not a technical concern.
But you can improve the odds of others reading and liking your code by making
it easier to follow.
"""
@explanation [check: @moduledoc]
@def_ops [:def, :defp, :defmacro]
use Credo.Check, base_priority: :high
@doc false
def run(source_file, params \\ []) do
issue_meta = IssueMeta.for(source_file, params)
Credo.Code.prewalk(source_file, &traverse(&1, &2, issue_meta))
end
for op <- @def_ops do
# catch variables named e.g. `defp`
defp traverse({unquote(op), _meta, nil} = ast, issues, _issue_meta) do
{ast, issues}
end
defp traverse({unquote(op) = op, _meta, arguments} = ast, issues, issue_meta) do
{ast, issues_for_definition(op, arguments, issues, issue_meta)}
end
end
defp traverse(ast, issues, _issue_meta) do
{ast, issues}
end
defp issues_for_definition(op, body, issues, issue_meta) do
case Enum.at(body, 0) do
{name, meta, nil} ->
issues_for_name(op, name, meta, issues, issue_meta)
_ ->
issues
end
end
def issues_for_name(_op, name, meta, issues, issue_meta) do
name = to_string(name)
cond do
String.starts_with?(name, "is_") && String.ends_with?(name, "?") ->
[issue_for(issue_meta, meta[:line], name, :predicate_and_question_mark) | issues]
String.starts_with?(name, "is_") ->
[issue_for(issue_meta, meta[:line], name, :only_predicate) | issues]
true ->
issues
end
end
defp issue_for(issue_meta, line_no, trigger, _) do
format_issue issue_meta,
message: "Predicate function names should not start with 'is', and should end in a question mark.",
trigger: trigger,
line_no: line_no
end
end
| 27.313253 | 105 | 0.659462 |
e81ef10070b2caf11184482cb24c69097b8acacb | 61,793 | ex | Elixir | lib/swarm/tracker/tracker.ex | walnut92/swarm | 4be4e60d99b47da1b909dfd089d932edddd5a403 | [
"MIT"
] | null | null | null | lib/swarm/tracker/tracker.ex | walnut92/swarm | 4be4e60d99b47da1b909dfd089d932edddd5a403 | [
"MIT"
] | null | null | null | lib/swarm/tracker/tracker.ex | walnut92/swarm | 4be4e60d99b47da1b909dfd089d932edddd5a403 | [
"MIT"
] | null | null | null | defmodule Swarm.Tracker do
@moduledoc """
This module implements the distributed tracker for process registrations and groups.
It is implemented as a finite state machine, via `:gen_statem`.
Each node Swarm runs on will have a single instance of this process, and the trackers will
replicate data between each other, and/or forward requests to remote trackers as necessary.
"""
use GenStateMachine, callback_mode: :state_functions
@sync_nodes_timeout 5_000
@retry_interval 1_000
@retry_max_attempts 10
@default_anti_entropy_interval 5 * 60_000
import Swarm.Entry
require Logger
require Swarm.Registry
alias Swarm.IntervalTreeClock, as: Clock
alias Swarm.Registry
alias Swarm.Distribution.Strategy
  defmodule Tracking do
    @moduledoc false

    # Pending track request: the name being registered, the MFA metadata used
    # to (re)start the process, and the caller awaiting the reply.
    # NOTE(review): `f` is typed `function()` but MFA tuples normally carry an
    # atom function name — confirm the intended type.
    @type t :: %__MODULE__{
            name: term(),
            meta: %{mfa: {m :: atom(), f :: function(), a :: list()}},
            from: {pid, tag :: term}
          }
    defstruct [:name, :meta, :from]
  end
defmodule TrackerState do
@moduledoc false
@type t :: %__MODULE__{
clock: nil | Swarm.IntervalTreeClock.t(),
strategy: Strategy.t(),
self: atom(),
sync_node: nil | atom(),
sync_ref: nil | reference(),
pending_sync_reqs: [pid()]
}
defstruct clock: nil,
nodes: [],
strategy: nil,
self: :nonode@nohost,
sync_node: nil,
sync_ref: nil,
pending_sync_reqs: []
end
  # Public API
  #
  # All public calls are funnelled to the singleton tracker process with an
  # :infinity timeout (unless the caller supplies one), since registration may
  # involve cross-node coordination.

  @doc """
  Authoritatively looks up the pid associated with a given name.
  """
  def whereis(name),
    do: GenStateMachine.call(__MODULE__, {:whereis, name}, :infinity)

  @doc """
  Hand off all the processes running on the given worker to the remaining nodes in the cluster.
  This can be used to gracefully shut down a node.
  Note that if you don't shut down the node after the handoff a rebalance can lead to processes being scheduled on it again.
  In other words the handoff doesn't blacklist the node for further rebalances.
  """
  def handoff(worker_name, state),
    do: GenStateMachine.call(__MODULE__, {:handoff, worker_name, state}, :infinity)

  @doc """
  Tracks a process (pid) with the given name.
  Tracking processes with this function will *not* restart the process when
  its parent node goes down, or shift the process to other nodes if the cluster
  topology changes. It is strictly for global name registration.
  """
  def track(name, pid) when is_pid(pid),
    do: GenStateMachine.call(__MODULE__, {:track, name, pid, %{}}, :infinity)

  @doc """
  Tracks a process created via the provided module/function/args with the given name.
  The process will be distributed on the cluster based on the implementation of the configured distribution strategy.
  If the process' parent node goes down, it will be restarted on the new node which owns its keyspace.
  If the cluster topology changes, and the owner of its keyspace changes, it will be shifted to
  the new owner, after initiating the handoff process as described in the documentation.
  A track call will return an error tagged tuple, `{:error, :no_node_available}`, if there is no node available to start the process.
  Provide a timeout value to limit the track call duration. A value of `:infinity` can be used to block indefinitely.
  """
  def track(name, m, f, a, timeout) when is_atom(m) and is_atom(f) and is_list(a),
    do: GenStateMachine.call(__MODULE__, {:track, name, %{mfa: {m, f, a}}}, timeout)

  @doc """
  Stops tracking the given process (pid).
  """
  def untrack(pid) when is_pid(pid),
    do: GenStateMachine.call(__MODULE__, {:untrack, pid}, :infinity)

  @doc """
  Adds some metadata to the given process (pid). This is primarily used for tracking group membership.
  """
  def add_meta(key, value, pid) when is_pid(pid),
    do: GenStateMachine.call(__MODULE__, {:add_meta, key, value, pid}, :infinity)

  @doc """
  Removes metadata from the given process (pid).
  """
  def remove_meta(key, pid) when is_pid(pid),
    do: GenStateMachine.call(__MODULE__, {:remove_meta, key, pid}, :infinity)

  @doc """
  Remove process from local registry.
  """
  # Takes a registry `entry` record; drops the monitor (flushing any pending
  # :DOWN message) before removing the entry from the local ETS registry.
  def untrack_local(entry(pid: pid, ref: ref, clock: _lclock) = obj) when is_pid(pid) do
    Process.demonitor(ref, [:flush])
    Registry.remove(obj)
  end
  ## Process Internals / Internal API

  # The four logging macros below read `__CALLER__.function` at expansion
  # time so every log line is prefixed with the state-function (FSM state)
  # it was emitted from, e.g. "[tracker:syncing] ...".

  defmacrop debug(msg) do
    {current_state, _arity} = __CALLER__.function

    quote do
      Logger.debug(Swarm.Logger.format("[tracker:#{unquote(current_state)}] #{unquote(msg)}"))
    end
  end

  defmacrop info(msg) do
    {current_state, _arity} = __CALLER__.function

    quote do
      Logger.info(Swarm.Logger.format("[tracker:#{unquote(current_state)}] #{unquote(msg)}"))
    end
  end

  defmacrop warn(msg) do
    {current_state, _arity} = __CALLER__.function

    quote do
      Logger.warn(Swarm.Logger.format("[tracker:#{unquote(current_state)}] #{unquote(msg)}"))
    end
  end

  defmacrop error(msg) do
    {current_state, _arity} = __CALLER__.function

    quote do
      Logger.error(Swarm.Logger.format("[tracker:#{unquote(current_state)}] #{unquote(msg)}"))
    end
  end
def start_link() do
GenStateMachine.start_link(__MODULE__, [], name: __MODULE__)
end
  # FSM init: traps exits, starts node monitoring, builds the distribution
  # strategy from currently connected (non-ignored) nodes, schedules the
  # :cluster_join message, and enters the :cluster_wait state.
  def init(_) do
    # Trap exits
    Process.flag(:trap_exit, true)
    # If this node is ignored, then make sure we ignore everyone else
    # to prevent accidentally interfering with the cluster
    if ignore_node?(Node.self()) do
      Application.put_env(:swarm, :node_blacklist, [~r/^.+$/])
    end

    # Start monitoring nodes
    :ok = :net_kernel.monitor_nodes(true, node_type: :all)
    info("started")

    nodelist = Enum.reject(Node.list(:connected), &ignore_node?/1)

    strategy =
      Node.self()
      |> Strategy.create()
      |> Strategy.add_nodes(nodelist)

    # Optionally trace this process when :swarm, :debug is enabled.
    if Application.get_env(:swarm, :debug, false) do
      _ = Task.start(fn -> :sys.trace(Swarm.Tracker, true) end)
    end

    timeout = Application.get_env(:swarm, :sync_nodes_timeout, @sync_nodes_timeout)
    Process.send_after(self(), :cluster_join, timeout)

    state = %TrackerState{clock: Clock.seed(), nodes: nodelist, strategy: strategy, self: node()}

    {:ok, :cluster_wait, state}
  end
  # :cluster_wait state — the tracker has started but has not yet joined the
  # cluster. Node up/down events only update bookkeeping; the actual join is
  # driven by the delayed :cluster_join message.

  def cluster_wait(:info, {:nodeup, node, _}, %TrackerState{} = state) do
    new_state =
      case nodeup(state, node) do
        {:ok, new_state} -> new_state
        # Any suggested state transition is ignored while waiting to join.
        {:ok, new_state, _next_state} -> new_state
      end

    {:keep_state, new_state}
  end

  def cluster_wait(:info, {:nodedown, node, _}, %TrackerState{} = state) do
    new_state =
      case nodedown(state, node) do
        {:ok, new_state} -> new_state
        {:ok, new_state, _next_state} -> new_state
      end

    {:keep_state, new_state}
  end

  # No peers connected: skip syncing, start the anti-entropy timer and go
  # straight to :tracking with a freshly seeded clock.
  def cluster_wait(:info, :cluster_join, %TrackerState{nodes: []} = state) do
    info("joining cluster..")
    info("no connected nodes, proceeding without sync")
    interval = Application.get_env(:swarm, :anti_entropy_interval, @default_anti_entropy_interval)
    Process.send_after(self(), :anti_entropy, interval)
    {:next_state, :tracking, %{state | clock: Clock.seed()}}
  end

  def cluster_wait(:info, :cluster_join, %TrackerState{nodes: nodes} = state) do
    info("joining cluster..")
    info("found connected nodes: #{inspect(nodes)}")
    # Connect to a random node and sync registries,
    # start anti-entropy, and start loop with forked clock of
    # remote node
    sync_node = Enum.random(nodes)
    info("selected sync node: #{sync_node}")
    # Send sync request
    ref = Process.monitor({__MODULE__, sync_node})
    GenStateMachine.cast({__MODULE__, sync_node}, {:sync, self(), state.clock})
    {:next_state, :syncing, %{state | sync_node: sync_node, sync_ref: ref}}
  end

  # Special case: exactly one peer and it asked *us* to sync first. Fork the
  # remote clock, ship it our registry snapshot, and await its ack.
  def cluster_wait(:cast, {:sync, from, rclock}, %TrackerState{nodes: [from_node]} = state)
      when node(from) == from_node do
    info("joining cluster..")
    sync_node = node(from)
    info("syncing with #{sync_node}")
    ref = Process.monitor({__MODULE__, sync_node})
    {lclock, rclock} = Clock.fork(rclock)

    debug(
      "forking clock: #{inspect(state.clock)}, lclock: #{inspect(lclock)}, rclock: #{
        inspect(rclock)
      }"
    )

    GenStateMachine.cast(from, {:sync_recv, self(), rclock, get_registry_snapshot()})

    {:next_state, :awaiting_sync_ack,
     %{state | clock: lclock, sync_node: sync_node, sync_ref: ref}}
  end

  # Other sync requests: reject ignored nodes, otherwise queue the request
  # until we have finished joining ourselves.
  def cluster_wait(:cast, {:sync, from, _rclock}, %TrackerState{} = state) do
    if ignore_node?(node(from)) do
      GenStateMachine.cast(from, {:sync_err, :node_ignored})
      :keep_state_and_data
    else
      info("pending sync request from #{node(from)}")
      {:keep_state, %{state | pending_sync_reqs: [from | state.pending_sync_reqs]}}
    end
  end

  # Everything else is postponed until we leave :cluster_wait.
  def cluster_wait(_event_type, _event_data, _state) do
    {:keep_state_and_data, :postpone}
  end
# --- :syncing state ---
# We have sent a sync request to `sync_node` and are waiting for its registry.
# A node came up mid-sync: record it and keep waiting for the sync reply.
def syncing(:info, {:nodeup, node, _}, %TrackerState{} = state) do
  new_state =
    case nodeup(state, node) do
      {:ok, new_state} -> new_state
      {:ok, new_state, _next_state} -> new_state
    end
  {:keep_state, new_state}
end
# The monitored remote tracker process crashed (ref matches sync_ref):
# either fall back to :tracking (no other peers) or retry against a new peer.
def syncing(
      :info,
      {:DOWN, ref, _type, _pid, _info},
      %TrackerState{clock: clock, sync_ref: ref} = state
    ) do
  info("the remote tracker we're syncing with has crashed, selecting a new one")
  case state.nodes -- [state.sync_node] do
    [] ->
      info("no other available nodes, cancelling sync")
      new_state = %{state | sync_node: nil, sync_ref: nil}
      {:next_state, :tracking, new_state}
    new_nodes ->
      new_sync_node = Enum.random(new_nodes)
      info("selected sync node: #{new_sync_node}")
      # Send sync request
      ref = Process.monitor({__MODULE__, new_sync_node})
      GenStateMachine.cast({__MODULE__, new_sync_node}, {:sync, self(), clock})
      new_state = %{state | sync_node: new_sync_node, sync_ref: ref}
      {:keep_state, new_state}
  end
end
# The node we were syncing with went down (note `sync_node: node` binds the
# same `node` var as the event, so this clause only matches the sync target):
# drop it from state/strategy and retry with a different peer if any remain.
def syncing(
      :info,
      {:nodedown, node, _},
      %TrackerState{strategy: strategy, clock: clock, nodes: nodes, sync_node: node} = state
    ) do
  info("the selected sync node #{node} went down, selecting new node")
  Process.demonitor(state.sync_ref, [:flush])
  case nodes -- [node] do
    [] ->
      # there are no other nodes to select, nothing to do
      info("no other available nodes, cancelling sync")
      new_state = %{
        state
        | nodes: [],
          strategy: Strategy.remove_node(strategy, node),
          sync_node: nil,
          sync_ref: nil
      }
      {:next_state, :tracking, new_state}
    new_nodes ->
      new_sync_node = Enum.random(new_nodes)
      info("selected sync node: #{new_sync_node}")
      # Send sync request
      ref = Process.monitor({__MODULE__, new_sync_node})
      GenStateMachine.cast({__MODULE__, new_sync_node}, {:sync, self(), clock})
      new_state = %{
        state
        | nodes: new_nodes,
          strategy: Strategy.remove_node(strategy, node),
          sync_node: new_sync_node,
          sync_ref: ref
      }
      {:keep_state, new_state}
  end
end
# Some other node went down: just fold the event into state.
def syncing(:info, {:nodedown, node, _}, %TrackerState{} = state) do
  new_state =
    case nodedown(state, node) do
      {:ok, new_state} -> new_state
      {:ok, new_state, _next_state} -> new_state
    end
  {:keep_state, new_state}
end
# Successful anti-entropy sync
# The sync target sent us its registry: merge it, ack with our own snapshot,
# adopt the sync clock, then drain any queued sync requests from other peers.
def syncing(
      :cast,
      {:sync_recv, from, sync_clock, registry},
      %TrackerState{sync_node: sync_node} = state
    )
    when node(from) == sync_node do
  info("received registry from #{sync_node}, merging..")
  new_state = sync_registry(from, sync_clock, registry, state)
  # let remote node know we've got the registry
  GenStateMachine.cast(from, {:sync_ack, self(), sync_clock, get_registry_snapshot()})
  info("local synchronization with #{sync_node} complete!")
  resolve_pending_sync_requests(%{new_state | clock: sync_clock})
end
# The sync target reported an error: retry against some peer (possibly the
# same one), or become the seed node if no peers remain.
def syncing(:cast, {:sync_err, from}, %TrackerState{nodes: nodes, sync_node: sync_node} = state)
    when node(from) == sync_node do
  Process.demonitor(state.sync_ref, [:flush])
  cond do
    # Something weird happened during sync, so try a different node,
    # with this implementation, we *could* end up selecting the same node
    # again, but that's fine as this is effectively a retry
    length(nodes) > 0 ->
      warn("a problem occurred during sync, choosing a new node to sync with")
      # we need to choose a different node to sync with and try again
      new_sync_node = Enum.random(nodes)
      ref = Process.monitor({__MODULE__, new_sync_node})
      GenStateMachine.cast({__MODULE__, new_sync_node}, {:sync, self(), state.clock})
      {:keep_state, %{state | sync_node: new_sync_node, sync_ref: ref}}
    # Something went wrong during sync, but there are no other nodes to sync with,
    # not even the original sync node (which probably implies it shutdown or crashed),
    # so we're the sync node now
    :else ->
      warn(
        "a problem occurred during sync, but no other available sync targets, becoming seed node"
      )
      {:next_state, :tracking, %{state | pending_sync_reqs: [], sync_node: nil, sync_ref: nil}}
  end
end
def syncing(:cast, {:sync, from, rclock}, %TrackerState{sync_node: sync_node} = state)
    when node(from) == sync_node do
  # We're trying to sync with another node while it is trying to sync with us, deterministically
  # choose the node which will coordinate the synchronization.
  local_node = Node.self()
  case Clock.compare(state.clock, rclock) do
    :lt ->
      # The local clock is dominated by the remote clock, so the remote node will begin the sync
      info("syncing from #{sync_node} based on tracker clock")
      :keep_state_and_data
    :gt ->
      # The local clock dominates the remote clock, so the local node will begin the sync
      info("syncing to #{sync_node} based on tracker clock")
      {lclock, rclock} = Clock.fork(state.clock)
      debug(
        "forking clock when local: #{inspect(state.clock)}, lclock: #{inspect(lclock)}, rclock: #{
          inspect(rclock)
        }"
      )
      GenStateMachine.cast(from, {:sync_recv, self(), rclock, get_registry_snapshot()})
      {:next_state, :awaiting_sync_ack, %{state | clock: lclock}}
    result when result in [:eq, :concurrent] and sync_node > local_node ->
      # The remote node will begin the sync
      info("syncing from #{sync_node} based on node precedence")
      :keep_state_and_data
    result when result in [:eq, :concurrent] ->
      # The local node begins the sync
      info("syncing to #{sync_node} based on node precedence")
      {lclock, rclock} = Clock.fork(state.clock)
      debug(
        "forking clock when concurrent: #{inspect(state.clock)}, lclock: #{inspect(lclock)}, rclock: #{
          inspect(rclock)
        }"
      )
      GenStateMachine.cast(from, {:sync_recv, self(), rclock, get_registry_snapshot()})
      {:next_state, :awaiting_sync_ack, %{state | clock: lclock}}
  end
end
# Sync request from a third party while we're mid-sync: reject ignored nodes,
# otherwise queue it (deduplicated) for after our sync finishes.
def syncing(:cast, {:sync, from, _rclock}, %TrackerState{} = state) do
  if ignore_node?(node(from)) do
    GenStateMachine.cast(from, {:sync_err, :node_ignored})
    :keep_state_and_data
  else
    info("pending sync request from #{node(from)}")
    new_pending_reqs = Enum.uniq([from | state.pending_sync_reqs])
    {:keep_state, %{state | pending_sync_reqs: new_pending_reqs}}
  end
end
# Everything else is postponed until the sync completes.
def syncing(_event_type, _event_data, _state) do
  {:keep_state_and_data, :postpone}
end
# Merge a remote registry snapshot into the local registry, entry by entry.
# For each remote entry: add it if unknown locally; if the pid matches,
# reconcile metadata by clock comparison; if the pids differ, resolve the
# conflicting registration by clock, then topology, then pid ordering.
# Side effects only (Registry writes, monitors, broadcasts); returns `state`
# unchanged.
defp sync_registry(from, _sync_clock, registry, %TrackerState{} = state) when is_pid(from) do
  sync_node = node(from)
  # map over the registry and check that all local entries are correct
  Enum.each(registry, fn entry(name: rname, pid: rpid, meta: rmeta, clock: rclock) = rreg ->
    case Registry.get_by_name(rname) do
      :undefined ->
        # missing local registration
        debug("local tracker is missing #{inspect(rname)}, adding to registry")
        ref = Process.monitor(rpid)
        lclock = Clock.join(state.clock, rclock)
        Registry.new!(entry(name: rname, pid: rpid, ref: ref, meta: rmeta, clock: lclock))
      entry(pid: ^rpid, meta: lmeta, clock: lclock) ->
        # Same pid on both sides; only the metadata may differ.
        case Clock.compare(lclock, rclock) do
          :lt ->
            # the remote clock dominates, take remote data
            lclock = Clock.join(lclock, rclock)
            Registry.update(rname, meta: rmeta, clock: lclock)
            debug(
              "sync metadata for #{inspect(rpid)} (#{inspect(rmeta)}) is causally dominated by remote, updated registry..."
            )
          :gt ->
            # the local clock dominates, keep local data
            debug(
              "sync metadata for #{inspect(rpid)} (#{inspect(rmeta)}) is causally dominated by local, ignoring..."
            )
            :ok
          :eq ->
            # the clocks are the same, no-op
            debug(
              "sync metadata for #{inspect(rpid)} (#{inspect(rmeta)}) has equal clocks, ignoring..."
            )
            :ok
          :concurrent ->
            warn("local and remote metadata for #{inspect(rname)} was concurrently modified")
            # Concurrent edits: merge maps (remote keys win on collision),
            # bump the clock, and re-broadcast so all replicas converge.
            new_meta = Map.merge(lmeta, rmeta)
            # we're going to join and bump our local clock though and re-broadcast the update to ensure we converge
            lclock = Clock.join(lclock, rclock)
            lclock = Clock.event(lclock)
            Registry.update(rname, meta: new_meta, clock: lclock)
            broadcast_event(state.nodes, lclock, {:update_meta, new_meta, rpid})
        end
      entry(pid: lpid, clock: lclock) = lreg ->
        # there are two different processes for the same name, we need to resolve
        case Clock.compare(lclock, rclock) do
          :lt ->
            # the remote registration dominates
            resolve_incorrect_local_reg(sync_node, lreg, rreg, state)
          :gt ->
            # local registration dominates
            debug("remote view of #{inspect(rname)} is outdated, resolving..")
            resolve_incorrect_remote_reg(sync_node, lreg, rreg, state)
          _ ->
            # the entry clocks conflict, determine which one is correct based on
            # current topology and resolve the conflict
            rpid_node = node(rpid)
            lpid_node = node(lpid)
            case Strategy.key_to_node(state.strategy, rname) do
              ^rpid_node when lpid_node != rpid_node ->
                debug(
                  "remote and local view of #{inspect(rname)} conflict, but remote is correct, resolving.."
                )
                resolve_incorrect_local_reg(sync_node, lreg, rreg, state)
              ^lpid_node when lpid_node != rpid_node ->
                debug(
                  "remote and local view of #{inspect(rname)} conflict, but local is correct, resolving.."
                )
                resolve_incorrect_remote_reg(sync_node, lreg, rreg, state)
              _ ->
                # Same node hosts both pids (or neither is the strategy's
                # choice): break the tie by pid ordering.
                cond do
                  lpid_node == rpid_node and lpid > rpid ->
                    debug(
                      "remote and local view of #{inspect(rname)} conflict, but local is more recent, resolving.."
                    )
                    resolve_incorrect_remote_reg(sync_node, lreg, rreg, state)
                  lpid_node == rpid_node and lpid < rpid ->
                    debug(
                      "remote and local view of #{inspect(rname)} conflict, but remote is more recent, resolving.."
                    )
                    resolve_incorrect_local_reg(sync_node, lreg, rreg, state)
                  :else ->
                    # name should be on another node, so neither registration is correct
                    debug(
                      "remote and local view of #{inspect(rname)} are both outdated, resolving.."
                    )
                    resolve_incorrect_local_reg(sync_node, lreg, rreg, state)
                end
            end
        end
    end
  end)
  state
end
# Drain the queue of sync requests that arrived while we were busy syncing.
# Each pending requester (still connected) is served one at a time: fork our
# clock, send a registry snapshot, and wait in :awaiting_sync_ack; requests
# from our own sync node or disconnected nodes are discarded.
# Base case: nothing pending — clean up any monitor and enter :tracking.
defp resolve_pending_sync_requests(%TrackerState{pending_sync_reqs: []} = state) do
  info("pending sync requests cleared")
  case state.sync_ref do
    nil -> :ok
    ref -> Process.demonitor(ref, [:flush])
  end
  {:next_state, :tracking, %{state | sync_node: nil, sync_ref: nil}}
end
# The head of the queue is the node we just synced with — skip it.
defp resolve_pending_sync_requests(
       %TrackerState{sync_node: sync_node, pending_sync_reqs: [pid | pending]} = state
     )
     when sync_node == node(pid) do
  info("discarding sync_node from pending_sync_reqs")
  resolve_pending_sync_requests(%{state | pending_sync_reqs: pending})
end
# Serve the next pending requester if its node is still connected,
# otherwise drop it and recurse.
defp resolve_pending_sync_requests(%TrackerState{pending_sync_reqs: [pid | pending]} = state) do
  pending_node = node(pid)
  # Remove monitoring of the previous sync node
  case state.sync_ref do
    nil -> :ok
    ref -> Process.demonitor(ref, [:flush])
  end
  cond do
    Enum.member?(state.nodes, pending_node) ->
      info("clearing pending sync request for #{pending_node}")
      {lclock, rclock} = Clock.fork(state.clock)
      debug(
        "forking clock when resolving: #{inspect(state.clock)}, lclock: #{inspect(lclock)}, rclock: #{
          inspect(rclock)
        }"
      )
      ref = Process.monitor(pid)
      GenStateMachine.cast(pid, {:sync_recv, self(), rclock, get_registry_snapshot()})
      new_state = %{
        state
        | sync_node: node(pid),
          sync_ref: ref,
          pending_sync_reqs: pending,
          clock: lclock
      }
      {:next_state, :awaiting_sync_ack, new_state}
    :else ->
      resolve_pending_sync_requests(%{
        state
        | sync_node: nil,
          sync_ref: nil,
          pending_sync_reqs: pending
      })
  end
end
# --- :awaiting_sync_ack state ---
# We sent our registry snapshot to a peer and are waiting for its ack
# (which carries the peer's registry back for merging).
# Ack received from the expected node: merge its registry, then drain any
# queued sync requests.
def awaiting_sync_ack(
      :cast,
      {:sync_ack, from, sync_clock, registry},
      %TrackerState{sync_node: sync_node} = state
    )
    when sync_node == node(from) do
  info("received sync acknowledgement from #{node(from)}, syncing with remote registry")
  new_state = sync_registry(from, sync_clock, registry, state)
  info("local synchronization with #{node(from)} complete!")
  resolve_pending_sync_requests(new_state)
end
# The tracker we're awaiting went down (monitor fired): give up on this ack
# and move on to the pending requests.
def awaiting_sync_ack(
      :info,
      {:DOWN, ref, _type, _pid, _info},
      %TrackerState{sync_ref: ref} = state
    ) do
  warn("wait for acknowledgement from #{state.sync_node} cancelled, tracker down")
  resolve_pending_sync_requests(%{state | sync_node: nil, sync_ref: nil})
end
# Node churn while waiting: fold events into state and keep waiting.
def awaiting_sync_ack(:info, {:nodeup, node, _}, %TrackerState{} = state) do
  new_state =
    case nodeup(state, node) do
      {:ok, new_state} -> new_state
      {:ok, new_state, _next_state} -> new_state
    end
  {:keep_state, new_state}
end
# The awaited node itself went down (`sync_node: node` pins the event's node):
# stop waiting and serve the pending queue.
def awaiting_sync_ack(:info, {:nodedown, node, _}, %TrackerState{sync_node: node} = state) do
  new_state =
    case nodedown(state, node) do
      {:ok, new_state} -> new_state
      {:ok, new_state, _next_state} -> new_state
    end
  Process.demonitor(state.sync_ref, [:flush])
  resolve_pending_sync_requests(%{new_state | sync_node: nil, sync_ref: nil})
end
def awaiting_sync_ack(:info, {:nodedown, node, _}, %TrackerState{} = state) do
  new_state =
    case nodedown(state, node) do
      {:ok, new_state} -> new_state
      {:ok, new_state, _next_state} -> new_state
    end
  {:keep_state, new_state}
end
# Everything else is postponed until the ack arrives (or times out via DOWN).
def awaiting_sync_ack(_event_type, _event_data, _state) do
  {:keep_state_and_data, :postpone}
end
# --- :tracking state ---
# Normal operating state: dispatch calls/casts/infos to the dedicated
# handlers below.
def tracking(:info, {:EXIT, _child, _reason}, _state) do
  # A child process started by this tracker has crashed
  :keep_state_and_data
end
# First node to appear after running standalone: treat it like a fresh
# cluster join so registries get synchronized in both directions.
def tracking(:info, {:nodeup, node, _}, %TrackerState{nodes: []} = state) do
  if ignore_node?(node) do
    :keep_state_and_data
  else
    # This case occurs when the tracker comes up without being connected to a cluster
    # and a cluster forms after some period of time. In this case, we need to treat this
    # like a cluster_wait -> cluster_join scenario, so that we sync with cluster and ensure
    # any registrations on the remote node are in the local registry and vice versa
    new_state =
      case nodeup(state, node) do
        {:ok, new_state} -> new_state
        {:ok, new_state, _next_state} -> new_state
      end
    cluster_wait(:info, :cluster_join, new_state)
  end
end
def tracking(:info, {:nodeup, node, _}, state) do
  state
  |> nodeup(node)
  |> handle_node_status()
end
def tracking(:info, {:nodedown, node, _}, state) do
  state
  |> nodedown(node)
  |> handle_node_status()
end
def tracking(:info, {:ensure_swarm_started_on_remote_node, node, attempts}, state) do
  state
  |> ensure_swarm_started_on_remote_node(node, attempts)
  |> handle_node_status()
end
# Timer tick for the periodic registry reconciliation pass.
def tracking(:info, :anti_entropy, state) do
  anti_entropy(state)
end
# A change event received from another replica/node
def tracking(:cast, {:event, from, rclock, event}, state) do
  handle_replica_event(from, event, rclock, state)
end
# Received a handoff request from a node
def tracking(:cast, {:handoff, from, {name, meta, handoff_state, rclock}}, state) do
  handle_handoff(from, {name, meta, handoff_state, rclock}, state)
end
# A remote registration failed due to nodedown during the call
# NOTE(review): the retried meta is rebuilt as %{mfa: {m, f, a}} only — any
# other metadata from the original request is not carried through; verify
# that this is intended.
def tracking(:cast, {:retry, from, {:track, name, m, f, a}}, state) do
  handle_retry(from, {:track, name, %{mfa: {m, f, a}}}, state)
end
# A change event received locally
def tracking({:call, from}, msg, state) do
  handle_call(msg, from, state)
end
def tracking(:cast, msg, state) do
  handle_cast(msg, state)
end
# A tracked process has gone down
def tracking(:info, {:DOWN, ref, _type, pid, info}, state) do
  handle_monitor(ref, pid, info, state)
end
# Anything else falls through to the generic event handler.
def tracking(event_type, event_data, state) do
  handle_event(event_type, event_data, state)
end
# Periodic anti-entropy pass, keeping replicas proactively in sync after the
# initial join so that any missed replication events cannot cause unbounded
# drift in registry state. With no connected peers we simply re-arm the timer;
# otherwise we pick a random peer, request a sync, and enter :syncing.
# The timer is re-armed in both cases.
def anti_entropy(%TrackerState{nodes: []}) do
  delay = Application.get_env(:swarm, :anti_entropy_interval, @default_anti_entropy_interval)
  Process.send_after(self(), :anti_entropy, delay)
  :keep_state_and_data
end

def anti_entropy(%TrackerState{nodes: nodes} = state) do
  target = Enum.random(nodes)
  info("syncing with #{target}")
  monitor = Process.monitor({__MODULE__, target})
  GenStateMachine.cast({__MODULE__, target}, {:sync, self(), state.clock})
  delay = Application.get_env(:swarm, :anti_entropy_interval, @default_anti_entropy_interval)
  Process.send_after(self(), :anti_entropy, delay)
  {:next_state, :syncing, %{state | sync_node: target, sync_ref: monitor}}
end
# Generic event handling shared by all states.
# Replication events from peer trackers arrive as broadcast info messages.
def handle_event(:info, {:event, from, rclock, event}, state) do
  handle_replica_event(from, event, rclock, state)
end

# A :cluster_join received outside :cluster_wait means we already joined
# implicitly via a sync event; since Erlang distribution is a full mesh, the
# cluster is necessarily formed already, so there is nothing left to do.
def handle_event(:info, :cluster_join, _state) do
  :keep_state_and_data
end

def handle_event({:call, from}, request, state) do
  handle_call(request, from, state)
end

def handle_event(:cast, request, state) do
  handle_cast(request, state)
end

# Anything unrecognized is logged at debug level and dropped.
def handle_event(event_type, event_data, _state) do
  debug("unexpected event: #{inspect({event_type, event_data})}")
  :keep_state_and_data
end
# Hot code upgrade hook: no state or data transformation is required.
def code_change(_oldvsn, state, data, _extra), do: {:ok, state, data}
# Translate the result of nodeup/nodedown/ensure_swarm_started_on_remote_node
# into a state-machine action: plain state updates are kept as-is, while a
# topology change triggers redistribution of tracked processes.
defp handle_node_status(result) do
  case result do
    {:ok, new_state} ->
      {:keep_state, new_state}

    {:ok, new_state, {:topology_change, change_info}} ->
      handle_topology_change(change_info, new_state)
  end
end
# This is the callback for when a process is being handed off from a remote node to this node.
#
# Receives `{name, meta, handoff_state, rclock}` from the remote tracker and
# either (re)starts the process locally from its :mfa metadata, hands the
# state to an already-running local registration for conflict resolution, or
# clears a stale registration from the sending node and retries. Any crash
# during the handoff is logged and the event dropped.
defp handle_handoff(
       from,
       {name, meta, handoff_state, rclock},
       %TrackerState{clock: clock} = state
     ) do
  try do
    # If a network split is being healed, we almost certainly will have a
    # local registration already for this name (since it was present on this side of the split)
    # If not, we'll restart it, but if so, we'll send the handoff state to the old process and
    # let it determine how to resolve the conflict
    current_node = Node.self()
    case Registry.get_by_name(name) do
      :undefined ->
        # No local registration: start the process here, deliver the
        # checkpointed state, register it, and replicate the registration.
        {{m, f, a}, _other_meta} = Map.pop(meta, :mfa)
        {:ok, pid} = apply(m, f, a)
        GenServer.cast(pid, {:swarm, :end_handoff, handoff_state})
        ref = Process.monitor(pid)
        lclock = Clock.join(clock, rclock)
        Registry.new!(entry(name: name, pid: pid, ref: ref, meta: meta, clock: lclock))
        broadcast_event(state.nodes, lclock, {:track, name, pid, meta})
        {:keep_state, state}
      entry(pid: pid) when node(pid) == current_node ->
        # Already running locally (split heal): let the existing process
        # reconcile the handed-off state with its own.
        GenServer.cast(pid, {:swarm, :resolve_conflict, handoff_state})
        lclock = Clock.join(clock, rclock)
        broadcast_event(state.nodes, lclock, {:track, name, pid, meta})
        {:keep_state, state}
      entry(pid: pid, ref: ref) = obj when node(pid) == node(from) ->
        # We have received the handoff before we've received the untrack event, but because
        # the handoff is coming from the node where the registration existed, we can safely
        # remove the registration now, and proceed with the handoff
        Process.demonitor(ref, [:flush])
        Registry.remove(obj)
        # Re-enter this callback to take advantage of the first clause
        handle_handoff(from, {name, meta, handoff_state, rclock}, state)
    end
  catch
    kind, err ->
      # Fix: System.stacktrace/0 is deprecated since Elixir 1.7;
      # __STACKTRACE__ is the supported accessor inside catch/rescue.
      error(Exception.format(kind, err, __STACKTRACE__))
      :keep_state_and_data
  end
end
# This is the callback for when a nodeup/down event occurs after the tracker has entered
# the main receive loop. Topology changes are handled a bit differently during startup.
#
# Walks every registry entry and re-evaluates its placement under the new
# topology: locally-hosted restartable processes are handed off, restarted, or
# stopped per their :begin_handoff answer; entries whose host node vanished are
# restarted here (if this node is now responsible) or dropped.
defp handle_topology_change({type, remote_node}, %TrackerState{} = state) do
  debug("topology change (#{type} for #{remote_node})")
  current_node = state.self
  new_state =
    Registry.reduce(state, fn
      # A restartable (:mfa-tagged) process hosted on THIS node: ask the
      # strategy where it belongs now and migrate/stop it if needed.
      entry(name: name, pid: pid, meta: %{mfa: _mfa} = meta) = obj, state
      when node(pid) == current_node ->
        case Strategy.key_to_node(state.strategy, name) do
          :undefined ->
            # No node available to host process, it must be stopped
            debug("#{inspect(pid)} must be stopped as no node is available to host it")
            {:ok, new_state} = remove_registration(obj, state)
            send(pid, {:swarm, :die})
            new_state
          ^current_node ->
            # This process is correct
            state
          other_node ->
            debug("#{inspect(pid)} belongs on #{other_node}")
            # This process needs to be moved to the new node
            try do
              # The process chooses its own migration policy via the
              # :begin_handoff callback protocol.
              case GenServer.call(pid, {:swarm, :begin_handoff}) do
                :ignore ->
                  debug("#{inspect(name)} has requested to be ignored")
                  state
                {:resume, handoff_state} ->
                  debug("#{inspect(name)} has requested to be resumed")
                  {:ok, new_state} = remove_registration(obj, state)
                  send(pid, {:swarm, :die})
                  debug("sending handoff for #{inspect(name)} to #{other_node}")
                  GenStateMachine.cast(
                    {__MODULE__, other_node},
                    {:handoff, self(), {name, meta, handoff_state, Clock.peek(new_state.clock)}}
                  )
                  new_state
                :restart ->
                  debug("#{inspect(name)} has requested to be restarted")
                  {:ok, new_state} = remove_registration(obj, state)
                  send(pid, {:swarm, :die})
                  case do_track(%Tracking{name: name, meta: meta}, new_state) do
                    :keep_state_and_data -> new_state
                    {:keep_state, new_state} -> new_state
                  end
              end
            catch
              _, err ->
                warn("handoff failed for #{inspect(name)}: #{inspect(err)}")
                state
            end
        end
      # A restartable process hosted elsewhere: if its node is gone and this
      # node is now responsible, restart it here.
      entry(name: name, pid: pid, meta: %{mfa: _mfa} = meta) = obj, state when is_map(meta) ->
        cond do
          Enum.member?(state.nodes, node(pid)) ->
            # the parent node is still up
            state
          :else ->
            # pid is dead, we're going to restart it
            case Strategy.key_to_node(state.strategy, name) do
              :undefined ->
                # No node available to restart process on, so remove registration
                warn("no node available to restart #{inspect(name)}")
                {:ok, new_state} = remove_registration(obj, state)
                new_state
              ^current_node ->
                debug("restarting #{inspect(name)} on #{current_node}")
                {:ok, new_state} = remove_registration(obj, state)
                case do_track(%Tracking{name: name, meta: meta}, new_state) do
                  :keep_state_and_data -> new_state
                  {:keep_state, new_state} -> new_state
                end
              _other_node ->
                # other_node will tell us to unregister/register the restarted pid
                state
            end
        end
      # A non-restartable entry (no :mfa): keep it while its host node is
      # reachable, otherwise drop the registration.
      entry(name: name, pid: pid) = obj, state ->
        pid_node = node(pid)
        cond do
          pid_node == current_node or Enum.member?(state.nodes, pid_node) ->
            # the parent node is still up
            state
          :else ->
            # the parent node is down, but we cannot restart this pid, so unregister it
            debug("removing registration for #{inspect(name)}, #{pid_node} is down")
            {:ok, new_state} = remove_registration(obj, state)
            new_state
        end
    end)
  info("topology change complete")
  {:keep_state, new_state}
end
# This is the callback for tracker events which are being replicated from other nodes in the cluster
# Replicated :track — add or reconcile a registration using clock comparison.
defp handle_replica_event(_from, {:track, name, pid, meta}, rclock, %TrackerState{clock: clock}) do
  debug("replicating registration for #{inspect(name)} (#{inspect(pid)}) locally")
  case Registry.get_by_name(name) do
    entry(name: ^name, pid: ^pid, meta: ^meta) ->
      # We're already up to date
      :keep_state_and_data
    entry(name: ^name, pid: ^pid, clock: lclock) ->
      # We don't have the same view of the metadata
      cond do
        Clock.leq(lclock, rclock) ->
          # The remote version is dominant
          lclock = Clock.join(lclock, rclock)
          Registry.update(name, meta: meta, clock: lclock)
          :keep_state_and_data
        Clock.leq(rclock, lclock) ->
          # The local version is dominant
          :keep_state_and_data
        :else ->
          warn(
            "received track event for #{inspect(name)}, but local clock conflicts with remote clock, event unhandled"
          )
          :keep_state_and_data
      end
    entry(name: ^name, pid: other_pid, ref: ref, clock: lclock) = obj ->
      # we have conflicting views of this name, compare clocks and fix it
      current_node = Node.self()
      cond do
        Clock.leq(lclock, rclock) and node(other_pid) == current_node ->
          # The remote version is dominant, kill the local pid and remove the registration
          Process.demonitor(ref, [:flush])
          Process.exit(other_pid, :kill)
          Registry.remove(obj)
          new_ref = Process.monitor(pid)
          lclock = Clock.join(lclock, rclock)
          Registry.new!(entry(name: name, pid: pid, ref: new_ref, meta: meta, clock: lclock))
          :keep_state_and_data
        Clock.leq(rclock, lclock) ->
          # The local version is dominant, so ignore this event
          :keep_state_and_data
        :else ->
          # The clocks are conflicted, warn, and ignore this event
          warn(
            "received track event for #{inspect(name)}, mismatched pids, local clock conflicts with remote clock, event unhandled"
          )
          :keep_state_and_data
      end
    :undefined ->
      # Unknown name: simply adopt the remote registration.
      ref = Process.monitor(pid)
      lclock = Clock.join(clock, rclock)
      Registry.new!(entry(name: name, pid: pid, ref: ref, meta: meta, clock: lclock))
      :keep_state_and_data
  end
end
# Replicated :untrack — remove local registrations for `pid` unless the local
# registration is causally newer than the untrack event.
defp handle_replica_event(_from, {:untrack, pid}, rclock, _state) do
  debug("replica event: untrack #{inspect(pid)}")
  case Registry.get_by_pid(pid) do
    :undefined ->
      :keep_state_and_data
    entries when is_list(entries) ->
      Enum.each(entries, fn entry(ref: ref, clock: lclock) = obj ->
        cond do
          Clock.leq(lclock, rclock) ->
            # registration came before unregister, so remove the registration
            Process.demonitor(ref, [:flush])
            Registry.remove(obj)
          Clock.leq(rclock, lclock) ->
            # registration is newer than de-registration, ignore msg
            debug("untrack is causally dominated by track for #{inspect(pid)}, ignoring..")
          :else ->
            debug("untrack is causally conflicted with track for #{inspect(pid)}, ignoring..")
        end
      end)
      :keep_state_and_data
  end
end
# Replicated :update_meta — apply, ignore, or merge-and-rebroadcast depending
# on how the clocks compare.
defp handle_replica_event(_from, {:update_meta, new_meta, pid}, rclock, state) do
  debug("replica event: update_meta #{inspect(new_meta)} for #{inspect(pid)}")
  case Registry.get_by_pid(pid) do
    :undefined ->
      :keep_state_and_data
    entries when is_list(entries) ->
      Enum.each(entries, fn entry(name: name, meta: old_meta, clock: lclock) ->
        cond do
          Clock.leq(lclock, rclock) ->
            lclock = Clock.join(lclock, rclock)
            Registry.update(name, meta: new_meta, clock: lclock)
            debug(
              "request to update meta from #{inspect(pid)} (#{inspect(new_meta)}) is causally dominated by remote, updated registry..."
            )
          Clock.leq(rclock, lclock) ->
            # ignore the request, as the local clock dominates the remote
            debug(
              "request to update meta from #{inspect(pid)} (#{inspect(new_meta)}) is causally dominated by local, ignoring.."
            )
          :else ->
            # Concurrent edits: merge (remote keys win on collision).
            new_meta = Map.merge(old_meta, new_meta)
            # we're going to join and bump our local clock though and re-broadcast the update to ensure we converge
            debug(
              "conflicting meta for #{inspect(name)}, updating and notifying other nodes, old meta: #{
                inspect(old_meta)
              }, new meta: #{inspect(new_meta)}"
            )
            lclock = Clock.join(lclock, rclock)
            lclock = Clock.event(lclock)
            Registry.update(name, meta: new_meta, clock: lclock)
            broadcast_event(state.nodes, lclock, {:update_meta, new_meta, pid})
        end
      end)
      :keep_state_and_data
  end
end
# Unknown replica events are logged and dropped.
defp handle_replica_event(_from, event, _clock, _state) do
  warn("received unrecognized replica event: #{inspect(event)}")
  :keep_state_and_data
end
# This is the handler for local operations on the tracker which require a response.
# :whereis — resolve a name to a pid. Local lookups reply inline; lookups
# owned by another node are resolved via rpc in a supervised task so the
# tracker itself is never blocked. Replies are sent explicitly; the state
# function result is always :keep_state_and_data.
defp handle_call({:whereis, name}, from, %TrackerState{strategy: strategy}) do
  current_node = Node.self()
  case Strategy.key_to_node(strategy, name) do
    :undefined ->
      GenStateMachine.reply(from, :undefined)
    ^current_node ->
      case Registry.get_by_name(name) do
        :undefined ->
          GenStateMachine.reply(from, :undefined)
        entry(pid: pid) ->
          GenStateMachine.reply(from, pid)
      end
    other_node ->
      _ =
        Task.Supervisor.start_child(Swarm.TaskSupervisor, fn ->
          case :rpc.call(other_node, Swarm.Registry, :get_by_name, [name], :infinity) do
            :undefined ->
              GenStateMachine.reply(from, :undefined)
            entry(pid: pid) ->
              GenStateMachine.reply(from, pid)
            {:badrpc, reason} ->
              warn(
                "failed to execute remote get_by_name on #{inspect(other_node)}: #{
                  inspect(reason)
                }"
              )
              GenStateMachine.reply(from, :undefined)
          end
        end)
  end
  :keep_state_and_data
end
# :track with an existing pid — register it locally under `name`.
defp handle_call({:track, name, pid, meta}, from, %TrackerState{} = state) do
  debug("registering #{inspect(pid)} as #{inspect(name)}, with metadata #{inspect(meta)}")
  add_registration({name, pid, meta}, from, state)
end
# :track with an :mfa — start (or locate) the process on the node the
# strategy assigns to `name`; do_track/2 performs the placement.
defp handle_call({:track, name, meta}, from, state) do
  current_node = Node.self()
  {{m, f, a}, _other_meta} = Map.pop(meta, :mfa)
  case from do
    {from_pid, _} when node(from_pid) != current_node ->
      debug(
        "#{inspect(node(from_pid))} is registering #{inspect(name)} as process started by #{m}.#{
          f
        }/#{length(a)} with args #{inspect(a)}"
      )
    _ ->
      debug(
        "registering #{inspect(name)} as process started by #{m}.#{f}/#{length(a)} with args #{
          inspect(a)
        }"
      )
  end
  do_track(%Tracking{name: name, meta: meta, from: from}, state)
end
# :untrack — drop all registrations for the pid and reply :ok.
defp handle_call({:untrack, pid}, from, %TrackerState{} = state) do
  debug("untrack #{inspect(pid)}")
  {:ok, new_state} = remove_registration_by_pid(pid, state)
  GenStateMachine.reply(from, :ok)
  {:keep_state, new_state}
end
defp handle_call({:add_meta, key, value, pid}, from, %TrackerState{} = state) do
  debug("add_meta #{inspect({key, value})} to #{inspect(pid)}")
  {:ok, new_state} = add_meta_by_pid({key, value}, pid, state)
  GenStateMachine.reply(from, :ok)
  {:keep_state, new_state}
end
defp handle_call({:remove_meta, key, pid}, from, %TrackerState{} = state) do
  debug("remote_meta #{inspect(key)} for #{inspect(pid)}")
  {:ok, new_state} = remove_meta_by_pid(key, pid, state)
  GenStateMachine.reply(from, :ok)
  {:keep_state, new_state}
end
# :handoff — explicitly move a named worker to another node, carrying the
# given state; replies :finished regardless of outcome.
defp handle_call({:handoff, worker_name, handoff_state}, from, state) do
  Registry.get_by_name(worker_name)
  |> case do
    :undefined ->
      # Worker was already removed from registry -> do nothing
      debug("The node #{worker_name} was not found in the registry")
    entry(name: name, pid: pid, meta: %{mfa: _mfa} = meta) = obj ->
      # Exclude ourselves from the ring so the strategy picks a remote host.
      case Strategy.remove_node(state.strategy, state.self) |> Strategy.key_to_node(name) do
        {:error, {:invalid_ring, :no_nodes}} ->
          debug("Cannot handoff #{inspect(name)} because there is no other node left")
        other_node ->
          debug("#{inspect(name)} has requested to be terminated and resumed on another node")
          {:ok, state} = remove_registration(obj, state)
          send(pid, {:swarm, :die})
          debug("sending handoff for #{inspect(name)} to #{other_node}")
          GenStateMachine.cast(
            {__MODULE__, other_node},
            {:handoff, self(), {name, meta, handoff_state, Clock.peek(state.clock)}}
          )
      end
  end
  GenStateMachine.reply(from, :finished)
  :keep_state_and_data
end
# Unknown calls are logged and left unanswered.
defp handle_call(msg, _from, _state) do
  warn("unrecognized call: #{inspect(msg)}")
  :keep_state_and_data
end
# Asynchronous tracker operations. A :sync request from a peer (outside of
# the dedicated sync states) is answered immediately with our registry
# snapshot, after which we wait for the peer's acknowledgement; requests from
# ignored nodes are refused with :sync_err.
defp handle_cast({:sync, from, rclock}, %TrackerState{} = state) do
  requesting_node = node(from)

  if ignore_node?(requesting_node) do
    GenStateMachine.cast(from, {:sync_err, :node_ignored})
    :keep_state_and_data
  else
    debug("received sync request from #{requesting_node}")
    monitor = Process.monitor(from)
    GenStateMachine.cast(from, {:sync_recv, self(), rclock, get_registry_snapshot()})
    {:next_state, :awaiting_sync_ack, %{state | sync_node: requesting_node, sync_ref: monitor}}
  end
end

# Unknown casts are logged and dropped.
defp handle_cast(request, _state) do
  warn("unrecognized cast: #{inspect(request)}")
  :keep_state_and_data
end
# This is only ever called if a registration needed to go to a remote node
# and that node went down in the middle of the call to its Swarm process.
# The nodeup/down events must be processed by re-entering the receive loop
# first, so the tracker sends itself a retry message; this handler re-issues
# the original :track request. Anything other than a :track retry is dropped.
defp handle_retry(from, {:track, name, meta}, state) do
  handle_call({:track, name, meta}, from, state)
end

defp handle_retry(_from, _event, _state), do: :keep_state_and_data
# Called when a pid dies, and the monitor is triggered
# :noconnection means the HOST node became unreachable rather than the
# process itself dying — restartable (:mfa) processes are handled as a
# nodedown (possible redistribution), others are simply unregistered.
defp handle_monitor(ref, pid, :noconnection, %TrackerState{} = state) do
  # lost connection to the node this pid is running on, check if we should restart it
  case Registry.get_by_ref(ref) do
    :undefined ->
      debug(
        "lost connection to #{inspect(pid)}, but no registration could be found, ignoring.."
      )
      :keep_state_and_data
    entry(name: name, pid: ^pid, meta: %{mfa: _mfa}) ->
      debug(
        "lost connection to #{inspect(name)} (#{inspect(pid)}) on #{node(pid)}, node is down"
      )
      state
      |> nodedown(node(pid))
      |> handle_node_status()
    entry(pid: ^pid) = obj ->
      debug("lost connection to #{inspect(pid)}, but not restartable, removing registration..")
      {:ok, new_state} = remove_registration(obj, state)
      {:keep_state, new_state}
  end
end
# Any other DOWN reason: the process itself exited — drop its registration.
defp handle_monitor(ref, pid, reason, %TrackerState{} = state) do
  case Registry.get_by_ref(ref) do
    :undefined ->
      debug(
        "#{inspect(pid)} is down: #{inspect(reason)}, but no registration found, ignoring.."
      )
      :keep_state_and_data
    entry(name: name, pid: ^pid) = obj ->
      debug("#{inspect(name)} is down: #{inspect(reason)}")
      {:ok, new_state} = remove_registration(obj, state)
      {:keep_state, new_state}
  end
end
# Attempt to start a named process on its destination node
#
# The strategy decides which node owns `name`: if no node is available the
# caller gets {:error, :no_node_available}; if it is the local node the
# process is started (or found) here from its :mfa metadata; otherwise the
# start is delegated to the remote node in a task so the tracker stays
# responsive. `from` may be nil (reply/2 tolerates it).
defp do_track(
       %Tracking{name: name, meta: meta, from: from},
       %TrackerState{strategy: strategy} = state
     ) do
  current_node = Node.self()
  {{m, f, a}, _other_meta} = Map.pop(meta, :mfa)
  case Strategy.key_to_node(strategy, name) do
    :undefined ->
      warn("no node available to start #{inspect(name)} process")
      reply(from, {:error, :no_node_available})
      :keep_state_and_data
    ^current_node ->
      case Registry.get_by_name(name) do
        :undefined ->
          debug("starting #{inspect(name)} on #{current_node}")
          try do
            case apply(m, f, a) do
              {:ok, pid} ->
                debug("started #{inspect(name)} on #{current_node}")
                add_registration({name, pid, meta}, from, state)
              err ->
                # The MFA must return {:ok, pid}; anything else is rejected.
                warn("failed to start #{inspect(name)} on #{current_node}: #{inspect(err)}")
                reply(from, {:error, {:invalid_return, err}})
                :keep_state_and_data
            end
          catch
            kind, reason ->
              # Fix: System.stacktrace/0 is deprecated since Elixir 1.7;
              # __STACKTRACE__ is the supported accessor inside catch/rescue.
              warn(Exception.format(kind, reason, __STACKTRACE__))
              reply(from, {:error, reason})
              :keep_state_and_data
          end
        entry(pid: pid) ->
          debug("found #{inspect(name)} already registered on #{node(pid)}")
          reply(from, {:error, {:already_registered, pid}})
          :keep_state_and_data
      end
    remote_node ->
      debug("starting #{inspect(name)} on remote node #{remote_node}")
      {:ok, _pid} =
        Task.start(fn ->
          start_pid_remotely(remote_node, from, name, meta, state)
        end)
      :keep_state_and_data
  end
end
# Starts a process on a remote node. Handles failures with a retry mechanism
#
# Retries (bounded by @retry_max_attempts, spaced by @retry_interval) cover
# transient failures: the remote process not yet registered (:noproc), the
# target module not yet loaded (:undef), and the remote tracker restarting.
# A nodedown reroutes to a freshly-selected node with a fresh attempt budget.
# Replies to `from` with {:ok, pid} or an {:error, _} tuple.
defp start_pid_remotely(remote_node, from, name, meta, state, attempts \\ 0)

defp start_pid_remotely(remote_node, from, name, meta, %TrackerState{} = state, attempts)
     when attempts <= @retry_max_attempts do
  try do
    case GenStateMachine.call({__MODULE__, remote_node}, {:track, name, meta}, :infinity) do
      {:ok, pid} ->
        debug("remotely started #{inspect(name)} (#{inspect(pid)}) on #{remote_node}")
        reply(from, {:ok, pid})
      {:error, {:already_registered, pid}} ->
        debug(
          "#{inspect(name)} already registered to #{inspect(pid)} on #{node(pid)}, registering locally"
        )
        # register named process that is unknown locally
        add_registration({name, pid, meta}, from, state)
        :ok
      {:error, {:noproc, _}} = err ->
        warn(
          "#{inspect(name)} could not be started on #{remote_node}: #{inspect(err)}, retrying operation after #{
            @retry_interval
          }ms.."
        )
        :timer.sleep(@retry_interval)
        start_pid_remotely(remote_node, from, name, meta, state, attempts + 1)
      {:error, :undef} ->
        warn(
          "#{inspect(name)} could not be started on #{remote_node}: target module not available on remote node, retrying operation after #{
            @retry_interval
          }ms.."
        )
        :timer.sleep(@retry_interval)
        start_pid_remotely(remote_node, from, name, meta, state, attempts + 1)
      {:error, _reason} = err ->
        warn("#{inspect(name)} could not be started on #{remote_node}: #{inspect(err)}")
        reply(from, err)
    end
  catch
    _, {:noproc, _} ->
      warn(
        "remote tracker on #{remote_node} went down during registration, retrying operation.."
      )
      # Fix: previously this recursed without the attempts counter and with no
      # delay, which could spin forever if the remote tracker kept restarting.
      # Count the retry and back off like the other retry paths.
      :timer.sleep(@retry_interval)
      start_pid_remotely(remote_node, from, name, meta, state, attempts + 1)
    _, {{:nodedown, _}, _} ->
      warn("failed to start #{inspect(name)} on #{remote_node}: nodedown, retrying operation..")
      new_state = %{
        state
        | nodes: state.nodes -- [remote_node],
          strategy: Strategy.remove_node(state.strategy, remote_node)
      }
      # A fresh target node intentionally gets a fresh attempt budget
      # (the default attempts = 0 applies on this recursive call).
      case Strategy.key_to_node(new_state.strategy, name) do
        :undefined ->
          warn("failed to start #{inspect(name)} as no node available")
          reply(from, {:error, :no_node_available})
        new_node ->
          start_pid_remotely(new_node, from, name, meta, new_state)
      end
    kind, err ->
      # Fix: System.stacktrace/0 is deprecated since Elixir 1.7;
      # __STACKTRACE__ is the supported accessor inside catch/rescue.
      error(Exception.format(kind, err, __STACKTRACE__))
      warn("failed to start #{inspect(name)} on #{remote_node}: #{inspect(err)}")
      reply(from, {:error, err})
  end
end

# Attempt budget exhausted: give up and report the failure to the caller.
defp start_pid_remotely(remote_node, from, name, _meta, _state, attempts) do
  warn(
    "#{inspect(name)} could not be started on #{remote_node}, failed to start after #{attempts} attempt(s)"
  )
  reply(from, {:error, :too_many_attempts})
end
## Internal helpers

# Deliver `message` to the waiting caller, if any. When no caller is attached
# (`from` is nil) the message is silently dropped. Returns :ok either way.
defp reply(recipient, message) do
  case recipient do
    nil -> :ok
    from -> GenStateMachine.reply(from, message)
  end
end
# Fan an event out to every tracker process in `nodes`, tagged with a snapshot
# of the given clock. No-op for an empty node list. Always returns :ok.
defp broadcast_event([], _clock, _event), do: :ok

defp broadcast_event(nodes, clock, event) do
  payload = {:event, self(), Clock.peek(clock), event}
  :abcast = :rpc.abcast(nodes, __MODULE__, payload)
  :ok
end
# Attempt the registration, answer the caller with the outcome, and return the
# gen_statem state transition (state is kept in both the success and error cases).
defp add_registration({_name, _pid, _meta} = reg, from, state) do
  # register/2 always returns a 3-tuple tagged :ok or :error
  {outcome, payload, new_state} = register(reg, state)
  reply(from, {outcome, payload})
  {:keep_state, new_state}
end
# Add a registration and return the result of the add
#
# Returns `{:ok, pid, state}` when `name` is now (or was already) bound to
# `pid`, or `{:error, {:already_registered, other_pid}, state}` when a
# different process already holds the name.
defp register({name, pid, meta}, %TrackerState{clock: clock, nodes: nodes} = state) do
  case Registry.get_by_name(name) do
    :undefined ->
      # Fresh name: monitor the pid so we can clean up when it exits, stamp
      # the entry with a new local clock event, and broadcast the track so
      # the other trackers converge on this registration.
      ref = Process.monitor(pid)
      lclock = Clock.event(clock)
      Registry.new!(entry(name: name, pid: pid, ref: ref, meta: meta, clock: lclock))
      broadcast_event(nodes, lclock, {:track, name, pid, meta})
      {:ok, pid, state}

    entry(pid: ^pid) ->
      # Not sure how this could happen, but hey, no need to return an error
      {:ok, pid, state}

    entry(pid: other_pid) ->
      debug(
        "conflicting registration for #{inspect(name)}: remote (#{inspect(pid)}) vs. local #{
          inspect(other_pid)
        }"
      )

      # Since there is already a registration, we need to check whether to kill the newly
      # created process
      pid_node = node(pid)
      current_node = Node.self()

      case meta do
        %{mfa: _} when pid_node == current_node ->
          # This was created via register_name/5, which means we need to kill the pid we started
          Process.exit(pid, :kill)

        _ ->
          # This was a pid started by something else, so we can ignore it
          :ok
      end

      {:error, {:already_registered, other_pid}, state}
  end
end
# Drop a registration entry: delete it from the registry, stop monitoring its
# pid, and broadcast the untrack so the other trackers converge.
# Returns {:ok, state}.
defp remove_registration(entry(pid: pid, ref: ref, clock: lclock) = reg, state) do
  Registry.remove(reg)
  Process.demonitor(ref, [:flush])
  broadcast_event(state.nodes, Clock.event(lclock), {:untrack, pid})
  {:ok, state}
end
# Remove every registration held by `pid` (a single pid may back several
# names). Always returns {:ok, state}.
defp remove_registration_by_pid(pid, state) do
  with entries when is_list(entries) <- Registry.get_by_pid(pid) do
    Enum.each(entries, &remove_registration(&1, state))
  end

  {:ok, state}
end
# Set `key` to `value` in the metadata of every registration owned by `pid`.
# Always returns {:ok, state}.
defp add_meta_by_pid({key, value}, pid, state) do
  update_meta_by_pid(pid, state, &Map.put(&1, key, value))
end

# Remove `key` from the metadata of every registration owned by `pid`.
# Always returns {:ok, state}.
defp remove_meta_by_pid(key, pid, state) do
  update_meta_by_pid(pid, state, &Map.drop(&1, [key]))
end

# Shared implementation for the two meta updates above (previously duplicated
# verbatim in both functions): applies `update_fun` to each entry's meta map,
# bumps that entry's clock, persists the change in the registry and broadcasts
# it to the other trackers. Always returns {:ok, state}.
defp update_meta_by_pid(pid, state, update_fun) do
  case Registry.get_by_pid(pid) do
    :undefined ->
      {:ok, state}

    entries when is_list(entries) ->
      Enum.each(entries, fn entry(name: name, meta: old_meta, clock: lclock) ->
        new_meta = update_fun.(old_meta)
        lclock = Clock.event(lclock)
        Registry.update(name, meta: new_meta, clock: lclock)
        broadcast_event(state.nodes, lclock, {:update_meta, new_meta, pid})
      end)

      {:ok, state}
  end
end
# Patterns that are always ignored regardless of configuration: remote shells
# and release upgrade/maintenance helper nodes.
@global_blacklist MapSet.new([~r/^remsh.*$/, ~r/^.+_upgrader_.+$/, ~r/^.+_maint_.+$/])

# The configured node ignore patterns, merged with the global blacklist above.
defp node_blacklist() do
  configured = MapSet.new(Application.get_env(:swarm, :node_blacklist, []))

  configured
  |> MapSet.union(@global_blacklist)
  |> MapSet.to_list()
end
# The configured whitelist patterns for nodes.
# If a whitelist is provided, any nodes which do not match it are ignored.
defp node_whitelist() do
  Application.get_env(:swarm, :node_whitelist, [])
end
# Determine if a node should be ignored, even if connected
# The whitelist and blacklist can contain literal strings, regexes, or regex strings
# By default, all nodes are allowed, except those which are remote shell sessions
# where the node name of the remote shell starts with `remsh` (relx, exrm, and distillery)
# all use that prefix for remote shells.
defp ignore_node?(node) do
  HashRing.Utils.ignore_node?(node, node_blacklist(), node_whitelist())
end
# Used during anti-entropy checks to remove local registrations and replace them with the remote version
#
# The losing local pid is told to die via a `{:swarm, :die}` message, and the
# remote pid is registered locally under a clock joined from both sides, so
# that later events dominate both histories. Returns the updated state.
defp resolve_incorrect_local_reg(
       _remote_node,
       entry(pid: lpid, clock: lclock) = lreg,
       entry(name: rname, pid: rpid, meta: rmeta, clock: rclock),
       state
     ) do
  # the remote registration is correct
  {:ok, new_state} = remove_registration(lreg, state)
  send(lpid, {:swarm, :die})
  # add the remote registration
  ref = Process.monitor(rpid)
  lclock = Clock.join(lclock, rclock)
  Registry.new!(entry(name: rname, pid: rpid, ref: ref, meta: rmeta, clock: lclock))
  new_state
end
# Used during anti-entropy checks to remove remote registrations and replace them with the local version
#
# Asks the remote tracker to untrack its (losing) pid, tells that pid to die,
# then instructs the remote tracker to track our local pid instead. These are
# fire-and-forget casts; the state is returned unchanged.
defp resolve_incorrect_remote_reg(
       remote_node,
       entry(pid: lpid, meta: lmeta),
       entry(name: rname, pid: rpid),
       state
     ) do
  GenStateMachine.cast({__MODULE__, remote_node}, {:untrack, rpid})
  send(rpid, {:swarm, :die})
  GenStateMachine.cast({__MODULE__, remote_node}, {:track, rname, lpid, lmeta})
  state
end
# A new node has been added to the cluster, we need to update the distribution strategy and handle shifting
# processes to new nodes based on the new topology.
defp nodeup(%TrackerState{nodes: nodes, strategy: strategy} = state, node) do
  cond do
    node == Node.self() ->
      # Our own node name changed: swap the old name for the new one in the ring.
      updated_strategy = Strategy.add_node(Strategy.remove_node(strategy, state.self), node)
      info("node name changed from #{state.self} to #{node}")
      {:ok, %{state | self: node, strategy: updated_strategy}}

    node in nodes ->
      # Already known; nothing to do.
      {:ok, state}

    ignore_node?(node) ->
      {:ok, state}

    true ->
      ensure_swarm_started_on_remote_node(state, node)
  end
end
# Check (via RPC) that the :swarm application is running on `node` before
# admitting it to the cluster. If swarm isn't up there yet, schedule a bounded
# retry by sending a message back to this process; if the RPC itself fails,
# the node is left out.
defp ensure_swarm_started_on_remote_node(state, node, attempts \\ 0)

defp ensure_swarm_started_on_remote_node(
       %TrackerState{nodes: nodes, strategy: strategy} = state,
       node,
       attempts
     )
     when attempts <= @retry_max_attempts do
  case :rpc.call(node, :application, :which_applications, []) do
    app_list when is_list(app_list) ->
      case List.keyfind(app_list, :swarm, 0) do
        {:swarm, _, _} ->
          # swarm is running remotely: add the node to our ring and announce
          # the topology change so processes can be redistributed.
          info("nodeup #{node}")

          new_state = %{
            state
            | nodes: [node | nodes],
              strategy: Strategy.add_node(strategy, node)
          }

          {:ok, new_state, {:topology_change, {:nodeup, node}}}

        nil ->
          # swarm not started on the remote node yet; retry later with an
          # incremented attempt count, delivered as a message to self().
          debug(
            "nodeup for #{node} was ignored because swarm not started yet, will retry in #{
              @retry_interval
            }ms.."
          )

          Process.send_after(
            self(),
            {:ensure_swarm_started_on_remote_node, node, attempts + 1},
            @retry_interval
          )

          {:ok, state}
      end

    other ->
      # the RPC itself failed (e.g. a :badrpc tuple); leave the node out
      warn("nodeup for #{node} was ignored because: #{inspect(other)}")
      {:ok, state}
  end
end

# Retry budget exhausted: give up on this node.
defp ensure_swarm_started_on_remote_node(%TrackerState{} = state, node, attempts) do
  warn(
    "nodeup for #{node} was ignored because swarm failed to start after #{attempts} attempt(s)"
  )

  {:ok, state}
end
# A remote node went down, we need to update the distribution strategy and handle restarting/shifting processes
# as needed based on the new topology
defp nodedown(%TrackerState{nodes: nodes, strategy: strategy} = state, node) do
  if Enum.member?(nodes, node) do
    info("nodedown #{node}")

    new_state = %{
      state
      | nodes: List.delete(nodes, node),
        strategy: Strategy.remove_node(strategy, node),
        pending_sync_reqs: Enum.reject(state.pending_sync_reqs, &(&1 == node))
    }

    {:ok, new_state, {:topology_change, {:nodedown, node}}}
  else
    # Unknown node: nothing to update.
    {:ok, state}
  end
end
# Take a snapshot of the registry with each entry's clock collapsed via
# Clock.peek/1, suitable for shipping to another node.
defp get_registry_snapshot() do
  Registry.snapshot()
  |> Enum.map(fn entry(name: name, pid: pid, ref: ref, meta: meta, clock: clock) ->
    entry(name: name, pid: pid, ref: ref, meta: meta, clock: Clock.peek(clock))
  end)
end
end
| 35.249857 | 141 | 0.613807 |
e81efa6058a511f380b47e607d04b5cb3968e40a | 203 | ex | Elixir | sensor_hub_poncho/sensor_hub/lib/sensor_hub.ex | colindensem/weather-station-phoenix-nerves | 8193a6e540c406d4412ba3dfb0ab548d38a982ff | [
"MIT"
] | null | null | null | sensor_hub_poncho/sensor_hub/lib/sensor_hub.ex | colindensem/weather-station-phoenix-nerves | 8193a6e540c406d4412ba3dfb0ab548d38a982ff | [
"MIT"
] | null | null | null | sensor_hub_poncho/sensor_hub/lib/sensor_hub.ex | colindensem/weather-station-phoenix-nerves | 8193a6e540c406d4412ba3dfb0ab548d38a982ff | [
"MIT"
] | null | null | null | defmodule SensorHub do
@moduledoc """
Documentation for SensorHub.
"""
@doc """
Hello world.
## Examples
iex> SensorHub.hello
:world
"""
def hello do
:world
end
end
| 10.684211 | 30 | 0.581281 |
e81f038016ccd99d9f6480fe03c203071ce12372 | 3,991 | ex | Elixir | lib/toolshed.ex | ConnorRigby/toolshed | 0f5cd4bb4f3f6fdbe54d58e3327e117176f06e97 | [
"Apache-2.0"
] | null | null | null | lib/toolshed.ex | ConnorRigby/toolshed | 0f5cd4bb4f3f6fdbe54d58e3327e117176f06e97 | [
"Apache-2.0"
] | null | null | null | lib/toolshed.ex | ConnorRigby/toolshed | 0f5cd4bb4f3f6fdbe54d58e3327e117176f06e97 | [
"Apache-2.0"
] | null | null | null | defmodule Toolshed do
@moduledoc """
Making the IEx console friendlier one command at a time
To use the helpers, run:
iex> use Toolshed
Add this to your `.iex.exs` to load automatically.
The following is a list of helpers:
* `cat/1` - print out a file
* `cmd/1` - run a system command and print the output
* `date/0` - print out the current date and time
* `dmesg/0` - print kernel messages (Nerves-only)
* `exit/0` - exit out of an IEx session
* `fw_validate/0` - marks the current image as valid (check Nerves system if supported)
* `grep/2` - print out lines that match a regular expression
* `hex/1` - print a number as hex
* `httpget/2` - print or download the results of a HTTP GET request
* `hostname/0` - print our hostname
* `ifconfig/0` - print info on network interfaces
* `load_term!/2` - load a term that was saved by `save_term/2`
* `lsof/0` - print out open file handles by OS process
* `lsmod/0` - print out what kernel modules have been loaded (Nerves-only)
* `lsusb/0` - print info on USB devices
* `multicast_addresses/0` - print out all multicast addresses
* `nslookup/1` - query DNS to find an IP address
* `pastebin/1` - post text to a pastebin server (requires networking)
* `ping/2` - ping a remote host (but use TCP instead of ICMP)
* `qr_encode/1` - create a QR code (requires networking)
* `reboot/0` - reboots gracefully (Nerves-only)
* `reboot!/0` - reboots immediately (Nerves-only)
* `save_value/2` - save a value to a file as Elixir terms (uses inspect)
* `save_term!/2` - save a term as a binary
* `top/2` - list out the top processes
* `tping/2` - check if a host can be reached (like ping, but uses TCP)
* `tree/1` - pretty print a directory tree
* `uptime/0` - print out the current Erlang VM uptime
* `uname/0` - print information about the running system (Nerves-only)
* `weather/0` - get the local weather (requires networking)
"""
defmacro __using__(_) do
nerves =
if Code.ensure_loaded?(Toolshed.Nerves) do
quote do
import Toolshed.Nerves
end
else
quote do
end
end
quote do
import Toolshed
import Toolshed.Top
import Toolshed.Lsof
unquote(nerves)
import Toolshed.Unix
import Toolshed.Net
import Toolshed.Misc
import Toolshed.HW
import Toolshed.HTTP
import Toolshed.Multicast
# If module docs have been stripped, then don't tell the user that they can
# see them.
help_text =
case Code.fetch_docs(Toolshed) do
{:error, _anything} -> ""
_ -> " Run h(Toolshed) for more info."
end
Toolshed.Autocomplete.set_expand_fun()
IO.puts([
IO.ANSI.color(:rand.uniform(231) + 1),
"Toolshed",
IO.ANSI.reset(),
" imported.",
help_text
])
end
end
@doc """
Run a command and return the exit code. This function is intended to be run
interactively.
"""
@spec cmd(String.t() | charlist()) :: integer()
def cmd(str) when is_binary(str) do
{_collectable, exit_code} =
System.cmd("sh", ["-c", str], stderr_to_stdout: true, into: IO.stream(:stdio, :line))
exit_code
end
def cmd(str) when is_list(str) do
str |> to_string |> cmd
end
@doc """
Inspect a value with all integers printed out in hex. This is useful for
one-off hex conversions. If you're doing a lot of work that requires
hexadecimal output, you should consider running:
`IEx.configure(inspect: [base: :hex])`
The drawback of doing the above is that strings print out as hex binaries.
"""
@spec hex(integer()) :: String.t()
def hex(value) do
inspect(value, base: :hex)
end
end
| 33.537815 | 92 | 0.611626 |
e81f109c93353f66fb5bdaf865457aadbc882fd1 | 666 | ex | Elixir | test/elixir/lib/setup/common.ex | mtenrero/couchdb-vetcontrol | b7ede3ededdf0072c73f08d8f1217cb723b03f7a | [
"Apache-2.0"
] | 1 | 2020-09-11T19:23:27.000Z | 2020-09-11T19:23:27.000Z | test/elixir/lib/setup/common.ex | mtenrero/couchdb-vetcontrol | b7ede3ededdf0072c73f08d8f1217cb723b03f7a | [
"Apache-2.0"
] | null | null | null | test/elixir/lib/setup/common.ex | mtenrero/couchdb-vetcontrol | b7ede3ededdf0072c73f08d8f1217cb723b03f7a | [
"Apache-2.0"
] | null | null | null | defmodule Couch.Test.Setup.Common do
@moduledoc """
A set of common setup pipelines for reuse
- httpd_with_admin - chttpd is started and new admin is created
- httpd_with_db - httpd_with_admin and new database is created
"""
alias Couch.Test.Setup.Step
def httpd_with_admin(setup) do
setup
|> Step.Start.new(:start, extra_apps: [:chttpd])
|> Step.User.new(:admin, roles: [:server_admin])
end
def httpd_with_db(setup) do
setup
|> httpd_with_admin()
|> Step.Create.DB.new(:db)
end
def with_db(setup) do
setup
|> Step.Start.new(:start, extra_apps: [:fabric])
|> Step.Create.DB.new(:db)
end
end | 24.666667 | 65 | 0.663664 |
e81f142a56fcda3dfdbe7783928bcd81ba3fd5c0 | 265 | exs | Elixir | test/map_diff_test.exs | Qqwy/elixir-map_diff | 7a0e0292d44bfc9a87bccfe1430c3867caab2592 | [
"MIT"
] | 55 | 2019-08-19T04:14:39.000Z | 2022-03-28T08:10:04.000Z | test/map_diff_test.exs | Qqwy/elixir-map_diff | 7a0e0292d44bfc9a87bccfe1430c3867caab2592 | [
"MIT"
] | 46 | 2019-08-17T20:52:52.000Z | 2022-03-25T11:15:12.000Z | test/map_diff_test.exs | Qqwy/elixir_map_diff | 7a0e0292d44bfc9a87bccfe1430c3867caab2592 | [
"MIT"
] | 7 | 2016-12-20T20:39:16.000Z | 2019-08-05T19:05:49.000Z |
defmodule MapDiffTest do
use ExUnit.Case
# Simple example structs
# that are used in some of the doctests.
defmodule Foo do
defstruct a: 1, b: 2, c: 3
end
defmodule Bar do
defstruct a: "foo", b: "bar", z: "baz"
end
doctest MapDiff
end
| 13.947368 | 42 | 0.649057 |
e81f2990938d3d7ec0e330df2a331914c108e8bf | 18,039 | ex | Elixir | clients/storage_transfer/lib/google_api/storage_transfer/v1/api/transfer_operations.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2021-12-20T03:40:53.000Z | 2021-12-20T03:40:53.000Z | clients/storage_transfer/lib/google_api/storage_transfer/v1/api/transfer_operations.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2020-08-18T00:11:23.000Z | 2020-08-18T00:44:16.000Z | clients/storage_transfer/lib/google_api/storage_transfer/v1/api/transfer_operations.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.StorageTransfer.V1.Api.TransferOperations do
  @moduledoc """
  API calls for all endpoints tagged `TransferOperations`.
  """

  # NOTE(review): this module is auto-generated (see file header); manual
  # changes will be overwritten the next time the client is regenerated.

  alias GoogleApi.StorageTransfer.V1.Connection
  alias GoogleApi.Gax.{Request, Response}

  # Version of this client library, embedded into outgoing requests.
  @library_version Mix.Project.config() |> Keyword.get(:version, "")

  @doc """
  Cancels a transfer. Use the transferOperations.get method to check if the cancellation succeeded or if the operation completed despite the `cancel` request. When you cancel an operation, the currently running transfer is interrupted. For recurring transfer jobs, the next instance of the transfer job will still run. For example, if your job is configured to run every day at 1pm and you cancel Monday's operation at 1:05pm, Monday's transfer will stop. However, a transfer job will still be attempted on Tuesday. This applies only to currently running operations. If an operation is not currently running, `cancel` does nothing. *Caution:* Canceling a transfer job can leave your data in an unknown state. We recommend that you restore the state at both the destination and the source after the `cancel` request completes so that your data is in a consistent state. When you cancel a job, the next job computes a delta of files and may repair any inconsistent state. For instance, if you run a job every day, and today's job found 10 new files and transferred five files before you canceled the job, tomorrow's transfer operation will compute a new delta with the five files that were not copied today plus any new files discovered tomorrow.

  ## Parameters

  *   `connection` (*type:* `GoogleApi.StorageTransfer.V1.Connection.t`) - Connection to server
  *   `name` (*type:* `String.t`) - The name of the operation resource to be cancelled.
  *   `optional_params` (*type:* `keyword()`) - Optional parameters
      *   `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
      *   `:access_token` (*type:* `String.t`) - OAuth access token.
      *   `:alt` (*type:* `String.t`) - Data format for response.
      *   `:callback` (*type:* `String.t`) - JSONP
      *   `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
      *   `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
      *   `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
      *   `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
      *   `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
      *   `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
      *   `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
      *   `:body` (*type:* `GoogleApi.StorageTransfer.V1.Model.CancelOperationRequest.t`) -
  *   `opts` (*type:* `keyword()`) - Call options

  ## Returns

  *   `{:ok, %GoogleApi.StorageTransfer.V1.Model.Empty{}}` on success
  *   `{:error, info}` on failure
  """
  @spec storagetransfer_transfer_operations_cancel(
          Tesla.Env.client(),
          String.t(),
          keyword(),
          keyword()
        ) ::
          {:ok, GoogleApi.StorageTransfer.V1.Model.Empty.t()}
          | {:ok, Tesla.Env.t()}
          | {:ok, list()}
          | {:error, any()}
  def storagetransfer_transfer_operations_cancel(
        connection,
        name,
        optional_params \\ [],
        opts \\ []
      ) do
    # Map each supported optional parameter to where it is placed in the
    # request (:query string or request :body).
    optional_params_config = %{
      :"$.xgafv" => :query,
      :access_token => :query,
      :alt => :query,
      :callback => :query,
      :fields => :query,
      :key => :query,
      :oauth_token => :query,
      :prettyPrint => :query,
      :quotaUser => :query,
      :uploadType => :query,
      :upload_protocol => :query,
      :body => :body
    }

    # Build the HTTP request, execute it, and decode into the expected model.
    request =
      Request.new()
      |> Request.method(:post)
      |> Request.url("/v1/{+name}:cancel", %{
        "name" => URI.encode(name, &URI.char_unreserved?/1)
      })
      |> Request.add_optional_params(optional_params_config, optional_params)
      |> Request.library_version(@library_version)

    connection
    |> Connection.execute(request)
    |> Response.decode(opts ++ [struct: %GoogleApi.StorageTransfer.V1.Model.Empty{}])
  end

  @doc """
  Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

  ## Parameters

  *   `connection` (*type:* `GoogleApi.StorageTransfer.V1.Connection.t`) - Connection to server
  *   `name` (*type:* `String.t`) - The name of the operation resource.
  *   `optional_params` (*type:* `keyword()`) - Optional parameters
      *   `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
      *   `:access_token` (*type:* `String.t`) - OAuth access token.
      *   `:alt` (*type:* `String.t`) - Data format for response.
      *   `:callback` (*type:* `String.t`) - JSONP
      *   `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
      *   `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
      *   `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
      *   `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
      *   `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
      *   `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
      *   `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
  *   `opts` (*type:* `keyword()`) - Call options

  ## Returns

  *   `{:ok, %GoogleApi.StorageTransfer.V1.Model.Operation{}}` on success
  *   `{:error, info}` on failure
  """
  @spec storagetransfer_transfer_operations_get(
          Tesla.Env.client(),
          String.t(),
          keyword(),
          keyword()
        ) ::
          {:ok, GoogleApi.StorageTransfer.V1.Model.Operation.t()}
          | {:ok, Tesla.Env.t()}
          | {:ok, list()}
          | {:error, any()}
  def storagetransfer_transfer_operations_get(connection, name, optional_params \\ [], opts \\ []) do
    # Supported optional parameters; all are sent on the query string.
    optional_params_config = %{
      :"$.xgafv" => :query,
      :access_token => :query,
      :alt => :query,
      :callback => :query,
      :fields => :query,
      :key => :query,
      :oauth_token => :query,
      :prettyPrint => :query,
      :quotaUser => :query,
      :uploadType => :query,
      :upload_protocol => :query
    }

    # Build the HTTP request, execute it, and decode into the expected model.
    request =
      Request.new()
      |> Request.method(:get)
      |> Request.url("/v1/{+name}", %{
        "name" => URI.encode(name, &URI.char_unreserved?/1)
      })
      |> Request.add_optional_params(optional_params_config, optional_params)
      |> Request.library_version(@library_version)

    connection
    |> Connection.execute(request)
    |> Response.decode(opts ++ [struct: %GoogleApi.StorageTransfer.V1.Model.Operation{}])
  end

  @doc """
  Lists transfer operations. Operations are ordered by their creation time in reverse chronological order.

  ## Parameters

  *   `connection` (*type:* `GoogleApi.StorageTransfer.V1.Connection.t`) - Connection to server
  *   `name` (*type:* `String.t`) - Not used.
  *   `filter` (*type:* `String.t`) - Required. A list of query parameters specified as JSON text in the form of: `{"projectId":"my_project_id", "jobNames":["jobid1","jobid2",...], "operationNames":["opid1","opid2",...], "transferStatuses":["status1","status2",...]}` Since `jobNames`, `operationNames`, and `transferStatuses` support multiple values, they must be specified with array notation. `projectId` is required. `jobNames`, `operationNames`, and `transferStatuses` are optional. The valid values for `transferStatuses` are case-insensitive: IN_PROGRESS, PAUSED, SUCCESS, FAILED, and ABORTED.
  *   `optional_params` (*type:* `keyword()`) - Optional parameters
      *   `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
      *   `:access_token` (*type:* `String.t`) - OAuth access token.
      *   `:alt` (*type:* `String.t`) - Data format for response.
      *   `:callback` (*type:* `String.t`) - JSONP
      *   `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
      *   `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
      *   `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
      *   `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
      *   `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
      *   `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
      *   `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
      *   `:pageSize` (*type:* `integer()`) - The list page size. The max allowed value is 256.
      *   `:pageToken` (*type:* `String.t`) - The list page token.
  *   `opts` (*type:* `keyword()`) - Call options

  ## Returns

  *   `{:ok, %GoogleApi.StorageTransfer.V1.Model.ListOperationsResponse{}}` on success
  *   `{:error, info}` on failure
  """
  @spec storagetransfer_transfer_operations_list(
          Tesla.Env.client(),
          String.t(),
          String.t(),
          keyword(),
          keyword()
        ) ::
          {:ok, GoogleApi.StorageTransfer.V1.Model.ListOperationsResponse.t()}
          | {:ok, Tesla.Env.t()}
          | {:ok, list()}
          | {:error, any()}
  def storagetransfer_transfer_operations_list(
        connection,
        name,
        filter,
        optional_params \\ [],
        opts \\ []
      ) do
    # Supported optional parameters; all are sent on the query string.
    optional_params_config = %{
      :"$.xgafv" => :query,
      :access_token => :query,
      :alt => :query,
      :callback => :query,
      :fields => :query,
      :key => :query,
      :oauth_token => :query,
      :prettyPrint => :query,
      :quotaUser => :query,
      :uploadType => :query,
      :upload_protocol => :query,
      :pageSize => :query,
      :pageToken => :query
    }

    # Build the HTTP request (the required `filter` is always added to the
    # query string), execute it, and decode into the expected model.
    request =
      Request.new()
      |> Request.method(:get)
      |> Request.url("/v1/{+name}", %{
        "name" => URI.encode(name, &URI.char_unreserved?/1)
      })
      |> Request.add_param(:query, :filter, filter)
      |> Request.add_optional_params(optional_params_config, optional_params)
      |> Request.library_version(@library_version)

    connection
    |> Connection.execute(request)
    |> Response.decode(
      opts ++ [struct: %GoogleApi.StorageTransfer.V1.Model.ListOperationsResponse{}]
    )
  end

  @doc """
  Pauses a transfer operation.

  ## Parameters

  *   `connection` (*type:* `GoogleApi.StorageTransfer.V1.Connection.t`) - Connection to server
  *   `name` (*type:* `String.t`) - Required. The name of the transfer operation.
  *   `optional_params` (*type:* `keyword()`) - Optional parameters
      *   `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
      *   `:access_token` (*type:* `String.t`) - OAuth access token.
      *   `:alt` (*type:* `String.t`) - Data format for response.
      *   `:callback` (*type:* `String.t`) - JSONP
      *   `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
      *   `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
      *   `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
      *   `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
      *   `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
      *   `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
      *   `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
      *   `:body` (*type:* `GoogleApi.StorageTransfer.V1.Model.PauseTransferOperationRequest.t`) -
  *   `opts` (*type:* `keyword()`) - Call options

  ## Returns

  *   `{:ok, %GoogleApi.StorageTransfer.V1.Model.Empty{}}` on success
  *   `{:error, info}` on failure
  """
  @spec storagetransfer_transfer_operations_pause(
          Tesla.Env.client(),
          String.t(),
          keyword(),
          keyword()
        ) ::
          {:ok, GoogleApi.StorageTransfer.V1.Model.Empty.t()}
          | {:ok, Tesla.Env.t()}
          | {:ok, list()}
          | {:error, any()}
  def storagetransfer_transfer_operations_pause(
        connection,
        name,
        optional_params \\ [],
        opts \\ []
      ) do
    # Supported optional parameters and their placement (query string or body).
    optional_params_config = %{
      :"$.xgafv" => :query,
      :access_token => :query,
      :alt => :query,
      :callback => :query,
      :fields => :query,
      :key => :query,
      :oauth_token => :query,
      :prettyPrint => :query,
      :quotaUser => :query,
      :uploadType => :query,
      :upload_protocol => :query,
      :body => :body
    }

    # Build the HTTP request, execute it, and decode into the expected model.
    request =
      Request.new()
      |> Request.method(:post)
      |> Request.url("/v1/{+name}:pause", %{
        "name" => URI.encode(name, &URI.char_unreserved?/1)
      })
      |> Request.add_optional_params(optional_params_config, optional_params)
      |> Request.library_version(@library_version)

    connection
    |> Connection.execute(request)
    |> Response.decode(opts ++ [struct: %GoogleApi.StorageTransfer.V1.Model.Empty{}])
  end

  @doc """
  Resumes a transfer operation that is paused.

  ## Parameters

  *   `connection` (*type:* `GoogleApi.StorageTransfer.V1.Connection.t`) - Connection to server
  *   `name` (*type:* `String.t`) - Required. The name of the transfer operation.
  *   `optional_params` (*type:* `keyword()`) - Optional parameters
      *   `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
      *   `:access_token` (*type:* `String.t`) - OAuth access token.
      *   `:alt` (*type:* `String.t`) - Data format for response.
      *   `:callback` (*type:* `String.t`) - JSONP
      *   `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
      *   `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
      *   `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
      *   `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
      *   `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
      *   `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
      *   `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
      *   `:body` (*type:* `GoogleApi.StorageTransfer.V1.Model.ResumeTransferOperationRequest.t`) -
  *   `opts` (*type:* `keyword()`) - Call options

  ## Returns

  *   `{:ok, %GoogleApi.StorageTransfer.V1.Model.Empty{}}` on success
  *   `{:error, info}` on failure
  """
  @spec storagetransfer_transfer_operations_resume(
          Tesla.Env.client(),
          String.t(),
          keyword(),
          keyword()
        ) ::
          {:ok, GoogleApi.StorageTransfer.V1.Model.Empty.t()}
          | {:ok, Tesla.Env.t()}
          | {:ok, list()}
          | {:error, any()}
  def storagetransfer_transfer_operations_resume(
        connection,
        name,
        optional_params \\ [],
        opts \\ []
      ) do
    # Supported optional parameters and their placement (query string or body).
    optional_params_config = %{
      :"$.xgafv" => :query,
      :access_token => :query,
      :alt => :query,
      :callback => :query,
      :fields => :query,
      :key => :query,
      :oauth_token => :query,
      :prettyPrint => :query,
      :quotaUser => :query,
      :uploadType => :query,
      :upload_protocol => :query,
      :body => :body
    }

    # Build the HTTP request, execute it, and decode into the expected model.
    request =
      Request.new()
      |> Request.method(:post)
      |> Request.url("/v1/{+name}:resume", %{
        "name" => URI.encode(name, &URI.char_unreserved?/1)
      })
      |> Request.add_optional_params(optional_params_config, optional_params)
      |> Request.library_version(@library_version)

    connection
    |> Connection.execute(request)
    |> Response.decode(opts ++ [struct: %GoogleApi.StorageTransfer.V1.Model.Empty{}])
  end
end
| 46.372751 | 1,244 | 0.624092 |
e81f48d977dc19ae06654e8971a640d947b6b6ef | 431 | ex | Elixir | lib/hap/characteristics/current_heater_cooler_state.ex | petermm/hap | 550433a78bccd586ab6a7d8bf85765bfae58b13b | [
"MIT"
] | 40 | 2019-10-26T01:58:42.000Z | 2022-03-09T18:18:39.000Z | lib/hap/characteristics/current_heater_cooler_state.ex | petermm/hap | 550433a78bccd586ab6a7d8bf85765bfae58b13b | [
"MIT"
] | 11 | 2021-04-02T14:55:02.000Z | 2021-11-05T13:49:55.000Z | lib/hap/characteristics/current_heater_cooler_state.ex | petermm/hap | 550433a78bccd586ab6a7d8bf85765bfae58b13b | [
"MIT"
defmodule HAP.Characteristics.CurrentHeaterCoolerState do
  @moduledoc """
  Definition of the `public.hap.characteristic.heater-cooler.state.current` characteristic

  Valid values:

  0 Inactive
  1 Idle
  2 Heating
  3 Cooling
  """

  @behaviour HAP.CharacteristicDefinition

  # HAP characteristic type identifier.
  def type, do: "B1"

  # Paired-read and event notifications only; current state is not writable.
  def perms, do: ["pr", "ev"]

  # Unsigned 8-bit integer constrained to 0..3, stepping by 1.
  def format, do: "uint8"
  def min_value, do: 0
  def max_value, do: 3
  def step_value, do: 1
end
| 19.590909 | 90 | 0.707657 |
e81f4aa378efcf4a3b2faec1671f759eaaccaa92 | 251 | ex | Elixir | apps/buzzcms/lib/buzzcms.ex | buzzcms/buzzcms | 8ca8e6dea381350f94cc4a666448b5dba6676520 | [
"Apache-2.0"
] | null | null | null | apps/buzzcms/lib/buzzcms.ex | buzzcms/buzzcms | 8ca8e6dea381350f94cc4a666448b5dba6676520 | [
"Apache-2.0"
] | 41 | 2020-02-12T07:53:14.000Z | 2020-03-30T02:18:14.000Z | apps/buzzcms/lib/buzzcms.ex | buzzcms/buzzcms | 8ca8e6dea381350f94cc4a666448b5dba6676520 | [
"Apache-2.0"
defmodule Buzzcms do
  @moduledoc """
  Buzzcms keeps the contexts that define your domain and business logic.

  Contexts are also responsible for managing your data, regardless of
  whether it comes from the database, an external API, or elsewhere.
  """
end
| 25.1 | 66 | 0.752988 |
e81f977e3b049099b741c9f87105db92e631ce4b | 966 | ex | Elixir | deps/postgrex/lib/postgrex/extensions/jsonb.ex | rchervin/phoenixportfolio | a5a6a60168d7261647a10a8dbd395b440db8a4f9 | [
"MIT"
] | null | null | null | deps/postgrex/lib/postgrex/extensions/jsonb.ex | rchervin/phoenixportfolio | a5a6a60168d7261647a10a8dbd395b440db8a4f9 | [
"MIT"
] | null | null | null | deps/postgrex/lib/postgrex/extensions/jsonb.ex | rchervin/phoenixportfolio | a5a6a60168d7261647a10a8dbd395b440db8a4f9 | [
"MIT"
defmodule Postgrex.Extensions.JSONB do
  @moduledoc false
  import Postgrex.BinaryUtils, warn: false

  # Extension state: {json library module, binary decode strategy}.
  def init(opts) do
    {Keyword.get(opts, :json), Keyword.get(opts, :decode_binary, :copy)}
  end

  # Without a JSON library configured this extension matches no types.
  def matching({nil, _}), do: []
  def matching(_), do: [type: "jsonb"]

  def format(_), do: :binary

  # jsonb wire format: int32 total length, version byte 1, then the JSON text.
  def encode({library, _}) do
    quote location: :keep do
      value ->
        encoded = unquote(library).encode!(value)
        [<<(IO.iodata_length(encoded) + 1) :: int32, 1>> | encoded]
    end
  end

  # :copy copies the JSON sub-binary out of the (potentially large) socket
  # buffer so the latter can be garbage collected independently.
  def decode({library, :copy}) do
    quote location: :keep do
      <<len :: int32, payload :: binary-size(len)>> ->
        <<1, text :: binary>> = payload

        text
        |> :binary.copy()
        |> unquote(library).decode!()
    end
  end

  # :reference decodes straight off the sub-binary without copying.
  def decode({library, :reference}) do
    quote location: :keep do
      <<len :: int32, payload :: binary-size(len)>> ->
        <<1, text :: binary>> = payload
        unquote(library).decode!(text)
    end
  end
end
| 23 | 72 | 0.568323 |
e81fa7438c69e1fb1c3e343b80df2bbde7a6c5d4 | 1,453 | exs | Elixir | mix.exs | tubone24/elixir_performance_tool | d7d8bb44b276ed349c4c3f3d188d15111aabbee0 | [
"MIT"
] | null | null | null | mix.exs | tubone24/elixir_performance_tool | d7d8bb44b276ed349c4c3f3d188d15111aabbee0 | [
"MIT"
] | null | null | null | mix.exs | tubone24/elixir_performance_tool | d7d8bb44b276ed349c4c3f3d188d15111aabbee0 | [
"MIT"
defmodule ElixirPerformanceTool.MixProject do
  use Mix.Project

  @description """
  Checking TAT benchmark test tools for Parallel HTTP requests by Elixir.
  """

  def project do
    [
      app: :elixir_performance_tool,
      version: "0.1.0",
      elixir: "~> 1.9",
      name: "ElixirPerformanceTool",
      description: @description,
      package: package(),
      source_url: "https://github.com/tubone24/elixir_performance_tool",
      homepage_url: "https://github.com/tubone24/elixir_performance_tool",
      start_permanent: Mix.env() == :prod,
      deps: deps(),
      docs: [
        main: "readme",
        logo: "elixir_perfomance_logo.png",
        extras: ["README.md"]
      ]
    ]
  end

  # Run "mix help compile.app" to learn about applications.
  def application do
    [
      extra_applications: [:logger, :httpoison]
    ]
  end

  # Run "mix help deps" to learn about dependencies.
  defp deps do
    [
      {:httpoison, "~> 0.7.4"},
      {:poison, "~> 3.1"},
      {:csv, "~> 2.3"},
      {:earmark, "~> 1.0", only: :dev},
      {:ex_doc, "~> 0.18", only: :dev}
    ]
  end

  # Hex package metadata.
  defp package do
    [
      maintainers: ["tubone24"],
      licenses: ["MIT"],
      links: %{
        GitHub: "https://github.com/tubone24/elixir_performance_tool"
      }
    ]
  end
end
| 24.216667 | 87 | 0.569855 |
e81fc4d7dd7fa9ff879bd998b2c7c72b6f6f6157 | 1,635 | exs | Elixir | mix.exs | jungsoft/uploadex | 3f9748ffd9b6a13c52c3093f9d8e112e2afecf19 | [
"MIT"
] | 11 | 2020-04-20T15:45:47.000Z | 2022-01-07T22:24:27.000Z | mix.exs | gabrielpra1/uploadex | 3f9748ffd9b6a13c52c3093f9d8e112e2afecf19 | [
"MIT"
] | 7 | 2019-09-11T15:36:34.000Z | 2020-01-31T14:04:12.000Z | mix.exs | jungsoft/uploadex | 3f9748ffd9b6a13c52c3093f9d8e112e2afecf19 | [
"MIT"
defmodule Uploadex.MixProject do
  use Mix.Project

  def project do
    [
      app: :uploadex,
      version: "3.0.0-rc.1",
      elixir: "~> 1.8",
      start_permanent: Mix.env() == :prod,
      deps: deps(),
      name: "Uploadex",
      source_url: "https://github.com/gabrielpra1/uploadex",
      description: "Elixir library for handling uploads using Ecto and Arc",
      package: package(),
      elixirc_paths: elixirc_paths(Mix.env()),
      dialyzer: [
        plt_add_apps: [:ex_unit, :mix, :ex_aws, :ex_aws_s3],
        plt_file: {:no_warn, "priv/plts/dialyzer.plt"}
      ]
    ]
  end

  # Test builds also compile the shared test support helpers.
  def elixirc_paths(:test), do: ["lib", "test/support"]
  def elixirc_paths(_), do: ["lib"]

  def application do
    [
      extra_applications: [:logger]
    ]
  end

  # Hex package metadata.
  defp package do
    [
      files: ~w(lib mix.exs README* LICENSE*),
      licenses: ["MIT"],
      links: %{
        "GitHub" => "https://github.com/gabrielpra1/uploadex",
        "Docs" => "https://hexdocs.pm/uploadex/"
      }
    ]
  end

  defp deps do
    [
      # Ecto
      {:ecto, ">= 3.1.7"},
      # For AWS
      {:ex_aws, "~> 2.0", optional: true},
      {:ex_aws_s3, "~> 2.0", optional: true},
      {:poison, ">= 3.0.0", optional: true},
      {:hackney, ">= 1.9.0", optional: true},
      {:sweet_xml, "~> 0.6", optional: true},
      # Runtime checks and doc
      {:ex_doc, "~> 0.19", only: :dev, runtime: false},
      {:credo, "~> 1.1.0", only: [:dev, :test], runtime: false},
      {:dialyxir, github: "gabrielpra1/dialyxir", only: [:dev, :test], runtime: false},
      # Temporary files
      {:task_after, "~> 1.0.0"}
    ]
  end
end
| 26.370968 | 87 | 0.543119 |
e82020570d896f94b1a443c641208e11ec5c18cf | 68 | exs | Elixir | tags/test/views/page_view_test.exs | merxer/kata | 5dbbca8b4173029f9311398148de9437a329cf9a | [
"MIT"
] | null | null | null | tags/test/views/page_view_test.exs | merxer/kata | 5dbbca8b4173029f9311398148de9437a329cf9a | [
"MIT"
] | null | null | null | tags/test/views/page_view_test.exs | merxer/kata | 5dbbca8b4173029f9311398148de9437a329cf9a | [
"MIT"
defmodule Tags.PageViewTest do
  @moduledoc false
  use Tags.ConnCase, async: true
end
| 17 | 32 | 0.794118 |
e82023817f20a9adecfe4ac8dd17008a46dc0fd2 | 2,126 | exs | Elixir | test/widecolumn_read_test.exs | Yarnus/ex_aliyun_ots | 1d2ed426cb18fca68811e7a6297da9583cbd0a77 | [
"MIT"
] | 19 | 2018-05-10T09:33:25.000Z | 2022-01-25T11:57:33.000Z | test/widecolumn_read_test.exs | Yarnus/ex_aliyun_ots | 1d2ed426cb18fca68811e7a6297da9583cbd0a77 | [
"MIT"
] | 23 | 2019-11-28T07:51:52.000Z | 2022-03-09T08:33:42.000Z | test/widecolumn_read_test.exs | Yarnus/ex_aliyun_ots | 1d2ed426cb18fca68811e7a6297da9583cbd0a77 | [
"MIT"
defmodule ExAliyunOtsTest.WideColumnRead do
  use ExUnit.Case

  use ExAliyunOts,
    instance: EDCEXTestInstance

  require Logger

  alias ExAliyunOts.Const.PKType
  require PKType

  test "wide column read" do
    # Unique table name per run so repeated runs do not collide.
    table_name = "test_wcr_#{System.os_time(:second)}"

    assert :ok == create_table(table_name, [{"key", PKType.string()}])

    columns = [
      {"bcol", "bc"},
      {"ecol", "ec"},
      {"dcol", "dc"},
      {"acol", "ac"},
      {"fcol", "fc"},
      {"ccol", "cc"},
      {"room_a", "room_a1"},
      {"room_c", "room_c2"},
      {"room_b", "room_b3"},
      {"room_x", "room_x4"},
      {"room_g", "room_g5"},
      {"room_e", "room_e6"}
    ]

    {:ok, _} =
      put_row(table_name, [{"key", "1"}], columns,
        condition: condition(:expect_not_exist)
      )

    # Wide column read bounded by start/end: `start_column: "room"` together
    # with `end_column: "room|"` selects every "room_*" attribute column.
    {:ok, response} =
      get_row(table_name, [{"key", "1"}],
        start_column: "room",
        end_column: "room|"
      )

    {_primary_keys, attrs} = response.row

    assert [
             {"room_a", "room_a1", _},
             {"room_b", "room_b3", _},
             {"room_c", "room_c2", _},
             {"room_e", "room_e6", _},
             {"room_g", "room_g5", _},
             {"room_x", "room_x4", _}
           ] = attrs

    # Wide column read through a pagination filter.
    {:ok, response} =
      get_row(table_name, [{"key", "1"}],
        start_column: "room",
        filter: pagination(offset: 0, limit: 3)
      )

    {_primary_keys, page} = response.row

    assert [
             {"room_a", "room_a1", _},
             {"room_b", "room_b3", _},
             {"room_c", "room_c2", _}
           ] = page

    assert :ok == delete_table(table_name)
  end
end
| 28.346667 | 101 | 0.556444 |
e82080922e4d14e179ba6370c91ab5368069274a | 1,925 | ex | Elixir | lib/dgraph_ex/mutation/mutation_set.ex | WolfDan/dgraph_ex | 4dad42983f2387f10febf9996ac8f2db20aea710 | [
"MIT"
] | 21 | 2017-08-20T06:19:37.000Z | 2021-02-04T23:22:10.000Z | lib/dgraph_ex/mutation/mutation_set.ex | WolfDan/dgraph_ex | 4dad42983f2387f10febf9996ac8f2db20aea710 | [
"MIT"
] | 43 | 2017-08-06T21:03:28.000Z | 2018-09-08T13:00:35.000Z | lib/dgraph_ex/mutation/mutation_set.ex | WolfDan/dgraph_ex | 4dad42983f2387f10febf9996ac8f2db20aea710 | [
"MIT"
defmodule DgraphEx.Mutation.MutationSet do
  @moduledoc false

  alias DgraphEx.Mutation.MutationSet
  alias DgraphEx.Field

  # Accumulates the fields rendered inside a dgraph `set { ... }` block.
  defstruct fields: []

  defmacro __using__(_) do
    quote do
      alias DgraphEx.{Mutation, Vertex}
      alias Mutation.MutationSet

      def set(%Mutation{} = mut) do
        Mutation.put_sequence(mut, %MutationSet{})
      end

      defp raise_vertex_only_error do
        raise %ArgumentError{
          message: "MutationSet.set structs must be Vertex models only"
        }
      end

      def set(%Mutation{} = mut, %{__struct__: module} = model) do
        if DgraphEx.Util.has_function?(module, :__vertex__, 1) do
          set(mut, Vertex.setter_subject(model), model)
        else
          raise_vertex_only_error()
        end
      end

      def set(%Mutation{} = mut, subject, %{__struct__: module} = model) do
        if DgraphEx.Util.has_function?(module, :__vertex__, 1) do
          Mutation.put_sequence(mut, %MutationSet{
            fields: Vertex.populate_fields(subject, module, model)
          })
        else
          raise_vertex_only_error()
        end
      end
    end
  end

  @doc """
  Prepends a field onto the set.
  """
  def put_field(%MutationSet{fields: prev_fields} = set, %Field{} = field) do
    %{set | fields: [field | prev_fields]}
  end

  @doc """
  Merges the fields of the second set into the first.
  """
  def merge(%MutationSet{fields: fields1} = mset1, %MutationSet{fields: fields2}) do
    # Fix: previously the concatenation was wrapped in an extra list
    # (`fields: [fields1 ++ fields2]`), nesting the fields one level deep.
    # `render_fields/1` maps Field.as_setter/1 over each element, so a merged
    # set would pass a list (not a Field) to it. Keep the list flat instead.
    %{mset1 | fields: fields1 ++ fields2}
  end

  @doc """
  Renders the set as a dgraph `set { ... }` block; an empty set renders as "".
  """
  def render(%MutationSet{fields: []}), do: ""

  def render(%MutationSet{fields: [_ | _] = fields}) do
    "set { " <> render_fields(fields) <> " }"
  end

  defp render_fields(fields) do
    fields
    |> remove_uid()
    |> Enum.map(&Field.as_setter/1)
    |> List.flatten()
    |> Enum.join("\n")
  end

  # On a list: drops fields whose predicate is :_uid_ (uid is not settable).
  # On a single field: acts as the filter predicate (false only for :_uid_).
  defp remove_uid(fields) when is_list(fields), do: Enum.filter(fields, &remove_uid/1)
  defp remove_uid(%{predicate: :_uid_}), do: false
  defp remove_uid(x), do: x
end
e820cef07e88170c541fafcbb3968ad6309c094e | 42,603 | ex | Elixir | lib/date/date.ex | appcues/timex | 700643279531bbf1711cd721b3851f025cc28a95 | [
"MIT"
] | null | null | null | lib/date/date.ex | appcues/timex | 700643279531bbf1711cd721b3851f025cc28a95 | [
"MIT"
] | null | null | null | lib/date/date.ex | appcues/timex | 700643279531bbf1711cd721b3851f025cc28a95 | [
"MIT"
] | null | null | null | defmodule Timex.Date do
@moduledoc """
Module for working with dates.
Functions that produce time intervals use UNIX epoch (or simly Epoch) as the
default reference date. Epoch is defined as UTC midnight of January 1, 1970.
Time intervals in this module don't account for leap seconds.
Supported tasks:
* get current date in the desired time zone
* convert dates between time zones and time units
* introspect dates to find out weekday, week number, number of days in a given month, etc.
* parse dates from string
* compare dates
* date arithmetic
"""
require Record
alias __MODULE__, as: Date
alias Timex.DateTime, as: DateTime
alias Timex.Time, as: Time
alias Timex.Timezone, as: Timezone
alias Timex.TimezoneInfo, as: TimezoneInfo
# Date types
@type year :: non_neg_integer
@type month :: 1..12
@type day :: 1..31
@type daynum :: 1..366
@type weekday :: 1..7
@type weeknum :: 1..53
@type num_of_days :: 28..31
# Time types
@type hour :: 0..23
@type minute :: 0..59
@type second :: 0..59
@type timestamp :: {megaseconds, seconds, microseconds }
@type megaseconds :: non_neg_integer
@type seconds :: non_neg_integer
@type microseconds :: non_neg_integer
# Complex types
@type time :: { hour, minute, second }
@type date :: { year, month, day }
@type datetime :: { date, time }
@type dtz :: { datetime, TimezoneInfo.t }
@type iso_triplet :: { year, weeknum, weekday }
# Constants
@valid_months 1..12
@million 1_000_000
@weekdays [
{"Monday", 1}, {"Tuesday", 2}, {"Wednesday", 3}, {"Thursday", 4},
{"Friday", 5}, {"Saturday", 6}, {"Sunday", 7}
]
@months [
{"January", 1}, {"February", 2}, {"March", 3},
{"April", 4}, {"May", 5}, {"June", 6},
{"July", 7}, {"August", 8}, {"September", 9},
{"October", 10}, {"November", 11}, {"December", 12}
]
# {is_leap_year, month, shift}
@ordinal_day_map [
{true, 1, 0}, {false, 1, 0},
{true, 2, 31}, {false, 2, 31},
{true, 3, 60}, {false, 3, 59},
{true, 4, 91}, {false, 4, 90},
{true, 5, 121}, {false, 5, 120},
{true, 6, 152}, {false, 6, 151},
{true, 7, 182}, {false, 7, 181},
{true, 8, 213}, {false, 8, 212},
{true, 9, 244}, {false, 9, 243},
{true, 10, 274}, {false, 10, 273},
{true, 11, 305}, {false, 11, 304},
{true, 12, 335}, {false, 12, 334}
]
@doc """
Get a TimezoneInfo object for the specified offset or name.
When offset or name is invalid, exception is raised.
## Examples
iex> date = #{__MODULE__}.from({2015, 4, 12})
iex> tz = #{__MODULE__}.timezone(:utc, date)
iex> tz.full_name
"UTC"
iex> date = #{__MODULE__}.from({2015, 4, 12})
iex> tz = #{__MODULE__}.timezone("America/Chicago", date)
iex> {tz.full_name, tz.abbreviation}
{"America/Chicago", "CDT"}
iex> date = #{__MODULE__}.from({2015, 4, 12})
iex> tz = #{__MODULE__}.timezone(+2, date)
iex> {tz.full_name, tz.abbreviation}
{"Etc/GMT-2", "GMT-2"}
"""
@spec timezone(:local | :utc | number | binary, DateTime.t | nil) :: TimezoneInfo.t
def timezone(:local, {{_,_,_},{_,_,_}}=datetime), do: Timezone.local(construct(datetime))
def timezone(:local, %DateTime{}=date), do: Timezone.local(date)
def timezone(:utc, _), do: %TimezoneInfo{}
defdelegate timezone(name, datetime), to: Timezone, as: :get
@doc """
Get current date.
## Examples
> #{__MODULE__}.now
%Timex.DateTime{year: 2015, month: 6, day: 26, hour: 23, minute: 56, second: 12}
"""
@spec now() :: DateTime.t
def now do
construct(calendar_universal_time(), %TimezoneInfo{})
end
@doc """
Get the current date, in a specific timezone.
## Examples
iex> %Timex.DateTime{timezone: tz} = #{__MODULE__}.now("America/Chicago")
iex> tz.abbreviation in ["CST", "CDT"]
true
iex> tz.full_name === "America/Chicago"
true
"""
@spec now(binary) :: DateTime.t
def now(tz) when is_binary(tz) do
Timezone.convert(now(), tz)
end
@doc """
Get representation of the current date in seconds or days since Epoch.
See convert/2 for converting arbitrary dates to various time units.
## Examples
> #{__MODULE__}.now(:secs)
1363439013
> #{__MODULE__}.now(:days)
15780
"""
@spec now(:secs | :days) :: integer
def now(:secs), do: to_secs(now())
def now(:days), do: to_days(now())
@doc """
Get current local date.
See also `universal/0`.
## Examples
> #{__MODULE__}.local
%Timex.DateTime{year: 2013, month: 3, day: 16, hour: 11, minute: 1, second: 12, timezone: %TimezoneInfo{}}
"""
@spec local() :: DateTime.t
def local do
date = construct(calendar_local_time())
tz = Timezone.local(date)
%{date | :timezone => tz}
end
@doc """
Convert a date to your local timezone.
See also `universal/1`.
## Examples
Date.now |> Date.local
"""
@spec local(date :: DateTime.t) :: DateTime.t
def local(%DateTime{:timezone => tz} = date) do
case Timezone.local(date) do
^tz -> date
new_zone -> Timezone.convert(date, new_zone)
end
end
@doc """
Get current the current datetime in UTC.
See also `local/0`. Delegates to `now/0`, since they are identical in behavior
## Examples
> #{__MODULE__}.universal
%Timex.DateTime{timezone: %Timex.TimezoneInfo{full_name: "UTC"}}
"""
@spec universal() :: DateTime.t
defdelegate universal, to: __MODULE__, as: :now
@doc """
Convert a date to UTC
See also `local/1`.
## Examples
> localdate = Date.local
%Timex.DateTime{hour: 5, timezone: %Timex.TimezoneInfo{full_name: "America/Chicago"}}
> localdate |> Date.universal
%Timex.DateTime{hour: 10, timezone: %Timex.TimezoneInfo{full_name: "UTC"}}
"""
@spec universal(DateTime.t) :: DateTime.t
def universal(date), do: Timezone.convert(date, %TimezoneInfo{})
@doc """
The first day of year zero (calendar module's default reference date).
See also `epoch/0`.
## Examples
iex> date = %Timex.DateTime{year: 0, month: 1, day: 1, timezone: %Timex.TimezoneInfo{}}
iex> #{__MODULE__}.zero === date
true
"""
@spec zero() :: DateTime.t
def zero, do: construct({0, 1, 1}, {0, 0, 0}, %TimezoneInfo{})
@doc """
The date of Epoch, used as default reference date by this module
and also by the Time module.
See also `zero/0`.
## Examples
iex> date = %Timex.DateTime{year: 1970, month: 1, day: 1, timezone: %Timex.TimezoneInfo{}}
iex> #{__MODULE__}.epoch === date
true
"""
@spec epoch() :: DateTime.t
def epoch, do: construct({1970, 1, 1}, {0, 0, 0}, %TimezoneInfo{})
@doc """
Time interval since year 0 of Epoch expressed in the specified units.
## Examples
iex> #{__MODULE__}.epoch(:timestamp)
{0,0,0}
iex> #{__MODULE__}.epoch(:secs)
62167219200
"""
@spec epoch(:timestamp) :: timestamp
@spec epoch(:secs | :days) :: integer
def epoch(:timestamp), do: to_timestamp(epoch())
def epoch(:secs), do: to_secs(epoch(), :zero)
@doc """
Construct a date from Erlang's date or datetime value.
You may specify the date's time zone as the second argument. If the argument
is omitted, UTC time zone is assumed.
When passing {year, month, day} as the first argument, the resulting date
will indicate midnight of that day in the specified timezone (UTC by
default).
NOTE: When using `from` the input value is normalized to prevent invalid
dates from being accidentally introduced. Use `set` with `validate: false`,
or create the %DateTime{} by hand if you do not want normalization.
## Examples
> Date.from(:erlang.universaltime) #=> %DateTime{...}
> Date.from(:erlang.localtime) #=> %Datetime{...}
> Date.from(:erlang.localtime, :local) #=> %DateTime{...}
> Date.from({2014,3,16}, "America/Chicago") #=> %DateTime{...}
"""
@spec from(datetime | date) :: DateTime.t
@spec from(datetime | date, :utc | :local | TimezoneInfo.t | binary) :: DateTime.t
def from({y,m,d} = date) when is_integer(y) and is_integer(m) and is_integer(d), do: from(date, :utc)
def from({{_,_,_},{_,_,_}} = datetime), do: from(datetime, :utc)
def from({{_,_,_},{_,_,_,_}} = datetime), do: from(datetime, :utc)
def from({_,_,_} = date, :utc), do: construct({date, {0,0,0}}, %TimezoneInfo{})
def from({{_,_,_},{_,_,_}} = datetime, :utc), do: construct(datetime, %TimezoneInfo{})
def from({{_,_,_},{_,_,_,_}} = datetime, :utc), do: construct(datetime, %TimezoneInfo{})
def from({_,_,_} = date, :local), do: from({date, {0,0,0}}, timezone(:local, {date, {0,0,0}}))
def from({{_,_,_},{_,_,_}} = datetime, :local), do: from(datetime, timezone(:local, datetime))
def from({{_,_,_}=date,{h,min,sec,_}} = datetime, :local),do: from(datetime, timezone(:local, {date,{h, min, sec}}))
def from({_,_,_} = date, %TimezoneInfo{} = tz), do: from({date, {0,0,0}}, tz)
def from({{_,_,_},{_,_,_}} = datetime, %TimezoneInfo{} = tz), do: construct(datetime, tz)
def from({{_,_,_},{_,_,_,_}} = datetime, %TimezoneInfo{} = tz), do: construct(datetime, tz)
def from({_,_,_} = date, tz) when is_binary(tz), do: from({date, {0, 0, 0}}, tz)
def from({{_,_,_}=d,{h,m,s}}, tz) when is_binary(tz), do: from({d,{h,m,s,0}},tz)
def from({{_,_,_}=date,{h,min,sec,_}} = datetime, tz) when is_binary(tz) do
case timezone(tz, {date, {h,min,sec}}) do
%TimezoneInfo{} = tzinfo ->
construct(datetime, tzinfo)
{:error, _} = error ->
error
end
end
@doc """
Construct a date from a time interval since Epoch or year 0.
UTC time zone is assumed. This assumption can be modified by setting desired
time zone using set/3 after the date is constructed.
## Examples
> Date.from(13, :secs)
> Date.from(13, :days, :zero)
> Date.from(Time.now, :timestamp)
"""
@spec from(timestamp, :timestamp) :: DateTime.t
@spec from(number, :us | :secs | :days) :: DateTime.t
@spec from(timestamp, :timestamp, :epoch | :zero) :: DateTime.t
@spec from(number, :us | :secs | :days, :epoch | :zero) :: DateTime.t
def from(value, type, reference \\ :epoch)
def from({mega, sec, us}, :timestamp, :epoch), do: from((mega * @million + sec) * @million + us, :us)
def from({mega, sec, us}, :timestamp, :zero) do
from((mega * @million + sec) * @million + us, :us, :zero)
end
def from(us, :us, :epoch) do
construct(calendar_gregorian_microseconds_to_datetime(us, epoch(:secs)), %TimezoneInfo{})
end
def from(us, :us, :zero) do
construct(calendar_gregorian_microseconds_to_datetime(us, 0), %TimezoneInfo{})
end
def from(sec, :secs, :epoch) do
construct(:calendar.gregorian_seconds_to_datetime(trunc(sec) + epoch(:secs)), %TimezoneInfo{})
end
def from(sec, :secs, :zero) do
construct(:calendar.gregorian_seconds_to_datetime(trunc(sec)), %TimezoneInfo{})
end
def from(days, :days, :epoch) do
construct(:calendar.gregorian_days_to_date(trunc(days) + to_days(epoch(), :zero)), {0,0,0}, %TimezoneInfo{})
end
def from(days, :days, :zero) do
construct(:calendar.gregorian_days_to_date(trunc(days)), {0,0,0}, %TimezoneInfo{})
end
@doc """
Convert a date to a timestamp value consumable by the Time module.
See also `diff/2` if you want to specify an arbitrary reference date.
## Examples
iex> #{__MODULE__}.epoch |> #{__MODULE__}.to_timestamp
{0,0,0}
"""
@spec to_timestamp(DateTime.t) :: timestamp
@spec to_timestamp(DateTime.t, :epoch | :zero) :: timestamp
def to_timestamp(date, reference \\ :epoch)
def to_timestamp(%DateTime{:ms => ms} = date, :epoch) do
sec = to_secs(date)
{ div(sec, @million), rem(sec, @million), ms * 1000 }
end
def to_timestamp(%DateTime{:ms => ms} = date, :zero) do
sec = to_secs(date, :zero)
{ div(sec, @million), rem(sec, @million), ms * 1000 }
end
@doc """
Convert a date to an integer number of seconds since Epoch or year 0.
With `to_secs/3`, you can also specify an option `utc: false | true`,
which controls whether the DateTime is converted to UTC prior to calculating
the number of seconds from the reference date. By default, UTC conversion is
enabled.
See also `diff/2` if you want to specify an arbitrary reference date.
## Examples
iex> #{__MODULE__}.from({{1999, 1, 2}, {12,13,14}}) |> #{__MODULE__}.to_secs
915279194
"""
@spec to_secs(DateTime.t) :: integer
@spec to_secs(DateTime.t, :epoch | :zero) :: integer
@spec to_secs(DateTime.t, :epoch | :zero, [utc: false | true]) :: integer
def to_secs(date, reference \\ :epoch, options \\ [utc: true])
def to_secs(date, :epoch, utc: true), do: to_secs(date, :zero) - epoch(:secs)
def to_secs(date, :zero, utc: true) do
offset = Timex.Timezone.diff(date, %TimezoneInfo{})
case offset do
0 -> utc_to_secs(date)
_ -> utc_to_secs(date) + ( 60 * offset )
end
end
def to_secs(date, :epoch, utc: false), do: to_secs(date, :zero, utc: false) - epoch(:secs)
def to_secs(date, :zero, utc: false), do: utc_to_secs(date)
defp utc_to_secs(%DateTime{:year => y, :month => m, :day => d, :hour => h, :minute => min, :second => s}) do
:calendar.datetime_to_gregorian_seconds({{y, m, d}, {h, min, s}})
end
@doc """
Convert the date to an integer number of days since Epoch or year 0.
See also `diff/2` if you want to specify an arbitray reference date.
## Examples
iex> #{__MODULE__}.from({1970, 1, 15}) |> #{__MODULE__}.to_days
14
"""
@spec to_days(DateTime.t) :: integer
@spec to_days(DateTime.t, :epoch | :zero) :: integer
def to_days(date, reference \\ :epoch)
def to_days(date, :epoch), do: to_days(date, :zero) - to_days(epoch(), :zero)
def to_days(%DateTime{:year => y, :month => m, :day => d}, :zero) do
:calendar.date_to_gregorian_days({y, m, d})
end
@doc """
Gets the current century
## Examples
iex> #{__MODULE__}.century
21
"""
@spec century() :: non_neg_integer
def century(), do: Date.now |> century
@doc """
Given a date, get the century this date is in.
## Examples
iex> #{__MODULE__}.now |> #{__MODULE__}.century
21
"""
@spec century(DateTime.t) :: non_neg_integer
def century(%DateTime{:year => y}) do
base_century = div(y, 100)
years_past = rem(y, 100)
cond do
base_century == (base_century - years_past) -> base_century
true -> base_century + 1
end
end
@doc """
Return weekday number (as defined by ISO 8601) of the specified date.
## Examples
iex> #{__MODULE__}.epoch |> #{__MODULE__}.weekday
4 # (i.e. Thursday)
"""
@spec weekday(DateTime.t) :: weekday
def weekday(%DateTime{:year => y, :month => m, :day => d}), do: :calendar.day_of_the_week({y, m, d})
@doc """
Returns the ordinal day number of the date.
## Examples
iex> #{__MODULE__}.from({{2015,6,26},{0,0,0}}) |> #{__MODULE__}.day
177
"""
@spec day(DateTime.t) :: daynum
def day(date) do
start_of_year = date |> set([month: 1, day: 1])
1 + diff(start_of_year, date, :days)
end
  @doc """
  Convert an ISO ordinal day number to the date it represents in the
  current year. If no date is provided, a new one will be created, with
  the time set to 0:00:00, in UTC. Otherwise, the date provided will
  have its month and day reset to the date represented by the ordinal day.

  ## Examples

      # Creating a DateTime from the given day
      iex> expected = #{__MODULE__}.from({{2015, 6, 29}, {0,0,0}})
      iex> (#{__MODULE__}.from_iso_day(180) === expected)
      true

      # Shifting a DateTime to the given day
      iex> date = #{__MODULE__}.from({{2015,6,26}, {12,0,0}})
      iex> expected = #{__MODULE__}.from({{2015, 6, 29}, {12,0,0}})
      iex> (#{__MODULE__}.from_iso_day(180, date) === expected)
      true

  """
  @spec from_iso_day(non_neg_integer, DateTime.t | nil) :: DateTime.t
  def from_iso_day(day, date \\ nil)
  # No reference date: resolve the ordinal day against the current UTC year,
  # yielding a midnight-UTC DateTime.
  def from_iso_day(day, nil) do
    {{year,_,_},_} = :calendar.universal_time
    datetime = iso_day_to_date_tuple(year, day)
    Date.from(datetime)
  end
  # With a reference date: keep its time and timezone fields, replacing only
  # year/month/day with the values resolved from the ordinal day.
  def from_iso_day(day, %DateTime{year: year} = date) do
    {year, month, day_of_month} = iso_day_to_date_tuple(year, day)
    %{date | :year => year, :month => month, :day => day_of_month}
  end
  # Resolves a possibly out-of-range ordinal day against `year`: days below 1
  # roll back into the previous year and days past the year's length roll
  # forward, then the normalized ordinal day is mapped to {year, month, day}
  # via the per-month offsets in @ordinal_day_map.
  defp iso_day_to_date_tuple(year, day) do
    {year, day} = cond do
      # Non-positive ordinal days belong to the previous year.
      day < 1 && :calendar.is_leap_year(year - 1) -> {year - 1, day + 366}
      day < 1 -> {year - 1, day + 365}
      # NOTE(review): the forward-rollover branches keep the same `year`
      # while the backward branches decrement it — confirm whether these
      # should yield {year + 1, ...} instead.
      day > 366 && :calendar.is_leap_year(year) -> {year, day - 366}
      day > 365 -> {year, day - 365}
      true -> {year, day}
    end
    # Pick the last @ordinal_day_map entry whose first-ordinal-day is <= day.
    # NOTE(review): the map interleaves leap and non-leap entries but the
    # leap flag is not matched against the actual year here — verify behavior
    # around Feb 29 / Mar 1.
    {_, month, first_of_month} = Enum.take_while(@ordinal_day_map, fn {_, _, oday} -> oday <= day end) |> List.last
    {year, month, day - first_of_month}
  end
@doc """
Return a pair {year, week number} (as defined by ISO 8601) that date falls
on.
## Examples
iex> #{__MODULE__}.epoch |> #{__MODULE__}.iso_week
{1970,1}
"""
@spec iso_week(DateTime.t) :: {year, weeknum}
def iso_week(%DateTime{:year => y, :month => m, :day => d}) do
:calendar.iso_week_number({y, m, d})
end
def iso_week(date), do: iso_week(from(date, :utc))
@doc """
Get the day of the week corresponding to the given name.
## Examples
iex> #{__MODULE__}.day_to_num("Monday")
1
iex> #{__MODULE__}.day_to_num("monday")
1
iex> #{__MODULE__}.day_to_num("Mon")
1
iex> #{__MODULE__}.day_to_num("mon")
1
iex> #{__MODULE__}.day_to_num(:mon)
1
"""
@spec day_to_num(binary | atom()) :: integer
@weekdays |> Enum.each fn {day_name, day_num} ->
lower = day_name |> String.downcase
abbr_cased = day_name |> String.slice(0..2)
abbr_lower = lower |> String.slice(0..2)
symbol = abbr_lower |> String.to_atom
day_quoted = quote do
def day_to_num(unquote(day_name)), do: unquote(day_num)
def day_to_num(unquote(lower)), do: unquote(day_num)
def day_to_num(unquote(abbr_cased)), do: unquote(day_num)
def day_to_num(unquote(abbr_lower)), do: unquote(day_num)
def day_to_num(unquote(symbol)), do: unquote(day_num)
end
Module.eval_quoted __MODULE__, day_quoted, [], __ENV__
end
# Make an attempt at cleaning up the provided string
def day_to_num(x), do: {:error, "Invalid day name: #{x}"}
@doc """
Get the name of the day corresponding to the provided number
## Examples
iex> #{__MODULE__}.day_name(1)
"Monday"
iex> #{__MODULE__}.day_name(0)
{:error, "Invalid day num: 0"}
"""
# NOTE(review): the specs in this lookup section declare a plain `binary`
# return, but every fallback clause returns an `{:error, binary}` tuple —
# the specs should be widened to `binary | {:error, binary}` (left as-is here).
@spec day_name(weekday) :: binary
# One day_name/1 clause is generated at compile time per entry in @weekdays,
# using unquote fragments directly inside `def`.
@weekdays |> Enum.each fn {name, day_num} ->
  def day_name(unquote(day_num)), do: unquote(name)
end
# Fallback for day numbers outside the generated set.
def day_name(x), do: {:error, "Invalid day num: #{x}"}

@doc """
Get the short name of the day corresponding to the provided number

## Examples

    iex> #{__MODULE__}.day_shortname(1)
    "Mon"
    iex> #{__MODULE__}.day_shortname(0)
    {:error, "Invalid day num: 0"}

"""
@spec day_shortname(weekday) :: binary
# Same generation scheme as day_name/1; the short name is the first three
# characters of the full name.
@weekdays |> Enum.each fn {name, day_num} ->
  def day_shortname(unquote(day_num)), do: String.slice(unquote(name), 0..2)
end
def day_shortname(x), do: {:error, "Invalid day num: #{x}"}

@doc """
Get the number of the month corresponding to the given name.

## Examples

    iex> #{__MODULE__}.month_to_num("January")
    1
    iex> #{__MODULE__}.month_to_num("january")
    1
    iex> #{__MODULE__}.month_to_num("Jan")
    1
    iex> #{__MODULE__}.month_to_num("jan")
    1
    iex> #{__MODULE__}.month_to_num(:jan)
    1

"""
@spec month_to_num(binary) :: integer
# For each month, seven spellings are accepted: "January", "january",
# "Jan", "jan", :jan, and the charlist forms of the full and abbreviated
# names. Clauses are built as a quoted block and injected with
# Module.eval_quoted/4 rather than via unquote fragments.
# NOTE(review): String.to_char_list/1 is a deprecated alias of
# String.to_charlist/1 on modern Elixir — confirm the supported version.
@months |> Enum.each fn {month_name, month_num} ->
  lower = month_name |> String.downcase
  abbr_cased = month_name |> String.slice(0..2)
  abbr_lower = lower |> String.slice(0..2)
  symbol = abbr_lower |> String.to_atom
  full_chars = month_name |> String.to_char_list
  abbr_chars = abbr_cased |> String.to_char_list
  month_quoted = quote do
    def month_to_num(unquote(month_name)), do: unquote(month_num)
    def month_to_num(unquote(lower)), do: unquote(month_num)
    def month_to_num(unquote(abbr_cased)), do: unquote(month_num)
    def month_to_num(unquote(abbr_lower)), do: unquote(month_num)
    def month_to_num(unquote(symbol)), do: unquote(month_num)
    def month_to_num(unquote(full_chars)), do: unquote(month_num)
    def month_to_num(unquote(abbr_chars)), do: unquote(month_num)
  end
  Module.eval_quoted __MODULE__, month_quoted, [], __ENV__
end
# Fallback for any unrecognized month representation.
def month_to_num(x), do: {:error, "Invalid month name: #{x}"}

@doc """
Get the name of the month corresponding to the provided number

## Examples

    iex> #{__MODULE__}.month_name(1)
    "January"
    iex> #{__MODULE__}.month_name(0)
    {:error, "Invalid month num: 0"}

"""
@spec month_name(month) :: binary
# Generated lookup, one clause per @months entry.
@months |> Enum.each fn {name, month_num} ->
  def month_name(unquote(month_num)), do: unquote(name)
end
def month_name(x), do: {:error, "Invalid month num: #{x}"}

@doc """
Get the short name of the month corresponding to the provided number

## Examples

    iex> #{__MODULE__}.month_shortname(1)
    "Jan"
    iex> #{__MODULE__}.month_shortname(0)
    {:error, "Invalid month num: 0"}

"""
@spec month_shortname(month) :: binary
# Generated lookup; short name is the first three characters of the name.
@months |> Enum.each fn {name, month_num} ->
  def month_shortname(unquote(month_num)), do: String.slice(unquote(name), 0..2)
end
def month_shortname(x), do: {:error, "Invalid month num: #{x}"}
@doc """
Return a 3-tuple {year, week number, weekday} for the given date.

## Examples

    iex> #{__MODULE__}.epoch |> #{__MODULE__}.iso_triplet
    {1970, 1, 4}

"""
@spec iso_triplet(DateTime.t) :: {year, weeknum, weekday}
def iso_triplet(%DateTime{} = datetime) do
  # iso_week/1 supplies the ISO year/week pair; weekday/1 adds the day.
  {year, week} = iso_week(datetime)
  {year, week, weekday(datetime)}
end
@doc """
Given an ISO triplet `{year, week number, weekday}`, convert it to a
DateTime struct.

## Examples

    iex> expected = #{__MODULE__}.from({2014, 1, 28})
    iex> #{__MODULE__}.from_iso_triplet({2014, 5, 2}) === expected
    true

"""
@spec from_iso_triplet(iso_triplet) :: DateTime.t
def from_iso_triplet({year, week, weekday}) do
  # The weekday of January 4 anchors ISO week 1 of the given year.
  {_, _, jan4weekday} = iso_triplet(Date.from({year, 1, 4}))
  ordinal_date = week * 7 + weekday - (jan4weekday + 3)

  year
  |> iso_day_to_date_tuple(ordinal_date)
  |> Date.from()
end
@doc """
Return the number of days in the month which the date falls on.

Accepts a `DateTime` struct, a `{year, month}` tuple, or a separate
year/month pair via `days_in_month/2`.

## Examples

    iex> #{__MODULE__}.epoch |> #{__MODULE__}.days_in_month
    31

"""
@spec days_in_month(DateTime.t | {year, month}) :: num_of_days
def days_in_month(%DateTime{:year => year, :month => month}) when year >= 0 and month in @valid_months do
  :calendar.last_day_of_the_month(year, month)
end
# The spec above has always advertised a `{year, month}` tuple, but no
# clause accepted one — this clause fills that documented gap.
def days_in_month({year, month}) when year >= 0 and month in @valid_months do
  :calendar.last_day_of_the_month(year, month)
end
def days_in_month(year, month) when year >= 0 and month in @valid_months do
  :calendar.last_day_of_the_month(year, month)
end
def days_in_month(year, month) do
  # Bug fix: `year >= 0` now mirrors the guard clauses above; the original
  # used `year > 0`, wrongly reporting year 0 as an invalid year when the
  # month was the real problem.
  valid_year?  = year >= 0
  valid_month? = month in @valid_months
  cond do
    !valid_year? && valid_month? ->
      raise ArgumentError, message: "Invalid year passed to days_in_month/2: #{year}"
    valid_year? && !valid_month? ->
      raise ArgumentError, message: "Invalid month passed to days_in_month/2: #{month}"
    true ->
      # Both components invalid (the both-valid case never reaches here,
      # since the guarded clause above would have matched).
      raise ArgumentError, message: "Invalid year/month pair passed to days_in_month/2: {#{year}, #{month}}"
  end
end
@doc """
Return a boolean indicating whether the given year is a leap year. You may
pass a date or a year number.

## Examples

    iex> #{__MODULE__}.epoch |> #{__MODULE__}.is_leap?
    false
    iex> #{__MODULE__}.is_leap?(2012)
    true

"""
@spec is_leap?(DateTime.t | year) :: boolean
# Struct input delegates to the integer clause; both ultimately rely on
# the Erlang calendar's leap-year rule.
def is_leap?(%DateTime{:year => year}), do: is_leap?(year)
def is_leap?(year) when is_integer(year), do: :calendar.is_leap_year(year)
@doc """
Return a boolean indicating whether the given date is valid.

## Examples

    iex> #{__MODULE__}.from({{1,1,1}, {1,1,1}}) |> #{__MODULE__}.is_valid?
    true
    iex> %Timex.DateTime{} |> #{__MODULE__}.set([month: 13, validate: false]) |> #{__MODULE__}.is_valid?
    false
    iex> %Timex.DateTime{} |> #{__MODULE__}.set(hour: -1) |> #{__MODULE__}.is_valid?
    false

"""
@spec is_valid?(dtz | DateTime.t) :: boolean
def is_valid?({date, time, tz}) do
  :calendar.valid_date(date) and is_valid_time?(time) and is_valid_tz?(tz)
end
# The struct form is decomposed into the tuple form and re-checked.
def is_valid?(%DateTime{year: y, month: m, day: d, hour: h, minute: min, second: sec, timezone: tz}) do
  is_valid?({{y, m, d}, {h, min, sec}, tz})
end

# Each time component must sit inside its conventional range.
defp is_valid_time?({hour, min, sec}) do
  hour_ok = hour >= 0 and hour < 24
  min_ok = min >= 0 and min < 60
  sec_ok = sec >= 0 and sec < 60
  hour_ok and min_ok and sec_ok
end

# Only timezone structs whose full name is known to Timezone are valid.
defp is_valid_tz?(%TimezoneInfo{full_name: tzname}), do: Timezone.exists?(tzname)
defp is_valid_tz?(_), do: false
@doc """
Produce a valid date from a possibly invalid one.

All date's components will be clamped to the minimum or maximum valid value.

## Examples

    iex> expected = #{__MODULE__}.from({{1, 12, 31}, {0, 59, 59}}, :local)
    iex> date = {{1,12,31},{0,59,59}}
    iex> localtz = Timex.Timezone.local(date)
    iex> result = {{1,12,31},{0,59,59}, localtz} |> #{__MODULE__}.normalize |> #{__MODULE__}.local
    iex> result === expected
    true

"""
@spec normalize(datetime | dtz | {date, time, TimezoneInfo.t}) :: DateTime.t
# Without a timezone, the default %TimezoneInfo{} struct is assumed.
def normalize({{_, _, _} = date, time}) do
  normalize({date, time, %TimezoneInfo{}})
end
def normalize({{_, _, _} = date, time, tz}) do
  normalized_date = do_normalize(:date, date)
  normalized_time = do_normalize(:time, time)
  construct(normalized_date, normalized_time, tz)
end
# NOTE(review): this spec looks wrong — the clauses return tuples or scalar
# components, not a DateTime struct; kept untouched since it documents a
# private function only.
@spec do_normalize(atom(), term) :: DateTime.t
# Clamp helpers: each clause pins one date/time component to its valid
# range instead of raising (same clamping as the original cond chains).
defp do_normalize(:date, {year, month, day}) do
  normalized_year = do_normalize(:year, year)
  normalized_month = do_normalize(:month, month)
  normalized_day = do_normalize(:day, {normalized_year, normalized_month, day})
  {normalized_year, normalized_month, normalized_day}
end
defp do_normalize(:year, year), do: max(year, 0)
defp do_normalize(:month, month), do: month |> max(1) |> min(12)
defp do_normalize(:time, {hour, minute, second}) do
  {do_normalize(:hour, hour), do_normalize(:minute, minute), do_normalize(:second, second)}
end
defp do_normalize(:time, {hour, minute, second, ms}) do
  {h, m, s} = do_normalize(:time, {hour, minute, second})
  {h, m, s, do_normalize(:ms, ms)}
end
defp do_normalize(:hour, hour), do: hour |> max(0) |> min(23)
defp do_normalize(:minute, minute), do: minute |> max(0) |> min(59)
defp do_normalize(:second, second), do: second |> max(0) |> min(59)
defp do_normalize(:ms, ms), do: ms |> max(0) |> min(999)
# Timezones pass through untouched.
defp do_normalize(:timezone, tz), do: tz
defp do_normalize(:day, {year, month, day}) do
  normalized_year = do_normalize(:year, year)
  normalized_month = do_normalize(:month, month)
  # Days are capped to the actual length of the (normalized) month.
  day |> max(1) |> min(days_in_month(normalized_year, normalized_month))
end
@doc """
Return a new date with the specified fields replaced by new values.

Values are automatically validated and clamped to good values by default. If
you wish to skip validation, perhaps for performance reasons, pass `validate: false`.

Values are applied in order, so if you pass `[datetime: dt, date: d]`, the date value
from `date` will override `datetime`'s date value.

## Examples

    iex> now = #{__MODULE__}.epoch
    iex> #{__MODULE__}.set(now, date: {1,1,1})
    %Timex.DateTime{year: 1, month: 1, day: 1, hour: 0, minute: 0, second: 0, timezone: %Timex.TimezoneInfo{}, calendar: :gregorian}
    iex> #{__MODULE__}.set(now, hour: 8)
    %Timex.DateTime{year: 1970, month: 1, day: 1, hour: 8, minute: 0, second: 0, timezone: %Timex.TimezoneInfo{}, calendar: :gregorian}
    iex> #{__MODULE__}.set(now, [date: {2013,3,26}, hour: 30])
    %Timex.DateTime{year: 2013, month: 3, day: 26, hour: 23, minute: 0, second: 0, timezone: %Timex.TimezoneInfo{}, calendar: :gregorian}
    iex> #{__MODULE__}.set(now, [
    ...>   datetime: {{2013,3,26}, {12,30,0}},
    ...>   date: {2014,4,12}
    ...>])
    %Timex.DateTime{year: 2014, month: 4, day: 12, hour: 12, minute: 30, second: 0, timezone: %Timex.TimezoneInfo{}, calendar: :gregorian}
    iex> #{__MODULE__}.set(now, [minute: 74, validate: false])
    %Timex.DateTime{year: 1970, month: 1, day: 1, hour: 0, minute: 74, second: 0, timezone: %Timex.TimezoneInfo{}, calendar: :gregorian}

"""
@spec set(DateTime.t, list({atom(), term})) :: DateTime.t
def set(%DateTime{} = date, options) do
  # List.keyfind/4's default (`true`) is returned as a bare boolean when no
  # {:validate, _} tuple exists, so both case arms below yield `true` then.
  validate? = case options |> List.keyfind(:validate, 0, true) do
    {:validate, bool} -> bool
    _ -> true
  end
  # Options are applied left to right; later options win on conflict.
  Enum.reduce options, date, fn option, result ->
    case option do
      # The :validate flag is consumed above; it is not a field update.
      {:validate, _} -> result
      # Full date + time in a single option.
      {:datetime, {{y, m, d}, {h, min, sec}}} ->
        if validate? do
          %{result |
            :year => do_normalize(:year, y),
            :month => do_normalize(:month, m),
            :day => do_normalize(:day, {y,m,d}),
            :hour => do_normalize(:hour, h),
            :minute => do_normalize(:minute, min),
            :second => do_normalize(:second, sec)
          }
        else
          %{result | :year => y, :month => m, :day => d, :hour => h, :minute => min, :second => sec}
        end
      {:date, {y, m, d}} ->
        if validate? do
          {yn,mn,dn} = do_normalize(:date, {y,m,d})
          %{result | :year => yn, :month => mn, :day => dn}
        else
          %{result | :year => y, :month => m, :day => d}
        end
      {:time, {h, m, s}} ->
        if validate? do
          %{result | :hour => do_normalize(:hour, h), :minute => do_normalize(:minute, m), :second => do_normalize(:second, s)}
        else
          %{result | :hour => h, :minute => m, :second => s}
        end
      # :day is clamped against the year/month currently held in `result`.
      {:day, d} ->
        if validate? do
          %{result | :day => do_normalize(:day, {result.year, result.month, d})}
        else
          %{result | :day => d}
        end
      # Timezone may be given as a struct or as a value resolved via
      # Timezone.get/2 relative to the date built so far.
      {:timezone, tz} ->
        tz = case tz do
          %TimezoneInfo{} -> tz
          _ -> Timezone.get(tz, result)
        end
        if validate? do
          %{result | :timezone => do_normalize(:timezone, tz)}
        else
          %{result | :timezone => tz}
        end
      # Scalar components share one normalize-and-put path.
      {name, val} when name in [:year, :month, :hour, :minute, :second, :ms] ->
        if validate? do
          Map.put(result, name, do_normalize(name, val))
        else
          Map.put(result, name, val)
        end
      {option_name, _} -> raise "Invalid option passed to Date.set: #{option_name}"
    end
  end
end
@doc """
Compare two dates returning one of the following values:

  * `-1` -- the first date comes before the second one
  * `0`  -- both arguments represent the same date when coalesced to the same timezone.
  * `1`  -- the first date comes after the second one

You can optionally specify a granularity of any of

    :years :months :weeks :days :hours :mins :secs :timestamp

and the dates will be compared with the corresponding accuracy.
The default granularity is :secs.

## Examples

    iex> date1 = #{__MODULE__}.from({2014, 3, 4})
    iex> date2 = #{__MODULE__}.from({2015, 3, 4})
    iex> #{__MODULE__}.compare(date1, date2, :years)
    -1
    iex> #{__MODULE__}.compare(date2, date1, :years)
    1
    iex> #{__MODULE__}.compare(date1, date1)
    0

"""
@spec compare(DateTime.t, DateTime.t | :epoch | :zero | :distant_past | :distant_future) :: -1 | 0 | 1
@spec compare(DateTime.t, DateTime.t, :years | :months | :weeks | :days | :hours | :mins | :secs | :timestamp) :: -1 | 0 | 1
# Well-known reference points resolve to concrete dates first.
def compare(date, :epoch), do: compare(date, epoch())
def compare(date, :zero), do: compare(date, zero())
# Every date is after :distant_past and before :distant_future.
def compare(_, :distant_past), do: +1
def compare(_, :distant_future), do: -1
# Fast path: structurally identical dates are trivially equal.
def compare(date, date), do: 0
def compare(a, b), do: compare(a, b, :secs)
def compare(this, other, granularity)
    when granularity in [:years, :months, :weeks, :days, :hours, :mins, :secs, :timestamp] do
  # diff/3 is positive when `other` is later than `this`, so the sign is
  # inverted here to honor the -1/0/+1 contract documented above.
  difference = diff(this, other, granularity)
  cond do
    difference < 0 -> +1
    difference == 0 -> 0
    difference > 0 -> -1
  end
end
def compare(_, _, _), do: {:error, "Invalid comparison granularity."}
@doc """
Determine if two dates represent the same point in time

## Examples

    iex> date1 = #{__MODULE__}.from({2014, 3, 1})
    iex> date2 = #{__MODULE__}.from({2014, 3, 1})
    iex> #{__MODULE__}.equal?(date1, date2)
    true

"""
@spec equal?(DateTime.t, DateTime.t) :: boolean
def equal?(this, other) do
  # Two dates are equal exactly when compare/2 places them at the same point.
  case compare(this, other) do
    0 -> true
    _ -> false
  end
end
@doc """
Calculate time interval between two dates. If the second date comes after the
first one in time, return value will be positive; and negative otherwise.

You must specify one of the following units:

    :years :months :weeks :days :hours :mins :secs :timestamp

and the result will be an integer value of those units or a timestamp.
"""
@spec diff(DateTime.t, DateTime.t, :timestamp) :: timestamp
@spec diff(DateTime.t, DateTime.t, :secs | :mins | :hours | :days | :weeks | :months | :years) :: integer
# Timestamp diffs are just the seconds diff wrapped into a Time tuple.
def diff(this, other, :timestamp) do
  diff(this, other, :secs) |> Time.from(:secs)
end
# Sub-day units are all derived from seconds counted from year zero.
def diff(this, other, :secs) do
  to_secs(other, :zero) - to_secs(this, :zero)
end
def diff(this, other, :mins) do
  (to_secs(other, :zero) - to_secs(this, :zero)) |> div(60)
end
def diff(this, other, :hours) do
  (to_secs(other, :zero) - to_secs(this, :zero)) |> div(60) |> div(60)
end
def diff(this, other, :days) do
  to_days(other, :zero) - to_days(this, :zero)
end
def diff(this, other, :weeks) do
  # TODO: think of a more accurate method
  diff(this, other, :days) |> div(7)
end
# Month/year diffs count calendar boundaries between the universal/1 forms
# of both dates, not elapsed time.
def diff(this, other, :months) do
  %DateTime{:year => y1, :month => m1} = universal(this)
  %DateTime{:year => y2, :month => m2} = universal(other)
  ((y2 - y1) * 12) + (m2 - m1)
end
def diff(this, other, :years) do
  %DateTime{:year => y1} = universal(this)
  %DateTime{:year => y2} = universal(other)
  y2 - y1
end
@doc """
Add time to a date using a timestamp, i.e. {megasecs, secs, microsecs}
Same as shift(date, Time.to_timestamp(5, :mins), :timestamp).
"""
@spec add(DateTime.t, timestamp) :: DateTime.t
def add(%DateTime{} = date, {mega, sec, _us}) do
  # Microseconds are dropped; the shift operates on whole seconds.
  total_seconds = mega * @million + sec
  shift(date, secs: total_seconds)
end
@doc """
Subtract time from a date using a timestamp, i.e. {megasecs, secs, microsecs}
Same as shift(date, Time.to_timestamp(5, :mins) |> Time.invert, :timestamp).
"""
@spec subtract(DateTime.t, timestamp) :: DateTime.t
def subtract(%DateTime{} = date, {mega, sec, _us}) do
  # Microseconds are dropped; shift backwards by the negated second count.
  total_seconds = mega * @million + sec
  shift(date, secs: -total_seconds)
end
@doc """
A single function for adjusting the date using various units: timestamp,
seconds, minutes, hours, days, weeks, months, years.

When shifting by timestamps, microseconds are ignored.

If the list contains `:month` and at least one other unit, an ArgumentError
is raised (due to ambiguity of such shifts). You can still shift by months
separately.

If `:year` is present, it is applied in the last turn.

The returned date is always valid. If after adding months or years the day
exceeds maximum number of days in the resulting month, that month's last day
is used.

To prevent day skew, fix up the date after shifting. For example, if you want
to land on the last day of the next month, do the following:

    shift(date, 1, :month) |> set(:month, 31)

Since `set/3` is capping values that are out of range, you will get the
correct last day for each month.

## Examples

    date = from({{2013,3,5}, {23,23,23}})

    local(shift(date, secs: 24*3600*365))
    #=> {{2014,3,5}, {23,23,23}}

    local(shift(date, secs: -24*3600*(365*2 + 1))) # +1 day for leap year 2012
    #=> {{2011,3,5}, {23,23,23}}

    local(shift(date, [secs: 13, day: -1, week: 2]))
    #=> {{2013,3,18}, {23,23,36}}

"""
@spec shift(DateTime.t, list({atom(), term})) :: DateTime.t
# Shifting by zero of any single unit is a no-op.
def shift(%DateTime{} = date, [{_, 0}]), do: date
def shift(%DateTime{} = date, [timestamp: {0,0,0}]), do: date
def shift(%DateTime{} = date, [timestamp: timestamp]), do: add(date, timestamp)
# Sub-day shifts: convert to seconds since epoch, add, rebuild, then
# reapply the original timezone onto the rebuilt struct.
def shift(%DateTime{timezone: tz} = date, [{type, value}]) when type in [:secs, :mins, :hours] do
  secs = to_secs(date, :epoch, utc: false)
  secs = secs + case type do
    :secs -> value
    :mins -> value * 60
    :hours -> value * 3600
  end
  shifted = from(secs, :secs)
  %{shifted | :timezone => tz}
end
# Day shifts preserve the wall-clock time and timezone of the input.
def shift(%DateTime{:hour => h, :minute => m, :second => s, :timezone => tz} = date, [days: value]) do
  days = to_days(date)
  days = days + value
  shifted = from(days, :days) |> set([time: {h, m, s}])
  %{shifted | :timezone => tz}
end
# A week is exactly seven days.
def shift(%DateTime{} = date, [weeks: value]) do
  date |> shift([days: value * 7])
end
def shift(%DateTime{} = date, [months: value]) do
  %DateTime{
    :year => year, :month => month, :day => day,
    :hour => h, :minute => m, :second => s,
    :timezone => tz
  } = date
  month = month + value
  # Carry month over/underflow into the year; round_month/1 then maps the
  # month count back onto 1..12, and validate/1 caps the day of month.
  year = cond do
    month == 0 -> year - 1
    month < 0 -> year + div(month, 12) - 1
    month > 12 -> year + div(month - 1, 12)
    true -> year
  end
  validate({year, round_month(month), day}) |> construct({h, m, s}, tz)
end
def shift(%DateTime{} = date, [years: value]) do
  %DateTime{
    :year => year, :month => month, :day => day,
    :hour => h, :minute => m, :second => s,
    :timezone => tz
  } = date
  validate({year + value, month, day}) |> construct({h, m, s}, tz)
end

# Compile-time record used as the accumulator for bulk shifts below.
Record.defrecordp :shift_rec, secs: 0, days: 0, years: 0

# This clause will match lists with at least 2 values
def shift(%DateTime{} = date, spec) when is_list(spec) do
  # Fold every option into second/day/year buckets; :months is rejected
  # because combining it with other units is ambiguous (see @doc).
  shift_rec(secs: sec, days: day, years: year)
    = Enum.reduce spec, shift_rec(), fn
        ({:timestamp, {mega, tsec, _}}, shift_rec(secs: sec) = rec) ->
          shift_rec(rec, [secs: sec + mega * @million + tsec])
        ({:secs, tsec}, shift_rec(secs: sec) = rec) ->
          shift_rec(rec, [secs: sec + tsec])
        ({:mins, min}, shift_rec(secs: sec) = rec) ->
          shift_rec(rec, [secs: sec + min * 60])
        ({:hours, hrs}, shift_rec(secs: sec) = rec) ->
          shift_rec(rec, [secs: sec + hrs * 3600])
        ({:days, days}, shift_rec(days: day) = rec) ->
          shift_rec(rec, [days: day + days])
        ({:weeks, weeks}, shift_rec(days: day) = rec) ->
          shift_rec(rec, [days: day + weeks * 7])
        ({:years, years}, shift_rec(years: year) = rec) ->
          shift_rec(rec, [years: year + years])
        ({:months, _}, _) ->
          raise ArgumentError, message: ":months not supported in bulk shifts"
      end
  # The order in which we apply secs and days is not important.
  # The year shift must always go last though.
  date |> shift([secs: sec]) |> shift([days: day]) |> shift([years: year])
end
# Primary constructor for DateTime objects.
# A missing/nil timezone defaults to the blank %TimezoneInfo{} struct, and a
# 3-element time is padded with 0 milliseconds before the final clause runs.
defp construct({{_, _, _} = date, {_, _, _} = time}), do: construct(date, time, %TimezoneInfo{})
defp construct({{_, _, _} = date, {_, _, _, _} = time}), do: construct(date, time, %TimezoneInfo{})
defp construct({_,_,_} = date, {_,_,_} = time, nil), do: construct(date, time, %TimezoneInfo{})
defp construct({_,_,_} = date, {_,_,_,_} = time, nil), do: construct(date, time, %TimezoneInfo{})
defp construct(date, {h, min, sec}, %TimezoneInfo{} = tz), do: construct(date, {h, min, sec, 0}, tz)
# Canonical clause: normalize both parts and build the struct directly.
defp construct({_,_,_}=date, {_,_,_,_}=time, %TimezoneInfo{} = tz) do
  {y,m,d} = do_normalize(:date, date)
  {h,min,sec,ms} = do_normalize(:time, time)
  %DateTime{
    year: y, month: m, day: d,
    hour: h, minute: min, second: sec,
    ms: ms,
    timezone: tz
  }
end
# A {_, name} timezone tuple is resolved with Timezone.get/2 against the
# freshly-built datetime (resolution can depend on the date itself).
defp construct({_,_,_}=date, {_,_,_,_}=time, {_, name}) do
  {y,m,d} = do_normalize(:date, date)
  {h,min,sec,ms} = do_normalize(:time, time)
  dt = %DateTime{
    year: y, month: m, day: d,
    hour: h, minute: min, second: sec,
    ms: ms
  }
  %{dt | :timezone => Timezone.get(name, dt)}
end
defp construct(date, {h, min, sec}, tz), do: construct(date, {h, min, sec, 0}, tz)
defp construct({date, time}, tz), do: construct(date, time, tz)
# Cap the day of month to the last valid day of {year, month}.
# Bug fix: the original wrote `if day > max_day, do: day = max_day`, which
# relied on the `if` body's rebinding leaking into the enclosing scope —
# behavior that modern Elixir removed, leaving the cap silently ineffective.
# `min/2` applies the cap unconditionally and scope-safely.
defp validate({year, month, day}) do
  {year, month, min(day, days_in_month(year, month))}
end
# Floored modulo: for a positive divisor the result always lies in
# [0, b), unlike rem/2 which keeps the sign of the dividend.
defp mod(a, b) do
  remainder = rem(a, b)
  rem(remainder + b, b)
end

# Map a (possibly shifted) month count onto the 1..12 calendar range,
# with exact multiples of 12 landing on December.
defp round_month(m) do
  normalized = mod(m, 12)
  if normalized == 0, do: 12, else: normalized
end
# Current UTC time as {date, {h, min, sec, ms}}; the microsecond component
# from Timex.Time.now is rounded down to milliseconds.
defp calendar_universal_time() do
  {_, _, us} = ts = Timex.Time.now
  {d,{h,min,sec}} = :calendar.now_to_universal_time(ts)
  {d,{h,min,sec,round(us/1000)}}
end

# Same shape as calendar_universal_time/0 but in the local timezone.
defp calendar_local_time() do
  {_, _, us} = ts = Timex.Time.now
  {d,{h,min,sec}} = :calendar.now_to_local_time(ts)
  {d,{h,min,sec,round(us/1000)}}
end
# Convert a microsecond count (plus an extra whole-second offset) into a
# {date, {h, m, s, ms}} tuple via the gregorian-seconds calendar.
defp calendar_gregorian_microseconds_to_datetime(us, addseconds) do
  whole_seconds = div(us, @million)
  leftover_us = rem(us, @million)
  {date, {h, m, s}} = :calendar.gregorian_seconds_to_datetime(whole_seconds + addseconds)
  {date, {h, m, s, round(leftover_us / 1000)}}
end
end
| 32.872685 | 140 | 0.603666 |
e820fa9c116c5bb148fef25b4f4e47f824b15923 | 1,063 | exs | Elixir | test/fdb/native_test.exs | VinogradovAlexandr/fdb | 312b0a173337993ce28024f338f0977ccf81ae25 | [
"MIT"
] | null | null | null | test/fdb/native_test.exs | VinogradovAlexandr/fdb | 312b0a173337993ce28024f338f0977ccf81ae25 | [
"MIT"
] | null | null | null | test/fdb/native_test.exs | VinogradovAlexandr/fdb | 312b0a173337993ce28024f338f0977ccf81ae25 | [
"MIT"
] | null | null | null | defmodule FDB.NativeTest do
use ExUnit.Case, async: false
import FDB.Native
@current 600
test "get_max_api_version" do
assert get_max_api_version() >= @current
end
test "select_api_version_impl" do
assert_raise ErlangError, ~r/runtime_version/, fn ->
select_api_version_impl("hello", @current)
end
assert_raise ErlangError, ~r/header_version/, fn ->
select_api_version_impl(@current, "hello")
end
assert select_api_version_impl(@current, @current)
assert select_api_version_impl(@current + 100, @current) == 2201
end
test "get_error" do
assert get_error(2202) == "API version not valid"
assert get_error(2201) == "API version may be set only once"
assert get_error(0) == "Success"
assert get_error(42) == "UNKNOWN_ERROR"
end
test "can be resolved multiple times" do
cluster_future = create_cluster(nil) |> FDB.Future.create()
cluster_a = FDB.Future.await(cluster_future)
assert cluster_a
cluster_b = FDB.Future.await(cluster_future)
assert cluster_b
end
end
| 27.25641 | 68 | 0.710254 |
e821058c354f9b9547fc07744d7df6b30fbcbbde | 3,691 | exs | Elixir | lib/mix/test/mix/tasks/deps.tree_test.exs | tsloughter/elixir | 44a9f505c14c58878010cb07349802f99ca225ac | [
"Apache-2.0"
] | null | null | null | lib/mix/test/mix/tasks/deps.tree_test.exs | tsloughter/elixir | 44a9f505c14c58878010cb07349802f99ca225ac | [
"Apache-2.0"
] | null | null | null | lib/mix/test/mix/tasks/deps.tree_test.exs | tsloughter/elixir | 44a9f505c14c58878010cb07349802f99ca225ac | [
"Apache-2.0"
] | null | null | null | Code.require_file "../../test_helper.exs", __DIR__
defmodule Mix.Tasks.Deps.TreeTest do
  use MixTest.Case

  # Fixture project whose two git deps converge: deps_on_git_repo itself
  # depends on git_repo, which is also a direct dependency here.
  defmodule ConvergedDepsApp do
    def project do
      [
        app: :sample,
        version: "0.1.0",
        deps: [
          {:deps_on_git_repo, "0.2.0", git: fixture_path("deps_on_git_repo")},
          {:git_repo, ">= 0.1.0", git: MixTest.Case.fixture_path("git_repo")}
        ]
      ]
    end
  end

  # Fixture project where git_repo is marked `override: true` and the other
  # dep is limited to the :test environment.
  defmodule OverridenDepsApp do
    def project do
      [
        app: :sample,
        version: "0.1.0",
        deps: [
          {:deps_on_git_repo, ~r"0.2.0", git: fixture_path("deps_on_git_repo"), only: :test},
          {:git_repo, git: MixTest.Case.fixture_path("git_repo"), override: true}
        ]
      ]
    end
  end

  test "shows the dependency tree", context do
    Mix.Project.push ConvergedDepsApp

    in_tmp context.test, fn ->
      Mix.Tasks.Deps.Tree.run(["--pretty"])
      assert_received {:mix_shell, :info, ["sample"]}
      assert_received {:mix_shell, :info, ["├── git_repo >= 0.1.0 (" <> _]}
      assert_received {:mix_shell, :info, ["└── deps_on_git_repo 0.2.0 (" <> _]}
      # Before fetching, the transitive git_repo entry is not shown.
      refute_received {:mix_shell, :info, [" └── git_repo (" <> _]}

      Mix.Tasks.Deps.Get.run([])
      Mix.Tasks.Deps.Tree.run(["--pretty"])
      assert_received {:mix_shell, :info, ["sample"]}
      assert_received {:mix_shell, :info, ["├── git_repo >= 0.1.0 (" <> _]}
      assert_received {:mix_shell, :info, ["└── deps_on_git_repo 0.2.0 (" <> _]}
      # After deps.get, the nested dependency appears under its parent.
      assert_received {:mix_shell, :info, [" └── git_repo (" <> _]}
    end
  end

  test "show the dependency tree for umbrella apps" do
    in_fixture "umbrella_dep/deps/umbrella", fn ->
      Mix.Project.in_project(:umbrella, ".", fn _ ->
        Mix.Task.run "deps.tree", ["--pretty"]
        assert_received {:mix_shell, :info, ["foo"]}
        assert_received {:mix_shell, :info, ["bar"]}
        # bar depends on its sibling app foo via a relative path.
        assert_received {:mix_shell, :info, ["└── foo (../foo)"]}
      end)
    end
  end

  test "shows the given dependency", context do
    Mix.Project.push ConvergedDepsApp

    in_tmp context.test, fn ->
      assert_raise Mix.Error, "could not find dependency unknown", fn ->
        Mix.Tasks.Deps.Tree.run(["--pretty", "unknown"])
      end

      # When a dep is given, only its own (fetched) subtree is printed;
      # nothing has been fetched here, so no children show up.
      Mix.Tasks.Deps.Tree.run(["--pretty", "deps_on_git_repo"])
      assert_received {:mix_shell, :info, ["deps_on_git_repo 0.2.0 (" <> _]}
      refute_received {:mix_shell, :info, ["└── git_repo (" <> _]}
    end
  end

  test "shows overriden deps", context do
    Mix.Project.push OverridenDepsApp

    in_tmp context.test, fn ->
      Mix.Tasks.Deps.Tree.run(["--pretty"])
      assert_received {:mix_shell, :info, ["sample"]}
      assert_received {:mix_shell, :info, ["├── git_repo (" <> msg]}
      assert_received {:mix_shell, :info, ["└── deps_on_git_repo ~r/0.2.0/ (" <> _]}
      # Overridden dependencies are annotated in the printed line.
      assert msg =~ "*override*"
    end
  end

  test "excludes the given deps", context do
    Mix.Project.push OverridenDepsApp

    in_tmp context.test, fn ->
      Mix.Tasks.Deps.Tree.run(["--pretty", "--exclude", "deps_on_git_repo"])
      assert_received {:mix_shell, :info, ["sample"]}
      assert_received {:mix_shell, :info, ["└── git_repo (" <> _]}
      refute_received {:mix_shell, :info, ["└── deps_on_git_repo ~r/0.2.0/ (" <> _]}
    end
  end

  test "shows a particular environment", context do
    Mix.Project.push OverridenDepsApp

    in_tmp context.test, fn ->
      # deps_on_git_repo is `only: :test`, so --only prod filters it out.
      Mix.Tasks.Deps.Tree.run(["--pretty", "--only", "prod"])
      assert_received {:mix_shell, :info, ["sample"]}
      assert_received {:mix_shell, :info, ["└── git_repo (" <> _]}
      refute_received {:mix_shell, :info, ["└── deps_on_git_repo ~r/0.2.0/ (" <> _]}
    end
  end
end
| 33.554545 | 93 | 0.596044 |
e8213c28d57c3160a382e4b98c54929193805663 | 946 | ex | Elixir | test/support/channel_case.ex | mzavoloka/kamleague | ba29263ed54cac5c67b537c4b7d1dbc522215341 | [
"MIT"
] | null | null | null | test/support/channel_case.ex | mzavoloka/kamleague | ba29263ed54cac5c67b537c4b7d1dbc522215341 | [
"MIT"
] | 2 | 2021-11-04T21:05:24.000Z | 2021-11-04T21:51:48.000Z | test/support/channel_case.ex | mzavoloka/kamleague | ba29263ed54cac5c67b537c4b7d1dbc522215341 | [
"MIT"
] | 1 | 2021-11-04T18:40:26.000Z | 2021-11-04T18:40:26.000Z | defmodule KamleagueWeb.ChannelCase do
@moduledoc """
This module defines the test case to be used by
channel tests.
Such tests rely on `Phoenix.ChannelTest` and also
import other functionality to make it easier
to build common data structures and query the data layer.
Finally, if the test case interacts with the database,
it cannot be async. For this reason, every test runs
inside a transaction which is reset at the beginning
of the test unless the test case is marked as async.
"""
use ExUnit.CaseTemplate
using do
quote do
# Import conveniences for testing with channels
use Phoenix.ChannelTest
# The default endpoint for testing
@endpoint KamleagueWeb.Endpoint
end
end
setup tags do
:ok = Ecto.Adapters.SQL.Sandbox.checkout(Kamleague.Repo)
unless tags[:async] do
Ecto.Adapters.SQL.Sandbox.mode(Kamleague.Repo, {:shared, self()})
end
:ok
end
end
| 24.894737 | 71 | 0.717759 |
e821519b3897a42e8abc4e5f4e46c73b045c2d23 | 104 | ex | Elixir | lib/supreme_tsugu_chan_web/scheduler.ex | c18t/supreme-tsugu-chan | 9d1d4cffcd917f2454a8a2918389ea239f2a6cdc | [
"MIT"
] | null | null | null | lib/supreme_tsugu_chan_web/scheduler.ex | c18t/supreme-tsugu-chan | 9d1d4cffcd917f2454a8a2918389ea239f2a6cdc | [
"MIT"
] | null | null | null | lib/supreme_tsugu_chan_web/scheduler.ex | c18t/supreme-tsugu-chan | 9d1d4cffcd917f2454a8a2918389ea239f2a6cdc | [
"MIT"
] | null | null | null | defmodule SupremeTsuguChanWeb.Scheduler do
use Quantum.Scheduler,
otp_app: :supreme_tsugu_chan
end | 26 | 42 | 0.826923 |
e8216541cb5675d13b7f6459ec1f6aca08aa9f28 | 3,674 | ex | Elixir | clients/content/lib/google_api/content/v21/model/settlement_transaction_amount.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | null | null | null | clients/content/lib/google_api/content/v21/model/settlement_transaction_amount.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | null | null | null | clients/content/lib/google_api/content/v21/model/settlement_transaction_amount.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | 1 | 2020-10-04T10:12:44.000Z | 2020-10-04T10:12:44.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
# NOTE: auto-generated model (see the generator header above); prefer
# regenerating over hand-editing.
defmodule GoogleApi.Content.V21.Model.SettlementTransactionAmount do
  @moduledoc """

  ## Attributes

  *   `commission` (*type:* `GoogleApi.Content.V21.Model.SettlementTransactionAmountCommission.t`, *default:* `nil`) -
  *   `description` (*type:* `String.t`, *default:* `nil`) - The description of the event.
      Acceptable values are:
      - "`taxWithhold`"
      - "`principal`"
      - "`principalAdjustment`"
      - "`shippingFee`"
      - "`merchantRemittedSalesTax`"
      - "`googleRemittedSalesTax`"
      - "`merchantCoupon`"
      - "`merchantCouponTax`"
      - "`merchantRemittedDisposalTax`"
      - "`googleRemittedDisposalTax`"
      - "`merchantRemittedRedemptionFee`"
      - "`googleRemittedRedemptionFee`"
      - "`eeeEcoFee`"
      - "`furnitureEcoFee`"
      - "`copyPrivateFee`"
      - "`eeeEcoFeeCommission`"
      - "`furnitureEcoFeeCommission`"
      - "`copyPrivateFeeCommission`"
      - "`principalRefund`"
      - "`principalRefundTax`"
      - "`itemCommission`"
      - "`adjustmentCommission`"
      - "`shippingFeeCommission`"
      - "`commissionRefund`"
      - "`damaged`"
      - "`damagedOrDefectiveItem`"
      - "`expiredItem`"
      - "`faultyItem`"
      - "`incorrectItemReceived`"
      - "`itemMissing`"
      - "`qualityNotExpected`"
      - "`receivedTooLate`"
      - "`storePackageMissing`"
      - "`transitPackageMissing`"
      - "`unsuccessfulDeliveryUndeliverable`"
      - "`wrongChargeInStore`"
      - "`wrongItem`"
      - "`returns`"
      - "`undeliverable`"
      - "`refundFromMerchant`"
      - "`returnLabelShippingFee`"
  *   `transactionAmount` (*type:* `GoogleApi.Content.V21.Model.Price.t`, *default:* `nil`) - The amount that contributes to the line item price.
  *   `type` (*type:* `String.t`, *default:* `nil`) - The type of the amount.
      Acceptable values are:
      - "`itemPrice`"
      - "`orderPrice`"
      - "`refund`"
      - "`earlyRefund`"
      - "`courtesyRefund`"
      - "`returnRefund`"
      - "`returnLabelShippingFeeAmount`"
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :commission => GoogleApi.Content.V21.Model.SettlementTransactionAmountCommission.t(),
          :description => String.t(),
          :transactionAmount => GoogleApi.Content.V21.Model.Price.t(),
          :type => String.t()
        }

  # `field/2` comes from GoogleApi.Gax.ModelBase and declares the JSON
  # (de)serialization mapping for each attribute.
  field(:commission, as: GoogleApi.Content.V21.Model.SettlementTransactionAmountCommission)
  field(:description)
  field(:transactionAmount, as: GoogleApi.Content.V21.Model.Price)
  field(:type)
end
defimpl Poison.Decoder, for: GoogleApi.Content.V21.Model.SettlementTransactionAmount do
  # Delegates JSON decoding to the generated model's decode/2.
  def decode(value, options) do
    GoogleApi.Content.V21.Model.SettlementTransactionAmount.decode(value, options)
  end
end
defimpl Poison.Encoder, for: GoogleApi.Content.V21.Model.SettlementTransactionAmount do
  # Encoding is handled generically by Gax.ModelBase for all models.
  def encode(value, options) do
    GoogleApi.Gax.ModelBase.encode(value, options)
  end
end
| 34.018519 | 145 | 0.653783 |
e82169b986818d8845a1130a90045f0038f75bcd | 2,234 | ex | Elixir | lib/mix/tasks/phoenix.gen.component.ex | san650/phoenix_components | 17429dc60a8ed0e469fc828396378fad777533a4 | [
"MIT"
] | 31 | 2018-04-03T15:14:47.000Z | 2021-09-28T03:12:20.000Z | lib/mix/tasks/phoenix.gen.component.ex | mvdwg/phoenix_components | 17429dc60a8ed0e469fc828396378fad777533a4 | [
"MIT"
] | 13 | 2017-06-11T01:13:25.000Z | 2017-07-24T18:29:17.000Z | lib/mix/tasks/phoenix.gen.component.ex | mvdwg/phoenix_components | 17429dc60a8ed0e469fc828396378fad777533a4 | [
"MIT"
] | 4 | 2018-08-24T11:59:14.000Z | 2020-06-09T01:33:51.000Z | defmodule Mix.Tasks.Phoenix.Gen.Component do
use Mix.Task
import Mix.Generator
import PhoenixComponents.Helpers, only: [to_pascal_case: 1]
@shortdoc "Creates a new Phoenix component"
@moduledoc """
Creates a new Phoenix component.
It expects the name of the component as argument.
mix phoenix.component my_button
A component at web/components/my_button path will be created.
"""
@switches []
def run(argv) do
{_, argv} = OptionParser.parse!(argv, strict: @switches)
case argv do
[] ->
Mix.raise "Expected module name to be given, please use \"mix phx.component my_component\""
[name | _] ->
generate(component_name: name)
end
end
defp generate(component_name: name) do
config_path = Application.get_env(:phoenix_components, :path, "web")
path = Path.join([config_path, "components", name])
[module_base] =
:phoenix_components
|> Application.fetch_env!(:app_name)
|> Module.split
assigns = %{
name: name,
module_name: to_pascal_case(name),
module_base: module_base,
}
# Creates component
File.mkdir_p!(path)
create_file Path.join(path, "view.ex"), view_template(assigns)
create_file Path.join(path, "template.html.eex"), template_text()
# Creates test
test_path =
config_path
|> String.replace_prefix("lib", "test") # Phoenix >= 1.3
|> String.replace_prefix("web", "test") # Phoenix < 1.3
|> Kernel.<>("/components")
File.mkdir_p!(test_path)
create_file Path.join(test_path, "#{name}_test.exs"), test_template(assigns)
end
embed_template :view, """
defmodule <%= @module_base %>.Components.<%= @module_name %> do
use PhoenixComponents.Component
end
"""
embed_text :template, """
<p><%= @content %></p>
"""
embed_template :test, """
defmodule <%= @module_base %>.Components.<%= @module_name %>Test do
use ExUnit.Case
test "renders block" do
html = PhoenixComponents.View.component :<%= @name %> do
"Hello, World!"
end
assert raw(html) == "<p>Hello, World!</p>"
end
def raw({:safe, html}) do
html
|> to_string
|> String.trim
end
end
"""
end
| 24.282609 | 99 | 0.634736 |
e8217faba98853613ca9a4ab1960963d027a863e | 13,145 | ex | Elixir | lib/phoenix/router/helpers.ex | Mikeylikesit666/phoenix | 2226c7863033eb55f99c9a4f1000b1bca6708cb3 | [
"MIT"
] | null | null | null | lib/phoenix/router/helpers.ex | Mikeylikesit666/phoenix | 2226c7863033eb55f99c9a4f1000b1bca6708cb3 | [
"MIT"
] | 1 | 2020-11-08T08:30:10.000Z | 2020-11-08T08:30:10.000Z | lib/phoenix/router/helpers.ex | Mikeylikesit666/phoenix | 2226c7863033eb55f99c9a4f1000b1bca6708cb3 | [
"MIT"
] | null | null | null | defmodule Phoenix.Router.Helpers do
# Module that generates the routing helpers.
@moduledoc false
alias Phoenix.Router.Route
alias Plug.Conn
@anno (if :erlang.system_info(:otp_release) >= '19' do
[generated: true, unquote: false]
else
[line: -1, unquote: false]
end)
@doc """
Callback invoked by the url generated in each helper module.
"""
  def url(_router, %Conn{private: private}) do
    # Prefer an explicitly overridden router URL stashed in the conn's
    # private map (either a %URI{} or a plain binary); otherwise fall back
    # to the endpoint's configured URL.
    case private do
      %{phoenix_router_url: %URI{} = uri} -> URI.to_string(uri)
      %{phoenix_router_url: url} when is_binary(url) -> url
      %{phoenix_endpoint: endpoint} -> endpoint.url()
    end
  end

  # Sockets (or any struct exposing an :endpoint key) delegate to the endpoint.
  def url(_router, %_{endpoint: endpoint}) do
    endpoint.url()
  end

  # A bare %URI{} is rendered without its path component.
  def url(_router, %URI{} = uri) do
    URI.to_string(%{uri | path: nil})
  end

  def url(_router, endpoint) when is_atom(endpoint) do
    endpoint.url()
  end

  # Anything else is a caller error; raise with a descriptive message.
  def url(router, other) do
    raise ArgumentError,
      "expected a %Plug.Conn{}, a %Phoenix.Socket{}, a %URI{}, a struct with an :endpoint key, " <>
      "or a Phoenix.Endpoint when building url for #{inspect(router)}, got: #{inspect(other)}"
  end
@doc """
Callback invoked by path generated in each helper module.
"""
  def path(router, %Conn{} = conn, path) do
    # Resolution order: this router mounted as a forward, then a forward
    # registered on the conn's own router, then the conn's script name.
    conn
    |> build_own_forward_path(router, path)
    |> Kernel.||(build_conn_forward_path(conn, router, path))
    |> Kernel.||(path_with_script(path, conn.script_name))
  end

  # A bare %URI{} contributes its path (if any) as prefix.
  def path(_router, %URI{} = uri, path) do
    (uri.path || "") <> path
  end

  def path(_router, %_{endpoint: endpoint}, path) do
    endpoint.path(path)
  end

  def path(_router, endpoint, path) when is_atom(endpoint) do
    endpoint.path(path)
  end

  # Anything else is a caller error; raise with a descriptive message.
  def path(router, other, _path) do
    raise ArgumentError,
      "expected a %Plug.Conn{}, a %Phoenix.Socket{}, a %URI{}, a struct with an :endpoint key, " <>
      "or a Phoenix.Endpoint when building path for #{inspect(router)}, got: #{inspect(other)}"
  end
## Helpers
defp build_own_forward_path(conn, router, path) do
case Map.fetch(conn.private, router) do
{:ok, {local_script, _}} ->
path_with_script(path, local_script)
:error -> nil
end
end
defp build_conn_forward_path(%Conn{private: %{phoenix_router: phx_router}} = conn, router, path) do
case Map.fetch(conn.private, phx_router) do
{:ok, {script_name, forwards}} ->
case Map.fetch(forwards, router) do
{:ok, local_script} ->
path_with_script(path, script_name ++ local_script)
:error -> nil
end
:error -> nil
end
end
defp build_conn_forward_path(_conn, _router, _path), do: nil
defp path_with_script(path, []) do
path
end
defp path_with_script(path, script) do
"/" <> Enum.join(script, "/") <> path
end
@doc """
Generates the helper module for the given environment and routes.
"""
  # Builds and compiles the `<Router>.Helpers` module for the given env and
  # route list. Generates one `<helper>_path`/`<helper>_url` function family
  # per named route, plus catch-all clauses that raise descriptive errors.
  def define(env, routes, opts \\ []) do
    # Ignore any route without helper or forwards.
    routes =
      Enum.filter(routes, fn {route, _exprs} ->
        (not is_nil(route.helper) and not (route.kind == :forward))
      end)

    groups = Enum.group_by(routes, fn {route, _exprs} -> route.helper end)

    # Sort clauses by arity so shorter (more general) heads come first.
    impls = for {_helper, group} <- groups,
                {route, exprs} <- Enum.sort_by(group, fn {_, exprs} -> length(exprs.binding) end),
                do: defhelper(route, exprs)

    catch_all = Enum.map(groups, &defhelper_catch_all/1)

    # Quoted anonymous function invoked (below) once per route to define the
    # concrete helper clauses: with and without a trailing params argument.
    defhelper = quote @anno do
      defhelper = fn helper, vars, opts, bins, segs ->
        def unquote(:"#{helper}_path")(conn_or_endpoint, unquote(Macro.escape(opts)), unquote_splicing(vars)) do
          unquote(:"#{helper}_path")(conn_or_endpoint, unquote(Macro.escape(opts)), unquote_splicing(vars), [])
        end

        def unquote(:"#{helper}_path")(conn_or_endpoint, unquote(Macro.escape(opts)), unquote_splicing(vars), params)
            when is_list(params) or is_map(params) do
          path(conn_or_endpoint, segments(unquote(segs), params, unquote(bins),
            {unquote(helper), unquote(Macro.escape(opts)), unquote(Enum.map(vars, &Macro.to_string/1))}))
        end

        def unquote(:"#{helper}_url")(conn_or_endpoint, unquote(Macro.escape(opts)), unquote_splicing(vars)) do
          unquote(:"#{helper}_url")(conn_or_endpoint, unquote(Macro.escape(opts)), unquote_splicing(vars), [])
        end

        def unquote(:"#{helper}_url")(conn_or_endpoint, unquote(Macro.escape(opts)), unquote_splicing(vars), params)
            when is_list(params) or is_map(params) do
          url(conn_or_endpoint) <> unquote(:"#{helper}_path")(conn_or_endpoint, unquote(Macro.escape(opts)), unquote_splicing(vars), params)
        end
      end
    end

    # Quoted catch-all generator: defines wildcard clauses per distinct arity
    # that validate the conn/endpoint argument and then raise a helpful error.
    defcatch_all = quote @anno do
      defcatch_all = fn helper, lengths, routes ->
        for length <- lengths do
          binding = List.duplicate({:_, [], nil}, length)
          arity = length + 2

          def unquote(:"#{helper}_path")(conn_or_endpoint, action, unquote_splicing(binding)) do
            path(conn_or_endpoint, "/")
            raise_route_error(unquote(helper), :path, unquote(arity), action, [])
          end

          def unquote(:"#{helper}_path")(conn_or_endpoint, action, unquote_splicing(binding), params) do
            path(conn_or_endpoint, "/")
            raise_route_error(unquote(helper), :path, unquote(arity + 1), action, params)
          end

          def unquote(:"#{helper}_url")(conn_or_endpoint, action, unquote_splicing(binding)) do
            url(conn_or_endpoint)
            raise_route_error(unquote(helper), :url, unquote(arity), action, [])
          end

          def unquote(:"#{helper}_url")(conn_or_endpoint, action, unquote_splicing(binding), params) do
            url(conn_or_endpoint)
            raise_route_error(unquote(helper), :url, unquote(arity + 1), action, params)
          end
        end

        defp raise_route_error(unquote(helper), suffix, arity, action, params) do
          Phoenix.Router.Helpers.raise_route_error(__MODULE__, "#{unquote(helper)}_#{suffix}",
                                                   arity, action, unquote(Macro.escape(routes)), params)
        end
      end
    end

    docs = Keyword.get(opts, :docs, true)

    # It is in general bad practice to generate large chunks of code
    # inside quoted expressions. However, we can get away with this
    # here for two reasons:
    #
    #   * Helper modules are quite uncommon, typically one per project.
    #
    #   * We inline most of the code for performance, so it is specific
    #     per helper module anyway.
    #
    code = quote do
      @moduledoc unquote(docs) && """
      Module with named helpers generated from #{inspect unquote(env.module)}.
      """
      unquote(defhelper)
      unquote(defcatch_all)
      unquote_splicing(impls)
      unquote_splicing(catch_all)

      @doc """
      Generates the path information including any necessary prefix.
      """
      def path(data, path) do
        Phoenix.Router.Helpers.path(unquote(env.module), data, path)
      end

      @doc """
      Generates the connection/endpoint base URL without any path information.
      """
      def url(data) do
        Phoenix.Router.Helpers.url(unquote(env.module), data)
      end

      @doc """
      Generates path to a static asset given its file path.
      """
      def static_path(%Conn{private: private} = conn, path) do
        private.phoenix_endpoint.static_path(path)
      end

      def static_path(%_{endpoint: endpoint} = conn, path) do
        endpoint.static_path(path)
      end

      def static_path(endpoint, path) when is_atom(endpoint) do
        endpoint.static_path(path)
      end

      @doc """
      Generates url to a static asset given its file path.
      """
      def static_url(%Conn{private: private}, path) do
        case private do
          %{phoenix_static_url: %URI{} = uri} -> URI.to_string(uri) <> path
          %{phoenix_static_url: url} when is_binary(url) -> url <> path
          %{phoenix_endpoint: endpoint} -> static_url(endpoint, path)
        end
      end

      def static_url(%_{endpoint: endpoint} = conn, path) do
        static_url(endpoint, path)
      end

      def static_url(endpoint, path) when is_atom(endpoint) do
        endpoint.static_url <> endpoint.static_path(path)
      end

      @doc """
      Generates an integrity hash to a static asset given its file path.
      """
      def static_integrity(%Conn{private: %{phoenix_endpoint: endpoint}}, path) do
        static_integrity(endpoint, path)
      end

      def static_integrity(%_{endpoint: endpoint}, path) do
        static_integrity(endpoint, path)
      end

      def static_integrity(endpoint, path) when is_atom(endpoint) do
        endpoint.static_integrity(path)
      end

      # Functions used by generated helpers
      # Those are inlined here for performance
      defp to_param(int) when is_integer(int), do: Integer.to_string(int)
      defp to_param(bin) when is_binary(bin), do: bin
      defp to_param(false), do: "false"
      defp to_param(true), do: "true"
      defp to_param(data), do: Phoenix.Param.to_param(data)

      # Appends the query string built from `query`, dropping keys that are
      # already bound as path segments (`reserved`).
      defp segments(segments, [], _reserved, _opts) do
        segments
      end

      defp segments(segments, query, reserved, _opts) when is_list(query) or is_map(query) do
        dict = for {k, v} <- query,
               not ((k = to_string(k)) in reserved),
               do: {k, v}

        case Conn.Query.encode dict, &to_param/1 do
          "" -> segments
          o  -> segments <> "?" <> o
        end
      end
    end

    Module.create(Module.concat(env.module, Helpers), code, line: env.line, file: env.file)
  end
@doc """
Receives a route and returns the quoted definition for its helper function.
In case a helper name was not given, or route is forwarded, returns nil.
"""
  def defhelper(%Route{} = route, exprs) do
    helper = route.helper
    opts = route.plug_opts

    # binding is a list of {binding_name, var_ast} pairs.
    {bins, vars} = :lists.unzip(exprs.binding)
    segs = expand_segments(exprs.path)

    # Invoke the quoted `defhelper` closure defined inside define/3.
    quote do
      defhelper.(
        unquote(helper),
        unquote(Macro.escape(vars)),
        unquote(Macro.escape(opts)),
        unquote(Macro.escape(bins)),
        unquote(Macro.escape(segs))
      )
    end
  end
def defhelper_catch_all({helper, routes_and_exprs}) do
routes =
routes_and_exprs
|> Enum.map(fn {routes, exprs} -> {routes.plug_opts, Enum.map(exprs.binding, &elem(&1, 0))} end)
|> Enum.sort()
lengths =
routes
|> Enum.map(fn {_, bindings} -> length(bindings) end)
|> Enum.uniq()
quote do
defcatch_all.(
unquote(helper),
unquote(lengths),
unquote(Macro.escape(routes))
)
end
end
@doc """
Callback for generate router catch alls.
"""
  # Dispatches to the most helpful error: unknown action, wrong clause for a
  # known action, or an invalid (non-list/map) params argument.
  def raise_route_error(mod, fun, arity, action, routes, params) do
    cond do
      not Keyword.has_key?(routes, action) ->
        "no action #{inspect action} for #{inspect mod}.#{fun}/#{arity}"
        |> invalid_route_error(fun, routes)
      is_list(params) or is_map(params) ->
        # Params were fine, so the clause itself did not match (wrong arity
        # or wrong argument shapes).
        "no function clause for #{inspect mod}.#{fun}/#{arity} and action #{inspect action}"
        |> invalid_route_error(fun, routes)
      true ->
        invalid_param_error(mod, fun, arity, action, routes)
    end
  end
defp invalid_route_error(prelude, fun, routes) do
suggestions =
for {action, bindings} <- routes do
bindings = Enum.join([inspect(action) | bindings], ", ")
"\n #{fun}(conn_or_endpoint, #{bindings}, params \\\\ [])"
end
raise ArgumentError, "#{prelude}. The following actions/clauses are supported:\n#{suggestions}"
end
  # Raised when the trailing argument is not a keyword list or map; shows a
  # correct example call for the attempted action.
  defp invalid_param_error(mod, fun, arity, action, routes) do
    call_vars = Keyword.fetch!(routes, action)

    raise ArgumentError, """
    #{inspect(mod)}.#{fun}/#{arity} called with invalid params.
    The last argument to this function should be a keyword list or a map.
    For example:
    #{fun}(#{Enum.join(["conn", ":#{action}" | call_vars], ", ")}, page: 5, per_page: 10)
    It is possible you have called this function without defining the proper
    number of path segments in your router.
    """
  end
@doc """
Callback for properly encoding parameters in routes.
"""
def encode_param(str), do: URI.encode(str, &URI.char_unreserved?/1)
  # Turns a route's path segments into a quoted expression that builds the
  # path string at runtime, URI-encoding dynamic segments.
  defp expand_segments([]), do: "/"

  defp expand_segments(segments) when is_list(segments) do
    expand_segments(segments, "")
  end

  # Non-list: a quoted enumerable (e.g. a glob binding); join at runtime.
  defp expand_segments(segments) do
    quote(do: "/" <> Enum.map_join(unquote(segments), "/", &unquote(__MODULE__).encode_param/1))
  end

  # Trailing glob segment: encode and join the remaining values.
  defp expand_segments([{:|, _, [h, t]}], acc),
    do: quote(do: unquote(expand_segments([h], acc)) <> "/" <> Enum.map_join(unquote(t), "/", fn(s) -> URI.encode(s, &URI.char_unreserved?/1) end))

  # Static segment: concatenated at compile time.
  defp expand_segments([h|t], acc) when is_binary(h),
    do: expand_segments(t, quote(do: unquote(acc) <> unquote("/" <> h)))

  # Dynamic segment: converted via to_param/1 and URI-encoded at runtime.
  defp expand_segments([h|t], acc),
    do: expand_segments(t, quote(do: unquote(acc) <> "/" <> URI.encode(to_param(unquote(h)), &URI.char_unreserved?/1)))

  defp expand_segments([], acc),
    do: acc
end
| 32.944862 | 147 | 0.629593 |
e821a0c9880d2947eb996f912f23fd643192b88a | 37 | ex | Elixir | lib/avd_simple_controller.ex | jorgwel/avd_simple_controller | 0bc6c6ca39703b25039efd3cdabf817eec508c60 | [
"MIT"
] | 2 | 2019-01-14T07:56:25.000Z | 2019-02-08T11:35:40.000Z | lib/avd_simple_controller.ex | jorgwel/avd_simple_controller | 0bc6c6ca39703b25039efd3cdabf817eec508c60 | [
"MIT"
] | null | null | null | lib/avd_simple_controller.ex | jorgwel/avd_simple_controller | 0bc6c6ca39703b25039efd3cdabf817eec508c60 | [
"MIT"
] | null | null | null | defmodule AvdSimpleController do
end
| 12.333333 | 32 | 0.891892 |
e821b394ed95377c07f57e3ada98c0660c73e42f | 5,400 | exs | Elixir | test/instream/encoder/line_test.exs | qgadrian/instream | 3dc828fe476817d442b83dc5da58ceca56e9886f | [
"Apache-2.0"
] | null | null | null | test/instream/encoder/line_test.exs | qgadrian/instream | 3dc828fe476817d442b83dc5da58ceca56e9886f | [
"Apache-2.0"
] | null | null | null | test/instream/encoder/line_test.exs | qgadrian/instream | 3dc828fe476817d442b83dc5da58ceca56e9886f | [
"Apache-2.0"
] | null | null | null | defmodule Instream.Encoder.LineTest do
use ExUnit.Case
alias Instream.Encoder.Line
# This test suite is a direct port of:
# https://influxdb.com/docs/v0.9/write_protocols/write_syntax.html
  # Measurement + single field, no tags, no timestamp.
  test "simplest valid point" do
    expected = "disk_free value=442221834240i"

    points = [
      %{
        measurement: "disk_free",
        fields: %{
          value: 442_221_834_240
        },
        timestamp: nil
      }
    ]

    assert expected == Line.encode(points)
  end

  # Integer timestamp is appended after the field set.
  test "with timestamp" do
    expected = "disk_free value=442221834240i 1435362189575692182"

    points = [
      %{
        measurement: "disk_free",
        fields: %{
          value: 442_221_834_240
        },
        timestamp: 1_435_362_189_575_692_182
      }
    ]

    assert expected == Line.encode(points)
  end

  # Tags are comma-joined onto the measurement.
  test "with tags" do
    expected = "disk_free,hostname=server01,disk_type=SSD value=442221834240i"

    points = [
      %{
        measurement: "disk_free",
        fields: %{
          value: 442_221_834_240
        },
        tags: %{
          hostname: "server01",
          disk_type: "SSD"
        },
        timestamp: nil
      }
    ]

    assert expected == Line.encode(points)
  end
  # Full form: tags, fields and timestamp together.
  test "with tags and timestamp" do
    expected = "disk_free,hostname=server01,disk_type=SSD value=442221834240i 1435362189575692182"

    points = [
      %{
        measurement: "disk_free",
        fields: %{
          value: 442_221_834_240
        },
        tags: %{
          hostname: "server01",
          disk_type: "SSD"
        },
        timestamp: 1_435_362_189_575_692_182
      }
    ]

    assert expected == Line.encode(points)
  end

  # String field values are quoted; integers carry the "i" suffix.
  test "multiple fields" do
    expected = "disk_free free_space=442221834240i,disk_type=\"SSD\" 1435362189575692182"

    points = [
      %{
        measurement: "disk_free",
        fields: %{
          free_space: 442_221_834_240,
          disk_type: "SSD"
        },
        timestamp: 1_435_362_189_575_692_182
      }
    ]

    assert expected == Line.encode(points)
  end

  # Commas and spaces in measurements/tag values must be backslash-escaped.
  test "escaping commas and spaces" do
    expected =
      ~S|total\ disk\ free,volumes=/net\,/home\,/ value=442221834240i 1435362189575692182|

    points = [
      %{
        measurement: "total disk free",
        tags: %{
          volumes: "/net,/home,/"
        },
        fields: %{
          value: 442_221_834_240
        },
        timestamp: 1_435_362_189_575_692_182
      }
    ]

    assert expected == Line.encode(points)
  end
  # Equals signs in tag keys/values are escaped.
  test "escaping equals signs" do
    expected = ~S|disk_free,a\=b=y\=z value=442221834240i|

    points = [
      %{
        measurement: "disk_free",
        tags: %{
          "a=b" => "y=z"
        },
        fields: %{
          value: 442_221_834_240
        },
        timestamp: nil
      }
    ]

    assert expected == Line.encode(points)
  end

  # A lone backslash in a tag value needs no escaping.
  test "with backslash in tag value" do
    expected = ~S|disk_free,path=C:\Windows value=442221834240i|

    points = [
      %{
        measurement: "disk_free",
        tags: %{
          path: ~S|C:\Windows|
        },
        fields: %{
          value: 442_221_834_240
        },
        timestamp: nil
      }
    ]

    assert expected == Line.encode(points)
  end

  # Spaces in field keys are escaped; field string values stay quoted.
  test "escaping field key" do
    expected =
      ~S|disk_free working\ directories="C:\My Documents\Stuff for examples,C:\My Documents",value=442221834240i|

    points = [
      %{
        measurement: "disk_free",
        fields: %{
          "value" => 442_221_834_240,
          "working directories" => ~S|C:\My Documents\Stuff for examples,C:\My Documents|
        },
        timestamp: nil
      }
    ]

    assert expected == Line.encode(points)
  end

  # Combined escaping/quoting case from the line-protocol documentation.
  test "showing all escaping and quoting together" do
    expected =
      ~S|"measurement\ with\ quotes",tag\ key\ with\ spaces=tag\,value\,with"commas" field_key\\\\="string field value, only \" need be quoted"|

    points = [
      %{
        measurement: ~S|"measurement with quotes"|,
        tags: %{
          "tag key with spaces" => ~S|tag,value,with"commas"|
        },
        fields: %{
          ~S|field_key\\\\| => ~S|string field value, only " need be quoted|
        },
        timestamp: nil
      }
    ]

    assert expected == Line.encode(points)
  end
  # Multiple points are newline-separated.
  test "multiple points" do
    expected = "multiline value=\"first\"\nmultiline value=\"second\""

    points = [
      %{
        measurement: "multiline",
        fields: %{value: "first"}
      },
      %{
        measurement: "multiline",
        fields: %{value: "second"}
      }
    ]

    assert expected == Line.encode(points)
  end

  # A list tag value is comma-joined into a single tag value.
  test "with list of strings in tag value" do
    expected = ~S|disk_free,items=string_a,string_b value=442221834240i|

    points = [
      %{
        measurement: "disk_free",
        tags: %{
          items: ["string_a", "string_b"]
        },
        fields: %{
          value: 442_221_834_240
        },
        timestamp: nil
      }
    ]

    assert expected == Line.encode(points)
  end

  # Atom list entries are rendered the same way as strings.
  test "with list of atoms in tag value" do
    expected = ~S|disk_free,items=atom_a,atom_b value=442221834240i|

    points = [
      %{
        measurement: "disk_free",
        tags: %{
          items: [:atom_a, :atom_b]
        },
        fields: %{
          value: 442_221_834_240
        },
        timestamp: nil
      }
    ]

    assert expected == Line.encode(points)
  end
end
| 21.686747 | 144 | 0.552778 |
e821cabce7e43cdeaff772f764f8f7ec71b42b21 | 4,714 | ex | Elixir | day18/lib/day18.ex | bjorng/advent-of-code-2017 | bd58a36864a4d82809253770f8a6d0c4e02cb59a | [
"Apache-2.0"
] | null | null | null | day18/lib/day18.ex | bjorng/advent-of-code-2017 | bd58a36864a4d82809253770f8a6d0c4e02cb59a | [
"Apache-2.0"
] | null | null | null | day18/lib/day18.ex | bjorng/advent-of-code-2017 | bd58a36864a4d82809253770f8a6d0c4e02cb59a | [
"Apache-2.0"
] | null | null | null | defmodule Day18 do
def part1(input) do
Machine.new(input)
|> Machine.execute
|> Machine.get_output
|> elem(0)
|> List.last
end
def part2(input) do
machine0 = Machine.new(input, 0)
machine1 = Machine.new(input, 1)
machine0 = Machine.execute(machine0)
machine1 = Machine.execute(machine1)
machines = Enum.into([{0, machine0}, {1, machine1}], %{})
run_until_deadlock(machines)
end
  # Alternates rounds of message exchange and execution until every machine
  # has terminated or all are blocked with empty inputs (deadlock).
  # Only machine 1's sends are counted, per the puzzle statement.
  defp run_until_deadlock(machines, num_sends \\ 0) do
    new_sends = Map.fetch!(machines, 1)
    |> Machine.get_output
    |> elem(0)
    |> length
    num_sends = num_sends + new_sends
    # Move each machine's pending output into the other machine's input.
    machines = do_io(machines)
    case all_terminated?(machines) or no_input?(machines) do
      true ->
        num_sends
      false ->
        machines = resume_all(machines)
        run_until_deadlock(machines, num_sends)
    end
  end
  # Drains each machine's output queue and appends it to the other machine's
  # input queue. Relies on there being exactly two machines with ids 0 and 1
  # (rem(id + 1, 2) picks the peer).
  defp do_io(machines) do
    Enum.reduce(machines, machines, fn {id, _}, acc ->
      other_id = rem(id + 1, 2)
      %{^id => machine, ^other_id => other_machine} = acc
      {output, machine} = Machine.get_output(machine)
      other_machine = Machine.push_input(other_machine, output)
      %{acc | id => machine, other_id => other_machine}
    end)
  end
defp all_terminated?(machines) do
Enum.all?(machines, fn {_, machine} ->
Machine.terminated?(machine)
end)
end
defp no_input?(machines) do
Enum.all?(machines, fn {_, machine} ->
Machine.no_input?(machine)
end)
end
defp resume_all(machines) do
Enum.into(machines, %{}, fn {id, machine} ->
{id, Machine.resume(machine)}
end)
end
end
defmodule Machine do
  # Virtual machine for the Day 18 "Duet" assembly language.
  #
  # A machine is a plain map holding, in one namespace:
  #   * integer keys -> decoded instructions (address -> op tuple)
  #   * atom keys    -> registers (unset registers read as 0), plus the
  #                     bookkeeping keys :p (program id), :input, :output
  #                     (queues) and :ip (saved pointer while blocked).

  # Decodes the textual program into an executable machine map. Operands
  # parse as integers when possible, otherwise become register atoms.
  def new(program, id \\ 0) do
    program
    |> Stream.with_index
    |> Enum.into(%{}, fn {instr, addr} ->
      [op | operands] = String.split(instr, " ")
      op = String.to_atom(op)
      operands = Enum.map(operands, fn operand ->
        case Integer.parse(operand) do
          :error ->
            String.to_atom(operand)
          {int, ""} ->
            int
        end
      end)
      {addr, List.to_tuple([op | operands])}
    end)
    |> init_machine(id)
  end

  # Seeds bookkeeping keys: the :p register holds the program id; input and
  # output start as empty queues.
  defp init_machine(machine, id) do
    q = :queue.new()
    [{:p, id}, {:input, q}, {:output, q}]
    |> Enum.into(machine)
  end

  # Appends a list of values to the machine's input queue.
  def push_input(machine, input) do
    q = Enum.reduce(input, machine.input, fn char, q ->
      :queue.in(char, q)
    end)
    Map.put(machine, :input, q)
  end

  def no_input?(machine) do
    :queue.is_empty(machine.input)
  end

  # Drains the output queue, returning {output_list, machine_with_empty_output}.
  def get_output(machine) do
    output = :queue.to_list(machine.output)
    machine = %{machine | output: :queue.new()}
    {output, machine}
  end

  # Terminated machines have no saved :ip (execution ran off the program);
  # blocked machines keep :ip so resume/1 can continue.
  def terminated?(machine) do
    Map.get(machine, :ip, nil) == nil
  end

  # Continues execution from the saved instruction pointer, if any.
  def resume(machine) do
    case Map.get(machine, :ip, nil) do
      nil ->
        machine
      ip ->
        execute(machine, ip)
    end
  end

  # Interprets instructions starting at `ip` until the program either runs
  # past its last instruction (terminate: drop :ip) or blocks on `rcv` with
  # an empty input queue (suspend: save :ip).
  def execute(machine, ip \\ 0) do
    case Map.get(machine, ip, nil) do
      nil ->
        Map.delete(machine, :ip)
      {:snd, val} ->
        val = get_value(machine, val)
        output = Map.fetch!(machine, :output)
        output = :queue.in(val, output)
        machine = Map.put(machine, :output, output)
        execute(machine, ip + 1)
      {:set, dst, src} ->
        machine = set_value(machine, dst, get_value(machine, src))
        execute(machine, ip + 1)
      {:add, dst, src} ->
        execute_arith_op(machine, ip, dst, src, &+/2)
      {:mul, dst, src} ->
        execute_arith_op(machine, ip, dst, src, &*/2)
      {:mod, dst, src} ->
        execute_arith_op(machine, ip, dst, src, &modulo/2)
      {:rcv, dst} ->
        input = Map.fetch!(machine, :input)
        case :queue.out(input) do
          {:empty, _} ->
            Map.put(machine, :ip, ip)
          {{:value, value}, input} ->
            machine = set_value(machine, dst, value)
            machine = Map.put(machine, :input, input)
            execute(machine, ip + 1)
        end
      {:jgz, src, offset} ->
        if get_value(machine, src) > 0 do
          execute(machine, ip + get_value(machine, offset))
        else
          execute(machine, ip + 1)
        end
    end
  end

  # Mathematical modulo: the result is always non-negative for positive m
  # (unlike Kernel.rem/2, which follows the dividend's sign).
  defp modulo(n, m) do
    case rem(n, m) do
      mod when mod >= 0 -> mod
      mod -> mod + m
    end
  end

  defp execute_arith_op(machine, ip, dst, src, op) do
    value = op.(get_value(machine, dst), get_value(machine, src))
    machine = set_value(machine, dst, value)
    execute(machine, ip + 1)
  end

  # Literal integers evaluate to themselves; atoms are register reads
  # (unset registers default to 0).
  defp get_value(machine, value) do
    cond do
      is_integer(value) -> value
      is_atom(value) -> Map.get(machine, value, 0)
    end
  end

  defp set_value(machine, register, value) when is_atom(register) do
    Map.put(machine, register, value)
  end
end
| 25.759563 | 68 | 0.58252 |
e821cfe5ef2ece53f8f8e8ca1f72ea20db23b735 | 8,246 | ex | Elixir | lib/thrift/parser/file_group.ex | CultivateHQ/elixir-thrift | d285ddc0f134afc6f7d8c5d0b504a8d27157da9a | [
"Apache-2.0"
] | null | null | null | lib/thrift/parser/file_group.ex | CultivateHQ/elixir-thrift | d285ddc0f134afc6f7d8c5d0b504a8d27157da9a | [
"Apache-2.0"
] | 1 | 2018-03-08T18:17:35.000Z | 2021-06-10T15:34:13.000Z | lib/thrift/parser/file_group.ex | CultivateHQ/elixir-thrift | d285ddc0f134afc6f7d8c5d0b504a8d27157da9a | [
"Apache-2.0"
] | null | null | null | defmodule Thrift.Parser.FileGroup do
@moduledoc """
Represents a group of parsed files.
When you parse a file, it might include other thrift files. These files are
in turn accumulated and parsed and added to this module. Additionally, this
module allows resolution of the names of Structs / Enums / Unions etc across
files.
"""
alias Thrift.Parser
alias Thrift.Parser.{
FileGroup,
FileRef,
ParsedFile,
Resolver
}
alias Thrift.AST.{
Constant,
Exception,
Field,
Namespace,
Schema,
Service,
Struct,
TEnum,
TypeRef,
Union,
ValueRef
}
@type t :: %FileGroup{
initial_file: Path.t(),
parsed_files: %{FileRef.thrift_include() => %ParsedFile{}},
schemas: %{FileRef.thrift_include() => %Schema{}},
ns_mappings: %{atom => %Namespace{}},
opts: Parser.opts()
}
@enforce_keys [:initial_file, :opts]
defstruct initial_file: nil,
parsed_files: %{},
schemas: %{},
resolutions: %{},
immutable_resolutions: %{},
ns_mappings: %{},
opts: Keyword.new()
@spec new(Path.t(), Parser.opts()) :: t
def new(initial_file, opts \\ []) do
%FileGroup{initial_file: initial_file, opts: opts}
end
@spec add(t, ParsedFile.t()) :: t
def add(file_group, parsed_file) do
file_group = add_includes(file_group, parsed_file)
new_parsed_files = Map.put(file_group.parsed_files, parsed_file.name, parsed_file)
new_schemas = Map.put(file_group.schemas, parsed_file.name, parsed_file.schema)
resolutions = Resolver.add(file_group.resolutions, parsed_file)
%__MODULE__{
file_group
| parsed_files: new_parsed_files,
schemas: new_schemas,
immutable_resolutions: resolutions,
resolutions: resolutions
}
end
defp add_includes(%FileGroup{} = group, %ParsedFile{schema: schema, file_ref: file_ref}) do
# Search for included files in the current directory (relative to the
# parsed file) as well as any additionally configured include paths.
include_paths = [Path.dirname(file_ref.path) | Keyword.get(group.opts, :include_paths, [])]
Enum.reduce(schema.includes, group, fn include, group ->
parsed_file =
include.path
|> find_include(include_paths)
|> FileRef.new()
|> ParsedFile.new()
add(group, parsed_file)
end)
end
# Attempt to locate `path` in one of `dirs`, returning the path of the
# first match on success or the original `path` if not match is found.
defp find_include(path, dirs) do
dirs
|> Enum.map(&Path.join(&1, path))
|> Enum.find(path, &File.exists?/1)
end
@spec set_current_module(t, atom) :: t
def set_current_module(file_group, module) do
# since in a file, we can refer to things defined in that file in a non-qualified
# way, we add unqualified names to the resolutions map.
current_module = Atom.to_string(module)
resolutions =
file_group.immutable_resolutions
|> Enum.flat_map(fn {name, v} = original_mapping ->
case String.split(Atom.to_string(name), ".") do
[^current_module, enum_name, value_name] ->
[{:"#{enum_name}.#{value_name}", v}, original_mapping]
[^current_module, rest] ->
[{:"#{rest}", v}, original_mapping]
_ ->
[original_mapping]
end
end)
|> Map.new()
default_namespace =
if file_group.opts[:namespace] do
%Namespace{:name => :elixir, :path => file_group.opts[:namespace]}
end
ns_mappings = build_ns_mappings(file_group.schemas, default_namespace)
%FileGroup{file_group | resolutions: resolutions, ns_mappings: ns_mappings}
end
@spec resolve(t, any) :: any
for type <- Thrift.primitive_names() do
def resolve(_, unquote(type)), do: unquote(type)
end
def resolve(%FileGroup{} = group, %Field{type: type} = field) do
%Field{field | type: resolve(group, type)}
end
def resolve(%FileGroup{resolutions: resolutions} = group, %TypeRef{referenced_type: type_name}) do
resolve(group, resolutions[type_name])
end
def resolve(%FileGroup{resolutions: resolutions} = group, %ValueRef{
referenced_value: value_name
}) do
resolve(group, resolutions[value_name])
end
def resolve(%FileGroup{resolutions: resolutions} = group, path)
when is_atom(path) and not is_nil(path) do
# this can resolve local mappings like :Weather or
# remote mappings like :"common.Weather"
resolve(group, resolutions[path])
end
def resolve(%FileGroup{} = group, {:list, elem_type}) do
{:list, resolve(group, elem_type)}
end
def resolve(%FileGroup{} = group, {:set, elem_type}) do
{:set, resolve(group, elem_type)}
end
def resolve(%FileGroup{} = group, {:map, {key_type, val_type}}) do
{:map, {resolve(group, key_type), resolve(group, val_type)}}
end
def resolve(_, other) do
other
end
@spec dest_module(t, any) :: atom
def dest_module(file_group, %Struct{name: name}) do
dest_module(file_group, name)
end
def dest_module(file_group, %Union{name: name}) do
dest_module(file_group, name)
end
def dest_module(file_group, %Exception{name: name}) do
dest_module(file_group, name)
end
def dest_module(file_group, %TEnum{name: name}) do
dest_module(file_group, name)
end
def dest_module(file_group, %Service{name: name}) do
dest_module(file_group, name)
end
def dest_module(file_group, Constant) do
# Default to naming the constants module after the namespaced, camelized
# basename of its file. For foo.thrift, this would be `foo.Foo`.
base = Path.basename(file_group.initial_file, ".thrift")
default = base <> "." <> Macro.camelize(base)
# However, if we're already going to generate an equivalent module name
# (ignoring case), use that instead to avoid generating two modules with
# the same spellings but different cases.
schema = file_group.schemas[base]
symbols =
[
Enum.map(schema.enums, fn {_, s} -> s.name end),
Enum.map(schema.exceptions, fn {_, s} -> s.name end),
Enum.map(schema.structs, fn {_, s} -> s.name end),
Enum.map(schema.services, fn {_, s} -> s.name end),
Enum.map(schema.unions, fn {_, s} -> s.name end)
]
|> List.flatten()
|> Enum.map(&Atom.to_string/1)
target = String.downcase(default)
name = Enum.find(symbols, default, fn s -> String.downcase(s) == target end)
dest_module(file_group, String.to_atom(name))
end
def dest_module(file_group, name) do
name_parts =
name
|> Atom.to_string()
|> String.split(".", parts: 2)
module_name = name_parts |> Enum.at(0) |> String.to_atom()
struct_name = name_parts |> Enum.at(1) |> initialcase
case file_group.ns_mappings[module_name] do
nil ->
Module.concat([struct_name])
namespace = %Namespace{} ->
namespace_parts =
namespace.path
|> String.split(".")
|> Enum.map(&Macro.camelize/1)
Module.concat(namespace_parts ++ [struct_name])
end
end
# Capitalize just the initial character of a string, leaving the rest of the
# string's characters intact.
@spec initialcase(String.t()) :: String.t()
defp initialcase(string) when is_binary(string) do
{first, rest} = String.next_grapheme(string)
String.upcase(first) <> rest
end
# check if the given model is defined in the root file of the file group
# this should eventually be replaced if we find a way to only parse files
# once
@spec own_constant?(t, Constant.t()) :: boolean
def own_constant?(file_group, %Constant{} = constant) do
basename = Path.basename(file_group.initial_file, ".thrift")
initial_file = file_group.parsed_files[basename]
Enum.member?(Map.keys(initial_file.schema.constants), constant.name)
end
defp build_ns_mappings(schemas, default_namespace) do
schemas
|> Enum.map(fn {module_name, %Schema{namespaces: ns}} ->
namespace = Map.get(ns, :elixir, default_namespace)
{String.to_atom(module_name), namespace}
end)
|> Map.new()
end
end
| 30.540741 | 100 | 0.655106 |
e821dea8f89ea69a7886aedc0af90031b8e00efb | 725 | ex | Elixir | lib/nookal/address.ex | theo-agilelab/nookal-elixir | 09db7cc48c48ed1e714fb74c5c38c9a21e7e189a | [
"MIT"
] | 1 | 2020-06-11T07:57:06.000Z | 2020-06-11T07:57:06.000Z | lib/nookal/address.ex | theo-agilelab/nookal-elixir | 09db7cc48c48ed1e714fb74c5c38c9a21e7e189a | [
"MIT"
] | null | null | null | lib/nookal/address.ex | theo-agilelab/nookal-elixir | 09db7cc48c48ed1e714fb74c5c38c9a21e7e189a | [
"MIT"
] | 1 | 2019-09-05T08:30:48.000Z | 2019-09-05T08:30:48.000Z | defmodule Nookal.Address do
  import Nookal.Utils

  # Postal address attached to Nookal API payloads; all fields optional strings.
  @type t() :: %__MODULE__{
          line1: String.t(),
          line2: String.t(),
          line3: String.t(),
          city: String.t(),
          state: String.t(),
          country: String.t(),
          postcode: String.t()
        }

  defstruct [:line1, :line2, :line3, :city, :state, :country, :postcode]

  # {struct_key, payload_key, type} triples consumed by extract_fields/3.
  @mapping [
    {:line1, "AddressLine1", :string},
    {:line2, "AddressLine2", :string},
    {:line3, "AddressLine3", :string},
    {:city, "City", :string},
    {:state, "State", :string},
    {:country, "Country", :string},
    {:postcode, "Postcode", :string}
  ]

  # Builds an Address struct from a raw API payload map using @mapping.
  def new(payload) do
    extract_fields(@mapping, payload, %__MODULE__{})
  end
end
| 24.166667 | 72 | 0.550345 |
e821f71bcc24edd069c19aa4ceed8280138c3ac0 | 3,014 | exs | Elixir | apps/ewallet/test/ewallet/fetchers/transaction_consumption_fetcher_test.exs | jimpeebles/ewallet | ad4a9750ec8dc5adc4c0dfe6c22f0ef760825405 | [
"Apache-2.0"
] | null | null | null | apps/ewallet/test/ewallet/fetchers/transaction_consumption_fetcher_test.exs | jimpeebles/ewallet | ad4a9750ec8dc5adc4c0dfe6c22f0ef760825405 | [
"Apache-2.0"
] | null | null | null | apps/ewallet/test/ewallet/fetchers/transaction_consumption_fetcher_test.exs | jimpeebles/ewallet | ad4a9750ec8dc5adc4c0dfe6c22f0ef760825405 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 OmiseGO Pte Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
defmodule EWallet.TransactionConsumptionFetcherTest do
  use EWallet.DBCase, async: true
  import EWalletDB.Factory
  alias ActivityLogger.System

  alias EWallet.{
    TestEndpoint,
    TransactionConsumptionConsumerGate,
    TransactionConsumptionFetcher
  }

  alias EWalletDB.{Account, TransactionConsumption, User}

  setup do
    # Start the test endpoint so consumption events have a subscriber, and
    # block on_exit until the process is fully down before the next test.
    {:ok, pid} = TestEndpoint.start_link()

    on_exit(fn ->
      ref = Process.monitor(pid)
      assert_receive {:DOWN, ^ref, _, _, _}
    end)

    token = insert(:token)
    {:ok, receiver} = :user |> params_for() |> User.insert()
    {:ok, sender} = :user |> params_for() |> User.insert()
    account = Account.get_master_account()
    receiver_wallet = User.get_primary_wallet(receiver)
    sender_wallet = User.get_primary_wallet(sender)
    account_wallet = Account.get_primary_wallet(account)

    # Mint so the request below can actually be consumed.
    mint!(token)

    transaction_request =
      insert(
        :transaction_request,
        type: "receive",
        token_uuid: token.uuid,
        user_uuid: receiver.uuid,
        wallet: receiver_wallet,
        amount: 100_000 * token.subunit_to_unit
      )

    %{
      sender: sender,
      receiver: receiver,
      account: account,
      token: token,
      receiver_wallet: receiver_wallet,
      sender_wallet: sender_wallet,
      account_wallet: account_wallet,
      request: transaction_request
    }
  end

  describe "get/1" do
    test "returns the consumption when given valid ID", meta do
      initialize_wallet(meta.sender_wallet, 200_000, meta.token)

      # Create a real consumption first so there is something to fetch.
      {res, consumption} =
        TransactionConsumptionConsumerGate.consume(meta.sender, %{
          "formatted_transaction_request_id" => meta.request.id,
          "correlation_id" => nil,
          "amount" => nil,
          "address" => nil,
          "metadata" => nil,
          "idempotency_token" => "123",
          "token_id" => nil,
          "originator" => %System{}
        })

      assert res == :ok
      assert {:ok, consumption} = TransactionConsumptionFetcher.get(consumption.id)
      assert %TransactionConsumption{} = consumption
    end

    test "returns nil when given nil" do
      assert TransactionConsumptionFetcher.get(nil) ==
               {:error, :transaction_consumption_not_found}
    end

    test "returns nil when given invalid UUID" do
      assert TransactionConsumptionFetcher.get("123") ==
               {:error, :transaction_consumption_not_found}
    end
  end
end
| 30.14 | 83 | 0.671533 |
e821f7c6d0b916f7e3efc328de88ecb44ce69e93 | 8,113 | ex | Elixir | lib/sanbase_web/graphql/schema/types/user_types.ex | santiment/sanbase2 | 9ef6e2dd1e377744a6d2bba570ea6bd477a1db31 | [
"MIT"
] | 81 | 2017-11-20T01:20:22.000Z | 2022-03-05T12:04:25.000Z | lib/sanbase_web/graphql/schema/types/user_types.ex | santiment/sanbase2 | 9ef6e2dd1e377744a6d2bba570ea6bd477a1db31 | [
"MIT"
] | 359 | 2017-10-15T14:40:53.000Z | 2022-01-25T13:34:20.000Z | lib/sanbase_web/graphql/schema/types/user_types.ex | santiment/sanbase2 | 9ef6e2dd1e377744a6d2bba570ea6bd477a1db31 | [
"MIT"
] | 16 | 2017-11-19T13:57:40.000Z | 2022-02-07T08:13:02.000Z | defmodule SanbaseWeb.Graphql.UserTypes do
use Absinthe.Schema.Notation
import SanbaseWeb.Graphql.Cache, only: [cache_resolve: 1, cache_resolve: 2]
import Absinthe.Resolution.Helpers
alias SanbaseWeb.Graphql.SanbaseRepo
alias SanbaseWeb.Graphql.Resolvers.{
ApikeyResolver,
UserResolver,
UserChartConfigurationResolver,
EthAccountResolver,
UserSettingsResolver,
UserTriggerResolver,
UserListResolver,
InsightResolver,
BillingResolver
}
enum :api_call_auth_method do
value(:all)
value(:apikey)
value(:basic)
value(:jwt)
end
input_object :user_selector_input_object do
field(:id, :id)
field(:email, :string)
field(:username, :string)
end
object :public_user do
field(:id, non_null(:id))
field :email, :string do
resolve(&UserResolver.email/3)
end
field(:username, :string)
field(:avatar_url, :string)
field :triggers, list_of(:trigger) do
cache_resolve(&UserTriggerResolver.public_triggers/3, ttl: 60)
end
field :chart_configurations, list_of(:chart_configuration) do
cache_resolve(&UserChartConfigurationResolver.public_chart_configurations/3, ttl: 60)
end
field :following, :follower_data do
resolve(&UserResolver.following/3)
end
field :followers, :follower_data do
resolve(&UserResolver.followers/3)
end
field :insights, list_of(:post) do
arg(:is_pulse, :boolean)
arg(:is_paywall_required, :boolean)
arg(:from, :datetime)
arg(:to, :datetime)
arg(:page, :integer, default_value: 1)
arg(:page_size, :integer, default_value: 20)
cache_resolve(&InsightResolver.public_insights/3, ttl: 60)
end
field :insights_count, :public_insights_count do
cache_resolve(&InsightResolver.insights_count/3, ttl: 60)
end
field :watchlists, list_of(:user_list) do
arg(:type, :watchlist_type_enum, default_value: :project)
cache_resolve(&UserListResolver.public_watchlists/3, ttl: 60)
end
end
object :user do
field(:id, non_null(:id))
field(:email, :string)
field(:username, :string)
field(:consent_id, :string)
field(:privacy_policy_accepted, :boolean)
field(:marketing_accepted, :boolean)
field(:first_login, :boolean, default_value: false)
field(:avatar_url, :string)
field(:stripe_customer_id, :string)
field :permissions, :access_level do
resolve(&UserResolver.permissions/3)
end
field :san_balance, :float do
cache_resolve(&UserResolver.san_balance/3)
end
@desc ~s"""
A list of ethereum addresses owned by the user. A special message needs to be
signed in order to be confirmed that the address belongs to the user.
The combined SAN balance of the addresses is used for the `san_balance`
"""
field(:eth_accounts, list_of(:eth_account), resolve: dataloader(SanbaseRepo))
@desc ~s"""
A list of api keys. They are used by providing `Authorization` header to the
HTTP request with the value `Apikey <apikey>` (case sensitive). To generate
or revoke api keys check the `generateApikey` and `revokeApikey` mutations.
Using an apikey gives access to the queries, but not to the mutations. Every
api key has the same SAN balance and subsription as the whole account
"""
field :apikeys, list_of(:string) do
resolve(&ApikeyResolver.apikeys_list/3)
end
field :settings, :user_settings do
resolve(&UserSettingsResolver.settings/3)
end
field :triggers, list_of(:trigger) do
resolve(&UserTriggerResolver.triggers/3)
end
field :chart_configurations, list_of(:chart_configuration) do
resolve(&UserChartConfigurationResolver.chart_configurations/3)
end
field :following, :follower_data do
resolve(&UserResolver.following/3)
end
field :followers, :follower_data do
resolve(&UserResolver.followers/3)
end
field :following2, :follower_data2 do
resolve(&UserResolver.following2/3)
end
field :followers2, :follower_data2 do
resolve(&UserResolver.followers2/3)
end
field :insights_count, :insights_count do
cache_resolve(&InsightResolver.insights_count/3, ttl: 60)
end
field :insights, list_of(:post) do
arg(:is_pulse, :boolean)
arg(:is_paywall_required, :boolean)
arg(:page, :integer, default_value: 1)
arg(:page_size, :integer, default_value: 20)
resolve(&InsightResolver.insights/3)
end
field :watchlists, list_of(:user_list) do
arg(:type, :watchlist_type_enum, default_value: :project)
cache_resolve(&UserListResolver.watchlists/3, ttl: 60)
end
field :subscriptions, list_of(:subscription_plan) do
resolve(&BillingResolver.subscriptions/3)
end
field :is_eligible_for_sanbase_trial, :boolean do
resolve(&BillingResolver.eligible_for_sanbase_trial?/3)
end
@desc ~s"""
Timeseries data of api calls count per interval in a given time range.
"""
field :api_calls_history, list_of(:api_call_data) do
arg(:from, non_null(:datetime))
arg(:to, non_null(:datetime))
arg(:auth_method, :api_call_auth_method, default_value: :apikey)
arg(:interval, :interval, default_value: "1d")
cache_resolve(&UserResolver.api_calls_history/3)
end
@desc ~s"""
Timeseries data of api calls count per interval in a given time range.
"""
field :api_calls_count, :integer do
arg(:from, non_null(:datetime))
arg(:to, non_null(:datetime))
arg(:auth_method, :api_call_auth_method, default_value: :apikey)
cache_resolve(&UserResolver.api_calls_count/3)
end
field :relays_quota, :relays_quota do
resolve(&UserResolver.relays_quota/3)
end
end
object :relays_quota do
field(:global_relays_left, :integer)
field(:global_relays_quota, :integer)
field(:global_relays_used, :integer)
field(:proposal_relays_left, :integer)
field(:proposal_relays_quota, :integer)
field(:proposal_relays_used, :integer)
field(:vote_relays_left, :integer)
field(:vote_relays_quota, :integer)
field(:vote_relays_used, :integer)
field(:can_relay_proposal, :boolean)
field(:can_relay_vote, :boolean)
end
object :public_insights_count do
field(:total_count, :integer)
field(:paywall_count, :integer)
field(:pulse_count, :integer)
end
object :insights_count do
field(:total_count, :integer)
field(:draft_count, :integer)
field(:paywall_count, :integer)
field(:pulse_count, :integer)
end
object :access_restriction do
field(:type, non_null(:string))
field(:name, non_null(:string))
field(:min_interval, :string)
field(:is_restricted, non_null(:boolean))
field(:is_accessible, non_null(:boolean))
field(:restricted_from, :datetime)
field(:restricted_to, :datetime)
end
object :api_call_data do
field(:datetime, non_null(:datetime))
field(:api_calls_count, non_null(:integer))
end
@desc ~s"""
A type describing an Ethereum address. Beside the address itself it returns
the SAN balance of that address.
"""
object :eth_account do
field(:address, non_null(:string))
field :san_balance, non_null(:float) do
cache_resolve(&EthAccountResolver.san_balance/3)
end
end
object :post_author do
field(:id, non_null(:id))
field(:avatar_url, :string)
field(:username, :string)
end
object :access_level do
field(:api, non_null(:boolean))
field(:sanbase, non_null(:boolean))
field(:spreadsheet, non_null(:boolean))
field(:sandata, non_null(:boolean))
end
object :follower_data do
field(:count, non_null(:integer))
field(:users, non_null(list_of(:public_user)))
end
object :follower_data2 do
field(:count, non_null(:integer))
field(:users, non_null(list_of(:user_follower)))
end
object :user_follower do
field(:user_id, non_null(:id))
field(:follower_id, non_null(:id))
field(:is_notification_disabled, :boolean)
field(:user, :public_user)
end
end
| 28.268293 | 91 | 0.698632 |
e822027f697fa4d982b3e1b3b1cd8c030aab1afc | 1,176 | ex | Elixir | lib/wechat/tools/auth.ex | sjava/ex_wechat | 521e119be8cc960453002c099d57f7474bfd7735 | [
"MIT"
] | null | null | null | lib/wechat/tools/auth.ex | sjava/ex_wechat | 521e119be8cc960453002c099d57f7474bfd7735 | [
"MIT"
] | null | null | null | lib/wechat/tools/auth.ex | sjava/ex_wechat | 521e119be8cc960453002c099d57f7474bfd7735 | [
"MIT"
] | null | null | null | defmodule Wechat.Auth do
alias Wechat.Http
@api_endpoint "https://api.weixin.qq.com"
@api_path "/sns/oauth2/access_token"
@userinfo_path "/sns/userinfo"
def info(code, options) do
callback = &Http.parse_wechat_site/1
# fetch for openid
result = Http.get(request_auth_opts(code, options), callback)
case result do
%{errcode: _code} ->
{:error, "bad code or code has been used"}
_ ->
if options[:scope] == "snsapi_base" do
{:ok, result}
else
{:ok, Http.get(request_info_opts(result), callback)}
end
end
end
defp request_auth_opts(code, options) do
api = options[:api] || Wechat.Api
appid = apply(api, :appid, [])
secret = apply(api, :secret, [])
[url: api_url(), params: [appid: appid, secret: secret,
code: code, grant_type: "authorization_code"]]
end
defp request_info_opts(result) do
[ url: userinfo_url(),
params: [access_token: result.access_token, openid: result.openid,
lang: "zh_CN"]]
end
defp api_url do
@api_endpoint <> @api_path
end
defp userinfo_url() do
@api_endpoint <> @userinfo_path
end
end
| 23.058824 | 72 | 0.630102 |
e8221655a1fe30fb62a3a1e3c4ed75dfa7934972 | 1,689 | ex | Elixir | clients/content/lib/google_api/content/v2/model/lia_on_display_to_order_settings.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | null | null | null | clients/content/lib/google_api/content/v2/model/lia_on_display_to_order_settings.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | null | null | null | clients/content/lib/google_api/content/v2/model/lia_on_display_to_order_settings.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | 1 | 2020-10-04T10:12:44.000Z | 2020-10-04T10:12:44.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Content.V2.Model.LiaOnDisplayToOrderSettings do
@moduledoc """
## Attributes
* `shippingCostPolicyUrl` (*type:* `String.t`, *default:* `nil`) - Shipping cost and policy URL.
* `status` (*type:* `String.t`, *default:* `nil`) - The status of the ?On display to order? feature.
Acceptable values are:
- "`active`"
- "`inactive`"
- "`pending`"
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:shippingCostPolicyUrl => String.t(),
:status => String.t()
}
field(:shippingCostPolicyUrl)
field(:status)
end
defimpl Poison.Decoder, for: GoogleApi.Content.V2.Model.LiaOnDisplayToOrderSettings do
def decode(value, options) do
GoogleApi.Content.V2.Model.LiaOnDisplayToOrderSettings.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.Content.V2.Model.LiaOnDisplayToOrderSettings do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 30.709091 | 104 | 0.714032 |
e8221bcaf4b05f80221b096bb43f5e6ab023b722 | 5,866 | exs | Elixir | elixir/day14/day14-2.exs | teemid/aoc2021 | 9cfea309e01fc463ffa20ca64a4a6468ffdc23d4 | [
"MIT"
] | null | null | null | elixir/day14/day14-2.exs | teemid/aoc2021 | 9cfea309e01fc463ffa20ca64a4a6468ffdc23d4 | [
"MIT"
] | null | null | null | elixir/day14/day14-2.exs | teemid/aoc2021 | 9cfea309e01fc463ffa20ca64a4a6468ffdc23d4 | [
"MIT"
] | null | null | null | defmodule Day14 do
def read_input(filename) do
parts = File.open!(filename)
|> IO.read(:all)
|> String.split("\n\n")
polymer_template = Enum.at(parts, 0)
mapping = parts
|> Enum.at(1)
|> String.split("\n", trim: true)
|> Enum.map(&Day14.parse_line/1)
|> Enum.reduce(%{}, fn {key, value}, acc -> add_to_mapping(acc, key, value) end)
{polymer_template, mapping}
end
def parse_line(line) do
parts = String.split(line, " -> ", trim: true)
{Enum.at(parts, 0), Enum.at(parts, 1)}
end
defp add_to_mapping(acc, key, value), do: Map.put(acc, key, value)
def task1(polymer_template, mapping, step_count) do
character_frequencies = polymer_template
|> create_frequency_table()
|> step(mapping, step_count)
|> calculate_character_frequencies(polymer_template)
max = get_max_frequency(character_frequencies)
min = get_min_frequency(character_frequencies)
max - min
end
def create_frequency_table(polymer_template) do
length = String.length(polymer_template) - 2
0..length
|> Enum.map(fn e -> String.slice(polymer_template, e..e+1) end)
|> Enum.reduce(%{}, fn e, acc -> Map.update(acc, e, 1, &(&1 + 1)) end)
end
defp step(frequencies, _mapping, 0) do
frequencies
end
defp step(frequencies, mapping, step_count) do
keys = Map.keys(frequencies)
frequencies = do_replace(keys, mapping, frequencies)
step(frequencies, mapping, step_count - 1)
end
def do_replace(keys, mapping, frequencies) do
do_replace(keys, mapping, frequencies, %{})
end
defp do_replace([], _mapping, _old_frequencies, new_frequencies) do
new_frequencies
end
defp do_replace([key | keys], mapping, old_frequencies, new_frequencies) do
char = Map.get(mapping, key)
a = String.at(key, 0)
b = String.at(key, 1)
replace_1 = a <> char
replace_2 = char <> b
original_frequency = Map.get(old_frequencies, key, 0)
replace_1_frequency = Map.get(new_frequencies, replace_1, 0)
replace_2_frequency = Map.get(new_frequencies, replace_2, 0)
new_frequencies = new_frequencies
|> Map.put(replace_1, original_frequency + replace_1_frequency)
|> Map.put(replace_2, original_frequency + replace_2_frequency)
do_replace(keys, mapping, old_frequencies, new_frequencies)
end
def calculate_character_frequencies(frequencies, polymer_template) do
keys = Map.keys(frequencies)
character_frequencies = calculate_character_frequencies(keys, frequencies, %{})
first = String.first(polymer_template)
last = String.last(polymer_template)
# NOTE (Emil): Since we are operating on pairs of letters we count each letter twice
# so we divide the count by two and then add in the letter at the front and back,
# since they never change.
character_frequencies
|> halve_frequencies()
|> Map.update(first, 0, fn e -> e + 1 end)
|> Map.update(last, 0, fn e -> e + 1 end)
end
def halve_frequencies(character_frequencies) do
halve_frequencies(Map.keys(character_frequencies), character_frequencies)
end
defp halve_frequencies([], character_frequencies), do: character_frequencies
defp halve_frequencies([key | keys], character_frequencies) do
halve_frequencies(keys, Map.update(character_frequencies, key, 0, fn e -> Integer.floor_div(e, 2) end))
end
defp calculate_character_frequencies([], _frequencies, character_frequencies) do
character_frequencies
end
defp calculate_character_frequencies([key | keys], frequencies, character_frequencies) do
original_frequency = Map.get(frequencies, key, 0)
a = String.at(key, 0)
b = String.at(key, 1)
character_frequencies = character_frequencies
|> update_character_frequencies(a, original_frequency)
|> update_character_frequencies(b, original_frequency)
calculate_character_frequencies(keys, frequencies, character_frequencies)
end
def update_character_frequencies(character_frequencies, character, original_frequency) do
frequency = Map.get(character_frequencies, character, 0)
Map.put(character_frequencies, character, original_frequency + frequency)
end
def get_max_frequency(frequencies) do
keys = Map.keys(frequencies)
get_max_frequency(keys, frequencies, 0)
end
defp get_max_frequency([], _frequencies, max) do
max
end
defp get_max_frequency([key | keys], frequencies, max) do
frequency = Map.get(frequencies, key)
if frequency > max do
get_max_frequency(keys, frequencies, frequency)
else
get_max_frequency(keys, frequencies, max)
end
end
def get_min_frequency(frequencies) do
keys = Map.keys(frequencies)
count = frequencies
|> Map.values()
|> Enum.sum()
get_min_frequency(keys, frequencies, count)
end
defp get_min_frequency([], _frequencies, min) do
min
end
defp get_min_frequency([key | keys], frequencies, min) do
frequency = Map.get(frequencies, key)
if frequency < min do
get_min_frequency(keys, frequencies, frequency)
else
get_min_frequency(keys, frequencies, min)
end
end
end
[filename, step_count] = System.argv()
step_count = String.to_integer(step_count)
{polymer_template, mapping} = Day14.read_input(filename)
result1 = Day14.task1(polymer_template, mapping, step_count)
IO.inspect(result1)
| 32.230769 | 111 | 0.650528 |
e82238d4e50b475c12cc524c327c75b24970c2b0 | 2,203 | ex | Elixir | lib/syslog.ex | evnu/syslog | 03da5ff2b8c0f148aba6dcec2e7fc35cae2e2fc4 | [
"MIT"
] | 41 | 2015-01-14T05:57:05.000Z | 2021-12-21T13:40:57.000Z | lib/syslog.ex | evnu/syslog | 03da5ff2b8c0f148aba6dcec2e7fc35cae2e2fc4 | [
"MIT"
] | 9 | 2015-10-14T14:29:00.000Z | 2019-06-12T08:22:04.000Z | lib/syslog.ex | evnu/syslog | 03da5ff2b8c0f148aba6dcec2e7fc35cae2e2fc4 | [
"MIT"
] | 17 | 2015-05-07T10:04:50.000Z | 2021-01-05T13:40:40.000Z | defmodule Logger.Backends.Syslog do
@behaviour :gen_event
use Bitwise
def init(_) do
if user = Process.whereis(:user) do
Process.group_leader(self(), user)
{:ok, socket} = :gen_udp.open(0)
{:ok, configure([socket: socket])}
else
{:error, :ignore}
end
end
def handle_call({:configure, options}, _state) do
{:ok, :ok, configure(options)}
end
def handle_event({_level, gl, _event}, state) when node(gl) != node() do
{:ok, state}
end
def handle_event({level, _gl, {Logger, msg, ts, md}}, %{level: min_level} = state) do
if is_nil(min_level) or Logger.compare_levels(level, min_level) != :lt do
log_event(level, msg, ts, md, state)
end
{:ok, state}
end
def handle_info(_msg, state) do
{:ok, state}
end
## Helpers
defp configure(options) do
syslog = Keyword.merge(Application.get_env(:logger, :syslog, []), options)
socket = Keyword.get(options, :socket)
Application.put_env(:logger, :syslog, syslog)
format = syslog
|> Keyword.get(:format)
|> Logger.Formatter.compile
level = Keyword.get(syslog, :level)
metadata = Keyword.get(syslog, :metadata, [])
host = Keyword.get(syslog, :host, '127.0.0.1')
port = Keyword.get(syslog, :port, 514)
facility = Keyword.get(syslog, :facility, :local2) |> Logger.Syslog.Utils.facility
appid = Keyword.get(syslog, :appid, :elixir)
[hostname | _] = String.split("#{:net_adm.localhost()}", ".")
%{format: format, metadata: metadata, level: level, socket: socket,
host: host, port: port, facility: facility, appid: appid, hostname: hostname}
end
defp log_event(level, msg, ts, md, state) do
%{format: format, metadata: metadata, facility: facility, appid: appid,
hostname: _hostname, host: host, port: port, socket: socket} = state
level_num = Logger.Syslog.Utils.level(level)
pre = :io_lib.format('<~B>~s ~s~p: ', [facility ||| level_num,
Logger.Syslog.Utils.iso8601_timestamp(ts), appid, self()])
packet = [pre, Logger.Formatter.format(format, level, msg, ts, Keyword.take(md, metadata))]
if socket, do: :gen_udp.send(socket, host, port, packet)
end
end
| 32.397059 | 95 | 0.643668 |
e822503b2d039546672c187170a65e689e9c6fb7 | 72 | exs | Elixir | test/test_helper.exs | Fomich686/buy_the_food | ca4638a4efbb5f86347948ad07bcf7827f6523f9 | [
"Apache-2.0"
] | null | null | null | test/test_helper.exs | Fomich686/buy_the_food | ca4638a4efbb5f86347948ad07bcf7827f6523f9 | [
"Apache-2.0"
] | null | null | null | test/test_helper.exs | Fomich686/buy_the_food | ca4638a4efbb5f86347948ad07bcf7827f6523f9 | [
"Apache-2.0"
] | null | null | null | ExUnit.start()
Ecto.Adapters.SQL.Sandbox.mode(BuyTheFood.Repo, :manual)
| 24 | 56 | 0.791667 |
e82255af16f7459cff4820dc1d543a8840e6f076 | 1,343 | ex | Elixir | lib/live_store/middlewares.ex | patricklafleur/live_store | 575b151188bf58246471aa4147eb3fa7cea4aa61 | [
"MIT"
] | 2 | 2021-01-29T03:02:21.000Z | 2021-01-29T20:00:23.000Z | lib/live_store/middlewares.ex | patricklafleur/live_store | 575b151188bf58246471aa4147eb3fa7cea4aa61 | [
"MIT"
] | null | null | null | lib/live_store/middlewares.ex | patricklafleur/live_store | 575b151188bf58246471aa4147eb3fa7cea4aa61 | [
"MIT"
] | null | null | null | defmodule LiveStore.Middleware do
require Logger
def log_action() do
fn _store ->
fn next ->
fn %{} = state, {_, _} = action ->
Logger.info("dispatching action #{inspect(action)}...")
new_state = next.(state, action)
Logger.debug("processed action #{inspect(action)}.")
new_state
end
end
end
end
def log_state() do
fn _store ->
fn next ->
fn %{} = state, {_, _} = action ->
Logger.debug("state before processing #{inspect(state, pretty: true)}.")
new_state = next.(state, action)
Logger.debug("state after processing #{inspect(state, pretty: true)}.")
new_state
end
end
end
end
def diff_state() do
fn _store ->
fn next ->
fn %{} = state, {_, _} = action ->
Logger.debug("[middleware#diff_state]")
new_state = next.(state, action)
diff = MapDiff.diff(Map.from_struct(state), Map.from_struct(new_state))
Logger.debug("states diff for action #{inspect(action)}:\n")
Logger.debug("[added]\n#{inspect(diff[:added], pretty: true)}\n", ansi_color: :green)
Logger.debug("[removed]\n#{inspect(diff[:added], pretty: true)}\n", ansi_color: :orange)
new_state
end
end
end
end
end
| 29.195652 | 98 | 0.559196 |
e8226758db9d4bc5b44a57a53ed559ef8b432c5d | 853 | exs | Elixir | twitter/config/config.exs | SushmitDharurkar/Twitter-Clone | 1f0c6126d79877549fc312e263c69a6e44dc3254 | [
"MIT"
] | null | null | null | twitter/config/config.exs | SushmitDharurkar/Twitter-Clone | 1f0c6126d79877549fc312e263c69a6e44dc3254 | [
"MIT"
] | null | null | null | twitter/config/config.exs | SushmitDharurkar/Twitter-Clone | 1f0c6126d79877549fc312e263c69a6e44dc3254 | [
"MIT"
] | null | null | null | # This file is responsible for configuring your application
# and its dependencies with the aid of the Mix.Config module.
#
# This configuration file is loaded before any dependency and
# is restricted to this project.
use Mix.Config
# Configures the endpoint
config :twitter, TwitterWeb.Endpoint,
url: [host: "localhost"],
secret_key_base: "zlETDvMgLO+iYb4xBLfCEzjEZRDhc8u5JMaoFec1vWZm3GqS0kruUqLTZ+LeGkDI",
render_errors: [view: TwitterWeb.ErrorView, accepts: ~w(html json)],
pubsub: [name: Twitter.PubSub,
adapter: Phoenix.PubSub.PG2]
# Configures Elixir's Logger
config :logger, :console,
format: "$time $metadata[$level] $message\n",
metadata: [:request_id]
# Import environment specific config. This must remain at the bottom
# of this file so it overrides the configuration defined above.
import_config "#{Mix.env}.exs"
| 35.541667 | 86 | 0.760844 |
e82274d1b7b50ea8a79d934de06d9858aeeba97e | 64,793 | ex | Elixir | data/auto_generated/video/3b9a13385e69b6b3bf18724083610d21.ex | breunigs/veloroute | ac3b1eeb2ef2369c27186a138f6ffd8284652dab | [
"0BSD"
] | 12 | 2018-06-15T10:18:43.000Z | 2022-01-24T12:50:54.000Z | data/auto_generated/video/3b9a13385e69b6b3bf18724083610d21.ex | breunigs/veloroute | ac3b1eeb2ef2369c27186a138f6ffd8284652dab | [
"0BSD"
] | 15 | 2018-06-21T18:04:12.000Z | 2021-10-16T12:54:39.000Z | data/auto_generated/video/3b9a13385e69b6b3bf18724083610d21.ex | breunigs/veloroute | ac3b1eeb2ef2369c27186a138f6ffd8284652dab | [
"0BSD"
] | 2 | 2020-03-09T19:21:36.000Z | 2022-01-16T03:29:51.000Z | defmodule(Data.AutoGenerated.Video.Rendered_3b9a13385e69b6b3bf18724083610d21) do
@moduledoc "#{"1. Grüner Ring (FR0): gegen Uhrzeigersinn entlang der Wallanlagen"}
AUTOGENERATED. To update this file, run mix velo.videos.generate.
See Video.Rendered for functionality.
"
@behaviour Video.Rendered
@impl Video.Rendered
def(name()) do
"1. Grüner Ring (FR0): gegen Uhrzeigersinn entlang der Wallanlagen"
end
@impl Video.Rendered
def(hash()) do
"3b9a13385e69b6b3bf18724083610d21"
end
@impl Video.Rendered
def(length_ms()) do
259_905
end
@impl Video.Rendered
def(sources()) do
[
{"2021-07-17-gruenerring1-11whburg-haubach/GX012312", :start, :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012313", :start, :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012314", :start, :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012315", :start, :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012316", :start, :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012317", :start, "00:00:03.203"},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012355", "00:00:03.837", "00:00:22.189"},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012317", "00:00:20.220", :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012318", :start, :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012319", :start, :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012320", :start, :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012321", :start, :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012322", :start, :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012323", :start, :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012324", :start, :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012325", :start, :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012326", :start, :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012327", :start, :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012328", :start, :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012329", :start, :end},
{"2021-07-17-gruenerring1-11whburg-haubach/GX012330", :start, :end}
]
end
@impl Video.Rendered
def(coords()) do
[
%Video.TimedPoint{lat: 53.545959, lon: 9.968983, time_offset_ms: 0},
%Video.TimedPoint{lat: 53.54596, lon: 9.969013, time_offset_ms: 334},
%Video.TimedPoint{lat: 53.545952, lon: 9.969108, time_offset_ms: 667},
%Video.TimedPoint{lat: 53.545933, lon: 9.969239, time_offset_ms: 1001},
%Video.TimedPoint{lat: 53.545904, lon: 9.969386, time_offset_ms: 1334},
%Video.TimedPoint{lat: 53.545873, lon: 9.969551, time_offset_ms: 1668},
%Video.TimedPoint{lat: 53.545838, lon: 9.96972, time_offset_ms: 2002},
%Video.TimedPoint{lat: 53.545796, lon: 9.969889, time_offset_ms: 2335},
%Video.TimedPoint{lat: 53.54575, lon: 9.970067, time_offset_ms: 2669},
%Video.TimedPoint{lat: 53.545708, lon: 9.970248, time_offset_ms: 3002},
%Video.TimedPoint{lat: 53.54567, lon: 9.970437, time_offset_ms: 3336},
%Video.TimedPoint{lat: 53.545624, lon: 9.97063, time_offset_ms: 3669},
%Video.TimedPoint{lat: 53.545567, lon: 9.97082, time_offset_ms: 4003},
%Video.TimedPoint{lat: 53.545511, lon: 9.971006, time_offset_ms: 4337},
%Video.TimedPoint{lat: 53.545463, lon: 9.971196, time_offset_ms: 4670},
%Video.TimedPoint{lat: 53.545417, lon: 9.971389, time_offset_ms: 5004},
%Video.TimedPoint{lat: 53.545377, lon: 9.971581, time_offset_ms: 5337},
%Video.TimedPoint{lat: 53.545347, lon: 9.971781, time_offset_ms: 5671},
%Video.TimedPoint{lat: 53.545338, lon: 9.971985, time_offset_ms: 6005},
%Video.TimedPoint{lat: 53.545333, lon: 9.972185, time_offset_ms: 6338},
%Video.TimedPoint{lat: 53.545323, lon: 9.97238, time_offset_ms: 6672},
%Video.TimedPoint{lat: 53.545299, lon: 9.972544, time_offset_ms: 7005},
%Video.TimedPoint{lat: 53.54528, lon: 9.972623, time_offset_ms: 7339},
%Video.TimedPoint{lat: 53.545295, lon: 9.972674, time_offset_ms: 7424},
%Video.TimedPoint{lat: 53.545353, lon: 9.972813, time_offset_ms: 8425},
%Video.TimedPoint{lat: 53.545412, lon: 9.973006, time_offset_ms: 9092},
%Video.TimedPoint{lat: 53.545396, lon: 9.97319, time_offset_ms: 9759},
%Video.TimedPoint{lat: 53.545372, lon: 9.97331, time_offset_ms: 10093},
%Video.TimedPoint{lat: 53.545244, lon: 9.973677, time_offset_ms: 11094},
%Video.TimedPoint{lat: 53.545095, lon: 9.974047, time_offset_ms: 12094},
%Video.TimedPoint{lat: 53.545003, lon: 9.974323, time_offset_ms: 12762},
%Video.TimedPoint{lat: 53.544832, lon: 9.974748, time_offset_ms: 13762},
%Video.TimedPoint{lat: 53.544777, lon: 9.974904, time_offset_ms: 14096},
%Video.TimedPoint{lat: 53.544684, lon: 9.975194, time_offset_ms: 14763},
%Video.TimedPoint{lat: 53.544544, lon: 9.975706, time_offset_ms: 15764},
%Video.TimedPoint{lat: 53.544334, lon: 9.976341, time_offset_ms: 17098},
%Video.TimedPoint{lat: 53.544292, lon: 9.976516, time_offset_ms: 17432},
%Video.TimedPoint{lat: 53.544232718304215, lon: 9.976721504299162, time_offset_ms: 17766},
%Video.TimedPoint{lat: 53.54418287391806, lon: 9.976966101852417, time_offset_ms: 18099},
%Video.TimedPoint{lat: 53.54416115482402, lon: 9.977326987766268, time_offset_ms: 18433},
%Video.TimedPoint{lat: 53.544175779764856, lon: 9.977756936176299, time_offset_ms: 19767},
%Video.TimedPoint{lat: 53.54419240471598, lon: 9.978252830688476, time_offset_ms: 20434},
%Video.TimedPoint{lat: 53.544274, lon: 9.978145, time_offset_ms: 20519},
%Video.TimedPoint{lat: 53.544275, lon: 9.978162, time_offset_ms: 20853},
%Video.TimedPoint{lat: 53.544276, lon: 9.978227, time_offset_ms: 21186},
%Video.TimedPoint{lat: 53.544282, lon: 9.978309, time_offset_ms: 21520},
%Video.TimedPoint{lat: 53.544275, lon: 9.978377, time_offset_ms: 21853},
%Video.TimedPoint{lat: 53.544231, lon: 9.978395, time_offset_ms: 22187},
%Video.TimedPoint{lat: 53.544129, lon: 9.978424, time_offset_ms: 22272},
%Video.TimedPoint{lat: 53.544121, lon: 9.978426, time_offset_ms: 22606},
%Video.TimedPoint{lat: 53.544096, lon: 9.978426, time_offset_ms: 22939},
%Video.TimedPoint{lat: 53.544082, lon: 9.978433, time_offset_ms: 23273},
%Video.TimedPoint{lat: 53.544076, lon: 9.978465, time_offset_ms: 23606},
%Video.TimedPoint{lat: 53.54406, lon: 9.978551, time_offset_ms: 23940},
%Video.TimedPoint{lat: 53.54405, lon: 9.978683, time_offset_ms: 24274},
%Video.TimedPoint{lat: 53.54405, lon: 9.978837, time_offset_ms: 24607},
%Video.TimedPoint{lat: 53.544048, lon: 9.979, time_offset_ms: 24941},
%Video.TimedPoint{lat: 53.544049, lon: 9.979171, time_offset_ms: 25274},
%Video.TimedPoint{lat: 53.544053, lon: 9.97934, time_offset_ms: 25608},
%Video.TimedPoint{lat: 53.544063, lon: 9.979506, time_offset_ms: 25942},
%Video.TimedPoint{lat: 53.544074, lon: 9.979676, time_offset_ms: 26275},
%Video.TimedPoint{lat: 53.544079, lon: 9.979849, time_offset_ms: 26609},
%Video.TimedPoint{lat: 53.544085, lon: 9.980021, time_offset_ms: 26942},
%Video.TimedPoint{lat: 53.544088, lon: 9.980197, time_offset_ms: 27276},
%Video.TimedPoint{lat: 53.54408, lon: 9.980371, time_offset_ms: 27609},
%Video.TimedPoint{lat: 53.544068, lon: 9.980537, time_offset_ms: 27943},
%Video.TimedPoint{lat: 53.544055, lon: 9.980702, time_offset_ms: 28277},
%Video.TimedPoint{lat: 53.544044, lon: 9.980865, time_offset_ms: 28610},
%Video.TimedPoint{lat: 53.544028, lon: 9.981023, time_offset_ms: 28944},
%Video.TimedPoint{lat: 53.544013, lon: 9.981189, time_offset_ms: 29277},
%Video.TimedPoint{lat: 53.544009, lon: 9.981349, time_offset_ms: 29611},
%Video.TimedPoint{lat: 53.544008, lon: 9.981502, time_offset_ms: 29945},
%Video.TimedPoint{lat: 53.544004, lon: 9.981629, time_offset_ms: 30278},
%Video.TimedPoint{lat: 53.544001, lon: 9.981712, time_offset_ms: 30612},
%Video.TimedPoint{lat: 53.543989, lon: 9.981777, time_offset_ms: 30697},
%Video.TimedPoint{lat: 53.543991, lon: 9.9818, time_offset_ms: 31031},
%Video.TimedPoint{lat: 53.543995, lon: 9.981883, time_offset_ms: 31364},
%Video.TimedPoint{lat: 53.543987, lon: 9.981995, time_offset_ms: 31698},
%Video.TimedPoint{lat: 53.543943, lon: 9.982103, time_offset_ms: 32031},
%Video.TimedPoint{lat: 53.543879, lon: 9.9822, time_offset_ms: 32365},
%Video.TimedPoint{lat: 53.543815, lon: 9.982307, time_offset_ms: 32699},
%Video.TimedPoint{lat: 53.543745, lon: 9.982424, time_offset_ms: 33032},
%Video.TimedPoint{lat: 53.543673, lon: 9.982541, time_offset_ms: 33366},
%Video.TimedPoint{lat: 53.543602, lon: 9.98266, time_offset_ms: 33699},
%Video.TimedPoint{lat: 53.543524, lon: 9.982779, time_offset_ms: 34033},
%Video.TimedPoint{lat: 53.543442, lon: 9.982907, time_offset_ms: 34366},
%Video.TimedPoint{lat: 53.54336, lon: 9.983038, time_offset_ms: 34700},
%Video.TimedPoint{lat: 53.543272, lon: 9.98316, time_offset_ms: 35034},
%Video.TimedPoint{lat: 53.543188, lon: 9.983279, time_offset_ms: 35367},
%Video.TimedPoint{lat: 53.543108, lon: 9.983408, time_offset_ms: 35701},
%Video.TimedPoint{lat: 53.543029, lon: 9.983542, time_offset_ms: 36034},
%Video.TimedPoint{lat: 53.542951, lon: 9.983669, time_offset_ms: 36368},
%Video.TimedPoint{lat: 53.542873, lon: 9.983797, time_offset_ms: 36702},
%Video.TimedPoint{lat: 53.542796, lon: 9.983926, time_offset_ms: 37035},
%Video.TimedPoint{lat: 53.542742, lon: 9.984059, time_offset_ms: 37369},
%Video.TimedPoint{lat: 53.542707, lon: 9.984206, time_offset_ms: 37702},
%Video.TimedPoint{lat: 53.542684, lon: 9.984371, time_offset_ms: 38036},
%Video.TimedPoint{lat: 53.542679, lon: 9.984514, time_offset_ms: 38370},
%Video.TimedPoint{lat: 53.542684, lon: 9.984613, time_offset_ms: 38703},
%Video.TimedPoint{lat: 53.542674, lon: 9.984724, time_offset_ms: 38788},
%Video.TimedPoint{lat: 53.542676, lon: 9.984743, time_offset_ms: 39122},
%Video.TimedPoint{lat: 53.542677, lon: 9.984822, time_offset_ms: 39455},
%Video.TimedPoint{lat: 53.54268, lon: 9.984943, time_offset_ms: 39789},
%Video.TimedPoint{lat: 53.542686, lon: 9.985092, time_offset_ms: 40122},
%Video.TimedPoint{lat: 53.542697, lon: 9.985256, time_offset_ms: 40456},
%Video.TimedPoint{lat: 53.542711, lon: 9.985424, time_offset_ms: 40790},
%Video.TimedPoint{lat: 53.542723, lon: 9.985604, time_offset_ms: 41123},
%Video.TimedPoint{lat: 53.542736, lon: 9.985792, time_offset_ms: 41457},
%Video.TimedPoint{lat: 53.542751, lon: 9.985981, time_offset_ms: 41790},
%Video.TimedPoint{lat: 53.542759425149704, lon: 9.986095943113773, time_offset_ms: 41991},
%Video.TimedPoint{lat: 53.54273351351351, lon: 9.986110303303303, time_offset_ms: 42076},
%Video.TimedPoint{lat: 53.542738, lon: 9.986211, time_offset_ms: 42242},
%Video.TimedPoint{lat: 53.542748, lon: 9.986414, time_offset_ms: 42576},
%Video.TimedPoint{lat: 53.542762, lon: 9.986625, time_offset_ms: 42909},
%Video.TimedPoint{lat: 53.542776, lon: 9.986834, time_offset_ms: 43243},
%Video.TimedPoint{lat: 53.542769, lon: 9.987012, time_offset_ms: 43577},
%Video.TimedPoint{lat: 53.542762, lon: 9.987155, time_offset_ms: 43910},
%Video.TimedPoint{lat: 53.542784, lon: 9.987289, time_offset_ms: 44244},
%Video.TimedPoint{lat: 53.542795, lon: 9.987455, time_offset_ms: 44577},
%Video.TimedPoint{lat: 53.542804, lon: 9.987635, time_offset_ms: 44911},
%Video.TimedPoint{lat: 53.542814, lon: 9.987819, time_offset_ms: 45245},
%Video.TimedPoint{lat: 53.542822, lon: 9.988001, time_offset_ms: 45578},
%Video.TimedPoint{lat: 53.54283, lon: 9.988186, time_offset_ms: 45912},
%Video.TimedPoint{lat: 53.542838, lon: 9.988373, time_offset_ms: 46245},
%Video.TimedPoint{lat: 53.542843, lon: 9.988559, time_offset_ms: 46579},
%Video.TimedPoint{lat: 53.542855, lon: 9.988742, time_offset_ms: 46913},
%Video.TimedPoint{lat: 53.542866, lon: 9.98893, time_offset_ms: 47246},
%Video.TimedPoint{lat: 53.54288, lon: 9.989113, time_offset_ms: 47580},
%Video.TimedPoint{lat: 53.542893, lon: 9.989299, time_offset_ms: 47913},
%Video.TimedPoint{lat: 53.542911, lon: 9.98949, time_offset_ms: 48247},
%Video.TimedPoint{lat: 53.542927, lon: 9.98968, time_offset_ms: 48581},
%Video.TimedPoint{lat: 53.542937, lon: 9.989871, time_offset_ms: 48914},
%Video.TimedPoint{lat: 53.542952, lon: 9.990058, time_offset_ms: 49248},
%Video.TimedPoint{lat: 53.542972, lon: 9.990242, time_offset_ms: 49581},
%Video.TimedPoint{lat: 53.542987, lon: 9.990434, time_offset_ms: 49915},
%Video.TimedPoint{lat: 53.542994, lon: 9.990622, time_offset_ms: 50249},
%Video.TimedPoint{lat: 53.542996, lon: 9.99081, time_offset_ms: 50582},
%Video.TimedPoint{lat: 53.543, lon: 9.990998, time_offset_ms: 50916},
%Video.TimedPoint{lat: 53.543009, lon: 9.991183, time_offset_ms: 51249},
%Video.TimedPoint{lat: 53.543017, lon: 9.991361, time_offset_ms: 51583},
%Video.TimedPoint{lat: 53.543025, lon: 9.991545, time_offset_ms: 51917},
%Video.TimedPoint{lat: 53.543036, lon: 9.991725, time_offset_ms: 52250},
%Video.TimedPoint{lat: 53.543053, lon: 9.991907, time_offset_ms: 52584},
%Video.TimedPoint{lat: 53.543065, lon: 9.992093, time_offset_ms: 52917},
%Video.TimedPoint{lat: 53.543079, lon: 9.992272, time_offset_ms: 53251},
%Video.TimedPoint{lat: 53.543087, lon: 9.992456, time_offset_ms: 53585},
%Video.TimedPoint{lat: 53.543096, lon: 9.992639, time_offset_ms: 53918},
%Video.TimedPoint{lat: 53.543112, lon: 9.992815, time_offset_ms: 54252},
%Video.TimedPoint{lat: 53.543122, lon: 9.992987, time_offset_ms: 54585},
%Video.TimedPoint{lat: 53.543134, lon: 9.993153, time_offset_ms: 54919},
%Video.TimedPoint{lat: 53.543145, lon: 9.993311, time_offset_ms: 55253},
%Video.TimedPoint{lat: 53.543163, lon: 9.993467, time_offset_ms: 55586},
%Video.TimedPoint{lat: 53.543181, lon: 9.993642, time_offset_ms: 55920},
%Video.TimedPoint{lat: 53.543191, lon: 9.993813, time_offset_ms: 56253},
%Video.TimedPoint{lat: 53.543212, lon: 9.993981, time_offset_ms: 56587},
%Video.TimedPoint{lat: 53.543223, lon: 9.994164, time_offset_ms: 56921},
%Video.TimedPoint{lat: 53.543236, lon: 9.994345, time_offset_ms: 57254},
%Video.TimedPoint{lat: 53.543254, lon: 9.994528, time_offset_ms: 57588},
%Video.TimedPoint{lat: 53.543279, lon: 9.994716, time_offset_ms: 57921},
%Video.TimedPoint{lat: 53.543296, lon: 9.994899, time_offset_ms: 58255},
%Video.TimedPoint{lat: 53.543321, lon: 9.995085, time_offset_ms: 58588},
%Video.TimedPoint{lat: 53.543357, lon: 9.995272, time_offset_ms: 58922},
%Video.TimedPoint{lat: 53.543398, lon: 9.995447, time_offset_ms: 59256},
%Video.TimedPoint{lat: 53.543436, lon: 9.995615, time_offset_ms: 59589},
%Video.TimedPoint{lat: 53.54347, lon: 9.99578, time_offset_ms: 59923},
%Video.TimedPoint{lat: 53.543493, lon: 9.99594, time_offset_ms: 60256},
%Video.TimedPoint{lat: 53.54350484431137, lon: 9.9960208502994, time_offset_ms: 60428},
%Video.TimedPoint{lat: 53.54357132335329, lon: 9.996027335329341, time_offset_ms: 60513},
%Video.TimedPoint{lat: 53.543583, lon: 9.99609, time_offset_ms: 60643},
%Video.TimedPoint{lat: 53.543594, lon: 9.996255, time_offset_ms: 60976},
%Video.TimedPoint{lat: 53.543628, lon: 9.996412, time_offset_ms: 61310},
%Video.TimedPoint{lat: 53.54367, lon: 9.996566, time_offset_ms: 61643},
%Video.TimedPoint{lat: 53.543705, lon: 9.996727, time_offset_ms: 61977},
%Video.TimedPoint{lat: 53.543738, lon: 9.996882, time_offset_ms: 62311},
%Video.TimedPoint{lat: 53.543773, lon: 9.997031, time_offset_ms: 62644},
%Video.TimedPoint{lat: 53.543806, lon: 9.997181, time_offset_ms: 62978},
%Video.TimedPoint{lat: 53.543845, lon: 9.997314, time_offset_ms: 63311},
%Video.TimedPoint{lat: 53.543884, lon: 9.997441, time_offset_ms: 63645},
%Video.TimedPoint{lat: 53.543909, lon: 9.997572, time_offset_ms: 63979},
%Video.TimedPoint{lat: 53.54393, lon: 9.997661, time_offset_ms: 64312},
%Video.TimedPoint{lat: 53.54394, lon: 9.997722, time_offset_ms: 64397},
%Video.TimedPoint{lat: 53.543945, lon: 9.997746, time_offset_ms: 64731},
%Video.TimedPoint{lat: 53.543962, lon: 9.997818, time_offset_ms: 65064},
%Video.TimedPoint{lat: 53.543983, lon: 9.997922, time_offset_ms: 65398},
%Video.TimedPoint{lat: 53.544015, lon: 9.998042, time_offset_ms: 65731},
%Video.TimedPoint{lat: 53.544055, lon: 9.998178, time_offset_ms: 66065},
%Video.TimedPoint{lat: 53.544095, lon: 9.998321, time_offset_ms: 66399},
%Video.TimedPoint{lat: 53.544141, lon: 9.998469, time_offset_ms: 66732},
%Video.TimedPoint{lat: 53.54419, lon: 9.998625, time_offset_ms: 67066},
%Video.TimedPoint{lat: 53.544238, lon: 9.998782, time_offset_ms: 67399},
%Video.TimedPoint{lat: 53.544286, lon: 9.998941, time_offset_ms: 67733},
%Video.TimedPoint{lat: 53.544339, lon: 9.999109, time_offset_ms: 68066},
%Video.TimedPoint{lat: 53.544387, lon: 9.999282, time_offset_ms: 68400},
%Video.TimedPoint{lat: 53.544429, lon: 9.999452, time_offset_ms: 68734},
%Video.TimedPoint{lat: 53.544474, lon: 9.999614, time_offset_ms: 69067},
%Video.TimedPoint{lat: 53.544519, lon: 9.999766, time_offset_ms: 69401},
%Video.TimedPoint{lat: 53.544565, lon: 9.999917, time_offset_ms: 69734},
%Video.TimedPoint{lat: 53.544611, lon: 10.000068, time_offset_ms: 70068},
%Video.TimedPoint{lat: 53.544655, lon: 10.000217, time_offset_ms: 70402},
%Video.TimedPoint{lat: 53.544698, lon: 10.000363, time_offset_ms: 70735},
%Video.TimedPoint{lat: 53.544742, lon: 10.000503, time_offset_ms: 71069},
%Video.TimedPoint{lat: 53.544776, lon: 10.000609, time_offset_ms: 71402},
%Video.TimedPoint{lat: 53.544795, lon: 10.000673, time_offset_ms: 71736},
%Video.TimedPoint{lat: 53.544808, lon: 10.000713, time_offset_ms: 72070},
%Video.TimedPoint{lat: 53.54482, lon: 10.000752, time_offset_ms: 72403},
%Video.TimedPoint{lat: 53.544832, lon: 10.00085, time_offset_ms: 72488},
%Video.TimedPoint{lat: 53.544837, lon: 10.000866, time_offset_ms: 72822},
%Video.TimedPoint{lat: 53.544857, lon: 10.000924, time_offset_ms: 73155},
%Video.TimedPoint{lat: 53.544884, lon: 10.001015, time_offset_ms: 73489},
%Video.TimedPoint{lat: 53.544916, lon: 10.001127, time_offset_ms: 73822},
%Video.TimedPoint{lat: 53.544954, lon: 10.001258, time_offset_ms: 74156},
%Video.TimedPoint{lat: 53.544995, lon: 10.001398, time_offset_ms: 74490},
%Video.TimedPoint{lat: 53.545036, lon: 10.001541, time_offset_ms: 74823},
%Video.TimedPoint{lat: 53.545074, lon: 10.001695, time_offset_ms: 75157},
%Video.TimedPoint{lat: 53.545121, lon: 10.001852, time_offset_ms: 75490},
%Video.TimedPoint{lat: 53.54517, lon: 10.002006, time_offset_ms: 75824},
%Video.TimedPoint{lat: 53.545222, lon: 10.002165, time_offset_ms: 76158},
%Video.TimedPoint{lat: 53.545274, lon: 10.002322, time_offset_ms: 76491},
%Video.TimedPoint{lat: 53.545334, lon: 10.002467, time_offset_ms: 76825},
%Video.TimedPoint{lat: 53.545398, lon: 10.002605, time_offset_ms: 77158},
%Video.TimedPoint{lat: 53.545472, lon: 10.002717, time_offset_ms: 77492},
%Video.TimedPoint{lat: 53.545554, lon: 10.002785, time_offset_ms: 77826},
%Video.TimedPoint{lat: 53.545631, lon: 10.002875, time_offset_ms: 78159},
%Video.TimedPoint{lat: 53.545707, lon: 10.002976, time_offset_ms: 78493},
%Video.TimedPoint{lat: 53.54578, lon: 10.00307, time_offset_ms: 78826},
%Video.TimedPoint{lat: 53.545853, lon: 10.003171, time_offset_ms: 79160},
%Video.TimedPoint{lat: 53.545931, lon: 10.003281, time_offset_ms: 79494},
%Video.TimedPoint{lat: 53.546015, lon: 10.003393, time_offset_ms: 79827},
%Video.TimedPoint{lat: 53.546111, lon: 10.003496, time_offset_ms: 80161},
%Video.TimedPoint{lat: 53.546201, lon: 10.003611, time_offset_ms: 80494},
%Video.TimedPoint{lat: 53.546287, lon: 10.003727, time_offset_ms: 80828},
%Video.TimedPoint{lat: 53.546372, lon: 10.003849, time_offset_ms: 81162},
%Video.TimedPoint{lat: 53.54645, lon: 10.00398, time_offset_ms: 81495},
%Video.TimedPoint{lat: 53.54653, lon: 10.004106, time_offset_ms: 81829},
%Video.TimedPoint{lat: 53.546616, lon: 10.004232, time_offset_ms: 82162},
%Video.TimedPoint{lat: 53.546696, lon: 10.004365, time_offset_ms: 82496},
%Video.TimedPoint{lat: 53.546773, lon: 10.004488, time_offset_ms: 82830},
%Video.TimedPoint{lat: 53.546853, lon: 10.004612, time_offset_ms: 83163},
%Video.TimedPoint{lat: 53.546934, lon: 10.004729, time_offset_ms: 83497},
%Video.TimedPoint{lat: 53.54702, lon: 10.004854, time_offset_ms: 83830},
%Video.TimedPoint{lat: 53.547103, lon: 10.004979, time_offset_ms: 84164},
%Video.TimedPoint{lat: 53.547181, lon: 10.005093, time_offset_ms: 84498},
%Video.TimedPoint{lat: 53.54726, lon: 10.005206, time_offset_ms: 84831},
%Video.TimedPoint{lat: 53.547335, lon: 10.005313, time_offset_ms: 85165},
%Video.TimedPoint{lat: 53.547409, lon: 10.005421, time_offset_ms: 85498},
%Video.TimedPoint{lat: 53.547478, lon: 10.005535, time_offset_ms: 85832},
%Video.TimedPoint{lat: 53.547532, lon: 10.005657, time_offset_ms: 86166},
%Video.TimedPoint{lat: 53.547575, lon: 10.005792, time_offset_ms: 86499},
%Video.TimedPoint{lat: 53.547611, lon: 10.005932, time_offset_ms: 86833},
%Video.TimedPoint{lat: 53.547643, lon: 10.006069, time_offset_ms: 87166},
%Video.TimedPoint{lat: 53.547663, lon: 10.006196, time_offset_ms: 87500},
%Video.TimedPoint{lat: 53.547674, lon: 10.006296, time_offset_ms: 87834},
%Video.TimedPoint{lat: 53.547689, lon: 10.006344, time_offset_ms: 88167},
%Video.TimedPoint{lat: 53.547695, lon: 10.006371, time_offset_ms: 88252},
%Video.TimedPoint{lat: 53.54771, lon: 10.006379, time_offset_ms: 88586},
%Video.TimedPoint{lat: 53.547753, lon: 10.006395, time_offset_ms: 88919},
%Video.TimedPoint{lat: 53.547811, lon: 10.006408, time_offset_ms: 89253},
%Video.TimedPoint{lat: 53.547869, lon: 10.006426, time_offset_ms: 89586},
%Video.TimedPoint{lat: 53.547924, lon: 10.006442, time_offset_ms: 89920},
%Video.TimedPoint{lat: 53.547966, lon: 10.006456, time_offset_ms: 90254},
%Video.TimedPoint{lat: 53.548003, lon: 10.00647, time_offset_ms: 90587},
%Video.TimedPoint{lat: 53.548033, lon: 10.006479, time_offset_ms: 90921},
%Video.TimedPoint{lat: 53.54805, lon: 10.006484, time_offset_ms: 91254},
%Video.TimedPoint{lat: 53.548066, lon: 10.006491, time_offset_ms: 91588},
%Video.TimedPoint{lat: 53.548109, lon: 10.006512, time_offset_ms: 91922},
%Video.TimedPoint{lat: 53.548172, lon: 10.006506, time_offset_ms: 92255},
%Video.TimedPoint{lat: 53.548235, lon: 10.006445, time_offset_ms: 92589},
%Video.TimedPoint{lat: 53.548307, lon: 10.006411, time_offset_ms: 92922},
%Video.TimedPoint{lat: 53.548382, lon: 10.006421, time_offset_ms: 93256},
%Video.TimedPoint{lat: 53.54846, lon: 10.006434, time_offset_ms: 93590},
%Video.TimedPoint{lat: 53.548541, lon: 10.006455, time_offset_ms: 93923},
%Video.TimedPoint{lat: 53.54862, lon: 10.006475, time_offset_ms: 94257},
%Video.TimedPoint{lat: 53.548699, lon: 10.006498, time_offset_ms: 94590},
%Video.TimedPoint{lat: 53.548777, lon: 10.006522, time_offset_ms: 94924},
%Video.TimedPoint{lat: 53.548852, lon: 10.006543, time_offset_ms: 95258},
%Video.TimedPoint{lat: 53.548928, lon: 10.006562, time_offset_ms: 95591},
%Video.TimedPoint{lat: 53.549005, lon: 10.006582, time_offset_ms: 95925},
%Video.TimedPoint{lat: 53.549082, lon: 10.006601, time_offset_ms: 96258},
%Video.TimedPoint{lat: 53.54916, lon: 10.00662, time_offset_ms: 96592},
%Video.TimedPoint{lat: 53.549237, lon: 10.00664, time_offset_ms: 96926},
%Video.TimedPoint{lat: 53.549314, lon: 10.006659, time_offset_ms: 97259},
%Video.TimedPoint{lat: 53.549392, lon: 10.006678, time_offset_ms: 97593},
%Video.TimedPoint{lat: 53.549472, lon: 10.006693, time_offset_ms: 97926},
%Video.TimedPoint{lat: 53.549553, lon: 10.006712, time_offset_ms: 98260},
%Video.TimedPoint{lat: 53.549633, lon: 10.006727, time_offset_ms: 98594},
%Video.TimedPoint{lat: 53.549712, lon: 10.006736, time_offset_ms: 98927},
%Video.TimedPoint{lat: 53.549788, lon: 10.006741, time_offset_ms: 99261},
%Video.TimedPoint{lat: 53.549864, lon: 10.006744, time_offset_ms: 99594},
%Video.TimedPoint{lat: 53.549941, lon: 10.006745, time_offset_ms: 99928},
%Video.TimedPoint{lat: 53.550014, lon: 10.006758, time_offset_ms: 100_262},
%Video.TimedPoint{lat: 53.550072, lon: 10.006796, time_offset_ms: 100_595},
%Video.TimedPoint{lat: 53.550119, lon: 10.006822, time_offset_ms: 100_929},
%Video.TimedPoint{lat: 53.550156, lon: 10.006821, time_offset_ms: 101_262},
%Video.TimedPoint{lat: 53.5502, lon: 10.006806, time_offset_ms: 101_596},
%Video.TimedPoint{lat: 53.550258, lon: 10.006787, time_offset_ms: 101_930},
%Video.TimedPoint{lat: 53.550324, lon: 10.00676, time_offset_ms: 102_263},
%Video.TimedPoint{lat: 53.550387, lon: 10.006703, time_offset_ms: 102_597},
%Video.TimedPoint{lat: 53.550453, lon: 10.006634, time_offset_ms: 102_930},
%Video.TimedPoint{lat: 53.550529, lon: 10.006585, time_offset_ms: 103_264},
%Video.TimedPoint{lat: 53.550611, lon: 10.006535, time_offset_ms: 103_598},
%Video.TimedPoint{lat: 53.550691, lon: 10.00649, time_offset_ms: 103_931},
%Video.TimedPoint{lat: 53.550774, lon: 10.00645, time_offset_ms: 104_265},
%Video.TimedPoint{lat: 53.550855, lon: 10.006416, time_offset_ms: 104_598},
%Video.TimedPoint{lat: 53.550937, lon: 10.006385, time_offset_ms: 104_932},
%Video.TimedPoint{lat: 53.551017, lon: 10.006357, time_offset_ms: 105_266},
%Video.TimedPoint{lat: 53.551097, lon: 10.006325, time_offset_ms: 105_599},
%Video.TimedPoint{lat: 53.551174, lon: 10.006287, time_offset_ms: 105_933},
%Video.TimedPoint{lat: 53.551254, lon: 10.006268, time_offset_ms: 106_266},
%Video.TimedPoint{lat: 53.551329, lon: 10.006264, time_offset_ms: 106_600},
%Video.TimedPoint{lat: 53.5514, lon: 10.006244, time_offset_ms: 106_934},
%Video.TimedPoint{lat: 53.551461, lon: 10.006219, time_offset_ms: 107_267},
%Video.TimedPoint{lat: 53.551502, lon: 10.006201, time_offset_ms: 107_601},
%Video.TimedPoint{lat: 53.551527, lon: 10.006188, time_offset_ms: 107_934},
%Video.TimedPoint{lat: 53.551547, lon: 10.006172, time_offset_ms: 108_268},
%Video.TimedPoint{lat: 53.551558, lon: 10.006182, time_offset_ms: 108_353},
%Video.TimedPoint{lat: 53.551567, lon: 10.006177, time_offset_ms: 108_687},
%Video.TimedPoint{lat: 53.551605, lon: 10.006158, time_offset_ms: 109_020},
%Video.TimedPoint{lat: 53.551664, lon: 10.006126, time_offset_ms: 109_354},
%Video.TimedPoint{lat: 53.551735, lon: 10.00609, time_offset_ms: 109_687},
%Video.TimedPoint{lat: 53.551813, lon: 10.006054, time_offset_ms: 110_021},
%Video.TimedPoint{lat: 53.551896, lon: 10.006025, time_offset_ms: 110_355},
%Video.TimedPoint{lat: 53.551984, lon: 10.006006, time_offset_ms: 110_688},
%Video.TimedPoint{lat: 53.552075, lon: 10.005996, time_offset_ms: 111_022},
%Video.TimedPoint{lat: 53.552164, lon: 10.005998, time_offset_ms: 111_355},
%Video.TimedPoint{lat: 53.552255, lon: 10.005986, time_offset_ms: 111_689},
%Video.TimedPoint{lat: 53.552342, lon: 10.005942, time_offset_ms: 112_023},
%Video.TimedPoint{lat: 53.552436, lon: 10.00591, time_offset_ms: 112_356},
%Video.TimedPoint{lat: 53.552535, lon: 10.00587, time_offset_ms: 112_690},
%Video.TimedPoint{lat: 53.552627, lon: 10.005811, time_offset_ms: 113_023},
%Video.TimedPoint{lat: 53.552687, lon: 10.005733, time_offset_ms: 113_357},
%Video.TimedPoint{lat: 53.552733, lon: 10.005656, time_offset_ms: 113_691},
%Video.TimedPoint{lat: 53.552797, lon: 10.005581, time_offset_ms: 114_024},
%Video.TimedPoint{lat: 53.552873, lon: 10.005509, time_offset_ms: 114_358},
%Video.TimedPoint{lat: 53.552953, lon: 10.005428, time_offset_ms: 114_691},
%Video.TimedPoint{lat: 53.553033, lon: 10.005339, time_offset_ms: 115_025},
%Video.TimedPoint{lat: 53.553112, lon: 10.005247, time_offset_ms: 115_359},
%Video.TimedPoint{lat: 53.553193, lon: 10.005149, time_offset_ms: 115_692},
%Video.TimedPoint{lat: 53.553276, lon: 10.005051, time_offset_ms: 116_026},
%Video.TimedPoint{lat: 53.553356, lon: 10.004953, time_offset_ms: 116_359},
%Video.TimedPoint{lat: 53.553437, lon: 10.004849, time_offset_ms: 116_693},
%Video.TimedPoint{lat: 53.553518, lon: 10.004752, time_offset_ms: 117_026},
%Video.TimedPoint{lat: 53.5536, lon: 10.004668, time_offset_ms: 117_360},
%Video.TimedPoint{lat: 53.55368, lon: 10.004579, time_offset_ms: 117_694},
%Video.TimedPoint{lat: 53.553758, lon: 10.004483, time_offset_ms: 118_027},
%Video.TimedPoint{lat: 53.553828, lon: 10.004399, time_offset_ms: 118_361},
%Video.TimedPoint{lat: 53.553889, lon: 10.004326, time_offset_ms: 118_694},
%Video.TimedPoint{lat: 53.553927, lon: 10.004284, time_offset_ms: 119_028},
%Video.TimedPoint{lat: 53.553947, lon: 10.00427, time_offset_ms: 119_362},
%Video.TimedPoint{lat: 53.553971, lon: 10.004254, time_offset_ms: 119_695},
%Video.TimedPoint{lat: 53.554002, lon: 10.004238, time_offset_ms: 120_029},
%Video.TimedPoint{lat: 53.554023, lon: 10.004225, time_offset_ms: 120_362},
%Video.TimedPoint{lat: 53.554024, lon: 10.00421, time_offset_ms: 120_447},
%Video.TimedPoint{lat: 53.554034, lon: 10.004203, time_offset_ms: 120_781},
%Video.TimedPoint{lat: 53.55407, lon: 10.004176, time_offset_ms: 121_114},
%Video.TimedPoint{lat: 53.554119, lon: 10.004133, time_offset_ms: 121_448},
%Video.TimedPoint{lat: 53.554173, lon: 10.004071, time_offset_ms: 121_781},
%Video.TimedPoint{lat: 53.554232, lon: 10.003998, time_offset_ms: 122_115},
%Video.TimedPoint{lat: 53.55427, lon: 10.003886, time_offset_ms: 122_449},
%Video.TimedPoint{lat: 53.554302, lon: 10.003763, time_offset_ms: 122_782},
%Video.TimedPoint{lat: 53.554351, lon: 10.003658, time_offset_ms: 123_116},
%Video.TimedPoint{lat: 53.554409, lon: 10.003559, time_offset_ms: 123_449},
%Video.TimedPoint{lat: 53.554469, lon: 10.003458, time_offset_ms: 123_783},
%Video.TimedPoint{lat: 53.55453, lon: 10.003357, time_offset_ms: 124_117},
%Video.TimedPoint{lat: 53.554597, lon: 10.003239, time_offset_ms: 124_450},
%Video.TimedPoint{lat: 53.554665, lon: 10.003109, time_offset_ms: 124_784},
%Video.TimedPoint{lat: 53.554739, lon: 10.002981, time_offset_ms: 125_117},
%Video.TimedPoint{lat: 53.55482, lon: 10.002849, time_offset_ms: 125_451},
%Video.TimedPoint{lat: 53.554906, lon: 10.002721, time_offset_ms: 125_785},
%Video.TimedPoint{lat: 53.55499, lon: 10.002587, time_offset_ms: 126_118},
%Video.TimedPoint{lat: 53.555067, lon: 10.002442, time_offset_ms: 126_452},
%Video.TimedPoint{lat: 53.555136, lon: 10.002302, time_offset_ms: 126_785},
%Video.TimedPoint{lat: 53.5552, lon: 10.002194, time_offset_ms: 127_119},
%Video.TimedPoint{lat: 53.555245, lon: 10.00212, time_offset_ms: 127_453},
%Video.TimedPoint{lat: 53.555249, lon: 10.002084, time_offset_ms: 127_538},
%Video.TimedPoint{lat: 53.555254, lon: 10.002068, time_offset_ms: 127_872},
%Video.TimedPoint{lat: 53.555266, lon: 10.001996, time_offset_ms: 128_205},
%Video.TimedPoint{lat: 53.555282, lon: 10.001886, time_offset_ms: 128_539},
%Video.TimedPoint{lat: 53.555313, lon: 10.001767, time_offset_ms: 128_872},
%Video.TimedPoint{lat: 53.555365, lon: 10.001657, time_offset_ms: 129_206},
%Video.TimedPoint{lat: 53.555423, lon: 10.001559, time_offset_ms: 129_540},
%Video.TimedPoint{lat: 53.555478, lon: 10.001467, time_offset_ms: 129_873},
%Video.TimedPoint{lat: 53.555529, lon: 10.001374, time_offset_ms: 130_207},
%Video.TimedPoint{lat: 53.555576, lon: 10.001289, time_offset_ms: 130_540},
%Video.TimedPoint{lat: 53.55561, lon: 10.001229, time_offset_ms: 130_874},
%Video.TimedPoint{lat: 53.555636, lon: 10.001174, time_offset_ms: 131_208},
%Video.TimedPoint{lat: 53.555678, lon: 10.001086, time_offset_ms: 131_541},
%Video.TimedPoint{lat: 53.555728, lon: 10.000981, time_offset_ms: 131_875},
%Video.TimedPoint{lat: 53.555787, lon: 10.000864, time_offset_ms: 132_208},
%Video.TimedPoint{lat: 53.555847, lon: 10.000737, time_offset_ms: 132_542},
%Video.TimedPoint{lat: 53.555905, lon: 10.000614, time_offset_ms: 132_876},
%Video.TimedPoint{lat: 53.555963, lon: 10.000491, time_offset_ms: 133_209},
%Video.TimedPoint{lat: 53.556022, lon: 10.000366, time_offset_ms: 133_543},
%Video.TimedPoint{lat: 53.556078, lon: 10.000245, time_offset_ms: 133_876},
%Video.TimedPoint{lat: 53.556134, lon: 10.000121, time_offset_ms: 134_210},
%Video.TimedPoint{lat: 53.556191, lon: 9.999995, time_offset_ms: 134_544},
%Video.TimedPoint{lat: 53.556245, lon: 9.999869, time_offset_ms: 134_877},
%Video.TimedPoint{lat: 53.556299, lon: 9.999738, time_offset_ms: 135_211},
%Video.TimedPoint{lat: 53.556349, lon: 9.999604, time_offset_ms: 135_544},
%Video.TimedPoint{lat: 53.556397, lon: 9.999472, time_offset_ms: 135_878},
%Video.TimedPoint{lat: 53.556445, lon: 9.999339, time_offset_ms: 136_212},
%Video.TimedPoint{lat: 53.556503, lon: 9.999218, time_offset_ms: 136_545},
%Video.TimedPoint{lat: 53.556555, lon: 9.999097, time_offset_ms: 136_879},
%Video.TimedPoint{lat: 53.556598, lon: 9.998965, time_offset_ms: 137_212},
%Video.TimedPoint{lat: 53.556631, lon: 9.99882, time_offset_ms: 137_546},
%Video.TimedPoint{lat: 53.556656, lon: 9.998672, time_offset_ms: 137_879},
%Video.TimedPoint{lat: 53.556683, lon: 9.998522, time_offset_ms: 138_213},
%Video.TimedPoint{lat: 53.556715, lon: 9.998372, time_offset_ms: 138_547},
%Video.TimedPoint{lat: 53.556752, lon: 9.998228, time_offset_ms: 138_880},
%Video.TimedPoint{lat: 53.556792, lon: 9.998083, time_offset_ms: 139_214},
%Video.TimedPoint{lat: 53.556832, lon: 9.997937, time_offset_ms: 139_547},
%Video.TimedPoint{lat: 53.55687, lon: 9.997794, time_offset_ms: 139_881},
%Video.TimedPoint{lat: 53.556907, lon: 9.997651, time_offset_ms: 140_215},
%Video.TimedPoint{lat: 53.556945, lon: 9.997502, time_offset_ms: 140_548},
%Video.TimedPoint{lat: 53.556982, lon: 9.997358, time_offset_ms: 140_882},
%Video.TimedPoint{lat: 53.55702, lon: 9.99721, time_offset_ms: 141_215},
%Video.TimedPoint{lat: 53.557062, lon: 9.997057, time_offset_ms: 141_549},
%Video.TimedPoint{lat: 53.557101, lon: 9.996902, time_offset_ms: 141_883},
%Video.TimedPoint{lat: 53.557141, lon: 9.996742, time_offset_ms: 142_216},
%Video.TimedPoint{lat: 53.557186, lon: 9.996578, time_offset_ms: 142_550},
%Video.TimedPoint{lat: 53.557234, lon: 9.996403, time_offset_ms: 142_883},
%Video.TimedPoint{lat: 53.557279, lon: 9.996226, time_offset_ms: 143_217},
%Video.TimedPoint{lat: 53.557322, lon: 9.996047, time_offset_ms: 143_551},
%Video.TimedPoint{lat: 53.557366, lon: 9.995862, time_offset_ms: 143_884},
%Video.TimedPoint{lat: 53.55741, lon: 9.995683, time_offset_ms: 144_218},
%Video.TimedPoint{lat: 53.557452, lon: 9.995496, time_offset_ms: 144_551},
%Video.TimedPoint{lat: 53.557491, lon: 9.995318, time_offset_ms: 144_885},
%Video.TimedPoint{lat: 53.557533, lon: 9.995143, time_offset_ms: 145_219},
%Video.TimedPoint{lat: 53.557567, lon: 9.994967, time_offset_ms: 145_552},
%Video.TimedPoint{lat: 53.557602, lon: 9.994792, time_offset_ms: 145_886},
%Video.TimedPoint{lat: 53.557636, lon: 9.994626, time_offset_ms: 146_219},
%Video.TimedPoint{lat: 53.557671, lon: 9.994461, time_offset_ms: 146_553},
%Video.TimedPoint{lat: 53.557701, lon: 9.99431, time_offset_ms: 146_887},
%Video.TimedPoint{lat: 53.557729, lon: 9.994202, time_offset_ms: 147_220},
%Video.TimedPoint{lat: 53.557763, lon: 9.994115, time_offset_ms: 147_554},
%Video.TimedPoint{lat: 53.557769, lon: 9.994008, time_offset_ms: 147_887},
%Video.TimedPoint{lat: 53.557769, lon: 9.993911, time_offset_ms: 148_221},
%Video.TimedPoint{lat: 53.557776, lon: 9.993871, time_offset_ms: 148_555},
%Video.TimedPoint{lat: 53.557777, lon: 9.993841, time_offset_ms: 148_640},
%Video.TimedPoint{lat: 53.557782, lon: 9.993821, time_offset_ms: 148_974},
%Video.TimedPoint{lat: 53.557791, lon: 9.993771, time_offset_ms: 149_307},
%Video.TimedPoint{lat: 53.557803, lon: 9.993713, time_offset_ms: 149_641},
%Video.TimedPoint{lat: 53.557816, lon: 9.993658, time_offset_ms: 149_974},
%Video.TimedPoint{lat: 53.557824, lon: 9.993614, time_offset_ms: 150_308},
%Video.TimedPoint{lat: 53.55783, lon: 9.993575, time_offset_ms: 150_641},
%Video.TimedPoint{lat: 53.55784, lon: 9.993545, time_offset_ms: 150_975},
%Video.TimedPoint{lat: 53.557847, lon: 9.993541, time_offset_ms: 151_060},
%Video.TimedPoint{lat: 53.557849, lon: 9.993535, time_offset_ms: 151_394},
%Video.TimedPoint{lat: 53.557856, lon: 9.993487, time_offset_ms: 151_727},
%Video.TimedPoint{lat: 53.557872, lon: 9.993403, time_offset_ms: 152_061},
%Video.TimedPoint{lat: 53.557895, lon: 9.993303, time_offset_ms: 152_394},
%Video.TimedPoint{lat: 53.557925, lon: 9.993204, time_offset_ms: 152_728},
%Video.TimedPoint{lat: 53.557969, lon: 9.993146, time_offset_ms: 153_062},
%Video.TimedPoint{lat: 53.558017, lon: 9.993169, time_offset_ms: 153_395},
%Video.TimedPoint{lat: 53.55806, lon: 9.993225, time_offset_ms: 153_729},
%Video.TimedPoint{lat: 53.558109, lon: 9.99327, time_offset_ms: 154_062},
%Video.TimedPoint{lat: 53.558148, lon: 9.993292, time_offset_ms: 154_396},
%Video.TimedPoint{lat: 53.558176, lon: 9.993263, time_offset_ms: 154_730},
%Video.TimedPoint{lat: 53.558203, lon: 9.993196, time_offset_ms: 155_063},
%Video.TimedPoint{lat: 53.558228, lon: 9.993093, time_offset_ms: 155_397},
%Video.TimedPoint{lat: 53.558253, lon: 9.992975, time_offset_ms: 155_730},
%Video.TimedPoint{lat: 53.558263, lon: 9.992845, time_offset_ms: 156_064},
%Video.TimedPoint{lat: 53.558279, lon: 9.992723, time_offset_ms: 156_398},
%Video.TimedPoint{lat: 53.558312, lon: 9.992596, time_offset_ms: 156_731},
%Video.TimedPoint{lat: 53.55833, lon: 9.992465, time_offset_ms: 157_065},
%Video.TimedPoint{lat: 53.55835, lon: 9.992337, time_offset_ms: 157_398},
%Video.TimedPoint{lat: 53.558364, lon: 9.992201, time_offset_ms: 157_732},
%Video.TimedPoint{lat: 53.558396, lon: 9.992068, time_offset_ms: 158_066},
%Video.TimedPoint{lat: 53.558429, lon: 9.991945, time_offset_ms: 158_399},
%Video.TimedPoint{lat: 53.55845, lon: 9.99182, time_offset_ms: 158_733},
%Video.TimedPoint{lat: 53.558464, lon: 9.991683, time_offset_ms: 159_066},
%Video.TimedPoint{lat: 53.558487, lon: 9.991547, time_offset_ms: 159_400},
%Video.TimedPoint{lat: 53.558532, lon: 9.991432, time_offset_ms: 159_734},
%Video.TimedPoint{lat: 53.558586, lon: 9.991343, time_offset_ms: 160_067},
%Video.TimedPoint{lat: 53.558645, lon: 9.991269, time_offset_ms: 160_401},
%Video.TimedPoint{lat: 53.558681, lon: 9.991186, time_offset_ms: 160_734},
%Video.TimedPoint{lat: 53.558693, lon: 9.991091, time_offset_ms: 161_068},
%Video.TimedPoint{lat: 53.558703, lon: 9.990992, time_offset_ms: 161_402},
%Video.TimedPoint{lat: 53.558716, lon: 9.990889, time_offset_ms: 161_735},
%Video.TimedPoint{lat: 53.558735, lon: 9.990766, time_offset_ms: 162_069},
%Video.TimedPoint{lat: 53.558754, lon: 9.990633, time_offset_ms: 162_402},
%Video.TimedPoint{lat: 53.558773, lon: 9.990503, time_offset_ms: 162_736},
%Video.TimedPoint{lat: 53.558789, lon: 9.990371, time_offset_ms: 163_070},
%Video.TimedPoint{lat: 53.558806, lon: 9.990239, time_offset_ms: 163_403},
%Video.TimedPoint{lat: 53.558818, lon: 9.990123, time_offset_ms: 163_737},
%Video.TimedPoint{lat: 53.558808, lon: 9.990032, time_offset_ms: 164_070},
%Video.TimedPoint{lat: 53.558767, lon: 9.989955, time_offset_ms: 164_404},
%Video.TimedPoint{lat: 53.558724, lon: 9.989884, time_offset_ms: 164_737},
%Video.TimedPoint{lat: 53.558684, lon: 9.989812, time_offset_ms: 165_071},
%Video.TimedPoint{lat: 53.558638, lon: 9.989755, time_offset_ms: 165_405},
%Video.TimedPoint{lat: 53.558584, lon: 9.989713, time_offset_ms: 165_738},
%Video.TimedPoint{lat: 53.55853, lon: 9.98968, time_offset_ms: 166_072},
%Video.TimedPoint{lat: 53.558481, lon: 9.989648, time_offset_ms: 166_405},
%Video.TimedPoint{lat: 53.558462, lon: 9.989622, time_offset_ms: 166_739},
%Video.TimedPoint{lat: 53.558464, lon: 9.989611, time_offset_ms: 167_073},
%Video.TimedPoint{lat: 53.558471, lon: 9.989605, time_offset_ms: 167_406},
%Video.TimedPoint{lat: 53.558478, lon: 9.989592, time_offset_ms: 167_740},
%Video.TimedPoint{lat: 53.558481, lon: 9.989553, time_offset_ms: 168_073},
%Video.TimedPoint{lat: 53.558484, lon: 9.989515, time_offset_ms: 168_407},
%Video.TimedPoint{lat: 53.558482, lon: 9.989489, time_offset_ms: 168_741},
%Video.TimedPoint{lat: 53.558472, lon: 9.989423, time_offset_ms: 168_826},
%Video.TimedPoint{lat: 53.558471, lon: 9.989408, time_offset_ms: 169_160},
%Video.TimedPoint{lat: 53.558465, lon: 9.989353, time_offset_ms: 169_493},
%Video.TimedPoint{lat: 53.558453, lon: 9.989282, time_offset_ms: 169_827},
%Video.TimedPoint{lat: 53.558461, lon: 9.989217, time_offset_ms: 170_160},
%Video.TimedPoint{lat: 53.55847, lon: 9.98915, time_offset_ms: 170_494},
%Video.TimedPoint{lat: 53.558482, lon: 9.989071, time_offset_ms: 170_828},
%Video.TimedPoint{lat: 53.558496, lon: 9.988997, time_offset_ms: 171_161},
%Video.TimedPoint{lat: 53.558482, lon: 9.988924, time_offset_ms: 171_495},
%Video.TimedPoint{lat: 53.558455, lon: 9.988828, time_offset_ms: 171_828},
%Video.TimedPoint{lat: 53.558435, lon: 9.98871, time_offset_ms: 172_162},
%Video.TimedPoint{lat: 53.558412, lon: 9.988583, time_offset_ms: 172_496},
%Video.TimedPoint{lat: 53.558392, lon: 9.988453, time_offset_ms: 172_829},
%Video.TimedPoint{lat: 53.558379, lon: 9.988319, time_offset_ms: 173_163},
%Video.TimedPoint{lat: 53.558365, lon: 9.988179, time_offset_ms: 173_496},
%Video.TimedPoint{lat: 53.558352, lon: 9.988043, time_offset_ms: 173_830},
%Video.TimedPoint{lat: 53.558332, lon: 9.987907, time_offset_ms: 174_164},
%Video.TimedPoint{lat: 53.558312, lon: 9.98777, time_offset_ms: 174_497},
%Video.TimedPoint{lat: 53.558293, lon: 9.987631, time_offset_ms: 174_831},
%Video.TimedPoint{lat: 53.558278, lon: 9.98749, time_offset_ms: 175_164},
%Video.TimedPoint{lat: 53.558263, lon: 9.987342, time_offset_ms: 175_498},
%Video.TimedPoint{lat: 53.558253, lon: 9.987198, time_offset_ms: 175_832},
%Video.TimedPoint{lat: 53.558243, lon: 9.98705, time_offset_ms: 176_165},
%Video.TimedPoint{lat: 53.55823, lon: 9.986898, time_offset_ms: 176_499},
%Video.TimedPoint{lat: 53.558216, lon: 9.986747, time_offset_ms: 176_832},
%Video.TimedPoint{lat: 53.558207, lon: 9.986595, time_offset_ms: 177_166},
%Video.TimedPoint{lat: 53.558196, lon: 9.98644, time_offset_ms: 177_500},
%Video.TimedPoint{lat: 53.558181, lon: 9.986289, time_offset_ms: 177_833},
%Video.TimedPoint{lat: 53.558166, lon: 9.986135, time_offset_ms: 178_167},
%Video.TimedPoint{lat: 53.558155, lon: 9.985984, time_offset_ms: 178_500},
%Video.TimedPoint{lat: 53.558148, lon: 9.985836, time_offset_ms: 178_834},
%Video.TimedPoint{lat: 53.558138, lon: 9.985685, time_offset_ms: 179_168},
%Video.TimedPoint{lat: 53.558124, lon: 9.985535, time_offset_ms: 179_501},
%Video.TimedPoint{lat: 53.558116, lon: 9.985387, time_offset_ms: 179_835},
%Video.TimedPoint{lat: 53.558104, lon: 9.985236, time_offset_ms: 180_168},
%Video.TimedPoint{lat: 53.558089, lon: 9.985088, time_offset_ms: 180_502},
%Video.TimedPoint{lat: 53.55807, lon: 9.984947, time_offset_ms: 180_836},
%Video.TimedPoint{lat: 53.558046, lon: 9.984811, time_offset_ms: 181_169},
%Video.TimedPoint{lat: 53.558028, lon: 9.984669, time_offset_ms: 181_503},
%Video.TimedPoint{lat: 53.55801, lon: 9.984533, time_offset_ms: 181_836},
%Video.TimedPoint{lat: 53.557981, lon: 9.984395, time_offset_ms: 182_170},
%Video.TimedPoint{lat: 53.557954, lon: 9.984262, time_offset_ms: 182_504},
%Video.TimedPoint{lat: 53.557924, lon: 9.984129, time_offset_ms: 182_837},
%Video.TimedPoint{lat: 53.5579, lon: 9.983997, time_offset_ms: 183_171},
%Video.TimedPoint{lat: 53.557906, lon: 9.983864, time_offset_ms: 183_504},
%Video.TimedPoint{lat: 53.557904, lon: 9.983732, time_offset_ms: 183_838},
%Video.TimedPoint{lat: 53.557868, lon: 9.9836, time_offset_ms: 184_172},
%Video.TimedPoint{lat: 53.557811, lon: 9.983495, time_offset_ms: 184_505},
%Video.TimedPoint{lat: 53.55774, lon: 9.983415, time_offset_ms: 184_839},
%Video.TimedPoint{lat: 53.557685, lon: 9.983301, time_offset_ms: 185_172},
%Video.TimedPoint{lat: 53.557645, lon: 9.983172, time_offset_ms: 185_506},
%Video.TimedPoint{lat: 53.557605, lon: 9.983049, time_offset_ms: 185_840},
%Video.TimedPoint{lat: 53.557562, lon: 9.982924, time_offset_ms: 186_173},
%Video.TimedPoint{lat: 53.557517, lon: 9.982797, time_offset_ms: 186_507},
%Video.TimedPoint{lat: 53.557471, lon: 9.982671, time_offset_ms: 186_840},
%Video.TimedPoint{lat: 53.557425, lon: 9.982543, time_offset_ms: 187_174},
%Video.TimedPoint{lat: 53.557375, lon: 9.982414, time_offset_ms: 187_508},
%Video.TimedPoint{lat: 53.557325, lon: 9.982293, time_offset_ms: 187_841},
%Video.TimedPoint{lat: 53.557275, lon: 9.982172, time_offset_ms: 188_175},
%Video.TimedPoint{lat: 53.557222, lon: 9.98205, time_offset_ms: 188_508},
%Video.TimedPoint{lat: 53.557171, lon: 9.98193, time_offset_ms: 188_842},
%Video.TimedPoint{lat: 53.557116, lon: 9.981809, time_offset_ms: 189_176},
%Video.TimedPoint{lat: 53.557059, lon: 9.981686, time_offset_ms: 189_509},
%Video.TimedPoint{lat: 53.557003, lon: 9.981566, time_offset_ms: 189_843},
%Video.TimedPoint{lat: 53.556944, lon: 9.981451, time_offset_ms: 190_176},
%Video.TimedPoint{lat: 53.556885, lon: 9.981336, time_offset_ms: 190_510},
%Video.TimedPoint{lat: 53.556825, lon: 9.981225, time_offset_ms: 190_844},
%Video.TimedPoint{lat: 53.556763, lon: 9.981116, time_offset_ms: 191_177},
%Video.TimedPoint{lat: 53.556703, lon: 9.981004, time_offset_ms: 191_511},
%Video.TimedPoint{lat: 53.556642, lon: 9.980891, time_offset_ms: 191_844},
%Video.TimedPoint{lat: 53.556579, lon: 9.980776, time_offset_ms: 192_178},
%Video.TimedPoint{lat: 53.556512, lon: 9.980661, time_offset_ms: 192_512},
%Video.TimedPoint{lat: 53.556449, lon: 9.980547, time_offset_ms: 192_845},
%Video.TimedPoint{lat: 53.556388, lon: 9.980431, time_offset_ms: 193_179},
%Video.TimedPoint{lat: 53.556327, lon: 9.980321, time_offset_ms: 193_512},
%Video.TimedPoint{lat: 53.556269, lon: 9.980219, time_offset_ms: 193_846},
%Video.TimedPoint{lat: 53.556211, lon: 9.980119, time_offset_ms: 194_180},
%Video.TimedPoint{lat: 53.556149, lon: 9.980009, time_offset_ms: 194_513},
%Video.TimedPoint{lat: 53.556085, lon: 9.979899, time_offset_ms: 194_847},
%Video.TimedPoint{lat: 53.556025, lon: 9.979801, time_offset_ms: 195_180},
%Video.TimedPoint{lat: 53.555983, lon: 9.979732, time_offset_ms: 195_514},
%Video.TimedPoint{lat: 53.555964, lon: 9.9797, time_offset_ms: 195_848},
%Video.TimedPoint{lat: 53.55595, lon: 9.979685, time_offset_ms: 195_933},
%Video.TimedPoint{lat: 53.55594, lon: 9.979668, time_offset_ms: 196_267},
%Video.TimedPoint{lat: 53.555909, lon: 9.979615, time_offset_ms: 196_600},
%Video.TimedPoint{lat: 53.555859, lon: 9.979546, time_offset_ms: 196_934},
%Video.TimedPoint{lat: 53.5558, lon: 9.979465, time_offset_ms: 197_267},
%Video.TimedPoint{lat: 53.555737, lon: 9.979368, time_offset_ms: 197_601},
%Video.TimedPoint{lat: 53.555668, lon: 9.979271, time_offset_ms: 197_935},
%Video.TimedPoint{lat: 53.555595, lon: 9.979174, time_offset_ms: 198_268},
%Video.TimedPoint{lat: 53.555516, lon: 9.979078, time_offset_ms: 198_602},
%Video.TimedPoint{lat: 53.55544, lon: 9.978979, time_offset_ms: 198_935},
%Video.TimedPoint{lat: 53.555368, lon: 9.978867, time_offset_ms: 199_269},
%Video.TimedPoint{lat: 53.555294, lon: 9.978753, time_offset_ms: 199_603},
%Video.TimedPoint{lat: 53.555214, lon: 9.97866, time_offset_ms: 199_936},
%Video.TimedPoint{lat: 53.555127, lon: 9.978581, time_offset_ms: 200_270},
%Video.TimedPoint{lat: 53.555035, lon: 9.97851, time_offset_ms: 200_603},
%Video.TimedPoint{lat: 53.554946, lon: 9.978446, time_offset_ms: 200_937},
%Video.TimedPoint{lat: 53.554862, lon: 9.978363, time_offset_ms: 201_271},
%Video.TimedPoint{lat: 53.554779, lon: 9.978266, time_offset_ms: 201_604},
%Video.TimedPoint{lat: 53.554701, lon: 9.978167, time_offset_ms: 201_938},
%Video.TimedPoint{lat: 53.554619, lon: 9.97807, time_offset_ms: 202_271},
%Video.TimedPoint{lat: 53.554534, lon: 9.977969, time_offset_ms: 202_605},
%Video.TimedPoint{lat: 53.554452, lon: 9.977866, time_offset_ms: 202_939},
%Video.TimedPoint{lat: 53.554373, lon: 9.977764, time_offset_ms: 203_272},
%Video.TimedPoint{lat: 53.554288, lon: 9.977669, time_offset_ms: 203_606},
%Video.TimedPoint{lat: 53.554204, lon: 9.977566, time_offset_ms: 203_939},
%Video.TimedPoint{lat: 53.554126, lon: 9.977467, time_offset_ms: 204_273},
%Video.TimedPoint{lat: 53.554047, lon: 9.977369, time_offset_ms: 204_607},
%Video.TimedPoint{lat: 53.553966, lon: 9.977267, time_offset_ms: 204_940},
%Video.TimedPoint{lat: 53.553883, lon: 9.977171, time_offset_ms: 205_274},
%Video.TimedPoint{lat: 53.553801, lon: 9.977075, time_offset_ms: 205_607},
%Video.TimedPoint{lat: 53.55372, lon: 9.976974, time_offset_ms: 205_941},
%Video.TimedPoint{lat: 53.553642, lon: 9.976879, time_offset_ms: 206_275},
%Video.TimedPoint{lat: 53.553566, lon: 9.976787, time_offset_ms: 206_608},
%Video.TimedPoint{lat: 53.553488, lon: 9.976696, time_offset_ms: 206_942},
%Video.TimedPoint{lat: 53.553411, lon: 9.97661, time_offset_ms: 207_275},
%Video.TimedPoint{lat: 53.553336, lon: 9.976522, time_offset_ms: 207_609},
%Video.TimedPoint{lat: 53.553262, lon: 9.976426, time_offset_ms: 207_942},
%Video.TimedPoint{lat: 53.55319, lon: 9.976333, time_offset_ms: 208_276},
%Video.TimedPoint{lat: 53.553113, lon: 9.976248, time_offset_ms: 208_610},
%Video.TimedPoint{lat: 53.553029, lon: 9.976165, time_offset_ms: 208_943},
%Video.TimedPoint{lat: 53.552945, lon: 9.97608, time_offset_ms: 209_277},
%Video.TimedPoint{lat: 53.552862, lon: 9.975994, time_offset_ms: 209_610},
%Video.TimedPoint{lat: 53.55278, lon: 9.975909, time_offset_ms: 209_944},
%Video.TimedPoint{lat: 53.552695, lon: 9.975831, time_offset_ms: 210_278},
%Video.TimedPoint{lat: 53.552611, lon: 9.975756, time_offset_ms: 210_611},
%Video.TimedPoint{lat: 53.552523, lon: 9.975676, time_offset_ms: 210_945},
%Video.TimedPoint{lat: 53.552435, lon: 9.975594, time_offset_ms: 211_278},
%Video.TimedPoint{lat: 53.55235, lon: 9.97551, time_offset_ms: 211_612},
%Video.TimedPoint{lat: 53.552264, lon: 9.97543, time_offset_ms: 211_946},
%Video.TimedPoint{lat: 53.552181, lon: 9.975348, time_offset_ms: 212_279},
%Video.TimedPoint{lat: 53.552097, lon: 9.975267, time_offset_ms: 212_613},
%Video.TimedPoint{lat: 53.55201, lon: 9.975188, time_offset_ms: 212_946},
%Video.TimedPoint{lat: 53.551926, lon: 9.975106, time_offset_ms: 213_280},
%Video.TimedPoint{lat: 53.551843, lon: 9.975028, time_offset_ms: 213_614},
%Video.TimedPoint{lat: 53.551761, lon: 9.974947, time_offset_ms: 213_947},
%Video.TimedPoint{lat: 53.551678, lon: 9.974868, time_offset_ms: 214_281},
%Video.TimedPoint{lat: 53.5516, lon: 9.974777, time_offset_ms: 214_614},
%Video.TimedPoint{lat: 53.551514, lon: 9.974691, time_offset_ms: 214_948},
%Video.TimedPoint{lat: 53.551427, lon: 9.974615, time_offset_ms: 215_282},
%Video.TimedPoint{lat: 53.55134, lon: 9.974553, time_offset_ms: 215_615},
%Video.TimedPoint{lat: 53.551253, lon: 9.974487, time_offset_ms: 215_949},
%Video.TimedPoint{lat: 53.551169, lon: 9.974414, time_offset_ms: 216_282},
%Video.TimedPoint{lat: 53.551087, lon: 9.97434, time_offset_ms: 216_616},
%Video.TimedPoint{lat: 53.551002, lon: 9.974263, time_offset_ms: 216_950},
%Video.TimedPoint{lat: 53.550916, lon: 9.974189, time_offset_ms: 217_283},
%Video.TimedPoint{lat: 53.550829, lon: 9.974118, time_offset_ms: 217_617},
%Video.TimedPoint{lat: 53.550747, lon: 9.974048, time_offset_ms: 217_950},
%Video.TimedPoint{lat: 53.55066, lon: 9.973978, time_offset_ms: 218_284},
%Video.TimedPoint{lat: 53.550575, lon: 9.973912, time_offset_ms: 218_618},
%Video.TimedPoint{lat: 53.550489, lon: 9.973849, time_offset_ms: 218_951},
%Video.TimedPoint{lat: 53.550409, lon: 9.973779, time_offset_ms: 219_285},
%Video.TimedPoint{lat: 53.550333, lon: 9.973708, time_offset_ms: 219_618},
%Video.TimedPoint{lat: 53.550252, lon: 9.973636, time_offset_ms: 219_952},
%Video.TimedPoint{lat: 53.550178, lon: 9.97356, time_offset_ms: 220_286},
%Video.TimedPoint{lat: 53.550114, lon: 9.973473, time_offset_ms: 220_619},
%Video.TimedPoint{lat: 53.550073, lon: 9.973384, time_offset_ms: 220_953},
%Video.TimedPoint{lat: 53.55004, lon: 9.973305, time_offset_ms: 221_286},
%Video.TimedPoint{lat: 53.550007, lon: 9.973304, time_offset_ms: 221_620},
%Video.TimedPoint{lat: 53.549981, lon: 9.973378, time_offset_ms: 221_954},
%Video.TimedPoint{lat: 53.549962, lon: 9.973478, time_offset_ms: 222_287},
%Video.TimedPoint{lat: 53.54994, lon: 9.973578, time_offset_ms: 222_621},
%Video.TimedPoint{lat: 53.549916, lon: 9.973675, time_offset_ms: 222_954},
%Video.TimedPoint{lat: 53.549895, lon: 9.973754, time_offset_ms: 223_288},
%Video.TimedPoint{lat: 53.549874, lon: 9.973785, time_offset_ms: 223_622},
%Video.TimedPoint{lat: 53.549867, lon: 9.973785, time_offset_ms: 223_707},
%Video.TimedPoint{lat: 53.549858, lon: 9.973779, time_offset_ms: 224_041},
%Video.TimedPoint{lat: 53.549819, lon: 9.973758, time_offset_ms: 224_374},
%Video.TimedPoint{lat: 53.549771, lon: 9.973735, time_offset_ms: 224_708},
%Video.TimedPoint{lat: 53.549722, lon: 9.973708, time_offset_ms: 225_041},
%Video.TimedPoint{lat: 53.549688, lon: 9.97369, time_offset_ms: 225_375},
%Video.TimedPoint{lat: 53.549667, lon: 9.973678, time_offset_ms: 225_708},
%Video.TimedPoint{lat: 53.549652, lon: 9.973671, time_offset_ms: 226_042},
%Video.TimedPoint{lat: 53.549638, lon: 9.973667, time_offset_ms: 226_376},
%Video.TimedPoint{lat: 53.549612, lon: 9.973658, time_offset_ms: 226_461},
%Video.TimedPoint{lat: 53.5496, lon: 9.973655, time_offset_ms: 226_795},
%Video.TimedPoint{lat: 53.54957, lon: 9.973641, time_offset_ms: 227_128},
%Video.TimedPoint{lat: 53.549532, lon: 9.973626, time_offset_ms: 227_462},
%Video.TimedPoint{lat: 53.549507, lon: 9.973586, time_offset_ms: 227_795},
%Video.TimedPoint{lat: 53.549494, lon: 9.97354, time_offset_ms: 228_129},
%Video.TimedPoint{lat: 53.549489, lon: 9.973493, time_offset_ms: 228_463},
%Video.TimedPoint{lat: 53.54948, lon: 9.973446, time_offset_ms: 228_796},
%Video.TimedPoint{lat: 53.549472, lon: 9.973406, time_offset_ms: 229_130},
%Video.TimedPoint{lat: 53.549469, lon: 9.973362, time_offset_ms: 229_463},
%Video.TimedPoint{lat: 53.549469, lon: 9.973318, time_offset_ms: 229_797},
%Video.TimedPoint{lat: 53.549472, lon: 9.973275, time_offset_ms: 230_131},
%Video.TimedPoint{lat: 53.549473, lon: 9.973236, time_offset_ms: 230_464},
%Video.TimedPoint{lat: 53.549475, lon: 9.973196, time_offset_ms: 230_798},
%Video.TimedPoint{lat: 53.549476, lon: 9.973155, time_offset_ms: 231_131},
%Video.TimedPoint{lat: 53.549477, lon: 9.973107, time_offset_ms: 231_465},
%Video.TimedPoint{lat: 53.549481, lon: 9.973055, time_offset_ms: 231_799},
%Video.TimedPoint{lat: 53.549488, lon: 9.972997, time_offset_ms: 232_132},
%Video.TimedPoint{lat: 53.549495, lon: 9.97294, time_offset_ms: 232_466},
%Video.TimedPoint{lat: 53.549507, lon: 9.972881, time_offset_ms: 232_799},
%Video.TimedPoint{lat: 53.549515, lon: 9.972811, time_offset_ms: 233_133},
%Video.TimedPoint{lat: 53.549518, lon: 9.972731, time_offset_ms: 233_467},
%Video.TimedPoint{lat: 53.549516, lon: 9.97265, time_offset_ms: 233_800},
%Video.TimedPoint{lat: 53.549512, lon: 9.972566, time_offset_ms: 234_134},
%Video.TimedPoint{lat: 53.549502, lon: 9.972464, time_offset_ms: 234_467},
%Video.TimedPoint{lat: 53.549492, lon: 9.97236, time_offset_ms: 234_801},
%Video.TimedPoint{lat: 53.549483, lon: 9.972253, time_offset_ms: 235_135},
%Video.TimedPoint{lat: 53.54947, lon: 9.97214, time_offset_ms: 235_468},
%Video.TimedPoint{lat: 53.549456, lon: 9.97203, time_offset_ms: 235_802},
%Video.TimedPoint{lat: 53.549442, lon: 9.971923, time_offset_ms: 236_135},
%Video.TimedPoint{lat: 53.549424, lon: 9.971819, time_offset_ms: 236_469},
%Video.TimedPoint{lat: 53.549404, lon: 9.971725, time_offset_ms: 236_803},
%Video.TimedPoint{lat: 53.54938, lon: 9.971647, time_offset_ms: 237_136},
%Video.TimedPoint{lat: 53.549355, lon: 9.971583, time_offset_ms: 237_470},
%Video.TimedPoint{lat: 53.549342, lon: 9.971528, time_offset_ms: 237_803},
%Video.TimedPoint{lat: 53.549329, lon: 9.971481, time_offset_ms: 238_137},
%Video.TimedPoint{lat: 53.549319, lon: 9.971429, time_offset_ms: 238_471},
%Video.TimedPoint{lat: 53.54931, lon: 9.971371, time_offset_ms: 238_804},
%Video.TimedPoint{lat: 53.549292, lon: 9.971312, time_offset_ms: 239_138},
%Video.TimedPoint{lat: 53.549266, lon: 9.971249, time_offset_ms: 239_471},
%Video.TimedPoint{lat: 53.54923, lon: 9.971173, time_offset_ms: 239_805},
%Video.TimedPoint{lat: 53.549193, lon: 9.971109, time_offset_ms: 240_139},
%Video.TimedPoint{lat: 53.549151, lon: 9.971043, time_offset_ms: 240_472},
%Video.TimedPoint{lat: 53.549102, lon: 9.970987, time_offset_ms: 240_806},
%Video.TimedPoint{lat: 53.549057, lon: 9.970931, time_offset_ms: 241_139},
%Video.TimedPoint{lat: 53.549024, lon: 9.970858, time_offset_ms: 241_473},
%Video.TimedPoint{lat: 53.549008, lon: 9.970774, time_offset_ms: 241_807},
%Video.TimedPoint{lat: 53.549003, lon: 9.970696, time_offset_ms: 242_140},
%Video.TimedPoint{lat: 53.549001, lon: 9.97062, time_offset_ms: 242_474},
%Video.TimedPoint{lat: 53.548995, lon: 9.970552, time_offset_ms: 242_807},
%Video.TimedPoint{lat: 53.548985, lon: 9.970488, time_offset_ms: 243_141},
%Video.TimedPoint{lat: 53.548957, lon: 9.970439, time_offset_ms: 243_475},
%Video.TimedPoint{lat: 53.548925, lon: 9.970425, time_offset_ms: 243_808},
%Video.TimedPoint{lat: 53.548895, lon: 9.970414, time_offset_ms: 244_142},
%Video.TimedPoint{lat: 53.548867, lon: 9.970389, time_offset_ms: 244_475},
%Video.TimedPoint{lat: 53.548852, lon: 9.970352, time_offset_ms: 244_809},
%Video.TimedPoint{lat: 53.548833, lon: 9.970302, time_offset_ms: 245_143},
%Video.TimedPoint{lat: 53.548803, lon: 9.970255, time_offset_ms: 245_476},
%Video.TimedPoint{lat: 53.548777, lon: 9.970238, time_offset_ms: 245_810},
%Video.TimedPoint{lat: 53.54874, lon: 9.970254, time_offset_ms: 246_143},
%Video.TimedPoint{lat: 53.548682, lon: 9.970299, time_offset_ms: 246_477},
%Video.TimedPoint{lat: 53.548611, lon: 9.970347, time_offset_ms: 246_811},
%Video.TimedPoint{lat: 53.548534, lon: 9.970401, time_offset_ms: 247_144},
%Video.TimedPoint{lat: 53.548449, lon: 9.970462, time_offset_ms: 247_478},
%Video.TimedPoint{lat: 53.548361, lon: 9.970523, time_offset_ms: 247_811},
%Video.TimedPoint{lat: 53.548273, lon: 9.970591, time_offset_ms: 248_145},
%Video.TimedPoint{lat: 53.548185, lon: 9.970658, time_offset_ms: 248_479},
%Video.TimedPoint{lat: 53.548095, lon: 9.970722, time_offset_ms: 248_812},
%Video.TimedPoint{lat: 53.548004, lon: 9.970784, time_offset_ms: 249_146},
%Video.TimedPoint{lat: 53.547908, lon: 9.970851, time_offset_ms: 249_479},
%Video.TimedPoint{lat: 53.547815, lon: 9.970916, time_offset_ms: 249_813},
%Video.TimedPoint{lat: 53.54772, lon: 9.97097, time_offset_ms: 250_147},
%Video.TimedPoint{lat: 53.54762, lon: 9.971004, time_offset_ms: 250_480},
%Video.TimedPoint{lat: 53.547522, lon: 9.971021, time_offset_ms: 250_814},
%Video.TimedPoint{lat: 53.547424, lon: 9.971009, time_offset_ms: 251_147},
%Video.TimedPoint{lat: 53.547332, lon: 9.970956, time_offset_ms: 251_481},
%Video.TimedPoint{lat: 53.547237, lon: 9.970887, time_offset_ms: 251_815},
%Video.TimedPoint{lat: 53.54715, lon: 9.970804, time_offset_ms: 252_148},
%Video.TimedPoint{lat: 53.547063, lon: 9.970697, time_offset_ms: 252_482},
%Video.TimedPoint{lat: 53.546983, lon: 9.97058, time_offset_ms: 252_815},
%Video.TimedPoint{lat: 53.546906, lon: 9.970468, time_offset_ms: 253_149},
%Video.TimedPoint{lat: 53.54683, lon: 9.970362, time_offset_ms: 253_483},
%Video.TimedPoint{lat: 53.54676, lon: 9.970273, time_offset_ms: 253_816},
%Video.TimedPoint{lat: 53.546699, lon: 9.970187, time_offset_ms: 254_150},
%Video.TimedPoint{lat: 53.546638, lon: 9.970085, time_offset_ms: 254_483},
%Video.TimedPoint{lat: 53.54658, lon: 9.969976, time_offset_ms: 254_817},
%Video.TimedPoint{lat: 53.546532, lon: 9.969858, time_offset_ms: 255_151},
%Video.TimedPoint{lat: 53.546484, lon: 9.969733, time_offset_ms: 255_484},
%Video.TimedPoint{lat: 53.546429, lon: 9.969614, time_offset_ms: 255_818},
%Video.TimedPoint{lat: 53.546367, lon: 9.969505, time_offset_ms: 256_151},
%Video.TimedPoint{lat: 53.5463, lon: 9.969411, time_offset_ms: 256_485},
%Video.TimedPoint{lat: 53.546244, lon: 9.96932, time_offset_ms: 256_819},
%Video.TimedPoint{lat: 53.546202, lon: 9.969238, time_offset_ms: 257_152},
%Video.TimedPoint{lat: 53.546168, lon: 9.969179, time_offset_ms: 257_486},
%Video.TimedPoint{lat: 53.546153, lon: 9.969156, time_offset_ms: 257_819},
%Video.TimedPoint{lat: 53.546143, lon: 9.969143, time_offset_ms: 257_904},
%Video.TimedPoint{lat: 53.546135, lon: 9.969139, time_offset_ms: 258_238},
%Video.TimedPoint{lat: 53.546106, lon: 9.96913, time_offset_ms: 258_571},
%Video.TimedPoint{lat: 53.546066, lon: 9.969117, time_offset_ms: 258_905},
%Video.TimedPoint{lat: 53.546019, lon: 9.9691, time_offset_ms: 259_238},
%Video.TimedPoint{lat: 53.54598, lon: 9.969093, time_offset_ms: 259_572},
%Video.TimedPoint{lat: 53.545965, lon: 9.969111, time_offset_ms: 259_905}
]
end
@impl Video.Rendered
# This module carries a pre-computed (baked-in) track, so it always reports
# itself as already rendered.
def rendered? do
  true
end
end | 77.503589 | 96 | 0.700955 |
e82287bd2df39b73c182c9a7087a65dbd3c6dac2 | 718 | ex | Elixir | lib/wanikani/review.ex | eltercero/wanikani | 5ed47f315ee9800ab5dbbc21848a337d24d52778 | [
"MIT"
] | 2 | 2018-01-29T02:02:05.000Z | 2018-07-09T18:24:28.000Z | lib/wanikani/review.ex | eltercero/wanikani | 5ed47f315ee9800ab5dbbc21848a337d24d52778 | [
"MIT"
] | null | null | null | lib/wanikani/review.ex | eltercero/wanikani | 5ed47f315ee9800ab5dbbc21848a337d24d52778 | [
"MIT"
] | null | null | null | defmodule Wanikani.Review do
@moduledoc """
Module for the requests to the Reviews endpoint
"""
@doc """
Get a list of reviews.
Accepted params:
* ids Integers Return results tied to ids
* subject_ids Integers Return result tied to subject_ids
* updated_after ISO8601 timestamp in the past Return results which have been updated after the timestamp
```
Wanikani.Review.list()
```
"""
def list(params \\ %{}, client \\ Wanikani.Api) do
client.request("reviews", params)
end
@doc """
Get a single review study material.
```
Wanikani.Review.get(592160)
```
"""
def get(subject_id, client \\ Wanikani.Api) do
client.request("reviews/#{subject_id}")
end
end | 21.757576 | 106 | 0.671309 |
e82294f664b3ff75025eb3b98e39e55166d13972 | 260 | ex | Elixir | apps/massa_proxy/lib/massa_proxy/runtime/grpc/protocol/eventsourced/eventsourced_handler.ex | drowzy/massa | 624cb02e0039b0624c534636f96fd157b1e34a95 | [
"Apache-2.0"
] | 5 | 2021-04-21T22:26:04.000Z | 2021-11-08T12:00:24.000Z | apps/massa_proxy/lib/massa_proxy/runtime/grpc/protocol/eventsourced/eventsourced_handler.ex | drowzy/massa | 624cb02e0039b0624c534636f96fd157b1e34a95 | [
"Apache-2.0"
] | 15 | 2021-08-03T15:39:44.000Z | 2022-02-28T19:21:35.000Z | apps/massa_proxy/lib/massa_proxy/runtime/grpc/protocol/eventsourced/eventsourced_handler.ex | drowzy/massa | 624cb02e0039b0624c534636f96fd157b1e34a95 | [
"Apache-2.0"
] | null | null | null | defmodule MassaProxy.Runtime.Grpc.Protocol.EventSourced.Handler do
@moduledoc """
This module is responsible for handling requests of the EventSourced protocol
"""
@behaviour MassaProxy.Protocol.Handler
@impl true
def handle(payload) do
end
end
| 23.636364 | 79 | 0.773077 |
e822d52a2a4ba042a56598515095b0280fee2eb5 | 2,106 | exs | Elixir | hello/config/dev.exs | MarioMorales7x7/Slack-Elixir | f858c698f035ee8a04a1b8837f1512a69d6b1dc0 | [
"MIT"
] | null | null | null | hello/config/dev.exs | MarioMorales7x7/Slack-Elixir | f858c698f035ee8a04a1b8837f1512a69d6b1dc0 | [
"MIT"
] | 2 | 2021-03-09T21:31:07.000Z | 2021-05-10T18:21:47.000Z | hello/config/dev.exs | MarioMorales7x7/Slack-Elixir | f858c698f035ee8a04a1b8837f1512a69d6b1dc0 | [
"MIT"
] | null | null | null | use Mix.Config
# Configure your database
# Dev database connection for Ecto. Credentials are intentionally trivial
# (local "root" user, empty password) because this file is only loaded in the
# dev environment — never reuse these settings in prod.
# `show_sensitive_data_on_connection_error: true` may echo connection details
# in error output; acceptable for local development only.
config :hello, Hello.Repo,
  username: "root",
  password: "",
  database: "hello_dev",
  hostname: "localhost",
  show_sensitive_data_on_connection_error: true,
  pool_size: 10
# For development, we disable any cache and enable
# debugging and code reloading.
#
# The watchers configuration can be used to run external
# watchers to your application. For example, we use it
# with webpack to recompile .js and .css sources.
# Dev endpoint: plain HTTP on port 4000, in-browser error pages
# (`debug_errors`), live code reloading, and origin checking disabled
# (dev only). The `watchers` entry spawns webpack in watch mode so JS/CSS
# sources rebuild on change; `--watch-stdin` ties the watcher's lifetime to
# the endpoint process.
config :hello, HelloWeb.Endpoint,
  http: [port: 4000],
  debug_errors: true,
  code_reloader: true,
  check_origin: false,
  watchers: [
    node: [
      "node_modules/webpack/bin/webpack.js",
      "--mode",
      "development",
      "--watch-stdin",
      cd: Path.expand("../assets", __DIR__)
    ]
  ]
# ## SSL Support
#
# In order to use HTTPS in development, a self-signed
# certificate can be generated by running the following
# Mix task:
#
# mix phx.gen.cert
#
# Note that this task requires Erlang/OTP 20 or later.
# Run `mix help phx.gen.cert` for more information.
#
# The `http:` config above can be replaced with:
#
# https: [
# port: 4001,
# cipher_suite: :strong,
# keyfile: "priv/cert/selfsigned_key.pem",
# certfile: "priv/cert/selfsigned.pem"
# ],
#
# If desired, both `http:` and `https:` keys can be
# configured to run both http and https servers on
# different ports.
# Watch static and templates for browser reloading.
# Any file matching one of these regexes triggers a reload push to connected
# browsers: compiled static assets, gettext translation files, view modules,
# and EEx templates.
config :hello, HelloWeb.Endpoint,
  live_reload: [
    patterns: [
      ~r"priv/static/.*(js|css|png|jpeg|jpg|gif|svg)$",
      ~r"priv/gettext/.*(po)$",
      ~r"lib/hello_web/{live,views}/.*(ex)$",
      ~r"lib/hello_web/templates/.*(eex)$"
    ]
  ]
# Do not include metadata nor timestamps in development logs
# ($level and $message are Logger console-formatter placeholders).
config :logger, :console, format: "[$level] $message\n"
# Set a higher stacktrace during development. Avoid configuring such
# in production as building large stacktraces may be expensive.
config :phoenix, :stacktrace_depth, 20
# Initialize plugs at runtime for faster development compilation
config :phoenix, :plug_init_mode, :runtime
| 27.350649 | 68 | 0.686135 |
e822d856f301042212fa536e229976f48ffaf939 | 221 | exs | Elixir | test/regressions/i041_hr_incorrect_html_test.exs | brianbroderick/monocle | eeabecea658468479c04a02352271f6304447736 | [
"Apache-2.0"
] | 2 | 2018-02-11T01:18:24.000Z | 2020-01-12T17:19:22.000Z | test/regressions/i041_hr_incorrect_html_test.exs | brianbroderick/monocle | eeabecea658468479c04a02352271f6304447736 | [
"Apache-2.0"
] | null | null | null | test/regressions/i041_hr_incorrect_html_test.exs | brianbroderick/monocle | eeabecea658468479c04a02352271f6304447736 | [
"Apache-2.0"
] | null | null | null | defmodule Regressions.I041HrIncorrectHtmlTest do
use ExUnit.Case
test "https://github.com/pragdave/earmark/issues/41" do
result = Monocle.as_html! "****"
assert result == ~s[<hr class="thick"/>\n]
end
end
| 22.1 | 57 | 0.692308 |
e8230088f9263aae9622b7f199d01e0f4e59c767 | 1,247 | ex | Elixir | test/support/conn_case.ex | EasterPeanut/phoenix-playground | 391e52c7cf805d8acabb265989c801923438c624 | [
"MIT"
] | null | null | null | test/support/conn_case.ex | EasterPeanut/phoenix-playground | 391e52c7cf805d8acabb265989c801923438c624 | [
"MIT"
] | null | null | null | test/support/conn_case.ex | EasterPeanut/phoenix-playground | 391e52c7cf805d8acabb265989c801923438c624 | [
"MIT"
] | null | null | null | defmodule PlaygroundWeb.ConnCase do
@moduledoc """
This module defines the test case to be used by
tests that require setting up a connection.
Such tests rely on `Phoenix.ConnTest` and also
import other functionality to make it easier
to build common data structures and query the data layer.
Finally, if the test case interacts with the database,
we enable the SQL sandbox, so changes done to the database
are reverted at the end of every test. If you are using
PostgreSQL, you can even run database tests asynchronously
by setting `use PlaygroundWeb.ConnCase, async: true`, although
this option is not recommended for other databases.
"""
use ExUnit.CaseTemplate
# Code injected into every test module that does `use PlaygroundWeb.ConnCase`.
using do
  quote do
    # Import conveniences for testing with connections
    import Plug.Conn
    import Phoenix.ConnTest
    import PlaygroundWeb.ConnCase

    alias PlaygroundWeb.Router.Helpers, as: Routes

    # The default endpoint for testing
    @endpoint PlaygroundWeb.Endpoint
  end
end
# Per-test setup: check out a SQL sandbox connection (shared with spawned
# processes when the test is not async) and build a fresh test conn.
setup tags do
  pid = Ecto.Adapters.SQL.Sandbox.start_owner!(Playground.Repo, shared: not tags[:async])
  # Stopping the owner at test exit rolls back any database changes.
  on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end)
  {:ok, conn: Phoenix.ConnTest.build_conn()}
end
end
| 31.175 | 91 | 0.736969 |
e8232a574b61111c5abe7bf25f8872d2924c8815 | 769 | ex | Elixir | lib/scrivener/page.ex | vernomcrp/scrivener | 22f4a1b4f9b7c37417a849deed102a2dac75f48b | [
"MIT"
] | 1 | 2019-02-15T18:10:53.000Z | 2019-02-15T18:10:53.000Z | lib/scrivener/page.ex | vernomcrp/scrivener | 22f4a1b4f9b7c37417a849deed102a2dac75f48b | [
"MIT"
] | null | null | null | lib/scrivener/page.ex | vernomcrp/scrivener | 22f4a1b4f9b7c37417a849deed102a2dac75f48b | [
"MIT"
] | null | null | null | defmodule Scrivener.Page do
@moduledoc """
A `Scrivener.Page` has 5 fields that can be accessed: `entries`, `page_number`, `page_size`, `total_entries` and `total_pages`.
page = MyApp.Module.paginate(params)
page.entries
page.page_number
page.page_size
page.total_entries
page.total_pages
"""
defstruct [:entries, :page_number, :page_size, :total_entries, :total_pages]
@type t :: %__MODULE__{}
# Enumerable implementation so a %Scrivener.Page{} can be enumerated
# directly. count/1, member?/2 and slice/1 return {:error, __MODULE__} so
# the Enumerable protocol falls back to its reduce/3-based default
# algorithms for those operations.
defimpl Enumerable, for: Scrivener.Page do
  def count(_page), do: {:error, __MODULE__}

  def member?(_page, _value), do: {:error, __MODULE__}

  # Enumeration is delegated to the page's entries list.
  def reduce(%Scrivener.Page{entries: entries}, acc, fun) do
    Enumerable.reduce(entries, acc, fun)
  end

  def slice(_page), do: {:error, __MODULE__}
end
end
| 25.633333 | 129 | 0.672302 |
e8232ae8ad3144c63238937e4f3342e97fa174b7 | 3,185 | exs | Elixir | test/threehundred60/locations_test.exs | nesimtunc/ThreeHundred60 | 1dd8465b44f037ecf7d43be9ebd99b9eadcee6af | [
"MIT"
] | 13 | 2020-06-03T08:17:45.000Z | 2021-03-11T04:37:52.000Z | test/threehundred60/locations_test.exs | nesimtunc/ThreeHundred60 | 1dd8465b44f037ecf7d43be9ebd99b9eadcee6af | [
"MIT"
] | null | null | null | test/threehundred60/locations_test.exs | nesimtunc/ThreeHundred60 | 1dd8465b44f037ecf7d43be9ebd99b9eadcee6af | [
"MIT"
] | 1 | 2020-06-26T16:53:14.000Z | 2020-06-26T16:53:14.000Z | defmodule Threehundred60.LocationsTest do
use Threehundred60.DataCase
alias Threehundred60.Locations
alias Threehundred60.Categories
describe "locations" do
alias Threehundred60.Locations.Location
@valid_attrs %{information: "some information", latitude: 120.5, longtitude: 120.5, name: "some name", video_url: "some video_url"}
@update_attrs %{information: "some updated information", latitude: 456.7, longtitude: 456.7, name: "some updated name", video_url: "some updated video_url"}
@invalid_attrs %{information: nil, latitude: nil, longtitude: nil, name: nil, video_url: nil, category_id: nil}
def location_fixture() do
category = create_category()
attrs = Map.merge(@valid_attrs, %{category_id: category.id})
{:ok, location} =
attrs
|> Enum.into(attrs)
|> Locations.create_location()
location
end
defp create_category() do
{:ok, category} = Categories.create_category(%{ name: "Test Category"})
category
end
test "list_locations/0 returns all locations" do
location = location_fixture()
assert Locations.list_locations() == [location]
end
test "get_location!/1 returns the location with given id" do
location = location_fixture()
assert Locations.get_location!(location.id) == location
end
test "create_location/1 with valid data creates a location" do
category = create_category()
attrs = Map.merge(@valid_attrs, %{category_id: category.id})
assert {:ok, %Location{} = location} = Locations.create_location(attrs)
assert location.information == "some information"
assert location.latitude == 120.5
assert location.longtitude == 120.5
assert location.name == "some name"
assert location.video_url == "some video_url"
end
test "create_location/1 with invalid data returns error changeset" do
assert {:error, %Ecto.Changeset{}} = Locations.create_location(@invalid_attrs)
end
test "update_location/2 with valid data updates the location" do
location = location_fixture()
assert {:ok, %Location{} = location} = Locations.update_location(location, @update_attrs)
assert location.information == "some updated information"
assert location.latitude == 456.7
assert location.longtitude == 456.7
assert location.name == "some updated name"
assert location.video_url == "some updated video_url"
end
test "update_location/2 with invalid data returns error changeset" do
location = location_fixture()
assert {:error, %Ecto.Changeset{}} = Locations.update_location(location, @invalid_attrs)
assert location == Locations.get_location!(location.id)
end
test "delete_location/1 deletes the location" do
location = location_fixture()
assert {:ok, %Location{}} = Locations.delete_location(location)
assert_raise Ecto.NoResultsError, fn -> Locations.get_location!(location.id) end
end
test "change_location/1 returns a location changeset" do
location = location_fixture()
assert %Ecto.Changeset{} = Locations.change_location(location)
end
end
end
| 36.609195 | 160 | 0.697959 |
e8232e02f7bd164c3bf5defd64dde3585b055ae8 | 739 | exs | Elixir | priv/repo/seeds.exs | jgsmith/exinfiltr8 | cabe92dd44e9d6ac390856e9983f5897989a5bfc | [
"Apache-2.0"
] | null | null | null | priv/repo/seeds.exs | jgsmith/exinfiltr8 | cabe92dd44e9d6ac390856e9983f5897989a5bfc | [
"Apache-2.0"
] | 2 | 2021-03-10T14:09:03.000Z | 2021-05-11T09:54:54.000Z | priv/repo/seeds.exs | jgsmith/exinfiltr8 | cabe92dd44e9d6ac390856e9983f5897989a5bfc | [
"Apache-2.0"
] | null | null | null | # Script for populating the database. You can run it as:
#
# mix run priv/repo/seeds.exs
#
# Inside the script, you can read and write to any of your
# repositories directly:
#
# Exinfiltr8.Repo.insert!(%Exinfiltr8.SomeSchema{})
#
# We recommend using the bang functions (`insert!`, `update!`
# and so on) as they will fail if something goes wrong.
if !Militerm.Accounts.get_group_by_name("admin") do
Exinfiltr8.Repo.insert!(%Militerm.Accounts.Group{
name: "admin",
description: "Administrators have all permissions."
})
end
if !Militerm.Accounts.get_group_by_name("players") do
Exinfiltr8.Repo.insert!(%Militerm.Accounts.Group{
name: "players",
description: "All players are always in this group."
})
end
| 29.56 | 61 | 0.719892 |
e8234d6700785c3639aa2f17b8fe18e3cb18a8f0 | 1,010 | ex | Elixir | lib/booking/application.ex | mattiaslundberg/booking | 469d1469f306b2ab62ce1ee971a825101af6fc7e | [
"MIT"
] | null | null | null | lib/booking/application.ex | mattiaslundberg/booking | 469d1469f306b2ab62ce1ee971a825101af6fc7e | [
"MIT"
] | 2 | 2021-03-10T16:49:07.000Z | 2021-05-11T12:50:22.000Z | lib/booking/application.ex | mattiaslundberg/booking | 469d1469f306b2ab62ce1ee971a825101af6fc7e | [
"MIT"
] | null | null | null | defmodule Booking.Application do
# See https://hexdocs.pm/elixir/Application.html
# for more information on OTP Applications
@moduledoc false
use Application
def start(_type, _args) do
children = [
# Start the Ecto repository
Booking.Repo,
# Start the Telemetry supervisor
BookingWeb.Telemetry,
# Start the PubSub system
{Phoenix.PubSub, name: Booking.PubSub},
# Start the Endpoint (http/https)
BookingWeb.Endpoint
# Start a worker by calling: Booking.Worker.start_link(arg)
# {Booking.Worker, arg}
]
# See https://hexdocs.pm/elixir/Supervisor.html
# for other strategies and supported options
opts = [strategy: :one_for_one, name: Booking.Supervisor]
Supervisor.start_link(children, opts)
end
# Tell Phoenix to update the endpoint configuration
# whenever the application is updated.
def config_change(changed, _new, removed) do
BookingWeb.Endpoint.config_change(changed, removed)
:ok
end
end
| 28.857143 | 65 | 0.70198 |
e823549750feefaf1dca01fccf1538976cc6babb | 313 | ex | Elixir | web/views/auth_view.ex | AlexKovalevych/evolution | 2271546f045d475d676f5993c25824496026694b | [
"MIT"
] | null | null | null | web/views/auth_view.ex | AlexKovalevych/evolution | 2271546f045d475d676f5993c25824496026694b | [
"MIT"
] | null | null | null | web/views/auth_view.ex | AlexKovalevych/evolution | 2271546f045d475d676f5993c25824496026694b | [
"MIT"
] | null | null | null | defmodule Evolution.AuthView do
use Evolution.Web, :view
# Renders a successful login: echoes the session token and the user
# payload in a JSON-encodable map.
def render("login.json", %{token: token, user: user}),
  do: %{token: token, user: user}
# Renders a signup failure: `field` is the field that failed validation
# and `error` its message; the result is a JSON-encodable errors map.
def render("signup.json", %{error: {field, error}}) do
  # Build the single-entry error map directly instead of the round-about
  # Map.new() |> Map.put(...) construction.
  %{errors: %{field => error}}
end
end
| 19.5625 | 58 | 0.600639 |
e8236130c8edcfa9ada5faaa57b1fc98620b39c0 | 6,308 | ex | Elixir | lib/carrier/messaging/tracker.ex | matusf/cog | 71708301c7dc570fb0d3498a50f47a70ef957788 | [
"Apache-2.0"
] | 1,003 | 2016-02-23T17:21:12.000Z | 2022-02-20T14:39:35.000Z | lib/carrier/messaging/tracker.ex | matusf/cog | 71708301c7dc570fb0d3498a50f47a70ef957788 | [
"Apache-2.0"
] | 906 | 2016-02-22T22:54:19.000Z | 2022-03-11T15:19:43.000Z | lib/carrier/messaging/tracker.ex | matusf/cog | 71708301c7dc570fb0d3498a50f47a70ef957788 | [
"Apache-2.0"
] | 95 | 2016-02-23T13:42:31.000Z | 2021-11-30T14:39:55.000Z | defmodule Carrier.Messaging.Tracker do
defstruct [reply_endpoints: %{}, subscriptions: %{}, monitors: %{}, unused_topics: []]
# Registers a reply endpoint for `subscriber` and returns the updated
# tracker plus the endpoint topic. The reply_endpoints map is kept
# bidirectional (pid -> topic and topic -> pid) so lookups work both
# ways. Idempotent: an already-registered subscriber gets its existing
# topic back unchanged.
@spec add_reply_endpoint(Tracker.t, pid()) :: {Tracker.t, String.t}
def add_reply_endpoint(%__MODULE__{reply_endpoints: reps}=tracker, subscriber) do
  case Map.get(reps, subscriber) do
    nil ->
      topic = make_reply_endpoint_topic()

      # Store both directions of the association.
      reps = reps
      |> Map.put(subscriber, topic)
      |> Map.put(topic, subscriber)

      {maybe_monitor_subscriber(%{tracker | reply_endpoints: reps}, subscriber), topic}
    topic ->
      {tracker, topic}
  end
end

# Looks up the reply topic previously registered for `subscriber`,
# or nil when none exists.
@spec get_reply_endpoint(Tracker.t, pid()) :: String.t | nil
def get_reply_endpoint(%__MODULE__{reply_endpoints: reps}, subscriber) do
  Map.get(reps, subscriber)
end
# Subscribes `subscriber` to `topic`. Each topic entry holds a precompiled
# regex matcher (see subscription_matcher/1) plus its subscriber pids;
# duplicate subscriptions are collapsed via Enum.uniq/1.
@spec add_subscription(Tracker.t, String.t, pid()) :: Tracker.t
def add_subscription(%__MODULE__{subscriptions: subs}=tracker, topic, subscriber) do
  subs = Map.update(subs, topic, {subscription_matcher(topic), [subscriber]},
    fn({matcher, subscribed}) -> {matcher, Enum.uniq([subscriber|subscribed])} end)
  maybe_monitor_subscriber(%{tracker | subscriptions: subs}, subscriber)
end

# Removes `subscriber`'s subscription to `topic`. Returns {tracker, true}
# when a subscription was actually removed, {tracker, false} otherwise.
# When the last subscriber leaves a topic it is recorded in unused_topics
# so the owner can later drop the broker-side subscription.
@spec del_subscription(Tracker.t, String.t, pid()) :: {Tracker.t, boolean()}
def del_subscription(%__MODULE__{subscriptions: subs, unused_topics: ut}=tracker, topic, subscriber) do
  case Map.get(subs, topic) do
    # Sole subscriber: drop the whole topic entry and mark it unused.
    {_matcher, [^subscriber]} ->
      subs = Map.delete(subs, topic)
      {maybe_unmonitor_subscriber(%{tracker | subscriptions: subs, unused_topics: Enum.uniq([topic|ut])}, subscriber), true}
    {matcher, subscribed} ->
      case List.delete(subscribed, subscriber) do
        # Unchanged list: the pid was not subscribed to this topic.
        ^subscribed ->
          {tracker, false}
        updated ->
          {maybe_unmonitor_subscriber(%{tracker | subscriptions: Map.put(subs, topic, {matcher, updated})}, subscriber), true}
      end
    nil ->
      {tracker, false}
  end
end
# Resolves the subscriber pid for a reply topic; works because
# reply_endpoints also maps topic -> pid (see add_reply_endpoint/2).
@spec find_reply_subscriber(Tracker.t, String.t) :: pid() | nil
def find_reply_subscriber(%__MODULE__{reply_endpoints: reps}, topic) do
  Map.get(reps, topic)
end

# Returns every pid whose subscription matcher matches `topic`,
# wildcard subscriptions included.
@spec find_subscribers(Tracker.t, String.t) :: [] | [pid()]
def find_subscribers(%__MODULE__{subscriptions: subs}, topic) do
  Enum.reduce(subs, [], &(find_matching_subscriptions(&1, topic, &2)))
end

# Removes every trace of `subscriber`: its reply endpoint, all of its
# topic subscriptions, and its process monitor.
@spec del_subscriber(Tracker.t, pid()) :: Tracker.t
def del_subscriber(tracker, subscriber) do
  tracker
  |> del_reply_endpoint(subscriber)
  |> del_all_subscriptions(subscriber)
  |> unmonitor_subscriber(subscriber)
end

# True when the tracker holds no reply endpoints, subscriptions, or
# monitors at all.
@spec unused?(Tracker.t) :: boolean()
def unused?(%__MODULE__{reply_endpoints: reps, subscriptions: subs, monitors: monitors}) do
  Enum.empty?(reps) and Enum.empty?(subs) and Enum.empty?(monitors)
end

# Returns the accumulated unused topics and clears the list in the
# returned tracker.
@spec get_and_reset_unused_topics(Tracker.t) :: {Tracker.t, [String.t]}
def get_and_reset_unused_topics(tracker) do
  {%{tracker | unused_topics: []}, tracker.unused_topics}
end
# Builds a unique reply topic of the form "carrier/call/reply/<uuid>".
defp make_reply_endpoint_topic() do
  id = UUID.uuid4(:hex)
  "carrier/call/reply/#{id}"
end

# Reducer used by find_subscribers/2: appends a topic's subscriber list to
# the accumulator when its matcher matches the published topic.
defp find_matching_subscriptions({_, {matcher, subscribed}}, topic, accum) do
  if Regex.match?(matcher, topic) do
    accum ++ subscribed
  else
    accum
  end
end

# Tracker uses regexes to find subscribers for a given MQTT topic because
# the MQTT standard describes two types of wildcard subscriptions:
# * "foo/+" means "subscribe to foo and all topics one level down"
# * "foo/*" means "subscribe to foo and all subtopics regardless of depth"
# NOTE(review): standard MQTT spells the multi-level wildcard "#"; "*"
# appears to be a deliberate local convention here — confirm with callers.
defp subscription_matcher(sub_topic) do
  regex = case String.slice(sub_topic, -2, 2) do
    "/+" ->
      # Exactly one extra path segment after the prefix.
      "^#{String.slice(sub_topic, 0, String.length(sub_topic) - 1)}[a-zA-Z0-9_\-]+$"
    "/*" ->
      # Any number of extra segments after the prefix.
      "^#{String.slice(sub_topic, 0, String.length(sub_topic) - 1)}.*"
    _ ->
      # No wildcard: the topic must match exactly.
      "^#{sub_topic}$"
  end
  Regex.compile!(regex)
end
# Starts monitoring `subscriber` unless a monitor already exists, so a
# crashed subscriber can be detected and cleaned out exactly once.
def maybe_monitor_subscriber(%__MODULE__{monitors: monitors}=tracker, subscriber) do
  case Map.get(monitors, subscriber) do
    nil ->
      mref = :erlang.monitor(:process, subscriber)
      %{tracker | monitors: Map.put(monitors, subscriber, mref)}
    _ ->
      tracker
  end
end

# Drops the monitor on `subscriber` only when it no longer holds any reply
# endpoint or topic subscription; otherwise the monitor is kept.
def maybe_unmonitor_subscriber(%__MODULE__{monitors: monitors}=tracker, subscriber) do
  if has_subscriptions?(tracker, subscriber) do
    tracker
  else
    # Demonitor (flushing any pending :DOWN message) and pop the entry in
    # one pass; the nil clause handles an already-absent monitor.
    {_, monitors} = Map.get_and_update(monitors, subscriber,
      fn(nil) -> :pop
        (mref) -> :erlang.demonitor(mref, [:flush])
                  :pop end)
    %{tracker | monitors: monitors}
  end
end

# Unconditionally removes the monitor for `subscriber`, if one exists.
def unmonitor_subscriber(%__MODULE__{monitors: monitors}=tracker, subscriber) do
  case Map.pop(monitors, subscriber) do
    {nil, _monitors} ->
      tracker
    {mref, monitors} ->
      :erlang.demonitor(mref, [:flush])
      %{tracker | monitors: monitors}
  end
end
# True when `subscriber` still owns a reply endpoint or appears in any
# topic's subscriber list.
defp has_subscriptions?(tracker, subscriber) do
  Map.has_key?(tracker.reply_endpoints, subscriber) or
    Enum.any?(tracker.subscriptions, &(has_subscription?(&1, subscriber)))
end

defp has_subscription?({_, {_, subscribed}}, subscriber) do
  Enum.member?(subscribed, subscriber)
end

# Removes both directions of `subscriber`'s reply-endpoint mapping and
# records the freed topic in unused_topics.
defp del_reply_endpoint(%__MODULE__{reply_endpoints: reps, unused_topics: ut}=tracker, subscriber) do
  case Map.get(reps, subscriber) do
    nil ->
      tracker
    rep ->
      reps = reps
      |> Map.delete(rep)
      |> Map.delete(subscriber)
      %{tracker | reply_endpoints: reps, unused_topics: Enum.uniq([rep|ut])}
  end
end

# Walks every subscribed topic, deleting `subscriber` from each, and
# appends any topics left without subscribers to unused_topics.
defp del_all_subscriptions(%__MODULE__{subscriptions: subs}=tracker, subscriber) do
  {subs, unused_topics} = Enum.reduce(Map.keys(subs), {subs, []}, &(delete_subscription(&1, subscriber, &2)))
  %{tracker | subscriptions: subs, unused_topics: tracker.unused_topics ++ unused_topics}
end

# Reducer for del_all_subscriptions/2: removes `subscriber` from one
# topic's list, dropping the topic entirely when it becomes empty.
defp delete_subscription(topic, subscriber, {subs, unused_topics}) do
  case Map.get(subs, topic) do
    nil ->
      {subs, unused_topics}
    {_matcher, [^subscriber]} ->
      {Map.delete(subs, topic), [topic|unused_topics]}
    {matcher, subscribed} ->
      {Map.put(subs, topic, {matcher, List.delete(subscribed, subscriber)}), unused_topics}
  end
end
end
| 35.840909 | 128 | 0.656785 |
e8238866fb5592cd8b8e109c7963f92a65369afc | 315 | exs | Elixir | priv/repo/migrations/20180612040545_create_images.exs | the-mikedavis/mole | 73d884b5dca4e5371b1b399d7e65c0f4a0229851 | [
"BSD-3-Clause"
] | 1 | 2020-07-15T14:39:10.000Z | 2020-07-15T14:39:10.000Z | priv/repo/migrations/20180612040545_create_images.exs | the-mikedavis/mole | 73d884b5dca4e5371b1b399d7e65c0f4a0229851 | [
"BSD-3-Clause"
] | 59 | 2018-11-05T23:09:10.000Z | 2020-07-11T20:44:14.000Z | priv/repo/migrations/20180612040545_create_images.exs | the-mikedavis/mole | 73d884b5dca4e5371b1b399d7e65c0f4a0229851 | [
"BSD-3-Clause"
] | null | null | null | defmodule Mole.Repo.Migrations.CreateImages do
use Ecto.Migration
# Creates the images table (origin_id, malignant flag, type, timestamps)
# with a uniqueness guarantee on origin_id.
def change do
  create table(:images) do
    add :origin_id, :string
    add :malignant, :boolean, default: false, null: false
    add :type, :string

    timestamps()
  end

  # Each source image may only be recorded once.
  create unique_index(:images, [:origin_id])
end
end
| 19.6875 | 59 | 0.666667 |
e823a02fefe15630dbe3389956c03163f2326556 | 890 | ex | Elixir | apps/badge_fw/lib/badge_fw.ex | nickveys/elixirconf-2016-badge | 9c49e0b3c4a6eeffa7ff60a073ad12c50523a882 | [
"Unlicense"
] | null | null | null | apps/badge_fw/lib/badge_fw.ex | nickveys/elixirconf-2016-badge | 9c49e0b3c4a6eeffa7ff60a073ad12c50523a882 | [
"Unlicense"
] | null | null | null | apps/badge_fw/lib/badge_fw.ex | nickveys/elixirconf-2016-badge | 9c49e0b3c4a6eeffa7ff60a073ad12c50523a882 | [
"Unlicense"
] | null | null | null | defmodule BadgeFw do
use Application
alias Nerves.InterimWiFi, as: WiFi
# See http://elixir-lang.org/docs/stable/elixir/Application.html
# for more information on OTP Applications
# OTP application callback: loads the WiFi kernel module, then starts the
# supervision tree (network-setup task, Firmata link, and main worker).
def start(_type, _args) do
  import Supervisor.Spec, warn: false

  # load driver for wifi device
  :os.cmd('modprobe mt7603e')

  # Define workers and child supervisors to be supervised. The network
  # task is :transient so it is only restarted on abnormal exit. Call
  # network/0 with explicit parens — bare paren-less local calls are
  # deprecated and warn on modern Elixir.
  children = [
    worker(Task, [fn -> network() end], restart: :transient),
    worker(BadgeLib.Firmata, []),
    worker(BadgeFw.Worker, []),
  ]

  opts = [strategy: :one_for_one, name: BadgeFw.Supervisor]
  Supervisor.start_link(children, opts)
end
# Reads this app's :wlan0 configuration and brings the interface up via
# Nerves.InterimWiFi (aliased as WiFi).
def network do
  wlan_config = Application.get_env(:badge_fw, :wlan0)
  WiFi.setup "wlan0", wlan_config
end
end
| 28.709677 | 67 | 0.697753 |
e823c4a34180d7e214cce0e7b6a12a7bb50a3bb7 | 434 | exs | Elixir | test/server/dummy_test.exs | stirlab/elixir-dynamic-server-manager | 1b8dc327373456ab6b1f6b9997eee160d140c5f6 | [
"MIT"
] | 2 | 2020-08-07T14:56:42.000Z | 2020-08-26T12:17:26.000Z | test/server/dummy_test.exs | stirlab/elixir-dynamic-server-manager | 1b8dc327373456ab6b1f6b9997eee160d140c5f6 | [
"MIT"
] | null | null | null | test/server/dummy_test.exs | stirlab/elixir-dynamic-server-manager | 1b8dc327373456ab6b1f6b9997eee160d140c5f6 | [
"MIT"
] | 1 | 2020-08-07T14:56:49.000Z | 2020-08-07T14:56:49.000Z | defmodule DynamicServerManager.Server.DummyTest do
use ExUnit.Case
doctest DynamicServerManager
alias DynamicServerManager.Server.Dummy
# The application supervision tree registers a Dummy server under the
# module name, so it is resolvable via Process.whereis/1.
test "application starts with permanent arg, named server" do
  assert is_pid(Process.whereis(Dummy))
end

# start_link/0 with no args starts an additional, unnamed server distinct
# from the application-owned one; stop it so it does not leak.
test "started with no permanent arg, unnamed server" do
  assert {:ok, pid} = Dummy.start_link()
  assert Process.whereis(Dummy) != pid
  GenServer.stop(pid)
end
end
| 24.111111 | 63 | 0.751152 |
e824009edf12f011e8bbd2d4ce760c0d88aaeaa5 | 1,183 | exs | Elixir | apps/discovery_api/config/test.exs | msomji/smartcitiesdata | fc96abc1ef1306f7af6bd42bbcb4ed041a6d922c | [
"Apache-2.0"
] | null | null | null | apps/discovery_api/config/test.exs | msomji/smartcitiesdata | fc96abc1ef1306f7af6bd42bbcb4ed041a6d922c | [
"Apache-2.0"
] | null | null | null | apps/discovery_api/config/test.exs | msomji/smartcitiesdata | fc96abc1ef1306f7af6bd42bbcb4ed041a6d922c | [
"Apache-2.0"
] | null | null | null | use Mix.Config
# Presto (Prestige) session for tests points at a local instance.
config :prestige, :session_opts, url: "http://localhost:8080"

# Endpoint runs without a server during tests.
config :discovery_api, DiscoveryApiWeb.Endpoint,
  http: [port: 4001],
  server: false,
  url: [scheme: "https", host: "data.tests.example.com", port: 443]

# Placeholder auth endpoints; test_mode short-circuits external calls.
config :discovery_api,
  user_info_endpoint: "pretend-this-is-a-url/userinfo",
  jwks_endpoint: "pretend-this-is-a-url/jwks",
  allowed_origins: ["tests.example.com"],
  test_mode: true,
  hsts_enabled: false

config :logger, level: :warn

config :ex_json_schema,
  :remote_schema_resolver,
  fn url -> URLResolver.resolve_url(url) end

# Brook event bus: in-memory test driver with ETS-backed storage.
config :discovery_api, :brook,
  instance: :discovery_api,
  driver: [
    module: Brook.Driver.Test,
    init_arg: []
  ],
  handlers: [DiscoveryApi.Event.EventHandler],
  storage: [
    module: Brook.Storage.Ets,
    init_arg: []
  ]

config :discovery_api,
  user_visualization_limit: 4

# JWT verification is relaxed for tests (issuer unchecked, huge drift).
config :discovery_api, DiscoveryApiWeb.Auth.TokenHandler,
  issuer: "https://smartcolumbusos-demo.auth0.com/",
  allowed_algos: ["RS256"],
  verify_issuer: false,
  allowed_drift: 3_000_000_000_000

config :discovery_api, ecto_repos: [DiscoveryApi.Repo]

config :guardian, Guardian.DB, repo: DiscoveryApi.Repo
| 25.717391 | 67 | 0.72612 |
e82409b0e80be42e51c1434048734717b290e9d2 | 3,970 | exs | Elixir | apps/activity_logger/test/activity_logger/activity_repo_test.exs | jimpeebles/ewallet | ad4a9750ec8dc5adc4c0dfe6c22f0ef760825405 | [
"Apache-2.0"
] | null | null | null | apps/activity_logger/test/activity_logger/activity_repo_test.exs | jimpeebles/ewallet | ad4a9750ec8dc5adc4c0dfe6c22f0ef760825405 | [
"Apache-2.0"
] | null | null | null | apps/activity_logger/test/activity_logger/activity_repo_test.exs | jimpeebles/ewallet | ad4a9750ec8dc5adc4c0dfe6c22f0ef760825405 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 OmiseGO Pte Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Exercises the ActivityRepo wrapper: each insert/update/delete of a
# record must also persist a matching ActivityLog entry attributing the
# change to its originator.
defmodule ActivityLogger.ActivityRepoTest do
  use ExUnit.Case
  use ActivityLogger.ActivityLogging
  import ActivityLogger.Factory
  alias ActivityLogger.{ActivityLog, TestDocument}
  alias Ecto.Adapters.SQL.Sandbox
  alias Ecto.Changeset

  # Minimal repo built from the ActivityRepo macro under test.
  defmodule TestRepo do
    use ActivityLogger.ActivityRepo, repo: ActivityLogger.Repo
  end

  setup do
    :ok = Sandbox.checkout(ActivityLogger.Repo)

    # Register the loggable types and their identifier fields.
    ActivityLogger.configure(%{
      ActivityLogger.System => %{type: "system", identifier: nil},
      ActivityLogger.TestDocument => %{type: "test_document", identifier: :id},
      ActivityLogger.TestUser => %{type: "test_user", identifier: :id}
    })

    # :title is the only castable field below; the rest should be ignored.
    attrs = %{
      title: "A title",
      body: "some body that we don't want to save",
      secret_data: %{something: "secret"},
      originator: insert(:test_user)
    }

    %{attrs: attrs}
  end

  describe "insert_record_with_activity_log/3" do
    test "inserts the record and the activity log", meta do
      changeset = Changeset.cast(%TestDocument{}, meta.attrs, [:title, :originator])

      # Test for the inserted record
      {res, record} = TestRepo.insert_record_with_activity_log(changeset)
      assert res == :ok
      assert %TestDocument{} = record
      assert record.title == meta.attrs.title

      # Test for the inserted activity log
      activity_logs = ActivityLog.all_for_target(TestDocument, record.uuid)
      assert length(activity_logs) == 1

      assert Enum.any?(activity_logs, fn a ->
        a.action == "insert" && a.originator_uuid == meta.attrs.originator.uuid
      end)
    end
  end

  describe "update_record_with_activity_log/3" do
    test "updates the record and inserts the activity log", meta do
      # Start from a pre-existing document (inserted by "system").
      {:ok, document} = :test_document |> params_for() |> TestDocument.insert()
      changeset = Changeset.cast(document, meta.attrs, [:title, :originator])

      # Test for the updated record
      {res, record} = TestRepo.update_record_with_activity_log(changeset)
      assert res == :ok
      assert record.title == meta.attrs.title

      # Both the original insert log and the new update log must exist.
      activity_logs = ActivityLog.all_for_target(TestDocument, record.uuid)
      assert length(activity_logs) == 2

      assert Enum.any?(activity_logs, fn a ->
        a.action == "insert" && a.originator_type == "system"
      end)

      assert Enum.any?(activity_logs, fn a ->
        a.action == "update" && a.originator_uuid == meta.attrs.originator.uuid
      end)
    end
  end

  describe "delete_record_with_activity_log/3" do
    test "deletes the record and inserts the activity log", meta do
      {:ok, document} = :test_document |> params_for() |> TestDocument.insert()
      changeset = Changeset.cast(document, meta.attrs, [:originator])

      # Test for the deleted record
      {res, record} = TestRepo.delete_record_with_activity_log(changeset)
      assert res == :ok

      # The insert log plus a new "delete" log must both remain.
      activity_logs = ActivityLog.all_for_target(TestDocument, record.uuid)
      assert length(activity_logs) == 2

      assert Enum.any?(activity_logs, fn a ->
        a.action == "insert" && a.originator_type == "system"
      end)

      assert Enum.any?(activity_logs, fn a ->
        a.action == "delete" && a.originator_uuid == meta.attrs.originator.uuid
      end)
    end
  end
end
| 33.361345 | 86 | 0.675063 |
e82418aa5b78e31e3a81c3a87e859d66b5650524 | 9,752 | ex | Elixir | lib/sentry/event.ex | Wojnr/sentry-elixir | 3d06ac3cf5210ca6c206e674ef0224af402e91c4 | [
"MIT"
] | null | null | null | lib/sentry/event.ex | Wojnr/sentry-elixir | 3d06ac3cf5210ca6c206e674ef0224af402e91c4 | [
"MIT"
] | null | null | null | lib/sentry/event.ex | Wojnr/sentry-elixir | 3d06ac3cf5210ca6c206e674ef0224af402e91c4 | [
"MIT"
] | null | null | null | defmodule Sentry.Event do
@moduledoc """
Provides an Event Struct as well as transformation of Logger
entries into Sentry Events.
### Configuration
* `:in_app_module_allow_list` - Expects a list of modules that is used to distinguish among stacktrace frames that belong to your app and ones that are part of libraries or core Elixir. This is used to better display the significant part of stacktraces. The logic is greedy, so if your app's root module is `MyApp` and your setting is `[MyApp]`, that module as well as any submodules like `MyApp.Submodule` would be considered part of your app. Defaults to `[]`.
* `:report_deps` - Flag for whether to include the loaded dependencies when reporting an error. Defaults to true.
"""
defstruct event_id: nil,
culprit: nil,
timestamp: nil,
message: nil,
tags: %{},
level: "error",
platform: "elixir",
server_name: nil,
environment: nil,
exception: nil,
original_exception: nil,
release: nil,
stacktrace: %{
frames: []
},
request: %{},
extra: %{},
user: %{},
breadcrumbs: [],
fingerprint: [],
modules: %{},
event_source: nil
@type sentry_exception :: %{type: String.t(), value: String.t(), module: any()}
@type t :: %__MODULE__{
event_id: String.t() | nil,
culprit: String.t() | nil,
timestamp: String.t() | nil,
message: String.t() | nil,
tags: map(),
level: String.t(),
platform: String.t(),
server_name: any(),
environment: any(),
exception: [sentry_exception()],
original_exception: Exception.t() | nil,
release: any(),
stacktrace: %{
frames: [map()]
},
request: map(),
extra: map(),
user: map(),
breadcrumbs: list(),
fingerprint: list(),
modules: map(),
event_source: any()
}
alias Sentry.{Config, Event, Util}

# NOTE: these module attributes are evaluated once at compile time, so the
# source-code context flag, the loaded source files, and the dependency
# list are frozen into the compiled module — presumably intentional to
# avoid runtime cost, but changing config requires recompilation.
@source_code_context_enabled Config.enable_source_code_context()
@source_files if(@source_code_context_enabled, do: Sentry.Sources.load_files(), else: nil)
@enable_deps_reporting Config.report_deps()
@deps if(
        @enable_deps_reporting,
        do: Util.mix_deps(),
        else: []
      )
@doc """
Creates an Event struct out of context collected and options
## Options
* `:exception` - Sentry-structured exception
* `:original_exception` - Original Elixir exception struct
* `:message` - message
* `:stacktrace` - a list of Exception.stacktrace()
* `:extra` - map of extra context
* `:user` - map of user context
* `:tags` - map of tags context
* `:request` - map of request context
* `:breadcrumbs` - list of breadcrumbs
* `:event_source` - the source of the event
* `:level` - error level
* `:fingerprint` - list of the fingerprint for grouping this event
"""
@spec create_event(keyword()) :: Event.t()
def create_event(opts) do
  # Process-local context (set via Sentry.Context) is the baseline; values
  # passed in `opts` are merged over it below.
  %{
    user: user_context,
    tags: tags_context,
    extra: extra_context,
    breadcrumbs: breadcrumbs_context,
    request: request_context
  } = Sentry.Context.get_all()

  exception = Keyword.get(opts, :exception)
  original_exception = Keyword.get(opts, :original_exception)
  message = Keyword.get(opts, :message)
  event_source = Keyword.get(opts, :event_source)

  # Normalize bare {m, f, a} entries into 4-tuple stacktrace form.
  stacktrace =
    Keyword.get(opts, :stacktrace, [])
    |> coerce_stacktrace()

  fingerprint = Keyword.get(opts, :fingerprint, ["{{ default }}"])

  # Merge precedence for each context map: opts override context, which
  # (for tags) overrides the statically configured Config.tags().
  extra =
    extra_context
    |> Map.merge(Keyword.get(opts, :extra, %{}))

  user =
    user_context
    |> Map.merge(Keyword.get(opts, :user, %{}))

  tags =
    Config.tags()
    |> Map.merge(tags_context)
    |> Map.merge(Keyword.get(opts, :tags, %{}))

  request =
    request_context
    |> Map.merge(Keyword.get(opts, :request, %{}))

  # Keep only the newest max_breadcrumbs entries (negative take = tail).
  breadcrumbs =
    Keyword.get(opts, :breadcrumbs, [])
    |> Kernel.++(breadcrumbs_context)
    |> Enum.take(-1 * Config.max_breadcrumbs())

  level = Keyword.get(opts, :level, "error")

  release = Config.release()
  server_name = Config.server_name()
  env = Config.environment_name()

  # Attach the frame list to the exception payload as well.
  # NOTE(review): stacktrace_to_frames/1 is computed twice (here and in
  # the struct below) — hoisting it would save duplicate work.
  exception =
    exception
    |> Map.put(:stacktrace, %{
      frames: stacktrace_to_frames(stacktrace)
    })

  %Event{
    culprit: culprit_from_stacktrace(stacktrace),
    message: message,
    level: level,
    platform: "elixir",
    environment: env,
    server_name: server_name,
    exception: [exception],
    original_exception: original_exception,
    stacktrace: %{
      frames: stacktrace_to_frames(stacktrace)
    },
    release: release,
    extra: extra,
    tags: tags,
    user: user,
    breadcrumbs: breadcrumbs,
    request: request,
    fingerprint: fingerprint,
    modules: Util.mix_deps_versions(@deps),
    event_source: event_source
  }
  # Fill in event_id, timestamp, and a defaulted server_name.
  |> add_metadata()
end
@doc """
Transforms an Exception to a Sentry event.
## Options
* `:stacktrace` - a list of Exception.stacktrace()
* `:extra` - map of extra context
* `:user` - map of user context
* `:tags` - map of tags context
* `:request` - map of request context
* `:breadcrumbs` - list of breadcrumbs
* `:level` - error level
* `:fingerprint` - list of the fingerprint for grouping this event
"""
@spec transform_exception(Exception.t(), keyword()) :: Event.t()
def transform_exception(%_{} = exception, opts) do
  # "Elixir.RuntimeError" -> "RuntimeError" for the reported type name.
  type =
    exception.__struct__
    |> to_string()
    |> String.trim_leading("Elixir.")

  value = Exception.message(exception)
  module = Keyword.get(opts, :module)

  transformed_exception = %{type: type, value: value, module: module}
  message = "(#{type} #{value})"

  # Delegate to create_event/1 with the Sentry-shaped exception, a
  # summary message, and the original struct preserved for callers.
  opts
  |> Keyword.put(:exception, transformed_exception)
  |> Keyword.put(:message, message)
  |> Keyword.put(:original_exception, exception)
  |> create_event()
end
# Stamps the event with a fresh event_id and ISO-8601 timestamp, and
# defaults server_name to the local hostname when it was not set.
@spec add_metadata(Event.t()) :: Event.t()
def add_metadata(%Event{} = event) do
  server_name = event.server_name || to_string(:net_adm.localhost())

  %{
    event
    | event_id: Util.uuid4_hex(),
      timestamp: Util.iso8601_timestamp(),
      server_name: server_name
  }
end
# Compile-time switch: when source-code context is enabled in config this
# macro expands to a do_put_source_context/3 call; otherwise it expands to
# the frame untouched, so the feature has zero runtime cost when disabled.
defmacrop put_source_context(frame, file, line_number) do
  if Config.enable_source_code_context() do
    quote do
      do_put_source_context(unquote(frame), unquote(file), unquote(line_number))
    end
  else
    quote do
      unquote(frame)
    end
  end
end
# Converts an Exception.stacktrace() into Sentry frame maps. Erlang
# stacktraces are most-recent-call-first; the final Enum.reverse/1 flips
# the list so the oldest frame comes first.
@spec stacktrace_to_frames(Exception.stacktrace()) :: [map]
def stacktrace_to_frames(stacktrace) do
  in_app_module_allow_list = Config.in_app_module_allow_list()

  stacktrace
  |> Enum.map(fn line ->
    {mod, function, arity_or_args, location} = line
    # Entries may carry the actual argument list instead of an arity;
    # capture those args as frame vars and normalize the arity.
    f_args = args_from_stacktrace([line])
    arity = arity_to_integer(arity_or_args)
    file = Keyword.get(location, :file)
    file = if(file, do: String.Chars.to_string(file), else: file)
    line_number = Keyword.get(location, :line)

    %{
      filename: file && to_string(file),
      function: Exception.format_mfa(mod, function, arity),
      module: mod,
      lineno: line_number,
      in_app: is_in_app?(mod, in_app_module_allow_list),
      vars: f_args
    }
    # Compile-time no-op unless source context is enabled.
    |> put_source_context(file, line_number)
  end)
  |> Enum.reverse()
end
# Adds the surrounding source lines (before/at/after `line_number` of
# `file`) to a frame, using the source files loaded at compile time.
@spec do_put_source_context(map(), String.t(), integer()) :: map()
def do_put_source_context(frame, file, line_number) do
  {pre_context, context, post_context} =
    Sentry.Sources.get_source_context(@source_files, file, line_number)

  frame
  |> Map.put(:context_line, context)
  |> Map.put(:pre_context, pre_context)
  |> Map.put(:post_context, post_context)
end
# Formats the top stacktrace entry as "Mod.fun/arity" for the event's
# culprit field; nil when the stacktrace is empty. Handles both 4-tuple
# (with location) and bare 3-tuple entries.
@spec culprit_from_stacktrace(Exception.stacktrace()) :: String.t() | nil
def culprit_from_stacktrace([]), do: nil

def culprit_from_stacktrace([{m, f, a, _} | _]) do
  Exception.format_mfa(m, f, arity_to_integer(a))
end

def culprit_from_stacktrace([{m, f, a} | _]) do
  Exception.format_mfa(m, f, arity_to_integer(a))
end
@doc """
Builds a map from argument value list. For Sentry, typically the
key in the map would be the name of the variable, but we don't have that
available, so positional keys (`"arg0"`, `"arg1"`, ...) are used instead.

Values are inspected with bounded limits and truncated to 513 characters
to keep the event payload small.
"""
@spec args_from_stacktrace(Exception.stacktrace()) :: map()
def args_from_stacktrace([{_m, _f, a, _} | _]) when is_list(a) do
  Enum.with_index(a)
  |> Enum.into(%{}, fn {arg, index} ->
    {"arg#{index}", String.slice(inspect(arg, limit: 513, printable_limit: 513), 0, 513)}
  end)
end

# Entries that carry an arity (integer) instead of args yield no vars.
def args_from_stacktrace(_), do: %{}

# A stacktrace entry's third element is either the argument list or the
# arity itself; normalize both to an integer arity.
defp arity_to_integer(arity) when is_list(arity), do: Enum.count(arity)
defp arity_to_integer(arity) when is_integer(arity), do: arity
defp is_in_app?(nil, _in_app_allow_list), do: false
defp is_in_app?(_, []), do: false
defp is_in_app?(module, in_app_module_allow_list) do
split_modules = module_split(module)
Enum.any?(in_app_module_allow_list, fn module ->
allowed_split_modules = module_split(module)
count = Enum.count(allowed_split_modules)
Enum.take(split_modules, count) == allowed_split_modules
end)
end
defp module_split(module) when is_binary(module) do
String.split(module, ".")
|> Enum.reject(&(&1 == "Elixir"))
end
defp module_split(module), do: module_split(String.Chars.to_string(module))
  # Normalizes a bare `{m, f, a}` tuple into a single-entry stacktrace list
  # with an empty location; proper stacktraces pass through unchanged.
  defp coerce_stacktrace({m, f, a}), do: [{m, f, a, []}]
  defp coerce_stacktrace(stacktrace), do: stacktrace
end
| 30.763407 | 467 | 0.627051 |
e82429849ee726a2a5ce7e8e47506a7ae362699e | 3,701 | exs | Elixir | apps/ex_wire/test/ex_wire/struct/block_queue_test.exs | wolflee/mana | db66dac85addfaad98d40da5bd4082b3a0198bb1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 152 | 2018-10-27T04:52:03.000Z | 2022-03-26T10:34:00.000Z | apps/ex_wire/test/ex_wire/struct/block_queue_test.exs | wolflee/mana | db66dac85addfaad98d40da5bd4082b3a0198bb1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 270 | 2018-04-14T07:34:57.000Z | 2018-10-25T18:10:45.000Z | apps/ex_wire/test/ex_wire/struct/block_queue_test.exs | wolflee/mana | db66dac85addfaad98d40da5bd4082b3a0198bb1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 25 | 2018-10-27T12:15:13.000Z | 2022-01-25T20:31:14.000Z | defmodule ExWire.Struct.BlockQueueTest do
use ExUnit.Case, async: true
doctest ExWire.Struct.BlockQueue
test "add_header/7" do
chain = Blockchain.Test.ropsten_chain()
db = MerklePatriciaTree.Test.random_ets_db(:proces_block_queue)
header = %Block.Header{
number: 5,
parent_hash: <<0::256>>,
beneficiary: <<2, 3, 4>>,
difficulty: 100,
timestamp: 11,
mix_hash: <<1>>,
nonce: <<2>>
}
header_hash =
<<78, 28, 127, 10, 192, 253, 127, 239, 254, 179, 39, 34, 245, 44, 152, 98, 128, 71, 238,
155, 100, 161, 199, 71, 243, 223, 172, 191, 74, 99, 128, 63>>
assert {block_queue, block_tree, _block_trie, false} =
ExWire.Struct.BlockQueue.add_header(
%ExWire.Struct.BlockQueue{do_validation: false},
Blockchain.Blocktree.new_tree(),
header,
header_hash,
"remote_id",
chain,
MerklePatriciaTree.Trie.new(db)
)
assert block_queue.queue == %{}
assert block_tree.best_block.header.number == 5
# TODO: Add a second addition example
end
test "add_block_struct/5" do
chain = Blockchain.Test.ropsten_chain()
db = MerklePatriciaTree.Test.random_ets_db(:add_block_struct)
header = %Block.Header{
transactions_root:
<<200, 70, 164, 239, 152, 124, 5, 149, 40, 10, 157, 9, 210, 181, 93, 89, 5, 119, 158, 112,
221, 58, 94, 86, 206, 113, 120, 51, 241, 9, 154, 150>>,
ommers_hash:
<<109, 111, 130, 71, 136, 238, 173, 201, 249, 31, 178, 95, 22, 55, 184, 127, 214, 229,
135, 243, 4, 81, 0, 18, 81, 183, 165, 189, 6, 18, 197, 174>>
}
block_struct = %ExWire.Struct.Block{
transactions_rlp: [[1], [2], [3]],
transactions: ["trx"],
ommers_rlp: [[1]],
ommers: ["ommers"]
}
block_queue = %ExWire.Struct.BlockQueue{
queue: %{
1 => %{
<<1::256>> => %{
commitments: MapSet.new([]),
header: header,
block: %Blockchain.Block{header: header, block_hash: <<1::256>>},
ready: false
}
}
},
do_validation: false
}
assert {block_queue, _block_tree, _trie} =
ExWire.Struct.BlockQueue.add_block_struct(
block_queue,
Blockchain.Blocktree.new_tree(),
block_struct,
chain,
db
)
assert block_queue.queue[1][<<1::256>>].block.transactions == ["trx"]
assert block_queue.queue[1][<<1::256>>].block.ommers == ["ommers"]
end
test "process_block_queue/4" do
chain = Blockchain.Test.ropsten_chain()
db = MerklePatriciaTree.Test.random_ets_db(:process_block_queue)
header = %Block.Header{
number: 1,
parent_hash: <<0::256>>,
beneficiary: <<2, 3, 4>>,
difficulty: 100,
timestamp: 11,
mix_hash: <<1>>,
nonce: <<2>>
}
block_queue = %ExWire.Struct.BlockQueue{
queue: %{
1 => %{
<<1::256>> => %{
commitments: MapSet.new([1, 2]),
header: header,
block: %Blockchain.Block{header: header, block_hash: <<1::256>>},
ready: true
}
}
},
do_validation: false
}
assert {new_block_queue, block_tree, _new_trie} =
ExWire.Struct.BlockQueue.process_block_queue(
block_queue,
Blockchain.Blocktree.new_tree(),
chain,
MerklePatriciaTree.Trie.new(db)
)
assert block_tree.best_block.header.number == 1
assert new_block_queue.queue == %{}
end
end
| 29.141732 | 98 | 0.545798 |
e8245e327f5d83356f4776c828b24fe62f05c724 | 1,912 | ex | Elixir | clients/data_catalog/lib/google_api/data_catalog/v1beta1/model/google_cloud_datacatalog_v1beta1_schema.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2021-12-20T03:40:53.000Z | 2021-12-20T03:40:53.000Z | clients/data_catalog/lib/google_api/data_catalog/v1beta1/model/google_cloud_datacatalog_v1beta1_schema.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2020-08-18T00:11:23.000Z | 2020-08-18T00:44:16.000Z | clients/data_catalog/lib/google_api/data_catalog/v1beta1/model/google_cloud_datacatalog_v1beta1_schema.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.DataCatalog.V1beta1.Model.GoogleCloudDatacatalogV1beta1Schema do
  @moduledoc """
  Represents a schema (e.g. BigQuery, GoogleSQL, Avro schema).

  ## Attributes

  *   `columns` (*type:* `list(GoogleApi.DataCatalog.V1beta1.Model.GoogleCloudDatacatalogV1beta1ColumnSchema.t)`, *default:* `nil`) - Required. Schema of columns. A maximum of 10,000 columns and sub-columns can be specified.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :columns =>
            list(
              GoogleApi.DataCatalog.V1beta1.Model.GoogleCloudDatacatalogV1beta1ColumnSchema.t()
            )
            | nil
        }

  # Decode each element of `columns` into a ColumnSchema struct.
  field(:columns,
    as: GoogleApi.DataCatalog.V1beta1.Model.GoogleCloudDatacatalogV1beta1ColumnSchema,
    type: :list
  )
end
defimpl Poison.Decoder,
  for: GoogleApi.DataCatalog.V1beta1.Model.GoogleCloudDatacatalogV1beta1Schema do
  # Delegate decoding to the model's generated decode/2 (GoogleApi.Gax.ModelBase).
  def decode(value, options),
    do:
      GoogleApi.DataCatalog.V1beta1.Model.GoogleCloudDatacatalogV1beta1Schema.decode(
        value,
        options
      )
end
defimpl Poison.Encoder,
  for: GoogleApi.DataCatalog.V1beta1.Model.GoogleCloudDatacatalogV1beta1Schema do
  # Encode via the shared Gax model-base encoder.
  def encode(value, options), do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 34.142857 | 224 | 0.745816 |
e8247e7fc66735772670c7e261d99f04cadcb6a7 | 2,175 | exs | Elixir | packages/tai_events/test/tai_events_test.exs | ccamateur/tai | 41c4b3e09dafc77987fa3f6b300c15461d981e16 | [
"MIT"
] | 276 | 2018-01-16T06:36:06.000Z | 2021-03-20T21:48:01.000Z | packages/tai_events/test/tai_events_test.exs | ccamateur/tai | 41c4b3e09dafc77987fa3f6b300c15461d981e16 | [
"MIT"
] | 73 | 2018-10-05T18:45:06.000Z | 2021-02-08T05:46:33.000Z | packages/tai_events/test/tai_events_test.exs | ccamateur/tai | 41c4b3e09dafc77987fa3f6b300c15461d981e16 | [
"MIT"
] | 43 | 2018-06-09T09:54:51.000Z | 2021-03-07T07:35:17.000Z | defmodule TaiEventsTest do
use ExUnit.Case, async: false
defmodule MyEvent do
defstruct []
end
defmodule MyOtherEvent do
defstruct []
end
describe ".firehose_subscribe/0" do
test "creates a registry entry for the :firehose topic" do
start_supervised!({TaiEvents, 1})
TaiEvents.firehose_subscribe()
Registry.dispatch(TaiEvents, :firehose, fn entries ->
for {pid, _} <- entries, do: send(pid, :firehose)
end)
assert_receive :firehose
end
end
describe ".subscribe/1" do
test "creates a registry entry for all events of the given type" do
event = %MyEvent{}
other_event = %MyOtherEvent{}
start_supervised!({TaiEvents, 1})
TaiEvents.subscribe(MyEvent)
Registry.dispatch(TaiEvents, MyEvent, fn entries ->
for {pid, _} <- entries, do: send(pid, event)
end)
Registry.dispatch(TaiEvents, MyOtherEvent, fn entries ->
for {pid, _} <- entries, do: send(pid, other_event)
end)
assert_receive %MyEvent{}
refute_receive %MyOtherEvent{}
end
end
[:debug, :info, :warning, :error]
|> Enum.each(fn level ->
describe ".#{level}/1" do
@level level
test "sends the event to all subscribers of the event type" do
level = @level
event = %MyEvent{}
other_event = %MyOtherEvent{}
start_supervised!({TaiEvents, 1})
Registry.register(TaiEvents, MyEvent, [])
apply(TaiEvents, @level, [event])
apply(TaiEvents, @level, [other_event])
assert_receive {TaiEvents.Event, %MyEvent{}, ^level}
refute_receive {TaiEvents.Event, %MyOtherEvent{}, ^level}
end
test "sends the event to firehose subscribers" do
level = @level
event = %MyEvent{}
other_event = %MyOtherEvent{}
start_supervised!({TaiEvents, 1})
Registry.register(TaiEvents, :firehose, [])
apply(TaiEvents, level, [event])
apply(TaiEvents, level, [other_event])
assert_receive {TaiEvents.Event, %MyEvent{}, ^level}
assert_receive {TaiEvents.Event, %MyOtherEvent{}, ^level}
end
end
end)
end
| 25.892857 | 71 | 0.626207 |
e824b91a726be6e35aa390cec13150bfd1550929 | 761 | ex | Elixir | lib/extatus/sandbox/counter.ex | gmtprime/extatus | e9bf16cb1e83d2c4225c66d7c0bfdfb40993de03 | [
"MIT"
] | 17 | 2017-03-01T07:19:46.000Z | 2021-11-25T21:57:38.000Z | lib/extatus/sandbox/counter.ex | gmtprime/extatus | e9bf16cb1e83d2c4225c66d7c0bfdfb40993de03 | [
"MIT"
] | 1 | 2018-04-02T11:09:28.000Z | 2018-04-02T14:33:30.000Z | lib/extatus/sandbox/counter.ex | gmtprime/extatus | e9bf16cb1e83d2c4225c66d7c0bfdfb40993de03 | [
"MIT"
] | 1 | 2021-12-14T20:49:02.000Z | 2021-12-14T20:49:02.000Z | defmodule Extatus.Sandbox.Counter do
@moduledoc """
This module defines a sandbox for testing Prometheus counter calls without a
prometheus server.
"""
alias Extatus.Sandbox.Metric
@doc false
def new(spec) do
Metric.new(__MODULE__, spec)
end
@doc false
def declare(spec) do
Metric.declare(__MODULE__, spec)
end
@doc false
def inc(spec, value \\ 1) do
Metric.inc(__MODULE__, spec, value)
end
@doc false
def dinc(spec, value \\ 1) do
Metric.dinc(__MODULE__, spec, value)
end
@doc false
def remove(spec) do
Metric.remove(__MODULE__, spec)
end
@doc false
def reset(spec) do
Metric.reset(__MODULE__, spec)
end
@doc false
def value(spec) do
Metric.value(__MODULE__, spec)
end
end
| 17.697674 | 78 | 0.680683 |
e824c6aa73b7264e26ca493525c30760e375e826 | 515 | exs | Elixir | backend/priv/repo/migrations/20191130184230_create_messages.exs | danesjenovdan/kjer.si | 185410ede2d42892e4d91c000099283254c5dc7a | [
"Unlicense"
] | 2 | 2019-11-02T21:28:34.000Z | 2019-11-28T18:01:08.000Z | backend/priv/repo/migrations/20191130184230_create_messages.exs | danesjenovdan/kjer.si | 185410ede2d42892e4d91c000099283254c5dc7a | [
"Unlicense"
] | 17 | 2019-11-29T16:23:38.000Z | 2022-02-14T05:11:41.000Z | backend/priv/repo/migrations/20191130184230_create_messages.exs | danesjenovdan/kjer.si | 185410ede2d42892e4d91c000099283254c5dc7a | [
"Unlicense"
] | null | null | null | defmodule KjerSi.Repo.Migrations.CreateMessages do
use Ecto.Migration
def change do
create table(:messages, primary_key: false) do
add :id, :binary_id, primary_key: true
add :content, :string
add :user_id, references(:users, on_delete: :nothing, type: :binary_id)
add :room_id, references(:rooms, on_delete: :delete_all, type: :binary_id)
timestamps(type: :utc_datetime_usec)
end
create index(:messages, [:user_id])
create index(:messages, [:room_id])
end
end
| 28.611111 | 80 | 0.693204 |
e824cc1b6782201b28293995bac502323193d5be | 1,258 | ex | Elixir | lib/ecto_gss/schema.ex | danielgrieve/ecto_gss | 4c5c82c5252fbd01be287b4e80ab7f86057ce9aa | [
"MIT"
] | null | null | null | lib/ecto_gss/schema.ex | danielgrieve/ecto_gss | 4c5c82c5252fbd01be287b4e80ab7f86057ce9aa | [
"MIT"
] | null | null | null | lib/ecto_gss/schema.ex | danielgrieve/ecto_gss | 4c5c82c5252fbd01be287b4e80ab7f86057ce9aa | [
"MIT"
] | null | null | null | defmodule EctoGSS.Schema do
def model(opts) do
spreadsheet = Keyword.get(opts, :spreadsheet)
list = Keyword.get(opts, :list)
for gss_column <- Keyword.get(opts, :columns, []) do
code =
quote do
@behaviour Ecto.Type
def type, do: :string
def cast(integer) when is_integer(integer), do: {:ok, to_string(integer)}
def cast(string) when is_bitstring(string), do: {:ok, string}
def cast(_), do: :error
def load(string) when is_bitstring(string), do: {:ok, string}
def dump(string) when is_bitstring(string), do: {:ok, string}
def dump(_), do: :error
def column, do: unquote(gss_column)
def list, do: unquote(list)
def gss_schema_type, do: true
end
safe_list_name = String.replace(list, " ", "")
Module.create(
Module.concat(EctoGSS.Schema, "#{safe_list_name}.#{gss_column}"),
code,
Macro.Env.location(__ENV__)
)
end
quote do
def spreadsheet, do: unquote(spreadsheet)
def list, do: unquote(list)
def gss_schema, do: true
end
end
defmacro __using__({which, opts}) when is_atom(which) do
apply(__MODULE__, which, [opts])
end
end
| 27.347826 | 83 | 0.597774 |
e824e5deaa4fe6215bee3c4c48f21864bafe5f7c | 1,733 | ex | Elixir | clients/ad_exchange_buyer/lib/google_api/ad_exchange_buyer/v2beta1/model/row_dimensions.ex | nuxlli/elixir-google-api | ecb8679ac7282b7dd314c3e20c250710ec6a7870 | [
"Apache-2.0"
] | null | null | null | clients/ad_exchange_buyer/lib/google_api/ad_exchange_buyer/v2beta1/model/row_dimensions.ex | nuxlli/elixir-google-api | ecb8679ac7282b7dd314c3e20c250710ec6a7870 | [
"Apache-2.0"
] | null | null | null | clients/ad_exchange_buyer/lib/google_api/ad_exchange_buyer/v2beta1/model/row_dimensions.ex | nuxlli/elixir-google-api | ecb8679ac7282b7dd314c3e20c250710ec6a7870 | [
"Apache-2.0"
] | 1 | 2020-11-10T16:58:27.000Z | 2020-11-10T16:58:27.000Z | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the swagger code generator program.
# https://github.com/swagger-api/swagger-codegen.git
# Do not edit the class manually.
defmodule GoogleApi.AdExchangeBuyer.V2beta1.Model.RowDimensions do
  @moduledoc """
  A response may include multiple rows, breaking down along various dimensions. Encapsulates the values of all dimensions for a given row.

  ## Attributes

  - timeInterval (TimeInterval): The time interval that this row represents. Defaults to: `null`.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :timeInterval => GoogleApi.AdExchangeBuyer.V2beta1.Model.TimeInterval.t()
        }

  # Decode `timeInterval` into a TimeInterval struct.
  field(:timeInterval, as: GoogleApi.AdExchangeBuyer.V2beta1.Model.TimeInterval)
end
defimpl Poison.Decoder, for: GoogleApi.AdExchangeBuyer.V2beta1.Model.RowDimensions do
  # Delegate decoding to the model's generated decode/2.
  def decode(value, options),
    do: GoogleApi.AdExchangeBuyer.V2beta1.Model.RowDimensions.decode(value, options)
end
defimpl Poison.Encoder, for: GoogleApi.AdExchangeBuyer.V2beta1.Model.RowDimensions do
  # Encode via the shared Gax model-base encoder.
  def encode(value, options), do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 36.104167 | 138 | 0.766301 |
e8250ab00a8c6fe7c0a8a1a74a16de7d71e02d66 | 2,897 | exs | Elixir | test/oled/display/impl/ssd_1306/line_h_test.exs | pappersverk/oled | bae090796bc289268b06b7db58eb62111c422690 | [
"Apache-2.0"
] | 11 | 2019-12-06T10:32:33.000Z | 2022-02-03T19:28:06.000Z | test/oled/display/impl/ssd_1306/line_h_test.exs | pappersverk/oled | bae090796bc289268b06b7db58eb62111c422690 | [
"Apache-2.0"
] | 7 | 2020-03-04T19:36:07.000Z | 2021-12-26T01:23:36.000Z | test/oled/display/impl/ssd_1306/line_h_test.exs | pappersverk/oled | bae090796bc289268b06b7db58eb62111c422690 | [
"Apache-2.0"
] | 1 | 2022-01-12T05:04:53.000Z | 2022-01-12T05:04:53.000Z | defmodule OLED.Display.Impl.SSD1306.LineHTest do
use ExUnit.Case
alias OLED.Display.Impl.SSD1306.Draw
import OLED.BufferTestHelper, only: [build_state: 2, ascii_render: 1]
@w 32
@h 8
test "draw inside" do
assert build_state(@w, @h)
|> Draw.line_h(1, 1, 10, [])
|> ascii_render() == [
" ",
" ########## ",
" ",
" ",
" ",
" ",
" ",
" "
]
end
test "draw out 1" do
assert build_state(@w, @h)
|> Draw.line_h(-4, 1, 8, [])
|> ascii_render() == [
" ",
"#### ",
" ",
" ",
" ",
" ",
" ",
" "
]
end
test "draw out 2" do
assert build_state(@w, @h)
|> Draw.line_h(30, 1, 8, [])
|> ascii_render() == [
" ",
" ##",
" ",
" ",
" ",
" ",
" ",
" "
]
end
test "draw out 3" do
assert build_state(@w, @h)
|> Draw.line_h(-10, 1, 8, [])
|> ascii_render() == [
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" "
]
end
test "draw out 4" do
assert build_state(@w, @h)
|> Draw.line_h(32, 1, 8, [])
|> ascii_render() == [
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" "
]
end
end
| 33.686047 | 71 | 0.158785 |
e82576fa58d7321cd2ce193b68a9b8754a535b14 | 1,763 | ex | Elixir | clients/calendar/lib/google_api/calendar/v3/model/event_source.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/calendar/lib/google_api/calendar/v3/model/event_source.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | 1 | 2020-12-18T09:25:12.000Z | 2020-12-18T09:25:12.000Z | clients/calendar/lib/google_api/calendar/v3/model/event_source.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | 1 | 2020-10-04T10:12:44.000Z | 2020-10-04T10:12:44.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Calendar.V3.Model.EventSource do
  @moduledoc """
  Source from which the event was created. For example, a web page, an email message or any document identifiable by an URL with HTTP or HTTPS scheme. Can only be seen or modified by the creator of the event.

  ## Attributes

  *   `title` (*type:* `String.t`, *default:* `nil`) - Title of the source; for example a title of a web page or an email subject.
  *   `url` (*type:* `String.t`, *default:* `nil`) - URL of the source pointing to a resource. The URL scheme must be HTTP or HTTPS.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :title => String.t(),
          :url => String.t()
        }

  # Plain string fields; no nested model decoding needed.
  field(:title)
  field(:url)
end
defimpl Poison.Decoder, for: GoogleApi.Calendar.V3.Model.EventSource do
  # Delegate decoding to the model's generated decode/2.
  def decode(value, options), do: GoogleApi.Calendar.V3.Model.EventSource.decode(value, options)
end
defimpl Poison.Encoder, for: GoogleApi.Calendar.V3.Model.EventSource do
  # Encode via the shared Gax model-base encoder.
  def encode(value, options), do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 35.26 | 208 | 0.72093 |
e82590bfd18fc17cd3b3db135e2c29c124dcac7e | 17,953 | ex | Elixir | lib/mix/lib/releases/models/release.ex | rozap/distillery | 1853c58b36c56bb5a996ac5a278ae31410d81de9 | [
"MIT"
] | null | null | null | lib/mix/lib/releases/models/release.ex | rozap/distillery | 1853c58b36c56bb5a996ac5a278ae31410d81de9 | [
"MIT"
] | null | null | null | lib/mix/lib/releases/models/release.ex | rozap/distillery | 1853c58b36c56bb5a996ac5a278ae31410d81de9 | [
"MIT"
] | null | null | null | defmodule Mix.Releases.Release do
@moduledoc """
Represents metadata about a release
"""
alias Mix.Releases.App
alias Mix.Releases.Profile
alias Mix.Releases.Overlays
alias Mix.Releases.Config
alias Mix.Releases.Utils
alias Mix.Releases.Environment
alias Mix.Releases.Shell
  # Release metadata with defaults; `applications` always includes the
  # runtime applications every Elixir release needs.
  defstruct name: nil,
            version: "0.1.0",
            applications: [
              # required for elixir apps
              :elixir,
              # included so the elixir shell works
              :iex,
              # required for Mix config provider
              :mix,
              # required for upgrades
              :sasl,
              # required for some command tooling
              :runtime_tools,
              # needed for config provider API
              :distillery
              # can also use `app_name: type`, as in `some_dep: load`,
              # to only load the application, not start it
            ],
            is_upgrade: false,
            upgrade_from: :latest,
            resolved_overlays: [],
            profile: %Profile{
              erl_opts: "",
              run_erl_env: "",
              executable: [enabled: false, transient: false],
              dev_mode: false,
              include_erts: true,
              include_src: false,
              include_system_libs: true,
              included_configs: [],
              config_providers: [],
              appup_transforms: [],
              strip_debug_info: false,
              plugins: [],
              overlay_vars: [],
              overlays: [],
              commands: [],
              overrides: []
            },
            env: nil

  @type t :: %__MODULE__{
          name: atom,
          version: String.t(),
          applications: list(atom | {atom, App.start_type()} | App.t()),
          is_upgrade: boolean,
          upgrade_from: nil | String.t() | :latest,
          resolved_overlays: [Overlays.overlay()],
          profile: Profile.t(),
          env: atom
        }

  # A single application entry of an Erlang .rel resource file.
  @type app_resource ::
          {atom, app_version :: charlist}
          | {atom, app_version :: charlist, App.start_type()}
  # The full Erlang .rel release resource term.
  @type resource ::
          {:release, {name :: charlist, version :: charlist}, {:erts, erts_version :: charlist},
           [app_resource]}
@doc """
Creates a new Release with the given name, version, and applications.
"""
@spec new(atom, String.t()) :: t
@spec new(atom, String.t(), [atom]) :: t
def new(name, version, apps \\ []) do
build_path = Mix.Project.build_path()
output_dir = Path.relative_to_cwd(Path.join([build_path, "rel", "#{name}"]))
definition = %__MODULE__{name: name, version: version}
%__MODULE__{
definition
| applications: definition.applications ++ apps,
profile: %Profile{definition.profile | output_dir: output_dir}
}
end
  @doc """
  Load a fully configured Release object given a release name and environment name.
  """
  @spec get(atom) :: {:ok, t} | {:error, term}
  @spec get(atom, atom) :: {:ok, t} | {:error, term}
  @spec get(atom, atom, Keyword.t()) :: {:ok, t} | {:error, term}
  def get(name, env \\ :default, opts \\ [])

  def get(name, env, opts) when is_atom(name) and is_atom(env) do
    # load release configuration
    default_opts = [
      selected_environment: env,
      selected_release: name,
      is_upgrade: Keyword.get(opts, :is_upgrade, false),
      upgrade_from: Keyword.get(opts, :upgrade_from, false)
    ]

    # Caller-supplied opts take precedence over the defaults built above.
    case Config.get(Keyword.merge(default_opts, opts)) do
      {:error, _} = err ->
        err

      {:ok, config} ->
        # Resolve the environment/release entries, overlay the environment's
        # profile onto the release, then apply global configuration.
        with {:ok, env} <- select_environment(config),
             {:ok, rel} <- select_release(config),
             rel <- apply_environment(rel, env),
             do: apply_configuration(rel, config, false)
    end
  end
@doc """
Converts a Release struct to a release resource structure.
The format of release resources is documented [in the Erlang manual](http://erlang.org/doc/design_principles/release_structure.html#res_file)
"""
@spec to_resource(t) :: resource
def to_resource(
%__MODULE__{applications: apps, profile: %Profile{erts_version: erts}} = release
) do
rel_name = Atom.to_charlist(release.name)
rel_version = String.to_charlist(release.version)
erts = String.to_charlist(erts)
{
:release,
{rel_name, rel_version},
{:erts, erts},
for %App{name: name, vsn: vsn, start_type: start_type} <- apps do
if is_nil(start_type) do
{name, '#{vsn}'}
else
{name, '#{vsn}', start_type}
end
end
}
end
@doc """
Returns true if the release is executable
"""
@spec executable?(t) :: boolean()
def executable?(%__MODULE__{profile: %Profile{executable: false}}),
do: false
def executable?(%__MODULE__{profile: %Profile{executable: true}}),
do: true
def executable?(%__MODULE__{profile: %Profile{executable: e}}),
do: Keyword.get(e, :enabled, false)
@doc """
Get the path to which release binaries will be output
"""
@spec bin_path(t) :: String.t()
def bin_path(%__MODULE__{profile: %Profile{output_dir: output_dir}}) do
[output_dir, "bin"]
|> Path.join()
|> Path.expand()
end
@doc """
Get the path to which versioned release data will be output
"""
@spec version_path(t) :: String.t()
def version_path(%__MODULE__{profile: %Profile{output_dir: output_dir}} = r) do
[output_dir, "releases", "#{r.version}"]
|> Path.join()
|> Path.expand()
end
@doc """
Get the path to which compiled applications will be output
"""
@spec lib_path(t) :: String.t()
def lib_path(%__MODULE__{profile: %Profile{output_dir: output_dir}}) do
[output_dir, "lib"]
|> Path.join()
|> Path.expand()
end
@doc """
Get the path to which the release tarball will be output
"""
@spec archive_path(t) :: String.t()
def archive_path(%__MODULE__{profile: %Profile{executable: e} = p} = r) when is_list(e) do
if Keyword.get(e, :enabled, false) do
Path.join([bin_path(r), "#{r.name}.run"])
else
archive_path(%__MODULE__{r | profile: %{p | executable: false}})
end
end
def archive_path(%__MODULE__{profile: %Profile{executable: false}} = r) do
Path.join([version_path(r), "#{r.name}.tar.gz"])
end
  # Returns the environment that the provided Config has selected
  @doc false
  @spec select_environment(Config.t()) :: {:ok, Environment.t()} | {:error, :missing_environment}
  def select_environment(
        %Config{selected_environment: :default, default_environment: :default} = c
      ) do
    # Nothing selected and no default configured: fall back to the
    # environment literally named :default.
    case Map.get(c.environments, :default) do
      nil ->
        {:error, :missing_environment}

      env ->
        {:ok, env}
    end
  end

  # A default environment name was configured; re-dispatch with it selected.
  def select_environment(%Config{selected_environment: :default, default_environment: name} = c),
    do: select_environment(%Config{c | selected_environment: name})

  def select_environment(%{selected_environment: name} = c) do
    case Map.get(c.environments, name) do
      nil ->
        {:error, :missing_environment}

      env ->
        {:ok, env}
    end
  end
  # Returns the release that the provided Config has selected
  @doc false
  @spec select_release(Config.t()) :: {:ok, t} | {:error, :missing_release}
  # Nothing selected and no default configured: pick the first defined release.
  def select_release(%Config{selected_release: :default, default_release: :default} = c),
    do: {:ok, List.first(Map.values(c.releases))}

  # A default release name was configured; re-dispatch with it selected.
  def select_release(%Config{selected_release: :default, default_release: name} = c),
    do: select_release(%Config{c | selected_release: name})

  def select_release(%Config{selected_release: name} = c) do
    case Map.get(c.releases, name) do
      nil ->
        {:error, :missing_release}

      release ->
        {:ok, release}
    end
  end
  # Applies the environment settings to a release
  @doc false
  @spec apply_environment(t, Environment.t()) :: t
  def apply_environment(%__MODULE__{profile: rel_profile} = r, %Environment{name: env_name} = env) do
    env_profile = Map.from_struct(env.profile)

    profile =
      Enum.reduce(env_profile, rel_profile, fn
        {:plugins, ps}, acc when ps not in [nil, []] ->
          # Merge plugins
          rel_plugins = Map.get(acc, :plugins, [])
          Map.put(acc, :plugins, rel_plugins ++ ps)

        {k, v}, acc ->
          # For all other keys, the environment value overrides the release
          # value unless it is unset (nil or []).
          case v do
            ignore when ignore in [nil, []] ->
              acc

            _ ->
              Map.put(acc, k, v)
          end
      end)

    %{r | :env => env_name, :profile => profile}
  end
  # Runs the full set of release validation checks via Mix.Releases.Checks.
  @doc false
  defdelegate validate(release), to: Mix.Releases.Checks, as: :run
  # Applies global configuration options to the release profile
  @doc false
  @spec apply_configuration(t, Config.t()) :: {:ok, t} | {:error, term}
  @spec apply_configuration(t, Config.t(), log? :: boolean) :: {:ok, t} | {:error, term}
  def apply_configuration(%__MODULE__{} = release, %Config{} = config, log? \\ false) do
    profile = release.profile

    # Default the config path to the Mix project's :config_path when unset.
    profile =
      case profile.config do
        p when is_binary(p) ->
          %{profile | config: p}

        _ ->
          %{profile | config: Keyword.get(Mix.Project.config(), :config_path)}
      end

    # Resolve the ERTS version: a binary means "bundle ERTS from this path",
    # `true` means "bundle the host ERTS", anything else means "do not
    # bundle" (system libs are then not included either).
    profile =
      case profile.include_erts do
        p when is_binary(p) ->
          case Utils.detect_erts_version(p) do
            {:error, _} = err ->
              throw(err)

            {:ok, vsn} ->
              %{profile | erts_version: vsn, include_system_libs: true}
          end

        true ->
          %{profile | erts_version: Utils.erts_version(), include_system_libs: true}

        _ ->
          %{profile | erts_version: Utils.erts_version(), include_system_libs: false}
      end

    # Cookies must be atoms; binaries are converted, other values rejected.
    profile =
      case profile.cookie do
        nil ->
          profile

        c when is_atom(c) ->
          profile

        c when is_binary(c) ->
          %{profile | cookie: String.to_atom(c)}

        c ->
          throw({:error, {:assembler, {:invalid_cookie, c}}})
      end

    release = %{release | profile: profile}

    # Resolve the full application list (with versions/paths).
    release =
      case apps(release) do
        {:error, _} = err ->
          throw(err)

        apps ->
          %{release | applications: apps}
      end

    if config.is_upgrade do
      apply_upgrade_configuration(release, config, log?)
    else
      {:ok, release}
    end
  catch
    # Internal throws above carry ready-made {:error, _} tuples.
    :throw, {:error, _} = err ->
      err
  end
  # `upgrade_from: :latest` — pick the most recent previously built version,
  # skipping the version currently being built if it heads the list.
  defp apply_upgrade_configuration(%__MODULE__{} = release, %Config{upgrade_from: :latest}, log?) do
    current_version = release.version

    upfrom =
      case Utils.get_release_versions(release.profile.output_dir) do
        [] ->
          :no_upfrom

        [^current_version, v | _] ->
          v

        [v | _] ->
          v
      end

    case upfrom do
      :no_upfrom ->
        # Nothing to upgrade from: degrade gracefully to a plain release.
        if log? do
          Shell.warn(
            "An upgrade was requested, but there are no " <>
              "releases to upgrade from, no upgrade will be performed."
          )
        end

        {:ok, %{release | :is_upgrade => false, :upgrade_from => nil}}

      v ->
        {:ok, %{release | :is_upgrade => true, :upgrade_from => v}}
    end
  end

  # Upgrading from the version currently being built is invalid.
  defp apply_upgrade_configuration(%__MODULE__{version: v}, %Config{upgrade_from: v}, _log?) do
    {:error, {:assembler, {:bad_upgrade_spec, :upfrom_is_current, v}}}
  end

  # Explicit upfrom version: verify its release directory actually exists.
  defp apply_upgrade_configuration(
         %__MODULE__{name: name} = release,
         %Config{upgrade_from: v},
         log?
       ) do
    current_version = release.version

    if log?,
      do: Shell.debug("Upgrading #{name} from #{v} to #{current_version}")

    upfrom_path = Path.join([release.profile.output_dir, "releases", v])

    if File.exists?(upfrom_path) do
      {:ok, %{release | :is_upgrade => true, :upgrade_from => v}}
    else
      {:error, {:assembler, {:bad_upgrade_spec, :doesnt_exist, v, upfrom_path}}}
    end
  end
@doc """
Returns a list of all code_paths of all appliactions included in the release
"""
@spec get_code_paths(t) :: [charlist]
def get_code_paths(%__MODULE__{profile: %Profile{output_dir: output_dir}} = release) do
release.applications
|> Enum.flat_map(fn %App{name: name, vsn: version, path: path} ->
lib_dir = Path.join([output_dir, "lib", "#{name}-#{version}", "ebin"])
[String.to_charlist(lib_dir), String.to_charlist(Path.join(path, "ebin"))]
end)
end
  @doc """
  Gets a list of {app, vsn} tuples for the current release.

  An optional second parameter enables/disables debug logging of discovered apps.
  """
  @spec apps(t) :: [App.t()] | {:error, term}
  def apps(%__MODULE__{name: name, applications: apps} = release) do
    # The list of applications which have been _manually_ specified
    # to be part of this release - it is not required to be exhaustive
    apps =
      if Enum.member?(apps, name) do
        apps
      else
        cond do
          Mix.Project.umbrella?() ->
            # Nothing to do
            apps

          Mix.Project.config()[:app] == name ->
            # This is a non-umbrella project, with a release named the same as the app
            # Make sure the app is part of the release, or it makes no sense
            apps ++ [name]

          :else ->
            # The release is named something different, nothing to do
            apps
        end
      end

    # Validate listed apps
    for app <- apps do
      app_name =
        case app do
          {name, start_type} ->
            # Entries may be {app, start_type}; reject unknown start types.
            if App.valid_start_type?(start_type) do
              name
            else
              throw({:invalid_start_type, name, start_type})
            end

          name ->
            name
        end

      # Loading the application verifies that it exists and is resolvable.
      case Application.load(app_name) do
        :ok ->
          :ok

        {:error, {:already_loaded, _}} ->
          :ok

        {:error, reason} ->
          throw({:invalid_app, app_name, reason})
      end
    end

    # A graph of relationships between applications
    dg = :digraph.new([:acyclic, :protected])
    as = :ets.new(name, [:set, :protected])

    try do
      # Add app relationships to the graph
      the_apps = add_apps(dg, as, apps)
      # Perform topological sort
      result =
        case :digraph_utils.topsort(dg) do
          false ->
            raise "Unable to topologically sort the dependency graph!"

          sorted ->
            sorted
            |> Enum.reverse()
            |> Enum.map(fn a -> elem(hd(:ets.lookup(as, a)), 1) end)
            |> Enum.map(&correct_app_path_and_vsn(&1, release))
        end

      print_discovered_apps(result)
      # NOTE(review): the topologically sorted `result` is only printed here;
      # the function returns `the_apps` (discovery order) instead — confirm
      # this is intentional before relying on the ordering.
      the_apps
    after
      # Always release the ETS table and digraph, even on raise/throw.
      :ets.delete(as)
      :digraph.delete(dg)
    end
  catch
    # Validation throws above become a tagged error tuple for the caller.
    :throw, err ->
      {:error, {:apps, err}}
  end
# Seeds the dependency graph with every explicitly listed application,
# pulling in each one's transitive dependency closure. An empty list
# yields an empty result.
defp add_apps(dg, as, apps) do
  Enum.flat_map(apps, &add_app(dg, as, nil, &1))
end
# Adds one application (and, transitively, its dependencies) to the graph.
defp add_app(dg, as, parent, {name, start_type}) do
  if :digraph.vertex(dg, name) == false do
    # First time this app is seen - load its metadata and walk its deps.
    do_add_app(dg, as, parent, App.new(name, start_type))
  else
    # Vertex already present, so this app was handled on an earlier visit.
    []
  end
end

# A bare app name is shorthand for an app with the default start type.
defp add_app(dg, as, parent, name), do: add_app(dg, as, parent, {name, nil})
# Registers an app in the graph/ETS table and recurses into its children.
# A `nil` parent marks a top-level app, which gets a vertex but no edge.
defp do_add_app(dg, as, nil, app) do
  :digraph.add_vertex(dg, app.name)
  :ets.insert(as, {app.name, app})
  children = app.applications ++ app.included_applications
  [app | do_add_children(dg, as, app.name, children)]
end

defp do_add_app(dg, as, parent, app) do
  :digraph.add_vertex(dg, app.name)
  :ets.insert(as, {app.name, app})

  # The digraph is :acyclic, so add_edge fails if this edge closes a cycle.
  case :digraph.add_edge(dg, parent, app.name) do
    {:error, reason} ->
      raise "edge from #{parent} to #{app.name} would result in cycle: #{inspect(reason)}"

    _ ->
      children = app.applications ++ app.included_applications
      [app | do_add_children(dg, as, app.name, children)]
  end
end
# Registers each child application under `parent`; an empty child list
# contributes nothing.
defp do_add_children(dg, as, parent, children) do
  Enum.flat_map(children, &add_app(dg, as, parent, &1))
end
# When :include_erts is a plain boolean, app paths need no correction.
defp correct_app_path_and_vsn(%App{} = app, %__MODULE__{profile: %Profile{include_erts: ie}})
when ie in [true, false] do
  app
end

# When :include_erts is a path to an alternate ERTS, libraries shipped with
# ERTS must be resolved against that ERTS's lib directory instead of the
# host environment's.
defp correct_app_path_and_vsn(%App{} = app, %__MODULE__{profile: %Profile{include_erts: p}}) do
  # Correct any ERTS libs which should be pulled from the correct
  # ERTS directory, not from the current environment.
  lib_dir = Path.expand(Path.join(p, "lib"))
  if Utils.is_erts_lib?(app.path) do
    case Path.wildcard(Path.join(lib_dir, "#{app.name}-*")) do
      [corrected_app_path | _] ->
        # Derive the version from the "<name>-<vsn>" directory basename.
        # NOTE(review): the two-element match assumes exactly one "-" in the
        # basename; a name or version containing "-" would raise MatchError -
        # confirm this cannot occur for ERTS-provided libs.
        [_, corrected_app_vsn] =
          String.split(Path.basename(corrected_app_path), "-", trim: true)
        %App{app | vsn: corrected_app_vsn, path: corrected_app_path}
      _ ->
        # No matching library shipped with the target ERTS; thrown value is
        # converted to an error tuple by the caller's catch clause.
        throw({:apps, {:missing_required_lib, app.name, lib_dir}})
    end
  else
    app
  end
end
# Logs the resolved application list at debug verbosity.
defp print_discovered_apps(apps) do
  Shell.debug("Discovered applications:")
  do_print_discovered_apps(apps)
end
# Prints one debug entry per app: its name/version, source path, and its
# regular and included dependencies. Output strings are unchanged; the
# duplicated map-then-join in both branches is collapsed into
# `format_app_list/1` using `Enum.map_join/3`.
defp do_print_discovered_apps([]), do: :ok

defp do_print_discovered_apps([app | rest]) do
  where = Path.relative_to_cwd(app.path)
  Shell.debugf(" > #{Shell.colorf("#{app.name}-#{app.vsn}", :white)}")
  Shell.debugf("\n |\n | from: #{where}\n")

  case app.applications do
    [] ->
      Shell.debugf(" | applications: none\n")

    deps ->
      Shell.debugf(" | applications:\n | #{format_app_list(deps)}\n")
  end

  case app.included_applications do
    [] ->
      Shell.debugf(" | includes: none\n")

    included ->
      # NOTE(review): this line had no trailing "\n" in the original; output
      # is preserved as-is.
      Shell.debugf(" | includes:\n | #{format_app_list(included)}")
  end

  Shell.debugf(" |_____\n\n")
  do_print_discovered_apps(rest)
end

# Renders a list of app names with the " | " continuation prefix between them.
defp format_app_list(apps), do: Enum.map_join(apps, "\n | ", &inspect/1)
end
| 29.097245 | 143 | 0.586754 |
e8259b75a4fb30c16de6ac889f3b01e393c2876c | 1,998 | exs | Elixir | test/git_diff_test.exs | mononym/git_diff | 230a3a554c624e02b73c4a56cc172d7964d2851e | [
"MIT"
] | 14 | 2019-10-28T13:52:48.000Z | 2021-01-03T08:42:52.000Z | test/git_diff_test.exs | mononym/git_diff | 230a3a554c624e02b73c4a56cc172d7964d2851e | [
"MIT"
] | 5 | 2019-10-26T19:31:28.000Z | 2021-01-13T11:59:26.000Z | test/git_diff_test.exs | mononym/git_diff | 230a3a554c624e02b73c4a56cc172d7964d2851e | [
"MIT"
] | 6 | 2018-09-24T21:08:24.000Z | 2021-01-01T05:34:26.000Z | defmodule GitDiffTest do
use ExUnit.Case
import File
test "parse a valid diff" do
  # A well-formed diff must parse into an {:ok, _} result.
  diff = read!("test/diff.txt")
  assert {:ok, _} = GitDiff.parse_patch(diff)
end
test "stream a valid diff" do
  # Materializing the stream exercises the streaming parser; the test passes
  # as long as nothing raises. NOTE(review): nothing asserts on the parsed
  # patches themselves.
  "test/diff.txt"
  |> stream!()
  |> GitDiff.stream_patch()
  |> Enum.to_list()
end
test "relative_from and relative_to adjust patch dirs" do
  # Both options strip the "/tmp/foo" prefix, so the last patch's paths and
  # its rename headers should all be reported relative to that directory.
  {:ok, patch} =
    stream!("test/diff.txt")
    |> GitDiff.stream_patch(relative_to: "/tmp/foo", relative_from: "/tmp/foo")
    |> Enum.to_list()
    |> List.last()
  assert patch.from == "package-phx_new-1.0.0-BD5E394E/my_app/web/static/assets/favicon.ico"
  assert patch.to == "package-phx_new-1.5.7-086C1921/my_app/assets/static/favicon.ico"
  assert patch.headers["rename from"] == "package-phx_new-1.0.0-BD5E394E/my_app/web/static/assets/favicon.ico"
  assert patch.headers["rename to"] == "package-phx_new-1.5.7-086C1921/my_app/assets/static/favicon.ico"
end
test "reads renames" do
  # Without the relative_* options, the absolute paths recorded in the diff's
  # rename headers are preserved verbatim on the parsed patch.
  {:ok, patch} =
    stream!("test/diff.txt")
    |> GitDiff.stream_patch()
    |> Enum.to_list()
    |> List.last()
  assert patch.from == "/tmp/foo/package-phx_new-1.0.0-BD5E394E/my_app/web/static/assets/favicon.ico"
  assert patch.to == "/tmp/foo/package-phx_new-1.5.7-086C1921/my_app/assets/static/favicon.ico"
  assert patch.headers["rename from"] == "/tmp/foo/package-phx_new-1.0.0-BD5E394E/my_app/web/static/assets/favicon.ico"
  assert patch.headers["rename to"] == "/tmp/foo/package-phx_new-1.5.7-086C1921/my_app/assets/static/favicon.ico"
end
test "parse an invalid diff" do
  # Every fixture under test/bad_diffs must produce an {:error, _} result.
  dir = "test/bad_diffs"

  for file <- ls!(dir) do
    contents = read!("#{dir}/#{file}")
    assert {:error, _} = GitDiff.parse_patch(contents)
  end
end
test "stream an invalid diff" do
  # Only verifies that streaming malformed diffs does not raise.
  # NOTE(review): unlike the parse test above, no assertion is made on the
  # streamed output, so parser errors in the result would go unnoticed.
  dir = "test/bad_diffs"

  for file <- ls!(dir) do
    "#{dir}/#{file}"
    |> stream!()
    |> GitDiff.stream_patch()
    |> Enum.to_list()
  end
end
end
| 33.864407 | 121 | 0.657157 |
e825b9c0074ec55c134731ca3b65ee6708f5fcbe | 1,091 | ex | Elixir | lib/rocketpay_web/controllers/accounts_controller.ex | gabrielkim13/nlw4-rocketpay | 7d43ba56beb99bdf271be2af73d4b16df7b81317 | [
"RSA-MD"
] | 1 | 2021-12-15T17:32:15.000Z | 2021-12-15T17:32:15.000Z | lib/rocketpay_web/controllers/accounts_controller.ex | gabrielkim13/nlw4-rocketpay | 7d43ba56beb99bdf271be2af73d4b16df7b81317 | [
"RSA-MD"
] | null | null | null | lib/rocketpay_web/controllers/accounts_controller.ex | gabrielkim13/nlw4-rocketpay | 7d43ba56beb99bdf271be2af73d4b16df7b81317 | [
"RSA-MD"
] | null | null | null | defmodule RocketpayWeb.AccountsController do
use RocketpayWeb, :controller
alias Rocketpay.Account
alias Rocketpay.Accounts.Transactions.Response, as: TransactionResponse
action_fallback RocketpayWeb.FallbackController
# Deposits into an account; renders the updated account on success.
# Any non-matching result falls through to the configured action_fallback.
def deposit(conn, params) do
  case Rocketpay.deposit(params) do
    {:ok, %Account{} = account} ->
      conn
      |> put_status(:created)
      |> render("update.json", %{account: account})

    error ->
      error
  end
end
# Withdraws from an account; renders the updated account on success.
# Any non-matching result falls through to the configured action_fallback.
def withdraw(conn, params) do
  case Rocketpay.withdraw(params) do
    {:ok, %Account{} = account} ->
      conn
      |> put_status(:created)
      |> render("update.json", %{account: account})

    error ->
      error
  end
end
# Runs a transaction in a separate Task process and blocks on the result.
def transaction(conn, params) do
  # Async / await example: the transaction runs concurrently in a Task and
  # we await its result below (Task.await/1 uses a 5_000 ms default timeout).
  task = Task.async(fn -> Rocketpay.transaction(params) end)
  # Only start the task, without awaiting its return.
  # Task.start(fn -> Rocketpay.transaction(params) end)
  with {:ok, %TransactionResponse{} = transaction} <- Task.await(task) do
    conn
    |> put_status(:created)
    |> render("transaction.json", %{transaction: transaction})
  end
end
end
| 27.974359 | 75 | 0.672777 |
e825c60287355bfb00a300262983ef4dc87cfced | 3,574 | ex | Elixir | lib/mix/lib/mix/tasks/deps.clean.ex | Joe-noh/elixir | 34bf464bc1a035b6015366463e04f1bf9d2065f3 | [
"Apache-2.0"
] | null | null | null | lib/mix/lib/mix/tasks/deps.clean.ex | Joe-noh/elixir | 34bf464bc1a035b6015366463e04f1bf9d2065f3 | [
"Apache-2.0"
] | null | null | null | lib/mix/lib/mix/tasks/deps.clean.ex | Joe-noh/elixir | 34bf464bc1a035b6015366463e04f1bf9d2065f3 | [
"Apache-2.0"
] | null | null | null | defmodule Mix.Tasks.Deps.Clean do
use Mix.Task
@shortdoc "Deletes the given dependencies' files"
@moduledoc """
Deletes the given dependencies' files, including build artifacts and fetched
sources.
Since this is a destructive action, cleaning of dependencies
can only happen by passing arguments/options:
* `dep1 dep2` - the name of dependencies to be deleted separated by a space
* `--unlock` - also unlocks the deleted dependencies
* `--build` - deletes only compiled files (keeps source files)
* `--all` - deletes all dependencies
* `--unused` - deletes only unused dependencies
(i.e. dependencies no longer mentioned in `mix.exs` are removed)
By default this task works across all environments,
unless `--only` is given which will clean all dependencies
leaving only the ones for chosen environment.
"""
@switches [unlock: :boolean, all: :boolean, only: :string, unused: :boolean,
build: :boolean]
@spec run(OptionParser.argv) :: :ok
def run(args) do
Mix.Project.get!
{opts, apps, _} = OptionParser.parse(args, switches: @switches)
build_path =
Mix.Project.build_path
|> Path.dirname
|> Path.join("#{opts[:only] || :*}/lib")
deps_path = Mix.Project.deps_path
loaded_opts = if only = opts[:only], do: [env: :"#{only}"], else: []
loaded_deps = Mix.Dep.loaded(loaded_opts)
apps_to_clean = cond do
opts[:all] -> checked_deps(build_path, deps_path)
opts[:unused] -> checked_deps(build_path, deps_path) |> filter_loaded(loaded_deps)
apps != [] -> apps
true ->
Mix.raise "\"mix deps.clean\" expects dependencies as arguments or " <>
"a flag indicating which dependencies to clean. " <>
"The --all option will clean all dependencies while " <>
"the --unused option cleans unused dependencies"
end
do_clean(apps_to_clean, loaded_deps, build_path, deps_path, opts[:build])
if opts[:unlock] do
Mix.Task.run "deps.unlock", args
else
:ok
end
end
defp checked_deps(build_path, deps_path) do
for root <- [deps_path, build_path],
path <- Path.wildcard(Path.join(root, "*")),
File.dir?(path) do
Path.basename(path)
end
|> Enum.uniq()
|> List.delete(to_string(Mix.Project.config[:app]))
end
defp filter_loaded(apps, deps) do
apps -- Enum.map(deps, &Atom.to_string(&1.app))
end
defp maybe_warn_for_invalid_path([], dependency) do
Mix.shell.error "warning: the dependency #{dependency} is not present in the build directory"
[]
end
defp maybe_warn_for_invalid_path(paths, _dependency) do
paths
end
defp do_clean(apps, deps, build_path, deps_path, build_only?) do
shell = Mix.shell
local =
for %{scm: scm, app: app} <- deps,
not scm.fetchable?,
do: Atom.to_string(app)
Enum.each apps, fn app ->
shell.info "* Cleaning #{app}"
# Remove everything from the build directory of dependencies
build_path
|> Path.join(to_string(app))
|> Path.wildcard
|> maybe_warn_for_invalid_path(app)
|> Enum.each(&File.rm_rf!/1)
# Remove everything from the source directory of dependencies.
# Skip this step if --build option is specified or if
# the dependency is local, ie, referenced using :path.
if build_only? || app in local do
:do_not_delete_source
else
deps_path
|> Path.join(to_string(app))
|> File.rm_rf!
end
end
end
end
| 31.078261 | 97 | 0.645215 |
e825d1dbd969cb557e9d08dcb57068ca8dceeb36 | 80,598 | ex | Elixir | lib/aws/generated/s3_control.ex | justinludwig/aws-elixir | c66dfebecec62587dada50602c31c76d307d812c | [
"Apache-2.0"
] | null | null | null | lib/aws/generated/s3_control.ex | justinludwig/aws-elixir | c66dfebecec62587dada50602c31c76d307d812c | [
"Apache-2.0"
] | null | null | null | lib/aws/generated/s3_control.ex | justinludwig/aws-elixir | c66dfebecec62587dada50602c31c76d307d812c | [
"Apache-2.0"
] | null | null | null | # WARNING: DO NOT EDIT, AUTO-GENERATED CODE!
# See https://github.com/aws-beam/aws-codegen for more details.
defmodule AWS.S3Control do
@moduledoc """
AWS S3 Control provides access to Amazon S3 control plane actions.
"""
alias AWS.Client
alias AWS.Request
# Service descriptor passed to `Request.request_rest/9` by every operation
# in this module (endpoint, protocol, and signing details for S3 Control).
def metadata do
  %AWS.ServiceMetadata{
    abbreviation: nil,
    api_version: "2018-08-20",
    content_type: "text/xml",
    credential_scope: nil,
    endpoint_prefix: "s3-control",
    global?: false,
    protocol: "rest-xml",
    service_id: "S3 Control",
    signature_version: "s3v4",
    # Note: the signing name is "s3" even though the endpoint prefix is
    # "s3-control".
    signing_name: "s3",
    target_prefix: nil
  }
end
@doc """
Creates an access point and associates it with the specified bucket.
For more information, see [Managing Data Access with Amazon S3 Access Points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html)
in the *Amazon Simple Storage Service User Guide*.
S3 on Outposts only supports VPC-style Access Points.
For more information, see [ Accessing Amazon S3 on Outposts using virtual private cloud (VPC) only Access
Points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
in the *Amazon Simple Storage Service User Guide*.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of `x-amz-outpost-id` to be passed with the request and an
S3 on Outposts endpoint hostname prefix instead of `s3-control`. For an example
of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts
endpoint hostname prefix and the `x-amz-outpost-id` derived using the access
point ARN, see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html#API_control_CreateAccessPoint_Examples) section.
The following actions are related to `CreateAccessPoint`:
*
[GetAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html)
*
[DeleteAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPoint.html) *
[ListAccessPoints](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPoints.html)
"""
def create_access_point(%Client{} = client, name, input, options \\ []) do
  path = "/v20180820/accesspoint/#{URI.encode(name)}"

  # Lift AccountId out of the input and send it as the x-amz-account-id header.
  {headers, input} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :put, path, [], headers, input, options, nil)
end
@doc """
Creates an Object Lambda Access Point.
For more information, see [Transforming objects with Object Lambda Access Points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html)
in the *Amazon Simple Storage Service User Guide*.
The following actions are related to `CreateAccessPointForObjectLambda`:
*
[DeleteAccessPointForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointForObjectLambda.html) *
[GetAccessPointForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointForObjectLambda.html)
*
[ListAccessPointsForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPointsForObjectLambda.html)
"""
def create_access_point_for_object_lambda(%Client{} = client, name, input, options \\ []) do
  path = "/v20180820/accesspointforobjectlambda/#{URI.encode(name)}"

  # Lift AccountId out of the input and send it as the x-amz-account-id header.
  {headers, input} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :put, path, [], headers, input, options, nil)
end
@doc """
This action creates an Amazon S3 on Outposts bucket.
To create an S3 bucket, see [Create Bucket](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
in the *Amazon Simple Storage Service API*.
Creates a new Outposts bucket. By creating the bucket, you become the bucket
owner. To create an Outposts bucket, you must have S3 on Outposts. For more
information, see [Using Amazon S3 on Outposts](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
in *Amazon Simple Storage Service User Guide*.
Not every string is an acceptable bucket name. For information on bucket naming
restrictions, see [Working with Amazon S3 Buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/BucketRestrictions.html#bucketnamingrules).
S3 on Outposts buckets support:
* Tags
* LifecycleConfigurations for deleting expired objects
For a complete list of restrictions and Amazon S3 feature limitations on S3 on
Outposts, see [ Amazon S3 on Outposts Restrictions and Limitations](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3OnOutpostsRestrictionsLimitations.html).
For an example of the request syntax for Amazon S3 on Outposts that uses the S3
on Outposts endpoint hostname prefix and `x-amz-outpost-id` in your API request,
see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html#API_control_CreateBucket_Examples) section.
The following actions are related to `CreateBucket` for Amazon S3 on Outposts:
*
[PutObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
*
[GetBucket](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucket.html) *
[DeleteBucket](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucket.html)
*
[CreateAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html) *
[PutAccessPointPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicy.html)
"""
def create_bucket(%Client{} = client, bucket, input, options \\ []) do
  path = "/v20180820/bucket/#{URI.encode(bucket)}"

  # Map the canned-ACL / grant / lock / outpost fields onto request headers.
  header_mapping = [
    {"ACL", "x-amz-acl"},
    {"GrantFullControl", "x-amz-grant-full-control"},
    {"GrantRead", "x-amz-grant-read"},
    {"GrantReadACP", "x-amz-grant-read-acp"},
    {"GrantWrite", "x-amz-grant-write"},
    {"GrantWriteACP", "x-amz-grant-write-acp"},
    {"ObjectLockEnabledForBucket", "x-amz-bucket-object-lock-enabled"},
    {"OutpostId", "x-amz-outpost-id"}
  ]

  {headers, input} = Request.build_params(header_mapping, input)

  # Surface the Location response header in the parsed result.
  options = Keyword.put(options, :response_header_parameters, [{"Location", "Location"}])

  Request.request_rest(client, metadata(), :put, path, [], headers, input, options, nil)
end
@doc """
You can use S3 Batch Operations to perform large-scale batch actions on Amazon
S3 objects.
Batch Operations can run a single action on lists of Amazon S3 objects that you
specify. For more information, see [S3 Batch Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html)
in the *Amazon Simple Storage Service User Guide*.
This action creates a S3 Batch Operations job.
Related actions include:
*
[DescribeJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html) *
[ListJobs](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html)
*
[UpdateJobPriority](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobPriority.html) *
[UpdateJobStatus](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html)
*
[JobOperation](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_JobOperation.html)
"""
def create_job(%Client{} = client, input, options \\ []) do
  # Lift AccountId out of the input and send it as the x-amz-account-id header.
  {headers, input} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :post, "/v20180820/jobs", [], headers, input, options, nil)
end
@doc """
Deletes the specified access point.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of `x-amz-outpost-id` to be passed with the request and an
S3 on Outposts endpoint hostname prefix instead of `s3-control`. For an example
of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts
endpoint hostname prefix and the `x-amz-outpost-id` derived using the access
point ARN, see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPoint.html#API_control_DeleteAccessPoint_Examples) section.
The following actions are related to `DeleteAccessPoint`:
*
[CreateAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html)
*
[GetAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html) *
[ListAccessPoints](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPoints.html)
"""
def delete_access_point(%Client{} = client, name, input, options \\ []) do
  path = "/v20180820/accesspoint/#{URI.encode(name)}"

  # Lift AccountId out of the input and send it as the x-amz-account-id header.
  {headers, input} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :delete, path, [], headers, input, options, nil)
end
@doc """
Deletes the specified Object Lambda Access Point.
The following actions are related to `DeleteAccessPointForObjectLambda`:
*
[CreateAccessPointForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPointForObjectLambda.html) *
[GetAccessPointForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointForObjectLambda.html)
*
[ListAccessPointsForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPointsForObjectLambda.html)
"""
def delete_access_point_for_object_lambda(%Client{} = client, name, input, options \\ []) do
  path = "/v20180820/accesspointforobjectlambda/#{URI.encode(name)}"

  # Lift AccountId out of the input and send it as the x-amz-account-id header.
  {headers, input} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :delete, path, [], headers, input, options, nil)
end
@doc """
Deletes the access point policy for the specified access point.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of `x-amz-outpost-id` to be passed with the request and an
S3 on Outposts endpoint hostname prefix instead of `s3-control`. For an example
of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts
endpoint hostname prefix and the `x-amz-outpost-id` derived using the access
point ARN, see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointPolicy.html#API_control_DeleteAccessPointPolicy_Examples) section.
The following actions are related to `DeleteAccessPointPolicy`:
*
[PutAccessPointPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicy.html)
*
[GetAccessPointPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointPolicy.html)
"""
def delete_access_point_policy(%Client{} = client, name, input, options \\ []) do
  path = "/v20180820/accesspoint/#{URI.encode(name)}/policy"

  # Lift AccountId out of the input and send it as the x-amz-account-id header.
  {headers, input} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :delete, path, [], headers, input, options, nil)
end
@doc """
Removes the resource policy for an Object Lambda Access Point.
The following actions are related to `DeleteAccessPointPolicyForObjectLambda`:
*
[GetAccessPointPolicyForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointPolicyForObjectLambda.html) *
[PutAccessPointPolicyForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicyForObjectLambda.html)
"""
def delete_access_point_policy_for_object_lambda(%Client{} = client, name, input, options \\ []) do
  path = "/v20180820/accesspointforobjectlambda/#{URI.encode(name)}/policy"

  # Lift AccountId out of the input and send it as the x-amz-account-id header.
  {headers, input} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :delete, path, [], headers, input, options, nil)
end
@doc """
This action deletes an Amazon S3 on Outposts bucket.
To delete an S3 bucket, see
[DeleteBucket](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) in the *Amazon Simple Storage Service API*.
Deletes the Amazon S3 on Outposts bucket. All objects (including all object
versions and delete markers) in the bucket must be deleted before the bucket
itself can be deleted. For more information, see [Using Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
in *Amazon Simple Storage Service User Guide*.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of `x-amz-outpost-id` to be passed with the request and an
S3 on Outposts endpoint hostname prefix instead of `s3-control`. For an example
of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts
endpoint hostname prefix and the `x-amz-outpost-id` derived using the access
point ARN, see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucket.html#API_control_DeleteBucket_Examples) section.
## Related Resources
*
[CreateBucket](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html)
*
[GetBucket](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucket.html) *
[DeleteObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
"""
def delete_bucket(%Client{} = client, bucket, input, options \\ []) do
  path = "/v20180820/bucket/#{URI.encode(bucket)}"

  # Lift AccountId out of the input and send it as the x-amz-account-id header.
  {headers, input} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :delete, path, [], headers, input, options, nil)
end
@doc """
This action deletes an Amazon S3 on Outposts bucket's lifecycle configuration.
To delete an S3 bucket's lifecycle configuration, see
[DeleteBucketLifecycle](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) in the *Amazon Simple Storage Service API*.
Deletes the lifecycle configuration from the specified Outposts bucket. Amazon
S3 on Outposts removes all the lifecycle configuration rules in the lifecycle
subresource associated with the bucket. Your objects never expire, and Amazon S3
on Outposts no longer automatically deletes any objects on the basis of rules
contained in the deleted lifecycle configuration. For more information, see
[Using Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
in *Amazon Simple Storage Service User Guide*.
To use this action, you must have permission to perform the
`s3-outposts:DeleteLifecycleConfiguration` action. By default, the bucket owner
has this permission and the Outposts bucket owner can grant this permission to
others.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of `x-amz-outpost-id` to be passed with the request and an
S3 on Outposts endpoint hostname prefix instead of `s3-control`. For an example
of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts
endpoint hostname prefix and the `x-amz-outpost-id` derived using the access
point ARN, see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketLifecycleConfiguration.html#API_control_DeleteBucketLifecycleConfiguration_Examples) section.
For more information about object expiration, see [Elements to Describe
Lifecycle
Actions](https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions).
Related actions include:
*
[PutBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketLifecycleConfiguration.html) *
[GetBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketLifecycleConfiguration.html)
"""
def delete_bucket_lifecycle_configuration(%Client{} = client, bucket, input, options \\ []) do
  path = "/v20180820/bucket/#{URI.encode(bucket)}/lifecycleconfiguration"

  # Lift AccountId out of the input and send it as the x-amz-account-id header.
  {headers, input} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :delete, path, [], headers, input, options, nil)
end
@doc """
This action deletes an Amazon S3 on Outposts bucket policy.
To delete an S3 bucket policy, see
[DeleteBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketPolicy.html) in the *Amazon Simple Storage Service API*.
This implementation of the DELETE action uses the policy subresource to delete
the policy of a specified Amazon S3 on Outposts bucket. If you are using an
identity other than the root user of the AWS account that owns the bucket, the
calling identity must have the `s3-outposts:DeleteBucketPolicy` permissions on
the specified Outposts bucket and belong to the bucket owner's account to use
this action. For more information, see [Using Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
in *Amazon Simple Storage Service User Guide*.
If you don't have `DeleteBucketPolicy` permissions, Amazon S3 returns a `403
Access Denied` error. If you have the correct permissions, but you're not using
an identity that belongs to the bucket owner's account, Amazon S3 returns a `405
Method Not Allowed` error.
As a security precaution, the root user of the AWS account that owns a bucket
can always use this action, even if the policy explicitly denies the root user
the ability to perform this action.
For more information about bucket policies, see [Using Bucket Policies and User Policies](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of `x-amz-outpost-id` to be passed with the request and an
S3 on Outposts endpoint hostname prefix instead of `s3-control`. For an example
of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts
endpoint hostname prefix and the `x-amz-outpost-id` derived using the access
point ARN, see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketPolicy.html#API_control_DeleteBucketPolicy_Examples) section.
The following actions are related to `DeleteBucketPolicy`:
*
[GetBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketPolicy.html)
*
[PutBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketPolicy.html)
"""
def delete_bucket_policy(%Client{} = client, bucket, input, options \\ []) do
  path = "/v20180820/bucket/#{URI.encode(bucket)}/policy"

  # Lift AccountId out of the input and send it as the x-amz-account-id header.
  {headers, input} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :delete, path, [], headers, input, options, nil)
end
@doc """
This action deletes an Amazon S3 on Outposts bucket's tags.
To delete an S3 bucket tags, see
[DeleteBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) in the *Amazon Simple Storage Service API*.
Deletes the tags from the Outposts bucket. For more information, see [Using
Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
in *Amazon Simple Storage Service User Guide*.
To use this action, you must have permission to perform the `PutBucketTagging`
action. By default, the bucket owner has this permission and can grant this
permission to others.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of `x-amz-outpost-id` to be passed with the request and an
S3 on Outposts endpoint hostname prefix instead of `s3-control`. For an example
of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts
endpoint hostname prefix and the `x-amz-outpost-id` derived using the access
point ARN, see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketTagging.html#API_control_DeleteBucketTagging_Examples) section.
The following actions are related to `DeleteBucketTagging`:
*
[GetBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketTagging.html)
*
[PutBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketTagging.html)
"""
def delete_bucket_tagging(%Client{} = client, bucket, input, options \\ []) do
  # The tag set lives at the /tagging subresource of the Outposts bucket.
  path = "/v20180820/bucket/#{URI.encode(bucket)}/tagging"

  # Lift the AccountId field out of the input map into its request header.
  {headers, input} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  # A successful delete is signalled by HTTP 204 No Content.
  Request.request_rest(client, metadata(), :delete, path, [], headers, input, options, 204)
end
@doc """
Removes the entire tag set from the specified S3 Batch Operations job.
To use this operation, you must have permission to perform the
`s3:DeleteJobTagging` action. For more information, see [Controlling access and labeling jobs using
tags](https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-managing-jobs.html#batch-ops-job-tags)
in the *Amazon Simple Storage Service User Guide*.
Related actions include:
*
[CreateJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) *
[GetJobTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetJobTagging.html)
*
[PutJobTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutJobTagging.html)
"""
def delete_job_tagging(%Client{} = client, job_id, input, options \\ []) do
  # Batch Operations job tags live at the /tagging subresource of the job.
  path = "/v20180820/jobs/#{URI.encode(job_id)}/tagging"

  # Lift the AccountId field out of the input map into its request header.
  {headers, input} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :delete, path, [], headers, input, options, nil)
end
@doc """
Removes the `PublicAccessBlock` configuration for an AWS account.
For more information, see [ Using Amazon S3 block public access](https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html).
Related actions include:
*
[GetPublicAccessBlock](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetPublicAccessBlock.html) *
[PutPublicAccessBlock](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutPublicAccessBlock.html)
"""
def delete_public_access_block(%Client{} = client, input, options \\ []) do
  # Account-level configuration: the path carries no resource identifier.
  path = "/v20180820/configuration/publicAccessBlock"

  # Lift the AccountId field out of the input map into its request header.
  {headers, input} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :delete, path, [], headers, input, options, nil)
end
@doc """
Deletes the Amazon S3 Storage Lens configuration.
For more information about S3 Storage Lens, see [Assessing your storage activity and usage with Amazon S3 Storage Lens
](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens.html) in the
*Amazon Simple Storage Service User Guide*.
To use this action, you must have permission to perform the
`s3:DeleteStorageLensConfiguration` action. For more information, see [Setting permissions to use Amazon S3 Storage
Lens](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens_iam_permissions.html)
in the *Amazon Simple Storage Service User Guide*.
"""
def delete_storage_lens_configuration(%Client{} = client, config_id, input, options \\ []) do
  # Storage Lens configurations are addressed by their configuration ID.
  path = "/v20180820/storagelens/#{URI.encode(config_id)}"

  # Lift the AccountId field out of the input map into its request header.
  {headers, input} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :delete, path, [], headers, input, options, nil)
end
@doc """
Deletes the Amazon S3 Storage Lens configuration tags.
For more information about S3 Storage Lens, see [Assessing your storage activity and usage with Amazon S3 Storage Lens
](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens.html) in the
*Amazon Simple Storage Service User Guide*.
To use this action, you must have permission to perform the
`s3:DeleteStorageLensConfigurationTagging` action. For more information, see
[Setting permissions to use Amazon S3 Storage Lens](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens_iam_permissions.html)
in the *Amazon Simple Storage Service User Guide*.
"""
def delete_storage_lens_configuration_tagging(
      %Client{} = client,
      config_id,
      input,
      options \\ []
    ) do
  # Tags live at the /tagging subresource of the Storage Lens configuration.
  path = "/v20180820/storagelens/#{URI.encode(config_id)}/tagging"

  # Lift the AccountId field out of the input map into its request header.
  {headers, input} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :delete, path, [], headers, input, options, nil)
end
@doc """
Retrieves the configuration parameters and status for a Batch Operations job.
For more information, see [S3 Batch Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html)
in the *Amazon Simple Storage Service User Guide*.
Related actions include:
*
[CreateJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) *
[ListJobs](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html)
*
[UpdateJobPriority](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobPriority.html) *
[UpdateJobStatus](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html)
"""
def describe_job(%Client{} = client, job_id, account_id, options \\ []) do
  # Job configuration and status are read via GET on the job resource.
  path = "/v20180820/jobs/#{URI.encode(job_id)}"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  Request.request_rest(client, metadata(), :get, path, [], headers, nil, options, nil)
end
@doc """
Returns configuration information about the specified access point.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of `x-amz-outpost-id` to be passed with the request and an
S3 on Outposts endpoint hostname prefix instead of `s3-control`. For an example
of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts
endpoint hostname prefix and the `x-amz-outpost-id` derived using the access
point ARN, see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html#API_control_GetAccessPoint_Examples) section.
The following actions are related to `GetAccessPoint`:
*
[CreateAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html)
*
[DeleteAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPoint.html) *
[ListAccessPoints](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPoints.html)
"""
def get_access_point(%Client{} = client, name, account_id, options \\ []) do
  # Access points are addressed by name under the account.
  path = "/v20180820/accesspoint/#{URI.encode(name)}"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  Request.request_rest(client, metadata(), :get, path, [], headers, nil, options, nil)
end
@doc """
Returns configuration for an Object Lambda Access Point.
The following actions are related to
`GetAccessPointConfigurationForObjectLambda`:
*
[PutAccessPointConfigurationForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointConfigurationForObjectLambda.html)
"""
def get_access_point_configuration_for_object_lambda(
      %Client{} = client,
      name,
      account_id,
      options \\ []
    ) do
  # The transformation configuration is a subresource of the Object Lambda AP.
  path = "/v20180820/accesspointforobjectlambda/#{URI.encode(name)}/configuration"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  Request.request_rest(client, metadata(), :get, path, [], headers, nil, options, nil)
end
@doc """
Returns configuration information about the specified Object Lambda Access Point.
The following actions are related to `GetAccessPointForObjectLambda`:
*
[CreateAccessPointForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPointForObjectLambda.html) *
[DeleteAccessPointForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointForObjectLambda.html)
*
[ListAccessPointsForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPointsForObjectLambda.html)
"""
def get_access_point_for_object_lambda(%Client{} = client, name, account_id, options \\ []) do
  # Object Lambda Access Points are addressed by name under the account.
  path = "/v20180820/accesspointforobjectlambda/#{URI.encode(name)}"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  Request.request_rest(client, metadata(), :get, path, [], headers, nil, options, nil)
end
@doc """
Returns the access point policy associated with the specified access point.
The following actions are related to `GetAccessPointPolicy`:
*
[PutAccessPointPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicy.html) *
[DeleteAccessPointPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointPolicy.html)
"""
def get_access_point_policy(%Client{} = client, name, account_id, options \\ []) do
  # The access policy lives at the /policy subresource of the access point.
  path = "/v20180820/accesspoint/#{URI.encode(name)}/policy"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  Request.request_rest(client, metadata(), :get, path, [], headers, nil, options, nil)
end
@doc """
Returns the resource policy for an Object Lambda Access Point.
The following actions are related to `GetAccessPointPolicyForObjectLambda`:
*
[DeleteAccessPointPolicyForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointPolicyForObjectLambda.html) *
[PutAccessPointPolicyForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicyForObjectLambda.html)
"""
def get_access_point_policy_for_object_lambda(
      %Client{} = client,
      name,
      account_id,
      options \\ []
    ) do
  # The resource policy lives at the /policy subresource of the Object Lambda AP.
  path = "/v20180820/accesspointforobjectlambda/#{URI.encode(name)}/policy"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  Request.request_rest(client, metadata(), :get, path, [], headers, nil, options, nil)
end
@doc """
Indicates whether the specified access point currently has a policy that allows
public access.
For more information about public access through access points, see [Managing Data Access with Amazon S3 Access
Points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html)
in the *Amazon Simple Storage Service Developer Guide*.
"""
def get_access_point_policy_status(%Client{} = client, name, account_id, options \\ []) do
  # The /policyStatus subresource reports whether the policy allows public access.
  path = "/v20180820/accesspoint/#{URI.encode(name)}/policyStatus"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  Request.request_rest(client, metadata(), :get, path, [], headers, nil, options, nil)
end
@doc """
Returns the status of the resource policy associated with an Object Lambda
Access Point.
"""
def get_access_point_policy_status_for_object_lambda(
      %Client{} = client,
      name,
      account_id,
      options \\ []
    ) do
  # The /policyStatus subresource reports the policy status of the Object Lambda AP.
  path = "/v20180820/accesspointforobjectlambda/#{URI.encode(name)}/policyStatus"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  Request.request_rest(client, metadata(), :get, path, [], headers, nil, options, nil)
end
@doc """
Gets an Amazon S3 on Outposts bucket.
For more information, see [ Using Amazon S3 on Outposts](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
in the *Amazon Simple Storage Service User Guide*.
If you are using an identity other than the root user of the AWS account that
owns the Outposts bucket, the calling identity must have the
`s3-outposts:GetBucket` permissions on the specified Outposts bucket and belong
to the Outposts bucket owner's account in order to use this action. Only users
from Outposts bucket owner account with the right permissions can perform
actions on an Outposts bucket.
If you don't have `s3-outposts:GetBucket` permissions or you're not using an
identity that belongs to the bucket owner's account, Amazon S3 returns a `403
Access Denied` error.
The following actions are related to `GetBucket` for Amazon S3 on Outposts:
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of `x-amz-outpost-id` to be passed with the request and an
S3 on Outposts endpoint hostname prefix instead of `s3-control`. For an example
of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts
endpoint hostname prefix and the `x-amz-outpost-id` derived using the access
point ARN, see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucket.html#API_control_GetBucket_Examples) section.
*
[PutObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
*
[CreateBucket](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html) *
[DeleteBucket](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucket.html)
"""
def get_bucket(%Client{} = client, bucket, account_id, options \\ []) do
  # Reads the Outposts bucket resource itself.
  path = "/v20180820/bucket/#{URI.encode(bucket)}"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  Request.request_rest(client, metadata(), :get, path, [], headers, nil, options, nil)
end
@doc """
This action gets an Amazon S3 on Outposts bucket's lifecycle configuration.
To get an S3 bucket's lifecycle configuration, see
[GetBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) in the *Amazon Simple Storage Service API*.
Returns the lifecycle configuration information set on the Outposts bucket. For
more information, see [Using Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
and for information about lifecycle configuration, see [ Object Lifecycle Management](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
in *Amazon Simple Storage Service User Guide*.
To use this action, you must have permission to perform the
`s3-outposts:GetLifecycleConfiguration` action. The Outposts bucket owner has
this permission, by default. The bucket owner can grant this permission to
others. For more information about permissions, see [Permissions Related to Bucket Subresource
Operations](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing Access Permissions to Your Amazon S3 Resources](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of `x-amz-outpost-id` to be passed with the request and an
S3 on Outposts endpoint hostname prefix instead of `s3-control`. For an example
of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts
endpoint hostname prefix and the `x-amz-outpost-id` derived using the access
point ARN, see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketLifecycleConfiguration.html#API_control_GetBucketLifecycleConfiguration_Examples) section.
`GetBucketLifecycleConfiguration` has the following special error:
* Error code: `NoSuchLifecycleConfiguration`
* Description: The lifecycle configuration does not
exist.
* HTTP Status Code: 404 Not Found
* SOAP Fault Code Prefix: Client
The following actions are related to `GetBucketLifecycleConfiguration`:
*
[PutBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketLifecycleConfiguration.html)
*
[DeleteBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketLifecycleConfiguration.html)
"""
def get_bucket_lifecycle_configuration(%Client{} = client, bucket, account_id, options \\ []) do
  # Lifecycle rules live at the /lifecycleconfiguration subresource of the bucket.
  path = "/v20180820/bucket/#{URI.encode(bucket)}/lifecycleconfiguration"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  Request.request_rest(client, metadata(), :get, path, [], headers, nil, options, nil)
end
@doc """
This action gets a bucket policy for an Amazon S3 on Outposts bucket.
To get a policy for an S3 bucket, see
[GetBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicy.html) in the *Amazon Simple Storage Service API*.
Returns the policy of a specified Outposts bucket. For more information, see
[Using Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
in the *Amazon Simple Storage Service User Guide*.
If you are using an identity other than the root user of the AWS account that
owns the bucket, the calling identity must have the `GetBucketPolicy`
permissions on the specified bucket and belong to the bucket owner's account in
order to use this action.
Only users from Outposts bucket owner account with the right permissions can
perform actions on an Outposts bucket. If you don't have
`s3-outposts:GetBucketPolicy` permissions or you're not using an identity that
belongs to the bucket owner's account, Amazon S3 returns a `403 Access Denied`
error.
As a security precaution, the root user of the AWS account that owns a bucket
can always use this action, even if the policy explicitly denies the root user
the ability to perform this action.
For more information about bucket policies, see [Using Bucket Policies and User Policies](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of `x-amz-outpost-id` to be passed with the request and an
S3 on Outposts endpoint hostname prefix instead of `s3-control`. For an example
of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts
endpoint hostname prefix and the `x-amz-outpost-id` derived using the access
point ARN, see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketPolicy.html#API_control_GetBucketPolicy_Examples) section.
The following actions are related to `GetBucketPolicy`:
*
[GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
*
[PutBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketPolicy.html) *
[DeleteBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketPolicy.html)
"""
def get_bucket_policy(%Client{} = client, bucket, account_id, options \\ []) do
  # The bucket policy lives at the /policy subresource of the Outposts bucket.
  path = "/v20180820/bucket/#{URI.encode(bucket)}/policy"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  Request.request_rest(client, metadata(), :get, path, [], headers, nil, options, nil)
end
@doc """
This action gets an Amazon S3 on Outposts bucket's tags.
To get S3 bucket tags, see
[GetBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) in the *Amazon Simple Storage Service API*.
Returns the tag set associated with the Outposts bucket. For more information,
see [Using Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
in the *Amazon Simple Storage Service User Guide*.
To use this action, you must have permission to perform the `GetBucketTagging`
action. By default, the bucket owner has this permission and can grant this
permission to others.
`GetBucketTagging` has the following special error:
* Error code: `NoSuchTagSetError`
* Description: There is no tag set associated with the
bucket.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of `x-amz-outpost-id` to be passed with the request and an
S3 on Outposts endpoint hostname prefix instead of `s3-control`. For an example
of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts
endpoint hostname prefix and the `x-amz-outpost-id` derived using the access
point ARN, see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketTagging.html#API_control_GetBucketTagging_Examples) section.
The following actions are related to `GetBucketTagging`:
*
[PutBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketTagging.html)
*
[DeleteBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketTagging.html)
"""
def get_bucket_tagging(%Client{} = client, bucket, account_id, options \\ []) do
  # The tag set lives at the /tagging subresource of the Outposts bucket.
  path = "/v20180820/bucket/#{URI.encode(bucket)}/tagging"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  Request.request_rest(client, metadata(), :get, path, [], headers, nil, options, nil)
end
@doc """
Returns the tags on an S3 Batch Operations job.
To use this operation, you must have permission to perform the
`s3:GetJobTagging` action. For more information, see [Controlling access and labeling jobs using
tags](https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-managing-jobs.html#batch-ops-job-tags)
in the *Amazon Simple Storage Service User Guide*.
Related actions include:
*
[CreateJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) *
[PutJobTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutJobTagging.html)
*
[DeleteJobTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html)
"""
def get_job_tagging(%Client{} = client, job_id, account_id, options \\ []) do
  # Batch Operations job tags live at the /tagging subresource of the job.
  path = "/v20180820/jobs/#{URI.encode(job_id)}/tagging"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  Request.request_rest(client, metadata(), :get, path, [], headers, nil, options, nil)
end
@doc """
Retrieves the `PublicAccessBlock` configuration for an AWS account.
For more information, see [ Using Amazon S3 block public access](https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html).
Related actions include:
*
[DeletePublicAccessBlock](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeletePublicAccessBlock.html) *
[PutPublicAccessBlock](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutPublicAccessBlock.html)
"""
def get_public_access_block(%Client{} = client, account_id, options \\ []) do
  # Account-level configuration: the path carries no resource identifier.
  path = "/v20180820/configuration/publicAccessBlock"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  Request.request_rest(client, metadata(), :get, path, [], headers, nil, options, nil)
end
@doc """
Gets the Amazon S3 Storage Lens configuration.
For more information, see [Assessing your storage activity and usage with Amazon S3 Storage Lens
](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens.html) in the
*Amazon Simple Storage Service User Guide*.
To use this action, you must have permission to perform the
`s3:GetStorageLensConfiguration` action. For more information, see [Setting permissions to use Amazon S3 Storage
Lens](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens_iam_permissions.html)
in the *Amazon Simple Storage Service User Guide*.
"""
def get_storage_lens_configuration(%Client{} = client, config_id, account_id, options \\ []) do
  # Storage Lens configurations are addressed by their configuration ID.
  path = "/v20180820/storagelens/#{URI.encode(config_id)}"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  Request.request_rest(client, metadata(), :get, path, [], headers, nil, options, nil)
end
@doc """
Gets the tags of Amazon S3 Storage Lens configuration.
For more information about S3 Storage Lens, see [Assessing your storage activity and usage with Amazon S3 Storage Lens
](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens.html) in the
*Amazon Simple Storage Service User Guide*.
To use this action, you must have permission to perform the
`s3:GetStorageLensConfigurationTagging` action. For more information, see
[Setting permissions to use Amazon S3 Storage Lens](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens_iam_permissions.html)
in the *Amazon Simple Storage Service User Guide*.
"""
def get_storage_lens_configuration_tagging(
      %Client{} = client,
      config_id,
      account_id,
      options \\ []
    ) do
  # Tags live at the /tagging subresource of the Storage Lens configuration.
  path = "/v20180820/storagelens/#{URI.encode(config_id)}/tagging"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  Request.request_rest(client, metadata(), :get, path, [], headers, nil, options, nil)
end
@doc """
Returns a list of the access points currently associated with the specified
bucket.
You can retrieve up to 1000 access points per call. If the specified bucket has
more than 1,000 access points (or the number specified in `maxResults`,
whichever is less), the response will include a continuation token that you can
use to list the additional access points.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of `x-amz-outpost-id` to be passed with the request and an
S3 on Outposts endpoint hostname prefix instead of `s3-control`. For an example
of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts
endpoint hostname prefix and the `x-amz-outpost-id` derived using the access
point ARN, see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html#API_control_GetAccessPoint_Examples) section.
The following actions are related to `ListAccessPoints`:
*
[CreateAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html)
*
[DeleteAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPoint.html) *
[GetAccessPoint](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html)
"""
def list_access_points(
      %Client{} = client,
      bucket \\ nil,
      max_results \\ nil,
      next_token \\ nil,
      account_id,
      options \\ []
    ) do
  # Access points are listed account-wide, optionally scoped to one bucket.
  path = "/v20180820/accesspoint"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  # Only forward the filter/pagination parameters the caller actually set.
  candidates = [{"bucket", bucket}, {"maxResults", max_results}, {"nextToken", next_token}]
  query = for {key, value} <- candidates, not is_nil(value), do: {key, value}

  Request.request_rest(client, metadata(), :get, path, query, headers, nil, options, nil)
end
@doc """
Returns a list of the access points associated with the Object Lambda Access
Point.
You can retrieve up to 1000 access points per call. If there are more than 1,000
access points (or the number specified in `maxResults`, whichever is less), the
response will include a continuation token that you can use to list the
additional access points.
The following actions are related to `ListAccessPointsForObjectLambda`:
*
[CreateAccessPointForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPointForObjectLambda.html) *
[DeleteAccessPointForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointForObjectLambda.html)
*
[GetAccessPointForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointForObjectLambda.html)
"""
def list_access_points_for_object_lambda(
      %Client{} = client,
      max_results \\ nil,
      next_token \\ nil,
      account_id,
      options \\ []
    ) do
  # Lists Object Lambda Access Points for the account, with optional paging.
  path = "/v20180820/accesspointforobjectlambda"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  # Only forward the pagination parameters the caller actually set.
  candidates = [{"maxResults", max_results}, {"nextToken", next_token}]
  query = for {key, value} <- candidates, not is_nil(value), do: {key, value}

  Request.request_rest(client, metadata(), :get, path, query, headers, nil, options, nil)
end
@doc """
Lists current S3 Batch Operations jobs and jobs that have ended within the last
30 days for the AWS account making the request.
For more information, see [S3 Batch Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html)
in the *Amazon Simple Storage Service User Guide*.
Related actions include:
*
[CreateJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) *
[DescribeJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html)
*
[UpdateJobPriority](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobPriority.html) *
[UpdateJobStatus](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html)
"""
def list_jobs(
      %Client{} = client,
      job_statuses \\ nil,
      max_results \\ nil,
      next_token \\ nil,
      account_id,
      options \\ []
    ) do
  # Lists Batch Operations jobs, optionally filtered by status, with paging.
  path = "/v20180820/jobs"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  # Only forward the filter/pagination parameters the caller actually set.
  candidates = [
    {"jobStatuses", job_statuses},
    {"maxResults", max_results},
    {"nextToken", next_token}
  ]

  query = for {key, value} <- candidates, not is_nil(value), do: {key, value}

  Request.request_rest(client, metadata(), :get, path, query, headers, nil, options, nil)
end
@doc """
Returns a list of all Outposts buckets in an Outpost that are owned by the
authenticated sender of the request.
For more information, see [Using Amazon S3 on Outposts](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
in the *Amazon Simple Storage Service User Guide*.
For an example of the request syntax for Amazon S3 on Outposts that uses the S3
on Outposts endpoint hostname prefix and `x-amz-outpost-id` in your request, see
the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListRegionalBuckets.html#API_control_ListRegionalBuckets_Examples)
section.
"""
def list_regional_buckets(
      %Client{} = client,
      max_results \\ nil,
      next_token \\ nil,
      account_id,
      outpost_id \\ nil,
      options \\ []
    ) do
  # Lists Outposts buckets owned by the account, with optional paging.
  path = "/v20180820/bucket"

  # Both identifiers travel as headers; drop whichever was not supplied.
  # (Outpost id is emitted first, matching the original prepend order.)
  header_candidates = [{"x-amz-outpost-id", outpost_id}, {"x-amz-account-id", account_id}]
  headers = for {key, value} <- header_candidates, not is_nil(value), do: {key, value}

  # Only forward the pagination parameters the caller actually set.
  candidates = [{"maxResults", max_results}, {"nextToken", next_token}]
  query = for {key, value} <- candidates, not is_nil(value), do: {key, value}

  Request.request_rest(client, metadata(), :get, path, query, headers, nil, options, nil)
end
@doc """
Gets a list of Amazon S3 Storage Lens configurations.
For more information about S3 Storage Lens, see [Assessing your storage activity and usage with Amazon S3 Storage Lens
](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens.html) in the
*Amazon Simple Storage Service User Guide*.
To use this action, you must have permission to perform the
`s3:ListStorageLensConfigurations` action. For more information, see [Setting permissions to use Amazon S3 Storage
Lens](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens_iam_permissions.html)
in the *Amazon Simple Storage Service User Guide*.
"""
def list_storage_lens_configurations(
      %Client{} = client,
      next_token \\ nil,
      account_id,
      options \\ []
    ) do
  # Lists Storage Lens configurations for the account, with optional paging.
  path = "/v20180820/storagelens"

  # The owning account travels as a header; omit it when not supplied.
  headers = if is_nil(account_id), do: [], else: [{"x-amz-account-id", account_id}]

  # nextToken is the only query parameter; send it only when present.
  query = if is_nil(next_token), do: [], else: [{"nextToken", next_token}]

  Request.request_rest(client, metadata(), :get, path, query, headers, nil, options, nil)
end
@doc """
Replaces configuration for an Object Lambda Access Point.
The following actions are related to
`PutAccessPointConfigurationForObjectLambda`:
*
[GetAccessPointConfigurationForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointConfigurationForObjectLambda.html)
"""
def put_access_point_configuration_for_object_lambda(
      %Client{} = client,
      name,
      input,
      options \\ []
    ) do
  # PUTs the Object Lambda Access Point configuration supplied in `input`;
  # the AccountId member is lifted out of the body into a request header.
  url_path = "/v20180820/accesspointforobjectlambda/#{URI.encode(name)}/configuration"
  {headers, body} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :put, url_path, [], headers, body, options, nil)
end
@doc """
Associates an access policy with the specified access point.
Each access point can have only one policy, so a request made to this API
replaces any existing policy associated with the specified access point.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of `x-amz-outpost-id` to be passed with the request and an
S3 on Outposts endpoint hostname prefix instead of `s3-control`. For an example
of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts
endpoint hostname prefix and the `x-amz-outpost-id` derived using the access
point ARN, see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicy.html#API_control_PutAccessPointPolicy_Examples) section.
The following actions are related to `PutAccessPointPolicy`:
*
[GetAccessPointPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointPolicy.html)
*
[DeleteAccessPointPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointPolicy.html)
"""
def put_access_point_policy(%Client{} = client, name, input, options \\ []) do
  # Replaces the access point's policy; AccountId travels as a header.
  url_path = "/v20180820/accesspoint/#{URI.encode(name)}/policy"
  {headers, body} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :put, url_path, [], headers, body, options, nil)
end
@doc """
Creates or replaces resource policy for an Object Lambda Access Point.
For an example policy, see [Creating Object Lambda Access Points](https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-create.html#olap-create-cli)
in the *Amazon Simple Storage Service User Guide*.
The following actions are related to `PutAccessPointPolicyForObjectLambda`:
*
[DeleteAccessPointPolicyForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointPolicyForObjectLambda.html) *
[GetAccessPointPolicyForObjectLambda](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointPolicyForObjectLambda.html)
"""
def put_access_point_policy_for_object_lambda(%Client{} = client, name, input, options \\ []) do
  # Creates/replaces the resource policy of an Object Lambda Access Point.
  url_path = "/v20180820/accesspointforobjectlambda/#{URI.encode(name)}/policy"
  {headers, body} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :put, url_path, [], headers, body, options, nil)
end
@doc """
This action puts a lifecycle configuration to an Amazon S3 on Outposts bucket.
To put a lifecycle configuration to an S3 bucket, see
[PutBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) in the *Amazon Simple Storage Service API*.
Creates a new lifecycle configuration for the S3 on Outposts bucket or replaces
an existing lifecycle configuration. Outposts buckets only support lifecycle
configurations that delete/expire objects after a certain period of time and
abort incomplete multipart uploads.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of `x-amz-outpost-id` to be passed with the request and an
S3 on Outposts endpoint hostname prefix instead of `s3-control`. For an example
of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts
endpoint hostname prefix and the `x-amz-outpost-id` derived using the access
point ARN, see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketLifecycleConfiguration.html#API_control_PutBucketLifecycleConfiguration_Examples)
section.
The following actions are related to `PutBucketLifecycleConfiguration`:
*
[GetBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketLifecycleConfiguration.html) *
[DeleteBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketLifecycleConfiguration.html)
"""
def put_bucket_lifecycle_configuration(%Client{} = client, bucket, input, options \\ []) do
  # Creates or replaces the lifecycle configuration of an Outposts bucket.
  url_path = "/v20180820/bucket/#{URI.encode(bucket)}/lifecycleconfiguration"
  {headers, body} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :put, url_path, [], headers, body, options, nil)
end
@doc """
This action puts a bucket policy to an Amazon S3 on Outposts bucket.
To put a policy on an S3 bucket, see
[PutBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketPolicy.html) in the *Amazon Simple Storage Service API*.
Applies an Amazon S3 bucket policy to an Outposts bucket. For more information,
see [Using Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
in the *Amazon Simple Storage Service User Guide*.
If you are using an identity other than the root user of the AWS account that
owns the Outposts bucket, the calling identity must have the `PutBucketPolicy`
permissions on the specified Outposts bucket and belong to the bucket owner's
account in order to use this action.
If you don't have `PutBucketPolicy` permissions, Amazon S3 returns a `403 Access
Denied` error. If you have the correct permissions, but you're not using an
identity that belongs to the bucket owner's account, Amazon S3 returns a `405
Method Not Allowed` error.
As a security precaution, the root user of the AWS account that owns a bucket
can always use this action, even if the policy explicitly denies the root user
the ability to perform this action.
For more information about bucket policies, see [Using Bucket Policies and User Policies](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of `x-amz-outpost-id` to be passed with the request and an
S3 on Outposts endpoint hostname prefix instead of `s3-control`. For an example
of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts
endpoint hostname prefix and the `x-amz-outpost-id` derived using the access
point ARN, see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketPolicy.html#API_control_PutBucketPolicy_Examples) section.
The following actions are related to `PutBucketPolicy`:
*
[GetBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketPolicy.html)
*
[DeleteBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketPolicy.html)
"""
def put_bucket_policy(%Client{} = client, bucket, input, options \\ []) do
  # Applies a bucket policy to an Outposts bucket. Both the account id and
  # the optional self-lockout confirmation move from the body into headers.
  url_path = "/v20180820/bucket/#{URI.encode(bucket)}/policy"

  {headers, body} =
    Request.build_params(
      [
        {"AccountId", "x-amz-account-id"},
        {"ConfirmRemoveSelfBucketAccess", "x-amz-confirm-remove-self-bucket-access"}
      ],
      input
    )

  Request.request_rest(client, metadata(), :put, url_path, [], headers, body, options, nil)
end
@doc """
This action puts tags on an Amazon S3 on Outposts bucket.
To put tags on an S3 bucket, see
[PutBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) in the *Amazon Simple Storage Service API*.
Sets the tags for an S3 on Outposts bucket. For more information, see [Using
Amazon S3 on
Outposts](https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
in the *Amazon Simple Storage Service User Guide*.
Use tags to organize your AWS bill to reflect your own cost structure. To do
this, sign up to get your AWS account bill with tag key values included. Then,
to see the cost of combined resources, organize your billing information
according to resources with the same tag key values. For example, you can tag
several resources with a specific application name, and then organize your
billing information to see the total cost of that application across several
services. For more information, see [Cost allocation and tagging](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html).
Within a bucket, if you add a tag that has the same key as an existing tag, the
new value overwrites the old value. For more information, see [ Using cost allocation in Amazon S3 bucket
tags](https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html).
To use this action, you must have permissions to perform the
`s3-outposts:PutBucketTagging` action. The Outposts bucket owner has this
permission by default and can grant this permission to others. For more
information about permissions, see [ Permissions Related to Bucket Subresource Operations](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
and [Managing access permissions to your Amazon S3 resources](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
`PutBucketTagging` has the following special errors:
* Error code: `InvalidTagError`
* Description: The tag provided was not a valid tag.
This error can occur if the tag did not pass input validation. For information
about tag restrictions, see [ User-Defined Tag Restrictions](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html)
and [ AWS-Generated Cost Allocation Tag Restrictions](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html).
* Error code: `MalformedXMLError`
* Description: The XML provided does not match the
schema.
* Error code: `OperationAbortedError `
* Description: A conflicting conditional action is
currently in progress against this resource. Try again.
* Error code: `InternalError`
* Description: The service was unable to apply the
provided tag to the bucket.
All Amazon S3 on Outposts REST API requests for this action require an
additional parameter of `x-amz-outpost-id` to be passed with the request and an
S3 on Outposts endpoint hostname prefix instead of `s3-control`. For an example
of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts
endpoint hostname prefix and the `x-amz-outpost-id` derived using the access
point ARN, see the
[Examples](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketTagging.html#API_control_PutBucketTagging_Examples) section.
The following actions are related to `PutBucketTagging`:
*
[GetBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketTagging.html)
*
[DeleteBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketTagging.html)
"""
def put_bucket_tagging(%Client{} = client, bucket, input, options \\ []) do
  # Replaces the tag set on an Outposts bucket.
  url_path = "/v20180820/bucket/#{URI.encode(bucket)}/tagging"
  {headers, body} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :put, url_path, [], headers, body, options, nil)
end
@doc """
Sets the supplied tag-set on an S3 Batch Operations job.
A tag is a key-value pair. You can associate S3 Batch Operations tags with any
job by sending a PUT request against the tagging subresource that is associated
with the job. To modify the existing tag set, you can either replace the
existing tag set entirely, or make changes within the existing tag set by
retrieving the existing tag set using
[GetJobTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetJobTagging.html), modify that tag set, and use this action to replace the tag set with the one you
modified. For more information, see [Controlling access and labeling jobs using
tags](https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-managing-jobs.html#batch-ops-job-tags)
in the *Amazon Simple Storage Service User Guide*.
* If you send this request with an empty tag set, Amazon S3 deletes
the existing tag set on the Batch Operations job. If you use this method, you
are charged for a Tier 1 Request (PUT). For more information, see [Amazon S3 pricing](http://aws.amazon.com/s3/pricing/).
* For deleting existing tags for your Batch Operations job, a
[DeleteJobTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html) request is preferred because it achieves the same result without incurring
charges.
* A few things to consider about using tags:
* Amazon S3 limits the maximum number of tags to 50 tags
per job.
* You can associate up to 50 tags with a job as long as
they have unique tag keys.
* A tag key can be up to 128 Unicode characters in
length, and tag values can be up to 256 Unicode characters in length.
* The key and values are case sensitive.
* For tagging-related restrictions related to characters
and encodings, see [User-Defined Tag
Restrictions](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html)
in the *AWS Billing and Cost Management User Guide*.
To use this action, you must have permission to perform the `s3:PutJobTagging`
action.
Related actions include:
*
[CreatJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) *
[GetJobTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetJobTagging.html)
*
[DeleteJobTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html)
"""
def put_job_tagging(%Client{} = client, job_id, input, options \\ []) do
  # Replaces the tag set on an S3 Batch Operations job.
  url_path = "/v20180820/jobs/#{URI.encode(job_id)}/tagging"
  {headers, body} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :put, url_path, [], headers, body, options, nil)
end
@doc """
Creates or modifies the `PublicAccessBlock` configuration for an AWS account.
For more information, see [ Using Amazon S3 block public access](https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html).
Related actions include:
*
[GetPublicAccessBlock](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetPublicAccessBlock.html) *
[DeletePublicAccessBlock](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeletePublicAccessBlock.html)
"""
def put_public_access_block(%Client{} = client, input, options \\ []) do
  # Creates or modifies the account-level PublicAccessBlock configuration.
  url_path = "/v20180820/configuration/publicAccessBlock"
  {headers, body} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :put, url_path, [], headers, body, options, nil)
end
@doc """
Puts an Amazon S3 Storage Lens configuration.
For more information about S3 Storage Lens, see [Working with Amazon S3 Storage Lens](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens.html) in the
*Amazon Simple Storage Service User Guide*.
To use this action, you must have permission to perform the
`s3:PutStorageLensConfiguration` action. For more information, see [Setting permissions to use Amazon S3 Storage
Lens](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens_iam_permissions.html)
in the *Amazon Simple Storage Service User Guide*.
"""
def put_storage_lens_configuration(%Client{} = client, config_id, input, options \\ []) do
  # PUTs an S3 Storage Lens configuration under the given config id.
  url_path = "/v20180820/storagelens/#{URI.encode(config_id)}"
  {headers, body} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :put, url_path, [], headers, body, options, nil)
end
@doc """
Put or replace tags on an existing Amazon S3 Storage Lens configuration.
For more information about S3 Storage Lens, see [Assessing your storage activity and usage with Amazon S3 Storage Lens
](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens.html) in the
*Amazon Simple Storage Service User Guide*.
To use this action, you must have permission to perform the
`s3:PutStorageLensConfigurationTagging` action. For more information, see
[Setting permissions to use Amazon S3 Storage Lens](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens_iam_permissions.html)
in the *Amazon Simple Storage Service User Guide*.
"""
def put_storage_lens_configuration_tagging(%Client{} = client, config_id, input, options \\ []) do
  # Puts or replaces the tags on an existing Storage Lens configuration.
  url_path = "/v20180820/storagelens/#{URI.encode(config_id)}/tagging"
  {headers, body} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  Request.request_rest(client, metadata(), :put, url_path, [], headers, body, options, nil)
end
@doc """
Updates an existing S3 Batch Operations job's priority.
For more information, see [S3 Batch Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html)
in the *Amazon Simple Storage Service User Guide*.
Related actions include:
*
[CreateJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) *
[ListJobs](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html)
*
[DescribeJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html) *
[UpdateJobStatus](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html)
"""
def update_job_priority(%Client{} = client, job_id, input, options \\ []) do
  # POSTs the new priority for a Batch Operations job: AccountId becomes a
  # header while Priority is sent as a query parameter.
  url_path = "/v20180820/jobs/#{URI.encode(job_id)}/priority"
  {headers, input} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)
  {query_params, input} = Request.build_params([{"Priority", "priority"}], input)

  Request.request_rest(client, metadata(), :post, url_path, query_params, headers, input, options, nil)
end
@doc """
Updates the status for the specified job.
Use this action to confirm that you want to run a job or to cancel an existing
job. For more information, see [S3 Batch Operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html)
in the *Amazon Simple Storage Service User Guide*.
Related actions include:
*
[CreateJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) *
[ListJobs](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html)
*
[DescribeJob](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html) *
[UpdateJobStatus](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html)
"""
def update_job_status(%Client{} = client, job_id, input, options \\ []) do
  # Confirms or cancels a Batch Operations job. AccountId becomes a header;
  # the requested status (and optional reason) go into the query string.
  url_path = "/v20180820/jobs/#{URI.encode(job_id)}/status"
  {headers, input} = Request.build_params([{"AccountId", "x-amz-account-id"}], input)

  {query_params, input} =
    Request.build_params(
      [
        {"RequestedJobStatus", "requestedJobStatus"},
        {"StatusUpdateReason", "statusUpdateReason"}
      ],
      input
    )

  Request.request_rest(client, metadata(), :post, url_path, query_params, headers, input, options, nil)
end
end | 32.736799 | 227 | 0.700414 |
e825d2080c937918b007c133942b1dd91b8e3eff | 5,731 | ex | Elixir | lib/livebook_web/live/settings_live.ex | doyobi/livebook | 136d5039c42b406dd0b31aea188deb4fce3b1328 | [
"Apache-2.0"
] | null | null | null | lib/livebook_web/live/settings_live.ex | doyobi/livebook | 136d5039c42b406dd0b31aea188deb4fce3b1328 | [
"Apache-2.0"
] | null | null | null | lib/livebook_web/live/settings_live.ex | doyobi/livebook | 136d5039c42b406dd0b31aea188deb4fce3b1328 | [
"Apache-2.0"
] | null | null | null | defmodule LivebookWeb.SettingsLive do
use LivebookWeb, :live_view
import LivebookWeb.UserHelpers
alias LivebookWeb.{SidebarHelpers, PageHelpers}
@impl true
def mount(_params, _session, socket) do
file_systems = Livebook.Settings.file_systems()
{:ok,
socket
|> SidebarHelpers.shared_home_handlers()
|> assign(
file_systems: file_systems,
page_title: "Livebook - Settings"
)}
end
@impl true
def render(assigns) do
~H"""
<div class="flex grow h-full">
<SidebarHelpers.sidebar>
<SidebarHelpers.logo_item socket={@socket} />
<SidebarHelpers.shared_home_footer
socket={@socket}
current_user={@current_user}
user_path={Routes.settings_path(@socket, :user)} />
</SidebarHelpers.sidebar>
<div class="grow px-6 py-8 overflow-y-auto">
<div class="max-w-screen-md w-full mx-auto px-4 pb-8 space-y-16">
<!-- System settings section -->
<div class="flex flex-col space-y-8">
<div>
<PageHelpers.title text="System settings" socket={@socket} />
<p class="mt-4 text-gray-700">
Here you can change global Livebook configuration. Keep in mind
that this configuration is not persisted and gets discarded as
soon as you stop the application.
</p>
</div>
<!-- System details -->
<div class="flex flex-col space-y-4">
<h1 class="text-xl text-gray-800 font-semibold">
About
</h1>
<div class="flex items-center justify-between border border-gray-200 rounded-lg p-4">
<div class="flex items-center space-x-12">
<.labeled_text label="Livebook" text={"v#{Application.spec(:livebook, :vsn)}"} />
<.labeled_text label="Elixir" text={"v#{System.version()}"} />
</div>
<%= live_redirect to: Routes.live_dashboard_path(@socket, :home),
class: "button-base button-outlined-gray" do %>
<.remix_icon icon="dashboard-2-line" class="align-middle mr-1" />
<span>Open dashboard</span>
<% end %>
</div>
</div>
<!-- File systems configuration -->
<div class="flex flex-col space-y-4">
<div class="flex justify-between items-center">
<h2 class="text-xl text-gray-800 font-semibold">
File systems
</h2>
</div>
<LivebookWeb.SettingsLive.FileSystemsComponent.render
file_systems={@file_systems}
socket={@socket} />
</div>
</div>
<!-- User settings section -->
<div class="flex flex-col space-y-8">
<div>
<h1 class="text-3xl text-gray-800 font-semibold">
User settings
</h1>
<p class="mt-4 text-gray-700">
The configuration in this section changes only your Livebook
experience and is saved in your browser.
</p>
</div>
<!-- Editor configuration -->
<div class="flex flex-col space-y-4">
<h2 class="text-xl text-gray-800 font-semibold">
Code editor
</h2>
<div class="flex flex-col space-y-3"
id="editor-settings"
phx-hook="EditorSettings"
phx-update="ignore">
<.switch_checkbox
name="editor_auto_completion"
label="Show completion list while typing"
checked={false} />
<.switch_checkbox
name="editor_auto_signature"
label="Show function signature while typing"
checked={false} />
<.switch_checkbox
name="editor_font_size"
label="Increase font size"
checked={false} />
<.switch_checkbox
name="editor_high_contrast"
label="Use high contrast theme"
checked={false} />
</div>
</div>
</div>
</div>
</div>
</div>
<%= if @live_action == :user do %>
<.current_user_modal
return_to={Routes.settings_path(@socket, :page)}
current_user={@current_user} />
<% end %>
<%= if @live_action == :add_file_system do %>
<.modal class="w-full max-w-3xl" return_to={Routes.settings_path(@socket, :page)}>
<.live_component module={LivebookWeb.SettingsLive.AddFileSystemComponent}
id="add-file-system"
return_to={Routes.settings_path(@socket, :page)} />
</.modal>
<% end %>
<%= if @live_action == :detach_file_system do %>
<.modal class="w-full max-w-xl" return_to={Routes.settings_path(@socket, :page)}>
<.live_component module={LivebookWeb.SettingsLive.RemoveFileSystemComponent}
id="detach-file-system"
return_to={Routes.settings_path(@socket, :page)}
file_system_id={@file_system_id}
/>
</.modal>
<% end %>
"""
end
@impl true
def handle_params(%{"file_system_id" => file_system_id}, _url, socket) do
{:noreply, assign(socket, file_system_id: file_system_id)}
end
def handle_params(_params, _url, socket), do: {:noreply, socket}
@impl true
def handle_info({:file_systems_updated, file_systems}, socket) do
{:noreply, assign(socket, file_systems: file_systems)}
end
def handle_info(_message, socket), do: {:noreply, socket}
end
| 36.272152 | 97 | 0.548595 |
e825e5b2bbfab64c1751515331ca6ed5b3a86810 | 1,166 | ex | Elixir | 2021/03/3a.ex | cybrox/adventofcode | 974b1265f84e7769385adcc4883ea4bec3dd0482 | [
"MIT"
] | null | null | null | 2021/03/3a.ex | cybrox/adventofcode | 974b1265f84e7769385adcc4883ea4bec3dd0482 | [
"MIT"
] | null | null | null | 2021/03/3a.ex | cybrox/adventofcode | 974b1265f84e7769385adcc4883ea4bec3dd0482 | [
"MIT"
] | null | null | null | input =
"./3.input.txt"
|> File.read!()
|> String.trim()
|> String.split("\n", trim: true)
|> Enum.map(&String.split(&1, "", trim: true))
defmodule Solution do
def evaluate(input) do
gamma = zipmap(input)
epsilon = invert(gamma)
list_to_int(gamma) * list_to_int(epsilon)
end
defp zipmap(input) do
input
|> Enum.zip()
|> Enum.map(&Tuple.to_list/1)
|> Enum.map(fn bits ->
total = Enum.count(bits)
ones = Enum.count(bits, &(&1 == "1"))
if ones > total / 2, do: "1", else: "0"
end)
end
# We invert the list of bits manually instaed of using Bitwise.~~~, as that
# does seem to play into signed/unsigned integer evaluation somehow. In its
# docs it states that ~~~2 = -3. No idea why (0b1) would be evaluated to -3
defp invert(list), do: invert(list, [])
defp invert([], acc), do: acc
defp invert(["1" | t], acc), do: invert(t, acc ++ ["0"])
defp invert(["0" | t], acc), do: invert(t, acc ++ ["1"])
defp list_to_int(list),
do:
list
|> Enum.join("")
|> Integer.parse(2)
|> elem(0)
end
IO.puts("Diagnostic gamma * epsilon is #{Solution.evaluate(input)}")
| 26.5 | 77 | 0.585763 |
e82602d994f44beb5e4be9206cb98a7ad8dfbfcb | 72 | exs | Elixir | apps/omg_performance/config/test.exs | SingularityMatrix/elixir-omg | 7db3fcc3adfa303e30ff7703148cc5110b587d20 | [
"Apache-2.0"
] | null | null | null | apps/omg_performance/config/test.exs | SingularityMatrix/elixir-omg | 7db3fcc3adfa303e30ff7703148cc5110b587d20 | [
"Apache-2.0"
] | null | null | null | apps/omg_performance/config/test.exs | SingularityMatrix/elixir-omg | 7db3fcc3adfa303e30ff7703148cc5110b587d20 | [
"Apache-2.0"
] | null | null | null | use Mix.Config
config :omg_api, rootchain_height_sync_interval_ms: 100
| 18 | 55 | 0.847222 |
e8263469364ce3fc66484b3e74dcf18a1713ce79 | 413 | ex | Elixir | hackerrank/simple_array_sum/simple_array_sum.ex | lowks/levelup | 805e48cbf33d48616f55ef1b3740bb15b548054d | [
"MIT"
] | null | null | null | hackerrank/simple_array_sum/simple_array_sum.ex | lowks/levelup | 805e48cbf33d48616f55ef1b3740bb15b548054d | [
"MIT"
] | 39 | 2018-04-15T12:36:36.000Z | 2018-09-16T15:31:50.000Z | hackerrank/simple_array_sum/simple_array_sum.ex | lowks/levelup | 805e48cbf33d48616f55ef1b3740bb15b548054d | [
"MIT"
] | null | null | null | defmodule Solution do
#Enter your code here. Read input from STDIN. Print output to STDOUT
def sum() do
times = IO.gets("") |> String.trim |> String.to_integer
result = IO.gets("") |>
String.trim |>
String.split("") |>
Enum.map(fn(x) -> String.to_integer(x) end) |>
Enum.take(times) |>
Enum.reduce(0, fn(x, acc) -> x + acc end)
IO.puts result
end
end
Solution.sum | 29.5 | 68 | 0.598063 |
e8264ca85fb719c434a6b910cf2326535b8ea0ef | 2,425 | ex | Elixir | clients/fitness/lib/google_api/fitness/v1/model/list_sessions_response.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | 1 | 2018-12-03T23:43:10.000Z | 2018-12-03T23:43:10.000Z | clients/fitness/lib/google_api/fitness/v1/model/list_sessions_response.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | null | null | null | clients/fitness/lib/google_api/fitness/v1/model/list_sessions_response.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the elixir code generator program.
# Do not edit the class manually.
# Auto-generated Google API model: a struct + (de)serialization metadata for
# the ListSessions response. `use GoogleApi.Gax.ModelBase` supplies the
# defstruct, decode/encode plumbing, and the `field/2,3` macro used below.
defmodule GoogleApi.Fitness.V1.Model.ListSessionsResponse do
  @moduledoc """

  ## Attributes

  *   `deletedSession` (*type:* `list(GoogleApi.Fitness.V1.Model.Session.t)`, *default:* `nil`) - If includeDeleted is set to true in the request, this list will contain sessions deleted with original end times that are within the startTime and endTime frame.
  *   `hasMoreData` (*type:* `boolean()`, *default:* `nil`) - Flag to indicate server has more data to transfer
  *   `nextPageToken` (*type:* `String.t`, *default:* `nil`) - The continuation token, which is used to page through large result sets. Provide this value in a subsequent request to return the next page of results.
  *   `session` (*type:* `list(GoogleApi.Fitness.V1.Model.Session.t)`, *default:* `nil`) - Sessions with an end time that is between startTime and endTime of the request.
  """

  use GoogleApi.Gax.ModelBase

  # Keys mirror the JSON wire names (camelCase), hence the non-snake_case atoms.
  @type t :: %__MODULE__{
          :deletedSession => list(GoogleApi.Fitness.V1.Model.Session.t()),
          :hasMoreData => boolean(),
          :nextPageToken => String.t(),
          :session => list(GoogleApi.Fitness.V1.Model.Session.t())
        }

  # `type: :list` marks list-of-model fields so nested Sessions are decoded too.
  field(:deletedSession, as: GoogleApi.Fitness.V1.Model.Session, type: :list)
  field(:hasMoreData)
  field(:nextPageToken)
  field(:session, as: GoogleApi.Fitness.V1.Model.Session, type: :list)
end
defimpl Poison.Decoder, for: GoogleApi.Fitness.V1.Model.ListSessionsResponse do
  # `@for` is the protocol target, i.e. the model module; delegate to its
  # generated decode/2.
  def decode(value, options), do: @for.decode(value, options)
end
defimpl Poison.Encoder, for: GoogleApi.Fitness.V1.Model.ListSessionsResponse do
  # All generated models share ModelBase's generic JSON encoder.
  def encode(value, options), do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 43.303571 | 259 | 0.729072 |
e82653014e856f9896884a45830fcaf7ab15c2c3 | 4,701 | ex | Elixir | core/sup_tree_common/alert_manager.ex | ikeyasu/antikythera | 544fdd22e46b1f34177053d87d9e2a9708c74113 | [
"Apache-2.0"
] | null | null | null | core/sup_tree_common/alert_manager.ex | ikeyasu/antikythera | 544fdd22e46b1f34177053d87d9e2a9708c74113 | [
"Apache-2.0"
] | null | null | null | core/sup_tree_common/alert_manager.ex | ikeyasu/antikythera | 544fdd22e46b1f34177053d87d9e2a9708c74113 | [
"Apache-2.0"
] | null | null | null | # Copyright(c) 2015-2018 ACCESS CO., LTD. All rights reserved.
use Croma
defmodule AntikytheraCore.Alert.Manager do
@moduledoc """
An event manager hosting multiple `AntikytheraCore.Alert.Handler`s.
Managers are spawned per OTP application (antikythera and each gear).
Each handler has its own buffer to store messages before sending them as an alert.
"""
alias Antikythera.{GearName, Time}
require AntikytheraCore.Logger, as: L
alias AntikytheraCore.GearModule
alias AntikytheraCore.Alert.Handler, as: AHandler
alias AntikytheraCore.Alert.{HandlerConfigsMap, ErrorCountReporter}
@handler_module_prefix "AntikytheraCore.Alert.Handler."
# Supervisor child spec: start this manager via start_link/1 with `args`.
def child_spec(args) do
  %{id: __MODULE__, start: {__MODULE__, :start_link, [args]}}
end
# Registers the :gen_event manager under `name_to_register`, then installs
# the alert handlers configured for `otp_app_name`.
def start_link([otp_app_name, name_to_register]) do
  {:ok, manager} = :gen_event.start_link({:local, name_to_register})
  update_handler_installations(otp_app_name, HandlerConfigsMap.get(otp_app_name))
  {:ok, manager}
end
@doc """
Update handler installations according to alert config of an OTP application (antikythera or gear).
Handlers will be installed/uninstalled depending on contents of the config.
If the config has sufficient information for a handler, it will be installed (noop if already installed).
Otherwise, it will not be installed and will be uninstalled if it is already installed.
Any errors will be logged but this function always returns `:ok`.
"""
defun update_handler_installations(otp_app_name :: v[:antikythera | GearName.t], configs_map :: v[HandlerConfigsMap.t]) :: :ok do
case manager(otp_app_name) do
nil -> L.info("Alert manager process for '#{otp_app_name}' is not running!") # log as info since this will always happen on initial loading of gear configs (before gear installations)
pid -> update_handler_installations_impl(pid, otp_app_name, configs_map)
end
end
defp update_handler_installations_impl(manager, otp_app_name, configs_map) do
installed_handlers = :gen_event.which_handlers(manager)
handlers_to_install = handlers_to_install(configs_map)
to_remove = installed_handlers -- handlers_to_install
to_add = handlers_to_install -- installed_handlers
remove_results = Enum.map(to_remove, fn handler -> :gen_event.delete_handler(manager, handler, :remove_handler) end)
add_results = Enum.map(to_add , fn handler -> :gen_event.add_handler(manager, handler, handler_init_arg(otp_app_name, handler)) end)
case Enum.reject(remove_results ++ add_results, &(&1 == :ok)) do
[] -> :ok
errors ->
reasons = Enum.map(errors, fn {:error, reason} -> reason end)
L.error("Failed to install some alert handler(s) for '#{otp_app_name}':\n#{inspect(reasons)}")
:ok
end
end
defunp handlers_to_install(configs_map :: v[HandlerConfigsMap.t]) :: [:gen_event.handler] do
handler_pairs =
configs_map
|> Enum.map(fn {k, conf} -> {key_to_handler(k), conf} end)
|> Enum.filter(fn {h, conf} -> h.validate_config(conf) end)
|> Enum.map(fn {h, _} -> {AHandler, h} end)
[ErrorCountReporter | handler_pairs]
end
defp handler_init_arg(otp_app_name, ErrorCountReporter ), do: otp_app_name
defp handler_init_arg(otp_app_name, {AHandler, handler}), do: {otp_app_name, handler}
defp key_to_handler(key) do
temporary_module_name_str = @handler_module_prefix <> Macro.camelize(key)
Module.safe_concat([temporary_module_name_str]) # handlers must be chosen from list of existing handlers (in console UI), so this should never raise
end
defunp manager(otp_app_name :: v[:antikythera | GearName.t]) :: nil | pid do
try do
case otp_app_name do
:antikythera -> AntikytheraCore.Alert.Manager
gear_name -> GearModule.alert_manager(gear_name)
end
|> Process.whereis()
rescue
ArgumentError -> nil
end
end
#
# API for handlers
#
@doc """
Schedule next flushing from handlers. Assuming the `interval` is in seconds.
Note that handlers are kept as state in gen_event manager process,
thus calling `self/0` always refers to the manager process which the handler is installed in.
"""
defun schedule_handler_timeout(handler :: v[module], interval :: v[pos_integer]) :: :ok do
Process.send_after(self(), {:handler_timeout, handler}, interval * 1_000)
:ok
end
#
# Public API
#
@doc """
Notify a `manager` process of a message `body` with optional `time`.
"""
defun notify(manager :: v[atom],
body :: v[String.t],
time :: v[Time.t] \\ Time.now()) :: :ok do
:gen_event.notify(manager, {time, body})
end
end
| 39.504202 | 189 | 0.708147 |
e8266de30245fe0b62708ffcfe3846def3ad7209 | 17,079 | exs | Elixir | test/sengo/accounts_test.exs | henrikcoll/sengo | d5ef840843bffbc858dc18d331e4ebc74e6837cb | [
"MIT"
] | null | null | null | test/sengo/accounts_test.exs | henrikcoll/sengo | d5ef840843bffbc858dc18d331e4ebc74e6837cb | [
"MIT"
] | null | null | null | test/sengo/accounts_test.exs | henrikcoll/sengo | d5ef840843bffbc858dc18d331e4ebc74e6837cb | [
"MIT"
] | null | null | null | defmodule Sengo.AccountsTest do
use Sengo.DataCase
alias Sengo.Accounts
import Sengo.AccountsFixtures
alias Sengo.Accounts.{User, UserToken}
describe "get_user_by_email/1" do
test "does not return the user if the email does not exist" do
refute Accounts.get_user_by_email("unknown@example.com")
end
test "returns the user if the email exists" do
%{id: id} = user = user_fixture()
assert %User{id: ^id} = Accounts.get_user_by_email(user.email)
end
end
describe "get_user_by_email_and_password/2" do
test "does not return the user if the email does not exist" do
refute Accounts.get_user_by_email_and_password("unknown@example.com", "hello world!")
end
test "does not return the user if the password is not valid" do
user = user_fixture()
refute Accounts.get_user_by_email_and_password(user.email, "invalid")
end
test "returns the user if the email and password are valid" do
%{id: id} = user = user_fixture()
assert %User{id: ^id} =
Accounts.get_user_by_email_and_password(user.email, valid_user_password())
end
end
describe "get_user!/1" do
test "raises if id is invalid" do
assert_raise Ecto.NoResultsError, fn ->
Accounts.get_user!(-1)
end
end
test "returns the user with the given id" do
%{id: id} = user = user_fixture()
assert %User{id: ^id} = Accounts.get_user!(user.id)
end
end
describe "register_user/1" do
test "requires email and password to be set" do
{:error, changeset} = Accounts.register_user(%{})
assert %{
password: ["can't be blank"],
email: ["can't be blank"]
} = errors_on(changeset)
end
test "validates email and password when given" do
{:error, changeset} = Accounts.register_user(%{email: "not valid", password: "not valid"})
assert %{
email: ["must have the @ sign and no spaces"],
password: ["should be at least 12 character(s)"]
} = errors_on(changeset)
end
test "validates maximum values for email and password for security" do
too_long = String.duplicate("db", 100)
{:error, changeset} = Accounts.register_user(%{email: too_long, password: too_long})
assert "should be at most 160 character(s)" in errors_on(changeset).email
assert "should be at most 72 character(s)" in errors_on(changeset).password
end
test "validates email uniqueness" do
%{email: email} = user_fixture()
{:error, changeset} = Accounts.register_user(%{email: email})
assert "has already been taken" in errors_on(changeset).email
# Now try with the upper cased email too, to check that email case is ignored.
{:error, changeset} = Accounts.register_user(%{email: String.upcase(email)})
assert "has already been taken" in errors_on(changeset).email
end
test "registers users with a hashed password" do
email = unique_user_email()
{:ok, user} = Accounts.register_user(valid_user_attributes(email: email))
assert user.email == email
assert is_binary(user.hashed_password)
assert is_nil(user.confirmed_at)
assert is_nil(user.password)
end
end
describe "change_user_registration/2" do
test "returns a changeset" do
assert %Ecto.Changeset{} = changeset = Accounts.change_user_registration(%User{})
assert changeset.required == [:password, :email]
end
test "allows fields to be set" do
email = unique_user_email()
password = valid_user_password()
changeset =
Accounts.change_user_registration(
%User{},
valid_user_attributes(email: email, password: password)
)
assert changeset.valid?
assert get_change(changeset, :email) == email
assert get_change(changeset, :password) == password
assert is_nil(get_change(changeset, :hashed_password))
end
end
describe "change_user_email/2" do
test "returns a user changeset" do
assert %Ecto.Changeset{} = changeset = Accounts.change_user_email(%User{})
assert changeset.required == [:email]
end
end
describe "apply_user_email/3" do
setup do
%{user: user_fixture()}
end
test "requires email to change", %{user: user} do
{:error, changeset} = Accounts.apply_user_email(user, valid_user_password(), %{})
assert %{email: ["did not change"]} = errors_on(changeset)
end
test "validates email", %{user: user} do
{:error, changeset} =
Accounts.apply_user_email(user, valid_user_password(), %{email: "not valid"})
assert %{email: ["must have the @ sign and no spaces"]} = errors_on(changeset)
end
test "validates maximum value for email for security", %{user: user} do
too_long = String.duplicate("db", 100)
{:error, changeset} =
Accounts.apply_user_email(user, valid_user_password(), %{email: too_long})
assert "should be at most 160 character(s)" in errors_on(changeset).email
end
test "validates email uniqueness", %{user: user} do
%{email: email} = user_fixture()
{:error, changeset} =
Accounts.apply_user_email(user, valid_user_password(), %{email: email})
assert "has already been taken" in errors_on(changeset).email
end
test "validates current password", %{user: user} do
{:error, changeset} =
Accounts.apply_user_email(user, "invalid", %{email: unique_user_email()})
assert %{current_password: ["is not valid"]} = errors_on(changeset)
end
test "applies the email without persisting it", %{user: user} do
email = unique_user_email()
{:ok, user} = Accounts.apply_user_email(user, valid_user_password(), %{email: email})
assert user.email == email
assert Accounts.get_user!(user.id).email != email
end
end
describe "deliver_update_email_instructions/3" do
setup do
%{user: user_fixture()}
end
test "sends token through notification", %{user: user} do
token =
extract_user_token(fn url ->
Accounts.deliver_update_email_instructions(user, "current@example.com", url)
end)
{:ok, token} = Base.url_decode64(token, padding: false)
assert user_token = Repo.get_by(UserToken, token: :crypto.hash(:sha256, token))
assert user_token.user_id == user.id
assert user_token.sent_to == user.email
assert user_token.context == "change:current@example.com"
end
end
describe "update_user_email/2" do
setup do
user = user_fixture()
email = unique_user_email()
token =
extract_user_token(fn url ->
Accounts.deliver_update_email_instructions(%{user | email: email}, user.email, url)
end)
%{user: user, token: token, email: email}
end
test "updates the email with a valid token", %{user: user, token: token, email: email} do
assert Accounts.update_user_email(user, token) == :ok
changed_user = Repo.get!(User, user.id)
assert changed_user.email != user.email
assert changed_user.email == email
assert changed_user.confirmed_at
assert changed_user.confirmed_at != user.confirmed_at
refute Repo.get_by(UserToken, user_id: user.id)
end
test "does not update email with invalid token", %{user: user} do
assert Accounts.update_user_email(user, "oops") == :error
assert Repo.get!(User, user.id).email == user.email
assert Repo.get_by(UserToken, user_id: user.id)
end
test "does not update email if user email changed", %{user: user, token: token} do
assert Accounts.update_user_email(%{user | email: "current@example.com"}, token) == :error
assert Repo.get!(User, user.id).email == user.email
assert Repo.get_by(UserToken, user_id: user.id)
end
test "does not update email if token expired", %{user: user, token: token} do
{1, nil} = Repo.update_all(UserToken, set: [inserted_at: ~N[2020-01-01 00:00:00]])
assert Accounts.update_user_email(user, token) == :error
assert Repo.get!(User, user.id).email == user.email
assert Repo.get_by(UserToken, user_id: user.id)
end
end
describe "change_user_password/2" do
test "returns a user changeset" do
assert %Ecto.Changeset{} = changeset = Accounts.change_user_password(%User{})
assert changeset.required == [:password]
end
test "allows fields to be set" do
changeset =
Accounts.change_user_password(%User{}, %{
"password" => "new valid password"
})
assert changeset.valid?
assert get_change(changeset, :password) == "new valid password"
assert is_nil(get_change(changeset, :hashed_password))
end
end
describe "update_user_password/3" do
setup do
%{user: user_fixture()}
end
test "validates password", %{user: user} do
{:error, changeset} =
Accounts.update_user_password(user, valid_user_password(), %{
password: "not valid",
password_confirmation: "another"
})
assert %{
password: ["should be at least 12 character(s)"],
password_confirmation: ["does not match password"]
} = errors_on(changeset)
end
test "validates maximum values for password for security", %{user: user} do
too_long = String.duplicate("db", 100)
{:error, changeset} =
Accounts.update_user_password(user, valid_user_password(), %{password: too_long})
assert "should be at most 72 character(s)" in errors_on(changeset).password
end
test "validates current password", %{user: user} do
{:error, changeset} =
Accounts.update_user_password(user, "invalid", %{password: valid_user_password()})
assert %{current_password: ["is not valid"]} = errors_on(changeset)
end
test "updates the password", %{user: user} do
{:ok, user} =
Accounts.update_user_password(user, valid_user_password(), %{
password: "new valid password"
})
assert is_nil(user.password)
assert Accounts.get_user_by_email_and_password(user.email, "new valid password")
end
test "deletes all tokens for the given user", %{user: user} do
_ = Accounts.generate_user_session_token(user)
{:ok, _} =
Accounts.update_user_password(user, valid_user_password(), %{
password: "new valid password"
})
refute Repo.get_by(UserToken, user_id: user.id)
end
end
describe "generate_user_session_token/1" do
setup do
%{user: user_fixture()}
end
test "generates a token", %{user: user} do
token = Accounts.generate_user_session_token(user)
assert user_token = Repo.get_by(UserToken, token: token)
assert user_token.context == "session"
# Creating the same token for another user should fail
assert_raise Ecto.ConstraintError, fn ->
Repo.insert!(%UserToken{
token: user_token.token,
user_id: user_fixture().id,
context: "session"
})
end
end
end
describe "get_user_by_session_token/1" do
setup do
user = user_fixture()
token = Accounts.generate_user_session_token(user)
%{user: user, token: token}
end
test "returns user by token", %{user: user, token: token} do
assert session_user = Accounts.get_user_by_session_token(token)
assert session_user.id == user.id
end
test "does not return user for invalid token" do
refute Accounts.get_user_by_session_token("oops")
end
test "does not return user for expired token", %{token: token} do
{1, nil} = Repo.update_all(UserToken, set: [inserted_at: ~N[2020-01-01 00:00:00]])
refute Accounts.get_user_by_session_token(token)
end
end
describe "delete_session_token/1" do
test "deletes the token" do
user = user_fixture()
token = Accounts.generate_user_session_token(user)
assert Accounts.delete_session_token(token) == :ok
refute Accounts.get_user_by_session_token(token)
end
end
describe "deliver_user_confirmation_instructions/2" do
setup do
%{user: user_fixture()}
end
test "sends token through notification", %{user: user} do
token =
extract_user_token(fn url ->
Accounts.deliver_user_confirmation_instructions(user, url)
end)
{:ok, token} = Base.url_decode64(token, padding: false)
assert user_token = Repo.get_by(UserToken, token: :crypto.hash(:sha256, token))
assert user_token.user_id == user.id
assert user_token.sent_to == user.email
assert user_token.context == "confirm"
end
end
describe "confirm_user/1" do
setup do
user = user_fixture()
token =
extract_user_token(fn url ->
Accounts.deliver_user_confirmation_instructions(user, url)
end)
%{user: user, token: token}
end
test "confirms the email with a valid token", %{user: user, token: token} do
assert {:ok, confirmed_user} = Accounts.confirm_user(token)
assert confirmed_user.confirmed_at
assert confirmed_user.confirmed_at != user.confirmed_at
assert Repo.get!(User, user.id).confirmed_at
refute Repo.get_by(UserToken, user_id: user.id)
end
test "does not confirm with invalid token", %{user: user} do
assert Accounts.confirm_user("oops") == :error
refute Repo.get!(User, user.id).confirmed_at
assert Repo.get_by(UserToken, user_id: user.id)
end
test "does not confirm email if token expired", %{user: user, token: token} do
{1, nil} = Repo.update_all(UserToken, set: [inserted_at: ~N[2020-01-01 00:00:00]])
assert Accounts.confirm_user(token) == :error
refute Repo.get!(User, user.id).confirmed_at
assert Repo.get_by(UserToken, user_id: user.id)
end
end
describe "deliver_user_reset_password_instructions/2" do
setup do
%{user: user_fixture()}
end
test "sends token through notification", %{user: user} do
token =
extract_user_token(fn url ->
Accounts.deliver_user_reset_password_instructions(user, url)
end)
{:ok, token} = Base.url_decode64(token, padding: false)
assert user_token = Repo.get_by(UserToken, token: :crypto.hash(:sha256, token))
assert user_token.user_id == user.id
assert user_token.sent_to == user.email
assert user_token.context == "reset_password"
end
end
describe "get_user_by_reset_password_token/1" do
setup do
user = user_fixture()
token =
extract_user_token(fn url ->
Accounts.deliver_user_reset_password_instructions(user, url)
end)
%{user: user, token: token}
end
test "returns the user with valid token", %{user: %{id: id}, token: token} do
assert %User{id: ^id} = Accounts.get_user_by_reset_password_token(token)
assert Repo.get_by(UserToken, user_id: id)
end
test "does not return the user with invalid token", %{user: user} do
refute Accounts.get_user_by_reset_password_token("oops")
assert Repo.get_by(UserToken, user_id: user.id)
end
test "does not return the user if token expired", %{user: user, token: token} do
{1, nil} = Repo.update_all(UserToken, set: [inserted_at: ~N[2020-01-01 00:00:00]])
refute Accounts.get_user_by_reset_password_token(token)
assert Repo.get_by(UserToken, user_id: user.id)
end
end
describe "reset_user_password/2" do
setup do
%{user: user_fixture()}
end
test "validates password", %{user: user} do
{:error, changeset} =
Accounts.reset_user_password(user, %{
password: "not valid",
password_confirmation: "another"
})
assert %{
password: ["should be at least 12 character(s)"],
password_confirmation: ["does not match password"]
} = errors_on(changeset)
end
test "validates maximum values for password for security", %{user: user} do
too_long = String.duplicate("db", 100)
{:error, changeset} = Accounts.reset_user_password(user, %{password: too_long})
assert "should be at most 72 character(s)" in errors_on(changeset).password
end
test "updates the password", %{user: user} do
{:ok, updated_user} = Accounts.reset_user_password(user, %{password: "new valid password"})
assert is_nil(updated_user.password)
assert Accounts.get_user_by_email_and_password(user.email, "new valid password")
end
test "deletes all tokens for the given user", %{user: user} do
_ = Accounts.generate_user_session_token(user)
{:ok, _} = Accounts.reset_user_password(user, %{password: "new valid password"})
refute Repo.get_by(UserToken, user_id: user.id)
end
end
describe "inspect/2" do
test "does not include password" do
refute inspect(%User{password: "123456"}) =~ "password: \"123456\""
end
end
end
| 33.554028 | 97 | 0.658996 |
e82687d442b299d05bf42617307b4410cb9a180a | 181 | exs | Elixir | test/controllers/page_controller_test.exs | ne1ro/argo | 87f6c0de8311cb3e1e4eb05fa63bb0b71068a743 | [
"MIT"
] | null | null | null | test/controllers/page_controller_test.exs | ne1ro/argo | 87f6c0de8311cb3e1e4eb05fa63bb0b71068a743 | [
"MIT"
] | null | null | null | test/controllers/page_controller_test.exs | ne1ro/argo | 87f6c0de8311cb3e1e4eb05fa63bb0b71068a743 | [
"MIT"
] | null | null | null | defmodule Argo.PageControllerTest do
use Argo.ConnCase
test "GET /", %{conn: conn} do
conn = get conn, "/"
assert html_response(conn, 200) =~ "Арго Белгород"
end
end
| 20.111111 | 54 | 0.662983 |
e8268be057f959012913f6b471f149b4ee74b26e | 2,467 | exs | Elixir | test/search_test.exs | sandisk/tentacat | 523791d47d8454e87dc75cf77181b1fc78c125cf | [
"MIT"
] | 432 | 2015-01-19T20:38:35.000Z | 2022-01-11T14:32:28.000Z | test/search_test.exs | sandisk/tentacat | 523791d47d8454e87dc75cf77181b1fc78c125cf | [
"MIT"
] | 183 | 2015-01-19T08:55:29.000Z | 2022-03-01T20:26:03.000Z | test/search_test.exs | sandisk/tentacat | 523791d47d8454e87dc75cf77181b1fc78c125cf | [
"MIT"
] | 189 | 2015-01-04T14:56:59.000Z | 2021-12-14T20:48:18.000Z | defmodule Tentacat.SearchTest do
use ExUnit.Case, async: false
use ExVCR.Mock, adapter: ExVCR.Adapter.Hackney
import Tentacat.Search
doctest Tentacat.Search
@client Tentacat.Client.new()
setup_all do
HTTPoison.start()
end
test "code/2" do
use_cassette "search#code" do
params = %{q: "code language:elixir repo:edgurgel/tentacat", sort: "url"}
assert {_, %{"incomplete_results" => false, "items" => _}, _} = code(@client, params)
end
end
test "code/2 with none-pagination" do
use_cassette "search#code" do
params = %{q: "code language:elixir repo:edgurgel/tentacat", sort: "url", per_page: 1}
assert {_, %{"incomplete_results" => false, "items" => _}, _} =
code(@client, params, pagination: :none)
end
end
test "users/2" do
use_cassette "search#users" do
params = %{q: "elixir-lang language:elixir", sort: "followers"}
assert {_, %{"incomplete_results" => false, "items" => _}, _} = users(@client, params)
end
end
test "users/2 with none-pagination" do
use_cassette "search#users" do
params = %{q: "elixir-lang language:elixir", sort: "followers", per_page: 1}
assert {_, %{"incomplete_results" => false, "items" => _}, _} =
users(@client, params, pagination: :none)
end
end
test "repositories/2" do
use_cassette "search#repositories" do
params = %{q: "elixir-lang in:name language:elixir", sort: "stars"}
assert {_, %{"incomplete_results" => false, "items" => _}, _} =
repositories(@client, params)
end
end
test "repositories/2 with none-pagination" do
use_cassette "search#repositories" do
params = %{q: "elixir-lang in:name language:elixir", sort: "stars", per_page: 1}
assert {_, %{"incomplete_results" => false, "items" => _}, _} =
repositories(@client, params, pagination: :none)
end
end
test "issues/2" do
use_cassette "search#issues" do
params = %{q: "repo:edgurgel/tentacat is:merged", sort: "comments"}
assert {_, %{"incomplete_results" => false, "items" => _}, _} = issues(@client, params)
end
end
test "issues/2 with none-pagination" do
use_cassette "search#issues" do
params = %{q: "repo:edgurgel/tentacat is:open", sort: "comments"}
assert {_, %{"incomplete_results" => false, "items" => _}, _} =
issues(@client, params, pagination: :none)
end
end
end
| 30.45679 | 93 | 0.621808 |
e826a32204a433d9241c647d3075eeea2b937691 | 1,239 | ex | Elixir | lib/ueberauth/strategy/steam/openid.ex | selarom-epilef/ueberauth_steam | e8de1018f7447d7bf79c69b99a22bd91d6b46a38 | [
"MIT"
] | null | null | null | lib/ueberauth/strategy/steam/openid.ex | selarom-epilef/ueberauth_steam | e8de1018f7447d7bf79c69b99a22bd91d6b46a38 | [
"MIT"
] | null | null | null | lib/ueberauth/strategy/steam/openid.ex | selarom-epilef/ueberauth_steam | e8de1018f7447d7bf79c69b99a22bd91d6b46a38 | [
"MIT"
] | null | null | null | defmodule Ueberauth.Strategy.Steam.OpenID do
@url_namespace "http://specs.openid.net/auth/2.0"
@url_login "https://steamcommunity.com/openid/login"
def checkid_setup_url(callback_url) do
query = checkid_setup_query(callback_url, callback_url)
@url_login <> "?" <> URI.encode_query(query)
end
defp checkid_setup_query(realm, return_to) do
%{
"openid.mode" => "checkid_setup",
"openid.realm" => realm,
"openid.return_to" => return_to,
"openid.ns" => @url_namespace,
"openid.claimed_id" => @url_namespace <> "/identifier_select",
"openid.identity" => @url_namespace <> "/identifier_select",
}
end
def check_authentication(params) do
check_params = Map.put(params, "openid.mode", "check_authentication")
case HTTPoison.get(@url_login, [], params: check_params) do
{:ok, %{status_code: 200, body: "ns:" <> @url_namespace <> "\nis_valid:true\n"}} ->
{:ok, params}
_ ->
{:error, :invalid_request}
end
end
def get_steam_user_id("http://steamcommunity.com/openid/id/" <> id), do: {:ok, id}
def get_steam_user_id("https://steamcommunity.com/openid/id/" <> id), do: {:ok, id}
def get_steam_user_id(_), do: {:error, :badarg}
end
| 35.4 | 89 | 0.656981 |
e826da7a39f250dcc0e56c786d36f50e36d7e5a9 | 1,428 | ex | Elixir | test/support/data_case.ex | paulfioravanti/phoenix-and-elm | ed3dd942a597913fa675c6a60eff87f06a6f59cf | [
"MIT"
] | 2 | 2019-05-24T14:10:01.000Z | 2019-12-16T02:57:41.000Z | test/support/data_case.ex | paulfioravanti/phoenix-and-elm | ed3dd942a597913fa675c6a60eff87f06a6f59cf | [
"MIT"
] | 1 | 2018-11-01T08:12:02.000Z | 2018-11-01T09:37:56.000Z | test/support/data_case.ex | The-Vikings/phoenix-and-elm | c6c6bcc2415a479b81cae09cd62872873c7b7e78 | [
"MIT"
] | 4 | 2018-07-16T12:16:35.000Z | 2019-05-24T14:10:04.000Z | defmodule PhoenixAndElm.DataCase do
@moduledoc """
This module defines the setup for tests requiring
access to the application's data layer.
You may define functions here to be used as helpers in
your tests.
Finally, if the test case interacts with the database,
it cannot be async. For this reason, every test runs
inside a transaction which is reset at the beginning
of the test unless the test case is marked as async.
"""
use ExUnit.CaseTemplate
alias Ecto.Adapters.SQL.Sandbox
alias Ecto.Changeset
alias PhoenixAndElm.Repo
using do
quote do
alias PhoenixAndElm.Repo
import Ecto
import Ecto.Changeset
import Ecto.Query
import PhoenixAndElm.DataCase
end
end
setup tags do
:ok = Sandbox.checkout(Repo)
unless tags[:async] do
Sandbox.mode(Repo, {:shared, self()})
end
:ok
end
@doc """
A helper that transform changeset errors to a map of messages.
assert {:error, changeset} = Accounts.create_user(%{password: "short"})
assert "password is too short" in errors_on(changeset).password
assert %{password: ["password is too short"]} = errors_on(changeset)
"""
def errors_on(changeset) do
Changeset.traverse_errors(changeset, fn {message, opts} ->
Enum.reduce(opts, message, fn {key, value}, acc ->
String.replace(acc, "%{#{key}}", to_string(value))
end)
end)
end
end
| 25.052632 | 77 | 0.684174 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.