hexsha stringlengths 40 40 | size int64 2 991k | ext stringclasses 2 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 208 | max_stars_repo_name stringlengths 6 106 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses list | max_stars_count int64 1 33.5k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 208 | max_issues_repo_name stringlengths 6 106 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses list | max_issues_count int64 1 16.3k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 208 | max_forks_repo_name stringlengths 6 106 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses list | max_forks_count int64 1 6.91k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 991k | avg_line_length float64 1 36k | max_line_length int64 1 977k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c70a4c5cef0a514a3318e551a0fbfbc2f33067a | 3,262 | exs | Elixir | test/exdns/zone/cache_test.exs | jeanparpaillon/exdns | 53b7fc780399eda96d42052e11e03d5eb0dcd789 | [
"MIT"
] | 16 | 2016-05-26T10:11:57.000Z | 2021-01-08T15:09:19.000Z | test/exdns/zone/cache_test.exs | jeanparpaillon/exdns | 53b7fc780399eda96d42052e11e03d5eb0dcd789 | [
"MIT"
] | 9 | 2016-08-11T00:48:27.000Z | 2020-09-16T22:10:07.000Z | test/exdns/zone/cache_test.exs | jeanparpaillon/exdns | 53b7fc780399eda96d42052e11e03d5eb0dcd789 | [
"MIT"
] | 11 | 2016-08-10T08:13:36.000Z | 2021-04-03T10:20:11.000Z | defmodule Exdns.Zone.CacheTest do
  # Integration tests for Exdns.Zone.Cache. async: false because every test
  # reads the same cache that setup/0 populates (shared global state).
  use ExUnit.Case, async: false
  require Exdns.Records

  # Seeds the cache with an "example.com" zone holding an SOA, an NS and a
  # CNAME record, and hands each record to the tests through the context map.
  setup do
    name = "example.com"
    soa_record = Exdns.Records.dns_rr(name: name, type: :dns_terms_const.dns_type_soa, data: Exdns.Records.dns_rrdata_soa())
    ns_record = Exdns.Records.dns_rr(name: name, type: :dns_terms_const.dns_type_ns, data: Exdns.Records.dns_rrdata_ns(dname: "ns1.example.com"))
    cname_record = Exdns.Records.dns_rr(name: "www.#{name}", type: :dns_terms_const.dns_type_cname, data: Exdns.Records.dns_rrdata_cname(dname: "something.com"))
    records = [soa_record, ns_record, cname_record]
    zone = %Exdns.Zone{name: name, authority: soa_record, records: records, records_by_name: Exdns.Zone.Parser.build_named_index(records)}
    Exdns.Zone.Cache.put_zone(name, zone)
    {:ok, %{:zone => zone, delegation: ns_record, authority: soa_record, cname_record: cname_record}}
  end

  test "find zone not found" do
    assert Exdns.Zone.Cache.find_zone("notfound.com") == {:error, :not_authoritative}
  end

  # find_zone/1 returns the zone with records emptied and the by-name index
  # replaced by :trimmed (a lightweight copy of the cached zone).
  test "find zone", %{zone: zone} do
    assert Exdns.Zone.Cache.find_zone(zone.name) == %{zone | records: [], records_by_name: :trimmed}
  end

  test "get zone", %{zone: zone} do
    assert Exdns.Zone.Cache.get_zone(zone.name) == {:ok, %{zone | records: [], records_by_name: :trimmed}}
  end

  test "get authority for name", %{zone: zone} do
    {:ok,authority} = Exdns.Zone.Cache.get_authority(zone.name)
    assert Exdns.Records.dns_rr(authority, :type) == :dns_terms_const.dns_type_soa
  end

  # get_authority/1 also accepts a whole DNS message and resolves the
  # authority from its question section.
  test "get authority for message", %{zone: zone} do
    question = Exdns.Records.dns_query(name: zone.name, type: :dns_terms_const.dns_type_a)
    message = Exdns.Records.dns_message(questions: [question])
    {:ok, authority} = Exdns.Zone.Cache.get_authority(message)
    assert Exdns.Records.dns_rr(authority, :type) == :dns_terms_const.dns_type_soa
  end

  test "get records by name", %{zone: zone, cname_record: cname_record} do
    assert Exdns.Zone.Cache.get_records_by_name("example.com") == zone.records -- [cname_record]
    assert Exdns.Zone.Cache.get_records_by_name("www.example.com") == [cname_record]
  end

  test "in_zone?" do
    assert Exdns.Zone.Cache.in_zone?("example.com")
  end

  # NOTE(review): delegations are only reported for the delegated name
  # itself, not for the zone apex -- confirm this matches cache semantics.
  test "get delegations returns delegation records", %{delegation: ns_record} do
    assert Exdns.Zone.Cache.get_delegations("example.com") == []
    assert Exdns.Zone.Cache.get_delegations("ns1.example.com") == [ns_record]
  end

  test "find zone in cache exact name", %{zone: zone} do
    assert Exdns.Zone.Cache.find_zone_in_cache("example.com") == {:ok, zone}
  end

  # Subdomain lookups fall back to an enclosing cached zone.
  test "find zone in cache subdomain", %{zone: zone} do
    assert Exdns.Zone.Cache.find_zone_in_cache("fewf.afdaf.example.com") == {:ok, zone}
  end

  test "find zone in cache not found" do
    assert Exdns.Zone.Cache.find_zone_in_cache("notfound.com") == {:error, :zone_not_found}
  end

  test "normalize name" do
    assert Exdns.Zone.Cache.normalize_name("eXaMpLe.CoM") == "example.com"
  end

  # Toggles the application env around the call so the fallback setting does
  # not leak into other tests.
  test "fallback to wildcard zone when configured" do
    Application.put_env(:exdns, :wildcard_fallback, true)
    {:ok, _zone} = Exdns.Zone.Cache.find_zone_in_cache("notfound.com")
    Application.put_env(:exdns, :wildcard_fallback, false)
  end
end
| 38.833333 | 161 | 0.717045 |
1c70aa7801e5b090fe5e173eb3246c690f1d9c9c | 2,068 | exs | Elixir | chat-api/test/controllers/room_controller_test.exs | antonpetkoff/smolltolk | 64c6d4a52e7624302ad87a072fc108fe4c1382ec | [
"MIT"
] | null | null | null | chat-api/test/controllers/room_controller_test.exs | antonpetkoff/smolltolk | 64c6d4a52e7624302ad87a072fc108fe4c1382ec | [
"MIT"
] | 3 | 2018-01-23T10:53:08.000Z | 2018-01-23T10:58:23.000Z | chat-api/test/controllers/room_controller_test.exs | antonpetkoff/smolltolk | 64c6d4a52e7624302ad87a072fc108fe4c1382ec | [
"MIT"
defmodule Chat.RoomControllerTest do
  @moduledoc false
  use Chat.ConnCase

  alias Chat.Room

  # Attribute fixtures shared by the create/update tests.
  @valid_attrs %{name: "some content", topic: "some content"}
  @invalid_attrs %{}

  setup %{conn: conn} do
    json_conn = put_req_header(conn, "accept", "application/json")
    {:ok, conn: json_conn}
  end

  test "lists all entries on index", %{conn: conn} do
    conn = get conn, room_path(conn, :index)
    assert [] == json_response(conn, 200)["data"]
  end

  test "shows chosen resource", %{conn: conn} do
    room = Repo.insert! %Room{}
    conn = get conn, room_path(conn, :show, room)
    expected = %{"id" => room.id, "name" => room.name, "topic" => room.topic}
    assert json_response(conn, 200)["data"] == expected
  end

  test "renders page not found when id is nonexistent", %{conn: conn} do
    assert_error_sent 404, fn ->
      get conn, room_path(conn, :show, -1)
    end
  end

  test "creates and renders resource when data is valid", %{conn: conn} do
    conn = post conn, room_path(conn, :create), room: @valid_attrs
    assert json_response(conn, 201)["data"]["id"]
    assert Repo.get_by(Room, @valid_attrs)
  end

  test "does not create resource and renders errors when data is invalid", %{conn: conn} do
    conn = post conn, room_path(conn, :create), room: @invalid_attrs
    refute json_response(conn, 422)["errors"] == %{}
  end

  test "updates and renders chosen resource when data is valid", %{conn: conn} do
    room = Repo.insert! %Room{}
    conn = put conn, room_path(conn, :update, room), room: @valid_attrs
    assert json_response(conn, 200)["data"]["id"]
    assert Repo.get_by(Room, @valid_attrs)
  end

  test "does not update chosen resource and renders errors when data is invalid", %{conn: conn} do
    room = Repo.insert! %Room{}
    conn = put conn, room_path(conn, :update, room), room: @invalid_attrs
    refute json_response(conn, 422)["errors"] == %{}
  end

  test "deletes chosen resource", %{conn: conn} do
    room = Repo.insert! %Room{}
    conn = delete conn, room_path(conn, :delete, room)
    assert response(conn, 204)
    refute Repo.get(Room, room.id)
  end
end
| 33.354839 | 98 | 0.65764 |
1c70cd1630836145e66bb36d61544196bed2d515 | 804 | ex | Elixir | lib/nomad_client/model/sentinel_policy_list_stub.ex | mrmstn/nomad_client | a586022e5eb4d166acba08b55b198ec079d4b118 | [
"Apache-2.0"
] | 8 | 2021-09-04T21:22:53.000Z | 2022-02-22T22:48:38.000Z | lib/nomad_client/model/sentinel_policy_list_stub.ex | mrmstn/nomad_client | a586022e5eb4d166acba08b55b198ec079d4b118 | [
"Apache-2.0"
] | null | null | null | lib/nomad_client/model/sentinel_policy_list_stub.ex | mrmstn/nomad_client | a586022e5eb4d166acba08b55b198ec079d4b118 | [
"Apache-2.0"
] | null | null | null | # NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
# https://openapi-generator.tech
# Do not edit the class manually.
defmodule NomadClient.Model.SentinelPolicyListStub do
  @moduledoc """
  Compact (stub) representation of a Sentinel policy as returned by Nomad
  list endpoints. All fields are optional and may be nil when absent from
  the JSON payload. Field names are capitalized atoms to mirror the
  upstream JSON keys.
  """

  # Poison encodes the struct's fields directly as JSON.
  @derive [Poison.Encoder]
  defstruct [
    :Name,
    :Description,
    :Scope,
    :EnforcementLevel,
    :CreateIndex,
    :ModifyIndex
  ]

  @type t :: %__MODULE__{
          :Name => String.t() | nil,
          :Description => String.t() | nil,
          :Scope => String.t() | nil,
          :EnforcementLevel => String.t() | nil,
          :CreateIndex => integer() | nil,
          :ModifyIndex => integer() | nil
        }
end
defimpl Poison.Decoder, for: NomadClient.Model.SentinelPolicyListStub do
  # Decoding is the identity: the stub needs no post-processing.
  def decode(value, _options), do: value
end
| 22.971429 | 91 | 0.620647 |
1c70d9424e98ce20cf9484f58bf71939a90764f7 | 446 | exs | Elixir | priv/repo/migrations/20210820161938_remove_context_types.exs | danielmarreirosdeoliveira/cometoid | 7a1c4f7700eb4dbb5f4c44fdc5185d048a429e50 | [
"Apache-2.0"
] | null | null | null | priv/repo/migrations/20210820161938_remove_context_types.exs | danielmarreirosdeoliveira/cometoid | 7a1c4f7700eb4dbb5f4c44fdc5185d048a429e50 | [
"Apache-2.0"
] | null | null | null | priv/repo/migrations/20210820161938_remove_context_types.exs | danielmarreirosdeoliveira/cometoid | 7a1c4f7700eb4dbb5f4c44fdc5185d048a429e50 | [
"Apache-2.0"
defmodule Cometoid.Repo.Migrations.RemoveContextTypes do
  use Ecto.Migration

  # Inlines each context type's title onto its contexts as a plain string
  # column, then drops the foreign-key column and the context_types table.
  # NOTE: the raw execute/1 and drop/1 make this migration irreversible.
  def change do
    alter table(:contexts) do
      add :context_type, :string
    end

    execute(
      "UPDATE contexts SET context_type=context_types.title FROM context_types WHERE contexts.context_type_id=context_types.id"
    )

    alter table(:contexts) do
      remove :context_type_id
    end

    drop table(:context_types)
  end
end
| 26.235294 | 70 | 0.690583 |
1c70e25e53de20428e1c2b4e238b6d3a08ca8502 | 1,162 | ex | Elixir | lib/ex_oneroster/web/controllers/class_controller.ex | jrissler/ex_oneroster | cec492117bffc14aec91e2448643682ceeb449e9 | [
"Apache-2.0"
] | 3 | 2018-09-06T11:15:07.000Z | 2021-12-27T15:36:51.000Z | lib/ex_oneroster/web/controllers/class_controller.ex | jrissler/ex_oneroster | cec492117bffc14aec91e2448643682ceeb449e9 | [
"Apache-2.0"
] | null | null | null | lib/ex_oneroster/web/controllers/class_controller.ex | jrissler/ex_oneroster | cec492117bffc14aec91e2448643682ceeb449e9 | [
"Apache-2.0"
defmodule ExOneroster.Web.ClassController do
  @moduledoc false
  use ExOneroster.Web, :controller

  alias ExOneroster.Classes
  alias ExOneroster.Classes.Class

  # Unmatched `with` results (error changesets) are rendered here.
  action_fallback ExOneroster.Web.FallbackController

  # GET /classes — render every class.
  def index(conn, _params) do
    render(conn, "index.json", classes: Classes.list_classes())
  end

  # POST /classes — create a class, reply 201 with a Location header.
  def create(conn, %{"class" => class_params}) do
    with {:ok, %Class{} = class} <- Classes.create_class(class_params) do
      conn
      |> put_status(:created)
      |> put_resp_header("location", class_path(conn, :show, class))
      |> render("show.json", class: class)
    end
  end

  # GET /classes/:id — get_class!/1 raises (404) on an unknown id.
  def show(conn, %{"id" => id}) do
    render(conn, "show.json", class: Classes.get_class!(id))
  end

  # PUT/PATCH /classes/:id — update and re-render the class.
  def update(conn, %{"id" => id, "class" => class_params}) do
    class = Classes.get_class!(id)

    with {:ok, %Class{} = updated} <- Classes.update_class(class, class_params) do
      render(conn, "show.json", class: updated)
    end
  end

  # DELETE /classes/:id — 204 with an empty body on success.
  def delete(conn, %{"id" => id}) do
    class = Classes.get_class!(id)

    with {:ok, %Class{}} <- Classes.delete_class(class) do
      send_resp(conn, :no_content, "")
    end
  end
end
| 27.023256 | 80 | 0.649742 |
1c70ed08114d456549106af9d32c3712f357ee8e | 117 | exs | Elixir | .formatter.exs | fribmendes/firenest | 75ecb367b66a67266fc04a25e355f75369bd3fd0 | [
"Apache-2.0"
] | 266 | 2016-11-04T12:52:55.000Z | 2021-12-30T21:54:03.000Z | .formatter.exs | fribmendes/firenest | 75ecb367b66a67266fc04a25e355f75369bd3fd0 | [
"Apache-2.0"
] | 22 | 2016-12-28T23:08:20.000Z | 2018-11-20T19:09:52.000Z | .formatter.exs | fribmendes/firenest | 75ecb367b66a67266fc04a25e355f75369bd3fd0 | [
"Apache-2.0"
] | 21 | 2016-11-10T14:45:31.000Z | 2021-07-02T23:53:20.000Z | [
locals_without_parens: [
assert_hibernate: 1
],
inputs: ["mix.exs", "{config,lib,test}/**/*.{ex,exs}"]
]
| 16.714286 | 56 | 0.581197 |
1c7110cf94bdda15a200246d63092bd2d8c3ba5d | 2,063 | ex | Elixir | clients/vision/lib/google_api/vision/v1/model/image.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2021-12-20T03:40:53.000Z | 2021-12-20T03:40:53.000Z | clients/vision/lib/google_api/vision/v1/model/image.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2020-08-18T00:11:23.000Z | 2020-08-18T00:44:16.000Z | clients/vision/lib/google_api/vision/v1/model/image.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Vision.V1.Model.Image do
  @moduledoc """
  Client image to perform Google Cloud Vision API tasks over.

  ## Attributes

  *   `content` (*type:* `String.t`, *default:* `nil`) - Image content, represented as a stream of bytes. Note: As with all `bytes` fields, protobuffers use a pure binary representation, whereas JSON representations use base64. Currently, this field only works for BatchAnnotateImages requests. It does not work for AsyncBatchAnnotateImages requests.
  *   `source` (*type:* `GoogleApi.Vision.V1.Model.ImageSource.t`, *default:* `nil`) - Google Cloud Storage image location, or publicly-accessible image URL. If both `content` and `source` are provided for an image, `content` takes precedence and is used to perform the image annotation request.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :content => String.t() | nil,
          :source => GoogleApi.Vision.V1.Model.ImageSource.t() | nil
        }

  # field/1,2 are presumably macros injected by GoogleApi.Gax.ModelBase's
  # __using__ to declare JSON-mapped struct fields -- see the Gax library.
  field(:content)
  field(:source, as: GoogleApi.Vision.V1.Model.ImageSource)
end
defimpl Poison.Decoder, for: GoogleApi.Vision.V1.Model.Image do
  # Delegate to the generated decode/2 on the model module.
  def decode(value, options), do: GoogleApi.Vision.V1.Model.Image.decode(value, options)
end
defimpl Poison.Encoder, for: GoogleApi.Vision.V1.Model.Image do
  # Encoding is handled generically by the Gax model base.
  def encode(value, options), do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 41.26 | 350 | 0.739215 |
1c7153add169b9c8b1a5c48e2174ecd612dd6416 | 59 | ex | Elixir | apps/theta_web/lib/theta_web/views/layout_view.ex | LangPham/thetaproject | c6479d1b761ff58fe6ae5f82e2d9de87a8658883 | [
"MIT"
] | null | null | null | apps/theta_web/lib/theta_web/views/layout_view.ex | LangPham/thetaproject | c6479d1b761ff58fe6ae5f82e2d9de87a8658883 | [
"MIT"
] | 11 | 2020-07-21T09:34:54.000Z | 2021-08-29T07:38:02.000Z | apps/theta_web/lib/theta_web/views/layout_view.ex | LangPham/thetaproject | c6479d1b761ff58fe6ae5f82e2d9de87a8658883 | [
"MIT"
defmodule ThetaWeb.LayoutView do
  # Standard Phoenix layout view; all rendering helpers come from the
  # :view definition in ThetaWeb.
  use ThetaWeb, :view
end
| 14.75 | 32 | 0.79661 |
1c7171799b1c3e11713a374893c914ef42bf2b57 | 465 | ex | Elixir | lib/cldr/unit/alias.ex | kianmeng/cldr_units | fae3ed1f658e6b4c5164c2ebbe786cc562014a8b | [
"Apache-2.0"
] | 11 | 2019-07-31T19:33:07.000Z | 2021-11-15T13:11:19.000Z | lib/cldr/unit/alias.ex | kianmeng/cldr_units | fae3ed1f658e6b4c5164c2ebbe786cc562014a8b | [
"Apache-2.0"
] | 19 | 2019-06-09T02:41:44.000Z | 2022-03-03T14:54:40.000Z | lib/cldr/unit/alias.ex | kianmeng/cldr_units | fae3ed1f658e6b4c5164c2ebbe786cc562014a8b | [
"Apache-2.0"
defmodule Cldr.Unit.Alias do
  @moduledoc """
  Functions to manage unit name aliases
  """

  # Hand-maintained aliases; these win over any conflicting CLDR entries.
  @extra_aliases %{
    metre: :meter,
    kilometre: :kilometer
  }

  # Full alias table: CLDR-configured aliases merged with the extras above.
  @aliases Cldr.Config.units()
           |> Map.get(:aliases)
           |> Map.merge(@extra_aliases)

  @doc """
  Returns the map of unit name aliases.
  """
  def aliases do
    @aliases
  end

  @doc """
  Un-aliases the provided unit if there
  is one or return the argument unchanged.
  """
  def alias(unit_alias) do
    Map.get(aliases(), unit_alias) || unit_alias
  end
end
| 16.034483 | 42 | 0.608602 |
1c7179e8e2ba5039cbb95b7e4814b37af93878f5 | 2,812 | ex | Elixir | lib/response/response.ex | Code-Mercenaries/phoenix_alexa | 168d212f0a1653f0b08b1499f695020004634751 | [
"MIT"
] | null | null | null | lib/response/response.ex | Code-Mercenaries/phoenix_alexa | 168d212f0a1653f0b08b1499f695020004634751 | [
"MIT"
] | null | null | null | lib/response/response.ex | Code-Mercenaries/phoenix_alexa | 168d212f0a1653f0b08b1499f695020004634751 | [
"MIT"
defmodule PhoenixAlexa.Response do
  @moduledoc """
  Builder functions for assembling an Alexa skill response. Each setter
  returns an updated `%Response{}` so calls can be piped.
  """

  alias PhoenixAlexa.{Response, TextOutputSpeech, SsmlOutputSpeech}
  alias PhoenixAlexa.{SimpleCard, StandardCard, LinkAccountCard}

  defstruct version: "1.0",
            sessionAttributes: %{},
            response: %{
              shouldEndSession: false
            }

  @doc false
  def set_session_attributes(response, sessionAttributes) do
    %Response{response | sessionAttributes: sessionAttributes}
  end

  # Output speech accepts either plain-text or SSML speech structs.
  def set_output_speech(response, %TextOutputSpeech{} = speech),
    do: put_response_field(response, :outputSpeech, speech)

  def set_output_speech(response, %SsmlOutputSpeech{} = speech),
    do: put_response_field(response, :outputSpeech, speech)

  # Any of the three card structs may be attached to the response.
  def set_card(response, %SimpleCard{} = card), do: put_response_field(response, :card, card)
  def set_card(response, %StandardCard{} = card), do: put_response_field(response, :card, card)
  def set_card(response, %LinkAccountCard{} = card), do: put_response_field(response, :card, card)

  # A reprompt wraps its speech in an outputSpeech envelope.
  def set_reprompt(response, %TextOutputSpeech{} = speech),
    do: put_response_field(response, :reprompt, %{outputSpeech: speech})

  def set_reprompt(response, %SsmlOutputSpeech{} = speech),
    do: put_response_field(response, :reprompt, %{outputSpeech: speech})

  def set_should_end_session(response, shouldEndSession) do
    %Response{response | response: %{response.response | shouldEndSession: shouldEndSession}}
  end

  # Card helpers -- dispatch on the concrete card struct.

  def set_title(%SimpleCard{} = card, title), do: SimpleCard.set_title(card, title)
  def set_title(%StandardCard{} = card, title), do: StandardCard.set_title(card, title)
  def set_title(%LinkAccountCard{} = card, title), do: LinkAccountCard.set_title(card, title)

  def set_content(%SimpleCard{} = card, content), do: SimpleCard.set_content(card, content)
  def set_content(%LinkAccountCard{} = card, content), do: LinkAccountCard.set_content(card, content)

  def set_text(%StandardCard{} = card, text), do: StandardCard.set_text(card, text)

  def set_small_image_url(%StandardCard{} = card, image_url),
    do: StandardCard.set_small_image_url(card, image_url)

  def set_large_image_url(%StandardCard{} = card, image_url),
    do: StandardCard.set_large_image_url(card, image_url)

  # Shared helper: put a key into the inner :response map.
  defp put_response_field(%Response{response: inner} = response, key, value) do
    %Response{response | response: Map.put(inner, key, value)}
  end
end
| 30.236559 | 93 | 0.712304 |
1c718dd1e7ec363325fb90fb4f4353165d04c407 | 1,560 | ex | Elixir | test/support/data_case.ex | sdoering01/coop_minesweeper | 4fc7265c3734584f93a325c2ceaa172da578f7a5 | [
"MIT"
] | null | null | null | test/support/data_case.ex | sdoering01/coop_minesweeper | 4fc7265c3734584f93a325c2ceaa172da578f7a5 | [
"MIT"
] | null | null | null | test/support/data_case.ex | sdoering01/coop_minesweeper | 4fc7265c3734584f93a325c2ceaa172da578f7a5 | [
"MIT"
defmodule CoopMinesweeper.DataCase do
  @moduledoc """
  Test-case template for tests that touch the application's data layer.

  Using this case imports the Ecto helpers and the Repo alias, and wraps
  every test in a SQL sandbox so database changes are rolled back at the
  end of each test. Pass `use CoopMinesweeper.DataCase, async: true` only
  when the database supports it (e.g. PostgreSQL).
  """

  use ExUnit.CaseTemplate

  using do
    quote do
      alias CoopMinesweeper.Repo

      import Ecto
      import Ecto.Changeset
      import Ecto.Query
      import CoopMinesweeper.DataCase
    end
  end

  setup tags do
    # Sandbox ownership is shared with spawned processes unless the test
    # runs async (then each test owns its own connection).
    owner = Ecto.Adapters.SQL.Sandbox.start_owner!(CoopMinesweeper.Repo, shared: not tags[:async])
    on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(owner) end)
    :ok
  end

  @doc """
  A helper that transforms changeset errors into a map of messages.

      assert {:error, changeset} = Accounts.create_user(%{password: "short"})
      assert "password is too short" in errors_on(changeset).password
      assert %{password: ["password is too short"]} = errors_on(changeset)
  """
  def errors_on(changeset) do
    Ecto.Changeset.traverse_errors(changeset, fn {message, opts} ->
      # Interpolate validation options (e.g. %{count}) into the message.
      Regex.replace(~r"%{(\w+)}", message, fn _, key ->
        atom_key = String.to_existing_atom(key)
        to_string(Keyword.get(opts, atom_key, key))
      end)
    end)
  end
end
| 30 | 96 | 0.703205 |
1c71a6ce08f80ee60f136f5fd54424bfb981adb8 | 4,496 | ex | Elixir | lib/declaimer/dsl.ex | Joe-noh/declaimer | b120e9c1367f42084abe06d5e753578fe768fff8 | [
"MIT"
] | null | null | null | lib/declaimer/dsl.ex | Joe-noh/declaimer | b120e9c1367f42084abe06d5e753578fe768fff8 | [
"MIT"
] | 2 | 2017-10-09T15:19:46.000Z | 2017-10-09T15:19:46.000Z | lib/declaimer/dsl.ex | Joe-noh/declaimer | b120e9c1367f42084abe06d5e753578fe768fff8 | [
"MIT"
] | null | null | null | defmodule Declaimer.DSL do
alias Declaimer.TagAttribute
import Declaimer.Builder
# metadata functions !!
def title(title), do: {:h1, [title], class: ["title"]}
def subtitle(subtitle), do: {:div, [subtitle], class: ["subtitle"]}
def date(date), do: {:div, [date], class: ["date"]}
def author(author), do: {:div, [author], class: ["author"]}
# macro generation !!
# tags which take optional options and a do-end block
[:cite, :table, :left, :right, :bullet, :numbered]
|> Enum.each fn (tag) ->
do_function_name = String.to_atom("do_" <> Atom.to_string(tag))
defmacro unquote(tag)(opts \\ [], content)
defmacro unquote(tag)(opts, do: {:__block__, _, contents}) do
fun = unquote(do_function_name)
quote do: unquote(fun)(unquote(opts), unquote(contents))
end
defmacro unquote(tag)(opts, do: content) do
fun = unquote(do_function_name)
quote do: unquote(fun)(unquote(opts), [unquote content])
end
end
# tags which accept one argument and optional options
[:o, :item, :text, :image, :takahashi]
|> Enum.each fn (tag) ->
do_function_name = String.to_atom("do_" <> Atom.to_string(tag))
defmacro unquote(tag)(arg, opts \\ []) do
fun = unquote(do_function_name)
quote do: unquote(fun)(unquote(arg), unquote(opts))
end
end
# tags which accept one argument, optional option and one block
[:slide, :code, :link]
|> Enum.each fn (tag) ->
do_function_name = String.to_atom("do_" <> Atom.to_string(tag))
defmacro unquote(tag)(arg, opts \\ [], content)
defmacro unquote(tag)(arg, opts, do: {:__block__, _, contents}) do
fun = unquote(do_function_name)
quote do: unquote(fun)(unquote(arg), unquote(opts), unquote(contents))
end
defmacro unquote(tag)(arg, opts, do: content) do
fun = unquote(do_function_name)
quote do: unquote(fun)(unquote(arg), unquote(opts), [unquote content])
end
end
# exceptional !!
defmacro link(arg, opts, content) when is_binary(content) do
quote do
do_link(unquote(arg), unquote(opts), [text unquote content])
end
end
defmacro presentation(opts \\ [], do: {:__block__, _, contents}) do
{slides, metadata} = Enum.partition(contents, &(elem(&1, 0) == :slide))
theme = opts[:theme]
quote do
slides = case unquote(theme) do
nil -> unquote(slides)
theme -> Enum.map unquote(slides), fn ({tag, content, attrs}) ->
{tag, content, TagAttribute.put_new_theme(attrs, theme)}
end
end
{html, themes} = build([{
:div,
unquote(metadata),
[class: ["cover", "slide"], theme: unquote(theme)]
} | slides])
{html, themes, unquote(opts)}
end
end
# worker functions !!
def do_slide(title, opts, contents) do
attrs = TagAttribute.apply([class: ["slide"]], opts)
{:div, [{:h2, [title], []} | contents], attrs}
end
def do_cite(opts, contents) do
{:blockquote, contents, TagAttribute.apply([], opts)}
end
[:do_o, :do_item]
|> Enum.each fn (name) ->
def unquote(name)(text, opts) do
{:li, [text], TagAttribute.apply([], opts)}
end
end
def do_text(text, opts) do
{:p, [text], TagAttribute.apply([], opts)}
end
def do_image(src, opts) do
{:img, [], TagAttribute.apply([src: src], opts)}
end
def do_takahashi(text, opts) do
attrs = TagAttribute.add_class([], opts[:deco])
{:div, [{:p, [text], attrs}], class: ["takahashi"]}
end
def do_code(lang, opts, contents) do
attrs = TagAttribute.apply([], opts)
{:pre, [{:code, contents, class: [lang]}], attrs}
end
def do_link(url, opts, contents) do
{:a, contents, TagAttribute.apply([href: url], opts)}
end
def do_table(opts, [first | rest] = rows) do
if Keyword.get(opts, :header, true) do
{:table, [do_table_header(first) | do_table_rows(rest)], []}
else
{:table, do_table_rows(rows), []}
end
end
defp do_table_header(headers) do
{:tr, Enum.map(headers, &{:th, [&1], []}), []}
end
defp do_table_rows(rows) do
Enum.map rows, fn (row) ->
{:tr, Enum.map(row, &{:td, [&1], []}), []}
end
end
def do_left(_, contents) do
{:div, contents, class: ["left-half"]}
end
def do_right(_, contents) do
{:div, contents, class: ["right-half"]}
end
def do_bullet(_, contents) do
{:ul, contents, []}
end
def do_numbered(_, contents) do
{:ol, contents, []}
end
end
| 27.925466 | 76 | 0.609875 |
1c71e0bf197d30bf9e234edda51c8abe21403a85 | 1,181 | exs | Elixir | darts/test/darts_test.exs | okaram/exercism-elixir | 66429a50738a16da5d352ad4455a483622625949 | [
"Unlicense"
] | null | null | null | darts/test/darts_test.exs | okaram/exercism-elixir | 66429a50738a16da5d352ad4455a483622625949 | [
"Unlicense"
] | null | null | null | darts/test/darts_test.exs | okaram/exercism-elixir | 66429a50738a16da5d352ad4455a483622625949 | [
"Unlicense"
] | null | null | null | defmodule DartsTest do
  # Darts.score/1 maps an {x, y} landing point to points: per these cases,
  # 10 within radius 1, 5 within radius 5, 1 within radius 10, 0 outside --
  # with circle boundaries scoring the inner (higher) value.
  use ExUnit.Case

  # @tag :pending
  test "Missed target" do
    assert Darts.score({-9, 9}) == 0
  end

  test "On the outer circle" do
    assert Darts.score({0, 10}) == 1
  end

  test "On the middle circle" do
    assert Darts.score({-5, 0}) == 5
  end

  test "On the inner circle" do
    assert Darts.score({0, -1}) == 10
  end

  test "Exactly on centre" do
    assert Darts.score({0, 0}) == 10
  end

  test "Near the centre" do
    assert Darts.score({-0.1, -0.1}) == 10
  end

  test "Just within the inner circle" do
    assert Darts.score({0.7, 0.7}) == 10
  end

  test "Just outside the inner circle" do
    assert Darts.score({0.8, -0.8}) == 5
  end

  test "Just within the middle circle" do
    assert Darts.score({-3.5, 3.5}) == 5
  end

  test "Just outside the middle circle" do
    assert Darts.score({-3.6, -3.6}) == 1
  end

  test "Just within the outer circle" do
    assert Darts.score({-7.0, 7.0}) == 1
  end

  test "Just outside the outer circle" do
    assert Darts.score({7.1, -7.1}) == 0
  end

  test "Asymmetric position between the inner and middle circles" do
    assert Darts.score({0.5, -4}) == 5
  end
end
| 17.367647 | 68 | 0.607113 |
1c72019d01d188ce12488fd6dcef004c4998e2dc | 926 | exs | Elixir | mix.exs | jamesandariese/ex-utils-caddr | dca3ccae973cf5dda8874af343cac2eb207841e8 | [
"Apache-2.0"
] | null | null | null | mix.exs | jamesandariese/ex-utils-caddr | dca3ccae973cf5dda8874af343cac2eb207841e8 | [
"Apache-2.0"
] | null | null | null | mix.exs | jamesandariese/ex-utils-caddr | dca3ccae973cf5dda8874af343cac2eb207841e8 | [
"Apache-2.0"
defmodule Caddr.MixProject do
  @moduledoc false
  use Mix.Project

  def project do
    [
      app: :caddr,
      version: "0.2.0",
      elixir: "~> 1.6",
      start_permanent: Mix.env() == :prod,
      package: package(),
      deps: deps()
    ]
  end

  # Run "mix help compile.app" to learn about applications.
  def application do
    [
      extra_applications: [:logger]
    ]
  end

  # Hex package metadata.
  defp package do
    [
      licenses: ["apache-2.0"],
      name: "ex_utils_caddr",
      links: %{
        "github" => "https://github.com/jamesandariese/ex-utils-caddr"
      },
      maintainers: [
        "James Andariese (caddr) <james@strudelline.net>"
      ],
      description: """
      Things I didn't want to have to write again.
      """
    ]
  end

  # Run "mix help deps" to learn about dependencies.
  defp deps do
    [
      {:ex_doc, "~> 0.16", only: :dev, runtime: false}
    ]
  end
end
| 21.534884 | 88 | 0.582073 |
1c72554a840512fa5235bf34c9870360f2bf1f17 | 10,692 | ex | Elixir | lib/zeroth/user.ex | arnau/zeroth | 6826f140567002449cacc1502ade00e5da23c7ad | [
"MIT"
] | 1 | 2017-06-02T10:07:47.000Z | 2017-06-02T10:07:47.000Z | lib/zeroth/user.ex | arnau/zeroth | 6826f140567002449cacc1502ade00e5da23c7ad | [
"MIT"
] | null | null | null | lib/zeroth/user.ex | arnau/zeroth | 6826f140567002449cacc1502ade00e5da23c7ad | [
"MIT"
] | null | null | null | defmodule Zeroth.User do
  @moduledoc """
  Auth0 User management. https://auth0.com/docs/api/management/v2#!/Users
  """

  alias Zeroth.Api
  alias Zeroth.Token
  alias Zeroth.Param
  alias Lonely.Result
  alias Lonely.Option
  alias URI.Ext, as: URIE

  # Base URI for all Users endpoints; ids and query strings are merged onto
  # it per request.
  @path URI.parse("/api/v2/users")

  @derive [Poison.Encoder, Poison.Decoder]
  # Every field is optional: the struct is also the decode target for
  # partial API responses (e.g. when `fields` filtering is requested).
  defstruct [:email,
             :email_verified,
             :phone_number,
             :phone_verified,
             :user_id,
             :created_at,
             :updated_at,
             :identities,
             :app_metadata,
             :user_metadata,
             :picture,
             :name,
             :nickname,
             :multifactor,
             :last_ip,
             :last_login,
             :logins_count,
             :blocked,
             :given_name,
             :family_name]

  @type t :: %__MODULE__{email: String.t | nil,
                         email_verified: boolean | nil,
                         phone_number: String.t | nil,
                         phone_verified: boolean | nil,
                         user_id: String.t | nil,
                         created_at: DateTime.t | nil,
                         updated_at: DateTime.t | nil,
                         identities: list(String.t) | nil,
                         app_metadata: map | nil,
                         user_metadata: map | nil,
                         picture: String.t | nil,
                         name: String.t | nil,
                         nickname: String.t | nil,
                         multifactor: list(String.t) | nil,
                         last_ip: String.t | nil,
                         last_login: String.t | nil,
                         logins_count: integer | nil,
                         blocked: boolean | nil,
                         given_name: String.t | nil,
                         family_name: String.t | nil}
@doc """
Lists users.

https://auth0.com/docs/api/management/v2#!/Users/get_users

**Note**: `include_totals` is not supported.

## Options

  * `q`: Search criteria using query string syntax.
  * `page`: The page number. Zero based.
  * `per_page`: The amount of entries per page.
  * `sort`: `field:order`, where order is `1` (ascending) or `-1` (descending).
  * `fields`: Comma separated list of fields to include or exclude.
  * `include_fields`: Whether `fields` is an include (default) or exclude list.

**Note**: without an explicit `sort` the results may be inconsistent across
pages (duplicates, missing never-logged-in users).

## Examples

    User.all(api_client, [q: "identities.connection:'myconnection'",
                          sort: "email:1"])
"""
@spec all(Api.t, list) :: Result.t(any, [t])
def all(api_client, options \\ []) do
  # Whitelist the supported query params and force search_engine v2.
  params =
    options
    |> Param.take([:per_page, :page, :sort, :connection, :fields, :include_fields, :q])
    |> Option.map(&Keyword.put(&1, :search_engine, "v2"))

  endpoint = URIE.merge_query(@path, params)

  api_client
  |> Api.update_endpoint(endpoint)
  |> Api.get(headers: Token.http_header(api_client.credentials), as: [%__MODULE__{}])
end
@doc """
Fetches a single user by id.

https://auth0.com/docs/api/management/v2#!/Users/get_users_by_id

## Options

  * `fields`: List of fields to include or exclude from the result.
  * `include_fields`: If the fields specified are to be included in the result.

## Examples

    User.get("foo", api_client)
    User.get("foo", api_client, fields: ["email", "user_id"])
    User.get("foo", api_client, fields: ["email"], include_fields: true)
"""
@spec get(String.t, Api.t, list) :: Result.t(any, t)
def get(id, api_client, options \\ []) when is_binary(id) do
  # /api/v2/users/<id>?fields=...&include_fields=...
  endpoint =
    @path
    |> URIE.merge_path(id)
    |> URIE.merge_query(Param.take(options, [:fields, :include_fields]))

  api_client
  |> Api.update_endpoint(endpoint)
  |> Api.get(headers: Token.http_header(api_client.credentials), as: %__MODULE__{})
end
@doc """
https://auth0.com/docs/api/management/v2#!/Users/post_users
You must provide a valid `connection` name for any new user. You will have
to check what fields are required for the connection type. For example,
an Auth0 DB requires `email` and `password`.
## Examples
User.create(%{email: "m.burns@example.org",
password: "foobar",
connection: "Username-Password-Authentication",
verify_email: false,
email_verified: true,
user_metadata: %{name: "Montgomery Burns"}}, api_client)
"""
@spec create(map, Api.t) :: Result.t(any, t)
def create(body, api_client) when is_map(body) do
body[:connection] || {:error, "You must specify a Connection for a new User."}
api_client
|> Api.update_endpoint(@path)
|> Api.post(body, headers: Token.http_header(api_client.credentials),
as: %__MODULE__{})
end
@doc """
https://auth0.com/docs/api/management/v2#!/Users/patch_users_by_id
`user_metadata` and `app_metadata` fields will be shallow merged instead of
replaced.
You must provide the `connection` field when you want to update `email_verified`,
`phone_verified`, `username` or `password`. And the `client_id` as well if
you want to update `email` or `phone_number`.
## Examples
User.update(api_client, %{client_id: "xyz",
connection: "myconnection",
email: "new@example.org",
verify_email: true})
"""
@spec update(String.t, map, Api.t) :: Result.t(any, t)
def update(id, body, api_client) when is_map(body) do
path = URIE.merge_path(@path, id)
api_client
|> Api.update_endpoint(path)
|> Api.patch(body, headers: Token.http_header(api_client.credentials),
as: %__MODULE__{})
end
@doc """
https://auth0.com/docs/api/management/v2#!/Users/delete_users_by_id
"""
@spec delete(String.t, Api.t) :: Result.t(any, atom)
def delete(id, api_client) do
path = URIE.merge_path(@path, id)
api_client
|> Api.update_endpoint(path)
|> Api.delete(headers: Token.http_header(api_client.credentials))
end
@doc """
https://auth0.com/docs/api/management/v2#!/Users/get_enrollments
"""
@spec enrollments(String.t, Api.t) :: Result.t(any, list)
def enrollments(id, api_client) do
path = URIE.merge_path(@path, "#{id}/enrollments")
api_client
|> Api.update_endpoint(path)
|> Api.get(headers: Token.http_header(api_client.credentials),
as: [%Zeroth.User.Enrollment{}])
end
@doc """
https://auth0.com/docs/api/management/v2#!/Users/get_logs_by_user
**Note**: `include_totals` is not supported.
## Sortable fields
* `date`: The moment when the event occured.
* `connection`: The connection related to the event.
* `client_id`: The client id related to the event
* `client_name`: The name of the client related to the event.
* `ip`: The IP address from where the request that caused the log entry originated.
* `user_id`: The user id related to the event.
* `user_name`: The user name related to the event.
* `description`: The description of the event.
* `user_agent`: The user agent that is related to the event.
* `type`: The event type. Refer to the event acronym mappings above for a list of possible event types.
* `details`: The details object of the event.
* `strategy`: The connection strategy related to the event.
* `strategy_type`: The connection strategy type related to the event.
"""
@spec logs(String.t, Api.t, list) :: Result.t(any, [Zeroth.Log.t])
def logs(id, api_client, options \\ []) do
query = Param.take(options, [:per_page,
:page,
:sort])
path = @path
|> URIE.merge_path("#{id}/logs")
|> URIE.merge_query(query)
api_client
|> Api.update_endpoint(path)
|> Api.get(headers: Token.http_header(api_client.credentials),
as: [%Zeroth.Log{}])
end
@doc """
https://auth0.com/docs/api/management/v2#!/Users/post_identities
**Note**: The `link_with` way is not supported.
## Examples
User.link("auth0|59383521b3c34a15589c5577",
%{provider: "twitter", user_id: "1111111"},
api_client)
"""
@spec link(String.t, map, Api.t) :: Result.t(any, list)
def link(id, body, api_client) when is_map(body) do
path = URIE.merge_path(@path, "#{id}/identities")
api_client
|> Api.update_endpoint(path)
|> Api.post(body, headers: Token.http_header(api_client.credentials))
end
@doc """
https://auth0.com/docs/api/management/v2#!/Users/delete_provider_by_user_id
## Examples
User.unlink("auth0|59383521b3c34a15589c5577",
%{provider: "twitter", user_id: "1111111"},
api_client)
"""
@spec unlink(String.t, map, Api.t) :: Result.t(any, list)
def unlink(id, %{provider: provider, client_id: snd_id}, api_client) do
path = URIE.merge_path(@path, "#{id}/identities/#{provider}/#{snd_id}")
api_client
|> Api.update_endpoint(path)
|> Api.delete(headers: Token.http_header(api_client.credentials))
end
@doc """
https://auth0.com/docs/api/management/v2#!/Users/delete_multifactor_by_provider)
## Examples
User.delete_multifactor("auth0|59383521b3c34a15589c5577",
"google-authenticator",
api_client)
"""
@spec delete_multifactor(String.t, String.t, Api.t) :: Result.t(any, atom)
def delete_multifactor(id, provider, api_client) do
path = URIE.merge_path(@path, "#{id}/multifactor/#{provider}")
api_client
|> Api.update_endpoint(path)
|> Api.delete(headers: Token.http_header(api_client.credentials))
end
@doc """
https://auth0.com/docs/api/management/v2#!/Users/post_recovery_code_regeneration
"""
@spec generate_recovery_code(String.t, Api.t) :: Result.t(any, map)
def generate_recovery_code(id, api_client) do
path = URIE.merge_path(@path, "#{id}/recovery-code-regeneration")
api_client
|> Api.update_endpoint(path)
|> Api.post(headers: Token.http_header(api_client.credentials))
end
end
| 34.714286 | 147 | 0.597082 |
1c726ce34accd3f5788c968121f55fdea33aad91 | 1,115 | exs | Elixir | config/config.exs | cryptobird/ex_iss | b6124614e68f09bedb1ba8739aab1b4a66014b40 | [
"WTFPL"
] | 4 | 2015-12-08T14:00:37.000Z | 2020-05-11T11:24:45.000Z | config/config.exs | cryptobird/ex_iss | b6124614e68f09bedb1ba8739aab1b4a66014b40 | [
"WTFPL"
] | null | null | null | config/config.exs | cryptobird/ex_iss | b6124614e68f09bedb1ba8739aab1b4a66014b40 | [
"WTFPL"
] | null | null | null | # This file is responsible for configuring your application
# and its dependencies with the aid of the Mix.Config module.
use Mix.Config
# This configuration is loaded before any dependency and is restricted
# to this project. If another project depends on this project, this
# file won't be loaded nor affect the parent project. For this reason,
# if you want to provide default values for your application for
# 3rd-party users, it should be done in your "mix.exs" file.
# You can configure your application as:
#
# config :ex_iss, key: :value
#
# And access this configuration in your application as:
#
# Application.get_env(:ex_iss, :key)
#
# Or configure a 3rd-party app:
#
# config :logger, level: :info
#
# It is also possible to import configuration files, relative to this
# directory. For example, you can emulate configuration per environment
# by uncommenting the line below and defining dev.exs, test.exs and such.
# Configuration from the imported file will override the ones defined
# here (which is why it is important to import them last).
#
# import_config "#{Mix.env}.exs"
| 35.967742 | 73 | 0.750673 |
1c727058c72a197a240e62225f6018e1a7446087 | 3,477 | ex | Elixir | test/support/exemplars/reservation_focused.ex | brownt23/crit19 | c45c7b3ae580c193168d83144da0eeb9bc91c8a9 | [
"MIT"
] | 6 | 2019-07-16T19:31:23.000Z | 2021-06-05T19:01:05.000Z | test/support/exemplars/reservation_focused.ex | brownt23/crit19 | c45c7b3ae580c193168d83144da0eeb9bc91c8a9 | [
"MIT"
] | null | null | null | test/support/exemplars/reservation_focused.ex | brownt23/crit19 | c45c7b3ae580c193168d83144da0eeb9bc91c8a9 | [
"MIT"
] | 3 | 2020-02-24T23:38:27.000Z | 2020-08-01T23:50:17.000Z | defmodule Crit.Exemplars.ReservationFocused do
use ExUnit.CaseTemplate
use Crit.TestConstants
alias Crit.Schemas.{Animal, Procedure}
alias Crit.Sql
alias Ecto.Datespan
alias Crit.Servers.Institution
alias CritBiz.ViewModels.Reservation.AfterTheFact, as: VM
alias Crit.Reservations.ReservationApi
import Ecto.Query
import ExUnit.Assertions
defp named_thing_inserter(template) do
fn name ->
template
|> Map.put(:name, name)
|> Sql.insert!(@institution)
end
end
defp inserted_named_ids(names, template) do
names
|> Enum.map(named_thing_inserter template)
|> EnumX.ids
end
# These are available for any reasonable reservation date.
def inserted_animal_ids(names, species_id) do
inserted_named_ids names, %Animal{
species_id: species_id,
span: Datespan.inclusive_up(~D[1990-01-01])
}
end
def ignored_animal(name, species_id) do
inserted_animal_ids([name], species_id)
:ok
end
def inserted_procedure_ids(names, species_id) do
inserted_named_ids(
names,
%Procedure{species_id: species_id, frequency_id: @unlimited_frequency_id})
end
def ignored_procedure(name, species_id) do
inserted_procedure_ids([name], species_id)
:ok
end
def timeslot do
Institution.timeslots(@institution)
|> List.first
end
def morning_timeslot do
hard_coded = 1
assert Institution.timeslot_name(hard_coded, @institution) =~ "morning"
hard_coded
end
def evening_timeslot do
hard_coded = 3
assert Institution.timeslot_name(hard_coded, @institution) =~ "evening"
hard_coded
end
def timeslot_id do
timeslot().id
end
def timeslot_name do
timeslot().name
end
defp insert_or_create_x_ids(schema, inserter, names, species_id) do
existing =
(from x in schema, where: x.name in ^names)
|> Sql.all(@institution)
existing_ids = EnumX.ids(existing)
existing_names = EnumX.names(existing)
needed = Enum.reject(names, fn name ->
Enum.member?(existing_names, name)
end)
new_ids = apply(__MODULE__, inserter, [needed, species_id])
Enum.concat([existing_ids, new_ids])
end
defp insert_or_create_animal_ids(names, species_id),
do: insert_or_create_x_ids(Animal, :inserted_animal_ids, names, species_id)
defp insert_or_create_procedure_ids(names, species_id),
do: insert_or_create_x_ids(Procedure, :inserted_procedure_ids, names, species_id)
def ready_to_reserve!(species_id, animal_names, procedure_names, opts \\ []) do
opts = Enum.into(opts, %{timeslot_id: timeslot_id(),
date: ~D[2019-01-01],
responsible_person: Faker.Name.name()})
span = Institution.timespan(opts.date, opts.timeslot_id, @institution)
animal_ids = insert_or_create_animal_ids(animal_names, species_id)
procedure_ids = insert_or_create_procedure_ids(procedure_names, species_id)
%VM{
species_id: species_id,
timeslot_id: opts.timeslot_id,
date: opts.date,
span: span,
responsible_person: opts.responsible_person,
chosen_animal_ids: animal_ids,
chosen_procedure_ids: procedure_ids
}
end
def reserved!(species_id, animal_names, procedure_names, opts \\ []) do
{:ok, reservation} =
ready_to_reserve!(species_id, animal_names, procedure_names, opts)
|> ReservationApi.create(@institution)
reservation
end
end
| 27.377953 | 85 | 0.70463 |
1c728b75af6c89c2d8ccdf6e02101d0804283b90 | 3,953 | exs | Elixir | test/teiserver/coordinator/joining_test.exs | icexuick/teiserver | 22f2e255e7e21f977e6b262acf439803626a506c | [
"MIT"
] | null | null | null | test/teiserver/coordinator/joining_test.exs | icexuick/teiserver | 22f2e255e7e21f977e6b262acf439803626a506c | [
"MIT"
] | null | null | null | test/teiserver/coordinator/joining_test.exs | icexuick/teiserver | 22f2e255e7e21f977e6b262acf439803626a506c | [
"MIT"
] | null | null | null | defmodule Teiserver.Coordinator.JoiningTest do
use Central.ServerCase, async: false
alias Teiserver.Battle.Lobby
alias Teiserver.Common.PubsubListener
alias Teiserver.Coordinator
alias Teiserver.Client
alias Teiserver.Account.UserCache
import Teiserver.TeiserverTestLib,
only: [tachyon_auth_setup: 0, _tachyon_send: 2, _tachyon_recv: 1]
setup do
Teiserver.Coordinator.start_coordinator()
%{socket: socket, user: user} = tachyon_auth_setup()
# User needs to be a moderator (at this time) to start/stop Coordinator mode
UserCache.update_user(%{user | moderator: true})
Client.refresh_client(user.id)
battle_data = %{
cmd: "c.lobby.create",
name: "Coordinator #{:rand.uniform(999_999_999)}",
nattype: "none",
port: 1234,
game_hash: "string_of_characters",
map_hash: "string_of_characters",
map_name: "koom valley",
game_name: "BAR",
engine_name: "spring-105",
engine_version: "105.1.2.3",
settings: %{
max_players: 12
}
}
data = %{cmd: "c.lobby.create", lobby: battle_data}
_tachyon_send(socket, data)
reply = _tachyon_recv(socket)
lobby_id = reply["lobby"]["id"]
Lobby.start_coordinator_mode(lobby_id)
listener = PubsubListener.new_listener(["legacy_battle_updates:#{lobby_id}"])
{:ok, socket: socket, user: user, lobby_id: lobby_id, listener: listener}
end
test "welcome message", %{socket: socket, user: user, lobby_id: lobby_id, listener: listener} do
consul_state = Coordinator.call_consul(lobby_id, :get_all)
assert consul_state.welcome_message == nil
data = %{cmd: "c.lobby.message", userid: user.id, message: "!force welcome-message This is the welcome message"}
_tachyon_send(socket, data)
messages = PubsubListener.get(listener)
assert messages == [{:battle_updated, lobby_id, {user.id, "New welcome message set to: This is the welcome message", lobby_id}, :say}]
consul_state = Coordinator.call_consul(lobby_id, :get_all)
assert consul_state.welcome_message == "This is the welcome message"
# Now a new user joins the battle
%{socket: socket2, user: user2} = tachyon_auth_setup()
data = %{cmd: "c.lobby.join", lobby_id: lobby_id}
_tachyon_send(socket2, data)
reply = _tachyon_recv(socket2)
assert reply == %{
"cmd" => "s.lobby.join",
"result" => "waiting_for_host"
}
# Accept them
data = %{cmd: "c.lobby.respond_to_join_request", userid: user2.id, response: "approve"}
_tachyon_send(socket, data)
_battle = _tachyon_recv(socket2)
# Request status message for the player
status_request = _tachyon_recv(socket2)
assert status_request["cmd"] == "s.lobby.request_status"
# Send the battle status
data = %{
cmd: "c.lobby.update_status",
player: true,
sync: 1,
team_number: 0,
ally_team_number: 0,
side: 0,
team_colour: 0
}
_tachyon_send(socket2, data)
# Expect Coordinator mode announcement
reply = _tachyon_recv(socket2)
assert reply == %{
"cmd" => "s.lobby.announce",
"message" => "Coordinator mode enabled",
"sender" => user.id
}
# Expect welcome message
reply = _tachyon_recv(socket2)
assert reply == %{
"cmd" => "s.lobby.announce",
"message" => " #{user2.name}: ####################",
"sender" => Coordinator.get_coordinator_userid()
}
reply = _tachyon_recv(socket2)
assert reply == %{
"cmd" => "s.lobby.announce",
"message" => " #{user2.name}: This is the welcome message",
"sender" => Coordinator.get_coordinator_userid()
}
reply = _tachyon_recv(socket2)
assert reply == %{
"cmd" => "s.lobby.announce",
"message" => " #{user2.name}: ####################",
"sender" => Coordinator.get_coordinator_userid()
}
end
test "blacklist" do
end
test "whitelist" do
end
end
| 30.175573 | 138 | 0.649633 |
1c72ca333e91f57cc3365f0da9a7e7bad2e2c448 | 789 | ex | Elixir | lib/admissions/slack.ex | doomspork/admissions | a1fd7946dc980189f47a5b808c12e56655fdedfb | [
"Apache-2.0"
] | 18 | 2018-12-01T22:55:24.000Z | 2022-03-11T04:12:43.000Z | lib/admissions/slack.ex | doomspork/admissions | a1fd7946dc980189f47a5b808c12e56655fdedfb | [
"Apache-2.0"
] | 14 | 2018-12-01T01:00:33.000Z | 2021-06-24T00:40:39.000Z | lib/admissions/slack.ex | doomspork/admissions | a1fd7946dc980189f47a5b808c12e56655fdedfb | [
"Apache-2.0"
] | 1 | 2019-02-10T16:12:15.000Z | 2019-02-10T16:12:15.000Z | defmodule Admissions.Slack do
@moduledoc """
A module for interacting with the Slack API
"""
@invite_url "https://elixirschool.slack.com/api/users.admin.invite"
@spec invite(String.t()) :: :ok | {:error, String.t()}
def invite(email) do
email
|> slack_invite()
|> slack_response()
end
defp slack_invite(email) do
data = [email: email, set_active: true, token: slack_token()]
HTTPoison.post(@invite_url, {:form, data})
end
defp slack_response({:ok, %{body: body}}) do
case Jason.decode(body) do
{:ok, %{"ok" => true}} -> :ok
{:ok, %{"error" => reason}} -> {:error, reason}
end
end
defp slack_response({:error, _reason}) do
{:error, "unexpected_error"}
end
defp slack_token, do: System.get_env("SLACK_TOKEN")
end
| 23.909091 | 69 | 0.631179 |
1c72f5ff4e6cb47686885ae5c0bd410d1b8aeba4 | 563 | ex | Elixir | apps/rtmp/lib/rtmp/protocol/messages/abort.ex | tiensonqin/elixir-media-libs | 87f17e2b23bf8380e785423652910bfa7d3bb47c | [
"MIT"
] | null | null | null | apps/rtmp/lib/rtmp/protocol/messages/abort.ex | tiensonqin/elixir-media-libs | 87f17e2b23bf8380e785423652910bfa7d3bb47c | [
"MIT"
] | null | null | null | apps/rtmp/lib/rtmp/protocol/messages/abort.ex | tiensonqin/elixir-media-libs | 87f17e2b23bf8380e785423652910bfa7d3bb47c | [
"MIT"
] | null | null | null | defmodule Rtmp.Protocol.Messages.Abort do
@moduledoc """
Message used to notify the peer that if it is waiting
for chunks to complete a message, then discard the partially
received message
"""
@behaviour Rtmp.Protocol.RawMessage
@type t :: %__MODULE__{}
defstruct stream_id: nil
def deserialize(data) do
<<stream_id::32>> = data
%__MODULE__{stream_id: stream_id}
end
def serialize(message = %__MODULE__{}) do
{:ok, <<message.stream_id::size(4)-unit(8)>>}
end
def get_default_chunk_stream_id(%__MODULE__{}), do: 2
end
| 20.851852 | 62 | 0.699822 |
1c72fceff95411db5131352c637b1c6a9af0c514 | 3,116 | ex | Elixir | lib/seven.ex | mitchellhenke/AdventOfCode2020 | 8d5e89ce0d17697396344e35f76a24e4580d25dd | [
"MIT"
] | null | null | null | lib/seven.ex | mitchellhenke/AdventOfCode2020 | 8d5e89ce0d17697396344e35f76a24e4580d25dd | [
"MIT"
] | null | null | null | lib/seven.ex | mitchellhenke/AdventOfCode2020 | 8d5e89ce0d17697396344e35f76a24e4580d25dd | [
"MIT"
] | null | null | null | defmodule Adventofcode2020.Seven do
@external_resource "data/seven.txt"
@inverse_bags File.read!("data/seven.txt")
|> String.split("\n", trim: true)
|> Enum.map(fn bag ->
[name, bags] = String.split(bag, " contain ", trim: true)
bags =
String.split(bags, ~r/,\s*/)
|> Enum.map(fn bag ->
[number, bag_type] = String.split(bag, " ", parts: 2)
bag_type =
String.replace(bag_type, "bags", "bag")
|> String.replace(".", "")
if number == "no" do
nil
else
{String.to_integer(number), bag_type}
end
end)
{String.replace(name, "bags", "bag"), bags}
end)
|> Enum.reduce(%{}, fn {bag_name, bags}, acc ->
if bags == [nil] do
acc
else
Enum.reduce(bags, acc, fn {count, inner_bag_name}, acc ->
Map.put_new(acc, bag_name, [])
|> Map.update(
inner_bag_name,
[{count, bag_name}],
&[{count, bag_name} | &1]
)
end)
end
end)
@bags File.read!("data/seven.txt")
|> String.split("\n", trim: true)
|> Enum.map(fn bag ->
[name, bags] = String.split(bag, " contain ", trim: true)
bags =
String.split(bags, ~r/,\s*/)
|> Enum.map(fn bag ->
[number, bag_type] = String.split(bag, " ", parts: 2)
bag_type =
String.replace(bag_type, "bags", "bag")
|> String.replace(".", "")
if number == "no" do
nil
else
{String.to_integer(number), bag_type}
end
end)
{String.replace(name, "bags", "bag"), bags}
end)
|> Enum.reduce(%{}, fn {bag_name, bags}, acc ->
if bags == [nil] do
Map.put(acc, bag_name, [])
else
Map.put(acc, bag_name, bags)
end
end)
def first do
count_bags(@inverse_bags, "shiny gold bag", MapSet.new())
end
def second do
count_all_bags(@bags, "shiny gold bag")
end
defp count_bags(bags_map, name, bag_set) do
Map.fetch!(bags_map, name)
|> do_count_bags(bag_set, bags_map)
end
defp do_count_bags([], bag_set, _bags_map), do: bag_set
defp do_count_bags([{_count, bag} | rest], bag_set, bags_map) do
bags = Map.get(bags_map, bag, [])
do_count_bags(rest, do_count_bags(bags, MapSet.put(bag_set, bag), bags_map), bags_map)
end
defp count_all_bags(bags_map, bag_color) do
bags = Map.get(bags_map, bag_color, [])
Enum.reduce(bags, 1, fn {count, bag}, acc ->
acc + count * count_all_bags(bags_map, bag)
end)
end
end
| 31.16 | 90 | 0.453466 |
1c73217dad7a04797151193534173014817ed314 | 401 | ex | Elixir | 05-chapter/11_example.ex | herminiotorres/programming-elixir | 70add5ec9fe7f91129da0a4e39ab329afb9be598 | [
"MIT"
] | null | null | null | 05-chapter/11_example.ex | herminiotorres/programming-elixir | 70add5ec9fe7f91129da0a4e39ab329afb9be598 | [
"MIT"
] | null | null | null | 05-chapter/11_example.ex | herminiotorres/programming-elixir | 70add5ec9fe7f91129da0a4e39ab329afb9be598 | [
"MIT"
] | null | null | null | list = [1, 3, 5, 7, 9]
"# => List:" |> IO.puts()
list |> IO.inspect()
IO.puts("")
"# => List: element * 2" |> IO.puts()
Enum.map(list, fn elem -> elem * 2 end) |> IO.inspect()
IO.puts("")
"# => List: element * element" |> IO.puts()
Enum.map(list, fn elem -> elem * elem end) |> IO.inspect()
IO.puts("")
"# => List: element > 6" |> IO.puts()
Enum.map(list, fn elem -> elem > 6 end) |> IO.inspect()
| 23.588235 | 58 | 0.53616 |
1c734ed6dc22083c1a09bcc0a09eb9f43fd5e0db | 15,468 | ex | Elixir | lib/aws/request.ex | qyon-brazil/aws-elixir | f7f21bebffc6776f95ffe9ef563cf368773438af | [
"Apache-2.0"
] | null | null | null | lib/aws/request.ex | qyon-brazil/aws-elixir | f7f21bebffc6776f95ffe9ef563cf368773438af | [
"Apache-2.0"
] | null | null | null | lib/aws/request.ex | qyon-brazil/aws-elixir | f7f21bebffc6776f95ffe9ef563cf368773438af | [
"Apache-2.0"
] | 1 | 2020-10-28T08:56:54.000Z | 2020-10-28T08:56:54.000Z | defmodule AWS.Request do
@moduledoc false
alias AWS.Client
alias AWS.Request.Internal
alias AWS.ServiceMetadata
alias AWS.Util
@valid_protocols ~w(query json rest-json rest-xml)
@doc """
Request an AWS Service using a POST request with a protocol.
"""
def request_post(%Client{} = client, %ServiceMetadata{} = metadata, action, input, options) do
client = prepare_client(client, metadata)
host = build_host(client, metadata)
url = build_url(client, host)
headers = [
{"Host", host},
{"Content-Type", metadata.content_type}
]
headers =
if metadata.protocol == "json" do
[{"X-Amz-Target", "#{metadata.target_prefix}.#{action}"} | headers]
else
headers
end
input =
if metadata.protocol == "query" do
Map.merge(input, %{"Action" => action, "Version" => metadata.api_version})
else
input
end
payload = encode!(client, metadata.protocol, input)
headers = sign_v4(client, "POST", url, headers, payload)
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, metadata.protocol, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} ->
error
end
end
def request_rest(
%Client{} = client,
%ServiceMetadata{} = metadata,
method,
path,
query,
headers,
input,
options,
success_status_code
) do
client = prepare_client(client, metadata)
input = input || %{}
host =
if metadata.endpoint_prefix == "s3-control" do
account_id = :proplists.get_value("x-amz-account-id", headers)
build_host(account_id, client, metadata)
else
build_host(client, metadata)
end
url =
client
|> build_url(host, path)
|> add_query(query, client)
additional_headers = [{"Host", host}, {"Content-Type", metadata.content_type}]
headers = add_headers(additional_headers, headers)
{send_body_as_binary?, options} = Keyword.pop(options, :send_body_as_binary?)
payload =
if send_body_as_binary? do
Map.fetch!(input, "Body")
else
encode!(client, metadata.protocol, input)
end
headers = sign_v4(client, method, url, headers, payload)
{response_header_parameters, options} = Keyword.pop(options, :response_header_parameters)
case Client.request(client, method, url, payload, headers, options) do
{:ok, %{status_code: status_code, body: body} = response}
when is_nil(success_status_code) and status_code in [200, 202, 204]
when status_code == success_status_code ->
body =
if body != "" do
decoded = decode!(client, metadata.protocol, body)
case response_header_parameters do
[_ | _] ->
merge_body_with_response_headers(decoded, response, response_header_parameters)
_ ->
decoded
end
end
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} ->
error
end
end
defp prepare_client(client, metadata) do
client = %{client | service: metadata.signing_name}
if metadata.global? do
%{client | region: metadata.credential_scope}
else
client
end
end
defp build_host(%Client{region: "local"} = client, _) do
if client.endpoint do
client.endpoint
else
"localhost"
end
end
defp build_host(%Client{} = client, %ServiceMetadata{} = metadata) do
if metadata.global? do
"#{metadata.endpoint_prefix}.#{client.endpoint}"
else
"#{metadata.endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp build_host(account_id, %Client{} = client, %ServiceMetadata{} = metadata) do
cond do
client.region == "local" ->
build_host(client, metadata)
account_id == :undefined ->
raise "missing account_id"
true ->
"#{account_id}.#{build_host(client, metadata)}"
end
end
defp build_url(%Client{} = client, host, path \\ "/") do
"#{client.proto}://#{host}:#{client.port}#{path}"
end
defp add_query(url, [], _client) do
url
end
defp add_query(url, query, client) do
querystring = Client.encode!(client, query, :query)
"#{url}?#{querystring}"
end
defp encode!(%Client{} = client, protocol, payload) when protocol in @valid_protocols and is_map(payload) do
encode_format =
case protocol do
"query" -> :query
"rest-xml" -> :xml
json_type when json_type in ~w(json rest-json) -> :json
end
Client.encode!(client, payload, encode_format)
end
defp decode!(%Client{} = client, protocol, payload) when protocol in @valid_protocols do
decode_format =
case protocol do
xml_type when xml_type in ~w(query rest-xml) -> :xml
json_type when json_type in ~w(json rest-json) -> :json
end
Client.decode!(client, payload, decode_format)
end
defp merge_body_with_response_headers(body, response, response_header_parameters) do
Enum.reduce(response_header_parameters, body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
end
@doc """
Generate headers with an AWS signature version 4 for the specified request.
"""
def sign_v4(client, method, url, headers, body) do
sign_v4(client, now(), method, url, headers, body)
end
@doc """
Generate headers with an AWS signature version 4 for the specified request
using the specified time.
"""
def sign_v4(client, now, method, url, headers, body) do
long_date = NaiveDateTime.to_iso8601(now, :basic) <> "Z"
short_date = Date.to_iso8601(now, :basic)
headers =
headers
|> Internal.add_date_header(long_date)
|> Internal.add_content_hash(body)
|> Internal.add_security_token(client)
canonical_request = Internal.canonical_request(method, url, headers, body)
hashed_canonical_request = Util.sha256_hexdigest(canonical_request)
credential_scope = Internal.credential_scope(short_date, client.region,
client.service)
signing_key = Internal.signing_key(client.secret_access_key, short_date,
client.region, client.service)
string_to_sign = Internal.string_to_sign(long_date, credential_scope,
hashed_canonical_request)
signature = Util.hmac_sha256_hexdigest(signing_key, string_to_sign)
signed_headers = Internal.signed_headers(headers)
authorization = Internal.authorization(client.access_key_id,
credential_scope, signed_headers,
signature)
Internal.add_authorization_header(headers, authorization)
end
@doc """
Generate headers with an AWS signature version 4 for the specified request that can be transformed into a query string.
"""
def sign_v4_query(client, method, url, headers, body) do
sign_v4_query(client, now(), method, url, headers, body)
end
@doc """
Generate headers with an AWS signature version 4 for the specified request using the specified time that can be transformed into a query string.
"""
def sign_v4_query(client, now, method, url, headers, body) do
long_date = NaiveDateTime.to_iso8601(now, :basic) <> "Z"
short_date = Date.to_iso8601(now, :basic)
headers = Internal.add_date_header(headers, long_date)
canonical_request = Internal.canonical_request(method, url, headers, body)
hashed_canonical_request = Util.sha256_hexdigest(canonical_request)
credential_scope = Internal.credential_scope(short_date, client.region,
client.service)
signing_key = Internal.signing_key(client.secret_access_key, short_date,
client.region, client.service)
string_to_sign = Internal.string_to_sign(long_date, credential_scope,
hashed_canonical_request)
signature = Util.hmac_sha256_hexdigest(signing_key, string_to_sign)
signed_headers = Internal.signed_headers(headers)
credential = Enum.join([client.access_key_id, short_date, client.region,
client.service, "aws4_request"], "/")
result = [{"X-Amz-Algorithm", "AWS4-HMAC-SHA256"},
{"X-Amz-Credential", credential},
{"X-Amz-Date", long_date},
{"X-Amz-SignedHeaders", signed_headers},
{"X-Amz-Signature", signature}]
if expiry = :proplists.get_value("X-Amz-Expires", headers, nil) do
[{"X-Amz-Expires", expiry}|result]
else
result
end
end
@doc """
Build request params (header or querystring) based on a list of key-value
pairs representing the `mapping` from the input name (key) to the param
name (value) and a map with the input `params`.
"""
def build_params(mapping, %{} = params) when is_list(mapping) do
Enum.reduce(mapping, {[], params}, &move_param_to_header/2)
end
defp move_param_to_header({param_name, header_name}, {headers, params}) do
case Map.get(params, param_name) do
nil ->
{headers, params}
param_value ->
headers = [{header_name, param_value} | headers]
params = Map.delete(params, param_name)
{headers, params}
end
end
@doc """
Include additions only if they do not already exist in the provided list.
"""
def add_headers([], headers) do
headers
end
def add_headers([{name, _} = header | additions], headers) do
case List.keyfind(headers, name, 0) do
nil -> add_headers(additions, [header | headers])
{_, _} -> add_headers(additions, headers)
end
end
defp now() do
NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second)
end
end
defmodule AWS.Request.Internal do
@moduledoc false
@doc """
Add an `Authorization` header with an AWS4-HMAC-SHA256 signature to the list
of headers.
"""
def add_authorization_header(headers, authorization) do
[{"Authorization", authorization}|headers]
end
@doc """
Add an `X-Amz-Date` header with a long date value in `YYMMDDTHHMMSSZ` format
to a list of headers.
"""
def add_date_header(headers, date) do
[{"X-Amz-Date", date}|headers]
end
@doc """
Add an `X-Amz-Security-Token` if credentials configurations are configured for it
"""
def add_security_token(headers, %AWS.Client{session_token: nil}), do: headers
def add_security_token(headers, %AWS.Client{session_token: session_token}),
do: [{"X-Amz-Security-Token", session_token}|headers]
@doc """
Add an X-Amz-Content-SHA256 header which is the hash of the payload.
This header is required for S3 when using the v4 signature. Adding it
in requests for all services does not cause any issues.
"""
def add_content_hash(headers, body) do
[{"X-Amz-Content-SHA256", AWS.Util.sha256_hexdigest(body)} | headers]
end
@doc """
Generate an AWS4-HMAC-SHA256 authorization signature.
"""
def authorization(access_key_id, credential_scope, signed_headers, signature) do
Enum.join(["AWS4-HMAC-SHA256 ",
"Credential=", access_key_id, "/", credential_scope, ", ",
"SignedHeaders=", signed_headers, ", ",
"Signature=", signature],
"")
end
@doc """
Strip leading and trailing whitespace around `name` and `value`, convert
`name` to lowercase, and add a trailing newline.
"""
def canonical_header({name, value}) do
name = String.downcase(name) |> String.trim()
value = String.trim(value)
name <> ":" <> value <> "\n"
end
@doc """
Convert a list of headers to canonical header format. Leading and trailing
whitespace around header names and values is stripped, header names are
lowercased, and headers are newline-joined in alphabetical order (with a
trailing newline).
"""
def canonical_headers(headers) do
Enum.map(headers, &canonical_header/1) |> Enum.sort |> Enum.join
end
@doc """
Process and merge request values into a canonical request for AWS signature
version 4.
"""
def canonical_request(method, url, headers, body) when is_atom(method) do
Atom.to_string(method)
|> String.upcase
|> canonical_request(url, headers, body)
end
def canonical_request(method, url, headers, body) do
{canonical_url, canonical_query_string} = split_url(url)
canonical_headers = canonical_headers(headers)
signed_headers = signed_headers(headers)
payload_hash = AWS.Util.sha256_hexdigest(body)
Enum.join([method, canonical_url, canonical_query_string,
canonical_headers, signed_headers, payload_hash], "\n")
end
@doc """
Generate a credential scope from a short date in `YYMMDD` format, a region
identifier and a service identifier.
"""
def credential_scope(short_date, region, service) do
Enum.join([short_date, region, service, "aws4_request"], "/")
end
@doc """
Strip leading and trailing whitespace around Name and convert it to
lowercase.
"""
def signed_header({name, _value}) do
String.downcase(name) |> String.trim()
end
@doc """
Convert a list of headers to canonicals signed header format. Leading and
trailing whitespace around names is stripped, header names are lowercased,
and header names are semicolon-joined in alphabetical order.
"""
def signed_headers(headers) do
Enum.map(headers, &signed_header/1) |> Enum.sort |> Enum.join(";")
end
@doc """
Generate a signing key from a secret access key, a short date in `YYMMDD`
format, a region identifier and a service identifier.
"""
def signing_key(secret_access_key, short_date, region, service) do
"AWS4" <> secret_access_key
|> AWS.Util.hmac_sha256(short_date)
|> AWS.Util.hmac_sha256(region)
|> AWS.Util.hmac_sha256(service)
|> AWS.Util.hmac_sha256("aws4_request")
end
@doc """
Strip the query string from the URL, if one if present, and return the URL
and the normalized query string as separate values.
"""
def split_url(url) do
url = URI.parse(url)
{url.path, normalize_query(url.query)}
end
@doc """
Sort query params by name first, then by value (if present). Append "=" to
params with missing value.
Example: "foo=bar&baz" becomes "baz=&foo=bar"
"""
def normalize_query(nil), do: ""
def normalize_query(""), do: ""
def normalize_query(query) do
query
|> String.split("&")
|> Enum.map(&String.split(&1, "="))
|> Enum.sort()
|> Enum.map_join("&", fn
[key, value] -> key <> "=" <> value
[key] -> key <> "="
end)
end
@doc """
Generate the text to sign from a long date in `YYMMDDTHHMMSSZ` format, a
credential scope and a hashed canonical request.
"""
def string_to_sign(long_date, credential_scope, hashed_canonical_request) do
Enum.join(["AWS4-HMAC-SHA256", long_date,
credential_scope, hashed_canonical_request], "\n")
end
end
| 32.091286 | 146 | 0.651862 |
1c73820a6f3bedda3f705771156d0a53f50c8c43 | 1,722 | exs | Elixir | back/mix.exs | HugoLefebvre/Epitech_TM_MP03 | 0ed161c956f7a10aec245fe2e17eb5a9b55f6075 | [
"MIT"
] | null | null | null | back/mix.exs | HugoLefebvre/Epitech_TM_MP03 | 0ed161c956f7a10aec245fe2e17eb5a9b55f6075 | [
"MIT"
] | null | null | null | back/mix.exs | HugoLefebvre/Epitech_TM_MP03 | 0ed161c956f7a10aec245fe2e17eb5a9b55f6075 | [
"MIT"
] | null | null | null | defmodule Api.Mixfile do
use Mix.Project
# Mix project definition for the `:api` Phoenix application.
# Adds the Phoenix and gettext compilers in front of the default compilers.
def project do
[
app: :api,
version: "0.0.1",
elixir: "~> 1.4",
elixirc_paths: elixirc_paths(Mix.env),
compilers: [:phoenix, :gettext] ++ Mix.compilers,
start_permanent: Mix.env == :prod,
aliases: aliases(),
deps: deps()
]
end
# Configuration for the OTP application.
#
# Type `mix help compile.app` for more information.
def application do
[
mod: {Api.Application, []},
extra_applications: [:logger, :runtime_tools]
]
end
# Specifies which paths to compile per environment.
# Tests additionally compile the helpers under test/support.
defp elixirc_paths(:test), do: ["lib", "test/support"]
defp elixirc_paths(_), do: ["lib"]
# Specifies your project dependencies.
#
# Type `mix help deps` for examples and options.
defp deps do
[
{:comeonin, "~> 4.0"},
{:bcrypt_elixir, "~> 1.0"},
{:cors_plug, "~> 2.0"},
{:joken, "~> 2.0"},
{:jason, "~> 1.1"},
{:phoenix, "~> 1.3.4"},
{:phoenix_pubsub, "~> 1.0"},
{:phoenix_ecto, "~> 3.2"},
{:postgrex, ">= 0.0.0"},
{:phoenix_html, "~> 2.10"},
{:phoenix_live_reload, "~> 1.0", only: :dev},
{:gettext, "~> 0.11"},
{:plug_cowboy, "~> 1.0"}
]
end
# Aliases are shortcuts or tasks specific to the current project.
# For example, to create, migrate and run the seeds file at once:
#
# $ mix ecto.setup
#
# See the documentation for `Mix` for more info on aliases.
defp aliases do
[
"ecto.setup": ["ecto.create", "ecto.migrate", "run priv/repo/seeds.exs"],
"ecto.reset": ["ecto.drop", "ecto.setup"],
test: ["ecto.create --quiet", "ecto.migrate", "test"]
]
end
end
| 26.090909 | 79 | 0.562718 |
1c73b246cfbb4a2363e8e2b257099ee78454c0b4 | 2,912 | ex | Elixir | lib/falcon_plus_api/api/host.ex | sawater/falcon-plus-api | a41b1cc04cc0d71b72341e803de88d16b4bba77d | [
"Apache-2.0"
] | 2 | 2018-05-03T14:35:37.000Z | 2018-07-30T09:32:38.000Z | lib/falcon_plus_api/api/host.ex | mon-suit/falcon-plus-api | a41b1cc04cc0d71b72341e803de88d16b4bba77d | [
"Apache-2.0"
] | null | null | null | lib/falcon_plus_api/api/host.ex | mon-suit/falcon-plus-api | a41b1cc04cc0d71b72341e803de88d16b4bba77d | [
"Apache-2.0"
] | null | null | null | defmodule FalconPlusApi.Api.Host do
alias Maxwell.Conn
alias FalconPlusApi.{Util, Sig, Api}
@doc """
* [Session](#/authentication) Required
### Request
```
{
"ids": [1,2,3,4],
"maintain_begin": 1497951907,
"maintain_end": 1497951907
}
```
or
```
{
"hosts": ["host.a","host.b"],
"maintain_begin": 1497951907,
"maintain_end": 1497951907
}
```
### Response
```Status: 200```
```{ "message": "Through: hosts, Affect row: 2" }```
"""
def maintain(sig, addr, opts \\ []) do
sig = Sig.get_sig(sig)
~s</api/v1/host/maintain>
|> Util.url(addr)
|> Conn.new()
|> Api.set_opts(opts)
|> Conn.put_req_header("Apitoken", sig)
|> Api.post
|> Api.get_result
end
@doc """
* [Session](#/authentication) Required
* ex. /api/v1/host/1647/hostgroup
* grp_name: hostgroup name
### Response
```Status: 200```
```[
{
"id": 78,
"grp_name": "tplB",
"create_user": "userA"
},
{
"id": 145,
"grp_name": "Owl_Default_Group",
"create_user": "userA"
}
]```
"""
def related_hostgroup(host_id, sig, addr, opts \\ []) do
sig = Sig.get_sig(sig)
~s</api/v1/host/#{host_id}/hostgroup>
|> Util.url(addr)
|> Conn.new()
|> Api.set_opts(opts)
|> Conn.put_req_header("Apitoken", sig)
|> Api.get
|> Api.get_result
end
@doc """
* [Session](#/authentication) Required
* ex. /api/v1/host/1647/template
* tpl_name: template name
### Response
```Status: 200```
```[
{
"id": 125,
"tpl_name": "tplA",
"parent_id": 0,
"action_id": 99,
"create_user": "root"
},
{
"id": 142,
"tpl_name": "tplB",
"parent_id": 0,
"action_id": 111,
"create_user": "root"
},
{
"id": 180,
"tpl_name": "tplC",
"parent_id": 0,
"action_id": 142,
"create_user": "root"
}
]```
"""
def related_template(host_id, sig, addr, opts \\ []) do
sig = Sig.get_sig(sig)
~s</api/v1/host/#{host_id}/template>
|> Util.url(addr)
|> Conn.new()
|> Api.set_opts(opts)
|> Conn.put_req_header("Apitoken", sig)
|> Api.get
|> Api.get_result
end
@doc """
* [Session](#/authentication) Required
### Request
```{"ids": [1,2,3,4]}```
or
```{"hosts": ["host.a","host.b"]}```
### Response
```Status: 200```
```{ "message": "Through: hosts, Affect row: 2" }```
"""
def reset(sig, addr, opts \\ []) do
sig = Sig.get_sig(sig)
~s</api/v1/host/maintain>
|> Util.url(addr)
|> Conn.new()
|> Api.set_opts(opts)
|> Conn.put_req_header("Apitoken", sig)
|> Api.delete
|> Api.get_result
end
end
| 20.8 | 58 | 0.490385 |
1c73b4f4346d709c59d4c13a7f03947e03aa30b2 | 1,103 | exs | Elixir | apps/day07/test/day07_test.exs | jwarwick/aoc_2019 | 04229b86829b72323498b57a6649fcc6f7c96406 | [
"MIT"
] | 2 | 2019-12-21T21:21:04.000Z | 2019-12-27T07:00:19.000Z | apps/day07/test/day07_test.exs | jwarwick/aoc_2019 | 04229b86829b72323498b57a6649fcc6f7c96406 | [
"MIT"
] | null | null | null | apps/day07/test/day07_test.exs | jwarwick/aoc_2019 | 04229b86829b72323498b57a6649fcc6f7c96406 | [
"MIT"
] | null | null | null | defmodule Day07Test do
use ExUnit.Case
doctest Day07
test "sample progs" do
p = Intcode.parse("3,15,3,16,1002,16,10,16,1,16,15,15,4,15,99,0,0")
assert 43210 == Day07.max_thrust(p, 0..4)
p = Intcode.parse("3,23,3,24,1002,24,10,24,1002,23,-1,23,101,5,23,23,1,24,23,23,4,23,99,0,0")
assert 54321 == Day07.max_thrust(p, 0..4)
p =
Intcode.parse(
"3,31,3,32,1002,32,10,32,1001,31,-2,31,1007,31,0,33,1002,33,7,33,1,33,31,31,1,32,31,31,4,31,99,0,0,0"
)
assert 65210 == Day07.max_thrust(p, 0..4)
end
test "part1" do
assert 22012 == Day07.part1()
end
test "sample feedback progs" do
# p = Intcode.parse("3,26,1001,26,-4,26,3,27,1002,27,2,27,1,27,26,27,4,27,1001,28,-1,28,1005,28,6,99,0,0,5")
# assert 139629729 == Day07.max_feedback_thrust(p, 5..9)
p =
Intcode.parse(
"3,52,1001,52,-5,52,3,53,1,52,56,54,1007,54,5,55,1005,55,26,1001,54,-5,54,1105,1,12,1,53,54,53,1008,54,0,55,1001,55,1,55,2,53,55,53,4,53,1001,56,-1,56,1005,56,6,99,0,0,0,0,10"
)
assert 18216 == Day07.max_feedback_thrust(p, 5..9)
end
end
| 30.638889 | 183 | 0.611061 |
1c73b6cfb7be81ba4dd12f7e4ec67e894eac4246 | 2,151 | ex | Elixir | clients/people/lib/google_api/people/v1/model/event.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/people/lib/google_api/people/v1/model/event.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/people/lib/google_api/people/v1/model/event.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.People.V1.Model.Event do
@moduledoc """
An event related to the person.
## Attributes
* `date` (*type:* `GoogleApi.People.V1.Model.Date.t`, *default:* `nil`) - The date of the event.
* `formattedType` (*type:* `String.t`, *default:* `nil`) - Output only. The type of the event translated and formatted in the
viewer's account locale or the `Accept-Language` HTTP header locale.
* `metadata` (*type:* `GoogleApi.People.V1.Model.FieldMetadata.t`, *default:* `nil`) - Metadata about the event.
* `type` (*type:* `String.t`, *default:* `nil`) - The type of the event. The type can be custom or one of these predefined
values:
* `anniversary`
* `other`
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:date => GoogleApi.People.V1.Model.Date.t(),
:formattedType => String.t(),
:metadata => GoogleApi.People.V1.Model.FieldMetadata.t(),
:type => String.t()
}
# `field/2` comes from `use GoogleApi.Gax.ModelBase`; `as:` nests a model.
field(:date, as: GoogleApi.People.V1.Model.Date)
field(:formattedType)
field(:metadata, as: GoogleApi.People.V1.Model.FieldMetadata)
field(:type)
end
# Delegates Poison decoding to the generated model helper.
defimpl Poison.Decoder, for: GoogleApi.People.V1.Model.Event do
def decode(value, options) do
GoogleApi.People.V1.Model.Event.decode(value, options)
end
end
# Encodes via the shared Gax model base.
defimpl Poison.Encoder, for: GoogleApi.People.V1.Model.Event do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 35.262295 | 129 | 0.696885 |
1c73bcc0bd6e7df3ea34904c023645c8bdcc262d | 1,553 | ex | Elixir | clients/android_enterprise/lib/google_api/android_enterprise/v1/model/administrator_web_token_spec_store_builder.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | 1 | 2018-12-03T23:43:10.000Z | 2018-12-03T23:43:10.000Z | clients/android_enterprise/lib/google_api/android_enterprise/v1/model/administrator_web_token_spec_store_builder.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | null | null | null | clients/android_enterprise/lib/google_api/android_enterprise/v1/model/administrator_web_token_spec_store_builder.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the elixir code generator program.
# Do not edit the class manually.
defmodule GoogleApi.AndroidEnterprise.V1.Model.AdministratorWebTokenSpecStoreBuilder do
@moduledoc """
## Attributes
* `enabled` (*type:* `boolean()`, *default:* `nil`) - Whether the Organize apps page is displayed. Default is true.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:enabled => boolean()
}
# `field/2` comes from `use GoogleApi.Gax.ModelBase`.
field(:enabled)
end
# Delegates Poison decoding to the generated model helper.
defimpl Poison.Decoder,
for: GoogleApi.AndroidEnterprise.V1.Model.AdministratorWebTokenSpecStoreBuilder do
def decode(value, options) do
GoogleApi.AndroidEnterprise.V1.Model.AdministratorWebTokenSpecStoreBuilder.decode(
value,
options
)
end
end
# Encodes via the shared Gax model base.
defimpl Poison.Encoder,
for: GoogleApi.AndroidEnterprise.V1.Model.AdministratorWebTokenSpecStoreBuilder do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 29.865385 | 119 | 0.746297 |
1c740193075b64b9f6c1993702986b5a75f36905 | 60 | ex | Elixir | server/lib/coyote_web/views/admin_view.ex | Spectory/SpectoPusher | d1ea61dce43a87169381f04080a7d5829d5d09f9 | [
"MIT"
] | null | null | null | server/lib/coyote_web/views/admin_view.ex | Spectory/SpectoPusher | d1ea61dce43a87169381f04080a7d5829d5d09f9 | [
"MIT"
] | 3 | 2018-05-23T20:07:41.000Z | 2018-06-01T11:58:23.000Z | server/lib/coyote_web/views/admin_view.ex | Spectory/coyote | d1ea61dce43a87169381f04080a7d5829d5d09f9 | [
"MIT"
] | 1 | 2017-06-16T18:29:44.000Z | 2017-06-16T18:29:44.000Z | defmodule CoyoteWeb.AdminView do
use CoyoteWeb, :view
end
| 15 | 32 | 0.8 |
1c741a21e70a0a344b939c6faa3a1920119da53e | 7,284 | ex | Elixir | lib/finch/telemetry.ex | qdentity/finch | ed28ffe378cefb44bc8912cfc82887bc6b4ee453 | [
"MIT"
] | null | null | null | lib/finch/telemetry.ex | qdentity/finch | ed28ffe378cefb44bc8912cfc82887bc6b4ee453 | [
"MIT"
] | null | null | null | lib/finch/telemetry.ex | qdentity/finch | ed28ffe378cefb44bc8912cfc82887bc6b4ee453 | [
"MIT"
] | null | null | null | defmodule Finch.Telemetry do
@moduledoc """
Telemetry integration.
Unless specified, all time's are in `:native` units.
Finch executes the following events:
* `[:finch, :queue, :start]` - Executed before checking out a connection from the pool.
#### Measurements
* `:system_time` - The system time
#### Metadata:
* `:pool` - The pool's pid
* `:scheme` - The scheme used in the connection. either `http` or `https`
* `:host` - The host address
* `:port` - the port to connect on.
* `[:finch, :queue, :stop]` - Executed after a connection is retrieved from the pool.
#### Measurements
* `:duration` - Duration to check out a pool connection.
* `:idle_time` - Elapsed time since the connection was last checked in or initialized.
#### Metadata
* `:pool` - The pool's pid
* `:scheme` - The scheme used in the connection. either `http` or `https`
* `:host` - The host address
* `:port` - the port to connect on.
* `[:finch, :queue, :exception]` - Executed if checking out a connection throws an exception.
#### Measurements
* `:duration` - The time it took before raising an exception
#### Metadata
* `:scheme` - The scheme used in the connection. either `http` or `https`
* `:host` - The host address
* `:port` - the port to connect on.
* `:kind` - The type of exception.
* `:error` - Error description or error data.
* `:stacktrace` - The stacktrace
* `[:finch, :connect, :start]` - Executed before opening a new connection.
If a connection is being re-used this event will *not* be executed.
#### Measurements
* `:system_time` - The system time
#### Metadata
* `:scheme` - The scheme used in the connection. either `http` or `https`
* `:host` - The host address
* `:port` - the port to connect on.
* `[:finch, :connect, :stop]` - Executed after a connection is opened.
#### Measurements
* `:duration` - Duration to connect to the host.
#### Metadata:
* `:scheme` - The scheme used in the connection. either `http` or `https`
* `:host` - The host address
* `:port` - the port to connect on.
* `:error` - This value is optional. It includes any errors that occurred while opening the connection.
* `[:finch, :request, :start]` - Executed before sending a request.
#### Measurements:
* `:system_time` - The system time
* `:idle_time` - Elapsed time since the connection was last checked in or initialized.
#### Metadata:
* `:scheme` - The scheme used in the connection. either `http` or `https`
* `:host` - The host address
* `:port` - the port to connect on.
* `:path` - The request path.
* `:method` - The request method.
* `[:finch, :request, :stop]` - Executed after a request is finished.
#### Measurements:
* `:duration` - Duration to make the request.
* `:idle_time` - Elapsed time since the connection was last checked in or initialized.
#### Metadata:
* `:scheme` - The scheme used in the connection. either `http` or `https`
* `:host` - The host address
* `:port` - the port to connect on.
* `:path` - The request path.
* `:method` - The request method.
* `:error` - This value is optional. It includes any errors that occurred while making the request.
* `[:finch, :response, :start]` - Executed before receiving the response.
#### Measurements:
* `:system_time` - The system time
* `:idle_time` - Elapsed time since the connection was last checked in or initialized.
#### Metadata:
* `:scheme` - The scheme used in the connection. either `http` or `https`
* `:host` - The host address
* `:port` - the port to connect on.
* `:path` - The request path.
* `:method` - The request method.
* `[:finch, :response, :stop]` - Executed after a response has been fully received.
#### Measurements:
* `:duration` - Duration to receive the response.
* `:idle_time` - Elapsed time since the connection was last checked in or initialized.
#### Metadata:
* `:scheme` - The scheme used in the connection. either `http` or `https`
* `:host` - The host address
* `:port` - the port to connect on.
* `:path` - The request path.
* `:method` - The request method.
* `:error` - This value is optional. It includes any errors that occurred while receiving the response.
* `[:finch, :reused_connection]` - Executed if an existing connection is reused. There are no measurements provided with this event.
#### Metadata:
* `:scheme` - The scheme used in the connection. either `http` or `https`
* `:host` - The host address
* `:port` - the port to connect on.
* `[:finch, :conn_max_idle_time_exceeded]` - Executed if a connection was discarded because the `conn_max_idle_time` had been reached.
#### Measurements:
* `:idle_time` - Elapsed time since the connection was last checked in or initialized.
#### Metadata
* `:scheme` - The scheme used in the connection. either `http` or `https`
* `:host` - The host address
* `:port` - the port to connect on.
* `[:finch, :max_idle_time_exceeded]` - Executed if a connection was discarded because the `max_idle_time` had been reached.
Deprecated use :conn_max_idle_time_exceeded event instead.
#### Measurements:
* `:idle_time` - Elapsed time since the connection was last checked in or initialized.
#### Metadata
* `:scheme` - The scheme used in the connection. either `http` or `https`
* `:host` - The host address
* `:port` - the port to connect on.
* `[:finch, :pool_max_idle_time_exceeded]` - Executed if a pool was terminated because the pool_max_idle_time has been reached. There are no measurements provided with this event.
#### Metadata
* `:scheme` - The scheme used in the connection. either `http` or `https`
* `:host` - The host address
* `:port` - the port to connect on.
"""
@doc false
# Emits a `[:finch, event, :start]` telemetry event (with a wall-clock
# `system_time` measurement) and returns the monotonic start time, which the
# caller later passes to `stop/4` or `exception/7` to compute a duration.
def start(event, meta \\ %{}, extra_measurements \\ %{}) do
start_time = System.monotonic_time()
:telemetry.execute(
[:finch, event, :start],
Map.merge(extra_measurements, %{system_time: System.system_time()}),
meta
)
start_time
end
@doc false
# Emits a stop event whose `duration` is measured from `start_time`
# (monotonic, as returned by `start/3`).
def stop(event, start_time, meta \\ %{}, extra_measurements \\ %{}) do
end_time = System.monotonic_time()
measurements = Map.merge(extra_measurements, %{duration: end_time - start_time})
:telemetry.execute(
[:finch, event, :stop],
measurements,
meta
)
end
@doc false
# Emits an exception event; `kind`, `error` and `stacktrace` are folded into
# the metadata so handlers can report the failure alongside its duration.
def exception(event, start_time, kind, reason, stack, meta \\ %{}, extra_measurements \\ %{}) do
end_time = System.monotonic_time()
measurements = Map.merge(extra_measurements, %{duration: end_time - start_time})
meta =
meta
|> Map.put(:kind, kind)
|> Map.put(:error, reason)
|> Map.put(:stacktrace, stack)
:telemetry.execute([:finch, event, :exception], measurements, meta)
end
@doc false
# Used for reporting generic single-shot events (no start/stop pairing).
def event(event, measurements, meta) do
:telemetry.execute([:finch, event], measurements, meta)
end
end
| 33.722222 | 181 | 0.642779 |
1c741bf12ca89a3cda77a60d63c169a1ae598ffa | 1,154 | exs | Elixir | test/advent_of_code_test.exs | danielburnley/Advent-Of-Code-1 | 84038e91cc9561755db0d5973ed6af27b50f9dd3 | [
"MIT"
] | null | null | null | test/advent_of_code_test.exs | danielburnley/Advent-Of-Code-1 | 84038e91cc9561755db0d5973ed6af27b50f9dd3 | [
"MIT"
] | null | null | null | test/advent_of_code_test.exs | danielburnley/Advent-Of-Code-1 | 84038e91cc9561755db0d5973ed6af27b50f9dd3 | [
"MIT"
] | null | null | null | defmodule AdventOfCodeHelperTest do
use ExUnit.Case, async: false
use ExVCR.Mock
import Mock
# Point ExVCR at the recorded HTTP cassettes used by all tests below.
setup_all do
ExVCR.Config.cassette_library_dir("test/fixture/vcr_cassettes")
:ok
end
# Fetching a known year/day returns the full puzzle input (7000 chars in
# the recorded cassette).
test "do we get the correct value" do
use_cassette("everything_should_work") do
{:ok, contents} = AdventOfCodeHelper.get_input(2015,1)
assert String.length(contents) == 7000
end
end
# With the clock mocked to 2016-12-02, get_input/1 should resolve the
# current AoC year and cache the input under that year.
test "gets current year correctly" do
use_cassette("most_recent_year") do
with_mock Date, [utc_today: fn() -> ~D[2016-12-02] end] do
{:ok, _contents} = AdventOfCodeHelper.get_input(1)
assert File.exists?(".cache/input_#{calculate_year()}_1")
end
end
end
# Same scenario in December, when the current year's event is live.
# NOTE(review): this fetches day 2 but asserts on the day-1 cache file
# (which the previous test created) — looks like a copy/paste slip;
# confirm the intended assertion is on "_2".
test "gets current year correctly in december" do
use_cassette("2016_day_2") do
with_mock Date, [utc_today: fn() -> ~D[2016-12-02] end] do
{:ok, _contents} = AdventOfCodeHelper.get_input(2)
assert File.exists?(".cache/input_#{calculate_year()}_1")
end
end
end
# Resolve the most recent Advent of Code year for "today": events open each
# December, so before December the latest available event is last year's.
# (Replaces a non-idiomatic `case` over a boolean with `if`.)
defp calculate_year do
  today = Date.utc_today()

  if today.month < 12 do
    today.year - 1
  else
    today.year
  end
end
end
| 25.644444 | 67 | 0.663778 |
1c74395e682f00de841f70028b41c06122853d37 | 1,726 | exs | Elixir | mix.exs | mugimaru73/clean_mixer | cd234a9b0c2823f1f9c2c2ba1cdab92a0cb08c3d | [
"Apache-2.0"
] | null | null | null | mix.exs | mugimaru73/clean_mixer | cd234a9b0c2823f1f9c2c2ba1cdab92a0cb08c3d | [
"Apache-2.0"
] | null | null | null | mix.exs | mugimaru73/clean_mixer | cd234a9b0c2823f1f9c2c2ba1cdab92a0cb08c3d | [
"Apache-2.0"
] | null | null | null | defmodule CleanMixer.MixProject do
use Mix.Project
# Mix project definition; wires up ExCoveralls coverage tasks and the hex
# package metadata below.
def project do
[
app: :clean_mixer,
version: "0.5.1",
elixir: "~> 1.10",
start_permanent: Mix.env() == :prod,
deps: deps(),
elixirc_paths: elixirc_paths(Mix.env()),
erlc_paths: elixirc_paths(Mix.env()),
aliases: aliases(),
preferred_cli_env: [
cover: :test,
"cover.html": :test,
"cover.filter": :test,
"cover.lint": :test
],
test_coverage: [tool: ExCoveralls],
package: package(),
description: description()
]
end
# OTP application configuration.
# NOTE(review): :mix and :tools in extra_applications implies runtime use of
# Mix/tooling APIs — confirm both are really needed in releases.
def application do
[
extra_applications: [:logger, :mix, :tools]
]
end
# Per-environment source paths; tests also compile test/support helpers.
defp elixirc_paths(env) when env in [:test], do: ["lib", "test/support"]
defp elixirc_paths(_), do: ["lib"]
# Hex package metadata.
defp package do
[
name: :clean_mixer,
files: ["lib", "priv", "mix.exs", "README*", "LICENSE"],
maintainers: ["Miroslav Malkin"],
licenses: ["Apache 2.0"],
links: %{
"GitHub" => "https://github.com/miros/clean_mixer"
}
]
end
# One-line description shown on hex.pm.
defp description() do
"Tools for code architecture analysis and linting"
end
defp deps do
[
{:optimus, "~> 0.1.6"},
{:dialyxir, "~> 1.0.0-rc.7", only: [:dev, :test], runtime: false},
{:excoveralls, "~> 0.11", only: :test},
{:excoveralls_linter, "~> 0.0.1", only: :test},
{:ex_doc, ">= 0.0.0", only: :dev, runtime: false}
]
end
# Coverage and lint task shortcuts.
defp aliases do
[
cover: ["coveralls --sort cov:desc"],
"cover.lint": ["coveralls.lint --missed-lines-threshold=2 --required-file-coverage=0.9"],
"cover.html": ["coveralls.html"],
"cover.detail": ["coveralls.detail --filter"]
]
end
end
| 24.657143 | 95 | 0.557358 |
1c74535e74b2b93635c4a87abcce0910d6eb1eef | 485 | exs | Elixir | priv/repo/migrations/20210128233121_move_locale_to_user_preferences.exs | Megami-Studios/glimesh.tv | 57dde3a328fabdcc3305be48ae1b82df27b83c9b | [
"MIT"
] | 328 | 2020-07-23T22:13:49.000Z | 2022-03-31T21:22:28.000Z | priv/repo/migrations/20210128233121_move_locale_to_user_preferences.exs | Megami-Studios/glimesh.tv | 57dde3a328fabdcc3305be48ae1b82df27b83c9b | [
"MIT"
] | 362 | 2020-07-23T22:38:38.000Z | 2022-03-24T02:11:16.000Z | priv/repo/migrations/20210128233121_move_locale_to_user_preferences.exs | Megami-Studios/glimesh.tv | 57dde3a328fabdcc3305be48ae1b82df27b83c9b | [
"MIT"
] | 72 | 2020-07-23T22:50:46.000Z | 2022-02-02T11:59:32.000Z | defmodule Glimesh.Repo.Migrations.MoveLocaleToUserPreferences do
use Ecto.Migration
import Ecto.Query
# Moves the per-user locale from users.locale into the new
# user_preferences.locale column, copying existing values across.
def change do
alter table(:user_preferences) do
add :locale, :string, default: "en"
end
# flush/0 runs the alter above immediately so the new column exists for
# the data copy below.
flush()
# NOTE(review): calling the application Repo from a migration couples it to
# app code; the copy itself is a join-based update_all over raw table names.
Glimesh.Repo.update_all(
from(up in "user_preferences",
join: u in "users",
on: up.user_id == u.id,
update: [set: [locale: u.locale]]
),
[]
)
# NOTE(review): `remove :locale` without a type and the update_all above make
# this migration effectively irreversible — confirm rollback is never needed.
alter table(:users) do
remove :locale
end
end
end
| 17.962963 | 64 | 0.602062 |
1c74884cdde272253a68437b799c48e26d4b0c36 | 1,530 | ex | Elixir | web/controllers/user_controller.ex | itmaster921/crowdfunding-api | 994d75d6c28356b45531b71251eff39a309d414b | [
"MIT"
] | 1 | 2020-05-24T15:18:36.000Z | 2020-05-24T15:18:36.000Z | web/controllers/user_controller.ex | itmaster921/crowdfunding-api | 994d75d6c28356b45531b71251eff39a309d414b | [
"MIT"
] | null | null | null | web/controllers/user_controller.ex | itmaster921/crowdfunding-api | 994d75d6c28356b45531b71251eff39a309d414b | [
"MIT"
] | null | null | null | defmodule CrowdfundingApi.UserController do
use CrowdfundingApi.Web, :controller
alias CrowdfundingApi.User
# GET /users — renders every user as JSON.
def index(conn, _params) do
  render(conn, "index.json", users: Repo.all(User))
end
def create(conn, %{"user" => user_params}) do
changeset = User.changeset(%User{}, user_params)
case Repo.insert(changeset) do
{:ok, user} ->
conn
|> put_status(:created)
|> put_resp_header("location", user_path(conn, :show, user))
|> render("show.json", user: user)
{:error, changeset} ->
conn
|> put_status(:unprocessable_entity)
|> render(CrowdfundingApi.ChangesetView, "error.json", changeset: changeset)
end
end
def show(conn, %{"id" => id}) do
user = Repo.get!(User, id)
render(conn, "show.json", user: user)
end
def update(conn, %{"id" => id, "user" => user_params}) do
user = Repo.get!(User, id)
changeset = User.changeset(user, user_params)
case Repo.update(changeset) do
{:ok, user} ->
render(conn, "show.json", user: user)
{:error, changeset} ->
conn
|> put_status(:unprocessable_entity)
|> render(CrowdfundingApi.ChangesetView, "error.json", changeset: changeset)
end
end
def delete(conn, %{"id" => id}) do
user = Repo.get!(User, id)
# Here we use delete! (with a bang) because we expect
# it to always work (and if it does not, it will raise).
Repo.delete!(user)
send_resp(conn, :no_content, "")
end
end
| 27.321429 | 84 | 0.616993 |
1c749fb6885c0e26ab73776fe6afb5ba181c3366 | 59 | ex | Elixir | sdr-web/web/views/layout_view.ex | korczis/hackrf-rust | 73c76c23eb58ddfaa484f40e4b800f12b31d0a50 | [
"MIT"
] | 1 | 2022-02-15T23:17:51.000Z | 2022-02-15T23:17:51.000Z | sdr-web/web/views/layout_view.ex | korczis/hackrf-rust | 73c76c23eb58ddfaa484f40e4b800f12b31d0a50 | [
"MIT"
] | null | null | null | sdr-web/web/views/layout_view.ex | korczis/hackrf-rust | 73c76c23eb58ddfaa484f40e4b800f12b31d0a50 | [
"MIT"
] | 3 | 2020-05-09T10:08:07.000Z | 2021-02-22T11:42:00.000Z | defmodule SdrWeb.LayoutView do
use SdrWeb.Web, :view
end
| 14.75 | 30 | 0.779661 |
1c749fe75134f287462cb7bfefec511aaa3f7bbd | 2,023 | exs | Elixir | test/oops_logger_test.exs | smartrent/ramoops_logger | 4cf5bce36ca65a77a78b2d6f5c4321e058383c0c | [
"Apache-2.0"
] | 8 | 2019-07-09T20:26:03.000Z | 2020-12-23T17:44:29.000Z | test/oops_logger_test.exs | smartrent/ramoops_logger | 4cf5bce36ca65a77a78b2d6f5c4321e058383c0c | [
"Apache-2.0"
] | 1 | 2019-07-08T22:51:07.000Z | 2019-07-08T22:51:07.000Z | test/oops_logger_test.exs | smartrent/ramoops_logger | 4cf5bce36ca65a77a78b2d6f5c4321e058383c0c | [
"Apache-2.0"
] | 1 | 2019-12-03T05:20:56.000Z | 2019-12-03T05:20:56.000Z | defmodule RamoopsLoggerTest do
use ExUnit.Case, async: false
@test_pmsg_file "__test_pmsg"
@test_recovered_path "__recovered_pmsg"
require Logger
# Isolate each test: detach the console backend, recreate the pmsg fixture
# files, point the backend config at them, and clean everything up on exit.
setup do
Logger.remove_backend(:console)
# Start fresh each time
_ = File.rm(@test_pmsg_file)
_ = File.rm(@test_recovered_path)
File.touch!(@test_pmsg_file)
# Start the RamoopsLogger with the test path
Application.put_env(:logger, RamoopsLogger,
pmsg_path: @test_pmsg_file,
recovered_log_path: @test_recovered_path
)
Logger.add_backend(RamoopsLogger, flush: true)
on_exit(fn ->
Logger.remove_backend(RamoopsLogger)
_ = File.rm(@test_pmsg_file)
_ = File.rm(@test_recovered_path)
end)
:ok
end
# Happy path: a debug message ends up in the pmsg file.
# NOTE(review): the 100 ms sleep gives the backend time to write — timing
# based, so potentially flaky on slow machines.
test "logs a message" do
_ = Logger.debug("hello")
Logger.flush()
Process.sleep(100)
assert File.exists?(@test_pmsg_file)
contents = File.read!(@test_pmsg_file)
assert contents =~ "[debug] hello"
end
# Reconfiguring pmsg_path at runtime redirects subsequent writes.
test "changing configuration" do
new_path = @test_pmsg_file <> ".new"
_ = File.rm(new_path)
Logger.configure_backend(RamoopsLogger, pmsg_path: new_path)
_ = Logger.info("changing configuration")
Logger.flush()
Process.sleep(100)
contents = File.read!(new_path)
assert contents =~ "[info] changing configuration"
File.rm!(new_path)
end
# add_backend surfaces a human-readable error when the device cannot open.
test "provides a reasonable error message for bad pmsg path" do
Logger.remove_backend(RamoopsLogger)
Application.put_env(:logger, RamoopsLogger, pmsg_path: "/dev/does/not/exist")
{:error, {reason, _stuff}} = Logger.add_backend(RamoopsLogger)
assert reason == "Unable to open '/dev/does/not/exist' (:enoent). RamoopsLogger won't work."
end
# Covers recovered_log_path/0, available_log?/0 and read/0 together.
test "recovered log helpers" do
assert RamoopsLogger.recovered_log_path() == @test_recovered_path
refute RamoopsLogger.available_log?()
File.write!(@test_recovered_path, "test test test")
assert RamoopsLogger.available_log?()
assert {:ok, "test test test"} == RamoopsLogger.read()
end
end
| 25.935897 | 96 | 0.696985 |
1c74b53757a5e539d2f4f453067a1e0d9d717389 | 2,011 | exs | Elixir | implementations/elixir/ockam/ockam_node_web/mix.exs | illaz/ockam | d1073799a140f5f7e3312ea2d5d8b86b4e94154c | [
"Apache-2.0"
] | null | null | null | implementations/elixir/ockam/ockam_node_web/mix.exs | illaz/ockam | d1073799a140f5f7e3312ea2d5d8b86b4e94154c | [
"Apache-2.0"
] | null | null | null | implementations/elixir/ockam/ockam_node_web/mix.exs | illaz/ockam | d1073799a140f5f7e3312ea2d5d8b86b4e94154c | [
"Apache-2.0"
] | null | null | null | defmodule Ockam.Node.Web.MixProject do
use Mix.Project
@version "0.10.0-dev"
@elixir_requirement "~> 1.10"
@ockam_github_repo "https://github.com/ockam-network/ockam"
@ockam_github_repo_path "implementations/elixir/ockam/ockam_node_web"
  # Project metadata and build configuration for the :ockam_node_web app.
  def project do
    [
      app: :ockam_node_web,
      version: @version,
      elixir: @elixir_requirement,
      consolidate_protocols: Mix.env() != :test,
      elixirc_options: [warnings_as_errors: true],
      deps: deps(),
      aliases: aliases(),

      # lint
      dialyzer: [flags: ["-Wunmatched_returns", :error_handling, :underspecs]],

      # test
      test_coverage: [output: "_build/cover"],
      preferred_cli_env: ["test.cover": :test],

      # hex
      description: "A web interface for an ockam node",
      package: package(),

      # docs
      name: "Ockam Node Web",
      docs: docs()
    ]
  end

  # mix help compile.app for more
  def application do
    [
      # Ockam.Node.Web is the application callback module (started on boot).
      mod: {Ockam.Node.Web, []},
      extra_applications: [:logger]
    ]
  end

  defp deps do
    [
      {:cowboy, "~> 2.8"},
      {:plug, "~> 1.11"},
      {:plug_cowboy, "~> 2.4"},
      {:jason, "~> 1.2"},
      # dev/test tooling only — not part of the runtime release
      {:ex_doc, "~> 0.23.0", only: :dev, runtime: false},
      {:credo, "~> 1.5", only: [:dev, :test], runtime: false},
      {:dialyxir, "~> 1.0", only: [:dev], runtime: false}
    ]
  end

  # used by hex
  defp package do
    [
      links: %{"GitHub" => @ockam_github_repo},
      licenses: ["Apache-2.0"]
    ]
  end

  # used by ex_doc
  defp docs do
    [
      main: "Ockam.Node.Web",
      # Deep-links generated docs to the exact file/line on GitHub for @version.
      source_url_pattern:
        "#{@ockam_github_repo}/blob/v#{@version}/#{@ockam_github_repo_path}/%{path}#L%{line}"
    ]
  end

  defp aliases do
    [
      docs: "docs --output _build/docs --formatter html",
      "test.cover": "test --no-start --cover",
      "lint.format": "format --check-formatted",
      "lint.credo": "credo --strict",
      "lint.dialyzer": "dialyzer --format dialyxir",
      lint: ["lint.format", "lint.credo"]
    ]
  end
end
| 23.383721 | 93 | 0.572352 |
1c74c29afef42ab407ab9126355fbc61c0df9181 | 2,115 | ex | Elixir | clients/compute/lib/google_api/compute/v1/model/disk_type_list_warning.ex | kyleVsteger/elixir-google-api | 3a0dd498af066a4361b5b0fd66ffc04a57539488 | [
"Apache-2.0"
] | null | null | null | clients/compute/lib/google_api/compute/v1/model/disk_type_list_warning.ex | kyleVsteger/elixir-google-api | 3a0dd498af066a4361b5b0fd66ffc04a57539488 | [
"Apache-2.0"
] | null | null | null | clients/compute/lib/google_api/compute/v1/model/disk_type_list_warning.ex | kyleVsteger/elixir-google-api | 3a0dd498af066a4361b5b0fd66ffc04a57539488 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Compute.V1.Model.DiskTypeListWarning do
  @moduledoc """
  [Output Only] Informational warning message.

  ## Attributes

  * `code` (*type:* `String.t`, *default:* `nil`) - [Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.
  * `data` (*type:* `list(GoogleApi.Compute.V1.Model.DiskTypeListWarningData.t)`, *default:* `nil`) - [Output Only] Metadata about this warning in key: value format. For example:
  "data": [ { "key": "scope", "value": "zones/us-east1-d" }
  * `message` (*type:* `String.t`, *default:* `nil`) - [Output Only] A human-readable description of the warning code.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :code => String.t() | nil,
          :data => list(GoogleApi.Compute.V1.Model.DiskTypeListWarningData.t()) | nil,
          :message => String.t() | nil
        }

  # `field/1,2` is a GoogleApi.Gax.ModelBase macro defining accessor and
  # (de)serialization metadata for each attribute.
  field(:code)
  field(:data, as: GoogleApi.Compute.V1.Model.DiskTypeListWarningData, type: :list)
  field(:message)
end
defimpl Poison.Decoder, for: GoogleApi.Compute.V1.Model.DiskTypeListWarning do
  # JSON decoding is handled by the generated model module itself.
  defdelegate decode(value, options), to: GoogleApi.Compute.V1.Model.DiskTypeListWarning
end

defimpl Poison.Encoder, for: GoogleApi.Compute.V1.Model.DiskTypeListWarning do
  # JSON encoding is shared across all generated models via the Gax base.
  defdelegate encode(value, options), to: GoogleApi.Gax.ModelBase
end
| 39.166667 | 194 | 0.716312 |
1c74cd72a2b314743138bd0b71bd9c9cce648a32 | 67 | ex | Elixir | lib/transhook_web/views/layout_view.ex | linjunpop/transhook | 59000e5a346c6c059d95c5a1f48190f698b4e7a3 | [
"0BSD"
] | null | null | null | lib/transhook_web/views/layout_view.ex | linjunpop/transhook | 59000e5a346c6c059d95c5a1f48190f698b4e7a3 | [
"0BSD"
] | null | null | null | lib/transhook_web/views/layout_view.ex | linjunpop/transhook | 59000e5a346c6c059d95c5a1f48190f698b4e7a3 | [
"0BSD"
] | null | null | null | defmodule TranshookWeb.LayoutView do
use TranshookWeb, :view
end
| 16.75 | 36 | 0.820896 |
1c74cf95a417ca0cf05cb2234f3537f328fce90f | 121 | ex | Elixir | lib/options_tracker/repo.ex | mgwidmann/options_tracker | 5520f88a9a5873842a63a23d4bcc5da82a51feba | [
"MIT"
] | 12 | 2020-06-25T17:25:15.000Z | 2021-09-30T20:13:33.000Z | lib/options_tracker/repo.ex | mgwidmann/options_tracker | 5520f88a9a5873842a63a23d4bcc5da82a51feba | [
"MIT"
] | 5 | 2020-08-05T03:12:31.000Z | 2021-07-15T04:59:03.000Z | lib/options_tracker/repo.ex | mgwidmann/options_tracker | 5520f88a9a5873842a63a23d4bcc5da82a51feba | [
"MIT"
] | 2 | 2021-07-03T17:20:15.000Z | 2021-09-01T15:38:58.000Z | defmodule OptionsTracker.Repo do
use Ecto.Repo,
otp_app: :options_tracker,
adapter: Ecto.Adapters.Postgres
end
| 20.166667 | 35 | 0.760331 |
1c74d7112dd472032db80ce96bbc37b0892a8820 | 1,533 | exs | Elixir | api_user/mix.exs | roberttgt/Elixir-Course | 39a0bfc37125703d08114b54a6efac07f7ab7488 | [
"MIT"
] | null | null | null | api_user/mix.exs | roberttgt/Elixir-Course | 39a0bfc37125703d08114b54a6efac07f7ab7488 | [
"MIT"
] | null | null | null | api_user/mix.exs | roberttgt/Elixir-Course | 39a0bfc37125703d08114b54a6efac07f7ab7488 | [
"MIT"
] | null | null | null | defmodule ApiUser.MixProject do
use Mix.Project
  # Project metadata and build configuration for the :api_user Phoenix app.
  def project do
    [
      app: :api_user,
      version: "0.1.0",
      elixir: "~> 1.5",
      elixirc_paths: elixirc_paths(Mix.env()),
      compilers: [:phoenix, :gettext] ++ Mix.compilers(),
      start_permanent: Mix.env() == :prod,
      aliases: aliases(),
      deps: deps()
    ]
  end

  # Configuration for the OTP application.
  #
  # Type `mix help compile.app` for more information.
  def application do
    [
      mod: {ApiUser.Application, []},
      extra_applications: [:logger, :runtime_tools]
    ]
  end

  # Specifies which paths to compile per environment.
  # Test support helpers are only compiled in the :test environment.
  defp elixirc_paths(:test), do: ["lib", "test/support"]
  defp elixirc_paths(_), do: ["lib"]

  # Specifies your project dependencies.
  #
  # Type `mix help deps` for examples and options.
  # NOTE(review): Phoenix is pinned to a release candidate ("~> 1.4.0-rc");
  # confirm whether the stable 1.4 line should be used instead.
  defp deps do
    [
      {:phoenix, "~> 1.4.0-rc"},
      {:phoenix_pubsub, "~> 1.1"},
      {:phoenix_ecto, "~> 3.2"},
      {:postgrex, ">= 0.0.0"},
      {:gettext, "~> 0.11"},
      {:jason, "~> 1.0"},
      {:cowboy, "~> 2.0"}
    ]
  end

  # Aliases are shortcuts or tasks specific to the current project.
  # For example, to create, migrate and run the seeds file at once:
  #
  #     $ mix ecto.setup
  #
  # See the documentation for `Mix` for more info on aliases.
  defp aliases do
    [
      "ecto.setup": ["ecto.create", "ecto.migrate", "run priv/repo/seeds.exs"],
      "ecto.reset": ["ecto.drop", "ecto.setup"],
      test: ["ecto.create --quiet", "ecto.migrate", "test"]
    ]
  end
end
| 25.55 | 79 | 0.584475 |
1c74fcb033b15cfb5ea85a67bb449deba7d0dd1a | 4,578 | ex | Elixir | lib/weddell/client/subscriber/stream.ex | ssmlee/weddell | 99a8de646058904ce4b2d35fc9738f124f0c3d97 | [
"MIT"
] | null | null | null | lib/weddell/client/subscriber/stream.ex | ssmlee/weddell | 99a8de646058904ce4b2d35fc9738f124f0c3d97 | [
"MIT"
] | null | null | null | lib/weddell/client/subscriber/stream.ex | ssmlee/weddell | 99a8de646058904ce4b2d35fc9738f124f0c3d97 | [
"MIT"
] | null | null | null | defmodule Weddell.Client.Subscriber.Stream do
@moduledoc """
A streaming connection to a subscription.
"""
alias GRPC.Client.Stream, as: GRPCStream
alias GRPC.Stub, as: GRPCStub
alias Google.Pubsub.V1.{Subscriber.Stub,
StreamingPullRequest}
alias Weddell.{Message,
Client,
Client.Util}
@typedoc "A Pub/Sub subscriber stream"
@opaque t :: %__MODULE__{client: Client.t,
subscription: String.t,
grpc_stream: GRPCStream.t}
defstruct [:client, :subscription, :grpc_stream]
@default_ack_deadline 10
@doc """
Open a new stream on a subscription.
Streams can be used to pull new messages from a subscription and also
respond with acknowledgements or delays.
## Example
{:ok, client} = Weddell.Client.connect("weddell-project")
Weddell.Client.Subscriber.Stream.open(client, "foo-subscription")
#=> %Weddell.Client.Subscriber.Stream{}
"""
@spec open(Client.t, subscription :: String.t) :: t
  def open(client, subscription) do
    # Build the stream handle first so the bidirectional gRPC stream exists
    # before the initial request goes out.
    stream =
      %__MODULE__{client: client,
                  subscription: subscription,
                  grpc_stream: Stub.streaming_pull(client.channel, Client.request_opts())}

    # The first StreamingPullRequest must name the subscription and sets the
    # default ack deadline for messages delivered on this stream.
    request =
      StreamingPullRequest.new(subscription: Util.full_subscription(client.project, subscription),
                               stream_ack_deadline_seconds: @default_ack_deadline)

    stream.grpc_stream
    |> GRPCStub.stream_send(request)

    stream
  end
@doc """
Close an open stream.
## Example
{:ok, client} = Weddell.Client.connect("weddell-project")
stream = Weddell.Client.Subscriber.Stream.open(client, "foo-subscription")
Weddell.Client.Subscriber.Stream.close(stream)
#=> :ok
"""
@spec close(t) :: :ok
  def close(stream) do
    # Sending with `end_stream: true` signals the server that no further
    # messages will be written on this stream.
    request =
      StreamingPullRequest.new(
        subscription: Util.full_subscription(stream.client.project, stream.subscription))

    GRPCStub.stream_send(stream.grpc_stream, request, end_stream: true)
  end
@typedoc "A message and a new deadline in seconds"
@type message_delay :: {Message.t, seconds :: pos_integer}
@typedoc "Option values used when writing to a stream"
@type send_opt :: {:ack, [Message.t]} |
{:delay, [message_delay]} |
{:stream_deadline, seconds :: pos_integer}
@typedoc "Options used when writing to a stream"
@type send_opts :: [send_opt]
@doc """
Send a response to a stream.
## Example
{:ok, client} = Weddell.Client.connect("weddell-project")
stream = Weddell.Client.Subscriber.Stream.open(client, "foo-subscription")
Weddell.Client.Subscriber.Stream.send(stream,
ack: [%Message{}],
delay: [{%Message{}, 60}],
stream_deadline: 120)
#=> :ok
## Options
* `ack` - Messages to be acknowledged. _(default: [])_
* `delay` - Messages to be delayed and the period for which to delay. _(default: [])_
* `stream_deadline` - The time period to wait before resending a
message on this stream. _(default: 10)_
"""
@spec send(stream :: t, send_opts) :: :ok
def send(stream, opts \\ []) do
ack_ids =
opts
|> Keyword.get(:ack, [])
|> Enum.map(&(&1.ack_id))
{deadline_ack_ids, deadline_seconds} =
opts
|> Keyword.get(:delay, [])
|> Enum.reduce({[], []}, fn ({message, seconds}, {ids, deadlines}) ->
{[message.ack_id | ids], [seconds | deadlines]}
end)
stream_deadline =
opts
|> Keyword.get(:stream_deadline, @default_ack_deadline)
request =
StreamingPullRequest.new(
ack_ids: ack_ids,
modify_deadline_ack_ids: deadline_ack_ids,
modify_deadline_seconds: deadline_seconds,
stream_ack_deadline_seconds: stream_deadline)
IO.inspect request
GRPCStub.stream_send(stream.grpc_stream, request)
end
@doc """
Receive messages from a stream.
## Example
{:ok, client} = Weddell.Client.connect("weddell-project")
client
|> Weddell.Client.Subscriber.Stream.open("foo-subscription")
|> Weddell.Client.Subscriber.Stream.recv()
|> Enum.take(1)
#=> [%Message{...}]
"""
@spec recv(stream :: t) :: Enumerable.t
  def recv(stream) do
    # Lazily map each StreamingPullResponse to a list of Message structs;
    # nothing is consumed until the returned stream is enumerated.
    GRPCStub.recv(stream.grpc_stream)
    |> Stream.map(fn response ->
      response.received_messages
      |> Enum.map(&Message.new/1)
    end)
  end
end
| 32.013986 | 98 | 0.620795 |
1c751241f6eb67aa02b64f5c583309ddf00a2c76 | 264 | ex | Elixir | lib/ctrlv/schema.ex | ryanwinchester/ctrlv | eee44962dda062ba2154cc8bb57d86a6d814c71f | [
"Apache-2.0"
] | 1 | 2022-03-31T17:55:16.000Z | 2022-03-31T17:55:16.000Z | lib/ctrlv/schema.ex | ryanwinchester/ctrlv | eee44962dda062ba2154cc8bb57d86a6d814c71f | [
"Apache-2.0"
] | 7 | 2022-03-30T02:52:54.000Z | 2022-03-30T23:11:01.000Z | lib/ctrlv/schema.ex | ryanwinchester/ctrlv | eee44962dda062ba2154cc8bb57d86a6d814c71f | [
"Apache-2.0"
] | 1 | 2022-03-31T03:37:16.000Z | 2022-03-31T03:37:16.000Z | defmodule Ctrlv.Schema do
@moduledoc false
  # Injected into every schema module that calls `use Ctrlv.Schema`:
  # binary (UUID) primary and foreign keys, autogenerated ids, and
  # UTC timestamps, applied consistently across the app.
  defmacro __using__(_) do
    quote do
      use Ecto.Schema

      @primary_key {:id, :binary_id, autogenerate: true}
      @foreign_key_type :binary_id
      @timestamps_opts [type: :utc_datetime]
    end
  end
end
| 18.857143 | 56 | 0.67803 |
1c75220cf35b534bd8b59145f4a40daa42831c3e | 15,313 | ex | Elixir | lib/credo/execution.ex | linduxed/credo | b4235a771eebcf0d1b047fc6b88d43905236e1d5 | [
"MIT"
] | null | null | null | lib/credo/execution.ex | linduxed/credo | b4235a771eebcf0d1b047fc6b88d43905236e1d5 | [
"MIT"
] | null | null | null | lib/credo/execution.ex | linduxed/credo | b4235a771eebcf0d1b047fc6b88d43905236e1d5 | [
"MIT"
] | null | null | null | defmodule Credo.Execution do
@moduledoc """
Every run of Credo is configured via a `Execution` struct, which is created and
manipulated via the `Credo.Execution` module.
"""
@doc """
The `Execution` struct is created and manipulated via the `Credo.Execution` module.
"""
defstruct argv: [],
cli_options: nil,
cli_switches: [
all_priorities: :boolean,
all: :boolean,
files_included: :keep,
files_excluded: :keep,
checks: :string,
config_name: :string,
config_file: :string,
color: :boolean,
crash_on_error: :boolean,
debug: :boolean,
mute_exit_status: :boolean,
format: :string,
help: :boolean,
ignore_checks: :string,
ignore: :string,
min_priority: :string,
only: :string,
read_from_stdin: :boolean,
strict: :boolean,
verbose: :boolean,
version: :boolean
],
cli_aliases: [
a: :all,
A: :all_priorities,
c: :checks,
C: :config_name,
d: :debug,
h: :help,
i: :ignore_checks,
v: :version
],
cli_switch_plugin_param_converters: [],
# config
files: nil,
color: true,
debug: false,
checks: nil,
requires: [],
plugins: [],
parse_timeout: 5000,
strict: false,
# checks if there is a new version of Credo
check_for_updates: true,
# options, set by the command line
min_priority: 0,
help: false,
version: false,
verbose: false,
all: false,
format: nil,
only_checks: nil,
ignore_checks: nil,
crash_on_error: true,
mute_exit_status: false,
read_from_stdin: false,
# state, which is accessed and changed over the course of Credo's execution
pipeline_map: %{},
commands: %{},
config_files: [],
current_task: nil,
parent_task: nil,
initializing_plugin: nil,
halted: false,
config_files_pid: nil,
source_files_pid: nil,
issues_pid: nil,
timing_pid: nil,
skipped_checks: nil,
assigns: %{},
results: %{},
config_comment_map: %{}
@type t :: %__MODULE__{}
@execution_pipeline [
__pre__: [
{Credo.Execution.Task.AppendDefaultConfig, []},
{Credo.Execution.Task.ParseOptions, []},
{Credo.Execution.Task.ConvertCLIOptionsToConfig, []},
{Credo.Execution.Task.InitializePlugins, []}
],
parse_cli_options: [
{Credo.Execution.Task.ParseOptions, []}
],
initialize_plugins: [
# This is where plugins can put their hooks to initialize themselves based on
# the params given in the config as well as in their own command line switches.
],
validate_cli_options: [
{Credo.Execution.Task.ValidateOptions, []}
],
convert_cli_options_to_config: [
{Credo.Execution.Task.ConvertCLIOptionsToConfig, []}
],
determine_command: [
{Credo.Execution.Task.DetermineCommand, []}
],
set_default_command: [
{Credo.Execution.Task.SetDefaultCommand, []}
],
resolve_config: [
{Credo.Execution.Task.UseColors, []},
{Credo.Execution.Task.RequireRequires, []}
],
validate_config: [
{Credo.Execution.Task.ValidateConfig, []}
],
run_command: [
{Credo.Execution.Task.RunCommand, []}
],
halt_execution: [
{Credo.Execution.Task.AssignExitStatusForIssues, []}
]
]
alias Credo.Execution.ExecutionConfigFiles
alias Credo.Execution.ExecutionIssues
alias Credo.Execution.ExecutionSourceFiles
alias Credo.Execution.ExecutionTiming
  @doc "Builds an Execution struct for the given `argv`."
  def build(argv \\ []) when is_list(argv) do
    # Start from a bare struct, install the default task pipeline, register
    # every built-in CLI command, then boot the per-execution state servers.
    %__MODULE__{argv: argv}
    |> put_pipeline(__MODULE__, @execution_pipeline)
    |> put_builtin_command("categories", Credo.CLI.Command.Categories.CategoriesCommand)
    |> put_builtin_command("explain", Credo.CLI.Command.Explain.ExplainCommand)
    |> put_builtin_command("gen.check", Credo.CLI.Command.GenCheck)
    |> put_builtin_command("gen.config", Credo.CLI.Command.GenConfig)
    |> put_builtin_command("help", Credo.CLI.Command.Help)
    |> put_builtin_command("info", Credo.CLI.Command.Info.InfoCommand)
    |> put_builtin_command("list", Credo.CLI.Command.List.ListCommand)
    |> put_builtin_command("suggest", Credo.CLI.Command.Suggest.SuggestCommand)
    |> put_builtin_command("version", Credo.CLI.Command.Version)
    |> start_servers()
  end

  # Boots the processes holding mutable per-run state (config files, source
  # files, issues, timings); each call stores the server's pid on the struct.
  @doc false
  defp start_servers(%__MODULE__{} = exec) do
    exec
    |> ExecutionConfigFiles.start_server()
    |> ExecutionSourceFiles.start_server()
    |> ExecutionIssues.start_server()
    |> ExecutionTiming.start_server()
  end
  @doc """
  Returns the checks that should be run for a given `exec` struct.

  Takes all checks from the `checks:` field of the exec, matches those against
  any patterns to include or exclude certain checks given via the command line.

  Returns a three-element tuple: `{checks_to_run, only_matching, ignore_matching}`.
  """
  def checks(exec)

  # No checks configured at all: nothing to run, include or ignore.
  def checks(%__MODULE__{checks: nil}) do
    {[], [], []}
  end

  def checks(%__MODULE__{checks: checks, only_checks: only_checks, ignore_checks: ignore_checks}) do
    only_matching = filter_only_checks(checks, only_checks)
    ignore_matching = filter_ignore_checks(checks, ignore_checks)
    # Ignore patterns win over "only" patterns.
    result = only_matching -- ignore_matching

    {result, only_matching, ignore_matching}
  end
defp filter_only_checks(checks, nil), do: checks
defp filter_only_checks(checks, []), do: checks
defp filter_only_checks(checks, patterns), do: filter_checks(checks, patterns)
defp filter_ignore_checks(_checks, nil), do: []
defp filter_ignore_checks(_checks, []), do: []
defp filter_ignore_checks(checks, patterns), do: filter_checks(checks, patterns)
defp filter_checks(checks, patterns) do
regexes =
patterns
|> List.wrap()
|> to_match_regexes
Enum.filter(checks, &match_regex(&1, regexes, true))
end
defp match_regex(_tuple, [], default_for_empty), do: default_for_empty
defp match_regex(tuple, regexes, _default_for_empty) do
check_name =
tuple
|> Tuple.to_list()
|> List.first()
|> to_string
Enum.any?(regexes, &Regex.run(&1, check_name))
end
defp to_match_regexes(list) do
Enum.map(list, fn match_check ->
{:ok, match_pattern} = Regex.compile(match_check, "i")
match_pattern
end)
end
  @doc """
  Sets the exec values which `strict` implies (if applicable).
  """
  def set_strict(exec)

  # Strict mode reports all issues, regardless of priority.
  def set_strict(%__MODULE__{strict: true} = exec) do
    %__MODULE__{exec | all: true, min_priority: -99}
  end

  def set_strict(%__MODULE__{strict: false} = exec) do
    %__MODULE__{exec | min_priority: 0}
  end

  # Catch-all: anything that is not an %Execution{} with a boolean `strict`
  # is returned unchanged.
  def set_strict(exec), do: exec

  @doc false
  def get_path(exec) do
    exec.cli_options.path
  end

  # Commands

  @doc "Returns the name of the command, which should be run by the given execution."
  def get_command_name(exec) do
    exec.cli_options.command
  end

  @doc "Returns all valid command names."
  def get_valid_command_names(exec) do
    Map.keys(exec.commands)
  end
def get_command(exec, name) do
Map.get(exec.commands, name) ||
raise ~S'Command not found: "#{name}"\n\nRegistered commands: #{
inspect(exec.commands, pretty: true)
}'
end
  @doc false
  def put_command(exec, _plugin_mod, name, command_mod) do
    commands = Map.put(exec.commands, name, command_mod)

    # Registering a command immediately runs its `init/1` hook, which may
    # further modify the exec.
    %__MODULE__{exec | commands: commands}
    |> init_command(command_mod)
  end
@doc false
def set_initializing_plugin(%__MODULE__{initializing_plugin: nil} = exec, plugin_mod) do
%__MODULE__{exec | initializing_plugin: plugin_mod}
end
def set_initializing_plugin(exec, nil) do
%__MODULE__{exec | initializing_plugin: nil}
end
def set_initializing_plugin(%__MODULE__{initializing_plugin: mod1}, mod2) do
raise "Attempting to initialize plugin #{inspect(mod2)}, " <>
"while already initializing plugin #{mod1}"
end
  # Plugin params

  # Reads a single plugin parameter; the Access-based lookup returns nil
  # when either the plugin or the param is missing.
  def get_plugin_param(exec, plugin_mod, param_name) do
    exec.plugins[plugin_mod][param_name]
  end
def put_plugin_param(exec, plugin_mod, param_name, param_value) do
plugins =
Keyword.update(exec.plugins, plugin_mod, [], fn list ->
Keyword.update(list, param_name, param_value, fn _ -> param_value end)
end)
%__MODULE__{exec | plugins: plugins}
end
  # CLI switches

  # Registers an additional CLI switch (name/type) contributed by a plugin.
  @doc false
  def put_cli_switch(exec, _plugin_mod, name, type) do
    %__MODULE__{exec | cli_switches: exec.cli_switches ++ [{name, type}]}
  end

  # Registers a short alias for an existing CLI switch.
  @doc false
  def put_cli_switch_alias(exec, _plugin_mod, name, alias_name) do
    %__MODULE__{exec | cli_aliases: exec.cli_aliases ++ [{alias_name, name}]}
  end

  # Records that a given CLI switch value should later be converted into a
  # param of the given plugin.
  @doc false
  def put_cli_switch_plugin_param_converter(exec, plugin_mod, cli_switch_name, plugin_param_name) do
    converter_tuple = {cli_switch_name, plugin_mod, plugin_param_name}

    %__MODULE__{
      exec
      | cli_switch_plugin_param_converters:
          exec.cli_switch_plugin_param_converters ++ [converter_tuple]
    }
  end
def get_given_cli_switch(exec, switch_name) do
if Map.has_key?(exec.cli_options.switches, switch_name) do
{:ok, exec.cli_options.switches[switch_name]}
else
:error
end
end
  # Assigns

  @doc "Returns the assign with the given `name` for the given `exec` struct (or returns the given `default` value)."
  def get_assign(exec, name, default \\ nil) do
    Map.get(exec.assigns, name, default)
  end

  @doc "Puts the given `value` with the given `name` as assign into the given `exec` struct."
  def put_assign(exec, name, value) do
    %__MODULE__{exec | assigns: Map.put(exec.assigns, name, value)}
  end

  # Config Files

  @doc "Returns all config files for the given `exec` struct."
  def get_config_files(exec) do
    Credo.Execution.ExecutionConfigFiles.get(exec)
  end

  # Appends a {_, _, _} config-file triple to the list held by the
  # ExecutionConfigFiles server; the exec struct itself is unchanged.
  @doc false
  def append_config_file(exec, {_, _, _} = config_file) do
    config_files = get_config_files(exec) ++ [config_file]
    ExecutionConfigFiles.put(exec, config_files)

    exec
  end

  # Source Files

  @doc "Returns all source files for the given `exec` struct."
  def get_source_files(exec) do
    Credo.Execution.ExecutionSourceFiles.get(exec)
  end

  @doc "Puts the given `source_files` into the given `exec` struct."
  def put_source_files(exec, source_files) do
    ExecutionSourceFiles.put(exec, source_files)

    exec
  end

  # Issues

  @doc "Returns all issues for the given `exec` struct."
  def get_issues(exec) do
    # Issues are stored per filename; flatten them into a single list.
    exec
    |> ExecutionIssues.to_map()
    |> Map.values()
    |> List.flatten()
  end

  @doc "Returns all issues for the given `exec` struct that relate to the given `filename`."
  def get_issues(exec, filename) do
    exec
    |> ExecutionIssues.to_map()
    |> Map.get(filename, [])
  end

  @doc "Sets the issues for the given `exec` struct, overwriting any existing issues."
  def set_issues(exec, issues) do
    ExecutionIssues.set(exec, issues)

    exec
  end

  # Results

  @doc "Returns the result with the given `name` for the given `exec` struct (or returns the given `default` value)."
  def get_result(exec, name, default \\ nil) do
    Map.get(exec.results, name, default)
  end

  @doc "Puts the given `value` with the given `name` as result into the given `exec` struct."
  def put_result(exec, name, value) do
    %__MODULE__{exec | results: Map.put(exec.results, name, value)}
  end
  # Halt

  @doc "Halts further execution of the pipeline."
  def halt(exec) do
    %__MODULE__{exec | halted: true}
  end

  # Task tracking

  # Records which task is currently running and its parent task.
  @doc false
  def set_parent_and_current_task(exec, parent_task, current_task) do
    %__MODULE__{exec | parent_task: parent_task, current_task: current_task}
  end
  # Running tasks

  # Runs the top-level execution pipeline.
  @doc false
  def run(exec) do
    run_pipeline(exec, __MODULE__)
  end

  # Runs every task group of the pipeline registered under `pipeline_key`,
  # threading the exec through each task. The pipeline is re-read from the
  # exec for every group, so a task can modify groups that run after its own.
  @doc false
  def run_pipeline(initial_exec, pipeline_key) do
    initial_pipeline = get_pipeline(initial_exec, pipeline_key)

    Enum.reduce(initial_pipeline, initial_exec, fn {group_name, _list}, outer_exec ->
      outer_pipeline = get_pipeline(outer_exec, pipeline_key)

      task_group = outer_pipeline[group_name]

      Enum.reduce(task_group, outer_exec, fn {task_mod, opts}, inner_exec ->
        Credo.Execution.Task.run(task_mod, inner_exec, opts)
      end)
    end)
  end

  # Fetches a pipeline by key, raising when none was registered.
  @doc false
  defp get_pipeline(exec, pipeline_key) do
    case exec.pipeline_map[pipeline_key] do
      nil -> raise "Could not find execution pipeline for '#{pipeline_key}'"
      pipeline -> pipeline
    end
  end

  # Registers (or replaces) the pipeline stored under `pipeline_key`.
  def put_pipeline(exec, pipeline_key, pipeline) do
    new_pipelines = Map.put(exec.pipeline_map, pipeline_key, pipeline)

    %__MODULE__{exec | pipeline_map: new_pipelines}
  end
@doc false
def prepend_task(exec, plugin_mod, nil, group_name, task_tuple) do
prepend_task(exec, plugin_mod, __MODULE__, group_name, task_tuple)
end
def prepend_task(exec, plugin_mod, pipeline_key, group_name, task_mod) when is_atom(task_mod) do
prepend_task(exec, plugin_mod, pipeline_key, group_name, {task_mod, []})
end
@doc false
def prepend_task(exec, _plugin_mod, pipeline_key, group_name, task_tuple) do
pipeline =
exec
|> get_pipeline(pipeline_key)
|> Enum.map(fn
{^group_name, list} -> {group_name, [task_tuple] ++ list}
value -> value
end)
put_pipeline(exec, __MODULE__, pipeline)
end
  @doc false
  def append_task(exec, plugin_mod, nil, group_name, task_tuple) do
    # No pipeline key given: target the default (top-level) pipeline.
    append_task(exec, plugin_mod, __MODULE__, group_name, task_tuple)
  end

  # Bare task module given: normalize to a {module, opts} tuple.
  def append_task(exec, plugin_mod, pipeline_key, group_name, task_mod) when is_atom(task_mod) do
    append_task(exec, plugin_mod, pipeline_key, group_name, {task_mod, []})
  end

  @doc false
  def append_task(exec, _plugin_mod, pipeline_key, group_name, task_tuple) do
    # Append the task to the named group, leaving other groups untouched.
    pipeline =
      exec
      |> get_pipeline(pipeline_key)
      |> Enum.map(fn
        {^group_name, list} -> {group_name, list ++ [task_tuple]}
        value -> value
      end)

    put_pipeline(exec, pipeline_key, pipeline)
  end
  # Registers one of Credo's own CLI commands (plugin slot is Credo itself).
  defp put_builtin_command(exec, name, command_mod) do
    put_command(exec, Credo, name, command_mod)
  end

  # Calls the command's `init/1` hook and validates its return value.
  defp init_command(exec, command_mod) do
    exec
    |> command_mod.init()
    |> ensure_execution_struct("#{command_mod}.init/1")
  end

  @doc ~S"""
  Ensures that the given `value` is a `%Credo.Execution{}` struct, raises an error otherwise.

  Example:

      exec
      |> mod.init()
      |> Execution.ensure_execution_struct("#{mod}.init/1")
  """
  def ensure_execution_struct(value, fun_name)

  def ensure_execution_struct(%__MODULE__{} = exec, _fun_name), do: exec

  def ensure_execution_struct(value, fun_name) do
    raise("Expected #{fun_name} to return %Credo.Execution{}, got: #{inspect(value)}")
  end
end
| 29.223282 | 116 | 0.658199 |
1c753a224b4bdff8c87ae8604cba09b0b1ad9379 | 1,991 | exs | Elixir | 2-resources/__DATA-Structures/ds-traversal-multi-lang/elixir/recursive.exs | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | [
"MIT"
] | null | null | null | 2-resources/__DATA-Structures/ds-traversal-multi-lang/elixir/recursive.exs | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | [
"MIT"
] | null | null | null | 2-resources/__DATA-Structures/ds-traversal-multi-lang/elixir/recursive.exs | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | [
"MIT"
] | null | null | null | defmodule Solver do
def solve([curr | map], accum, title_counter, lesson_counter) do
curr = Map.put(curr, "position", title_counter)
lesson_counter = if curr["reset_lesson_position"], do: 0, else: lesson_counter
curr =
Map.put(
curr,
"lessons",
Enum.map(Enum.with_index(curr["lessons"], 1), fn {lesson, index} ->
Map.put(lesson, "position", lesson_counter + index)
end)
)
solve(map, [curr | accum], title_counter + 1, lesson_counter + Enum.count(curr["lessons"]))
end
def solve([], accum, _title_counter, _lesson_counter) do
Enum.reverse(accum)
end
end
# Course outline fixture: chapters without "position" fields.
input = [
  %{
    "title" => "Getting started",
    "reset_lesson_position" => false,
    "lessons" => [
      %{"name" => "Welcome"},
      %{"name" => "Installation"}
    ]
  },
  %{
    "title" => "Basic operator",
    "reset_lesson_position" => false,
    "lessons" => [
      %{"name" => "Addition / Subtraction"},
      %{"name" => "Multiplication / Division"}
    ]
  },
  %{
    "title" => "Advanced topics",
    "reset_lesson_position" => true,
    "lessons" => [
      %{"name" => "Mutability"},
      %{"name" => "Immutability"}
    ]
  }
]

# Expected result: chapters numbered 1..3; lesson numbering continues across
# chapters and restarts at 1 where "reset_lesson_position" is true.
test = [
  %{
    "title" => "Getting started",
    "reset_lesson_position" => false,
    "position" => 1,
    "lessons" => [
      %{"name" => "Welcome", "position" => 1},
      %{"name" => "Installation", "position" => 2}
    ]
  },
  %{
    "title" => "Basic operator",
    "reset_lesson_position" => false,
    "position" => 2,
    "lessons" => [
      %{"name" => "Addition / Subtraction", "position" => 3},
      %{"name" => "Multiplication / Division", "position" => 4}
    ]
  },
  %{
    "title" => "Advanced topics",
    "reset_lesson_position" => true,
    "position" => 3,
    "lessons" => [
      %{"name" => "Mutability", "position" => 1},
      %{"name" => "Immutability", "position" => 2}
    ]
  }
]

# Run the solver and check the result; the bare match on the last line
# raises MatchError if the output differs from the expectation.
output = Solver.solve(input, [], 1, 0)
IO.inspect(output)
true = output == test
| 23.987952 | 95 | 0.536916 |
1c755c91b803221234347a29cab5740a25e69576 | 4,566 | exs | Elixir | test/langue/android/expectation_test.exs | samuelnygaard/accent | db753badab1d885397b48a42ac3fb43024345467 | [
"BSD-3-Clause"
] | 1 | 2020-07-01T16:08:34.000Z | 2020-07-01T16:08:34.000Z | test/langue/android/expectation_test.exs | samuelnygaard/accent | db753badab1d885397b48a42ac3fb43024345467 | [
"BSD-3-Clause"
] | 6 | 2021-03-11T07:37:48.000Z | 2022-02-13T21:10:33.000Z | test/langue/android/expectation_test.exs | doc-ai/accent | e337e16f3658cc0728364f952c0d9c13710ebb06 | [
"BSD-3-Clause"
] | 1 | 2020-05-29T21:47:35.000Z | 2020-05-29T21:47:35.000Z | defmodule LangueTest.Formatter.Android.Expectation do
  alias Langue.Entry

  # Each nested module is a fixture pairing an Android strings.xml document
  # (`render/0`) with the entry list the Android formatter is expected to
  # produce when parsing it / consume when serializing it (`entries/0`).

  defmodule Simple do
    use Langue.Expectation.Case

    def render do
      """
      <?xml version="1.0" encoding="utf-8"?>
      <resources>
        <string name="activity_open_in_chrome">Ouvrir avec Chrome</string>
        <string name="activity_open_in_safari">Ouvrir avec Safari</string>
      </resources>
      """
    end

    def entries do
      [
        %Entry{index: 1, key: "activity_open_in_chrome", value: "Ouvrir avec Chrome"},
        %Entry{index: 2, key: "activity_open_in_safari", value: "Ouvrir avec Safari"}
      ]
    end
  end

  defmodule EmptyValue do
    use Langue.Expectation.Case

    def render do
      """
      <?xml version="1.0" encoding="utf-8"?>
      <resources>
        <string name="activity_open_in_chrome"></string>
      </resources>
      """
    end

    def entries do
      [
        %Entry{index: 1, key: "activity_open_in_chrome", value: "", value_type: "empty"}
      ]
    end
  end

  # An unknown tag is expected to yield no entries.
  defmodule UnsupportedTag do
    use Langue.Expectation.Case

    def render do
      """
      <?xml version="1.0" encoding="utf-8"?>
      <resources>
        <unknown name="activity_open_in_chrome"></unknown>
      </resources>
      """
    end

    def entries do
      []
    end
  end

  # NOTE(review): this nested module shadows Elixir's built-in `RuntimeError`
  # alias for the rest of the enclosing module's scope.
  defmodule RuntimeError do
    use Langue.Expectation.Case

    def render do
      """
      <?xml version="1.0" encoding="utf-8"?>
      <error></error>
      """
    end

    def entries do
      []
    end
  end

  defmodule Commented do
    use Langue.Expectation.Case

    def render do
      """
      <?xml version="1.0" encoding="utf-8"?>
      <resources>
        <!-- Comment -->
        <string name="activity_open_in_chrome">Ouvrir avec Chrome</string>
        <string name="activity_open_in_safari">Ouvrir avec Safari</string>
      </resources>
      """
    end

    def entries do
      [
        %Entry{index: 1, key: "activity_open_in_chrome", value: "Ouvrir avec Chrome", comment: " Comment "},
        %Entry{index: 2, key: "activity_open_in_safari", value: "Ouvrir avec Safari"}
      ]
    end
  end

  # string-array items become synthetic "<name>.__KEY__<n>" keys.
  defmodule Array do
    use Langue.Expectation.Case

    def render do
      """
      <?xml version="1.0" encoding="utf-8"?>
      <resources>
        <string-array name="drawer_menu_array">
          <item>@string/browse_profile_view_title</item>
          <item>@string/find_a_user</item>
        </string-array>
      </resources>
      """
    end

    def entries do
      [
        %Entry{index: 1, key: "drawer_menu_array.__KEY__0", value: "@string/browse_profile_view_title", value_type: "array"},
        %Entry{index: 2, key: "drawer_menu_array.__KEY__1", value: "@string/find_a_user", value_type: "array"}
      ]
    end
  end

  # Android "%s"-style placeholders map to "%@"-style placeholders in entries.
  defmodule StringsFormatEscape do
    use Langue.Expectation.Case

    def render do
      """
      <?xml version="1.0" encoding="utf-8"?>
      <resources>
        <string name="height">Height (%s)</string>
        <string name="agree_terms_policy">By using this application, you agree to the %1$s and %2$s.</string>
      </resources>
      """
    end

    def entries do
      [
        %Entry{index: 1, key: "height", value: "Height (%@)", placeholders: ~w(%@)},
        %Entry{index: 2, key: "agree_terms_policy", value: "By using this application, you agree to the %1$@ and %2$@.", placeholders: ~w(%1$@ %2$@)}
      ]
    end
  end

  # XML entities and escaped apostrophes in the document map to plain
  # characters in the entry value.
  defmodule ValueEscaping do
    use Langue.Expectation.Case

    def render do
      """
      <?xml version="1.0" encoding="utf-8"?>
      <resources>
        <string name="a">Test &amp; 1,2,4 &lt; &gt; j\\'appelle</string>
      </resources>
      """
    end

    def entries do
      [
        %Entry{index: 1, key: "a", value: "Test & 1,2,4 < > j'appelle"}
      ]
    end
  end

  defmodule PlaceholderValues do
    use Langue.Expectation.Case

    def render do
      """
      <?xml version="1.0" encoding="utf-8"?>
      <resources>
        <string name="single">Hello, %s.</string>
        <string name="multiple">Hello, %1$s %2$s.</string>
        <string name="duplicate">Hello, %1$s %2$s. Welcome back %1$s %2$s.</string>
      </resources>
      """
    end

    def entries do
      [
        %Entry{index: 1, key: "single", value: "Hello, %@.", placeholders: ~w(%@)},
        %Entry{index: 2, key: "multiple", value: "Hello, %1$@ %2$@.", placeholders: ~w(%1$@ %2$@)},
        %Entry{index: 3, key: "duplicate", value: "Hello, %1$@ %2$@. Welcome back %1$@ %2$@.", placeholders: ~w(%1$@ %2$@ %1$@ %2$@)}
      ]
    end
  end
end
| 24.815217 | 149 | 0.571616 |
1c7573bd0d353d99ec28599912984a69f666eb6d | 1,629 | ex | Elixir | islands_engine/lib/ie/rules.ex | moritzploss/islands | ae804c4a443469a49a5d54c69d86951e76050205 | [
"MIT"
] | 3 | 2020-04-14T20:21:33.000Z | 2021-03-18T16:47:04.000Z | islands_engine/lib/ie/rules.ex | moritzploss/islands | ae804c4a443469a49a5d54c69d86951e76050205 | [
"MIT"
] | null | null | null | islands_engine/lib/ie/rules.ex | moritzploss/islands | ae804c4a443469a49a5d54c69d86951e76050205 | [
"MIT"
] | null | null | null | defmodule IE.Rules do
alias IE.Rules
# State for the game's rules machine:
#   :state   — overall progress (:initialized -> :players_set ->
#              :player1_turn/:player2_turn -> :game_over, per check/2 below)
#   :player1/:player2 — whether each player has finished placing islands
#              (:islands_not_set | :islands_set)
defstruct [
state: :initialized,
player1: :islands_not_set,
player2: :islands_not_set
]
# Returns a fresh rules struct in the :initialized state.
def new, do: %Rules{}
# True once both players have set their islands, i.e. play may begin.
defp both_players_islands_set?(rules) do
rules.player1 == :islands_set && rules.player2 == :islands_set
end
# Transition function for the rules state machine.
#
# `check(rules, action)` returns `{:ok, updated_rules}` when `action` is
# legal in the current state and `:error` otherwise; anything unmatched
# falls through to the catch-all clause at the bottom.
def check(%Rules{state: :initialized} = rules, :add_player) do
  {:ok, %Rules{rules | state: :players_set}}
end

def check(%Rules{state: :players_set} = rules, {:position_island, player}) do
  # Repositioning is only allowed while that player's islands are not set.
  case Map.fetch!(rules, player) do
    :islands_not_set -> {:ok, rules}
    :islands_set -> :error
  end
end

def check(%Rules{state: :players_set} = rules, {:set_islands, player}) do
  updated = Map.put(rules, player, :islands_set)

  if both_players_islands_set?(updated) do
    # Second player just finished — the game moves to player 1's turn.
    {:ok, %Rules{updated | state: :player1_turn}}
  else
    {:ok, updated}
  end
end

def check(%Rules{state: :player1_turn} = rules, {:guess_coordinate, :player1}) do
  {:ok, %Rules{rules | state: :player2_turn}}
end

def check(%Rules{state: :player2_turn} = rules, {:guess_coordinate, :player2}) do
  {:ok, %Rules{rules | state: :player1_turn}}
end

# Win checks behave identically on both players' turns, so one guarded
# clause replaces the two previously duplicated ones.
def check(%Rules{state: state} = rules, {:win_check, win_or_not})
    when state in [:player1_turn, :player2_turn] do
  case win_or_not do
    :no_win -> {:ok, rules}
    :win -> {:ok, %Rules{rules | state: :game_over}}
  end
end

# Any other state/action combination is illegal.
def check(_state, _action), do: :error
end
| 26.704918 | 83 | 0.641498 |
1c75837507f12afc8fc92b8ea5510d5333d978e7 | 109 | ex | Elixir | lib/battle_box_web/views/marooned_view.ex | GrantJamesPowell/battle_box | 301091955b68cd4672f6513d645eca4e3c4e17d0 | [
"Apache-2.0"
] | 2 | 2020-10-17T05:48:49.000Z | 2020-11-11T02:34:15.000Z | lib/battle_box_web/views/marooned_view.ex | FlyingDutchmanGames/battle_box | 301091955b68cd4672f6513d645eca4e3c4e17d0 | [
"Apache-2.0"
] | 3 | 2020-05-18T05:52:21.000Z | 2020-06-09T07:24:14.000Z | lib/battle_box_web/views/marooned_view.ex | FlyingDutchmanGames/battle_box | 301091955b68cd4672f6513d645eca4e3c4e17d0 | [
"Apache-2.0"
] | null | null | null | defmodule BattleBoxWeb.MaroonedView do
use BattleBoxWeb, :view
import BattleBox.Games.Marooned.Logic
end
| 21.8 | 39 | 0.825688 |
1c75883a203270dd58b10e806381bd4586e6ccd5 | 607 | ex | Elixir | lib/ex_okex/auth.ex | fremantle-capital/ex_okex | 1e5a4ac8c3713a0bfcd8fffc20dce1a9eaacf931 | [
"MIT"
] | null | null | null | lib/ex_okex/auth.ex | fremantle-capital/ex_okex | 1e5a4ac8c3713a0bfcd8fffc20dce1a9eaacf931 | [
"MIT"
] | null | null | null | lib/ex_okex/auth.ex | fremantle-capital/ex_okex | 1e5a4ac8c3713a0bfcd8fffc20dce1a9eaacf931 | [
"MIT"
] | 1 | 2021-02-24T05:03:35.000Z | 2021-02-24T05:03:35.000Z | defmodule ExOkex.Auth do
@moduledoc """
Handle request signing for API Authentication
https://www.okex.com/docs/en/#summary-yan-zheng
"""
@spec timestamp :: String.t()
# Current Unix time in seconds (millisecond precision), rendered as a
# float string — the format OKEx expects in the signing payload.
def timestamp do
  :os.system_time(:millisecond)
  |> Kernel./(1000)
  |> Float.to_string()
end
@spec sign(String.t(), String.t(), String.t(), map | [map], String.t()) :: String.t()
@doc """
Builds the request signature: `Base64(HMAC-SHA256(api_secret, timestamp <> method <> path <> body))`.

`body` is JSON-encoded unless it is empty, in which case the empty string
is signed instead. Returns the Base64-encoded signature string.
"""
def sign(timestamp, method, path, body, api_secret) do
  body = if Enum.empty?(body), do: "", else: Jason.encode!(body)
  data = "#{timestamp}#{method}#{path}#{body}"

  api_secret
  |> hmac_sha256(data)
  |> Base.encode64()
end

# `:crypto.hmac/3` was removed in OTP 24; use `:crypto.mac/4` (OTP >= 22.1)
# when available and fall back to the legacy function on older OTP releases.
defp hmac_sha256(key, data) do
  if function_exported?(:crypto, :mac, 4) do
    :crypto.mac(:hmac, :sha256, key, data)
  else
    :crypto.hmac(:sha256, key, data)
  end
end
end
| 28.904762 | 87 | 0.639209 |
1c759f46e88f6d3a499f8a5da4b2537acb4d3caa | 1,593 | ex | Elixir | installer/templates/ecto/model_case.ex | manukall/phoenix | 5ce19ebb63b1b31e37da5fe6465edb6c4850772e | [
"MIT"
] | 1 | 2019-06-11T20:22:21.000Z | 2019-06-11T20:22:21.000Z | installer/templates/ecto/model_case.ex | DavidAlphaFox/phoenix | 560131ab3b48c6b6cf864a3d20b7667e40990237 | [
"MIT"
] | null | null | null | installer/templates/ecto/model_case.ex | DavidAlphaFox/phoenix | 560131ab3b48c6b6cf864a3d20b7667e40990237 | [
"MIT"
] | null | null | null | defmodule <%= application_module %>.ModelCase do
@moduledoc """
This module defines the test case to be used by
model tests.
You may define functions here to be used as helpers in
your model tests. See `errors_on/2`'s definition as reference.
Finally, if the test case interacts with the database,
it cannot be async. For this reason, every test runs
inside a transaction which is reset at the beginning
of the test unless the test case is marked as async.
"""
use ExUnit.CaseTemplate
# Code injected into every test module that `use`s this case template:
# a Repo alias, Ecto model/query imports, and this module's own helpers
# (e.g. `errors_on/2`). The module names are filled in by the installer.
using do
quote do
alias <%= application_module %>.Repo
import Ecto.Model
import Ecto.Query, only: [from: 2]
import <%= application_module %>.ModelCase
end
end
# Per-test setup. For non-async tests, runs the adapter-specific reset
# statement injected by the installer from `adapter_config[:test_restart]`
# (per the moduledoc, this resets the transaction the test runs in).
setup tags do
unless tags[:async] do
<%= adapter_config[:test_restart] %>
end
:ok
end
@doc """
Helper for returning list of errors in model when passed certain data.
## Examples
Given a User model that lists `:name` as a required field and validates
`:password` to be safe, it would return:
iex> errors_on(%User{}, %{password: "password"})
[password: "is unsafe", name: "is blank"]
You could then write your assertion like:
assert {:password, "is unsafe"} in errors_on(%User{}, %{password: "password"})
You can also create the changeset manually and retrieve the errors
field directly:
iex> changeset = User.changeset(%User{}, password: "password")
iex> {:password, "is unsafe"} in changeset.errors
true
"""
def errors_on(model, data) do
model.__struct__.changeset(model, data).errors
end
end
| 26.55 | 84 | 0.679222 |
1c75b9b9c064ff1323173367206cb2adbf3073ff | 437 | ex | Elixir | lib/membrane-element-rtp-aac/plugin_app.ex | membraneframework/membrane_rtp_aac_plugin | dd6a29dea3671770db4ce8b8a85e8c74feaccb38 | [
"Apache-2.0"
] | null | null | null | lib/membrane-element-rtp-aac/plugin_app.ex | membraneframework/membrane_rtp_aac_plugin | dd6a29dea3671770db4ce8b8a85e8c74feaccb38 | [
"Apache-2.0"
] | 1 | 2021-05-20T00:50:29.000Z | 2021-05-28T13:55:27.000Z | lib/membrane-element-rtp-aac/plugin_app.ex | membraneframework/membrane_rtp_aac_plugin | dd6a29dea3671770db4ce8b8a85e8c74feaccb38 | [
"Apache-2.0"
] | null | null | null | defmodule Membrane.RTP.AAC.Plugin.App do
@moduledoc false
use Application
alias Membrane.RTP.{AAC, PayloadFormat}
# Application callback: registers the AAC payload format and its static
# RTP payload-type mapping (PT 127, third argument 48_000 — presumably the
# clock rate; confirm against PayloadFormat docs), then starts an empty
# supervisor, as the Application behaviour requires start/2 to return a
# supervisor pid.
def start(_type, _args) do
PayloadFormat.register(%PayloadFormat{
encoding_name: :AAC,
payload_type: 127,
depayloader: AAC.Depayloader
})
PayloadFormat.register_payload_type_mapping(127, :AAC, 48_000)
Supervisor.start_link([], strategy: :one_for_one, name: __MODULE__)
end
end
| 25.705882 | 71 | 0.7254 |
1c75bcc01fc192b3ba06424fecb1310b6f00f9d1 | 847 | exs | Elixir | priv/repo/migrations/20190726123917_sync_new_plans_in_stripe.exs | sitedata/sanbase2 | 8da5e44a343288fbc41b68668c6c80ae8547d557 | [
"MIT"
] | 81 | 2017-11-20T01:20:22.000Z | 2022-03-05T12:04:25.000Z | priv/repo/migrations/20190726123917_sync_new_plans_in_stripe.exs | rmoorman/sanbase2 | 226784ab43a24219e7332c49156b198d09a6dd85 | [
"MIT"
] | 359 | 2017-10-15T14:40:53.000Z | 2022-01-25T13:34:20.000Z | priv/repo/migrations/20190726123917_sync_new_plans_in_stripe.exs | sitedata/sanbase2 | 8da5e44a343288fbc41b68668c6c80ae8547d557 | [
"MIT"
] | 16 | 2017-11-19T13:57:40.000Z | 2022-02-07T08:13:02.000Z | defmodule Sanbase.Repo.Migrations.SyncNewPlansInStripe do
use Ecto.Migration
require Sanbase.Utils.Config
alias Sanbase.Utils.Config
alias Sanbase.Billing.{Product, Plan}
alias Sanbase.Repo
# Forward migration — a data migration, not a schema change. Starts the
# applications needed while migrations run, then mirrors every local
# Product and Plan into Stripe. The whole sync is skipped when no Stripe
# API key is configured, so credential-less environments migrate as a no-op.
def up do
Application.ensure_all_started(:tzdata)
Application.ensure_all_started(:prometheus_ecto)
Application.ensure_all_started(:stripity_stripe)
Sanbase.Prometheus.EctoInstrumenter.setup()
stripe_api_key = stripe_api_key()
if stripe_api_key != nil and stripe_api_key != "" do
Product |> Repo.all() |> Enum.map(&Product.maybe_create_product_in_stripe/1)
Plan
|> Repo.all()
|> Repo.preload(:product)
|> Enum.map(&Plan.maybe_create_plan_in_stripe/1)
end
end
# Intentionally a no-op: objects created in Stripe are not removed on rollback.
def down do
:ok
end
# Reads the Stripe API key from application config; may be nil or "" when
# the environment has no Stripe credentials (see the guard in up/0).
defp stripe_api_key() do
Config.module_get(Sanbase.StripeConfig, :api_key)
end
end
| 23.527778 | 82 | 0.72137 |
1c75c94fc5b06cf3b3486a11736aacd2f15c8fc9 | 4,650 | ex | Elixir | lib/elixir/lib/access.ex | montague/elixir | ff2138b05345d0b3136a374259e9c3ba7208e3da | [
"Apache-2.0"
] | null | null | null | lib/elixir/lib/access.ex | montague/elixir | ff2138b05345d0b3136a374259e9c3ba7208e3da | [
"Apache-2.0"
] | null | null | null | lib/elixir/lib/access.ex | montague/elixir | ff2138b05345d0b3136a374259e9c3ba7208e3da | [
"Apache-2.0"
] | null | null | null | defmodule Access do
@moduledoc """
Dictionary-like access to data structures via the `foo[bar]` syntax.
This module also empowers `Kernel`s nested update functions
`Kernel.get_in/2`, `Kernel.put_in/3`, `Kernel.update_in/3` and
`Kernel.get_and_update_in/3`.
## Examples
Out of the box, Access works with built-in dictionaries: `Keyword`
and `Map`:
iex> keywords = [a: 1, b: 2]
iex> keywords[:a]
1
iex> map = %{a: 1, b: 2}
iex> map[:a]
1
iex> star_ratings = %{1.0 => "★", 1.5 => "★☆", 2.0 => "★★"}
iex> star_ratings[1.5]
"★☆"
Furthermore, Access transparently ignores `nil` values:
iex> keywords = [a: 1, b: 2]
iex> keywords[:c][:unknown]
nil
The key comparison must be implemented using the `===` operator.
"""
use Behaviour
# A container that supports access: keyword list, map, or nil.
@type t :: list | map | nil
@type key :: any
@type value :: any
# Behaviour callbacks a custom container's module must implement; structs
# dispatch to their own module via the `__struct__` clauses below.
defcallback fetch(t, key) :: {:ok, value} | :error
defcallback get(t, key, value) :: value
defcallback get_and_update(t, key, (value -> {value, value})) :: {value, t}
@doc """
Fetches value for the given key.
Returns a `{:ok, value}` if the tuple exists,
`:error` otherwise.
"""
@spec fetch(t, key) :: {:ok, value} | :error
def fetch(container, key)
def fetch(%{__struct__: struct} = container, key) do
struct.fetch(container, key)
end
def fetch(%{} = map, key) do
:maps.find(key, map)
end
def fetch(list, key) when is_list(list) do
case :lists.keyfind(key, 1, list) do
{^key, value} -> {:ok, value}
false -> :error
end
end
def fetch(nil, _key) do
:error
end
@doc """
Fetches value for the given key.
Returns `value` or raises otherwise.
"""
def fetch!(dict, key) do
case fetch(dict, key) do
{:ok, value} -> value
:error -> raise KeyError, key: key, term: dict
end
end
@doc """
Gets the container's value for the given key.
"""
@spec get(t, term, term) :: term
def get(container, key, default \\ nil)
def get(%{__struct__: struct} = container, key, default) do
struct.get(container, key, default)
end
def get(%{} = map, key, default) do
case :maps.find(key, map) do
{:ok, value} -> value
:error -> default
end
end
def get(list, key, default) when is_list(list) do
case :lists.keyfind(key, 1, list) do
{^key, value} -> value
false -> default
end
end
def get(nil, _key, default) do
default
end
@doc """
Gets and updates the container's value for the given key, in a single pass.
The argument function `fun` must receive the value for the given `key` (or
`nil` if the key doesn't exist in `container`). It must return a tuple
containing the `get` value and the new value to be stored in the `container`.
This function returns a two-element tuple.
The first element is the `get` value, as returned by `fun`.
The second element is the container, updated with the value returned by `fun`.
"""
@spec get_and_update(t, term, (term -> {get, term})) :: {get, t} when get: var
def get_and_update(container, key, fun)
def get_and_update(%{__struct__: struct} = container, key, fun) do
struct.get_and_update(container, key, fun)
end
def get_and_update(%{} = map, key, fun) do
current_value = case :maps.find(key, map) do
{:ok, value} -> value
:error -> nil
end
{get, update} = fun.(current_value)
{get, :maps.put(key, update, map)}
end
def get_and_update(list, key, fun) when is_list(list) do
Keyword.get_and_update(list, key, fun)
end
def get_and_update(nil, key, _fun) do
raise ArgumentError,
"could not put/update key #{inspect key} on a nil value"
end
end
# Callbacks invoked when inlining code for *_in in Kernel.
# TODO: Remove me on 1.2
defmodule Access.Map do
  @moduledoc false

  # Replaces the value under `key` with `fun.(value)`. Raises `KeyError`
  # when the key is absent, and `ArgumentError` for non-map containers.
  def update!(%{} = map, key, fun) do
    case Map.fetch(map, key) do
      {:ok, value} -> Map.put(map, key, fun.(value))
      :error -> raise KeyError, key: key, term: map
    end
  end

  def update!(other, key, _fun) do
    raise ArgumentError,
          "could not put/update key #{inspect key}. Expected map/struct, got: #{inspect other}"
  end

  # Like `update!/3`, but `fun` returns `{get, new_value}` and the result
  # is `{get, updated_map}`.
  def get_and_update!(%{} = map, key, fun) do
    case Map.fetch(map, key) do
      {:ok, value} ->
        {get, update} = fun.(value)
        {get, Map.put(map, key, update)}

      :error ->
        raise KeyError, key: key, term: map
    end
  end

  def get_and_update!(other, key, _fun) do
    raise ArgumentError,
          "could not put/update key #{inspect key}. Expected map/struct, got: #{inspect other}"
  end
end
| 25.135135 | 91 | 0.616989 |
1c75eaac2b22266ce114f70672ea6a20a3fa2fb4 | 3,036 | ex | Elixir | lib/secure.ex | manelli/secure.ex | 034843a9a9df92275c268a276637b6264399e275 | [
"MIT"
] | 2 | 2016-05-26T13:19:12.000Z | 2016-05-26T23:19:22.000Z | lib/secure.ex | manelli/secure.ex | 034843a9a9df92275c268a276637b6264399e275 | [
"MIT"
] | null | null | null | lib/secure.ex | manelli/secure.ex | 034843a9a9df92275c268a276637b6264399e275 | [
"MIT"
] | null | null | null | defmodule Secure do
use Bitwise
@spec random_bytes(pos_integer) :: binary
@doc """
Generates a binary of cryptographically strong random bytes.

Returns a binary of 16 random bytes by default.
See: http://erlang.org/doc/man/crypto.html#strong_rand_bytes-1
"""
def random_bytes(n \\ 16) do
:crypto.strong_rand_bytes(n)
end
@spec base64(integer) :: bitstring
@doc """
Generates a random base64 string.

The argument specifies the length, in bytes,
of the random number to be generated (defaults to 16).
"""
def base64(n \\ 16) do
# :base64.encode_to_string/1 returns an Erlang charlist; to_string/1
# converts it into an Elixir binary.
random_bytes(n)
|> :base64.encode_to_string
|> to_string
end
@spec urlsafe_base64(pos_integer) :: binary
@doc """
Generates a random URL-safe base64 string.

The argument specifies the length, in bytes,
of the random number to be generated (defaults to 16).
"""
def urlsafe_base64(n \\ 16) do
# Strip newlines and '=' padding, then swap the two non-URL-safe base64
# characters for their URL-safe counterparts ('+' -> '-', '/' -> '_').
base64(n)
|> String.replace(~r/[\n\=]/, "")
|> String.replace(~r/\+/, "-")
|> String.replace(~r/\//, "_")
end
@spec compare(binary | nil, binary | nil) :: boolean
@doc """
Constant time string comparison.

This function executes in constant time only
when the two strings have the same length.
It short-circuits when they have different lengths.
Note that this does not prevent an attacker
from discovering the length of the strings.

Returns `true` if the two strings are equal, `false` otherwise.
"""
def compare(x, y) when is_nil(x) or is_nil(y), do: false

def compare(x, y) when is_binary(x) and is_binary(y) do
  if byte_size(x) != byte_size(y) do
    false
  else
    # Compare raw bytes rather than Unicode codepoints: the previous
    # String.to_char_list/1 version mixed a byte-length check with a
    # codepoint-wise comparison, and that function is deprecated/removed
    # in modern Elixir. OR together the XOR of every byte pair so the
    # loop always covers the whole string, then test for zero once.
    diff =
      x
      |> :binary.bin_to_list()
      |> Enum.zip(:binary.bin_to_list(y))
      |> Enum.reduce(0, fn {xb, yb}, acc ->
        :erlang.bor(acc, :erlang.bxor(xb, yb))
      end)

    diff == 0
  end
end
@spec uuid() :: bitstring
@doc """
Generates a version 4 (random) UUID.

This type of UUID doesn’t contain meaningful information
such as MAC address, time, etc.
"""
def uuid do
# 16 random bytes with version/variant bits set, then hex-formatted
# into the canonical 8-4-4-4-12 string.
bingenerate() |> encode
end
# Generates a version 4 (random) UUID in the binary format.
# See: https://hexdocs.pm/ecto/Ecto.UUID.html#bingenerate/0
@spec bingenerate() :: bitstring
defp bingenerate do
# Overwrite the 4 version bits with 4 (0100) and the 2 variant bits
# with 2 (10), keeping the remaining 122 random bits.
<<u0::48, _::4, u1::12, _::2, u2::62>> = :crypto.strong_rand_bytes(16)
<<u0::48, 4::4, u1::12, 2::2, u2::62>>
end
# Encodes binary version 4 (random) UUID to a formatted string
# in downcased 8-4-4-4-12 format.
@spec encode(bitstring) :: bitstring
defp encode(<<u0::32, u1::16, u2::16, u3::16, u4::48>>) do
hex_pad(u0, 8) <> "-" <>
hex_pad(u1, 4) <> "-" <>
hex_pad(u2, 4) <> "-" <>
hex_pad(u3, 4) <> "-" <>
hex_pad(u4, 12)
end
# Hex-encodes `hex`, left-padding with zeros to `count` characters.
defp hex_pad(hex, count) do
hex = Integer.to_string(hex, 16)
lower(hex, :binary.copy("0", count - byte_size(hex)))
end
# Appends each character of the first argument to `acc`, lowercasing A-F.
# `acc` starts as the zero padding, so the result is padding <> lowered hex.
defp lower(<<h, t::binary>>, acc) when h in ?A..?F,
do: lower(t, acc <> <<h + 32>>)
defp lower(<<h, t::binary>>, acc),
do: lower(t, acc <> <<h>>)
defp lower(<<>>, acc), do: acc
end
| 27.107143 | 82 | 0.630105 |
1c75f45792255d7d7f43355edc9d3fb6507cc8ae | 19,376 | ex | Elixir | clients/calendar/lib/google_api/calendar/v3/api/calendar_list.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | clients/calendar/lib/google_api/calendar/v3/api/calendar_list.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | clients/calendar/lib/google_api/calendar/v3/api/calendar_list.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the swagger code generator program.
# https://github.com/swagger-api/swagger-codegen.git
# Do not edit the class manually.
defmodule GoogleApi.Calendar.V3.Api.CalendarList do
@moduledoc """
API calls for all endpoints tagged `CalendarList`.
"""
alias GoogleApi.Calendar.V3.Connection
alias GoogleApi.Gax.{Request, Response}
@doc """
Removes a calendar from the user's calendar list.
## Parameters
- connection (GoogleApi.Calendar.V3.Connection): Connection to server
- calendar_id (String.t): Calendar identifier. To retrieve calendar IDs call the calendarList.list method. If you want to access the primary calendar of the currently logged in user, use the \"primary\" keyword.
- optional_params (KeywordList): [optional] Optional parameters
- :alt (String.t): Data format for the response.
- :fields (String.t): Selector specifying which fields to include in a partial response.
- :key (String.t): API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
- :oauth_token (String.t): OAuth 2.0 token for the current user.
- :prettyPrint (boolean()): Returns response with indentations and line breaks.
- :quotaUser (String.t): An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
- :userIp (String.t): Deprecated. Please use quotaUser instead.
## Returns
{:ok, %{}} on success
{:error, info} on failure
"""
@spec calendar_calendar_list_delete(Tesla.Env.client(), String.t(), keyword()) ::
{:ok, nil} | {:error, Tesla.Env.t()}
def calendar_calendar_list_delete(connection, calendar_id, optional_params \\ [], opts \\ []) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query
}
request =
Request.new()
|> Request.method(:delete)
|> Request.url("/users/me/calendarList/{calendarId}", %{
"calendarId" => URI.encode_www_form(calendar_id)
})
|> Request.add_optional_params(optional_params_config, optional_params)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [decode: false])
end
@doc """
Returns a calendar from the user's calendar list.
## Parameters
- connection (GoogleApi.Calendar.V3.Connection): Connection to server
- calendar_id (String.t): Calendar identifier. To retrieve calendar IDs call the calendarList.list method. If you want to access the primary calendar of the currently logged in user, use the \"primary\" keyword.
- optional_params (KeywordList): [optional] Optional parameters
- :alt (String.t): Data format for the response.
- :fields (String.t): Selector specifying which fields to include in a partial response.
- :key (String.t): API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
- :oauth_token (String.t): OAuth 2.0 token for the current user.
- :prettyPrint (boolean()): Returns response with indentations and line breaks.
- :quotaUser (String.t): An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
- :userIp (String.t): Deprecated. Please use quotaUser instead.
## Returns
{:ok, %GoogleApi.Calendar.V3.Model.CalendarListEntry{}} on success
{:error, info} on failure
"""
@spec calendar_calendar_list_get(Tesla.Env.client(), String.t(), keyword()) ::
{:ok, GoogleApi.Calendar.V3.Model.CalendarListEntry.t()} | {:error, Tesla.Env.t()}
def calendar_calendar_list_get(connection, calendar_id, optional_params \\ [], opts \\ []) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url("/users/me/calendarList/{calendarId}", %{
"calendarId" => URI.encode_www_form(calendar_id)
})
|> Request.add_optional_params(optional_params_config, optional_params)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Calendar.V3.Model.CalendarListEntry{}])
end
@doc """
Inserts an existing calendar into the user's calendar list.
## Parameters
- connection (GoogleApi.Calendar.V3.Connection): Connection to server
- optional_params (KeywordList): [optional] Optional parameters
- :alt (String.t): Data format for the response.
- :fields (String.t): Selector specifying which fields to include in a partial response.
- :key (String.t): API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
- :oauth_token (String.t): OAuth 2.0 token for the current user.
- :prettyPrint (boolean()): Returns response with indentations and line breaks.
- :quotaUser (String.t): An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
- :userIp (String.t): Deprecated. Please use quotaUser instead.
- :colorRgbFormat (boolean()): Whether to use the foregroundColor and backgroundColor fields to write the calendar colors (RGB). If this feature is used, the index-based colorId field will be set to the best matching option automatically. Optional. The default is False.
- :body (CalendarListEntry):
## Returns
{:ok, %GoogleApi.Calendar.V3.Model.CalendarListEntry{}} on success
{:error, info} on failure
"""
@spec calendar_calendar_list_insert(Tesla.Env.client(), keyword()) ::
{:ok, GoogleApi.Calendar.V3.Model.CalendarListEntry.t()} | {:error, Tesla.Env.t()}
def calendar_calendar_list_insert(connection, optional_params \\ [], opts \\ []) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:colorRgbFormat => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:post)
|> Request.url("/users/me/calendarList")
|> Request.add_optional_params(optional_params_config, optional_params)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Calendar.V3.Model.CalendarListEntry{}])
end
@doc """
Returns the calendars on the user's calendar list.
## Parameters
- connection (GoogleApi.Calendar.V3.Connection): Connection to server
- optional_params (KeywordList): [optional] Optional parameters
- :alt (String.t): Data format for the response.
- :fields (String.t): Selector specifying which fields to include in a partial response.
- :key (String.t): API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
- :oauth_token (String.t): OAuth 2.0 token for the current user.
- :prettyPrint (boolean()): Returns response with indentations and line breaks.
- :quotaUser (String.t): An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
- :userIp (String.t): Deprecated. Please use quotaUser instead.
- :maxResults (integer()): Maximum number of entries returned on one result page. By default the value is 100 entries. The page size can never be larger than 250 entries. Optional.
- :minAccessRole (String.t): The minimum access role for the user in the returned entries. Optional. The default is no restriction.
- :pageToken (String.t): Token specifying which result page to return. Optional.
- :showDeleted (boolean()): Whether to include deleted calendar list entries in the result. Optional. The default is False.
- :showHidden (boolean()): Whether to show hidden entries. Optional. The default is False.
- :syncToken (String.t): Token obtained from the nextSyncToken field returned on the last page of results from the previous list request. It makes the result of this list request contain only entries that have changed since then. If only read-only fields such as calendar properties or ACLs have changed, the entry won't be returned. All entries deleted and hidden since the previous list request will always be in the result set and it is not allowed to set showDeleted neither showHidden to False. To ensure client state consistency minAccessRole query parameter cannot be specified together with nextSyncToken. If the syncToken expires, the server will respond with a 410 GONE response code and the client should clear its storage and perform a full synchronization without any syncToken. Learn more about incremental synchronization. Optional. The default is to return all entries.
## Returns
{:ok, %GoogleApi.Calendar.V3.Model.CalendarList{}} on success
{:error, info} on failure
"""
@spec calendar_calendar_list_list(Tesla.Env.client(), keyword()) ::
{:ok, GoogleApi.Calendar.V3.Model.CalendarList.t()} | {:error, Tesla.Env.t()}
def calendar_calendar_list_list(connection, optional_params \\ [], opts \\ []) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:maxResults => :query,
:minAccessRole => :query,
:pageToken => :query,
:showDeleted => :query,
:showHidden => :query,
:syncToken => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url("/users/me/calendarList")
|> Request.add_optional_params(optional_params_config, optional_params)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Calendar.V3.Model.CalendarList{}])
end
@doc """
Updates an existing calendar on the user's calendar list. This method supports patch semantics.
## Parameters
- connection (GoogleApi.Calendar.V3.Connection): Connection to server
- calendar_id (String.t): Calendar identifier. To retrieve calendar IDs call the calendarList.list method. If you want to access the primary calendar of the currently logged in user, use the \"primary\" keyword.
- optional_params (KeywordList): [optional] Optional parameters
- :alt (String.t): Data format for the response.
- :fields (String.t): Selector specifying which fields to include in a partial response.
- :key (String.t): API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
- :oauth_token (String.t): OAuth 2.0 token for the current user.
- :prettyPrint (boolean()): Returns response with indentations and line breaks.
- :quotaUser (String.t): An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
- :userIp (String.t): Deprecated. Please use quotaUser instead.
- :colorRgbFormat (boolean()): Whether to use the foregroundColor and backgroundColor fields to write the calendar colors (RGB). If this feature is used, the index-based colorId field will be set to the best matching option automatically. Optional. The default is False.
- :body (CalendarListEntry):
## Returns
{:ok, %GoogleApi.Calendar.V3.Model.CalendarListEntry{}} on success
{:error, info} on failure
"""
@spec calendar_calendar_list_patch(Tesla.Env.client(), String.t(), keyword()) ::
{:ok, GoogleApi.Calendar.V3.Model.CalendarListEntry.t()} | {:error, Tesla.Env.t()}
def calendar_calendar_list_patch(connection, calendar_id, optional_params \\ [], opts \\ []) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:colorRgbFormat => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:patch)
|> Request.url("/users/me/calendarList/{calendarId}", %{
"calendarId" => URI.encode_www_form(calendar_id)
})
|> Request.add_optional_params(optional_params_config, optional_params)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Calendar.V3.Model.CalendarListEntry{}])
end
@doc """
Updates an existing calendar on the user's calendar list.
## Parameters
- connection (GoogleApi.Calendar.V3.Connection): Connection to server
- calendar_id (String.t): Calendar identifier. To retrieve calendar IDs call the calendarList.list method. If you want to access the primary calendar of the currently logged in user, use the \"primary\" keyword.
- optional_params (KeywordList): [optional] Optional parameters
- :alt (String.t): Data format for the response.
- :fields (String.t): Selector specifying which fields to include in a partial response.
- :key (String.t): API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
- :oauth_token (String.t): OAuth 2.0 token for the current user.
- :prettyPrint (boolean()): Returns response with indentations and line breaks.
- :quotaUser (String.t): An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
- :userIp (String.t): Deprecated. Please use quotaUser instead.
- :colorRgbFormat (boolean()): Whether to use the foregroundColor and backgroundColor fields to write the calendar colors (RGB). If this feature is used, the index-based colorId field will be set to the best matching option automatically. Optional. The default is False.
- :body (CalendarListEntry):
## Returns
{:ok, %GoogleApi.Calendar.V3.Model.CalendarListEntry{}} on success
{:error, info} on failure
"""
@spec calendar_calendar_list_update(Tesla.Env.client(), String.t(), keyword()) ::
{:ok, GoogleApi.Calendar.V3.Model.CalendarListEntry.t()} | {:error, Tesla.Env.t()}
def calendar_calendar_list_update(connection, calendar_id, optional_params \\ [], opts \\ []) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:colorRgbFormat => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:put)
|> Request.url("/users/me/calendarList/{calendarId}", %{
"calendarId" => URI.encode_www_form(calendar_id)
})
|> Request.add_optional_params(optional_params_config, optional_params)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Calendar.V3.Model.CalendarListEntry{}])
end
@doc """
Watch for changes to CalendarList resources.

## Parameters

- connection (GoogleApi.Calendar.V3.Connection): Connection to server
- optional_params (KeywordList): [optional] Optional parameters
  - standard options: :alt, :fields, :key, :oauth_token, :prettyPrint, :quotaUser, :userIp
  - :maxResults (integer()): Maximum number of entries returned on one result page (default 100, never more than 250). Optional.
  - :minAccessRole (String.t): The minimum access role for the user in the returned entries. Optional.
  - :pageToken (String.t): Token specifying which result page to return. Optional.
  - :showDeleted (boolean()): Whether to include deleted calendar list entries. Optional, default False.
  - :showHidden (boolean()): Whether to show hidden entries. Optional, default False.
  - :syncToken (String.t): nextSyncToken from a previous list request, for incremental synchronization. Optional.
  - :resource (Channel): the notification channel definition, sent as the request body.

## Returns

{:ok, %GoogleApi.Calendar.V3.Model.Channel{}} on success
{:error, info} on failure
"""
@spec calendar_calendar_list_watch(Tesla.Env.client(), keyword()) ::
        {:ok, GoogleApi.Calendar.V3.Model.Channel.t()} | {:error, Tesla.Env.t()}
def calendar_calendar_list_watch(connection, optional_params \\ [], opts \\ []) do
  # Where each accepted optional parameter is placed in the outgoing request:
  # everything is a query parameter except :resource, which is the request payload.
  param_locations = %{
    :alt => :query,
    :fields => :query,
    :key => :query,
    :oauth_token => :query,
    :prettyPrint => :query,
    :quotaUser => :query,
    :userIp => :query,
    :maxResults => :query,
    :minAccessRole => :query,
    :pageToken => :query,
    :showDeleted => :query,
    :showHidden => :query,
    :syncToken => :query,
    :resource => :body
  }

  # POST /users/me/calendarList/watch has no path parameters.
  request =
    Request.new()
    |> Request.method(:post)
    |> Request.url("/users/me/calendarList/watch")
    |> Request.add_optional_params(param_locations, optional_params)

  # Execute the call and decode the JSON response into a Channel struct.
  response = Connection.execute(connection, request)
  Response.decode(response, opts ++ [struct: %GoogleApi.Calendar.V3.Model.Channel{}])
end
end
| 50.327273 | 893 | 0.701073 |
1c75f6e1ce03a57baef16d4ceb91657a7a0bdaba | 1,846 | exs | Elixir | clients/compute/mix.exs | ukrbublik/elixir-google-api | 364cec36bc76f60bec94cbcad34844367a29d174 | [
"Apache-2.0"
] | null | null | null | clients/compute/mix.exs | ukrbublik/elixir-google-api | 364cec36bc76f60bec94cbcad34844367a29d174 | [
"Apache-2.0"
] | null | null | null | clients/compute/mix.exs | ukrbublik/elixir-google-api | 364cec36bc76f60bec94cbcad34844367a29d174 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Compute.Mixfile do
  use Mix.Project

  @version "0.24.0"
  @source_url "https://github.com/googleapis/elixir-google-api/tree/master/clients/compute"

  # Mix project definition for the generated Compute Engine API client.
  def project do
    [
      app: :google_api_compute,
      version: @version,
      elixir: "~> 1.6",
      build_embedded: Mix.env == :prod,
      start_permanent: Mix.env == :prod,
      description: description(),
      package: package(),
      deps: deps(),
      source_url: @source_url
    ]
  end

  # No supervision tree of its own; only :logger is needed at runtime.
  def application, do: [extra_applications: [:logger]]

  defp deps do
    [
      {:google_gax, "~> 0.4"},
      {:ex_doc, "~> 0.16", only: :dev}
    ]
  end

  # Hex package description.
  defp description do
    """
    Compute Engine API client library. Creates and runs virtual machines on Google Cloud Platform.
    """
  end

  # Hex package metadata.
  defp package do
    [
      files: ["lib", "mix.exs", "README*", "LICENSE"],
      maintainers: ["Jeff Ching", "Daniel Azuma"],
      licenses: ["Apache 2.0"],
      links: %{
        "GitHub" => @source_url,
        "Homepage" => "https://developers.google.com/compute/docs/reference/latest/"
      }
    ]
  end
end
| 27.552239 | 98 | 0.656013 |
1c75f781ddc0e6d173ea4ddd179f9f82a6d969d7 | 1,254 | exs | Elixir | config/prod.secret.exs | rheubach/live-share-prop | b92bcaca16fc42e31a3edb5d8922cbfcb2cceb88 | [
"MIT"
] | 68 | 2020-02-13T15:17:39.000Z | 2022-03-29T23:21:19.000Z | config/prod.secret.exs | rheubach/live-share-prop | b92bcaca16fc42e31a3edb5d8922cbfcb2cceb88 | [
"MIT"
] | 30 | 2020-04-16T17:21:22.000Z | 2021-09-20T21:47:12.000Z | config/prod.secret.exs | rheubach/live-share-prop | b92bcaca16fc42e31a3edb5d8922cbfcb2cceb88 | [
"MIT"
] | 7 | 2020-06-07T16:19:14.000Z | 2022-02-09T23:31:25.000Z | # In this file, we load production configuration and secrets
# Production configuration and secrets, loaded from environment variables.
# Secrets could be hardcoded here instead, but that is generally not
# recommended and requires adding this file to your .gitignore.
use Mix.Config

# Fetches a required environment variable, aborting with `message` when unset.
require_env! = fn name, message ->
  System.get_env(name) || raise message
end

database_url =
  require_env!.("DATABASE_URL", """
  environment variable DATABASE_URL is missing.
  For example: ecto://USER:PASS@HOST/DATABASE
  """)

config :live_deck, LiveDeck.Repo,
  # ssl: true,
  url: database_url,
  pool_size: String.to_integer(System.get_env("POOL_SIZE") || "10")

secret_key_base =
  require_env!.("SECRET_KEY_BASE", """
  environment variable SECRET_KEY_BASE is missing.
  You can generate one by calling: mix phx.gen.secret
  """)

config :live_deck, LiveDeckWeb.Endpoint,
  http: [
    port: String.to_integer(System.get_env("PORT") || "4000"),
    transport_options: [socket_opts: [:inet6]]
  ],
  secret_key_base: secret_key_base

# ## Using releases (Elixir v1.9+)
#
# If you are doing OTP releases, you need to instruct Phoenix
# to start each relevant endpoint:
#
#     config :live_deck, LiveDeckWeb.Endpoint, server: true
#
# Then you can assemble a release by calling `mix release`.
# See `mix help release` for more information.
| 29.857143 | 67 | 0.720893 |
1c7600eae4af77d352e9ae6610a0a793a1a64f27 | 1,568 | exs | Elixir | apps/omg_db/mix.exs | PinkDiamond1/elixir-omg | 70dfd24a0a1ddf5d1d9d71aab61ea25300f889f7 | [
"Apache-2.0"
] | null | null | null | apps/omg_db/mix.exs | PinkDiamond1/elixir-omg | 70dfd24a0a1ddf5d1d9d71aab61ea25300f889f7 | [
"Apache-2.0"
] | null | null | null | apps/omg_db/mix.exs | PinkDiamond1/elixir-omg | 70dfd24a0a1ddf5d1d9d71aab61ea25300f889f7 | [
"Apache-2.0"
] | 1 | 2021-12-04T00:37:46.000Z | 2021-12-04T00:37:46.000Z | defmodule OMG.DB.MixProject do
use Mix.Project
# Umbrella child project: build, config, deps and lockfile paths all point
# at the umbrella root two levels up.
def project do
[
app: :omg_db,
# Version string is read from the umbrella-root VERSION file at compile time.
version: "#{String.trim(File.read!("../../VERSION"))}",
build_path: "../../_build",
config_path: "../../config/config.exs",
deps_path: "../../deps",
lockfile: "../../mix.lock",
elixir: "~> 1.6",
elixirc_paths: elixirc_paths(Mix.env()),
start_permanent: Mix.env() == :prod,
# rocksdb is appended conditionally; see rocksdb/0 below.
deps: deps() ++ rocksdb(),
test_coverage: [tool: ExCoveralls]
]
end
# OTP application settings: callback module, extra runtime applications,
# and a start phase named :attach_telemetry.
def application do
[
extra_applications: [:logger, :telemetry],
start_phases: [{:attach_telemetry, []}],
mod: {OMG.DB.Application, []}
]
end
# Specifies which paths to compile per environment.
defp elixirc_paths(:prod), do: ["lib"]
defp elixirc_paths(_), do: ["lib", "test/support"]
defp deps do
[
{:exleveldb, "~> 0.11"},
{:omg_status, in_umbrella: true},
# NOTE: we only need in :dev and :test here, but we need in :prod too in performance
# then there's some unexpected behavior of mix that won't allow to mix these, see
# [here](https://elixirforum.com/t/mix-dependency-is-not-locked-error-when-building-with-edeliver/7069/3)
# OMG-373 (Elixir 1.8) should fix this
# TEST ONLY
{:briefly, "~> 0.3.0", only: [:dev, :test], runtime: false},
{:telemetry, "~> 0.4.0"},
{:omg_utils, in_umbrella: true}
]
end
# Allows opting out of the rocksdb dependency by setting the
# EXCLUDE_ROCKSDB environment variable (any value).
defp rocksdb do
case System.get_env("EXCLUDE_ROCKSDB") do
nil -> [{:rocksdb, "~> 1.2"}]
_ -> []
end
end
end
| 29.037037 | 117 | 0.575893 |
1c760140c6e112164a7083d9b6af0a677507b884 | 7,171 | exs | Elixir | test/plug/adapters/cowboy_test.exs | husafwan/plug | c9e91717ae3a624d94fc6e917db938731526e72a | [
"Apache-2.0"
] | null | null | null | test/plug/adapters/cowboy_test.exs | husafwan/plug | c9e91717ae3a624d94fc6e917db938731526e72a | [
"Apache-2.0"
] | null | null | null | test/plug/adapters/cowboy_test.exs | husafwan/plug | c9e91717ae3a624d94fc6e917db938731526e72a | [
"Apache-2.0"
] | null | null | null | defmodule Plug.Adapters.CowboyTest do
use ExUnit.Case, async: true
import Plug.Adapters.Cowboy
@moduletag :cowboy1
# Plug-style init/1 for when this test module itself is used as the plug;
# the returned keyword list is the plug's options.
def init([]) do
[foo: :bar]
end
# Expected cowboy dispatch table: match any host and any path, handing
# requests to Plug.Adapters.Cowboy.Handler with {plug module, plug opts}.
@dispatch [{:_, [], [
{:_, [], Plug.Adapters.Cowboy.Handler, {Plug.Adapters.CowboyTest, [foo: :bar]}}
]}]
# Only run on Elixir versions that provide Supervisor.child_spec/2 (>= 1.5).
if function_exported?(Supervisor, :child_spec, 2) do
test "supports Elixir v1.5 child specs" do
spec = {Plug.Adapters.Cowboy, [scheme: :http, plug: __MODULE__, options: [port: 4040]]}
assert %{id: {:ranch_listener_sup, Plug.Adapters.CowboyTest.HTTP},
modules: [:ranch_listener_sup],
restart: :permanent,
shutdown: :infinity,
start: {:ranch_listener_sup, :start_link, _},
type: :supervisor} = Supervisor.child_spec(spec, [])
end
end
# Defaults: 100 acceptors, port 4000, max_connections 16_384.
test "builds args for cowboy dispatch" do
assert [Plug.Adapters.CowboyTest.HTTP,
100,
[port: 4000, max_connections: 16_384],
[env: [dispatch: @dispatch], onresponse: _]] =
args(:http, __MODULE__, [], [])
end
# :acceptors and unknown options flow into the transport option list.
test "builds args with custom options" do
assert [Plug.Adapters.CowboyTest.HTTP,
25,
[max_connections: 16_384, port: 3000, other: true],
[env: [dispatch: @dispatch], onresponse: _]] =
args(:http, __MODULE__, [], [port: 3000, acceptors: 25, other: true])
end
# Bare atoms and n-tuples (not 2-tuples) are passed through untouched.
test "builds args with non 2-element tuple options" do
assert [Plug.Adapters.CowboyTest.HTTP,
25,
[:inet6, {:raw, 1, 2, 3}, max_connections: 16_384, port: 3000, other: true],
[env: [dispatch: @dispatch], onresponse: _]] =
args(:http, __MODULE__, [], [:inet6, {:raw, 1, 2, 3}, port: 3000, acceptors: 25, other: true])
end
# Protocol options may be given inline (e.g. :compress, :timeout) or
# grouped under :protocol_options; both end up in the protocol list.
test "builds args with protocol option" do
assert [Plug.Adapters.CowboyTest.HTTP,
25,
[max_connections: 16_384, port: 3000],
[env: [dispatch: @dispatch], onresponse: _, compress: true, timeout: 30_000]] =
args(:http, __MODULE__, [], [port: 3000, acceptors: 25, compress: true, timeout: 30_000])
assert [Plug.Adapters.CowboyTest.HTTP,
25,
[max_connections: 16_384, port: 3000],
[env: [dispatch: @dispatch], onresponse: _, timeout: 30_000]] =
args(:http, __MODULE__, [], [port: 3000, acceptors: 25, protocol_options: [timeout: 30_000]])
end
test "builds args with single-atom protocol option" do
assert [Plug.Adapters.CowboyTest.HTTP,
25,
[:inet6, max_connections: 16_384, port: 3000],
[env: [dispatch: @dispatch], onresponse: _]] =
args(:http, __MODULE__, [], [:inet6, port: 3000, acceptors: 25])
end
# child_spec/4 returns an old-style (tuple) supervisor child specification.
test "builds child specs" do
assert {{:ranch_listener_sup, Plug.Adapters.CowboyTest.HTTP},
{:ranch_listener_sup, :start_link, _},
:permanent,
:infinity,
:supervisor,
[:ranch_listener_sup]} = child_spec(:http, __MODULE__, [], [])
end
# How args/4 composes cowboy's :onresponse protocol option with the default
# handler controlled by the :log_error_on_incomplete_requests option.
describe "onresponse handling" do
# With no options, a default onresponse function is installed.
test "includes the default onresponse handler" do
assert [Plug.Adapters.CowboyTest.HTTP,
_,
_,
[env: [dispatch: @dispatch], onresponse: on_response]] =
args(:http, __MODULE__, [], [])
assert is_function(on_response)
end
# Disabling the option removes :onresponse entirely.
test "elides the default onresponse handler if log_error_on_incomplete_requests is set to false" do
assert [Plug.Adapters.CowboyTest.HTTP,
_,
_,
[env: [dispatch: @dispatch]]] =
args(:http, __MODULE__, [], [log_error_on_incomplete_requests: false])
end
# With the default disabled, a user-provided fun is passed through unchanged.
test "elides the default onresponse handler if log_error_on_incomplete_requests is set to false and includes the user-provided onresponse handler" do
my_onresponse = fn (_, _, _, req) -> req end
assert [Plug.Adapters.CowboyTest.HTTP,
_,
_,
[env: [dispatch: @dispatch], onresponse: on_response]] =
args(:http, __MODULE__, [], [log_error_on_incomplete_requests: false, protocol_options: [onresponse: my_onresponse]])
assert is_function(on_response)
assert on_response == my_onresponse
end
# A user-provided {module, function} tuple is converted to a function.
test "elides the default onresponse handler if log_error_on_incomplete_requests is set to false and handles user-provided onresponse tuple " do
my_onresponse = {Plug.Adapters.CowboyTest, :my_onresponse_handler}
assert [Plug.Adapters.CowboyTest.HTTP,
_,
_,
[env: [dispatch: @dispatch], onresponse: default_response]] =
args(:http, __MODULE__, [], [])
assert [Plug.Adapters.CowboyTest.HTTP,
_,
_,
[env: [dispatch: @dispatch], onresponse: on_response]] =
args(:http, __MODULE__, [], [log_error_on_incomplete_requests: false, protocol_options: [onresponse: my_onresponse]])
assert is_function(on_response)
assert on_response != default_response
end
# With the default enabled, a user fun is wrapped: the resulting handler is
# neither the default nor the user fun itself.
test "includes the default onresponse handler and the user-provided onresponse handler" do
# Grab a ref to the default onresponse handler
assert [Plug.Adapters.CowboyTest.HTTP,
_,
_,
[env: [dispatch: @dispatch], onresponse: default_response]] =
args(:http, __MODULE__, [], [])
my_onresponse = fn (_, _, _, req) -> req end
assert [Plug.Adapters.CowboyTest.HTTP,
_,
_,
[env: [dispatch: @dispatch], onresponse: on_response]] =
args(:http, __MODULE__, [], [protocol_options: [onresponse: my_onresponse]])
assert is_function(on_response)
assert on_response != default_response
assert on_response != my_onresponse
end
# Same wrapping behavior when the user handler is a {module, function} tuple.
test "includes the default onresponse handler and handles the user-provided onresponse handler tuple" do
# Grab a ref to the default onresponse handler
assert [Plug.Adapters.CowboyTest.HTTP,
_,
_,
[env: [dispatch: @dispatch], onresponse: default_response]] =
args(:http, __MODULE__, [], [])
my_onresponse = {Plug.Adapters.CowboyTest, :my_onresponse_handler}
assert [Plug.Adapters.CowboyTest.HTTP,
_,
_,
[env: [dispatch: @dispatch], onresponse: on_response]] =
args(:http, __MODULE__, [], [protocol_options: [onresponse: my_onresponse]])
assert is_function(on_response)
assert on_response != default_response
assert on_response != my_onresponse
end
end
# Minimal plug stub used by the HTTPS error test below; init/1 simply
# returns the options it was given, as the Plug contract requires.
defmodule MyPlug do
  def init(options) do
    options
  end
end
# Starting HTTPS must raise: first when no key material is configured at all,
# then when the configured keyfile path does not exist on disk.
test "errors when trying to run on https" do
assert_raise ArgumentError, ~r/missing option :key\/:keyfile/, fn ->
Plug.Adapters.Cowboy.https MyPlug, [], []
end
assert_raise ArgumentError, ~r/ssl\/key\.pem required by SSL's :keyfile either does not exist/, fn ->
Plug.Adapters.Cowboy.https MyPlug, [],
keyfile: "priv/ssl/key.pem",
certfile: "priv/ssl/cert.pem",
otp_app: :plug
end
end
end
| 38.553763 | 153 | 0.608562 |
1c76017cc73583b4d9c80b00b6cbe220d005faa2 | 12,695 | ex | Elixir | lib/util/httpc.ex | ikeyasu/antikythera | 544fdd22e46b1f34177053d87d9e2a9708c74113 | [
"Apache-2.0"
] | null | null | null | lib/util/httpc.ex | ikeyasu/antikythera | 544fdd22e46b1f34177053d87d9e2a9708c74113 | [
"Apache-2.0"
] | null | null | null | lib/util/httpc.ex | ikeyasu/antikythera | 544fdd22e46b1f34177053d87d9e2a9708c74113 | [
"Apache-2.0"
] | null | null | null | # Copyright(c) 2015-2018 ACCESS CO., LTD. All rights reserved.
use Croma
defmodule Antikythera.Httpc do
@default_max_body 10 * 1024 * 1024
@maximum_max_body 100 * 1024 * 1024
@max_retry_attempts 3
@moduledoc """
HTTP client library.
This is a wrapper around [`hackney`](https://github.com/benoitc/hackney), an HTTP client library.
`Httpc` supports the following features:
- gzip-compression is tranparently handled
- headers are represented by maps instead of lists
- header names are always lower case
- TCP connections are automatically re-established when closed by server due to keepalive timeout (see #86169)
## Body format
The `body` argument in `post/4`, `put/4`, `patch/4`, `request/5` takes either:
- `binary` - Sends raw data
- `{:form, [{key, value}]}` - Sends key-value data as x-www-form-urlencoded
- `{:json, map}` - Converts map into JSON and sends as application/json
- `{:file, path}` - Sends given file contents
## Options
- `:timeout` - Timeout to establish a connection, in milliseconds. Default is `8000`.
- `:recv_timeout` - Timeout used when receiving a response. Default is `5000`.
- `:params` - An enumerable of 2-tuples that will be URL-encoded and appended to the URL as query string parameters.
- `:cookie` - An enumerable of name-value pairs of cookies. `Httpc` automatically URL-encodes the given names/values for you.
- `:basic_auth` - A pair of `{username, password}` tuple to be used for HTTP basic authentication.
- `:proxy` - A proxy to be used for the request; it can be a regular URL or a `{host, port}` tuple.
- `:proxy_auth` - Proxy authentication `{username, password}` tuple.
- `:ssl` - SSL options supported by the `ssl` erlang module.
- `:skip_ssl_verification` - Whether to verify server's SSL certificate or not. Defaults to `false`.
Specify `skip_ssl_verification: true` when accessing insecure server with HTTPS.
- `:max_body` - Maximum content-length of the response body (compressed size if it's compressed).
Defaults to `#{@default_max_body}` (#{div(@default_max_body, 1024 * 1024)}MB) and must not exceed #{div(@maximum_max_body, 1024 * 1024)}MB.
Responses having body larger than the specified size will be rejected with `{:error, :response_too_large}`.
- `:follow_redirect` - A boolean that causes redirects to be followed. Defaults to `false`.
- `:max_redirect` - An integer denoting the maximum number of redirects to follow if `follow_redirect: true` is given.
- `:skip_body_decompression` - By default gzip-compressed body is automatically decompressed (i.e. defaults to `false`).
Pass `skip_body_decompression: true` if compressed body is what you need.
"""
alias Croma.Result, as: R
alias Antikythera.{MapUtil, Url}
alias Antikythera.Http.{Status, Method, Headers, SetCookie, SetCookiesMap}
defmodule ReqBody do
@moduledoc """
Type for `Antikythera.Httpc`'s request body.
"""
# JSON-serializable map: atom or string keys, arbitrary values.
@type json_obj :: %{(atom | String.t) => any}
# Raw binary, form pairs, JSON map, or a file path to upload.
@type t :: binary | {:form, [{term, term}]} | {:json, json_obj} | {:file, Path.t}
# Croma-generated predicate: true only for the shapes listed in `t` above.
defun valid?(t :: term) :: boolean do
b when is_binary(b) -> true
{:form, l} when is_list(l) -> true
{:json, m} when is_map(m) -> true
{:file, b} when is_binary(b) -> true
_otherwise -> false
end
# Normalizes a request body for hackney. {:json, map} is encoded with Poison
# and a "content-type: application/json" header is added; a Poison failure is
# propagated as {:error, _} via Croma.Result.
def convert_body_and_headers_by_body_type(body, headers) do
case body do
{:json, map} ->
Poison.encode(map)
|> R.map(fn json ->
{json, Map.put(headers, "content-type", "application/json")}
end)
other_body ->
# {:form, l} and {:file, b} can be left as-is because hackney handles this internally
{:ok, {other_body, headers}}
end
end
end
defmodule Response do
@moduledoc """
A struct to represent an HTTP response.
Response headers are converted to a `Antikythera.Http.Headers.t` and all header names are lower-cased.
`set-cookie` response headers are handled separately and stored in `cookies` field as a `Antikythera.Http.SetCookiesMap.t`.
"""
# Croma.Struct generates the struct definition and validating constructors
# from these field types; recursive_new?: true also builds nested fields.
use Croma.Struct, recursive_new?: true, fields: [
status: Status.Int,
body: Croma.Binary,
headers: Headers,
cookies: SetCookiesMap,
]
end
# Issues an HTTP request, returning {:ok, Response.t} | {:error, reason}.
# Header names are lower-cased and "accept-encoding: gzip" is added unless
# the caller already provided one.
defun request(method :: v[Method.t], url :: v[Url.t], body :: v[ReqBody.t], headers :: v[Headers.t] \\ %{}, options :: Keyword.t \\ []) :: R.t(Response.t) do
downcased_headers = Map.new(headers, fn {k, v} -> {String.downcase(k), v} end)
headers_with_encoding = Map.put_new(downcased_headers, "accept-encoding", "gzip")
options_map = normalize_options(options)
# Append the :params option (if any) to the URL's query string, preserving
# any query string already present in the URL.
url_with_params =
case options_map[:params] do
nil -> url
params ->
uri = URI.parse(url)
query_string =
case uri.query do
nil -> URI.encode_query(params)
qs -> qs <> "&" <> URI.encode_query(params)
end
%URI{uri | query: query_string} |> URI.to_string()
end
hackney_opts = hackney_options(options_map)
# Body conversion may fail (e.g. JSON encoding); the request is only sent on success.
ReqBody.convert_body_and_headers_by_body_type(body, headers_with_encoding)
|> R.bind(fn {hackney_body, headers} ->
request_impl(method, url_with_params, headers, hackney_body, hackney_opts, options_map)
end)
end
# Same as request/5 but unwraps the result, raising on {:error, _}.
defun request!(method :: Method.t, url :: Url.t, body :: ReqBody.t, headers :: Headers.t \\ %{}, options :: Keyword.t \\ []) :: Response.t do
request(method, url, body, headers, options) |> R.get!()
end
# Generates get/delete/options/head (plus bang variants), all with an empty body.
Enum.each([:get, :delete, :options, :head], fn method ->
defun unquote(method)(url :: Url.t, headers :: Headers.t \\ %{}, options :: Keyword.t \\ []) :: R.t(Response.t) do
request(unquote(method), url, "", headers, options)
end
defun unquote(:"#{method}!")(url :: Url.t, headers :: Headers.t \\ %{}, options :: Keyword.t \\ []) :: Response.t do
request!(unquote(method), url, "", headers, options)
end
end)
# Generates post/put/patch (plus bang variants), which take an explicit body.
Enum.each([:post, :put, :patch], fn method ->
defun unquote(method)(url :: Url.t, body :: ReqBody.t, headers :: Headers.t, options :: Keyword.t \\ []) :: R.t(Response.t) do
request(unquote(method), url, body, headers, options)
end
defun unquote(:"#{method}!")(url :: Url.t, body :: ReqBody.t, headers :: Headers.t, options :: Keyword.t \\ []) :: Response.t do
request!(unquote(method), url, body, headers, options)
end
end)
defp request_impl(method, url_with_params, headers, body, hackney_opts, options_map) do
send_request_with_retry(method, url_with_params, Map.to_list(headers), body, hackney_opts, options_map, 0)
end
# Retries up to @max_retry_attempts times when hackney reports {:error, :closed},
# i.e. the server closed a kept-alive connection (see #86169 in @moduledoc).
defp send_request_with_retry(method, url, headers_list, body, hackney_opts, options_map, attempts) do
case send_request(method, url, headers_list, body, hackney_opts, options_map) do
{:ok, _} = ok -> ok
{:error, :closed} -> # connection is closed on server side
require AntikytheraCore.Logger, as: L
L.info("{:error, :closed} returned by hackney: attempts=#{attempts} url=#{url}")
attempts2 = attempts + 1
if attempts2 < @max_retry_attempts do
:timer.sleep(10) # Since hackney's socket pool may have not yet cleaned up the closed socket, we should wait for a moment
send_request_with_retry(method, url, headers_list, body, hackney_opts, options_map, attempts2)
else
{:error, :closed}
end
{:error, _} = error -> error
end
end
# Thin wrapper around :hackney.request/5; the 3-tuple success (no body) is
# returned for HEAD requests.
defp send_request(method, url, headers_list, body, hackney_opts, options_map) do
case :hackney.request(method, url, headers_list, body, hackney_opts) do
{:ok, resp_status, resp_headers} -> make_response(resp_status, resp_headers, "", options_map) # HEAD method
{:ok, resp_status, resp_headers, resp_body} -> make_response(resp_status, resp_headers, resp_body, options_map)
{:error, reason} -> {:error, reason}
end
end
# Builds a Response: lower-cases and joins duplicate headers, pulls out
# set-cookie values, and (unless :skip_body_decompression) gunzips a
# gzip-encoded body, replacing content-encoding with a corrected content-length.
defp make_response(status, headers_list, body1, options_map) do
if byte_size(body1) <= options_map[:max_body] do
headers_grouped1 = Enum.group_by(headers_list, fn {k, _} -> String.downcase(k) end, &elem(&1, 1))
{cookie_strings, headers_grouped2} = Map.pop(headers_grouped1, "set-cookie", [])
headers_map1 = MapUtil.map_values(headers_grouped2, fn {_, vs} -> Enum.join(vs, ", ") end)
{body2, headers_map2} =
if body1 != "" and !options_map[:skip_body_decompression] and headers_map1["content-encoding"] == "gzip" do
uncompressed = :zlib.gunzip(body1)
content_length = Integer.to_string(byte_size(uncompressed))
new_headers_map = headers_map1 |> Map.delete("content-encoding") |> Map.put("content-length", content_length)
{uncompressed, new_headers_map}
else
{body1, headers_map1}
end
cookies_map = Map.new(cookie_strings, &SetCookie.parse!/1)
{:ok, %Response{status: status, body: body2, headers: headers_map2, cookies: cookies_map}}
else
# The returned body might be truncated and thus we can't reliably uncompress the body if it's compressed.
# In this case we give up returning partial information and simply return an error.
{:error, :response_too_large}
end
end
# Converts the option keyword list to a map and fills in the :max_body default.
# A :max_body outside 0..@maximum_max_body matches no clause and raises.
defp normalize_options(options) do
options_map = Map.new(options)
case options_map[:max_body] do
nil -> Map.put(options_map, :max_body, @default_max_body)
max when max in 0..@maximum_max_body -> options_map
end
end
# Translates the normalized option map into hackney's option list; options
# that convert_option/2 maps to nil are not hackney options and are dropped.
defp hackney_options(options_map) do
max_body = Map.fetch!(options_map, :max_body)
base_opts = [{:path_encode_fun, &encode_path/1}, {:max_body, max_body}, {:with_body, true}]
Enum.reduce(options_map, base_opts, fn({k, v}, opts) ->
case convert_option(k, v) do
nil -> opts
opt -> [opt | opts]
end
end)
end
# Maps one public option to the corresponding hackney option; returns nil for
# options handled elsewhere (:params in the URL, :max_body default,
# :skip_body_decompression in response processing).
defunp convert_option(name, value) :: any do
(:timeout , value ) -> {:connect_timeout, value}
(:recv_timeout , value ) -> {:recv_timeout , value}
# :params are used in URL, not a hackney option
(:cookie , cs ) -> {:cookie , Enum.map(cs, fn {n, v} -> {URI.encode_www_form(n), URI.encode_www_form(v)} end)}
(:basic_auth , {_u, _p} = t) -> {:basic_auth , t }
(:proxy , proxy ) -> {:proxy , proxy}
(:proxy_auth , {_u, _p} = t) -> {:proxy_auth , t }
(:ssl , ssl ) -> {:ssl_options , ssl }
(:skip_ssl_verification, true ) -> :insecure
# :max_body is treated differently as it has the default value
(:follow_redirect , true ) -> {:follow_redirect, true }
(:max_redirect , max ) -> {:max_redirect , max }
# :skip_body_decompression is used in processing response body, not here
(_ , _ ) -> nil
end
# hackney's :path_encode_fun: percent-encodes the path while leaving already
# present "%XY" escape sequences untouched (no double encoding).
defunpt encode_path(path :: String.t) :: String.t do
encode_path_impl(path, "")
end
# Converts 0..15 to the corresponding uppercase hex digit character.
defp hex(n) when n <= 9, do: n + ?0
defp hex(n) , do: n + ?A - 10
# Guard-safe check for an ASCII hex digit (accepts both cases for a-f).
defmacrop is_hex(c) do
quote do
unquote(c) in ?0..?9 or unquote(c) in ?A..?F or unquote(c) in ?a..?f
end
end
# Byte-wise accumulator loop behind encode_path/1.
defunp encode_path_impl(path :: String.t, acc :: String.t) :: String.t do
("", acc) ->
acc
# Keep existing %XY escapes as-is.
(<<?%, a :: 8, b :: 8, rest :: binary>>, acc) when is_hex(a) and is_hex(b) ->
encode_path_impl(rest, <<acc :: binary, ?%, a, b>>)
(<<c :: 8, rest :: binary>>, acc) ->
import Bitwise
case URI.char_unescaped?(c) do
true -> encode_path_impl(rest, <<acc :: binary, c>>)
false -> encode_path_impl(rest, <<acc :: binary, ?%, hex(bsr(c, 4)), hex(band(c, 15))>>)
end
end
end
defmodule Antikythera.Httpc.Mockable do
@moduledoc """
Just wrapping `Httpc` without any modification.
Can be mocked with `:meck.expect(Httpc.Mockable, :request, ...)` without interfering other Httpc action.
"""
defdelegate request( method, url, body, headers, options), to: Antikythera.Httpc
defdelegate request!(method, url, body, headers, options), to: Antikythera.Httpc
# Per-verb delegates (and bang variants) are generated at compile time.
Enum.each([:get, :delete, :options, :head], fn method ->
defdelegate unquote(method )(url, headers, options), to: Antikythera.Httpc
defdelegate unquote(:"#{method}!")(url, headers, options), to: Antikythera.Httpc
end)
Enum.each([:post, :put, :patch], fn method ->
defdelegate unquote(method )(url, body, headers, options), to: Antikythera.Httpc
defdelegate unquote(:"#{method}!")(url, body, headers, options), to: Antikythera.Httpc
end)
end
| 45.665468 | 159 | 0.635526 |
1c76629f4408fe1e13c1c6240a126a1f7866430e | 231 | ex | Elixir | lib/sticky/config.ex | kawikadkekahuna/Stickylixer | b81a1953522d5b5873f8893d54ac01e87891af73 | [
"BSD-3-Clause"
] | null | null | null | lib/sticky/config.ex | kawikadkekahuna/Stickylixer | b81a1953522d5b5873f8893d54ac01e87891af73 | [
"BSD-3-Clause"
] | null | null | null | lib/sticky/config.ex | kawikadkekahuna/Stickylixer | b81a1953522d5b5873f8893d54ac01e87891af73 | [
"BSD-3-Clause"
] | null | null | null | defmodule Sticky.Config do
@moduledoc false
# Reads the :send_to_http flag from the :stickylixer application
# environment; defaults to true when unset.
def send_to_http, do: Application.get_env(:stickylixer, :send_to_http, true)

# Reads the :retry_attempts setting from the :stickylixer application
# environment; defaults to 3 when unset.
def retry_attempts, do: Application.get_env(:stickylixer, :retry_attempts, 3)
end
| 19.25 | 58 | 0.74026 |
1c767852ec66c949554e93fa3857b67cc7a7eb6c | 1,551 | ex | Elixir | clients/logging/lib/google_api/logging/v2/connection.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | 1 | 2018-12-03T23:43:10.000Z | 2018-12-03T23:43:10.000Z | clients/logging/lib/google_api/logging/v2/connection.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | null | null | null | clients/logging/lib/google_api/logging/v2/connection.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the elixir code generator program.
# Do not edit the class manually.
defmodule GoogleApi.Logging.V2.Connection do
@moduledoc """
Handle Tesla connections for GoogleApi.Logging.V2.
"""
# A connection is simply a configured Tesla client.
@type t :: Tesla.Env.client()
# The connection implementation is injected by GoogleApi.Gax.Connection,
# configured with the OAuth scopes, OTP app and API base URL below.
use GoogleApi.Gax.Connection,
scopes: [
# View and manage your data across Google Cloud Platform services
"https://www.googleapis.com/auth/cloud-platform",
# View your data across Google Cloud Platform services
"https://www.googleapis.com/auth/cloud-platform.read-only",
# Administrate log data for your projects
"https://www.googleapis.com/auth/logging.admin",
# View log data for your projects
"https://www.googleapis.com/auth/logging.read",
# Submit log data for your projects
"https://www.googleapis.com/auth/logging.write"
],
otp_app: :google_api_logging,
base_url: "https://logging.googleapis.com/"
end
| 34.466667 | 77 | 0.72276 |
1c769daad0adb7aa7d70d40704ee7589139aed64 | 1,519 | ex | Elixir | lib/witchcraft.ex | baseballlover723/witchcraft | 75b62d87bc2c22917f6d5f2bc42d1e0163d40460 | [
"MIT"
] | 1 | 2019-10-04T11:32:01.000Z | 2019-10-04T11:32:01.000Z | lib/witchcraft.ex | baseballlover723/witchcraft | 75b62d87bc2c22917f6d5f2bc42d1e0163d40460 | [
"MIT"
] | null | null | null | lib/witchcraft.ex | baseballlover723/witchcraft | 75b62d87bc2c22917f6d5f2bc42d1e0163d40460 | [
"MIT"
] | 1 | 2020-11-18T18:26:12.000Z | 2020-11-18T18:26:12.000Z | defmodule Witchcraft do
@moduledoc """
Top level module
## Hierarchy
Semigroupoid Semigroup Setoid Foldable Functor -----------┐
↓ ↓ ↓ ↓ ↙ ↓ ↘ |
Category Monoid Ord Traversable Apply Bifunctor |
↓ ↙ ↘ ↓
Arrow Applicative Chain Extend
↘ ↙ ↓
Monad Comonad
## `use Witchcraft`
There is a convenient `use` macro to import *all* functions in the library.
use Witchcraft
This recursively calls `use` on all children modules.
Any options passed to `use` will be passed down to all dependencies.
use Witchcraft, except: [right_fold: 2]
If you would like to not override the functions and operators from `Kernel`,
you can pass the special option `override_kernel: false`.
use Witchcraft, override_kernel: false
This same style of `use` is also available on all submodules, and follow
the dependency chart (above).
"""
# `use Witchcraft` expands to `use` of the leaves of the class hierarchy
# shown in @moduledoc (each of which recursively uses its dependencies);
# any options are forwarded verbatim to every child module.
defmacro __using__(opts \\ []) do
quote do
use Witchcraft.Arrow, unquote(opts)
use Witchcraft.Monoid, unquote(opts)
use Witchcraft.Bifunctor, unquote(opts)
use Witchcraft.Traversable, unquote(opts)
use Witchcraft.Monad, unquote(opts)
use Witchcraft.Comonad, unquote(opts)
end
end
end
| 32.319149 | 78 | 0.548387 |
1c76a8e7c768701efe86d7afd530ef0cd171c76d | 1,037 | exs | Elixir | mix.exs | hippware/reprise | 0cdb09214cb66b9c365096e71662ca804352660a | [
"BSD-2-Clause"
] | null | null | null | mix.exs | hippware/reprise | 0cdb09214cb66b9c365096e71662ca804352660a | [
"BSD-2-Clause"
] | null | null | null | mix.exs | hippware/reprise | 0cdb09214cb66b9c365096e71662ca804352660a | [
"BSD-2-Clause"
] | null | null | null | defmodule Reprise.Mixfile do
use Mix.Project
def project do
[app: :reprise,
version: "0.5.2-dev",
elixir: "~> 1.0",
description: description(),
package: package(),
deps: deps()]
end
def application do
[mod: {Reprise, []},
registered: [Reprise],
applications: [:logger],
description: 'Simple module reloader',
]
end
defp deps do
[
{:dialyze, "~> 0.2", only: :dev},
{:ex_doc, "~> 0.19", only: :dev},
]
end
defp description do
"""
Reprise reloads your modules after they've been recompiled.
It is thought as a companion to inotify tools.
It scans for changes in beam files belonging to youyr project.
Doesn't recompile stuff by itself.
"""
end
defp package do
[ maintainers: ["Wojciech Kaczmarek",
"Yurii Rashkovskii", "Jay Doane", "Josh Austin"],
licenses: ["BSD"],
description: description(),
links: %{"GitHub" => "https://github.com/herenowcoder/reprise"}
]
end
end
| 22.06383 | 69 | 0.590164 |
1c7710cd1d2462689e1dcd7a185280c72d73c835 | 1,604 | ex | Elixir | lib/chronik/pub_sub.ex | parody/chronik | ec2dbc5b415433d8732575db6ce24b02ebaf5da3 | [
"MIT"
] | 28 | 2017-09-12T13:54:17.000Z | 2021-07-20T22:04:28.000Z | lib/chronik/pub_sub.ex | parody/chronik | ec2dbc5b415433d8732575db6ce24b02ebaf5da3 | [
"MIT"
] | 18 | 2017-09-22T14:06:00.000Z | 2018-06-09T14:18:35.000Z | lib/chronik/pub_sub.ex | parody/chronik | ec2dbc5b415433d8732575db6ce24b02ebaf5da3 | [
"MIT"
] | 2 | 2018-05-11T09:36:50.000Z | 2019-09-23T18:29:58.000Z | defmodule Chronik.PubSub do
@moduledoc """
`Chronik.PubSub` adapter contract and API.
In `Chronik` there is only one feed (all). This means that subscribers
see a total ordering of events.
"""
@doc false
defmacro __using__(opts) do
quote bind_quoted: [opts: opts] do
@behaviour Chronik.PubSub
alias Chronik.Config
{_cfg, adapter} = Config.fetch_config(__MODULE__, opts)
@adapter adapter
defdelegate subscribe(opts \\ []), to: @adapter
defdelegate unsubscribe(), to: @adapter
defdelegate broadcast(events), to: @adapter
defdelegate start_link(opts), to: @adapter
end
end
@typedoc "The result status of all operations on the `Chronik.PubSub`"
@type result_status :: :ok | {:error, String.t()}
@doc """
Subscribes the caller to the PubSub.
Multiple subscriptions to the PubSub are allowed. The
subscriber will receive the events multiple times.
The accepted options are:
- `consistency`: `:eventual` (default) or `:strict`
"""
@callback subscribe(opts :: Keyword.t()) :: result_status()
@doc """
Unsubscribes the caller from the PubSub. No further events are
received from the PubSub.
**note**: events could still be on the subscribers' mailbox.
"""
@callback unsubscribe() :: result_status()
@doc """
Broadcasts an enumeration of `records` to all the subscribers.
"""
@callback broadcast(records :: [Chronik.EventRecord]) :: result_status()
# API
def start_link(opts \\ []) do
{_store, pub_sub} = Chronik.Config.fetch_adapters()
pub_sub.start_link(opts)
end
end
| 25.870968 | 74 | 0.682045 |
1c773eeeed79aaec74dec8219f8a824a23132ecb | 3,580 | ex | Elixir | lib/transformations.ex | Bajena/the-ray-tracer-challenge | 8b18405fbfadd247c888660adf0a542e4f2024cd | [
"MIT"
] | 3 | 2020-03-23T09:41:07.000Z | 2021-06-03T00:14:24.000Z | lib/transformations.ex | Bajena/the-ray-tracer-challenge | 8b18405fbfadd247c888660adf0a542e4f2024cd | [
"MIT"
] | 17 | 2020-03-14T11:54:59.000Z | 2020-06-21T20:01:29.000Z | lib/transformations.ex | Bajena/the-ray-tracer-challenge | 8b18405fbfadd247c888660adf0a542e4f2024cd | [
"MIT"
] | null | null | null | defmodule RayTracer.Transformations do
@moduledoc """
This module defines matrix transformations like
scaling, shearing, rotating and translating
"""
alias RayTracer.Matrix
alias RayTracer.RTuple
@spec translation(number, number, number) :: Matrix.matrix
def translation(x, y, z) do
Matrix.ident
|> Matrix.set(0, 3, x)
|> Matrix.set(1, 3, y)
|> Matrix.set(2, 3, z)
end
@spec scaling(number, number, number) :: Matrix.matrix
def scaling(x, y, z) do
Matrix.ident
|> Matrix.set(0, 0, x)
|> Matrix.set(1, 1, y)
|> Matrix.set(2, 2, z)
end
@doc """
Returns a matrix that is a `r` radians rotation matrix over the X axis.
"""
@spec rotation_x(number) :: Matrix.matrix
def rotation_x(r) do
Matrix.ident
|> Matrix.set(1, 1, :math.cos(r))
|> Matrix.set(1, 2, -:math.sin(r))
|> Matrix.set(2, 1, :math.sin(r))
|> Matrix.set(2, 2, :math.cos(r))
end
@doc """
Returns a matrix that is a `r` radians rotation matrix over the Y axis.
"""
@spec rotation_y(number) :: Matrix.matrix
def rotation_y(r) do
Matrix.ident
|> Matrix.set(0, 0, :math.cos(r))
|> Matrix.set(0, 2, :math.sin(r))
|> Matrix.set(2, 0, -:math.sin(r))
|> Matrix.set(2, 2, :math.cos(r))
end
@doc """
Returns a matrix that is a `r` radians rotation matrix over the Z axis.
"""
@spec rotation_z(number) :: Matrix.matrix
def rotation_z(r) do
Matrix.ident
|> Matrix.set(0, 0, :math.cos(r))
|> Matrix.set(0, 1, -:math.sin(r))
|> Matrix.set(1, 0, :math.sin(r))
|> Matrix.set(1, 1, :math.cos(r))
end
@doc """
Returns a shearing matrix in which:
- xy - moves x in proportion to y
- xz - moves x in proportion to z
- yx - moves y in proportion to x
- yz - moves y in proportion to z
- zx - moves z in proportion to x
- zy - moves z in proportion to y
"""
@spec shearing(number, number, number, number, number, number) :: Matrix.matrix
def shearing(xy, xz, yx, yz, zx, zy) do
Matrix.ident
|> Matrix.set(0, 1, xy)
|> Matrix.set(0, 2, xz)
|> Matrix.set(1, 0, yx)
|> Matrix.set(1, 2, yz)
|> Matrix.set(2, 0, zx)
|> Matrix.set(2, 1, zy)
end
@doc """
Returns a transormation matrix which allows to "pretend" that the eye moves instead of the world.
`from` - Specifies where we want the eye to be in the scene
`to` - Specifies the point at which the eye will look
`up` - A vector indicating which direction is up.
"""
@spec view_transform(RTuple.point, RTuple.point, RTuple.vector) :: Matrix.matrix
def view_transform(from, to, up) do
nup = up |> RTuple.normalize
forward = RTuple.sub(to, from) |> RTuple.normalize
left = RTuple.cross(forward, nup)
true_up = RTuple.cross(left, forward)
orientation =
Matrix.ident
|> Matrix.set(0, 0, left |> RTuple.x)
|> Matrix.set(0, 1, left |> RTuple.y)
|> Matrix.set(0, 2, left |> RTuple.z)
|> Matrix.set(1, 0, true_up |> RTuple.x)
|> Matrix.set(1, 1, true_up |> RTuple.y)
|> Matrix.set(1, 2, true_up |> RTuple.z)
|> Matrix.set(2, 0, -(forward |> RTuple.x))
|> Matrix.set(2, 1, -(forward |> RTuple.y))
|> Matrix.set(2, 2, -(forward |> RTuple.z))
translate_from = translation(
-(from |> RTuple.x),
-(from |> RTuple.y),
-(from |> RTuple.z)
)
orientation |> Matrix.mult(translate_from)
end
def compose(transformations) do
transformations
|> Enum.reverse
|> Enum.reduce(fn x, acc -> acc |> Matrix.mult(x) end)
end
end
| 29.344262 | 101 | 0.600279 |
1c7748fb066a3147b7001b410d19f928a1513341 | 473 | exs | Elixir | config/prod.exs | thommay/bakeoff | 4c6cca860c45371f04021c3afceb4fe2e0b8d380 | [
"MIT"
] | null | null | null | config/prod.exs | thommay/bakeoff | 4c6cca860c45371f04021c3afceb4fe2e0b8d380 | [
"MIT"
] | null | null | null | config/prod.exs | thommay/bakeoff | 4c6cca860c45371f04021c3afceb4fe2e0b8d380 | [
"MIT"
] | null | null | null | use Mix.Config
# Endpoint: port/host come from the environment; serve digested static assets.
config :bakeoff, Bakeoff.Endpoint,
  http: [port: {:system, "PORT"}],
  url: [host: System.get_env("HOST"), port: 80],
  cache_static_manifest: "priv/static/manifest.json"
# Log only :info and above in production.
config :logger, level: :info
# Secret used to sign/encrypt cookies and sessions.
config :bakeoff, Bakeoff.Endpoint,
  secret_key_base: System.get_env("SECRET_KEY_BASE")
# NOTE(review): System.get_env here is read when this config is evaluated
# (at build time for releases) — confirm that is intended.
config :bakeoff, Bakeoff.Repo,
  adapter: Ecto.Adapters.Postgres,
  url: System.get_env("DATABASE_URL"),
  size: 20 # The amount of database connections in the pool
1c7752dd285982e15d541ea7bcf69163f4ac2688 | 598 | ex | Elixir | lib/astro_ex/unit.ex | NickHurst/astro_ex | e5ea2c587fa0d7d673d903af04f4ca1de5e87f67 | [
"MIT"
] | null | null | null | lib/astro_ex/unit.ex | NickHurst/astro_ex | e5ea2c587fa0d7d673d903af04f4ca1de5e87f67 | [
"MIT"
] | null | null | null | lib/astro_ex/unit.ex | NickHurst/astro_ex | e5ea2c587fa0d7d673d903af04f4ca1de5e87f67 | [
"MIT"
] | null | null | null | defprotocol AstroEx.Unit do
@moduledoc """
AstroEx unit protocol
"""
alias AstroEx.Unit.{Arcmin, Arcsec, Degrees, DMS, HMS, Radian}
@type t :: Arcmin.t() | Arcsec.t() | Degrees.t() | DMS.t() | HMS.t() | Radian.t()
@typep unit_module :: Arcmin | Arcsec | Degrees | DMS | HMS | Radian
@typep number_module :: Integer | Float
@spec cast(unit :: t(), to :: number_module() | unit_module()) :: t()
def cast(unit, to)
@spec from_degrees(degrees :: number() | Degrees.t()) :: t()
def from_degrees(degrees)
@spec to_string(unit :: t()) :: String.t()
def to_string(unit)
end
| 27.181818 | 83 | 0.62709 |
1c779a545fa836f1e8fcdfb55ed84b5ae8f460ec | 484 | ex | Elixir | lib/training/application.ex | megalithic/genstatem_training | 6e8cb1027fe95ff363f57a9f6b582e7b642ccdb9 | [
"MIT"
] | null | null | null | lib/training/application.ex | megalithic/genstatem_training | 6e8cb1027fe95ff363f57a9f6b582e7b642ccdb9 | [
"MIT"
] | null | null | null | lib/training/application.ex | megalithic/genstatem_training | 6e8cb1027fe95ff363f57a9f6b582e7b642ccdb9 | [
"MIT"
] | null | null | null | defmodule Training.Application do
# See https://hexdocs.pm/elixir/Application.html
# for more information on OTP Applications
@moduledoc false
use Application
def start(_type, _args) do
children = [
# Training.Switch.start_link({:off, 0})
]
# See https://hexdocs.pm/elixir/Supervisor.html
# for other strategies and supported options
opts = [strategy: :one_for_one, name: Training.Supervisor]
Supervisor.start_link(children, opts)
end
end
| 25.473684 | 62 | 0.710744 |
1c779b94ed5fe20deeda4a8600b75596c9ceaeba | 14,283 | ex | Elixir | lib/oli_web/controllers/legacy_superactivity_controller.ex | malav2110/oli-torus | 8af64e762a7c8a2058bd27a7ab8e96539ffc055f | [
"MIT"
] | 1 | 2022-03-17T20:35:47.000Z | 2022-03-17T20:35:47.000Z | lib/oli_web/controllers/legacy_superactivity_controller.ex | malav2110/oli-torus | 8af64e762a7c8a2058bd27a7ab8e96539ffc055f | [
"MIT"
] | 9 | 2021-11-02T16:52:09.000Z | 2022-03-25T15:14:01.000Z | lib/oli_web/controllers/legacy_superactivity_controller.ex | malav2110/oli-torus | 8af64e762a7c8a2058bd27a7ab8e96539ffc055f | [
"MIT"
] | null | null | null | defmodule OliWeb.LegacySuperactivityController do
use OliWeb, :controller
require Logger
alias XmlBuilder
alias Oli.Interop.CustomActivities.{
SuperActivityClient,
SuperActivitySession,
AttemptHistory,
FileRecord,
FileDirectory
}
alias Oli.Delivery.Sections
alias Oli.Delivery.Attempts.Core, as: Attempts
alias Oli.Grading
alias Oli.Delivery.Attempts.ActivityLifecycle
alias Oli.Delivery.Attempts.ActivityLifecycle.Evaluate, as: ActivityEvaluation
alias Oli.Delivery.Attempts.Core.ClientEvaluation
alias Oli.Delivery.Attempts.Core.StudentInput
alias Oli.Activities.Model.Feedback
alias Oli.Delivery.Attempts.ActivityLifecycle
alias Oli.Repo
  # Returns (as JSON) the launch context a legacy superactivity client needs:
  # the source URL of the activity bundle, the command endpoint, the user and
  # the part ids of the latest attempt. 404s when the attempt guid is unknown.
  def context(conn, %{"attempt_guid" => attempt_guid} = _params) do
    user = conn.assigns.current_user
    attempt = Attempts.get_activity_attempt_by(attempt_guid: attempt_guid)
    case attempt do
      nil ->
        error(conn, 404, "Attempt not found")
      _ ->
        # Always work from the latest attempt for this resource.
        activity_attempt =
          Attempts.get_latest_activity_attempt(attempt.resource_attempt_id, attempt.resource_id)
          |> Repo.preload([:part_attempts, revision: [:scoring_strategy]])
        part_ids = Enum.map(activity_attempt.part_attempts, fn x -> x.part_id end)
        # `base`/`src` locate the legacy activity bundle inside the revision content.
        %{"base" => base, "src" => src} = activity_attempt.revision.content
        context = %{
          attempt_guid: activity_attempt.attempt_guid,
          src_url: "https://#{conn.host}/superactivity/#{base}/#{src}",
          activity_type: activity_attempt.revision.activity_type.slug,
          server_url: "https://#{conn.host}/jcourse/superactivity/server",
          user_guid: user.id,
          mode: "delivery",
          part_ids: part_ids
        }
        json(conn, context)
    end
  end
  # Entry point for all legacy superactivity commands. Dispatches on the
  # `commandName` request parameter and replies with either the command's XML
  # document or a plain-text error with the command-specific status code.
  def process(
        conn,
        %{"commandName" => command_name, "activityContextGuid" => attempt_guid} = params
      ) do
    user = conn.assigns.current_user
    context = fetch_context(conn.host, user, attempt_guid)
    xml_response = process_command(command_name, context, params)
    case xml_response do
      {:ok, xml} ->
        conn
        |> put_resp_content_type("text/xml")
        |> send_resp(200, xml)
      {:error, error, code} ->
        conn
        |> put_resp_content_type("text/text")
        |> send_resp(code, error)
    end
  end
def file_not_found(conn, _params) do
conn
|> put_status(404)
|> text("File Not Found")
end
  # Assembles everything the command handlers need for one attempt: the
  # attempt/resource/section records, enrollment and instructors, URLs for
  # web content, and any previously saved state files.
  defp fetch_context(host, user, attempt_guid) do
    activity_attempt =
      Attempts.get_activity_attempt_by(attempt_guid: attempt_guid)
      |> Repo.preload([:part_attempts, revision: [:scoring_strategy]])
    # `base`/`src` locate the legacy activity bundle inside the revision content.
    %{"base" => base, "src" => src} = activity_attempt.revision.content
    resource_attempt = Attempts.get_resource_attempt_by(id: activity_attempt.resource_attempt_id)
    resource_access = Attempts.get_resource_access(resource_attempt.resource_access_id)
    section =
      Sections.get_section_preloaded!(resource_access.section_id)
      |> Repo.preload([:institution, :section_project_publications])
    instructors = Grading.fetch_instructors(section.slug)
    enrollment =
      Sections.get_enrollment(section.slug, user.id)
      |> Repo.preload([:context_roles])
    project = Sections.get_project_by_section_resource(section.id, activity_attempt.resource_id)
    # Media for the project is served from the configured media host.
    path = "media/" <> project.slug
    web_content_url = "https://#{Application.fetch_env!(:oli, :media_url)}/#{path}/"
    host_url = "https://#{host}"
    save_files =
      ActivityLifecycle.get_activity_attempt_save_files(
        activity_attempt.attempt_guid,
        Integer.to_string(user.id),
        activity_attempt.attempt_number
      )
    %{
      server_time_zone: get_timezone(),
      user: user,
      host: host,
      section: section,
      activity_attempt: activity_attempt,
      resource_attempt: resource_attempt,
      resource_access: resource_access,
      save_files: save_files,
      instructors: instructors,
      enrollment: enrollment,
      web_content_url: web_content_url,
      host_url: host_url,
      base: base,
      src: src
    }
  end
defp process_command("loadClientConfig", context, _params) do
xml =
SuperActivityClient.setup(%{
context: context
})
|> XmlBuilder.document()
|> XmlBuilder.generate()
{:ok, xml}
end
defp process_command("beginSession", context, _params) do
xml =
SuperActivitySession.setup(%{
context: context
})
|> XmlBuilder.document()
|> XmlBuilder.generate()
{:ok, xml}
end
defp process_command("loadContentFile", context, _params) do
%{"modelXml" => modelXml} = context.activity_attempt.revision.content
{:ok, modelXml}
end
defp process_command("startAttempt", context, params) do
case context.activity_attempt.date_evaluated do
nil ->
attempt_history(context)
_ ->
seed_state_from_previous = Map.get(params, "seedResponsesWithPrevious", false)
case ActivityLifecycle.reset_activity(
context.section.slug,
context.activity_attempt.attempt_guid,
seed_state_from_previous
) do
{:ok, {attempt_state, _model}} ->
attempt_history(fetch_context(context.host, context.user, attempt_state.attemptGuid))
{:error, _} ->
{:error, "server error", 500}
end
end
end
defp process_command(
"scoreAttempt",
context,
%{"scoreValue" => score_value, "scoreId" => score_type} = params
) do
part_attempt =
case Map.get(params, "partId") do
nil ->
# Assumes custom has a single part if partId is absent from request parameters
Enum.at(context.activity_attempt.part_attempts, 0)
part_id ->
Enum.filter(context.activity_attempt.part_attempts, fn p -> part_id === p.part_id end)
end
# oli legacy allows for custom activities to supply arbitrary score types.
# Worse still; an activity can supply multiple score types as part of the grade. How to handle these on Torus?
case purse_score(score_type, score_value) do
{:non_numeric, score_value} ->
custom_scores =
Map.merge(context.activity_attempt.custom_scores, %{score_type => score_value})
Attempts.update_activity_attempt(context.activity_attempt, %{custom_scores: custom_scores})
{:numeric, score, out_of} ->
eval_numeric_score(context, score, out_of, part_attempt)
end
end
defp process_command("endAttempt", context, _params) do
case finalize_activity_attempt(context) do
{:ok, _} ->
attempt_history(
fetch_context(context.host, context.user, context.activity_attempt.attempt_guid)
)
{:error, message} ->
Logger.error("Error when processing help message #{inspect(message)}")
{:error, "server error", 500}
end
end
defp process_command(command_name, _context, _params)
when command_name === "loadUserSyllabus" do
{:error, "command not supported", 400}
end
  # "writeFileRecord": persists a client state file for an open attempt.
  # Once the attempt is evaluated the record becomes read-only: the handler
  # looks up the existing file instead of writing a new one.
  defp process_command(
         "writeFileRecord",
         context,
         %{
           "activityContextGuid" => attempt_guid,
           "byteEncoding" => byte_encoding,
           "fileName" => file_name,
           "fileRecordData" => content,
           "resourceTypeID" => activity_type,
           "mimeType" => mime_type,
           "userGuid" => user_id
         } = params
       ) do
    {:ok, save_file} =
      case context.activity_attempt.date_evaluated do
        nil ->
          file_info = %{
            attempt_guid: attempt_guid,
            user_id: user_id,
            content: content,
            mime_type: mime_type,
            byte_encoding: byte_encoding,
            activity_type: activity_type,
            file_name: file_name
          }
          # "attemptNumber" is optional in the legacy protocol.
          attempt_number = Map.get(params, "attemptNumber")
          file_info =
            if attempt_number != nil do
              Map.merge(file_info, %{attempt_number: attempt_number})
            else
              file_info
            end
          ActivityLifecycle.save_activity_attempt_state_file(file_info)
        _ ->
          # Attempt already finalized: do not overwrite, return the stored file.
          attempt_number = Map.get(params, "attemptNumber")
          save_file =
            ActivityLifecycle.get_activity_attempt_save_file(
              attempt_guid,
              user_id,
              attempt_number,
              file_name
            )
          {:ok, save_file}
      end
    case save_file do
      nil ->
        {:error, "file not found", 404}
      _ ->
        # Acknowledge with a FileRecord XML document describing the stored file.
        xml =
          FileRecord.setup(%{
            context: context,
            date_created: DateTime.to_unix(save_file.inserted_at),
            file_name: save_file.file_name,
            guid: save_file.file_guid
          })
          |> XmlBuilder.document()
          |> XmlBuilder.generate()
        {:ok, xml}
    end
  end
  # "loadFileRecord": returns the (URI-decoded) content of a previously
  # saved state file, or a 404 error when no such file exists.
  defp process_command(
         "loadFileRecord",
         _context,
         %{
           "activityContextGuid" => attempt_guid
         } = params
       ) do
    file_name = Map.get(params, "fileName")
    attempt_number = Map.get(params, "attemptNumber")
    user_id = Map.get(params, "userGuid")
    save_file =
      ActivityLifecycle.get_activity_attempt_save_file(
        attempt_guid,
        user_id,
        attempt_number,
        file_name
      )
    case save_file do
      nil -> {:error, "file not found", 404}
      _ -> {:ok, URI.decode(save_file.content)}
    end
  end
  # "deleteFileRecord": intentionally a no-op; replies with an (empty)
  # file-directory document so the client proceeds.
  defp process_command("deleteFileRecord", context, _params) do
    # no op
    xml =
      FileDirectory.setup(%{
        context: context
      })
      |> XmlBuilder.document()
      |> XmlBuilder.generate()
    {:ok, xml}
  end
  # Any other command is rejected as unsupported.
  defp process_command(_command_name, _context, _params) do
    {:error, "command not supported", 400}
  end
  # Finalizes an open attempt: evaluates every still-unevaluated part with a
  # default 0/100, then rolls the part evaluations up into the activity score
  # inside a single transaction. Already-finalized attempts are a no-op.
  defp finalize_activity_attempt(context) do
    case context.activity_attempt.date_evaluated do
      nil ->
        part_attempts = Attempts.get_latest_part_attempts(context.activity_attempt.attempt_guid)
        # Ensures all parts are evaluated before rolling up the part scores into activity score
        # Note that by default assign 0 out of 100 is assumed for any part not already evaluated
        client_evaluations =
          Enum.reduce(part_attempts, [], fn p, acc ->
            case p.date_evaluated do
              nil -> acc ++ [create_evaluation(context, 0, 100, p)]
              _ -> acc
            end
          end)
        Repo.transaction(fn ->
          if length(client_evaluations) > 0 do
            ActivityEvaluation.apply_super_activity_evaluation(
              context.section.slug,
              context.activity_attempt.attempt_guid,
              client_evaluations
            )
          end
          # Roll the part scores up into the activity attempt score.
          rest =
            ActivityEvaluation.rollup_part_attempt_evaluations(
              context.activity_attempt.attempt_guid,
              :normalize
            )
          rest
        end)
      _ ->
        {:ok, "activity already finalized"}
    end
  end
defp create_evaluation(context, score, out_of, part_attempt) do
{:ok, feedback} = Feedback.parse(%{"id" => "1", "content" => "elsewhere"})
user_input =
case Enum.at(context.save_files, 0) do
nil -> "some-input"
_ -> Enum.at(context.save_files, 0).content
end
%{
attempt_guid: part_attempt.attempt_guid,
client_evaluation: %ClientEvaluation{
input: %StudentInput{
input: user_input
},
score: score,
out_of: out_of,
feedback: feedback
}
}
end
  # Applies a numeric score to the given part attempt and, on success,
  # returns the refreshed attempt history document.
  defp eval_numeric_score(context, score, out_of, part_attempt) do
    client_evaluations = [
      create_evaluation(context, score, out_of, part_attempt)
    ]
    case ActivityEvaluation.apply_super_activity_evaluation(
           context.section.slug,
           context.activity_attempt.attempt_guid,
           client_evaluations
         ) do
      {:ok, _evaluations} ->
        attempt_history(
          fetch_context(context.host, context.user, context.activity_attempt.attempt_guid)
        )
      {:error, message} ->
        Logger.error("The error when applying client evaluation #{message}")
        {:error, "server error", 500}
    end
  end
defp purse_score(score_type, score_value) do
case score_type do
"completed" ->
{:non_numeric, score_value}
"count" ->
{:non_numeric, score_value}
"feedback" ->
{:non_numeric, score_value}
"grade" ->
{score, _} = Float.parse(score_value)
{:numeric, score * 100, 100}
"percent" ->
{score, _} = Float.parse(score_value)
{:numeric, score * 100, 100}
"percentScore" ->
{score, _} = Float.parse(score_value)
{:numeric, score, 100}
"posttest1Score" ->
{score, _} = Float.parse(score_value)
{:numeric, score * 100, 100}
"posttest2Score" ->
{score, _} = Float.parse(score_value)
{:numeric, score * 100, 100}
"pretestScore" ->
{score, _} = Float.parse(score_value)
{:numeric, score * 100, 100}
"problem1Completed" ->
{:non_numeric, score_value}
"problem2Completed" ->
{:non_numeric, score_value}
"problem3Completed" ->
{:non_numeric, score_value}
"score" ->
case String.split(score_value, ",", trim: true) do
[numerator, denominator] ->
{score, _} = Float.parse(numerator)
{out_of, _} = Float.parse(denominator)
{:numeric, score, out_of}
_ ->
{:non_numeric, score_value}
end
"status" ->
{:non_numeric, score_value}
"visited" ->
{:non_numeric, score_value}
end
end
defp attempt_history(context) do
xml =
AttemptHistory.setup(%{
context: context
})
|> XmlBuilder.document()
|> XmlBuilder.generate()
{:ok, xml}
end
defp get_timezone() do
{zone, result} = System.cmd("date", ["+%Z"])
if result == 0, do: String.trim(zone)
end
defp error(conn, code, reason) do
conn
|> Plug.Conn.send_resp(code, reason)
|> Plug.Conn.halt()
end
end
| 27.896484 | 114 | 0.617377 |
1c77cafff501641e2652dcb7fae691b769a3b8e6 | 207 | ex | Elixir | lib/mix/tasks/day4.ex | takakoshimizu/advent-of-code-2020 | dd0c49396196aa7686f36d596263baff0e64b800 | [
"MIT"
] | null | null | null | lib/mix/tasks/day4.ex | takakoshimizu/advent-of-code-2020 | dd0c49396196aa7686f36d596263baff0e64b800 | [
"MIT"
] | null | null | null | lib/mix/tasks/day4.ex | takakoshimizu/advent-of-code-2020 | dd0c49396196aa7686f36d596263baff0e64b800 | [
"MIT"
] | null | null | null | defmodule Mix.Tasks.Day4 do
use Mix.Task
@shortdoc "Runs the day 4 problem."
def run(_) do
Advent2020.Day4.main(input())
end
defp input, do: File.read!("./lib/advent2020/day4/input.txt")
end
| 18.818182 | 63 | 0.681159 |
1c78136433929f6040f2d83337ae8ea75d5b3e57 | 706 | ex | Elixir | lib/attendance_web/controllers/fallback_controller.ex | hex337/attendance-phoenix | aaade35897b5910d00f2c047cc29bfe2d213f786 | [
"MIT"
] | null | null | null | lib/attendance_web/controllers/fallback_controller.ex | hex337/attendance-phoenix | aaade35897b5910d00f2c047cc29bfe2d213f786 | [
"MIT"
] | null | null | null | lib/attendance_web/controllers/fallback_controller.ex | hex337/attendance-phoenix | aaade35897b5910d00f2c047cc29bfe2d213f786 | [
"MIT"
] | null | null | null | defmodule AttendanceWeb.FallbackController do
@moduledoc """
Translates controller action results into valid `Plug.Conn` responses.
See `Phoenix.Controller.action_fallback/1` for more details.
"""
use AttendanceWeb, :controller
def call(conn, {:error, %Ecto.Changeset{} = changeset}) do
conn
|> put_status(:unprocessable_entity)
|> render(AttendanceWeb.ChangesetView, "error.json", changeset: changeset)
end
def call(conn, {:error, :not_found}) do
conn
|> put_status(:not_found)
|> render(AttendanceWeb.ErrorView, :"404")
end
def call(conn, {:error, :unauthorized}) do
conn
|> put_status(:unauthorized)
|> json(%{error: "Login error"})
end
end
| 26.148148 | 78 | 0.689802 |
1c782538c27675fd3cd660af16d9ad326f1f3ae7 | 5,444 | ex | Elixir | lib/pow/phoenix/controllers/controller.ex | danschultzer/authex | 920ab2ef9dafaaaba215247e08c3e194e9474fc0 | [
"MIT"
] | 4 | 2018-05-07T16:37:15.000Z | 2018-07-14T00:44:12.000Z | lib/pow/phoenix/controllers/controller.ex | danschultzer/authex | 920ab2ef9dafaaaba215247e08c3e194e9474fc0 | [
"MIT"
] | null | null | null | lib/pow/phoenix/controllers/controller.ex | danschultzer/authex | 920ab2ef9dafaaaba215247e08c3e194e9474fc0 | [
"MIT"
] | null | null | null | defmodule Pow.Phoenix.Controller do
@moduledoc """
Used with Pow Phoenix controllers to handle messages, routes and callbacks.
## Usage
defmodule MyPowExtension.Phoenix.MyController do
use Pow.Phoenix.Controller
def process_new(conn, _params) do
{:ok, :response, conn}
end
def respond_new({:ok, :response, conn}) do
render(conn, "new.html")
end
end
## Configuration options
* `:messages_backend` - See `Pow.Phoenix.Messages` for more.
* `:routes_backend` - See `Pow.Phoenix.Routes` for more.
* `:controller_callbacks` - See
`Pow.Extension.Phoenix.ControllerCallbacks` for more.
"""
alias Plug.Conn
alias Pow.Config
alias Pow.Phoenix.{Messages, PlugErrorHandler, Routes, ViewHelpers}
alias Pow.Plug
  @doc false
  defmacro __using__(config) do
    # Turns the caller into a Pow Phoenix controller: applies the Pow layout,
    # routes every action through `action/3` below, and injects the
    # overridable `messages/1` / `routes/1` helpers.
    quote do
      use Phoenix.Controller,
        namespace: Pow.Phoenix
      import unquote(__MODULE__), only: [require_authenticated: 2, require_not_authenticated: 2, put_no_cache_header: 2]
      plug :pow_layout, unquote(config)
      def action(conn, _opts), do: unquote(__MODULE__).action(__MODULE__, conn, conn.params)
      defp pow_layout(conn, _config), do: ViewHelpers.layout(conn)
      unquote(__MODULE__).__define_helper_methods__()
    end
  end
  @doc false
  defmacro __define_helper_methods__ do
    # Defines default, overridable accessors for the configured messages
    # and routes backends (falling back to `Messages` / `Routes`).
    quote do
      def messages(conn), do: unquote(__MODULE__).messages(conn, Messages)
      def routes(conn), do: unquote(__MODULE__).routes(conn, Routes)
      defoverridable messages: 1, routes: 1
    end
  end
@doc """
Handles the controller action call.
If a `:controller_callbacks` module has been set in the configuration,
then `before_process` and `before_respond` will be called on this module
on all actions.
"""
@spec action(atom(), Conn.t(), map()) :: Conn.t()
def action(controller, %{private: private} = conn, params) do
action = private.phoenix_action
config = Plug.fetch_config(conn)
callbacks = Config.get(config, :controller_callbacks)
conn
|> maybe_callback(callbacks, :before_process, controller, action, config)
|> process_action(controller, action, params)
|> maybe_callback(callbacks, :before_respond, controller, action, config)
|> respond_action(controller, action)
end
  # Dispatches to the controller's `process_<action>/2`; {:halt, conn}
  # short-circuits. NOTE(review): the action atom comes from
  # `conn.private.phoenix_action` (set by Phoenix routing), so the
  # `String.to_atom/1` below is not fed raw user input — confirm.
  defp process_action({:halt, conn}, _controller, _action, _params), do: {:halt, conn}
  defp process_action(conn, controller, action, params) do
    apply(controller, String.to_atom("process_#{action}"), [conn, params])
  end
  # Dispatches to `respond_<action>/1`; a halted conn is returned as-is.
  defp respond_action({:halt, conn}, _controller, _action), do: conn
  defp respond_action(results, controller, action) do
    apply(controller, String.to_atom("respond_#{action}"), [results])
  end
  # Runs the configured callback hook, if any; {:halt, conn} short-circuits
  # and a nil callbacks module is a pass-through.
  defp maybe_callback({:halt, conn}, _callbacks, _hook, _controller, _action, _config),
    do: {:halt, conn}
  defp maybe_callback(results, nil, _hook, _controller, _action, _config), do: results
  defp maybe_callback(results, callbacks, hook, controller, action, config) do
    apply(callbacks, hook, [controller, action, results, config])
  end
@doc """
Fetches messages backend from configuration, or use fallback.
"""
@spec messages(Conn.t(), atom()) :: atom()
def messages(conn, fallback) do
conn
|> Plug.fetch_config()
|> Config.get(:messages_backend, fallback)
end
@doc """
Fetches routes backend from configuration, or use fallback.
"""
@spec routes(Conn.t(), atom()) :: atom()
def routes(conn, fallback) do
conn
|> Plug.fetch_config()
|> Config.get(:routes_backend, fallback)
end
@spec route_helper(atom()) :: binary()
def route_helper(plug) do
as = Phoenix.Naming.resource_name(plug, "Controller")
[base | _rest] = Module.split(plug)
base = Macro.underscore(base)
"#{base}_#{as}"
end
@doc """
Ensures that user has been authenticated.
`Pow.Phoenix.PlugErrorHandler` is used as error handler. See
`Pow.Plug.RequireAuthenticated` for more.
"""
@spec require_authenticated(Conn.t(), Keyword.t()) :: Conn.t()
def require_authenticated(conn, _opts) do
opts = Plug.RequireAuthenticated.init(error_handler: PlugErrorHandler)
Plug.RequireAuthenticated.call(conn, opts)
end
@doc """
Ensures that user hasn't been authenticated.
`Pow.Phoenix.PlugErrorHandler` is used as error handler. See
`Pow.Plug.RequireNotAuthenticated` for more.
"""
@spec require_not_authenticated(Conn.t(), Keyword.t()) :: Conn.t()
def require_not_authenticated(conn, _opts) do
opts = Plug.RequireNotAuthenticated.init(error_handler: PlugErrorHandler)
Plug.RequireNotAuthenticated.call(conn, opts)
end
@default_cache_control_header Conn.get_resp_header(struct(Conn), "cache-control")
@no_cache_control_header "no-cache, no-store, must-revalidate"
@doc """
Ensures that the page can't be cached in browser.
This will add a "cache-control" header with
"no-cache, no-store, must-revalidate" if the cache control header hasn't
been changed from the default value in the `Plug.Conn` struct.
"""
@spec put_no_cache_header(Conn.t(), Keyword.t()) :: Conn.t()
def put_no_cache_header(conn, _opts) do
conn
|> Conn.get_resp_header("cache-control")
|> case do
@default_cache_control_header -> Conn.put_resp_header(conn, "cache-control", @no_cache_control_header)
_any -> conn
end
end
end
| 31.651163 | 120 | 0.692506 |
1c782b08893eb3f2a27d7ff32547235da68de7cf | 3,051 | exs | Elixir | test/advisory_locks_test.exs | scudelletti/eventstore | 9f795d45f1a8ca4c72d0614df24586dcb2ecfa12 | [
"MIT"
] | null | null | null | test/advisory_locks_test.exs | scudelletti/eventstore | 9f795d45f1a8ca4c72d0614df24586dcb2ecfa12 | [
"MIT"
] | null | null | null | test/advisory_locks_test.exs | scudelletti/eventstore | 9f795d45f1a8ca4c72d0614df24586dcb2ecfa12 | [
"MIT"
] | null | null | null | defmodule EventStore.AdvisoryLocksTest do
  use EventStore.StorageCase
  alias EventStore.{AdvisoryLocks, Config, ProcessHelper, Wait}
  alias EventStore.Storage
  # Registered names for the AdvisoryLocks server and its Postgrex connection.
  @locks TestEventStore.EventStore.AdvisoryLocks
  @conn TestEventStore.EventStore.AdvisoryLocks.Postgrex
  # Each test gets its own direct Postgres connection to compete for locks.
  setup do
    postgrex_config = Config.parsed(TestEventStore, :eventstore) |> Config.default_postgrex_opts()
    {:ok, conn} = Postgrex.start_link(postgrex_config)
    [conn: conn]
  end
  describe "acquire lock" do
    test "should acquire lock when available" do
      assert {:ok, lock} = AdvisoryLocks.try_advisory_lock(@locks, 1)
      assert is_reference(lock)
    end
    # Advisory locks are re-entrant for the same session/process.
    test "should acquire lock when same process already has lock" do
      assert {:ok, lock1} = AdvisoryLocks.try_advisory_lock(@locks, 1)
      assert {:ok, lock2} = AdvisoryLocks.try_advisory_lock(@locks, 1)
      assert {:ok, lock3} = AdvisoryLocks.try_advisory_lock(@locks, 1)
      assert is_reference(lock1)
      assert is_reference(lock2)
      assert is_reference(lock3)
    end
    test "should fail to acquire lock when already taken", %{conn: conn} do
      # Take the lock via the independent connection first.
      :ok = Storage.Lock.try_acquire_exclusive_lock(conn, 1)
      assert {:error, :lock_already_taken} = AdvisoryLocks.try_advisory_lock(@locks, 1)
    end
  end
  describe "release lock" do
    test "should release lock when process terminates", %{conn: conn} do
      reply_to = self()
      # Acquire the lock from a separate process, then kill that process.
      pid =
        spawn_link(fn ->
          assert {:ok, _lock} = AdvisoryLocks.try_advisory_lock(@locks, 1)
          send(reply_to, :lock_acquired)
          # Wait until terminated
          :timer.sleep(:infinity)
        end)
      assert_receive :lock_acquired
      assert {:error, :lock_already_taken} = Storage.Lock.try_acquire_exclusive_lock(conn, 1)
      ProcessHelper.shutdown(pid)
      # Wait for lock to be released after process terminates
      Wait.until(fn ->
        assert :ok = Storage.Lock.try_acquire_exclusive_lock(conn, 1)
      end)
    end
  end
  describe "disconnect" do
    test "should send `lock_released` message" do
      assert {:ok, lock} = AdvisoryLocks.try_advisory_lock(@locks, 1)
      # Simulate the database connection going down (see connection_down/0).
      connection_down()
      assert_receive({AdvisoryLocks, :lock_released, ^lock, :shutdown})
    end
  end
  describe "acquire same lock on different schemas" do
    setup do
      postgrex_config = Config.parsed(TestEventStore, :eventstore)
      {:ok, conn1} =
        postgrex_config
        |> Keyword.put(:schema, "public")
        |> Config.default_postgrex_opts()
        |> Postgrex.start_link()
      {:ok, conn2} =
        postgrex_config
        |> Keyword.put(:schema, "example")
        |> Config.default_postgrex_opts()
        |> Postgrex.start_link()
      [conn1: conn1, conn2: conn2]
    end
    # Locks are scoped per schema, so the same key may be held in both.
    test "should acquire lock", %{conn1: conn1, conn2: conn2} do
      :ok = Storage.Lock.try_acquire_exclusive_lock(conn1, 1)
      :ok = Storage.Lock.try_acquire_exclusive_lock(conn2, 1)
    end
  end
  # Fakes the DOWN message the AdvisoryLocks server receives when its
  # database connection dies.
  defp connection_down do
    send(@locks, {:DOWN, @conn, nil, :shutdown})
  end
end
| 28.514019 | 98 | 0.67355 |
1c782fe52a86d934eeced2bcc4a378d1bb43d6ec | 1,888 | exs | Elixir | clients/memcache/mix.exs | myskoach/elixir-google-api | 4f8cbc2fc38f70ffc120fd7ec48e27e46807b563 | [
"Apache-2.0"
] | null | null | null | clients/memcache/mix.exs | myskoach/elixir-google-api | 4f8cbc2fc38f70ffc120fd7ec48e27e46807b563 | [
"Apache-2.0"
] | 1 | 2020-12-18T09:25:12.000Z | 2020-12-18T09:25:12.000Z | clients/memcache/mix.exs | myskoach/elixir-google-api | 4f8cbc2fc38f70ffc120fd7ec48e27e46807b563 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Memcache.Mixfile do
  use Mix.Project

  @version "0.9.0"

  # Mix project definition for the generated Memcache API client.
  def project() do
    prod? = Mix.env() == :prod

    [
      app: :google_api_memcache,
      version: @version,
      elixir: "~> 1.6",
      build_embedded: prod?,
      start_permanent: prod?,
      description: description(),
      package: package(),
      deps: deps(),
      source_url: "https://github.com/googleapis/elixir-google-api/tree/master/clients/memcache"
    ]
  end

  def application() do
    [extra_applications: [:logger]]
  end

  defp deps() do
    [
      {:google_gax, "~> 0.4"},
      {:ex_doc, "~> 0.16", only: :dev}
    ]
  end

  # Hex package description shown on hex.pm.
  defp description() do
    """
    Cloud Memorystore for Memcached API client library. Google Cloud Memorystore for Memcached API is used for creating and managing Memcached instances in GCP.
    """
  end

  # Hex package metadata (files, maintainers, license, links).
  defp package() do
    [
      files: ["lib", "mix.exs", "README*", "LICENSE"],
      maintainers: ["Jeff Ching", "Daniel Azuma"],
      licenses: ["Apache 2.0"],
      links: %{
        "GitHub" => "https://github.com/googleapis/elixir-google-api/tree/master/clients/memcache",
        "Homepage" => "https://cloud.google.com/memorystore/"
      }
    ]
  end
end
| 28.179104 | 160 | 0.660487 |
1c784614cd41af666b405bfa9f244002a5d5c4e9 | 1,674 | exs | Elixir | test/helper_test.exs | vpsinc/modbus | e1b04916dc12c104e032122738815a1e3910394b | [
"Apache-2.0"
] | null | null | null | test/helper_test.exs | vpsinc/modbus | e1b04916dc12c104e032122738815a1e3910394b | [
"Apache-2.0"
] | null | null | null | test/helper_test.exs | vpsinc/modbus | e1b04916dc12c104e032122738815a1e3910394b | [
"Apache-2.0"
] | null | null | null | defmodule Modbus.HelperTest do
use ExUnit.Case
alias Modbus.Utils
alias Modbus.Crc
test "bool_to_byte test" do
assert 0x00 == Utils.bool_to_byte(0)
assert 0xFF == Utils.bool_to_byte(1)
end
test "byte_count test" do
assert 1 == Utils.byte_count(1)
assert 1 == Utils.byte_count(2)
assert 1 == Utils.byte_count(7)
assert 1 == Utils.byte_count(8)
assert 2 == Utils.byte_count(9)
assert 2 == Utils.byte_count(15)
assert 2 == Utils.byte_count(16)
assert 3 == Utils.byte_count(17)
end
test "bin_to_bitlist test" do
assert [1] == Utils.bin_to_bitlist(1, <<0x13>>)
assert [1, 1] == Utils.bin_to_bitlist(2, <<0x13>>)
assert [1, 1, 0] == Utils.bin_to_bitlist(3, <<0x13>>)
assert [1, 1, 0, 0] == Utils.bin_to_bitlist(4, <<0x13>>)
assert [1, 1, 0, 0, 1] == Utils.bin_to_bitlist(5, <<0x13>>)
assert [1, 1, 0, 0, 1, 0] == Utils.bin_to_bitlist(6, <<0x13>>)
assert [1, 1, 0, 0, 1, 0, 0] == Utils.bin_to_bitlist(7, <<0x13>>)
assert [1, 1, 0, 0, 1, 0, 0, 0] == Utils.bin_to_bitlist(8, <<0x13>>)
assert [1, 1, 0, 0, 1, 0, 0, 0, 1] == Utils.bin_to_bitlist(9, <<0x13, 0x01>>)
end
test "bin_to_reglist test" do
assert [0x0102] == Utils.bin_to_reglist(1, <<0x01, 0x02>>)
assert [0x0102, 0x0304] == Utils.bin_to_reglist(2, <<0x01, 0x02, 0x03, 0x04>>)
end
test "crc test" do
p(<<0xCB, 0x4F>>, <<0x01, 0x05, 0x0B, 0xB8, 0x00, 0x00>>)
p(<<0x3B, 0x0E>>, <<0x01, 0x05, 0x0B, 0xB8, 0xFF, 0x00>>)
p(<<0xCB, 0x7F>>, <<0x01, 0x01, 0x0B, 0xB8, 0x00, 0x01>>)
p(<<0x88, 0x51>>, <<0x01, 0x01, 0x01, 0x00>>)
end
defp p(crc, data) do
assert crc == data |> Crc.crc()
end
end
| 33.48 | 82 | 0.601553 |
1c784ba5a89591d377e6f769509e19ecfe09540c | 754 | ex | Elixir | lib/mgp_web/live/phoenix_live_svelte.ex | imprest/mgp | 61457315243d0e0c26713601b9930ca34a116a16 | [
"MIT"
] | null | null | null | lib/mgp_web/live/phoenix_live_svelte.ex | imprest/mgp | 61457315243d0e0c26713601b9930ca34a116a16 | [
"MIT"
] | 2 | 2020-12-22T12:30:58.000Z | 2021-05-19T10:07:26.000Z | lib/mgp_web/live/phoenix_live_svelte.ex | imprest/mgp | 61457315243d0e0c26713601b9930ca34a116a16 | [
"MIT"
] | null | null | null | defmodule PhoenixLiveSvelte do
import Phoenix.HTML
import Phoenix.HTML.Tag
def live_svelte_component(name, props \\ %{}, options \\ []) do
html_escape([
receiver_element(name, props, options)
])
end
defp receiver_element(name, props, options) do
attr = Keyword.get(options, :receiver, [])
tag = Keyword.get(options, :receiver_tag, :div)
binding_prefix = Keyword.get(options, :binding_prefix, "phx-")
default_attr = [
id: Keyword.get(options, :id),
data: [
svelte_name: name,
svelte_props: Jason.encode!(props)
],
"#{binding_prefix}hook": "LiveSvelte",
"#{binding_prefix}update": "ignore"
]
content_tag(tag, "", Keyword.merge(default_attr, attr))
end
end
| 26 | 66 | 0.643236 |
1c784c8c0674dfc5670b342c58549f691160ece1 | 1,121 | exs | Elixir | config/config.exs | crertel/bloglixir | 6c8ee7775ee1f76f8bb9c9f8a9db74e6cf5eecc6 | [
"MIT"
] | null | null | null | config/config.exs | crertel/bloglixir | 6c8ee7775ee1f76f8bb9c9f8a9db74e6cf5eecc6 | [
"MIT"
] | null | null | null | config/config.exs | crertel/bloglixir | 6c8ee7775ee1f76f8bb9c9f8a9db74e6cf5eecc6 | [
"MIT"
] | null | null | null | # This file is responsible for configuring your application
# and its dependencies with the aid of the Mix.Config module.
use Mix.Config
# This configuration is loaded before any dependency and is restricted
# to this project. If another project depends on this project, this
# file won't be loaded nor affect the parent project. For this reason,
# if you want to provide default values for your application for
# 3rd-party users, it should be done in your "mix.exs" file.
# You can configure for your application as:
#
# config :bloglixir, key: :value
#
# And access this configuration in your application as:
#
# Application.get_env(:bloglixir, :key)
#
# Or configure a 3rd-party app:
#
# config :logger, level: :info
#
# It is also possible to import configuration files, relative to this
# directory. For example, you can emulate configuration per environment
# by uncommenting the line below and defining dev.exs, test.exs and such.
# Configuration from the imported file will override the ones defined
# here (which is why it is important to import them last).
#
# import_config "#{Mix.env}.exs"
| 36.16129 | 73 | 0.752007 |
1c784ed81f544e6a6b2b1113c1fbb06575d42ed1 | 10,508 | ex | Elixir | lib/phoenix_live_view/utils.ex | speeddragon/phoenix_live_view | df3c414214e06e3ae207ffba01036f3a3abf8454 | [
"MIT"
] | null | null | null | lib/phoenix_live_view/utils.ex | speeddragon/phoenix_live_view | df3c414214e06e3ae207ffba01036f3a3abf8454 | [
"MIT"
] | null | null | null | lib/phoenix_live_view/utils.ex | speeddragon/phoenix_live_view | df3c414214e06e3ae207ffba01036f3a3abf8454 | [
"MIT"
] | null | null | null | defmodule Phoenix.LiveView.Utils do
# Shared helpers used mostly by Channel and Diff,
# but also Static, and LiveViewTest.
@moduledoc false
alias Phoenix.LiveView.Rendered
alias Phoenix.LiveView.Socket
# All available mount options
@mount_opts [:temporary_assigns, :layout]
@max_flash_age :timer.seconds(60)
@doc """
Assigns a value if it changed change.
"""
def assign(%Socket{} = socket, key, value) do
case socket do
%{assigns: %{^key => ^value}} -> socket
%{} -> force_assign(socket, key, value)
end
end
@doc """
Forces an assign.
"""
def force_assign(%Socket{assigns: assigns, changed: changed} = socket, key, val) do
new_changed = Map.put(changed, key, true)
new_assigns = Map.put(assigns, key, val)
%{socket | assigns: new_assigns, changed: new_changed}
end
@doc """
Clears the changes from the socket assigns.
"""
def clear_changed(%Socket{private: private, assigns: assigns} = socket) do
temporary = Map.get(private, :temporary_assigns, %{})
%Socket{socket | changed: %{}, assigns: Map.merge(assigns, temporary)}
end
@doc """
Checks if the socket changed.
"""
def changed?(%Socket{changed: changed}), do: changed != %{}
def changed?(%Socket{changed: %{} = changed}, assign), do: Map.has_key?(changed, assign)
def changed?(%Socket{}, _), do: false
@doc """
Configures the socket for use.
"""
def configure_socket(%{id: nil, assigns: assigns, view: view} = socket, private, action, flash) do
%{
socket
| id: random_id(),
private: private,
assigns: configure_assigns(assigns, view, action, flash)
}
end
def configure_socket(%{assigns: assigns, view: view} = socket, private, action, flash) do
%{socket | private: private, assigns: configure_assigns(assigns, view, action, flash)}
end
defp configure_assigns(assigns, view, action, flash) do
Map.merge(assigns, %{live_module: view, live_action: action, flash: flash})
end
@doc """
Returns a random ID with valid DOM tokens
"""
def random_id do
"phx-"
|> Kernel.<>(random_encoded_bytes())
|> String.replace(["/", "+"], "-")
end
@doc """
Prunes any data no longer needed after mount.
"""
def post_mount_prune(%Socket{} = socket) do
socket
|> clear_changed()
|> drop_private([:connect_params, :assign_new])
end
@doc """
Renders the view with socket into a rendered struct.
"""
def to_rendered(socket, view) do
inner_content =
render_assigns(socket)
|> view.render()
|> check_rendered!(view)
case layout(socket, view) do
{layout_mod, layout_template} ->
socket = assign(socket, :inner_content, inner_content)
layout_template
|> layout_mod.render(render_assigns(socket))
|> check_rendered!(layout_mod)
false ->
inner_content
end
end
defp check_rendered!(%Rendered{} = rendered, _view), do: rendered
defp check_rendered!(other, view) do
raise RuntimeError, """
expected #{inspect(view)} to return a %Phoenix.LiveView.Rendered{} struct
Ensure your render function uses ~L, or your eex template uses the .leex extension.
Got:
#{inspect(other)}
"""
end
@doc """
Returns the socket's flash messages.
"""
def get_flash(%Socket{assigns: assigns}), do: assigns.flash
def get_flash(%{} = flash, key), do: flash[key]
@doc """
Puts a new flash with the socket's flash messages.
"""
def replace_flash(%Socket{} = socket, %{} = new_flash) do
assign(socket, :flash, new_flash)
end
@doc """
Clears the flash.
"""
def clear_flash(%Socket{} = socket), do: assign(socket, :flash, %{})
@doc """
Clears the key from the flash.
"""
def clear_flash(%Socket{} = socket, key) do
new_flash = Map.delete(socket.assigns.flash, key)
assign(socket, :flash, new_flash)
end
@doc """
Puts a flash message in the socket.
"""
def put_flash(%Socket{assigns: assigns} = socket, kind, msg) do
kind = flash_key(kind)
new_flash = Map.put(assigns.flash, kind, msg)
assign(socket, :flash, new_flash)
end
defp flash_key(binary) when is_binary(binary), do: binary
defp flash_key(atom) when is_atom(atom), do: Atom.to_string(atom)
@doc """
Returns the configured signing salt for the endpoint.
"""
def salt!(endpoint) when is_atom(endpoint) do
endpoint.config(:live_view)[:signing_salt] ||
raise ArgumentError, """
no signing salt found for #{inspect(endpoint)}.
Add the following LiveView configuration to your config/config.exs:
config :my_app, MyAppWeb.Endpoint,
...,
live_view: [signing_salt: "#{random_encoded_bytes()}"]
"""
end
@doc """
Returns the internal or external matched LiveView route info for the given uri
"""
def live_link_info!(nil, view, _uri) do
raise ArgumentError,
"cannot invoke handle_params/3 on #{inspect(view)} " <>
"because it is not mounted nor accessed through the router live/3 macro"
end
def live_link_info!(router, view, uri) do
%URI{host: host, path: path, query: query} = parsed_uri = URI.parse(uri)
query_params = if query, do: Plug.Conn.Query.decode(query), else: %{}
case Phoenix.Router.route_info(router, "GET", URI.decode(path || ""), host) do
%{plug: Phoenix.LiveView.Plug, phoenix_live_view: {^view, action}, path_params: path_params} ->
{:internal, Map.merge(query_params, path_params), action, parsed_uri}
%{} ->
:external
:error ->
raise ArgumentError,
"cannot invoke handle_params nor live_redirect/live_link to #{inspect(uri)} " <>
"because it isn't defined in #{inspect(router)}"
end
end
@doc """
Raises error message for bad live patch on mount.
"""
def raise_bad_mount_and_live_patch!() do
raise RuntimeError, """
attempted to live patch while mounting.
a LiveView cannot be mounted while issuing a live patch to the client. \
Use push_redirect/2 or redirect/2 instead if you wish to mount and redirect.
"""
end
@doc """
Calls the optional `mount/N` callback, otherwise returns the socket as is.
"""
def maybe_call_mount!(socket, view, args) do
arity = length(args)
if function_exported?(view, :mount, arity) do
case apply(view, :mount, args) do
{:ok, %Socket{} = socket, opts} when is_list(opts) ->
validate_mount_redirect!(socket.redirected)
Enum.reduce(opts, socket, fn {key, val}, acc -> mount_opt(acc, key, val, arity) end)
{:ok, %Socket{} = socket} ->
validate_mount_redirect!(socket.redirected)
socket
other ->
raise ArgumentError, """
invalid result returned from #{inspect(view)}.mount/#{length(args)}.
Expected {:ok, socket} | {:ok, socket, opts}, got: #{inspect(other)}
"""
end
else
socket
end
end
defp validate_mount_redirect!({:live, {_, _}, _}), do: raise_bad_mount_and_live_patch!()
defp validate_mount_redirect!(_), do: :ok
@doc """
Calls the optional `update/2` callback, otherwise update the socket directly.
"""
def maybe_call_update!(socket, component, assigns) do
if function_exported?(component, :update, 2) do
socket =
case component.update(assigns, socket) do
{:ok, %Socket{} = socket} ->
socket
other ->
raise ArgumentError, """
invalid result returned from #{inspect(component)}.update/2.
Expected {:ok, socket}, got: #{inspect(other)}
"""
end
if socket.redirected do
raise "cannot redirect socket on update/2"
end
socket
else
Enum.reduce(assigns, socket, fn {k, v}, acc -> assign(acc, k, v) end)
end
end
@doc """
Signs the socket's flash into a token if it has been set.
"""
def sign_flash(endpoint_mod, %{} = flash) do
Phoenix.Token.sign(endpoint_mod, flash_salt(endpoint_mod), flash)
end
@doc """
Verifies the socket's flash token.
"""
def verify_flash(endpoint_mod, flash_token) do
salt = flash_salt(endpoint_mod)
case Phoenix.Token.verify(endpoint_mod, salt, flash_token, max_age: @max_flash_age) do
{:ok, flash} -> flash
{:error, _reason} -> %{}
end
end
defp random_encoded_bytes do
binary = <<
System.system_time(:nanosecond)::64,
:erlang.phash2({node(), self()})::16,
:erlang.unique_integer()::16
>>
Base.url_encode64(binary)
end
defp mount_opt(%Socket{} = socket, key, val, _arity) when key in @mount_opts do
do_mount_opt(socket, key, val)
end
defp mount_opt(%Socket{view: view}, key, val, arity) do
raise ArgumentError, """
invalid option returned from #{inspect(view)}.mount/#{arity}.
Expected keys to be one of #{inspect(@mount_opts)}
got: #{inspect(key)}: #{inspect(val)}
"""
end
defp do_mount_opt(socket, :layout, {mod, template}) when is_atom(mod) and is_binary(template) do
%Socket{socket | private: Map.put(socket.private, :phoenix_live_layout, {mod, template})}
end
defp do_mount_opt(socket, :layout, false) do
%Socket{socket | private: Map.put(socket.private, :phoenix_live_layout, false)}
end
defp do_mount_opt(_socket, :layout, bad_layout) do
raise ArgumentError,
"the :layout mount option expects a tuple of the form {MyLayoutView, \"my_template.html\"}, " <>
"got: #{inspect(bad_layout)}"
end
defp do_mount_opt(socket, :temporary_assigns, temp_assigns) do
unless Keyword.keyword?(temp_assigns) do
raise "the :temporary_assigns mount option must be keyword list"
end
temp_assigns = Map.new(temp_assigns)
%Socket{
socket
| assigns: Map.merge(temp_assigns, socket.assigns),
private: Map.put(socket.private, :temporary_assigns, temp_assigns)
}
end
defp drop_private(%Socket{private: private} = socket, keys) do
%Socket{socket | private: Map.drop(private, keys)}
end
defp render_assigns(socket) do
Map.put(socket.assigns, :socket, %Socket{socket | assigns: %Socket.AssignsNotInSocket{}})
end
defp layout(socket, view) do
case socket.private do
%{phoenix_live_layout: layout} -> layout
%{} -> view.__live__()[:layout] || false
end
end
defp flash_salt(endpoint_mod) when is_atom(endpoint_mod) do
"flash:" <> salt!(endpoint_mod)
end
end
| 28.710383 | 106 | 0.644747 |
1c785a1e796fa3ccf26d43e6a07a43093ae77e12 | 63,415 | ex | Elixir | lib/aws/generated/glacier.ex | benmmari/aws-elixir | b97477498a9e8ba0d46a09255302d88c6a1c8573 | [
"Apache-2.0"
] | null | null | null | lib/aws/generated/glacier.ex | benmmari/aws-elixir | b97477498a9e8ba0d46a09255302d88c6a1c8573 | [
"Apache-2.0"
] | null | null | null | lib/aws/generated/glacier.ex | benmmari/aws-elixir | b97477498a9e8ba0d46a09255302d88c6a1c8573 | [
"Apache-2.0"
] | null | null | null | # WARNING: DO NOT EDIT, AUTO-GENERATED CODE!
# See https://github.com/aws-beam/aws-codegen for more details.
defmodule AWS.Glacier do
@moduledoc """
Amazon S3 Glacier (Glacier) is a storage solution for "cold data."
Glacier is an extremely low-cost storage service that provides secure,
durable, and easy-to-use storage for data backup and archival. With
Glacier, customers can store their data cost effectively for months, years,
or decades. Glacier also enables customers to offload the administrative
burdens of operating and scaling storage to AWS, so they don't have to
worry about capacity planning, hardware provisioning, data replication,
hardware failure and recovery, or time-consuming hardware migrations.
Glacier is a great storage choice when low storage cost is paramount and
your data is rarely retrieved. If your application requires fast or
frequent access to your data, consider using Amazon S3. For more
information, see [Amazon Simple Storage Service (Amazon
S3)](http://aws.amazon.com/s3/).
You can store any kind of data in any format. There is no maximum limit on
the total amount of data you can store in Glacier.
If you are a first-time user of Glacier, we recommend that you begin by
reading the following sections in the *Amazon S3 Glacier Developer Guide*:
<ul> <li> [What is Amazon S3
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/introduction.html)
- This section of the Developer Guide describes the underlying data model,
the operations it supports, and the AWS SDKs that you can use to interact
with the service.
</li> <li> [Getting Started with Amazon S3
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/amazon-glacier-getting-started.html)
- The Getting Started section walks you through the process of creating a
vault, uploading archives, creating jobs to download archives, retrieving
the job output, and deleting archives.
</li> </ul>
"""
@doc """
This operation aborts a multipart upload identified by the upload ID.
After the Abort Multipart Upload request succeeds, you cannot upload any
more parts to the multipart upload or complete the multipart upload.
Aborting a completed upload fails. However, aborting an already-aborted
upload will succeed, for a short time. For more information about uploading
a part and completing a multipart upload, see `UploadMultipartPart` and
`CompleteMultipartUpload`.
This operation is idempotent.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Working with
Archives in Amazon S3
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html)
and [Abort Multipart
Upload](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-abort-upload.html)
in the *Amazon Glacier Developer Guide*.
"""
def abort_multipart_upload(client, account_id, upload_id, vault_name, input, options \\ []) do
  # DELETE on the multipart-upload resource; success is an empty 204 reply.
  url_path =
    "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads/#{URI.encode(upload_id)}"

  request(client, :delete, url_path, [], [], input, options, 204)
end
@doc """
This operation aborts the vault locking process if the vault lock is not in
the `Locked` state. If the vault lock is in the `Locked` state when this
operation is requested, the operation returns an `AccessDeniedException`
error. Aborting the vault locking process removes the vault lock policy
from the specified vault.
A vault lock is put into the `InProgress` state by calling
`InitiateVaultLock`. A vault lock is put into the `Locked` state by calling
`CompleteVaultLock`. You can get the state of a vault lock by calling
`GetVaultLock`. For more information about the vault locking process, see
[Amazon Glacier Vault
Lock](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
For more information about vault lock policies, see [Amazon Glacier Access
Control with Vault Lock
Policies](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html).
This operation is idempotent. You can successfully invoke this operation
multiple times, if the vault lock is in the `InProgress` state or if there
is no policy associated with the vault.
"""
def abort_vault_lock(client, account_id, vault_name, input, options \\ []) do
  # Deleting the lock-policy resource aborts an in-progress vault lock.
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/lock-policy"
  request(client, :delete, url_path, [], [], input, options, 204)
end
@doc """
This operation adds the specified tags to a vault. Each tag is composed of
a key and a value. Each vault can have up to 10 tags. If your request would
cause the tag limit for the vault to be exceeded, the operation throws the
`LimitExceededException` error. If a tag already exists on the vault under
a specified key, the existing key value will be overwritten. For more
information about tags, see [Tagging Amazon S3 Glacier
Resources](https://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html).
"""
def add_tags_to_vault(client, account_id, vault_name, input, options \\ []) do
  # The "?operation=add" query string is part of the Glacier tagging API path.
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/tags?operation=add"
  request(client, :post, url_path, [], [], input, options, 204)
end
@doc """
You call this operation to inform Amazon S3 Glacier (Glacier) that all the
archive parts have been uploaded and that Glacier can now assemble the
archive from the uploaded parts. After assembling and saving the archive to
the vault, Glacier returns the URI path of the newly created archive
resource. Using the URI path, you can then access the archive. After you
upload an archive, you should save the archive ID returned to retrieve the
archive at a later point. You can also get the vault inventory to obtain a
list of archive IDs in a vault. For more information, see `InitiateJob`.
In the request, you must include the computed SHA256 tree hash of the
entire archive you have uploaded. For information about computing a SHA256
tree hash, see [Computing
Checksums](https://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html).
On the server side, Glacier also constructs the SHA256 tree hash of the
assembled archive. If the values match, Glacier saves the archive to the
vault; otherwise, it returns an error, and the operation fails. The
`ListParts` operation returns a list of parts uploaded for a specific
multipart upload. It includes checksum information for each uploaded part
that can be used to debug a bad checksum issue.
Additionally, Glacier also checks for any missing content ranges when
assembling the archive, if missing content ranges are found, Glacier
returns an error and the operation fails.
Complete Multipart Upload is an idempotent operation. After your first
successful complete multipart upload, if you call the operation again
within a short period, the operation will succeed and return the same
archive ID. This is useful in the event you experience a network issue that
causes an aborted connection or receive a 500 server error, in which case
you can repeat your Complete Multipart Upload request and get the same
archive ID without creating duplicate archives. Note, however, that after
the multipart upload completes, you cannot call the List Parts operation
and the multipart upload will not appear in List Multipart Uploads
response, even if idempotent complete is possible.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Uploading Large
Archives in Parts (Multipart
Upload)](https://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html)
and [Complete Multipart
Upload](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-complete-upload.html)
in the *Amazon Glacier Developer Guide*.
"""
def complete_multipart_upload(client, account_id, upload_id, vault_name, input, options \\ []) do
  url_path =
    "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads/#{URI.encode(upload_id)}"

  # Move archive size and tree-hash checksum from the input into request headers.
  {headers, input} =
    AWS.Request.build_params(
      [
        {"archiveSize", "x-amz-archive-size"},
        {"checksum", "x-amz-sha256-tree-hash"}
      ],
      input
    )

  case request(client, :post, url_path, [], headers, input, options, 201) do
    {:ok, body, response} when not is_nil(body) ->
      # Copy selected response headers into the decoded body when present.
      header_mapping = [
        {"x-amz-archive-id", "archiveId"},
        {"x-amz-sha256-tree-hash", "checksum"},
        {"Location", "location"}
      ]

      updated_body =
        Enum.reduce(header_mapping, body, fn {header_name, key}, acc ->
          case List.keyfind(response.headers, header_name, 0) do
            nil -> acc
            {_header_name, value} -> Map.put(acc, key, value)
          end
        end)

      {:ok, updated_body, response}

    other ->
      other
  end
end
@doc """
This operation completes the vault locking process by transitioning the
vault lock from the `InProgress` state to the `Locked` state, which causes
the vault lock policy to become unchangeable. A vault lock is put into the
`InProgress` state by calling `InitiateVaultLock`. You can obtain the state
of the vault lock by calling `GetVaultLock`. For more information about the
vault locking process, [Amazon Glacier Vault
Lock](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).
This operation is idempotent. This request is always successful if the
vault lock is in the `Locked` state and the provided lock ID matches the
lock ID originally used to lock the vault.
If an invalid lock ID is passed in the request when the vault lock is in
the `Locked` state, the operation returns an `AccessDeniedException` error.
If an invalid lock ID is passed in the request when the vault lock is in
the `InProgress` state, the operation throws an `InvalidParameter` error.
"""
def complete_vault_lock(client, account_id, lock_id, vault_name, input, options \\ []) do
  # POST with the lock id transitions the vault lock to the Locked state.
  url_path =
    "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/lock-policy/#{URI.encode(lock_id)}"

  request(client, :post, url_path, [], [], input, options, 204)
end
@doc """
This operation creates a new vault with the specified name. The name of the
vault must be unique within a region for an AWS account. You can create up
to 1,000 vaults per account. If you need to create more vaults, contact
Amazon S3 Glacier.
You must use the following guidelines when naming a vault.
<ul> <li> Names can be between 1 and 255 characters long.
</li> <li> Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-'
(hyphen), and '.' (period).
</li> </ul> This operation is idempotent.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Creating a Vault
in Amazon
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/creating-vaults.html)
and [Create Vault
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-put.html)
in the *Amazon Glacier Developer Guide*.
"""
def create_vault(client, account_id, vault_name, input, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}"

  case request(client, :put, url_path, [], [], input, options, 201) do
    {:ok, body, response} when not is_nil(body) ->
      # Surface the Location response header in the decoded body when present.
      updated_body =
        case List.keyfind(response.headers, "Location", 0) do
          nil -> body
          {_header_name, value} -> Map.put(body, "location", value)
        end

      {:ok, updated_body, response}

    other ->
      other
  end
end
@doc """
This operation deletes an archive from a vault. Subsequent requests to
initiate a retrieval of this archive will fail. Archive retrievals that are
in progress for this archive ID may or may not succeed according to the
following scenarios:
<ul> <li> If the archive retrieval job is actively preparing the data for
download when Amazon S3 Glacier receives the delete archive request, the
archival retrieval operation might fail.
</li> <li> If the archive retrieval job has successfully prepared the
archive for download when Amazon S3 Glacier receives the delete archive
request, you will be able to download the output.
</li> </ul> This operation is idempotent. Attempting to delete an
already-deleted archive does not result in an error.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Deleting an
Archive in Amazon
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-an-archive.html)
and [Delete
Archive](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html)
in the *Amazon Glacier Developer Guide*.
"""
def delete_archive(client, account_id, archive_id, vault_name, input, options \\ []) do
  # DELETE on the archive resource; success is an empty 204 reply.
  url_path =
    "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/archives/#{URI.encode(archive_id)}"

  request(client, :delete, url_path, [], [], input, options, 204)
end
@doc """
This operation deletes a vault. Amazon S3 Glacier will delete a vault only
if there are no archives in the vault as of the last inventory and there
have been no writes to the vault since the last inventory. If either of
these conditions is not satisfied, the vault deletion fails (that is, the
vault is not removed) and Amazon S3 Glacier returns an error. You can use
`DescribeVault` to return the number of archives in a vault, and you can
use [Initiate a Job (POST
jobs)](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html)
to initiate a new inventory retrieval for a vault. The inventory contains
the archive IDs you use to delete archives using [Delete Archive (DELETE
archive)](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html).
This operation is idempotent.
An AWS account has full permission to perform all operations (actions).
However, AWS Identity and Access Management (IAM) users don't have any
permissions by default. You must grant them explicit permission to perform
specific actions. For more information, see [Access Control Using AWS
Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For conceptual information and underlying REST API, see [Deleting a Vault
in Amazon
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-vaults.html)
and [Delete Vault
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-delete.html)
in the *Amazon S3 Glacier Developer Guide*.
"""
def delete_vault(client, account_id, vault_name, input, options \\ []) do
  # DELETE on the vault resource; success is an empty 204 reply.
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}"
  request(client, :delete, url_path, [], [], input, options, 204)
end
@doc """
Deletes the access policy associated with the specified vault.

The operation is eventually consistent: removal may take some time, and the
policy may still appear to be in effect shortly after the delete request.
It is idempotent — deleting is successful even when no policy is attached.
For more information, see [Amazon Glacier Access Control with Vault Access
Policies](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html).
"""
def delete_vault_access_policy(client, account_id, vault_name, input, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/access-policy"
  # DELETE on the vault's access-policy subresource; success is HTTP 204.
  request(client, :delete, url_path, [], [], input, options, 204)
end
@doc """
Deletes the notification configuration set for a vault.

The operation is eventuallyly consistent: disabling notifications may take
some time, and a few notifications may still arrive shortly after the
delete request. IAM users have no permissions by default; see [Access
Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For the underlying REST API, see [Delete Vault Notification Configuration
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-delete.html)
in the Amazon S3 Glacier Developer Guide.
"""
def delete_vault_notifications(client, account_id, vault_name, input, options \\ []) do
  url_path =
    "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/notification-configuration"
  # DELETE on the notification-configuration subresource; success is HTTP 204.
  request(client, :delete, url_path, [], [], input, options, 204)
end
@doc """
Returns information about a job previously started with `InitiateJob`,
including its initiation date, initiator, status code/message, and the
Amazon SNS topic notified on completion.

<note> Rather than polling this operation, it is strongly recommended to
configure an SNS topic in the initiate-job request so Glacier notifies it
when the job completes.

</note> A job ID does not expire for at least 24 hours after Glacier
completes the job. IAM users have no permissions by default; see [Access
Control Using AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For the underlying REST API, see [Describe
Job](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-describe-job-get.html)
in the *Amazon Glacier Developer Guide*.
"""
def describe_job(client, account_id, job_id, vault_name, options \\ []) do
  url_path =
    "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/jobs/#{URI.encode(job_id)}"
  # Plain GET; no request body, headers, or query parameters.
  request(client, :get, url_path, [], [], nil, options, nil)
end
@doc """
Returns information about a vault: its ARN, creation date, number of
archives, and total archive size.

Archive count and size reflect the last inventory generation, so changes
made since then are not immediately visible; use `InitiateJob` to retrieve
the latest inventory (Glacier generates inventories approximately daily).
See [Downloading a Vault Inventory in Amazon S3
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html).

IAM users have no permissions by default; see [Access Control Using AWS
Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For the underlying REST API, see [Describe Vault
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-get.html)
in the *Amazon Glacier Developer Guide*.
"""
def describe_vault(client, account_id, vault_name, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}"
  # Plain GET; no request body, headers, or query parameters.
  request(client, :get, url_path, [], [], nil, options, nil)
end
@doc """
Returns the current data retrieval policy for the account and region of the
request.

For more information, see [Amazon Glacier Data Retrieval
Policies](https://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html).
"""
def get_data_retrieval_policy(client, account_id, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/policies/data-retrieval"
  # Plain GET; no request body, headers, or query parameters.
  request(client, :get, url_path, [], [], nil, options, nil)
end
@doc """
Downloads the output of a job started with `InitiateJob` — either archive
content or a vault inventory, depending on the job type.

Pass `range` (e.g. `bytes=0-1048575`) to download only a portion of the
output; Glacier then returns the checksum for that portion so the client
can verify the downloaded data. Always verify the downloaded size against
the size returned in the response headers, and for archive retrievals
verify it matches the size you expect (the requested range, or the full
archive size). Job output remains downloadable for at least 24 hours after
Glacier completes the job.

IAM users have no permissions by default; see [Access Control Using AWS
Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For the underlying REST API, see [Get Job Output
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-job-output-get.html).
"""
def get_job_output(client, account_id, job_id, vault_name, range \\ nil, options \\ []) do
  url_path =
    "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/jobs/#{URI.encode(job_id)}/output"
  # Only send a Range header when the caller requested a partial download.
  headers = if is_nil(range), do: [], else: [{"Range", range}]
  case request(client, :get, url_path, [], headers, nil, options, nil) do
    {:ok, body, response} when not is_nil(body) ->
      # Copy selected response headers into the decoded body, keyed by the
      # field names the Glacier API models them under.
      header_fields = [
        {"Accept-Ranges", "acceptRanges"},
        {"x-amz-archive-description", "archiveDescription"},
        {"x-amz-sha256-tree-hash", "checksum"},
        {"Content-Range", "contentRange"},
        {"Content-Type", "contentType"}
      ]
      body =
        Enum.reduce(header_fields, body, fn {header, field}, acc ->
          case List.keyfind(response.headers, header, 0) do
            nil -> acc
            {_header, value} -> Map.put(acc, field, value)
          end
        end)
      {:ok, body, response}
    other ->
      other
  end
end
@doc """
Retrieves the `access-policy` subresource set on the vault; to set it, see
[Set Vault Access Policy (PUT
access-policy)](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-SetVaultAccessPolicy.html).

If no access policy is set on the vault, the operation returns a `404 Not
found` error. For more information, see [Amazon Glacier Access Control with
Vault Access
Policies](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html).
"""
def get_vault_access_policy(client, account_id, vault_name, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/access-policy"
  # Plain GET; no request body, headers, or query parameters.
  request(client, :get, url_path, [], [], nil, options, nil)
end
@doc """
Retrieves the `lock-policy` subresource attributes of the specified vault:
the vault lock policy, the lock state (`InProgess` or `Locked`), when the
lock ID expires, and when the lock entered the `InProgress` state.

A vault lock enters `InProgress` via `InitiateVaultLock` and `Locked` via
`CompleteVaultLock`; abort the process with `AbortVaultLock`. See [Amazon
Glacier Vault
Lock](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).

If no vault lock policy is set, the operation returns a `404 Not found`
error. For more information, see [Amazon Glacier Access Control with Vault
Lock
Policies](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html).
"""
def get_vault_lock(client, account_id, vault_name, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/lock-policy"
  # Plain GET; no request body, headers, or query parameters.
  request(client, :get, url_path, [], [], nil, options, nil)
end
@doc """
Retrieves the `notification-configuration` subresource of the specified
vault.

To set a notification configuration, see `SetVaultNotifications`. If none
is set, the operation returns a `404 Not Found` error. For background, see
[Configuring Vault Notifications in Amazon S3
Glacier](https://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html).

IAM users have no permissions by default; see [Access Control Using AWS
Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For the underlying REST API, see [Get Vault Notification Configuration
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-get.html)
in the *Amazon Glacier Developer Guide*.
"""
def get_vault_notifications(client, account_id, vault_name, options \\ []) do
  url_path =
    "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/notification-configuration"
  # Plain GET; no request body, headers, or query parameters.
  request(client, :get, url_path, [], [], nil, options, nil)
end
@doc """
Initiates a job of the specified type: a select, an archive retrieval, or a
vault (inventory) retrieval.

For details, see the underlying REST API [Initiate a
Job](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html).
"""
def initiate_job(client, account_id, vault_name, input, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/jobs"
  # POST the job specification; Glacier replies 202 Accepted on success.
  case request(client, :post, url_path, [], [], input, options, 202) do
    {:ok, body, response} when not is_nil(body) ->
      # Copy the job identifiers returned as response headers into the body.
      header_fields = [
        {"x-amz-job-id", "jobId"},
        {"x-amz-job-output-path", "jobOutputPath"},
        {"Location", "location"}
      ]
      body =
        Enum.reduce(header_fields, body, fn {header, field}, acc ->
          case List.keyfind(response.headers, header, 0) do
            nil -> acc
            {_header, value} -> Map.put(acc, field, value)
          end
        end)
      {:ok, body, response}
    other ->
      other
  end
end
@doc """
Initiates a multipart upload and returns the upload ID used by subsequent
`UploadMultipartPart` requests.

The part size, given in bytes, must be 1 MB (1024 KB) multiplied by a power
of 2 — e.g. 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB)
— with a minimum of 1 MB and a maximum of 4 GB. Every uploaded part except
the last must be exactly that size; the last part may be smaller. The
overall archive size does not need to be known up front.

<note> After the upload is completed, Glacier removes the multipart upload
resource; it is also removed if you abort the upload, and it may be removed
after 24 hours of inactivity.

</note> IAM users have no permissions by default; see [Access Control Using
AWS Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For the underlying REST API, see [Initiate Multipart
Upload](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-initiate-upload.html)
in the *Amazon Glacier Developer Guide*.
"""
def initiate_multipart_upload(client, account_id, vault_name, input, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads"
  # Lift archiveDescription/partSize out of the input map into HTTP headers.
  {headers, input} =
    AWS.Request.build_params(
      [
        {"archiveDescription", "x-amz-archive-description"},
        {"partSize", "x-amz-part-size"}
      ],
      input
    )
  case request(client, :post, url_path, [], headers, input, options, 201) do
    {:ok, body, response} when not is_nil(body) ->
      # Copy the upload identifiers returned as response headers into the body.
      header_fields = [
        {"Location", "location"},
        {"x-amz-multipart-upload-id", "uploadId"}
      ]
      body =
        Enum.reduce(header_fields, body, fn {header, field}, acc ->
          case List.keyfind(response.headers, header, 0) do
            nil -> acc
            {_header, value} -> Map.put(acc, field, value)
          end
        end)
      {:ok, body, response}
    other ->
      other
  end
end
@doc """
Initiates the vault locking process: installs a vault lock policy on the
vault, sets the lock state to `InProgress`, and returns a lock ID used to
complete the process.

One vault lock policy (up to 20 KB) can be set per vault; see [Amazon
Glacier Access Control with Vault Lock
Policies](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html).
You must call `CompleteVaultLock` within 24 hours of entering `InProgress`;
after that window the lock ID expires, the vault leaves `InProgress`, and
the policy is removed. Once `Locked`, a new vault lock cannot be initiated.
Abort with `AbortVaultLock`, inspect with `GetVaultLock`; see [Amazon
Glacier Vault
Lock](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html).

Calling this while the lock is `InProgress` returns an
`AccessDeniedException`; call `AbortVaultLock` first to start over.
"""
def initiate_vault_lock(client, account_id, vault_name, input, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/lock-policy"
  # POST the lock policy; Glacier replies 201 Created on success.
  case request(client, :post, url_path, [], [], input, options, 201) do
    {:ok, body, response} when not is_nil(body) ->
      # Copy the lock ID returned as a response header into the body.
      header_fields = [{"x-amz-lock-id", "lockId"}]
      body =
        Enum.reduce(header_fields, body, fn {header, field}, acc ->
          case List.keyfind(response.headers, header, 0) do
            nil -> acc
            {_header, value} -> Map.put(acc, field, value)
          end
        end)
      {:ok, body, response}
    other ->
      other
  end
end
@doc """
Lists jobs for a vault — both in-progress and recently finished — sorted by
job initiation time.

<note> Glacier retains recently completed jobs for a period before
eventually removing them, so you can still fetch their output if you miss
the completion notification or a download attempt fails.

</note> The operation supports pagination: always check the response
`Marker` field. A `null` marker means no more jobs; a non-null value can be
passed as the `marker` parameter of a follow-up request to continue the
list. The `limit` parameter caps the number of jobs returned (default 50);
fewer may be returned, never more. Filter with `statuscode` (`InProgress`,
`Succeeded`, or `Failed`) and/or `completed` (`true`/`false`).

For details, see the underlying REST API [List
Jobs](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-jobs-get.html).
"""
def list_jobs(client, account_id, vault_name, completed \\ nil, limit \\ nil, marker \\ nil, statuscode \\ nil, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/jobs"
  # Include only the filter parameters the caller actually supplied.
  query =
    [
      {"completed", completed},
      {"limit", limit},
      {"marker", marker},
      {"statuscode", statuscode}
    ]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)
  request(client, :get, url_path, query, [], nil, options, nil)
end
@doc """
Lists in-progress multipart uploads for the specified vault — uploads that
were started with `InitiateMultipartUpload` but not yet completed or
aborted. The returned list has no guaranteed order.

The operation paginates: up to 50 uploads are returned by default, `limit`
caps the count, and a non-`null` response `marker` can be passed back as
the `marker` parameter to continue the list. Unlike `ListParts`, this
operation covers the whole vault and needs no upload ID.

IAM users have no permissions by default; see [Access Control Using AWS
Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For the underlying REST API, see [List Multipart Uploads
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-uploads.html)
in the *Amazon Glacier Developer Guide*.
"""
def list_multipart_uploads(client, account_id, vault_name, limit \\ nil, marker \\ nil, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads"
  # Include only the pagination parameters the caller actually supplied.
  query =
    [{"limit", limit}, {"marker", marker}]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)
  request(client, :get, url_path, query, [], nil, options, nil)
end
@doc """
Lists the parts uploaded so far in a specific in-progress multipart upload,
sorted by part range. Returns an error for completed uploads.

The operation paginates: up to 50 parts are returned by default, `limit`
caps the count, and a non-`null` response `marker` can be passed back as
the `marker` parameter to continue the list.

IAM users have no permissions by default; see [Access Control Using AWS
Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For the underlying REST API, see [List
Parts](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-parts.html)
in the *Amazon Glacier Developer Guide*.
"""
def list_parts(client, account_id, upload_id, vault_name, limit \\ nil, marker \\ nil, options \\ []) do
  url_path =
    "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads/#{URI.encode(upload_id)}"
  # Include only the pagination parameters the caller actually supplied.
  query =
    [{"limit", limit}, {"marker", marker}]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)
  request(client, :get, url_path, query, [], nil, options, nil)
end
@doc """
Lists the provisioned capacity units for the specified AWS account.
"""
def list_provisioned_capacity(client, account_id, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/provisioned-capacity"
  # Plain GET; no request body, headers, or query parameters.
  request(client, :get, url_path, [], [], nil, options, nil)
end
@doc """
Lists all tags attached to a vault; returns an empty map when the vault has
no tags.

For more information, see [Tagging Amazon S3 Glacier
Resources](https://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html).
"""
def list_tags_for_vault(client, account_id, vault_name, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/tags"
  # Plain GET; no request body, headers, or query parameters.
  request(client, :get, url_path, [], [], nil, options, nil)
end
@doc """
Lists all vaults owned by the calling user's account, ASCII-sorted by vault
name.

Up to 10 vaults are returned by default. If more remain, the response
`marker` holds the vault ARN at which to continue; pass it as the `marker`
parameter of the next request (a `null` marker means the list is complete).
`limit` caps the number of vaults returned.

IAM users have no permissions by default; see [Access Control Using AWS
Identity and Access Management
(IAM)](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html).
For the underlying REST API, see [List Vaults
](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vaults-get.html)
in the *Amazon Glacier Developer Guide*.
"""
def list_vaults(client, account_id, limit \\ nil, marker \\ nil, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/vaults"
  # Include only the pagination parameters the caller actually supplied.
  query =
    [{"limit", limit}, {"marker", marker}]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)
  request(client, :get, url_path, query, [], nil, options, nil)
end
@doc """
Purchases a provisioned capacity unit for an AWS account.
"""
def purchase_provisioned_capacity(client, account_id, input, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/provisioned-capacity"
  # POST the purchase request; Glacier replies 201 Created on success.
  case request(client, :post, url_path, [], [], input, options, 201) do
    {:ok, body, response} when not is_nil(body) ->
      # Copy the capacity ID returned as a response header into the body.
      header_fields = [{"x-amz-capacity-id", "capacityId"}]
      body =
        Enum.reduce(header_fields, body, fn {header, field}, acc ->
          case List.keyfind(response.headers, header, 0) do
            nil -> acc
            {_header, value} -> Map.put(acc, field, value)
          end
        end)
      {:ok, body, response}
    other ->
      other
  end
end
@doc """
Removes one or more tags from the set attached to a vault.

The operation is idempotent and succeeds even when no tags are attached.
For more information, see [Tagging Amazon S3 Glacier
Resources](https://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html).
"""
def remove_tags_from_vault(client, account_id, vault_name, input, options \\ []) do
  # The operation selector is part of the path, not the query list.
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/tags?operation=remove"
  request(client, :post, url_path, [], [], input, options, 204)
end
@doc """
Sets and enacts a data retrieval policy in the region of the PUT request.

One policy can be set per region per AWS account; it takes effect within a
few minutes of a successful PUT and does not affect retrieval jobs already
in progress. For more information, see [Amazon Glacier Data Retrieval
Policies](https://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html).
"""
def set_data_retrieval_policy(client, account_id, input, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/policies/data-retrieval"
  # PUT the policy document; success is HTTP 204.
  request(client, :put, url_path, [], [], input, options, 204)
end
@doc """
Configures an access policy for a vault, overwriting any existing policy,
by sending a PUT request to the vault's `access-policy` subresource.

One access policy (up to 20 KB) can be set per vault. For more information,
see [Amazon Glacier Access Control with Vault Access
Policies](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html).
"""
def set_vault_access_policy(client, account_id, vault_name, input, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/access-policy"
  # PUT the policy document; success is HTTP 204.
  request(client, :put, url_path, [], [], input, options, 204)
end
@doc """
Configures vault event notifications via a PUT to the vault's
`notification-configuration` subresource. The JSON input names an Amazon SNS
topic plus the events for which Amazon S3 Glacier should publish; the topic
must grant the vault permission to publish.

Supported vault events:

  * **ArchiveRetrievalCompleted** - an archive-retrieval job started with
    `InitiateJob` finished ("Succeeded" or "Failed"); the notification payload
    matches the `DescribeJob` output.
  * **InventoryRetrievalCompleted** - the same, for inventory-retrieval jobs.

IAM users have no permissions by default; grant them explicitly (see [Access
Control Using
IAM](https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html)).
For the underlying REST API see [Set Vault Notification
Configuration](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-put.html)
in the *Amazon Glacier Developer Guide*.
"""
def set_vault_notifications(client, account_id, vault_name, input, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/notification-configuration"

  # No extra headers or query parameters; Glacier answers 204 No Content on success.
  request(client, :put, url_path, [], [], input, options, 204)
end
@doc """
Adds an archive to a vault (synchronous; data is durably persisted on
success). Glacier returns the archive ID in the `x-amz-archive-id` response
header - save it, since it is the only handle for later retrieval/deletion.

The request must carry a SHA256 tree hash of the data (see [Computing
Checksums](https://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html))
and may carry an archive description of up to 1,024 printable ASCII
characters. Archives are immutable once uploaded.

For the underlying REST API see [Upload
Archive](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-post.html)
in the *Amazon Glacier Developer Guide*.
"""
def upload_archive(client, account_id, vault_name, input, options \\ []) do
  url_path = "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/archives"

  # Lift these input fields out of the payload and into HTTP request headers.
  {headers, input} =
    AWS.Request.build_params(
      [
        {"archiveDescription", "x-amz-archive-description"},
        {"checksum", "x-amz-sha256-tree-hash"}
      ],
      input
    )

  # A `with` without `else` passes any non-matching result (nil body, errors)
  # through unchanged, mirroring the plain `result -> result` fallback.
  with {:ok, body, response} when not is_nil(body) <-
         request(client, :post, url_path, [], headers, input, options, 201) do
    # Fold interesting response headers back into the decoded body.
    header_to_key = [
      {"x-amz-archive-id", "archiveId"},
      {"x-amz-sha256-tree-hash", "checksum"},
      {"Location", "location"}
    ]

    body =
      Enum.reduce(header_to_key, body, fn {header, key}, acc ->
        case List.keyfind(response.headers, header, 0) do
          {_header, value} -> Map.put(acc, key, value)
          nil -> acc
        end
      end)

    {:ok, body, response}
  end
end
@doc """
Uploads one part of a multipart upload (up to 10,000 parts, uploadable in any
order or in parallel). Glacier rejects the part when:

  * the supplied SHA256 tree hash does not match the received data (see
    [Computing
    Checksums](https://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html));
  * the part size differs from the size given in `InitiateMultipartUpload`
    (only the last part may be smaller); or
  * the byte range does not align with the declared part size.

The operation is idempotent: re-uploading a part overwrites the previous
data. For the underlying REST API see [Upload
Part](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-upload-part.html)
in the *Amazon Glacier Developer Guide*.
"""
def upload_multipart_part(client, account_id, upload_id, vault_name, input, options \\ []) do
  url_path =
    "/#{URI.encode(account_id)}/vaults/#{URI.encode(vault_name)}/multipart-uploads/#{URI.encode(upload_id)}"

  # Lift these input fields out of the payload and into HTTP request headers.
  {headers, input} =
    AWS.Request.build_params(
      [
        {"checksum", "x-amz-sha256-tree-hash"},
        {"range", "Content-Range"}
      ],
      input
    )

  # A `with` without `else` passes any non-matching result (nil body, errors)
  # through unchanged, mirroring the plain `result -> result` fallback.
  with {:ok, body, response} when not is_nil(body) <-
         request(client, :put, url_path, [], headers, input, options, 204) do
    # Surface the echoed checksum header in the decoded body.
    body =
      case List.keyfind(response.headers, "x-amz-sha256-tree-hash", 0) do
        {_header, value} -> Map.put(body, "checksum", value)
        nil -> body
      end

    {:ok, body, response}
  end
end
@spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
        {:ok, map() | nil, map()}
        | {:error, term()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
  client = %{client | service: "glacier"}
  host = build_host("glacier", client)

  url =
    host
    |> build_url(path, client)
    |> add_query(query, client)

  payload = encode!(client, input)

  headers =
    AWS.Request.add_headers(
      [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}],
      headers
    )

  # Sign last: the SigV4 signature covers the final URL, headers and payload.
  headers = AWS.Request.sign_v4(client, method, url, headers, payload)
  perform_request(client, method, url, payload, headers, options, success_status_code)
end
# Executes the signed HTTP request and normalizes the result.
#
# Note the two `when` keywords on the first clause: multiple guard sequences
# are OR-ed, so it matches either when no specific success code was requested
# (any of 200/202/204 counts) or when the status equals the expected code.
defp perform_request(client, method, url, payload, headers, options, success_status_code) do
  case AWS.Client.request(client, method, url, payload, headers, options) do
    {:ok, %{status_code: status_code, body: body} = response}
    when is_nil(success_status_code) and status_code in [200, 202, 204]
    when status_code == success_status_code ->
      # An empty body becomes nil instead of being JSON-decoded.
      body = if(body != "", do: decode!(client, body))
      {:ok, body, response}

    # Any other successful HTTP exchange is an unexpected status.
    {:ok, response} ->
      {:error, {:unexpected_response, response}}

    # Transport-level failures pass through untouched.
    error = {:error, _reason} -> error
  end
end
# "local" region with an explicit endpoint: talk to that endpoint directly.
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}), do: endpoint

# "local" region without an endpoint: assume a service running on localhost.
defp build_host(_endpoint_prefix, %{region: "local"}), do: "localhost"

# Regular AWS region, e.g. "glacier.us-east-1.amazonaws.com".
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
  "#{endpoint_prefix}.#{region}.#{endpoint}"
end
# Assembles the request URL from scheme, host, port and path.
defp build_url(host, path, %{proto: proto, port: port}) do
  Enum.join([proto, "://", host, ":", port, path])
end
# No query parameters: the URL is returned untouched.
defp add_query(url, [], _client), do: url

# Otherwise append "?key=value&..." as produced by the client's query encoder.
defp add_query(url, query, client) do
  url <> "?" <> encode!(client, query, :query)
end
# Encodes request payloads; Glacier uses JSON by default.
defp encode!(client, payload, format \\ :json), do: AWS.Client.encode!(client, payload, format)
# Decodes JSON response bodies into maps.
defp decode!(client, payload), do: AWS.Client.decode!(client, payload, :json)
end
| 45.622302 | 131 | 0.719467 |
1c785f3e97b9cabcc942539cac1d814b83aa23ab | 420 | ex | Elixir | backend/lib/backend_web/views/todo_view.ex | sdil/todo-app-phoenix-nuxtjs | cfdb2ccce9cf54f691236ae81740608ce1c6a227 | [
"MIT"
] | null | null | null | backend/lib/backend_web/views/todo_view.ex | sdil/todo-app-phoenix-nuxtjs | cfdb2ccce9cf54f691236ae81740608ce1c6a227 | [
"MIT"
] | null | null | null | backend/lib/backend_web/views/todo_view.ex | sdil/todo-app-phoenix-nuxtjs | cfdb2ccce9cf54f691236ae81740608ce1c6a227 | [
"MIT"
] | null | null | null | defmodule BackendWeb.TodoView do
use BackendWeb, :view
alias BackendWeb.TodoView
# Renders the todo collection for the index endpoint.
def render("index.json", %{todos: todos}),
  do: %{data: render_many(todos, __MODULE__, "todo.json")}

# Renders a single todo for the show endpoint.
def render("show.json", %{todo: todo}),
  do: %{data: render_one(todo, __MODULE__, "todo.json")}

# Bare JSON representation of one todo record.
def render("todo.json", %{todo: todo}) do
  %{id: todo.id, title: todo.title, done: todo.done}
end
end
| 22.105263 | 54 | 0.640476 |
1c7863cd9cb1188062b30afc547477bd43b8948b | 65 | ex | Elixir | imdb_data/lib/imdb_data_web/views/review_view.ex | defndaines/eiga | 89adc25dd6c7a5be86d6dc6be9dffc62ad05bdd8 | [
"Apache-2.0"
] | null | null | null | imdb_data/lib/imdb_data_web/views/review_view.ex | defndaines/eiga | 89adc25dd6c7a5be86d6dc6be9dffc62ad05bdd8 | [
"Apache-2.0"
] | 2 | 2016-03-10T03:04:11.000Z | 2017-02-11T17:43:44.000Z | imdb_data/lib/imdb_data_web/views/review_view.ex | defndaines/eiga | 89adc25dd6c7a5be86d6dc6be9dffc62ad05bdd8 | [
"Apache-2.0"
] | null | null | null | defmodule IMDbDataWeb.ReviewView do
# Pulls in the shared Phoenix view helpers defined by IMDbDataWeb's :view macro.
use IMDbDataWeb, :view
end
| 16.25 | 35 | 0.815385 |
1c788f25f0f3716b3be34888997b8dd52d7bd3f7 | 1,613 | ex | Elixir | clients/on_demand_scanning/lib/google_api/on_demand_scanning/v1/model/grafeas_v1_slsa_provenance_zero_two_slsa_builder.ex | yoshi-code-bot/elixir-google-api | cdb6032f01fac5ab704803113c39f2207e9e019d | [
"Apache-2.0"
] | null | null | null | clients/on_demand_scanning/lib/google_api/on_demand_scanning/v1/model/grafeas_v1_slsa_provenance_zero_two_slsa_builder.ex | yoshi-code-bot/elixir-google-api | cdb6032f01fac5ab704803113c39f2207e9e019d | [
"Apache-2.0"
] | null | null | null | clients/on_demand_scanning/lib/google_api/on_demand_scanning/v1/model/grafeas_v1_slsa_provenance_zero_two_slsa_builder.ex | yoshi-code-bot/elixir-google-api | cdb6032f01fac5ab704803113c39f2207e9e019d | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.OnDemandScanning.V1.Model.GrafeasV1SlsaProvenanceZeroTwoSlsaBuilder do
  @moduledoc """
  Identifies the entity that executed the recipe, which is trusted to have correctly performed the operation and populated this provenance.

  ## Attributes

  *   `id` (*type:* `String.t`, *default:* `nil`) - identifier of the builder.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :id => String.t() | nil
        }

  # Registers the `id` attribute with the generated model plumbing
  # (GoogleApi.Gax.ModelBase derives decode/encode support from it).
  field(:id)
end
defimpl Poison.Decoder,
  for: GoogleApi.OnDemandScanning.V1.Model.GrafeasV1SlsaProvenanceZeroTwoSlsaBuilder do
  alias GoogleApi.OnDemandScanning.V1.Model.GrafeasV1SlsaProvenanceZeroTwoSlsaBuilder, as: Model

  # Decoding is handled by the model's generated decode/2.
  def decode(value, options), do: Model.decode(value, options)
end
defimpl Poison.Encoder,
  for: GoogleApi.OnDemandScanning.V1.Model.GrafeasV1SlsaProvenanceZeroTwoSlsaBuilder do
  # Encoding is shared across generated models; defer to the Gax base implementation.
  def encode(value, options), do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 31.019231 | 139 | 0.748915 |
1c78acdcf6505355b3fb7b03be913b6a230fe66c | 519 | exs | Elixir | priv/test_repo/migrations/20210602145604_migrate_resources12.exs | frankdugan3/ash_postgres | ae173f0229ffe1dea821e8a73c2c6b8c858b39f6 | [
"MIT"
] | 13 | 2020-09-04T22:31:23.000Z | 2022-02-06T13:24:23.000Z | priv/test_repo/migrations/20210602145604_migrate_resources12.exs | frankdugan3/ash_postgres | ae173f0229ffe1dea821e8a73c2c6b8c858b39f6 | [
"MIT"
] | 57 | 2019-12-04T15:23:41.000Z | 2022-02-14T22:55:16.000Z | priv/test_repo/migrations/20210602145604_migrate_resources12.exs | frankdugan3/ash_postgres | ae173f0229ffe1dea821e8a73c2c6b8c858b39f6 | [
"MIT"
] | 15 | 2020-10-22T13:26:25.000Z | 2021-07-26T23:49:42.000Z | defmodule AshPostgres.TestRepo.Migrations.MigrateResources12 do
@moduledoc """
Updates resources based on their most recent snapshots.
This file was autogenerated with `mix ash_postgres.generate_migrations`
"""
use Ecto.Migration
def up do
  # Creates the authors table with an application-visible UUID primary key.
  create table(:authors, primary_key: false) do
    # uuid_generate_v4() is server-side generation; NOTE(review): presumably
    # requires the "uuid-ossp" Postgres extension - confirm it is enabled.
    add :id, :uuid, null: false, default: fragment("uuid_generate_v4()"), primary_key: true
    add :first_name, :text
    add :last_name, :text
  end
end
# Rolls back `up/0` by dropping the authors table.
def down do
  drop table(:authors)
end
end | 24.714286 | 93 | 0.710983 |
1c78b1b1b10b3a80673691e9524ed7b26b9fa89f | 4,990 | ex | Elixir | lib/rdf/graph_builder.ex | marcelotto/rdf-ex | 12adce69eb2dbff027cbc83aaaf912067aea1b02 | [
"MIT"
] | 53 | 2017-06-25T22:20:44.000Z | 2020-04-27T17:27:51.000Z | lib/rdf/graph_builder.ex | marcelotto/rdf-ex | 12adce69eb2dbff027cbc83aaaf912067aea1b02 | [
"MIT"
] | 7 | 2017-06-25T00:29:11.000Z | 2020-03-11T00:23:47.000Z | lib/rdf/graph_builder.ex | marcelotto/rdf-ex | 12adce69eb2dbff027cbc83aaaf912067aea1b02 | [
"MIT"
] | 2 | 2018-01-19T15:48:27.000Z | 2020-03-01T00:29:35.000Z | defmodule RDF.Graph.Builder do
alias RDF.{Description, Graph, Dataset, PrefixMap, IRI}
defmodule Error do
  # Raised for invalid builder input (see `base_string/1` and `rdf?/1`).
  defexception [:message]
end
defmodule Helper do
  # Imported into every built block (see `build/2`): `a` is shorthand for
  # `rdf:type`, delegating to RDF.NS.RDF.type with up to five objects.
  defdelegate a(), to: RDF.NS.RDF, as: :type
  defdelegate a(s, o), to: RDF.NS.RDF, as: :type
  defdelegate a(s, o1, o2), to: RDF.NS.RDF, as: :type
  defdelegate a(s, o1, o2, o3), to: RDF.NS.RDF, as: :type
  defdelegate a(s, o1, o2, o3, o4), to: RDF.NS.RDF, as: :type
  defdelegate a(s, o1, o2, o3, o4, o5), to: RDF.NS.RDF, as: :type

  # Discards an expression's value so it does not end up in the graph
  # (nil results are filtered out by rdf?/1).
  def exclude(_), do: nil
end
# Entry point of the graph-builder macro. Splits the block into declarations
# (assignments, @prefix/@base attributes, alias/import/require/use) and RDF
# data expressions, then emits code that evaluates the data with those
# declarations in scope and hands everything to do_build/4 at runtime.
def build({:__block__, _, block}, opts) do
  {declarations, data} = Enum.split_with(block, &declaration?/1)
  {base, declarations} = extract_base(declarations)
  base_string = base_string(base)
  # Relative ~I sigils anywhere in the block are resolved against @base.
  data = resolve_relative_iris(data, base_string)
  declarations = resolve_relative_iris(declarations, base_string)
  {prefixes, declarations} = extract_prefixes(declarations)

  quote do
    # Default namespace conveniences available inside every builder block.
    alias RDF.XSD
    alias RDF.NS.{RDFS, OWL}
    import RDF.Sigils
    import Helper

    unquote(declarations)

    RDF.Graph.Builder.do_build(
      unquote(data),
      unquote(opts),
      unquote(prefixes),
      unquote(base_string)
    )
  end
end

# A single expression is wrapped into a block so the clause above applies.
def build(single, opts) do
  build({:__block__, [], List.wrap(single)}, opts)
end
@doc false
# Runtime counterpart of build/2: creates a graph with the collected options
# and adds every block result that is actually RDF data (see rdf?/1).
def do_build(data, opts, prefixes, base) do
  RDF.graph(graph_opts(opts, prefixes, base))
  |> Graph.add(Enum.filter(data, &rdf?/1))
end
# Folds the collected @base and @prefix declarations into the graph options.
defp graph_opts(opts, prefixes, base) do
  opts
  |> set_base_opt(base)
  |> set_prefix_opt(prefixes)
end
# Without a @base declaration the options are left untouched; otherwise the
# resolved base IRI string is stored under :base_iri.
defp set_base_opt(opts, nil) do
  opts
end

defp set_base_opt(opts, base) do
  Keyword.put(opts, :base_iri, base)
end
# Without declared prefixes the options are left alone. Otherwise the declared
# prefixes are combined with the application defaults, or - when the caller
# already passed a :prefixes option - merged into it using the :ignore
# conflict strategy.
defp set_prefix_opt(opts, []), do: opts

defp set_prefix_opt(opts, prefixes) do
  Keyword.update(opts, :prefixes, RDF.default_prefixes(prefixes), fn opt_prefixes ->
    PrefixMap.new(prefixes)
    |> PrefixMap.merge!(opt_prefixes, :ignore)
  end)
end
# Normalizes the @base declaration to a plain string (or nil when absent).
# Accepts a literal string, a namespace module, or a literal ~I sigil AST;
# anything computed at runtime is rejected, since @base must be known at
# compile time.
defp base_string(nil), do: nil
defp base_string(base) when is_binary(base), do: base
# A module given as @base: use its declared base IRI.
defp base_string(base) when is_atom(base), do: apply(base, :__base_iri__, [])
# AST of a ~I"..." sigil: extract the embedded string.
defp base_string({:sigil_I, _, [{_, _, [base]}, _]}), do: base

defp base_string(_) do
  raise Error,
    message: "invalid @base expression; only literal values are allowed as @base value"
end
# Walks the AST and rewrites every relative ~I sigil into an absolute one,
# resolved against the given base; absolute sigils pass through unchanged.
defp resolve_relative_iris(ast, base) do
  Macro.prewalk(ast, fn
    {:sigil_I, meta_outer, [{:<<>>, meta_inner, [iri]}, list]} = sigil ->
      if IRI.absolute?(iri) do
        sigil
      else
        absolute = iri |> IRI.absolute(base) |> IRI.to_string()
        {:sigil_I, meta_outer, [{:<<>>, meta_inner, [absolute]}, list]}
      end

    # Any other AST node is left as-is.
    other ->
      other
  end)
end
# Pulls the @base declaration (if any) out of the declaration list; a later
# @base wins over an earlier one. A module used as @base is replaced by an
# alias declaration so its short name stays usable inside the block.
# Returns {base | nil, remaining_declarations} in the original order.
defp extract_base(declarations) do
  {base, declarations} =
    Enum.reduce(declarations, {nil, []}, fn
      # `@base Some.Module` - keep an alias in its place.
      {:@, line, [{:base, _, [{:__aliases__, _, ns}] = aliases}]}, {_, declarations} ->
        {Module.concat(ns), [{:alias, line, aliases} | declarations]}

      # `@base "literal"` (or ~I sigil AST) - drop it from the declarations.
      {:@, _, [{:base, _, [base]}]}, {_, declarations} ->
        {base, declarations}

      declaration, {base, declarations} ->
        {base, [declaration | declarations]}
    end)

  {base, Enum.reverse(declarations)}
end
# Collects all @prefix declarations as {prefix, module} pairs, replacing each
# with a plain alias declaration. Supports both the explicit
# `@prefix ex: Example.NS` form and the bare `@prefix Example.NS` form, where
# the prefix name is derived from the module's last segment (see prefix/1).
defp extract_prefixes(declarations) do
  {prefixes, declarations} =
    Enum.reduce(declarations, {[], []}, fn
      {:@, line, [{:prefix, _, [[{prefix, {:__aliases__, _, ns} = aliases}]]}]},
      {prefixes, declarations} ->
        {[prefix(prefix, ns) | prefixes], [{:alias, line, [aliases]} | declarations]}

      {:@, line, [{:prefix, _, [{:__aliases__, _, ns}] = aliases}]}, {prefixes, declarations} ->
        {[prefix(ns) | prefixes], [{:alias, line, aliases} | declarations]}

      declaration, {prefixes, declarations} ->
        {prefixes, [declaration | declarations]}
    end)

  {prefixes, Enum.reverse(declarations)}
end
# Derives a prefix atom from the namespace's last segment,
# e.g. Foo.BarBaz -> :bar_baz. Runs at compile time, so the
# String.to_atom/1 call only sees module-name-derived input.
defp prefix(namespace) do
  namespace
  |> Enum.reverse()
  |> hd()
  |> to_string()
  |> Macro.underscore()
  |> String.to_atom()
  |> prefix(namespace)
end

# Pairs an explicit prefix atom with the concatenated namespace module.
defp prefix(prefix, namespace), do: {prefix, Module.concat(namespace)}
# Distinguishes declarations (assignments, @prefix/@base attributes, and
# alias/import/require/use directives) from RDF data expressions.
defp declaration?(form) do
  case form do
    {:=, _, _} -> true
    {:@, _, [{attr, _, _}]} when attr in [:prefix, :base] -> true
    {directive, _, _} when directive in [:alias, :import, :require, :use] -> true
    _ -> false
  end
end
# Decides which evaluated block results become part of the graph:
# nil (e.g. from Helper.exclude/1) and :ok are silently dropped, RDF
# structures plus plain maps/tuples/lists are added, anything else raises.
defp rdf?(nil), do: false
defp rdf?(:ok), do: false
defp rdf?(%Description{}), do: true
defp rdf?(%Graph{}), do: true
defp rdf?(%Dataset{}), do: true
defp rdf?(statements) when is_map(statements), do: true
defp rdf?(statements) when is_tuple(statements), do: true
defp rdf?(list) when is_list(list), do: true

defp rdf?(invalid) do
  raise Error, message: "invalid RDF data: #{inspect(invalid)}"
end
end
| 30.242424 | 98 | 0.617435 |
1c78eef7c83fa08f96d8eb90661588304e600892 | 1,218 | ex | Elixir | lib/hyper_potion/endpoint.ex | selfup/hyper_potion | 6e4f3f393e4fd8d7ddf7e0dcbf8021e7dad18823 | [
"MIT"
] | null | null | null | lib/hyper_potion/endpoint.ex | selfup/hyper_potion | 6e4f3f393e4fd8d7ddf7e0dcbf8021e7dad18823 | [
"MIT"
] | null | null | null | lib/hyper_potion/endpoint.ex | selfup/hyper_potion | 6e4f3f393e4fd8d7ddf7e0dcbf8021e7dad18823 | [
"MIT"
] | null | null | null | defmodule HyperPotion.Endpoint do
use Phoenix.Endpoint, otp_app: :hyper_potion

# User-facing channel socket.
socket "/socket", HyperPotion.UserSocket

# Serve at "/" the static files from "priv/static" directory.
#
# You should set gzip to true if you are running phoenix.digest
# when deploying your static files in production.
plug Plug.Static,
  at: "/", from: :hyper_potion, gzip: false,
  only: ~w(css fonts images js favicon.ico robots.txt)

# Code reloading can be explicitly enabled under the
# :code_reloader configuration of your endpoint.
if code_reloading? do
  socket "/phoenix/live_reload/socket", Phoenix.LiveReloader.Socket
  plug Phoenix.LiveReloader
  plug Phoenix.CodeReloader
end

# Plugs run in declaration order; keep request-id/logging before parsing.
plug Plug.RequestId
plug Plug.Logger

plug Plug.Parsers,
  parsers: [:urlencoded, :multipart, :json],
  pass: ["*/*"],
  json_decoder: Poison

plug Plug.MethodOverride
plug Plug.Head

# The session will be stored in the cookie and signed,
# this means its contents can be read but not tampered with.
# Set :encryption_salt if you would also like to encrypt it.
plug Plug.Session,
  store: :cookie,
  key: "_hyper_potion_key",
  signing_salt: "hP23gxAh"

# The router is the last plug in the endpoint pipeline.
plug HyperPotion.Router
end
| 28.325581 | 69 | 0.720854 |
1c7907438c12683f0933d02fc8171ad4ebe15a9a | 2,107 | ex | Elixir | lib/gen_retry/worker.ex | TMSCH/gen_retry | 5edb4bea66902b9c3011cf7a37c78b1e410a9b54 | [
"MIT"
] | 202 | 2016-06-13T13:41:20.000Z | 2022-03-09T06:20:52.000Z | lib/gen_retry/worker.ex | TMSCH/gen_retry | 5edb4bea66902b9c3011cf7a37c78b1e410a9b54 | [
"MIT"
] | 8 | 2016-09-12T19:34:24.000Z | 2020-08-13T03:59:26.000Z | lib/gen_retry/worker.ex | devstopfix/gen_retry | 5edb4bea66902b9c3011cf7a37c78b1e410a9b54 | [
"MIT"
] | 12 | 2016-09-12T17:01:11.000Z | 2020-08-26T06:00:51.000Z | defmodule GenRetry.Worker do
@moduledoc false
@default_logger GenRetry.Utils
alias GenRetry.State
use GenServer
@spec init({GenRetry.retryable_fun(), GenRetry.Options.t()}) ::
        {:ok, GenRetry.State.t()}
def init({fun, opts}) do
  # The logger module is configurable via the :gen_retry, GenRetry.Logger env.
  config = Application.get_env(:gen_retry, GenRetry.Logger, [])
  logger = Keyword.get(config, :logger, @default_logger)
  # Kick off the first attempt asynchronously so init/1 returns immediately.
  GenServer.cast(self(), :try)
  {:ok, %State{function: fun, opts: opts, logger: logger}}
end
@spec handle_cast(:try, GenRetry.State.t()) ::
        {:noreply, GenRetry.State.t()} | {:stop, :normal, GenRetry.State.t()}
def handle_cast(:try, state) do
  # Sleep out whatever remains of the scheduled backoff window.
  sleep_for = round(state.retry_at - :erlang.system_time(:milli_seconds))
  if sleep_for > 0, do: :timer.sleep(sleep_for)

  state = %{state | tries: state.tries + 1}

  try do
    return_value = state.function.()

    # Success: notify the subscriber and/or run the success callback, then stop.
    if pid = state.opts.respond_to do
      send(pid, {:success, return_value, state})
    end

    if on_success = state.opts.on_success do
      on_success.({return_value, state})
    end

    {:stop, :normal, state}
  rescue
    e ->
      state.logger.log(inspect(e))

      if should_try_again(state) do
        # Schedule the next attempt with exponential backoff and re-enqueue :try.
        retry_at = :erlang.system_time(:milli_seconds) + delay_time(state)
        GenServer.cast(self(), :try)
        {:noreply, %{state | retry_at: retry_at}}
      else
        # Retries exhausted: report the final failure with its stacktrace.
        if pid = state.opts.respond_to do
          send(pid, {:failure, e, __STACKTRACE__, state})
        end

        if on_failure = state.opts.on_failure do
          on_failure.({e, __STACKTRACE__, state})
        end

        {:stop, :normal, state}
      end
  end
end
# Backoff in milliseconds: delay * exp_base^tries, plus a random jitter of up
# to `jitter` times that base amount.
@spec delay_time(GenRetry.State.t()) :: integer
defp delay_time(%{opts: opts, tries: tries}) do
  backoff = opts.delay * :math.pow(opts.exp_base, tries)
  randomness = :rand.uniform() * backoff * opts.jitter
  round(backoff + randomness)
end
# :infinity never gives up; otherwise retry while the configured retry count
# has not been exceeded by the attempts made so far.
@spec should_try_again(GenRetry.State.t()) :: boolean
defp should_try_again(%{opts: %{retries: retries}, tries: tries}) do
  retries == :infinity or retries >= tries
end
end
| 29.263889 | 79 | 0.628382 |
1c79195083653f5f0a2bdfe25be6293d55d16f2b | 14,481 | ex | Elixir | lib/iex/lib/iex/pry.ex | chulkilee/elixir | 699231dcad52916a76f38856cbd7cf7c7bdadc51 | [
"Apache-2.0"
] | null | null | null | lib/iex/lib/iex/pry.ex | chulkilee/elixir | 699231dcad52916a76f38856cbd7cf7c7bdadc51 | [
"Apache-2.0"
] | null | null | null | lib/iex/lib/iex/pry.ex | chulkilee/elixir | 699231dcad52916a76f38856cbd7cf7c7bdadc51 | [
"Apache-2.0"
] | 8 | 2018-02-20T18:30:53.000Z | 2019-06-18T14:23:31.000Z | defmodule IEx.Pry do
@moduledoc """
The low-level API for prying sessions and setting up breakpoints.
"""
use GenServer
@table __MODULE__
@server __MODULE__
@timeout :infinity
@initial_counter 1
@type id :: integer()
@type break :: {id, module, {function, arity}, pending :: non_neg_integer}
@doc """
Callback for `IEx.pry/1`.

You can invoke this function directly when you are not able to invoke
`IEx.pry/1` as a macro. This function expects the binding (from
`Kernel.binding/0`) and the environment (from `__ENV__/0`).
"""
def pry(binding, %Macro.Env{} = env) do
  self = self()
  %{file: file, line: line, module: module, function: function_arity} = env
  {:current_stacktrace, stacktrace} = Process.info(self, :current_stacktrace)

  opts = [
    binding: binding,
    dot_iex_path: "",
    env: env,
    prefix: "pry",
    stacktrace: prune_stacktrace(stacktrace)
  ]

  # Human-readable location: "Mod.fun/arity (file:line)" inside a function,
  # plain "file:line" otherwise.
  location =
    case function_arity do
      {function, arity} ->
        "#{Exception.format_mfa(module, function, arity)} (#{Path.relative_to_cwd(file)}:#{line})"

      _ ->
        "#{Path.relative_to_cwd(file)}:#{line}"
    end

  # Source snippet around the pried line, when the file can be read.
  whereami =
    case whereami(file, line, 2) do
      {:ok, lines} -> [?\n, ?\n, lines]
      :error -> []
    end

  # If we are the current evaluator, it is because we just
  # reached a pry/breakpoint and the user hit continue().
  # In both cases, we are safe to print and the request will
  # succeed.
  request =
    if Process.get(:iex_evaluator) do
      IO.puts(IEx.color(:eval_interrupt, "Break reached: #{location}#{whereami}"))
      "Prying #{inspect(self)} at #{location}"
    else
      "Request to pry #{inspect(self)} at #{location}#{whereami}"
    end

  # We cannot use colors because IEx may be off
  case IEx.Server.take_over(request, opts) do
    :ok ->
      :ok

    {:error, :no_iex} ->
      extra =
        if match?({:win32, _}, :os.type()) do
          " If you are using Windows, you may need to start IEx with the --werl flag."
        else
          ""
        end

      message = "Cannot pry #{inspect(self)} at #{location}. Is an IEx shell running?" <> extra
      IO.puts(:stdio, message)
      {:error, :no_iex}

    {:error, _} = error ->
      error
  end
end

# Convenience clause: builds a Macro.Env from an eval option list, registering
# each atom-named binding as a known variable before delegating above.
def pry(binding, opts) when is_list(opts) do
  vars = for {k, _} when is_atom(k) <- binding, do: {k, nil}
  pry(binding, %{:elixir.env_for_eval(opts) | vars: vars})
end
@elixir_internals [:elixir, :erl_eval, IEx.Evaluator, IEx.Pry]

# Removes Elixir/IEx implementation frames (and the Process.info/2 frame from
# the stacktrace capture above) so only the user's own entries remain.
defp prune_stacktrace([{mod, _, _, _} | t]) when mod in @elixir_internals do
  prune_stacktrace(t)
end

defp prune_stacktrace([{Process, :info, 2, _} | t]) do
  prune_stacktrace(t)
end

defp prune_stacktrace([h | t]) do
  [h | prune_stacktrace(t)]
end

defp prune_stacktrace([]) do
  []
end
@doc """
Formats the location for `whereami/3` prying.

It receives the `file`, `line` and the snippet `radius` and
returns `{:ok, lines}`, where lines is a list of chardata
containing each formatted line, or `:error`.

The actual line is especially formatted in bold.
"""
def whereami(file, line, radius)
    when is_binary(file) and is_integer(line) and is_integer(radius) and radius > 0 do
  if File.regular?(file) do
    case whereami_lines(file, line, radius) do
      # Only a non-empty snippet counts as success.
      [_ | _] = lines -> {:ok, lines}
      _ -> :error
    end
  else
    :error
  end
end
# Streams the file and formats the window of `radius` lines around `line`,
# clamping the start at the beginning of the file.
defp whereami_lines(file, line, radius) do
  first = max(line - radius - 1, 0)
  last = line + radius - 1

  file
  |> File.stream!()
  |> Enum.slice(first..last)
  |> Enum.with_index(first + 1)
  |> Enum.map(&whereami_format_line(&1, line))
end
# Renders one "NNNNN: source" gutter line; the pried line itself is bolded.
defp whereami_format_line({text, number}, current_line) do
  gutter = number |> Integer.to_string() |> String.pad_leading(5, " ")

  case number do
    ^current_line -> IO.ANSI.format_fragment([:bright, gutter, ": ", text, :normal])
    _ -> [gutter, ": ", text]
  end
end
@doc """
Sets up a breakpoint on the given module/function/arity matching the given
args and guard.
"""
@spec break(module, function, arity, Macro.t(), pos_integer) ::
        {:ok, id()}
        | {
            :error,
            :recompilation_failed
            | :no_beam_file
            | :unknown_function_arity
            | :otp_20_is_required
            | :missing_debug_info
            | :outdated_debug_info
            | :non_elixir_module
          }
def break(module, function, arity, condition, breaks \\ 1)
    when is_atom(module) and is_atom(function) and is_integer(arity) and arity >= 0 and
           is_integer(breaks) and breaks > 0 do
  # All breakpoint bookkeeping is serialized through the IEx.Pry server.
  GenServer.call(@server, {:break, module, {function, arity}, condition, breaks}, @timeout)
end
@doc """
Raising variant of `break/5`.

Returns the breakpoint id on success and raises a `RuntimeError` with a
human-readable explanation for any `{:error, reason}` from `break/5`.
"""
# Fix: the spec was previously written as `@spec break(...)`, which attached
# a bogus second (conflicting) spec to break/5 and left break!/5 unspecced.
@spec break!(module, function, arity, Macro.t(), pos_integer) :: id()
def break!(module, function, arity, condition, breaks \\ 1) do
  case break(module, function, arity, condition, breaks) do
    {:ok, id} ->
      id

    {:error, kind} ->
      # Translate each error tag from break/5 into a user-facing message.
      message =
        case kind do
          :missing_debug_info ->
            "module #{inspect(module)} was not compiled with debug_info"

          :no_beam_file ->
            "could not find .beam file for #{inspect(module)}"

          :non_elixir_module ->
            "module #{inspect(module)} was not written in Elixir"

          :otp_20_is_required ->
            "you are running on an earlier OTP version than OTP 20"

          :outdated_debug_info ->
            "module #{inspect(module)} was not compiled with the latest debug_info"

          :recompilation_failed ->
            "the module could not be compiled with breakpoints (likely an internal error)"

          :unknown_function_arity ->
            "unknown function/macro #{Exception.format_mfa(module, function, arity)}"
        end

      raise "could not set breakpoint, " <> message
  end
end
@doc """
Resets the breaks on a given breakpoint id.
"""
@spec reset_break(id) :: :ok | :not_found
def reset_break(id) when is_integer(id) do
  # Wildcard fields select the ETS entry by id alone.
  GenServer.call(@server, {:reset_break, {id, :_, :_, :_, :_}}, @timeout)
end
@doc """
Resets the breaks for the given module, function and arity.

If the module is not instrumented or if the given function
does not have a breakpoint, it is a no-op and it returns
`:not_found`. Otherwise it returns `:ok`.
"""
@spec reset_break(module, function, arity) :: :ok | :not_found
def reset_break(module, function, arity) do
  # Wildcard id: select the ETS entry by module/function/arity instead.
  GenServer.call(@server, {:reset_break, {:_, module, {function, arity}, :_, :_}}, @timeout)
end
@doc """
Removes all breakpoints on all modules.

This effectively loads the non-instrumented version of
currently instrumented modules into memory.
"""
@spec remove_breaks :: :ok
def remove_breaks do
  GenServer.call(@server, :remove_breaks, @timeout)
end
@doc """
Removes breakpoints in the given module.

This effectively loads the non-instrumented version of
the module into memory.
"""
@spec remove_breaks(module) :: :ok | {:error, :no_beam_file}
def remove_breaks(module) do
  GenServer.call(@server, {:remove_breaks, module}, @timeout)
end
@doc """
Returns all breakpoints, sorted by id.
"""
@spec breaks :: [break]
def breaks do
  Enum.sort(GenServer.call(@server, :breaks, @timeout))
end
  ## Callbacks
  @doc false
  def start_link(_) do
    # Register under @server so the public API above can reach the
    # process by name.
    GenServer.start_link(__MODULE__, :ok, name: @server)
  end
  @impl true
  def init(:ok) do
    Process.flag(:trap_exit, true)
    # Public table so the instrumented modules themselves can bump the
    # break counters via :ets.update_counter (see instrument_clause/4);
    # write_concurrency because many processes may hit breakpoints at once.
    :ets.new(@table, [:named_table, :public, write_concurrency: true])
    {:ok, @initial_counter}
  end
  @impl true
  # Registers (or updates) a breakpoint and re-instruments the module
  # with all of its currently known breakpoints.
  def handle_call({:break, module, fa, condition, breaks}, _from, counter) do
    # If there is a match for the given module and fa, we
    # use the ref, otherwise we create a new one.
    {ref, counter} =
      case :ets.match_object(@table, {:_, module, fa, :_, :_}) do
        [{ref, _, _, _, _}] -> {ref, counter}
        [] -> {counter, counter + 1}
      end
    case fetch_elixir_debug_info_with_fa_check(module, fa) do
      {:ok, beam, backend, elixir} ->
        true = :ets.insert(@table, {ref, module, fa, condition, breaks})
        # Instrument with every entry for the module, not just this one.
        entries = :ets.match_object(@table, {:_, module, :_, :_, :_})
        {:reply, instrument(beam, backend, elixir, ref, entries), counter}
      {:error, _} = error ->
        {:reply, error, counter}
    end
  end
  # Sets the remaining break count to 0 for matching entries whose
  # module is still instrumented; drops stale entries otherwise.
  def handle_call({:reset_break, pattern}, _from, counter) do
    reset =
      for {ref, module, fa, condition, _} <- :ets.match_object(@table, pattern) do
        if instrumented?(module) do
          :ets.insert(@table, {ref, module, fa, condition, 0})
          true
        else
          :ets.delete(@table, ref)
          false
        end
      end
    if Enum.any?(reset) do
      {:reply, :ok, counter}
    else
      {:reply, :not_found, counter}
    end
  end
  # Lists breakpoints, pruning entries for modules that are no longer
  # instrumented along the way.
  def handle_call(:breaks, _from, counter) do
    entries =
      for {id, module, function_arity, _condition, breaks} <- :ets.tab2list(@table),
          keep_instrumented(id, module) == :ok do
        # The counter may have been decremented to -1 by the
        # instrumented code, so clamp it for display.
        {id, module, function_arity, max(breaks, 0)}
      end
    {:reply, entries, counter}
  end
  def handle_call(:remove_breaks, _from, _counter) do
    # Make sure to deinstrument before clearing
    # up the table to avoid race conditions.
    @table
    |> :ets.match({:_, :"$1", :_, :_, :_})
    |> List.flatten()
    |> Enum.uniq()
    |> Enum.each(&deinstrument_if_instrumented/1)
    true = :ets.delete_all_objects(@table)
    {:reply, :ok, @initial_counter}
  end
  def handle_call({:remove_breaks, module}, _from, counter) do
    # Make sure to deinstrument before clearing
    # up the table to avoid race conditions.
    reply = deinstrument_if_instrumented(module)
    true = :ets.match_delete(@table, {:_, module, :_, :_, :_})
    {:reply, reply, counter}
  end
  # Returns :ok if the module is still instrumented; otherwise deletes
  # the stale entry from the table and returns :error so callers can
  # filter it out of listings.
  defp keep_instrumented(id, module) do
    if instrumented?(module) do
      :ok
    else
      :ets.delete(@table, id)
      :error
    end
  end
defp deinstrument_if_instrumented(module) do
if instrumented?(module) do
deinstrument(module)
else
:ok
end
end
  # Reloads the module's original .beam from disk, replacing the
  # instrumented version currently loaded in memory.
  defp deinstrument(module) do
    # :code.which/1 returns a charlist path on success and an atom
    # (e.g. :non_existing) otherwise, hence the [_ | _] match.
    with [_ | _] = beam <- :code.which(module),
         {:ok, binary} <- File.read(beam) do
      :code.purge(module)
      {:module, _} = :code.load_binary(module, beam, binary)
      :ok
    else
      _ -> {:error, :no_beam_file}
    end
  end
  # Loads the module's .beam, verifies it carries Elixir debug_info and
  # that the given {function, arity} exists in its definitions.
  # Returns {:ok, beam_path, debug_info_backend, elixir_debug_info} or a
  # tagged error atom matching the user-facing messages above.
  defp fetch_elixir_debug_info_with_fa_check(module, fa) do
    case :code.which(module) do
      [_ | _] = beam ->
        case :beam_lib.chunks(beam, [:debug_info]) do
          {:ok, {_, [debug_info: {:debug_info_v1, backend, {:elixir_v1, map, _} = elixir}]}} ->
            # Definitions are keyed by {function, arity} in element 0.
            case List.keyfind(map.definitions, fa, 0) do
              {_, _, _, _} -> {:ok, beam, backend, elixir}
              nil -> {:error, :unknown_function_arity}
            end
          {:ok, {_, [debug_info: {:debug_info_v1, _, _}]}} ->
            # Valid debug_info chunk, but not produced by the Elixir compiler.
            {:error, :non_elixir_module}
          {:error, :beam_lib, {:unknown_chunk, _, _}} ->
            # TODO: Remove this when we require OTP 20+
            {:error, :otp_20_is_required}
          {:error, :beam_lib, {:missing_chunk, _, _}} ->
            {:error, :missing_debug_info}
          _ ->
            {:error, :outdated_debug_info}
        end
      _ ->
        {:error, :no_beam_file}
    end
  end
  # Rewrites the module's definitions to inject breakpoint checks for
  # the given entries, recompiles it from its debug_info and hot-loads
  # the result. On success, replies with the breakpoint ref (counter).
  defp instrument(beam, backend, {:elixir_v1, map, specs}, counter, entries) do
    %{attributes: attributes, definitions: definitions, module: module} = map
    # The :iex_pry attribute is how instrumented?/1 recognizes an
    # instrumented module.
    attributes = [{:iex_pry, true} | attributes]
    definitions = Enum.map(definitions, &instrument_definition(&1, map, entries))
    map = %{map | attributes: attributes, definitions: definitions}
    with {:ok, forms} <- backend.debug_info(:erlang_v1, module, {:elixir_v1, map, specs}, []),
         {:ok, _, binary, _} <- :compile.noenv_forms(forms, [:return | map.compile_opts]) do
      :code.purge(module)
      {:module, _} = :code.load_binary(module, beam, binary)
      {:ok, counter}
    else
      _error ->
        {:error, :recompilation_failed}
    end
  end
  # Instruments a single function definition, but only if it has a
  # breakpoint entry; otherwise the definition is returned untouched.
  defp instrument_definition({fa, kind, meta, clauses} = definition, map, entries) do
    # Entries are {ref, module, fa, condition, breaks}; fa is element 2.
    case List.keyfind(entries, fa, 2) do
      {ref, _, ^fa, condition, _} ->
        %{module: module, file: file} = map
        # Prefer the definition's own :location file when present
        # (presumably set when the code originates from another file —
        # TODO confirm).
        file =
          case meta[:location] do
            {file, _} -> file
            _ -> file
          end
        opts = [module: module, file: file, function: fa]
        clauses = Enum.map(clauses, &instrument_clause(&1, ref, condition, opts))
        {fa, kind, meta, clauses}
      nil ->
        definition
    end
  end
  # Rewrites one clause so it first evaluates the breakpoint condition
  # (matching case_pattern against the argument values and decrementing
  # the break counter) and then runs the original clause body.
  defp instrument_clause({meta, args, guards, clause}, ref, case_pattern, opts) do
    opts = [line: Keyword.get(meta, :line, 1)] ++ opts
    # We store variables on a map ignoring the context.
    # In the rare case where variables in different contexts
    # have the same name, the last one wins.
    {_, binding} =
      Macro.prewalk(args, %{}, fn
        {name, _, ctx} = var, acc when name != :_ and is_atom(name) and is_atom(ctx) ->
          {var, Map.put(acc, name, var)}
        expr, acc ->
          {expr, acc}
      end)
    # Have an extra binding per argument for case matching.
    case_vars = Macro.generate_arguments(length(args), __MODULE__)
    case_head = {:{}, [], case_vars}
    # Decrement position 5 (the remaining breaks) by 1, clamping at -1
    # so an exhausted breakpoint stays disabled.
    update_op = Macro.escape({5, -1, -1, -1})
    # Generate the take_over condition with the ets lookup.
    # Remember this is expanded AST, so no aliases allowed,
    # no locals (such as the unary -) and so on.
    condition =
      quote do
        case unquote(case_head) do
          unquote(case_pattern) ->
            # :ets.update_counter(table, key, {pos, inc, threshold, reset})
            case :ets.update_counter(unquote(@table), unquote(ref), unquote(update_op)) do
              # -1 means the breaks were already used up: do not pry.
              unquote(-1) -> :ok
              _ -> :"Elixir.IEx.Pry".pry(unquote(Map.to_list(binding)), unquote(opts))
            end
          _ ->
            :ok
        end
      end
    # Rewrite each argument as `original_pattern = generated_var` so the
    # generated vars capture the values for the case head while the
    # original patterns keep binding as before.
    args =
      case_vars
      |> Enum.zip(args)
      |> Enum.map(fn {var, arg} -> {:=, [], [arg, var]} end)
    {meta, args, guards, {:__block__, [], [condition, clause]}}
  end
defp instrumented?(module) do
module.__info__(:attributes)[:iex_pry] == [true]
end
end
| 29.492872 | 100 | 0.599613 |
1c792aa457e87b2ce1cf4fbd42de04744f201687 | 1,657 | ex | Elixir | lib/harald/host/att/error_codes.ex | RicardoTrindade/harald | 3f56003265c29af0780730eb538183b50e55df2f | [
"MIT"
] | null | null | null | lib/harald/host/att/error_codes.ex | RicardoTrindade/harald | 3f56003265c29af0780730eb538183b50e55df2f | [
"MIT"
] | null | null | null | lib/harald/host/att/error_codes.ex | RicardoTrindade/harald | 3f56003265c29af0780730eb538183b50e55df2f | [
"MIT"
defmodule Harald.Host.ATT.ErrorCodes do
  @moduledoc """
  Encoding and decoding of ATT error codes.

  First 19 errors:
  Reference: version 5.2, Vol 3, Section 3.4.1.1
  Errors 0xFC - 0xFF:
  Reference: Core Specification Supplement Version 9, Part B
  """

  # Error codes with a dedicated name. Codes 0x80-0x9F are reserved for
  # application-defined errors and are handled by a range clause below.
  @error_codes %{
    0x01 => "Invalid Handle",
    0x02 => "Read Not Permitted",
    0x03 => "Write Not Permitted",
    0x04 => "Invalid PDU",
    0x05 => "Insufficient Authentication",
    0x06 => "Request Not Supported",
    0x07 => "Invalid Offset",
    0x08 => "Insufficient Authorization",
    0x09 => "Prepare Queue Full",
    0x0A => "Attribute Not Found",
    0x0B => "Attribute not Long",
    0x0C => "Insufficient Encryption Key Size",
    0x0D => "Invalid Attribute Value Length",
    0x0E => "Unlikely Error",
    0x0F => "Insufficient Encryption",
    0x10 => "Unsupported Group Type",
    0x11 => "Insufficient Resources",
    0x12 => "Database Out Of Sync",
    0x13 => "Value Not Allowed",
    0xFC => "Write Request Rejected",
    0xFD => "Client Characteristic Configuration Descriptor Improperly Configured",
    0xFE => "Procedure Already in Progress",
    0xFF => "Out of Range"
  }

  # Generate one decode/1 and one encode/1 clause per named error code.
  Enum.each(@error_codes, fn {error_code, name} ->
    def decode(unquote(error_code)), do: {:ok, unquote(name)}
    def encode(unquote(name)), do: {:ok, unquote(error_code)}
  end)

  # 0x80-0x9F is the application error range. The original guard clause
  # inside the generator could never match because @error_codes contains
  # no keys in that range, so application errors incorrectly fell
  # through to the {:error, ...} fallback. Handle the range here.
  def decode(error_code) when error_code in 0x80..0x9F, do: {:ok, "Application Error"}

  # Fallback for unknown encoded error codes.
  def decode(encoded_error_code) do
    {:error, {:decode, {__MODULE__, encoded_error_code}}}
  end

  # Fallback for unknown decoded error names.
  def encode(decoded_error_code) do
    {:error, {:encode, {__MODULE__, decoded_error_code}}}
  end
end
| 31.865385 | 83 | 0.653591 |
1c7934b9c7024ff8c9bc7671b73d032fc9559ca6 | 795 | exs | Elixir | mix.exs | shareup/herb | 0cf75a6d8447d0fc6c3c6fedc6cf2d1e0888e101 | [
"MIT"
] | null | null | null | mix.exs | shareup/herb | 0cf75a6d8447d0fc6c3c6fedc6cf2d1e0888e101 | [
"MIT"
] | 3 | 2020-02-03T10:10:24.000Z | 2020-05-24T12:36:04.000Z | mix.exs | shareup/herb | 0cf75a6d8447d0fc6c3c6fedc6cf2d1e0888e101 | [
"MIT"
defmodule Herb.MixProject do
  use Mix.Project

  @github_url "https://github.com/shareup/herb"

  # Mix project definition.
  def project do
    [
      app: :herb,
      version: "0.0.0",
      elixir: "~> 1.10",
      start_permanent: Mix.env() == :prod,
      deps: deps(),
      escript: escript(),
      description: description(),
      package: package()
    ]
  end

  # Hex package description.
  def description do
    "Execute .exs scripts with the ability to depend on other mix projects (including hex packages) without setting up a project yourself."
  end

  # Escript entry point module.
  def escript do
    [main_module: Herb]
  end

  # OTP application configuration.
  def application do
    [extra_applications: [:logger]]
  end

  # Hex package metadata.
  defp package do
    [
      licenses: ["MIT"],
      links: %{"GitHub" => @github_url}
    ]
  end

  # Development-only dependencies.
  defp deps do
    [{:ex_doc, ">= 0.0.0", only: :dev, runtime: false}]
  end
end
| 18.928571 | 139 | 0.562264 |
1c793cb73432ca7a135d5d06d49a5948329bb9c1 | 516 | exs | Elixir | test/reporting_test.exs | access-company/testgear | 6817b5a9f782fd501c5e8c74f12a33617bec4ba8 | [
"Apache-2.0"
] | 1 | 2018-04-26T08:40:13.000Z | 2018-04-26T08:40:13.000Z | test/reporting_test.exs | access-company/testgear | 6817b5a9f782fd501c5e8c74f12a33617bec4ba8 | [
"Apache-2.0"
] | 14 | 2018-04-02T09:12:03.000Z | 2019-10-08T04:43:39.000Z | test/reporting_test.exs | access-company/testgear | 6817b5a9f782fd501c5e8c74f12a33617bec4ba8 | [
"Apache-2.0"
] | 2 | 2018-04-26T05:50:06.000Z | 2021-09-01T18:48:43.000Z | # Copyright(c) 2015-2021 ACCESS CO., LTD. All rights reserved.
defmodule Testgear.ReportingTest do
  use ExUnit.Case

  # Each test hits a blackbox endpoint and only checks the HTTP status.
  @tag :blackbox
  test "logging should function correctly" do
    response = Req.get("/report_log")
    assert response.status == 200
  end

  @tag :blackbox
  test "metric reporting should function correctly" do
    response = Req.get("/report_metric")
    assert response.status == 200
  end

  @tag :blackbox
  test "async job registration should function correctly" do
    response = Req.post("/register_async_job", "")
    assert response.status == 200
  end
end
| 24.571429 | 62 | 0.717054 |
1c7976e4f48dc3e231ce7fedb03f7a540d55d3ab | 2,096 | exs | Elixir | test/upload_test.exs | artemeff/upload | ab495e79caed499c0c80d273e581887b7df51b9d | [
"MIT"
] | 30 | 2017-12-23T15:00:30.000Z | 2021-09-29T19:33:09.000Z | test/upload_test.exs | artemeff/upload | ab495e79caed499c0c80d273e581887b7df51b9d | [
"MIT"
] | 6 | 2018-05-03T18:44:36.000Z | 2021-04-21T17:15:25.000Z | test/upload_test.exs | artemeff/upload | ab495e79caed499c0c80d273e581887b7df51b9d | [
"MIT"
defmodule UploadTest do
  use ExUnit.Case

  doctest Upload,
    except: [
      get_url: 1,
      get_signed_url: 2,
      transfer: 1,
      generate_key: 2,
      cast: 2,
      cast_path: 2
    ]

  @fixture Path.expand("./fixtures/text.txt", __DIR__)
  @plug %Plug.Upload{path: @fixture, filename: "text.txt"}

  # Generated keys are 32 lowercase alphanumeric chars plus extension.
  @txt_key ~r"^[a-z0-9]{32}\.txt$"

  test "get_url/1 and transfer/1" do
    start_supervised(Upload.Adapters.Test)

    assert {:ok, pending} = Upload.cast_path(@fixture)
    assert {:ok, transferred} = Upload.transfer(pending)
    assert Upload.get_url(transferred) == transferred.key
    assert Upload.get_url(transferred.key) == transferred.key
  end

  test "get_signed_url/2" do
    start_supervised(Upload.Adapters.Test)

    assert {:ok, pending} = Upload.cast_path(@fixture)
    assert {:ok, transferred} = Upload.transfer(pending)
    assert Upload.get_signed_url(transferred) == {:ok, transferred.key}
    assert Upload.get_signed_url(transferred.key) == {:ok, transferred.key}
  end

  test "generate_key/1" do
    assert Upload.generate_key("phoenix.png") =~ ~r"^[a-z0-9]{32}\.png$"
  end

  test "generate_key/2" do
    assert Upload.generate_key("phoenix.png", prefix: ["logos"]) =~ ~r"^logos/[a-z0-9]{32}\.png$"
  end

  test "cast/1 with a %Plug.Upload{}" do
    assert {:ok, upload} = Upload.cast(@plug)

    assert upload.path == @plug.path
    assert upload.filename == @plug.filename
    assert upload.key =~ @txt_key
    assert upload.status == :pending
  end

  test "cast/1 with an %Upload{}" do
    assert {:ok, upload} = Upload.cast_path(@fixture)
    assert {:ok, ^upload} = Upload.cast(upload)
  end

  test "cast/1 with something else" do
    assert Upload.cast(100) == :error
    assert Upload.cast(nil) == :error
  end

  test "cast_path/1 with a path" do
    assert {:ok, upload} = Upload.cast_path(@fixture)

    assert upload.path == @plug.path
    assert upload.filename == @plug.filename
    assert upload.key =~ @txt_key
    assert upload.status == :pending
  end

  test "cast_path/1 with something else" do
    assert :error = Upload.cast_path(100)
    assert :error = Upload.cast_path(nil)
  end
end
| 27.578947 | 97 | 0.649809 |
1c7a718648a39b51f513330cc80a1f081c96ad07 | 1,363 | exs | Elixir | mix.exs | whitepaperclip/bamboo_postmark | f983bfed8a25636b2e2561aa79c96374ad35ea85 | [
"MIT"
] | 31 | 2016-11-15T19:33:21.000Z | 2022-02-17T19:54:52.000Z | mix.exs | whitepaperclip/bamboo_postmark | f983bfed8a25636b2e2561aa79c96374ad35ea85 | [
"MIT"
] | 30 | 2016-12-29T22:42:56.000Z | 2022-02-07T03:39:53.000Z | mix.exs | pablo-co/bamboo_postmark | c8b8cbf6103fc38a8ad467a3ab0eb6ebe44a84e6 | [
"MIT"
defmodule BambooPostmark.Mixfile do
  use Mix.Project

  @source_url "https://github.com/pablo-co/bamboo_postmark"
  @version "1.0.0"

  # Mix project definition.
  def project do
    [
      app: :bamboo_postmark,
      version: @version,
      elixir: "~> 1.4",
      name: "Bamboo Postmark",
      build_embedded: Mix.env() == :prod,
      start_permanent: Mix.env() == :prod,
      package: package(),
      deps: deps(),
      docs: docs()
    ]
  end

  # OTP application configuration.
  def application do
    [extra_applications: [:logger]]
  end

  # Hex package metadata.
  defp package do
    [
      description: "A Bamboo adapter for Postmark",
      maintainers: ["Pablo Cárdenas"],
      licenses: ["MIT"],
      links: %{
        "Changelog" => "https://hexdocs.pm/bamboo/postmark/changelog.html",
        "GitHub" => @source_url
      }
    ]
  end

  # ExDoc configuration.
  defp docs do
    [
      extras: [
        "CHANGELOG.md": [title: "Changelog"],
        "LICENSE.md": [title: "License"],
        "README.md": [title: "Overview"]
      ],
      main: "readme",
      source_url: @source_url,
      homepage_url: @source_url,
      formatters: ["html"]
    ]
  end

  # Runtime, test and dev dependencies.
  defp deps do
    [
      {:bamboo, ">= 2.0.0"},
      {:hackney, ">= 1.6.5"},
      {:poison, ">= 1.5.0", only: :test},
      {:plug, "~> 1.0"},
      {:plug_cowboy, "~> 1.0", only: [:test, :dev]},
      {:ex_doc, "> 0.0.0", only: :dev, runtime: false}
    ]
  end
end
| 21.296875 | 75 | 0.525312 |
1c7a86f5ade1438e1fa5248ea48bd394b3e167fb | 5,495 | ex | Elixir | lib/chat_api_web/router.ex | sladyn98/papercups | 3b17ccc4c974ac6e16c9962a576e64a832dafdb9 | [
"MIT"
] | null | null | null | lib/chat_api_web/router.ex | sladyn98/papercups | 3b17ccc4c974ac6e16c9962a576e64a832dafdb9 | [
"MIT"
] | null | null | null | lib/chat_api_web/router.ex | sladyn98/papercups | 3b17ccc4c974ac6e16c9962a576e64a832dafdb9 | [
"MIT"
defmodule ChatApiWeb.Router do
  @moduledoc """
  HTTP routing for the ChatApi application.

  Routes are grouped into the Swagger UI, public API routes,
  authenticated API routes, the LiveDashboard (dev/test only) and a
  browser catch-all that renders the React app.
  """

  use ChatApiWeb, :router

  pipeline :browser do
    plug :accepts, ["html"]
    plug :fetch_session
    plug :fetch_flash
    plug :protect_from_forgery
    plug :put_secure_browser_headers
  end

  pipeline :api do
    plug(ChatApiWeb.IPAddressPlug)
    plug(:accepts, ["json"])
    plug(ChatApiWeb.APIAuthPlug, otp_app: :chat_api)
  end

  # Requires an authenticated, enabled user on top of the :api pipeline.
  pipeline :api_protected do
    plug(Pow.Plug.RequireAuthenticated, error_handler: ChatApiWeb.APIAuthErrorHandler)
    plug(ChatApiWeb.EnsureUserEnabledPlug)
  end

  # Swagger
  scope "/api/swagger" do
    forward "/", PhoenixSwagger.Plug.SwaggerUI, otp_app: :chat_api, swagger_file: "swagger.json"
  end

  # Public routes
  scope "/api", ChatApiWeb do
    pipe_through(:api)

    resources("/registration", RegistrationController, singleton: true, only: [:create])
    resources("/session", SessionController, singleton: true, only: [:create, :delete])
    post("/session/renew", SessionController, :renew)
    # TODO: figure out a way to secure these methods so they aren't abused
    post("/accounts", AccountController, :create)
    post("/conversations", ConversationController, :create)
    post("/customers", CustomerController, :create)
    put("/customers/:id/metadata", CustomerController, :update_metadata)
    get("/customers/identify", CustomerController, :identify)
    get("/widget_settings", WidgetSettingsController, :show)
    put("/widget_settings/metadata", WidgetSettingsController, :update_metadata)
    post("/verify_email", UserController, :verify_email)
    post("/reset_password", UserController, :create_password_reset)
    put("/reset_password", UserController, :reset_password)
    post("/browser_sessions", BrowserSessionController, :create)
    post("/browser_sessions/:id/finish", BrowserSessionController, :finish)
    post("/browser_sessions/:id/restart", BrowserSessionController, :restart)
    get("/browser_sessions/:id/exists", BrowserSessionController, :exists)
    # TODO: figure out a better name?
    get("/conversations/customer", ConversationController, :find_by_customer)
    post("/slack/webhook", SlackController, :webhook)
  end

  # Protected routes
  scope "/api", ChatApiWeb do
    pipe_through([:api, :api_protected])

    get("/me", SessionController, :me)
    get("/accounts/me", AccountController, :me)
    get("/messages/count", MessageController, :count)
    get("/billing", BillingController, :show)
    post("/billing", BillingController, :create)
    put("/billing", BillingController, :update)
    get("/reporting", ReportingController, :index)
    get("/slack/oauth", SlackController, :oauth)
    get("/slack/authorization", SlackController, :authorization)
    get("/gmail/auth", GmailController, :auth)
    get("/gmail/oauth", GmailController, :callback)
    get("/gmail/authorization", GmailController, :authorization)
    post("/gmail/send", GmailController, :send)
    put("/widget_settings", WidgetSettingsController, :update)
    get("/profile", UserProfileController, :show)
    put("/profile", UserProfileController, :update)
    get("/user_settings", UserSettingsController, :show)
    put("/user_settings", UserSettingsController, :update)
    post("/users/:id/disable", UserController, :disable)
    post("/users/:id/enable", UserController, :enable)
    post("/payment_methods", PaymentMethodController, :create)
    get("/payment_methods", PaymentMethodController, :show)
    resources("/user_invitations", UserInvitationController, except: [:new, :edit])
    resources("/accounts", AccountController, only: [:update, :delete])
    resources("/messages", MessageController, except: [:new, :edit])
    resources("/conversations", ConversationController, except: [:new, :edit, :create])
    resources("/customers", CustomerController, except: [:new, :edit, :create])
    resources("/event_subscriptions", EventSubscriptionController, except: [:new, :edit])
    resources("/tags", TagController, except: [:new, :edit])
    resources "/browser_sessions", BrowserSessionController, except: [:create, :new, :edit]
    post("/conversations/:conversation_id/tags", ConversationController, :add_tag)
    delete("/conversations/:conversation_id/tags/:tag_id", ConversationController, :remove_tag)
    post("/customers/:customer_id/tags", CustomerController, :add_tag)
    delete("/customers/:customer_id/tags/:tag_id", CustomerController, :remove_tag)
    post("/event_subscriptions/verify", EventSubscriptionController, :verify)
  end

  # Enables LiveDashboard only for development
  #
  # If you want to use the LiveDashboard in production, you should put
  # it behind authentication and allow only admins to access it.
  # If your application does not have an admins-only section yet,
  # you can use Plug.BasicAuth to set up some basic authentication
  # as long as you are also using SSL (which you should anyway).
  if Mix.env() in [:dev, :test] do
    import Phoenix.LiveDashboard.Router

    scope "/" do
      pipe_through([:fetch_session, :protect_from_forgery])
      live_dashboard("/dashboard", metrics: ChatApiWeb.Telemetry)
    end
  end

  scope "/", ChatApiWeb do
    pipe_through :browser

    get "/", PageController, :index
    # TODO: move somewhere else?
    get "/gmail/auth", GmailController, :index
    # Fallback to index, which renders React app
    get "/*path", PageController, :index
  end

  # Top-level metadata for the generated Swagger document.
  def swagger_info do
    %{
      info: %{
        version: "1.0",
        title: "Papercups API"
      }
    }
  end
end
| 39.818841 | 96 | 0.715742 |
1c7a9a32f860531962aca4bb946da51bac96ff38 | 1,796 | ex | Elixir | test/support/model_case.ex | RFmind/scrumapi | 3bc2caf48e4e9b5b5448fe0724815609fc4bcee2 | [
"MIT"
] | null | null | null | test/support/model_case.ex | RFmind/scrumapi | 3bc2caf48e4e9b5b5448fe0724815609fc4bcee2 | [
"MIT"
] | null | null | null | test/support/model_case.ex | RFmind/scrumapi | 3bc2caf48e4e9b5b5448fe0724815609fc4bcee2 | [
"MIT"
defmodule Scrumapi.ModelCase do
  @moduledoc """
  This module defines the test case to be used by
  model tests.
  You may define functions here to be used as helpers in
  your model tests. See `errors_on/2`'s definition as reference.
  Finally, if the test case interacts with the database,
  it cannot be async. For this reason, every test runs
  inside a transaction which is reset at the beginning
  of the test unless the test case is marked as async.
  """

  use ExUnit.CaseTemplate

  using do
    quote do
      alias Scrumapi.Repo

      import Ecto
      import Ecto.Changeset
      import Ecto.Query
      import Scrumapi.ModelCase
    end
  end

  setup tags do
    :ok = Ecto.Adapters.SQL.Sandbox.checkout(Scrumapi.Repo)

    # Non-async tests share the sandbox connection with processes
    # spawned by the test, so cross-process DB access works.
    unless tags[:async] do
      Ecto.Adapters.SQL.Sandbox.mode(Scrumapi.Repo, {:shared, self()})
    end

    :ok
  end

  @doc """
  Helper for returning list of errors in a struct when given certain data.
  ## Examples
  Given a User schema that lists `:name` as a required field and validates
  `:password` to be safe, it would return:
      iex> errors_on(%User{}, %{password: "password"})
      [password: "is unsafe", name: "is blank"]
  You could then write your assertion like:
      assert {:password, "is unsafe"} in errors_on(%User{}, %{password: "password"})
  You can also create the changeset manually and retrieve the errors
  field directly:
      iex> changeset = User.changeset(%User{}, password: "password")
      iex> {:password, "is unsafe"} in changeset.errors
      true
  """
  def errors_on(struct, data) do
    struct.__struct__.changeset(struct, data)
    |> Ecto.Changeset.traverse_errors(&Scrumapi.ErrorHelpers.translate_error/1)
    |> Enum.flat_map(fn {key, errors} -> for msg <- errors, do: {key, msg} end)
  end
end
| 27.212121 | 84 | 0.685969 |
1c7aa092539b369b526aab5112772c3ceb011e4d | 2,480 | ex | Elixir | examples/bank/lib/bank/bank_account_event_handler.ex | OleMchls/incident | 220e46598719fc50d10c96e5a848080172bf6f9b | [
"MIT"
] | null | null | null | examples/bank/lib/bank/bank_account_event_handler.ex | OleMchls/incident | 220e46598719fc50d10c96e5a848080172bf6f9b | [
"MIT"
] | null | null | null | examples/bank/lib/bank/bank_account_event_handler.ex | OleMchls/incident | 220e46598719fc50d10c96e5a848080172bf6f9b | [
"MIT"
defmodule Bank.BankAccountEventHandler do
  @moduledoc """
  Projects bank-account events into the `Bank.Projections.BankAccount`
  read model.

  Each event is applied to the aggregate to produce the new state, and
  the resulting balance plus event metadata is written to the
  projection store.
  """

  @behaviour Incident.EventHandler

  alias Bank.BankAccount, as: Aggregate
  alias Bank.Projections.BankAccount
  alias Incident.ProjectionStore

  # "AccountOpened" is the only event that also projects the account
  # number; every other event only updates the balance.
  @impl true
  def listen(%{event_type: "AccountOpened"} = event, state) do
    new_state = Aggregate.apply(event, state)

    new_state
    |> projection_data(event)
    |> Map.put(:account_number, new_state.account_number)
    |> project()
  end

  # All balance-only events share the exact same projection logic, so
  # the six original copy-pasted clauses are collapsed into one.
  @impl true
  def listen(%{event_type: event_type} = event, state)
      when event_type in [
             "MoneyDeposited",
             "MoneyWithdrawn",
             "MoneySent",
             "MoneyReceived",
             "MoneySentReverted"
           ] do
    event
    |> Aggregate.apply(state)
    |> projection_data(event)
    |> project()
  end

  # Builds the projection fields shared by every event type.
  defp projection_data(new_state, event) do
    %{
      aggregate_id: new_state.aggregate_id,
      balance: new_state.balance,
      version: event.version,
      event_id: event.event_id,
      event_date: event.event_date
    }
  end

  # Writes the data into the read-model store.
  defp project(data), do: ProjectionStore.project(BankAccount, data)
end
| 25.050505 | 66 | 0.685081 |
1c7aa108b5cae989c5d5a60632863c9c107d6178 | 473 | exs | Elixir | test/absinthe_subscriptions_web/views/error_view_test.exs | egjimenezg/absinthe_subscriptions | 49d05d9ce141c9de5d1cfabe21452d2bde8c49d9 | [
"Apache-2.0"
] | null | null | null | test/absinthe_subscriptions_web/views/error_view_test.exs | egjimenezg/absinthe_subscriptions | 49d05d9ce141c9de5d1cfabe21452d2bde8c49d9 | [
"Apache-2.0"
] | null | null | null | test/absinthe_subscriptions_web/views/error_view_test.exs | egjimenezg/absinthe_subscriptions | 49d05d9ce141c9de5d1cfabe21452d2bde8c49d9 | [
"Apache-2.0"
defmodule AbsintheSubscriptionsWeb.ErrorViewTest do
  use AbsintheSubscriptionsWeb.ConnCase, async: true

  # Bring render/3 and render_to_string/3 for testing custom views
  import Phoenix.View

  alias AbsintheSubscriptionsWeb.ErrorView

  test "renders 404.html" do
    rendered = render_to_string(ErrorView, "404.html", [])
    assert rendered == "Not Found"
  end

  test "renders 500.html" do
    rendered = render_to_string(ErrorView, "500.html", [])
    assert rendered == "Internal Server Error"
  end
end
| 31.533333 | 106 | 0.765328 |
1c7aa4bd9014e465ae0e3488115a9399944a94c8 | 968 | ex | Elixir | Chapter9/dynamic_workers/lib/todo/cache.ex | benjamindburke/elixir-studies | 65231b5af83dcf701041cae2879107c3bd3e5078 | [
"Unlicense"
] | null | null | null | Chapter9/dynamic_workers/lib/todo/cache.ex | benjamindburke/elixir-studies | 65231b5af83dcf701041cae2879107c3bd3e5078 | [
"Unlicense"
] | null | null | null | Chapter9/dynamic_workers/lib/todo/cache.ex | benjamindburke/elixir-studies | 65231b5af83dcf701041cae2879107c3bd3e5078 | [
"Unlicense"
] | null | null | null | # Todo Cache [dynamic supervisor]
# This module supervises Todo.Server instances which perform a client's to-do storage and retrieval
defmodule Todo.Cache do
  # ---------
  # Interface functions
  # ---------

  # Returns the pid of the Todo.Server for `todo_list_name`. A child is
  # started on demand; if one is already running under the supervisor,
  # its existing pid is reused.
  @spec server_process(charlist) :: pid
  def server_process(todo_list_name) do
    start_result = start_child(todo_list_name)

    case start_result do
      {:ok, pid} -> pid
      {:error, {:already_started, pid}} -> pid
    end
  end

  # ---------
  # DynamicSupervisor hook functions
  # ---------

  @spec start_link :: {:error, any} | {:ok, pid}
  def start_link do
    IO.puts("Starting to-do cache.")
    DynamicSupervisor.start_link(name: __MODULE__, strategy: :one_for_one)
  end

  # Runs this module as a supervisor within a parent supervision tree.
  def child_spec(_arg) do
    %{id: __MODULE__, start: {__MODULE__, :start_link, []}, type: :supervisor}
  end

  defp start_child(todo_list_name) do
    DynamicSupervisor.start_child(__MODULE__, {Todo.Server, todo_list_name})
  end
end
| 22 | 99 | 0.629132 |
1c7af3c372fd077d08f125408db462145f7c9e3f | 7,853 | exs | Elixir | test/rabbit_mq/producer/producer_worker_test.exs | qworks-io/rabbitex | 3359fc573b8e8e60cdd2cc03b6ebc2bdcc8a3d70 | [
"MIT"
] | 29 | 2020-05-02T18:25:31.000Z | 2021-02-16T19:43:39.000Z | test/rabbit_mq/producer/producer_worker_test.exs | qworks-io/rabbit_ex_mq | 3359fc573b8e8e60cdd2cc03b6ebc2bdcc8a3d70 | [
"MIT"
] | 43 | 2020-04-30T16:34:46.000Z | 2021-07-26T06:22:43.000Z | test/rabbit_mq/producer/producer_worker_test.exs | qworks-io/rabbit_ex_mq | 3359fc573b8e8e60cdd2cc03b6ebc2bdcc8a3d70 | [
"MIT"
] | 3 | 2020-05-03T15:47:16.000Z | 2021-04-05T05:05:47.000Z | defmodule RabbitMQTest.Producer.Worker do
alias RabbitMQ.Producer.Worker
use AMQP
use ExUnit.Case
@amqp_url Application.get_env(:rabbit_mq, :amqp_url)
@data :crypto.strong_rand_bytes(2_000) |> Base.encode64()
@exchange "#{__MODULE__}"
@routing_key :crypto.strong_rand_bytes(16) |> Base.encode16()
@connection __MODULE__.Connection
@base_opts [
connection: @connection,
exchange: @exchange
]
setup_all do
assert {:ok, _pid} = start_supervised({RabbitMQ.Connection, [name: @connection]})
assert {:ok, connection} = Connection.open(@amqp_url)
assert {:ok, channel} = Channel.open(connection)
# Ensure we have a disposable exchange set up.
assert :ok = Exchange.declare(channel, @exchange, :direct, durable: false)
# Clean up after all tests have ran.
on_exit(fn ->
assert :ok = Exchange.delete(channel, @exchange)
assert :ok = Channel.close(channel)
assert :ok = Connection.close(connection)
end)
[channel: channel]
end
setup do
test_pid = self()
# Declare opts with default publisher confirm callbacks.
opts =
@base_opts
|> Keyword.put(:handle_publisher_ack_confirms, fn events ->
send(test_pid, {:ack, events})
end)
|> Keyword.put(:handle_publisher_nack_confirms, fn events ->
send(test_pid, {:nack, events})
end)
[correlation_id: UUID.uuid4(), opts: opts]
end
test "start_link/1 starts a worker and establishes a dedicated channel", %{
opts: opts
} do
assert {:ok, pid} = start_supervised({Worker, opts})
assert %Worker.State{
channel: %Channel{} = channel,
exchange: @exchange,
outstanding_confirms: []
} = :sys.get_state(pid)
assert channel.conn === GenServer.call(@connection, :get)
end
test "publishes a message", %{
channel: channel,
correlation_id: correlation_id,
opts: opts
} do
# Declare an exclusive queue.
{:ok, %{queue: queue}} = Queue.declare(channel, "", exclusive: true)
:ok = Queue.bind(channel, queue, @exchange, routing_key: @routing_key)
assert {:ok, pid} = start_supervised({Worker, opts})
# Start receiving Consumer events.
assert {:ok, consumer_tag} = Basic.consume(channel, queue)
# This will always be the first message received by the process.
assert_receive({:basic_consume_ok, %{consumer_tag: ^consumer_tag}})
publish_opts = [correlation_id: correlation_id]
assert {:ok, seq_no} = GenServer.call(pid, {:publish, @routing_key, @data, publish_opts})
assert_receive(
{:basic_deliver, @data,
%{
consumer_tag: ^consumer_tag,
correlation_id: ^correlation_id,
delivery_tag: ^seq_no,
routing_key: @routing_key
}}
)
assert_receive({:ack, [{^seq_no, @routing_key, @data, ^publish_opts}]})
# Unsubscribe.
Basic.cancel(channel, consumer_tag)
# This will always be the last message received by the process.
assert_receive({:basic_cancel_ok, %{consumer_tag: ^consumer_tag}})
# Ensure no further messages are received.
refute_receive(_)
# This queue would have been deleted automatically when the connection
# gets closed, however we manually delete it to avoid any naming conflicts
# in between tests, no matter how unlikely. Also, we ensure there are no
# messages left hanging in the queue.
assert {:ok, %{message_count: 0}} = Queue.delete(channel, queue)
end
test "[only item] publisher confirm event confirms a single outstanding confirm", %{
opts: opts
} do
assert {:ok, pid} = start_supervised({Worker, opts})
state =
pid
|> :sys.get_state()
|> Map.put(:outstanding_confirms, [{42, nil, nil, nil}])
mode = Enum.random([:basic_ack, :basic_nack])
assert {:noreply, state} = Worker.handle_info({mode, 42, false}, state)
assert [] = state.outstanding_confirms
end
test "[first item] publisher confirm event confirms a single outstanding confirm", %{
opts: opts
} do
assert {:ok, pid} = start_supervised({Worker, opts})
state =
pid
|> :sys.get_state()
|> Map.put(:outstanding_confirms, [
{42, nil, nil, nil},
{41, nil, nil, nil}
])
mode = Enum.random([:basic_ack, :basic_nack])
assert {:noreply, state} = Worker.handle_info({mode, 42, false}, state)
assert [{41, nil, nil, nil}] = state.outstanding_confirms
end
test "[somewhere in the list] publisher confirm event confirms a single outstanding confirm", %{
opts: opts
} do
assert {:ok, pid} = start_supervised({Worker, opts})
state =
pid
|> :sys.get_state()
|> Map.put(:outstanding_confirms, [
{43, nil, nil, nil},
{42, nil, nil, nil},
{41, nil, nil, nil}
])
mode = Enum.random([:basic_ack, :basic_nack])
assert {:noreply, state} = Worker.handle_info({mode, 42, false}, state)
assert [
{43, nil, nil, nil},
{41, nil, nil, nil}
] = state.outstanding_confirms
end
test "[first or only item] publisher confirm event confirms multiple outstanding confirms", %{
opts: opts
} do
assert {:ok, pid} = start_supervised({Worker, opts})
state =
pid
|> :sys.get_state()
|> Map.put(:outstanding_confirms, [
{42, nil, nil, nil},
{41, nil, nil, nil},
{40, nil, nil, nil}
])
mode = Enum.random([:basic_ack, :basic_nack])
assert {:noreply, state} = Worker.handle_info({mode, 42, true}, state)
assert [] = state.outstanding_confirms
end
test "[somewhere in the list] publisher confirm event confirms multiple outstanding confirms",
%{opts: opts} do
assert {:ok, pid} = start_supervised({Worker, opts})
state =
pid
|> :sys.get_state()
|> Map.put(:outstanding_confirms, [
{43, nil, nil, nil},
{42, nil, nil, nil},
{41, nil, nil, nil}
])
mode = Enum.random([:basic_ack, :basic_nack])
assert {:noreply, state} = Worker.handle_info({mode, 42, true}, state)
assert [{43, nil, nil, nil}] = state.outstanding_confirms
end
test "publisher acknowledgement triggers corresponding callback", %{opts: opts} do
test_pid = self()
assert {:ok, pid} =
start_supervised(
{Worker,
Keyword.put(opts, :handle_publisher_ack_confirms, fn confirms ->
send(test_pid, confirms)
end)}
)
state =
pid
|> :sys.get_state()
|> Map.put(:outstanding_confirms, [{42, nil, nil, nil}])
multiple? = Enum.random([true, false])
assert {:noreply, state} = Worker.handle_info({:basic_ack, 42, multiple?}, state)
assert_receive([{42, nil, nil, nil}])
end
test "publisher negative acknowledgement triggers corresponding callback", %{
opts: opts
} do
test_pid = self()
assert {:ok, pid} =
start_supervised(
{Worker,
Keyword.put(opts, :handle_publisher_nack_confirms, fn confirms ->
send(test_pid, confirms)
end)}
)
state =
pid
|> :sys.get_state()
|> Map.put(:outstanding_confirms, [{42, nil, nil, nil}])
multiple? = Enum.random([true, false])
assert {:noreply, state} = Worker.handle_info({:basic_nack, 42, multiple?}, state)
assert_receive([{42, nil, nil, nil}])
end
test "when a monitored process dies, an instruction to stop the GenServer is returned", %{
opts: opts
} do
assert {:ok, pid} = start_supervised({Worker, opts})
state = :sys.get_state(pid)
assert {:stop, {:channel_down, :failure}, state} =
Worker.handle_info({:DOWN, :reference, :process, self(), :failure}, state)
end
end
| 28.871324 | 98 | 0.621291 |
1c7b01999e0f17869c042c193dc59138db89a987 | 2,447 | ex | Elixir | clients/content/lib/google_api/content/v21/model/return_address_address.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2021-12-20T03:40:53.000Z | 2021-12-20T03:40:53.000Z | clients/content/lib/google_api/content/v21/model/return_address_address.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2020-08-18T00:11:23.000Z | 2020-08-18T00:44:16.000Z | clients/content/lib/google_api/content/v21/model/return_address_address.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Content.V21.Model.ReturnAddressAddress do
  @moduledoc """
  Address fields of a return address.

  ## Attributes

  *   `country` (*type:* `String.t`, *default:* `nil`) - CLDR country code (e.g. "US").
  *   `locality` (*type:* `String.t`, *default:* `nil`) - City, town or commune. May also include dependent localities or sublocalities (e.g. neighborhoods or suburbs).
  *   `postalCode` (*type:* `String.t`, *default:* `nil`) - Postal code or ZIP (e.g. "94043").
  *   `recipientName` (*type:* `String.t`, *default:* `nil`) - Name of the recipient to address returns to.
  *   `region` (*type:* `String.t`, *default:* `nil`) - Top-level administrative subdivision of the country. For example, a state like California ("CA") or a province like Quebec ("QC").
  *   `streetAddress` (*type:* `list(String.t)`, *default:* `nil`) - Street-level part of the address. May be up to two lines, each line specified as an array element.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :country => String.t() | nil,
          :locality => String.t() | nil,
          :postalCode => String.t() | nil,
          :recipientName => String.t() | nil,
          :region => String.t() | nil,
          :streetAddress => list(String.t()) | nil
        }

  field(:country)
  field(:locality)
  field(:postalCode)
  field(:recipientName)
  field(:region)
  # streetAddress is a JSON array of strings.
  field(:streetAddress, type: :list)
end
defimpl Poison.Decoder, for: GoogleApi.Content.V21.Model.ReturnAddressAddress do
  # Delegate JSON decoding to the model's generated decode/2.
  def decode(value, options) do
    GoogleApi.Content.V21.Model.ReturnAddressAddress.decode(value, options)
  end
end
defimpl Poison.Encoder, for: GoogleApi.Content.V21.Model.ReturnAddressAddress do
  # Delegate JSON encoding to the shared Gax model encoder.
  def encode(value, options) do
    GoogleApi.Gax.ModelBase.encode(value, options)
  end
end
| 39.467742 | 186 | 0.686555 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.