hexsha stringlengths 40 40 | size int64 2 991k | ext stringclasses 2 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 208 | max_stars_repo_name stringlengths 6 106 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses list | max_stars_count int64 1 33.5k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 208 | max_issues_repo_name stringlengths 6 106 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses list | max_issues_count int64 1 16.3k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 208 | max_forks_repo_name stringlengths 6 106 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses list | max_forks_count int64 1 6.91k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 991k | avg_line_length float64 1 36k | max_line_length int64 1 977k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c7b1357818290c64426b1c09d63fc0201c87b09 | 11,342 | exs | Elixir | apps/vax/test/integration/adapter_test.exs | vaxine-io/vaxine | 872a83ea8d4935a52c7b850bb17ab099ee9c346b | [
"Apache-2.0"
] | 8 | 2022-03-14T15:33:08.000Z | 2022-03-30T22:06:04.000Z | apps/vax/test/integration/adapter_test.exs | vaxine-io/vaxine | 872a83ea8d4935a52c7b850bb17ab099ee9c346b | [
"Apache-2.0"
] | 9 | 2022-03-15T15:48:28.000Z | 2022-03-21T23:11:34.000Z | apps/vax/test/integration/adapter_test.exs | vaxine-io/vaxine | 872a83ea8d4935a52c7b850bb17ab099ee9c346b | [
"Apache-2.0"
] | null | null | null | defmodule Vax.AdapterIntegrationTest do
use ExUnit.Case, async: true
@moduletag :integration
defmodule TestRepo do
use Ecto.Repo, adapter: Vax.Adapter, otp_app: :vax
end
defmodule MySchema do
use Vax.Schema
schema "my_schema" do
field(:name, :string)
field(:age, :integer)
field(:number_of_friends, :integer, default: 2)
field(:number_of_followers, :integer)
field(:likes_given, Vax.Types.Counter, default: 0)
field(:favorite_numbers, Vax.Types.Set, type: :integer)
field(:public_profile?, Vax.Types.Flag, strategy: :disable_wins)
end
end
setup_all %{} do
hostname = Application.get_env(:vax, :hostname)
port = Application.get_env(:vax, :port)
start_supervised!({TestRepo, hostname: hostname, port: port, log: true})
:ok
end
test "checking out a connection" do
refute TestRepo.checked_out?()
TestRepo.checkout(fn ->
assert TestRepo.checked_out?()
end)
refute TestRepo.checked_out?()
end
test "performing actions on checked out connection" do
TestRepo.checkout(fn ->
id = Ecto.UUID.generate()
assert 0 = TestRepo.read_counter(id)
assert :ok = TestRepo.increment_counter(id, 10)
assert 10 = TestRepo.read_counter(id)
assert :ok = TestRepo.increment_counter(id, 20)
assert 30 = TestRepo.read_counter(id)
end)
end
test "reading and increasing a counter" do
id = Ecto.UUID.generate()
assert 0 = TestRepo.read_counter(id)
assert :ok = TestRepo.increment_counter(id, 10)
assert 10 = TestRepo.read_counter(id)
assert :ok = TestRepo.increment_counter(id, 20)
assert 30 = TestRepo.read_counter(id)
end
describe "insert/2" do
test "inserts raw struct" do
id = Ecto.UUID.generate()
assert {:ok, %MySchema{id: ^id}} = TestRepo.insert(%MySchema{id: id, name: "Carl", age: 26})
assert %MySchema{id: ^id, name: "Carl", age: 26, likes_given: 0} =
TestRepo.get(MySchema, id)
end
test "inserts with a valid changeset" do
id = Ecto.UUID.generate()
changeset = Ecto.Changeset.change(%MySchema{id: id, name: "Carl", age: 26, likes_given: 3})
assert {:ok, %MySchema{id: ^id}} = TestRepo.insert(changeset)
assert %MySchema{id: ^id, name: "Carl", age: 26, likes_given: 3} =
TestRepo.get(MySchema, id)
end
test "inserts with a valid changeset and autogenerated id" do
changeset =
Ecto.Changeset.change(%MySchema{
name: "Carl",
age: 26,
likes_given: 3
})
assert {:ok, %MySchema{id: id}} = TestRepo.insert(changeset)
assert %MySchema{id: ^id} = TestRepo.get(MySchema, id)
end
test "fails for invalid changeset" do
changeset =
%MySchema{name: "Carl"}
|> Ecto.Changeset.cast(%{age: -10}, [:age])
|> Ecto.Changeset.validate_number(:age, greater_than_or_equal_to: 0)
assert {:error, %Ecto.Changeset{valid?: false}} = TestRepo.insert(changeset)
end
end
describe "insert!/2" do
test "raises for invalid changeset" do
assert_raise Ecto.InvalidChangesetError, fn ->
%MySchema{name: "Carl"}
|> Ecto.Changeset.cast(%{age: -10}, [:age])
|> Ecto.Changeset.validate_number(:age, greater_than_or_equal_to: 0)
|> TestRepo.insert!()
end
end
end
describe "update/2" do
test "updating an entry with valid changeset" do
{:ok, entry} = TestRepo.insert(%MySchema{name: "Carl", age: 26})
assert {:ok, %{age: 27}} =
entry
|> Ecto.Changeset.change(age: 27, likes_given: 2, public_profile?: true)
|> TestRepo.update()
assert %MySchema{age: 27, name: "Carl", likes_given: 2, public_profile?: true} =
TestRepo.get(MySchema, entry.id)
end
test "semantically updates CRDT set type with `compute_change`" do
{:ok, entry} =
TestRepo.insert(%MySchema{name: "Carl", age: 26, favorite_numbers: MapSet.new([10, 20])})
# Add 40, 30
assert {:ok, _} =
entry
|> Ecto.Changeset.change(favorite_numbers: MapSet.new([10, 20, 30, 40]))
|> TestRepo.update()
# Rm 10
assert {:ok, _} =
entry
|> Ecto.Changeset.change(favorite_numbers: MapSet.new([20]))
|> TestRepo.update()
expected_result = MapSet.new([20, 30, 40])
assert %{favorite_numbers: ^expected_result} = updated_entry = TestRepo.reload(entry)
# Rm 20, 30, add 50
assert {:ok, _} =
updated_entry
|> Ecto.Changeset.change(favorite_numbers: MapSet.new([40, 50]))
|> TestRepo.update()
expected_result = MapSet.new([40, 50])
assert %{favorite_numbers: ^expected_result} = TestRepo.reload(updated_entry)
end
test "semantically updates CRDT counter type with `compute_change`" do
{:ok, entry} = TestRepo.insert(%MySchema{name: "Carl", age: 26, likes_given: 1})
# last known value is 1, increasing by 4
assert {:ok, _} =
entry
|> Ecto.Changeset.change(likes_given: 5)
|> TestRepo.update()
# last known value is 1, increasing by 9
assert {:ok, _} =
entry
|> Ecto.Changeset.change(likes_given: 10)
|> TestRepo.update()
# total incrases is 13
assert %{likes_given: 14} = updated_entry = TestRepo.reload(entry)
# last known value is 14, increasing by 2
assert {:ok, %{likes_given: 16}} =
updated_entry
|> Ecto.Changeset.change(likes_given: 16)
|> TestRepo.update()
# last known value is 14, decreasing by 4
assert {:ok, %{likes_given: 12}} =
updated_entry
|> Ecto.Changeset.change(likes_given: 10)
|> TestRepo.update()
end
test "fails for invalid changeset" do
changeset =
%MySchema{id: Ecto.UUID.generate(), name: "Carl"}
|> Ecto.Changeset.cast(%{age: -10}, [:age])
|> Ecto.Changeset.validate_number(:age, greater_than_or_equal_to: 0)
assert {:error, %Ecto.Changeset{valid?: false}} = TestRepo.update(changeset)
end
end
describe "update!/2" do
test "raises for invalid changeset" do
assert_raise Ecto.InvalidChangesetError, fn ->
%MySchema{id: Ecto.UUID.generate(), name: "Carl"}
|> Ecto.Changeset.cast(%{age: -10}, [:age])
|> Ecto.Changeset.validate_number(:age, greater_than_or_equal_to: 0)
|> TestRepo.update!()
end
end
end
describe "insert_or_update/2" do
test "inserts and updates valid changeset" do
assert {:ok, %{id: id}} =
%MySchema{name: "Carl"}
|> Ecto.Changeset.change()
|> TestRepo.insert_or_update()
assert %{name: "Carl"} = entry = TestRepo.get(MySchema, id)
{:ok, %{id: ^id, name: "James"}} =
entry
|> Ecto.Changeset.change(name: "James")
|> TestRepo.insert_or_update()
assert %{name: "James"} = TestRepo.get(MySchema, id)
end
test "fails for invalid changeset" do
insert_changeset =
%MySchema{name: "Carl"}
|> Ecto.Changeset.cast(%{age: -10}, [:age])
|> Ecto.Changeset.validate_number(:age, greater_than_or_equal_to: 0)
assert {:error, %Ecto.Changeset{valid?: false}} =
TestRepo.insert_or_update(insert_changeset)
{:ok, entry} = TestRepo.insert(%MySchema{name: "John"})
update_changeset =
entry
|> Ecto.Changeset.cast(%{age: -10}, [:age])
|> Ecto.Changeset.validate_number(:age, greater_than_or_equal_to: 0)
assert {:error, %Ecto.Changeset{valid?: false}} =
TestRepo.insert_or_update(update_changeset)
end
end
describe "insert_or_update!/2" do
test "raises for invalid changeset" do
assert_raise Ecto.InvalidChangesetError, fn ->
%MySchema{name: "Carl"}
|> Ecto.Changeset.cast(%{age: -10}, [:age])
|> Ecto.Changeset.validate_number(:age, greater_than_or_equal_to: 0)
|> TestRepo.insert_or_update!()
end
{:ok, entry} = TestRepo.insert(%MySchema{name: "John"})
assert_raise Ecto.InvalidChangesetError, fn ->
entry
|> Ecto.Changeset.cast(%{age: -10}, [:age])
|> Ecto.Changeset.validate_number(:age, greater_than_or_equal_to: 0)
|> TestRepo.insert_or_update!()
end
end
end
describe "get" do
test "returns nil for inexistent keys" do
assert is_nil(TestRepo.get(MySchema, Ecto.UUID.generate()))
end
end
describe "all" do
import Ecto.Query
setup do
{:ok, entry_a} = TestRepo.insert(%MySchema{name: "John", age: 19})
{:ok, entry_b} = TestRepo.insert(%MySchema{name: "Alice", age: 22})
{:ok, entry_a: entry_a, entry_b: entry_b}
end
test "can handle `where` with `:==`", %{entry_a: entry_a} do
assert [] = from(MySchema, where: [id: ^Ecto.UUID.generate()]) |> TestRepo.all()
assert [%MySchema{} = result] = from(MySchema, where: [id: ^entry_a.id]) |> TestRepo.all()
assert entry_a.id == result.id
end
test "can handle `where` with `in`", %{entry_a: entry_a, entry_b: entry_b} do
list = [entry_a.id, entry_b.id, Ecto.UUID.generate()]
assert [result_a, result_b] =
from(MySchema)
|> where([m], m.id in ^list)
|> TestRepo.all()
assert result_a.id in [entry_a.id, entry_b.id]
assert result_b.id in [entry_a.id, entry_b.id]
end
test "can handle `where` expressions with `or`", %{entry_a: entry_a, entry_b: entry_b} do
[result_a, result_b] =
from(MySchema)
|> where([m], m.id == ^entry_a.id or m.id == ^entry_b.id)
|> TestRepo.all()
assert result_a.id in [entry_a.id, entry_b.id]
assert result_b.id in [entry_a.id, entry_b.id]
end
test "raises if trying to use `where` with non pk field" do
assert_raise(
RuntimeError,
"Vax only supports filtering by primary key in where expressions",
fn ->
from(MySchema)
|> where([m], m.age == ^10)
|> TestRepo.all()
end
)
end
test "raises if trying to use multiple `where` expressions", %{
entry_a: entry_a,
entry_b: entry_b
} do
assert_raise(RuntimeError, ~r/Vax only supports a single/, fn ->
from(MySchema)
|> where([m], m.id == ^entry_a.id)
|> where([m], m.id == ^entry_b.id)
|> TestRepo.all()
end)
end
test "raises if trying to use invalid `where` expression", %{entry_a: entry_a} do
assert_raise(RuntimeError, ~r/Unsupported expression/, fn ->
# `and` is one of the unsupported expressions
from(MySchema)
|> where([m], m.id == ^entry_a.id and m.id == ^entry_a.id)
|> TestRepo.all()
end)
end
end
# FIXME: reset operation seem to be sent correctly, but doesn't take effect
test "deleting a schema entry" do
{:ok, %{id: id} = entry} = TestRepo.insert(%MySchema{name: "Carl", age: 25})
assert {:ok, %MySchema{id: ^id}} = TestRepo.delete(entry)
assert nil = TestRepo.get(MySchema, id)
end
end
| 32.221591 | 98 | 0.601129 |
1c7b2280f7537bacc5321bad6f3821bdf075d450 | 4,613 | ex | Elixir | web_finngen_r8/lib/risteys/icd10.ex | vincent-octo/risteys | 5bb1e70b78988770048b91b42fad025faf98d84a | [
"MIT"
] | null | null | null | web_finngen_r8/lib/risteys/icd10.ex | vincent-octo/risteys | 5bb1e70b78988770048b91b42fad025faf98d84a | [
"MIT"
] | null | null | null | web_finngen_r8/lib/risteys/icd10.ex | vincent-octo/risteys | 5bb1e70b78988770048b91b42fad025faf98d84a | [
"MIT"
] | null | null | null | defmodule Risteys.Icd10 do
use Ecto.Schema
import Ecto.Changeset
schema "icd10s" do
field :code, :string
field :description, :string
many_to_many :phenocodes, Risteys.Phenocode, join_through: Risteys.PhenocodeIcd10
timestamps()
end
@doc false
def changeset(icd10, attrs) do
icd10
|> cast(attrs, [:code, :description])
|> validate_required([:code, :description])
|> unique_constraint(:code)
end
# Get info on ICD-10 from the source file and transform it into
# appropriate data structures.
# Dotted ICD-10s as input, undotted ICD-10s as output.
def init_parser(file_path) do
map_undotted_dotted =
file_path
|> File.stream!()
|> CSV.decode!(headers: true)
|> Enum.reduce(%{}, fn %{"CodeId" => dotted}, acc ->
undotted = String.replace(dotted, ".", "")
Map.put(acc, undotted, dotted)
end)
icd10s = Map.keys(map_undotted_dotted)
map_child_parent =
file_path
|> File.stream!()
|> CSV.decode!(headers: true)
|> Enum.reduce(%{}, fn %{"CodeId" => child, "ParentId" => parent}, acc ->
child = String.replace(child, ".", "")
parent = String.replace(parent, ".", "")
Map.put(acc, child, parent)
end)
map_parent_children =
Enum.reduce(map_child_parent, %{}, fn {child, parent}, acc ->
Map.update(acc, parent, MapSet.new([child]), fn set ->
MapSet.put(set, child)
end)
end)
{icd10s, map_undotted_dotted, map_child_parent, map_parent_children}
end
# Transform an undotted ICD-10 FI to its dotted representation.
# Note that an ICD-10 FI can be a "ICD range", like "W00-W19".
def to_dotted(icd10, map) do
Map.fetch!(map, icd10)
end
# Takes a raw ICD-10 rule and apply the logic to get a set of ICD-10s.
# Work is done with undotted ICD-10 codes, both input and output.
def parse_rule(nil, _, _, _), do: MapSet.new()
def parse_rule("", _, _, _), do: MapSet.new()
def parse_rule(pattern, icd10s, map_child_parent, map_parent_children) do
# ICD rules starting with % are mode encoding.
pattern =
case String.at(pattern, 0) do
"%" -> String.trim_leading(pattern, "%")
_ -> pattern
end
if Map.has_key?(map_child_parent, pattern) do
# Return early if it's an exact match
MapSet.new([pattern])
else
# Find the minimum set of ICD-10 (with using ICD-10 categories) that meet the definition
# In endpoint definition, symptoms pair are coded with "&" to
# denote either "+" or "*" to are used in the real symptoms pairs.
symbols_symptom_pair = "[\\*\\+]"
pattern = String.replace(pattern, "&", symbols_symptom_pair)
# Match only ICDs that starts with the pattern.
# For example this prevents "N77.0*A60.0" matching on "A6[0-4]"
regex = Regex.compile!("^(#{pattern})")
# 1. Get all matches of the regex on the ICD-10s
icd10s
# Exclude range ICDs from matching
|> Enum.reject(fn icd -> String.contains?(icd, "-") end)
|> Enum.filter(fn code -> Regex.match?(regex, code) end)
|> MapSet.new()
# 2. Group them by parent and bubble up until no reduction possible
|> group_reduce(map_child_parent, map_parent_children)
end
end
# Actual algorithm to expand an ICD-10 rule into a set of matching ICD-10s.
defp group_reduce(matches, map_child_parent, map_parent_children) do
{add, remove} =
matches
# Group matches by common parent
|> Enum.reduce(%{}, fn match, acc ->
%{^match => parent} = map_child_parent
Map.update(
acc,
parent,
MapSet.new([match]),
fn set -> MapSet.put(set, match) end
)
end)
# Replace a bunch of codes by their parent if they fully define the parent
|> Enum.reduce({MapSet.new(), MapSet.new()}, fn {parent, children}, {add, remove} ->
%{^parent => all_children} = map_parent_children
if MapSet.equal?(children, all_children) do
# A bunch of children codes can be reduce to their parent
add = MapSet.put(add, parent)
remove = MapSet.union(remove, children)
{add, remove}
else
# Can't reduce the group since it doesn't fully represent the parent
add = MapSet.union(add, children)
{add, remove}
end
end)
reduced = MapSet.difference(add, remove)
# recurs
if MapSet.equal?(matches, reduced) do
reduced
else
group_reduce(reduced, map_child_parent, map_parent_children)
end
end
end
| 32.258741 | 94 | 0.626707 |
1c7b906475e0aa209b2ea47407224bf79d5680e8 | 3,196 | ex | Elixir | clients/cloud_iot/lib/google_api/cloud_iot/v1/model/expr.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | null | null | null | clients/cloud_iot/lib/google_api/cloud_iot/v1/model/expr.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | 1 | 2020-12-18T09:25:12.000Z | 2020-12-18T09:25:12.000Z | clients/cloud_iot/lib/google_api/cloud_iot/v1/model/expr.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | 1 | 2020-10-04T10:12:44.000Z | 2020-10-04T10:12:44.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.CloudIot.V1.Model.Expr do
  @moduledoc """
  Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.
  ## Attributes
  *   `description` (*type:* `String.t`, *default:* `nil`) - Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
  *   `expression` (*type:* `String.t`, *default:* `nil`) - Textual representation of an expression in Common Expression Language syntax.
  *   `location` (*type:* `String.t`, *default:* `nil`) - Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
  *   `title` (*type:* `String.t`, *default:* `nil`) - Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
  """
  # NOTE(review): auto-generated file — do not hand-edit the field list; changes
  # should come from the code generator.
  use GoogleApi.Gax.ModelBase
  @type t :: %__MODULE__{
          :description => String.t(),
          :expression => String.t(),
          :location => String.t(),
          :title => String.t()
        }
  # `field/1` is provided by GoogleApi.Gax.ModelBase; presumably it registers
  # each attribute for JSON (de)serialization — confirm in the Gax library.
  field(:description)
  field(:expression)
  field(:location)
  field(:title)
end
# Poison protocol wiring: JSON decoding is handled by the generated model module.
defimpl Poison.Decoder, for: GoogleApi.CloudIot.V1.Model.Expr do
  defdelegate decode(value, options), to: GoogleApi.CloudIot.V1.Model.Expr
end
# Poison protocol wiring: JSON encoding is handled by the shared Gax base module.
defimpl Poison.Encoder, for: GoogleApi.CloudIot.V1.Model.Expr do
  def encode(value, options), do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 57.071429 | 1,092 | 0.734355 |
1c7bb3dd382f661685e5e3de988ea2a930645ef3 | 398 | ex | Elixir | lib/chat_api/demos/demo.ex | guanghuizeng/papercups | 0d7ca893edddea0d23f0772ccc5694edd407fc63 | [
"MIT"
] | null | null | null | lib/chat_api/demos/demo.ex | guanghuizeng/papercups | 0d7ca893edddea0d23f0772ccc5694edd407fc63 | [
"MIT"
] | null | null | null | lib/chat_api/demos/demo.ex | guanghuizeng/papercups | 0d7ca893edddea0d23f0772ccc5694edd407fc63 | [
"MIT"
] | null | null | null | defmodule ChatApi.Demos.Demo do
use Ecto.Schema
import Ecto.Changeset
@primary_key {:id, :binary_id, autogenerate: true}
@foreign_key_type :binary_id
schema "demos" do
field :count, :integer
field :name, :string
timestamps()
end
@doc false
def changeset(demo, attrs) do
demo
|> cast(attrs, [:name, :count])
|> validate_required([:name, :count])
end
end
| 18.952381 | 52 | 0.665829 |
1c7bb9c0aa6702449074e751ae5c3d1faf816e17 | 2,463 | exs | Elixir | test/index_remove_test.exs | evanob/sorted_set_nif | 1b261b532106b9461d3bab1c262ecea9d2d0b3de | [
"MIT"
] | null | null | null | test/index_remove_test.exs | evanob/sorted_set_nif | 1b261b532106b9461d3bab1c262ecea9d2d0b3de | [
"MIT"
] | null | null | null | test/index_remove_test.exs | evanob/sorted_set_nif | 1b261b532106b9461d3bab1c262ecea9d2d0b3de | [
"MIT"
] | null | null | null | defmodule Discord.SortedSet.IndexRemove.Test do
use ExUnit.Case
use ExUnitProperties
alias Discord.SortedSet
alias Discord.SortedSet.Test.Support.Generator
describe "remove from empty set" do
property "any supported term returns the nil and the set" do
check all term <- Generator.supported_term() do
assert set = SortedSet.new()
assert {nil, ^set} = SortedSet.index_remove(set, term)
end
end
property "any unsupported term returns an error" do
check all term <- Generator.unsupported_term() do
assert set = SortedSet.new()
assert {:error, :unsupported_type} = SortedSet.index_remove(set, term)
end
end
end
describe "remove from a populated set" do
property "remove an element that is not present does not change the set and returns nil" do
check all [present, missing] <- Generator.supported_terms(unique: true, length: 2) do
set =
SortedSet.new()
|> SortedSet.add(present)
assert SortedSet.size(set) == 1
before_remove = SortedSet.to_list(set)
assert {nil, ^set} = SortedSet.index_remove(set, missing)
assert SortedSet.size(set) == 1
after_remove = SortedSet.to_list(set)
assert before_remove == after_remove
end
end
property "remove an element that is present does change the set" do
check all terms <- Generator.supported_terms(unique: true, length: 2) do
[retain, remove] = Enum.sort(terms)
set =
SortedSet.new()
|> SortedSet.add(retain)
|> SortedSet.add(remove)
assert SortedSet.size(set) == 2
assert {1, ^set} = SortedSet.index_remove(set, remove)
assert SortedSet.size(set) == 1
assert SortedSet.to_list(set) == [retain]
end
end
property "removing maintains ordering of remaining items" do
check all terms <- Generator.supported_terms(unique: true, length: 3) do
[first, second, third] = Enum.sort(terms)
set =
SortedSet.new()
|> SortedSet.add(first)
|> SortedSet.add(second)
|> SortedSet.add(third)
assert SortedSet.size(set) == 3
assert SortedSet.to_list(set) == [first, second, third]
assert {1, ^set} = SortedSet.index_remove(set, second)
assert SortedSet.size(set) == 2
assert SortedSet.to_list(set) == [first, third]
end
end
end
end
| 30.036585 | 95 | 0.634186 |
1c7bde46fc21152b54246eb149df5170be5c09fa | 1,106 | ex | Elixir | lib/transferdrop_web/channels/user_socket.ex | sheosi/transferdrop | 9021fd1c0d2d28dddc141264ce95a4e8d896c3ee | [
"MIT"
] | null | null | null | lib/transferdrop_web/channels/user_socket.ex | sheosi/transferdrop | 9021fd1c0d2d28dddc141264ce95a4e8d896c3ee | [
"MIT"
] | null | null | null | lib/transferdrop_web/channels/user_socket.ex | sheosi/transferdrop | 9021fd1c0d2d28dddc141264ce95a4e8d896c3ee | [
"MIT"
] | null | null | null | defmodule TransferdropWeb.UserSocket do
use Phoenix.Socket
## Channels
# channel "room:*", TransferdropWeb.RoomChannel
# Socket params are passed from the client and can
# be used to verify and authenticate a user. After
# verification, you can put default assigns into
# the socket that will be set for all channels, ie
#
# {:ok, assign(socket, :user_id, verified_user_id)}
#
# To deny connection, return `:error`.
#
# See `Phoenix.Token` documentation for examples in
# performing token verification on connect.
@impl true
def connect(_params, socket, _connect_info) do
{:ok, socket}
end
# Socket id's are topics that allow you to identify all sockets for a given user:
#
# def id(socket), do: "user_socket:#{socket.assigns.user_id}"
#
# Would allow you to broadcast a "disconnect" event and terminate
# all active sockets and channels for a given user:
#
# TransferdropWeb.Endpoint.broadcast("user_socket:#{user.id}", "disconnect", %{})
#
# Returning `nil` makes this socket anonymous.
@impl true
def id(_socket), do: nil
end
| 30.722222 | 87 | 0.698915 |
1c7bdf82e4754052dcd27b9ef524553211a3f02c | 12,017 | exs | Elixir | test/actions/pagination_test.exs | kyle5794/ash | 82023da84400366d07001593673d1aaa2a418803 | [
"MIT"
] | null | null | null | test/actions/pagination_test.exs | kyle5794/ash | 82023da84400366d07001593673d1aaa2a418803 | [
"MIT"
] | null | null | null | test/actions/pagination_test.exs | kyle5794/ash | 82023da84400366d07001593673d1aaa2a418803 | [
"MIT"
] | null | null | null | defmodule Ash.Actions.PaginationTest do
use ExUnit.Case, async: true
require Ash.Query
defmodule User do
@moduledoc false
use Ash.Resource, data_layer: Ash.DataLayer.Ets
ets do
private?(true)
end
actions do
read :offset do
pagination offset?: true, countable: true
end
read :optional_offset do
pagination offset?: true, countable: true, required?: false
end
read :offset_countable_by_default do
pagination offset?: true, countable: :by_default, required?: false
end
read :required_offset_with_default do
pagination offset?: true, countable: true, required?: false, default_limit: 25
end
read :keyset do
pagination keyset?: true, countable: true
end
read :optional_keyset do
pagination keyset?: true, countable: true, required?: false
end
read :keyset_countable_by_default do
pagination keyset?: true, countable: :by_default, required?: false
end
read :required_keyset_with_default do
pagination keyset?: true, countable: true, required?: false, default_limit: 25
end
read :both_required do
primary? true
pagination keyset?: true, offset?: true, countable: true
end
read :both_optional do
pagination keyset?: true, offset?: true, countable: true, default_limit: 25
end
create :default
update :default
end
attributes do
attribute :id, :uuid, primary_key?: true, default: &Ecto.UUID.generate/0
attribute :name, :string
end
end
defmodule Api do
use Ash.Api
resources do
resource User
end
end
test "pagination is required by default" do
assert_raise Ash.Error.Invalid.PaginationRequired, fn ->
Api.read!(User, page: false)
end
end
test "a default limit allows not specifying page parameters" do
assert_raise Ash.Error.Invalid.LimitRequired, fn ->
Api.read!(User, page: [offset: 1])
end
Api.read!(User, action: :required_offset_with_default)
end
describe "offset pagination" do
setup do
for i <- 0..9 do
Api.create!(Ash.Changeset.new(User, %{name: "#{i}"}))
end
:ok
end
test "can be limited" do
assert Enum.count(Api.read!(User, action: :optional_offset, page: false)) == 10
assert Enum.count(Api.read!(User, action: :optional_offset, page: [limit: 5]).results) == 5
end
test "can be offset" do
assert Enum.count(Api.read!(User, action: :optional_offset, page: false)) == 10
assert Enum.count(
Api.read!(User, action: :optional_offset, page: [offset: 5, limit: 5]).results
) == 5
end
test "can include a full count" do
assert Api.read!(User, action: :optional_offset, page: [limit: 1, count: true]).count == 10
end
test "can default to including a count" do
assert Api.read!(User, action: :offset_countable_by_default, page: [limit: 1]).count == 10
end
test "count is not included by default otherwise" do
assert is_nil(Api.read!(User, action: :optional_offset, page: [limit: 1]).count)
end
test "`count: false` prevents the count from occurring even if it is on `by_default`" do
assert is_nil(
Api.read!(User,
action: :offset_countable_by_default,
page: [limit: 1, count: false]
).count
)
end
test "pagination works with a sort applied" do
names =
User
|> Ash.Query.sort(:name)
|> Api.read!(page: [offset: 5, limit: 5])
|> Map.get(:results)
|> Enum.map(& &1.name)
assert names == ["5", "6", "7", "8", "9"]
end
test "pagination works with a reversed sort applied" do
names =
User
|> Ash.Query.sort(name: :desc)
|> Api.read!(page: [offset: 5, limit: 5])
|> Map.get(:results)
|> Enum.map(& &1.name)
assert names == ["4", "3", "2", "1", "0"]
end
test "pagination works with a filter" do
names =
User
|> Ash.Query.sort(name: :desc)
|> Ash.Query.filter(name in ["4", "3", "2", "1", "0"])
|> Api.read!(page: [offset: 1, limit: 5])
|> Map.get(:results)
|> Enum.map(& &1.name)
assert names == ["3", "2", "1", "0"]
end
test "the next page can be fetched" do
assert %{results: [%{name: "3"}]} =
page =
User
|> Ash.Query.sort(name: :desc)
|> Ash.Query.filter(name in ["4", "3", "2", "1", "0"])
|> Api.read!(page: [offset: 1, limit: 1])
assert %{results: [%{name: "2"}]} = Api.page!(page, :next)
end
test "the previous page can be fetched" do
assert %{results: [%{name: "3"}]} =
page =
User
|> Ash.Query.sort(name: :desc)
|> Ash.Query.filter(name in ["4", "3", "2", "1", "0"])
|> Api.read!(page: [offset: 1, limit: 1])
assert %{results: [%{name: "4"}]} = Api.page!(page, :prev)
end
test "the first page can be fetched" do
assert %{results: [%{name: "2"}]} =
page =
User
|> Ash.Query.sort(name: :desc)
|> Ash.Query.filter(name in ["4", "3", "2", "1", "0"])
|> Api.read!(page: [offset: 2, limit: 1])
assert %{results: [%{name: "4"}]} = Api.page!(page, :first)
end
test "the last page can be fetched if the count was not requested" do
assert %{results: [%{name: "3"}]} =
page =
User
|> Ash.Query.sort(name: :desc)
|> Ash.Query.filter(name in ["4", "3", "2", "1", "0"])
|> Api.read!(page: [offset: 1, limit: 1])
assert %{results: [%{name: "0"}]} = Api.page!(page, :last)
end
test "the last page can be fetched if the count was requested" do
assert %{results: [%{name: "3"}]} =
page =
User
|> Ash.Query.sort(name: :desc)
|> Ash.Query.filter(name in ["4", "3", "2", "1", "0"])
|> Api.read!(page: [offset: 1, limit: 1, count: true])
assert %{results: [%{name: "0"}]} = Api.page!(page, :last)
end
end
describe "keyset pagination" do
setup do
for i <- 0..9 do
Api.create!(Ash.Changeset.new(User, %{name: "#{i}"}))
end
:ok
end
test "can be limited" do
assert Enum.count(Api.read!(User, action: :optional_keyset, page: false)) == 10
assert Enum.count(Api.read!(User, action: :optional_keyset, page: [limit: 5]).results) == 5
end
test "can include a full count" do
assert Api.read!(User, action: :optional_keyset, page: [limit: 1, count: true]).count == 10
end
test "can default to including a count" do
assert Api.read!(User, action: :keyset_countable_by_default, page: [limit: 1]).count == 10
end
test "count is not included by default otherwise" do
assert is_nil(Api.read!(User, action: :optional_keyset, page: [limit: 1]).count)
end
test "`count: false` prevents the count from occurring even if it is on `by_default`" do
assert is_nil(
Api.read!(User,
action: :keyset_countable_by_default,
page: [limit: 1, count: false]
).count
)
end
test "can ask for records after a specific keyset" do
%{results: [%{id: id, metadata: %{keyset: keyset}}]} =
Api.read!(User, action: :keyset, page: [limit: 1])
%{results: [%{id: next_id}]} =
Api.read!(User, action: :keyset, page: [limit: 1, after: keyset])
refute id == next_id
end
test "can ask for records before a specific keyset" do
%{results: [%{id: id, metadata: %{keyset: keyset}}]} =
Api.read!(User, action: :keyset, page: [limit: 1])
%{results: [%{id: next_id, metadata: %{keyset: keyset2}}]} =
Api.read!(User, action: :keyset, page: [limit: 1, after: keyset])
refute id == next_id
%{results: [%{id: before_id}]} =
Api.read!(User, action: :keyset, page: [limit: 1, before: keyset2])
assert id == before_id
end
test "pagination works with a sort applied" do
page =
User
|> Ash.Query.filter(name == "4")
|> Ash.Query.sort(:name)
|> Api.read!(page: [limit: 1])
keyset = Enum.at(page.results, 0).metadata.keyset
names =
User
|> Ash.Query.sort(:name)
|> Api.read!(page: [after: keyset, limit: 5])
|> Map.get(:results)
|> Enum.map(& &1.name)
assert names == ["5", "6", "7", "8", "9"]
end
test "pagination works with a reversed sort applied" do
page =
User
|> Ash.Query.filter(name == "5")
|> Ash.Query.sort(name: :desc)
|> Api.read!(page: [limit: 1])
keyset = Enum.at(page.results, 0).metadata.keyset
names =
User
|> Ash.Query.sort(name: :desc)
|> Api.read!(page: [after: keyset, limit: 5])
|> Map.get(:results)
|> Enum.map(& &1.name)
assert names == ["4", "3", "2", "1", "0"]
end
test "pagination works with a filter" do
page =
User
|> Ash.Query.filter(name == "5")
|> Ash.Query.sort(name: :desc)
|> Api.read!(page: [limit: 1])
keyset = Enum.at(page.results, 0).metadata.keyset
names =
User
|> Ash.Query.sort(name: :desc)
|> Ash.Query.filter(name != "4")
|> Api.read!(page: [after: keyset, limit: 5])
|> Map.get(:results)
|> Enum.map(& &1.name)
assert names == ["3", "2", "1", "0"]
end
test "the next page can be fetched" do
assert %{results: [%{name: "4"}]} =
page =
User
|> Ash.Query.sort(name: :desc)
|> Ash.Query.filter(name in ["4", "3", "2", "1", "0"])
|> Api.read!(page: [limit: 1])
assert %{results: [%{name: "3"}]} = Api.page!(page, :next)
end
test "the previous page can be fetched" do
assert %{results: [%{name: "4"}]} =
page =
User
|> Ash.Query.sort(name: :desc)
|> Ash.Query.filter(name in ["4", "3", "2", "1", "0"])
|> Api.read!(page: [limit: 1])
assert %{results: [%{name: "3"}]} = page = Api.page!(page, :next)
assert %{results: [%{name: "4"}]} = Api.page!(page, :prev)
end
test "the first page can be fetched" do
assert %{results: [%{name: "4"}]} =
page =
User
|> Ash.Query.sort(name: :desc)
|> Ash.Query.filter(name in ["4", "3", "2", "1", "0"])
|> Api.read!(page: [limit: 1])
assert %{results: [%{name: "3"}]} = page = Api.page!(page, :next)
assert %{results: [%{name: "4"}]} = Api.page!(page, :first)
end
test "the last page can be fetched" do
assert %{results: [%{name: "3"}]} =
page =
User
|> Ash.Query.sort(name: :desc)
|> Ash.Query.filter(name in ["4", "3", "2", "1", "0"])
|> Api.read!(page: [offset: 1, limit: 1])
assert %{results: [%{name: "0"}]} = Api.page!(page, :last)
end
end
describe "when both are supported" do
setup do
for i <- 0..9 do
Api.create!(Ash.Changeset.new(User, %{name: "#{i}"}))
end
:ok
end
test "it defaults to offset pagination" do
assert %Ash.Page.Offset{} = Api.read!(User, action: :both_optional, page: [limit: 10])
end
test "it adds a keyset to the records, even though it returns an offset page" do
for result <- Api.read!(User, action: :both_optional, page: [limit: 10]).results do
refute is_nil(result.metadata.keyset)
end
end
end
end
| 29.598522 | 97 | 0.534826 |
1c7beeb0c22868e129fdb8b345dea906da1e57a3 | 2,023 | ex | Elixir | lib/gotochgo_web.ex | geolessel/GOTOchgo2021 | 4061a04647456712ba08e8f3c3fce6fb6e30751a | [
"Apache-2.0"
] | null | null | null | lib/gotochgo_web.ex | geolessel/GOTOchgo2021 | 4061a04647456712ba08e8f3c3fce6fb6e30751a | [
"Apache-2.0"
] | null | null | null | lib/gotochgo_web.ex | geolessel/GOTOchgo2021 | 4061a04647456712ba08e8f3c3fce6fb6e30751a | [
"Apache-2.0"
] | 2 | 2021-11-11T17:19:17.000Z | 2021-12-27T16:06:23.000Z | defmodule GotochgoWeb do
@moduledoc """
The entrypoint for defining your web interface, such
as controllers, views, channels and so on.
This can be used in your application as:
use GotochgoWeb, :controller
use GotochgoWeb, :view
The definitions below will be executed for every view,
controller, etc, so keep them short and clean, focused
on imports, uses and aliases.
Do NOT define functions inside the quoted expressions
below. Instead, define any helper function in modules
and import those modules here.
"""
  # Quoted setup injected into every controller via `use GotochgoWeb, :controller`:
  # the Phoenix controller behaviour (views resolved under the GotochgoWeb
  # namespace) plus conn, gettext, route and LiveView-controller helpers.
  def controller do
    quote do
      use Phoenix.Controller, namespace: GotochgoWeb
      import Plug.Conn
      import GotochgoWeb.Gettext
      alias GotochgoWeb.Router.Helpers, as: Routes
      import Phoenix.LiveView.Controller
    end
  end
  # Quoted setup injected into every view via `use GotochgoWeb, :view`.
  # Templates are compiled from lib/gotochgo_web/templates; the shared
  # helpers come from view_helpers/0 below.
  def view do
    quote do
      use Phoenix.View,
        root: "lib/gotochgo_web/templates",
        namespace: GotochgoWeb
      # Import convenience functions from controllers
      import Phoenix.Controller,
        only: [get_flash: 1, get_flash: 2, view_module: 1, view_template: 1]
      # Include shared imports and aliases for views
      unquote(view_helpers())
    end
  end
  # Quoted setup injected into the router via `use GotochgoWeb, :router`.
  def router do
    quote do
      use Phoenix.Router
      import Plug.Conn
      import Phoenix.Controller
      # Adds live/4 and related macros for routing LiveViews.
      import Phoenix.LiveView.Router
    end
  end
  # Quoted setup injected into every channel via `use GotochgoWeb, :channel`.
  def channel do
    quote do
      use Phoenix.Channel
      import GotochgoWeb.Gettext
    end
  end
  # Shared view helpers spliced into view/0 (and usable from LiveView helpers):
  # HTML builders, rendering functions, error/translation helpers and routes.
  defp view_helpers do
    quote do
      # Use all HTML functionality (forms, tags, etc)
      use Phoenix.HTML
      # Import basic rendering functionality (render, render_layout, etc)
      import Phoenix.View
      import GotochgoWeb.ErrorHelpers
      import GotochgoWeb.Gettext
      alias GotochgoWeb.Router.Helpers, as: Routes
      import Phoenix.LiveView.Helpers
    end
  end
@doc """
When used, dispatch to the appropriate controller/view/etc.
"""
defmacro __using__(which) when is_atom(which) do
apply(__MODULE__, which, [])
end
end
| 22.988636 | 76 | 0.68957 |
1c7bf52c441ea77f3ed7a5ac7c6fb54ee5291f5d | 974 | exs | Elixir | config/config.exs | joaohf/epad | ff3e37c3fc3b13363f5fb728c6d4a0a3d0ef3fce | [
"MIT"
] | 2 | 2020-09-07T19:45:28.000Z | 2021-05-04T05:31:41.000Z | config/config.exs | joaohf/epad | ff3e37c3fc3b13363f5fb728c6d4a0a3d0ef3fce | [
"MIT"
] | null | null | null | config/config.exs | joaohf/epad | ff3e37c3fc3b13363f5fb728c6d4a0a3d0ef3fce | [
"MIT"
] | null | null | null | # This file is responsible for configuring your application
# and its dependencies with the aid of the Mix.Config module.
#
# This configuration file is loaded before any dependency and
# is restricted to this project.
# General application configuration
use Mix.Config
# Configures the endpoint
config :epad, EpadWeb.Endpoint,
url: [host: "localhost"],
secret_key_base: "/TpUDkNXVOAsr2bM2nZ2znQgjFNwyEjdLg9SMFOV+nVbrLcmIXeT+Kafu4NX3hYL",
render_errors: [view: EpadWeb.ErrorView, accepts: ~w(html json), layout: false],
pubsub_server: Epad.PubSub,
live_view: [signing_salt: "wHXf5OeL"]
# Configures Elixir's Logger
config :logger, :console,
format: "$time $metadata[$level] $message\n",
metadata: [:request_id]
# Use Jason for JSON parsing in Phoenix
config :phoenix, :json_library, Jason
# Import environment specific config. This must remain at the bottom
# of this file so it overrides the configuration defined above.
import_config "#{Mix.env()}.exs"
| 33.586207 | 86 | 0.767967 |
1c7c1bca360d6c94760be492b11bca9762534608 | 16,099 | ex | Elixir | lib/sanbase_web/graphql/resolvers/metric_resolver.ex | santiment/sanbase2 | 9ef6e2dd1e377744a6d2bba570ea6bd477a1db31 | [
"MIT"
] | 81 | 2017-11-20T01:20:22.000Z | 2022-03-05T12:04:25.000Z | lib/sanbase_web/graphql/resolvers/metric_resolver.ex | santiment/sanbase2 | 9ef6e2dd1e377744a6d2bba570ea6bd477a1db31 | [
"MIT"
] | 359 | 2017-10-15T14:40:53.000Z | 2022-01-25T13:34:20.000Z | lib/sanbase_web/graphql/resolvers/metric_resolver.ex | santiment/sanbase2 | 9ef6e2dd1e377744a6d2bba570ea6bd477a1db31 | [
"MIT"
] | 16 | 2017-11-19T13:57:40.000Z | 2022-02-07T08:13:02.000Z | defmodule SanbaseWeb.Graphql.Resolvers.MetricResolver do
import SanbaseWeb.Graphql.Helpers.Utils
import SanbaseWeb.Graphql.Helpers.CalibrateInterval
import Sanbase.Utils.ErrorHandling,
only: [handle_graphql_error: 3, maybe_handle_graphql_error: 2]
import Sanbase.Model.Project.Selector,
only: [args_to_selector: 1, args_to_raw_selector: 1]
import SanbaseWeb.Graphql.Helpers.Utils
alias Sanbase.Metric
alias Sanbase.Billing.Plan.Restrictions
alias Sanbase.Billing.Plan.AccessChecker
require Logger
@datapoints 300
  # Resolver for `getMetric`. Returns `{:ok, %{metric: metric}}` when the
  # metric exists and is not deprecated. When either check does not return
  # `true`, the failing check's return value falls through the `with`
  # unchanged — presumably an error tuple; confirm in `Sanbase.Metric`.
  def get_metric(_root, %{metric: metric}, _resolution) do
    with true <- Metric.is_not_deprecated?(metric),
         true <- Metric.has_metric?(metric) do
      {:ok, %{metric: metric}}
    end
  end
def get_available_metrics(_root, %{product: product, plan: plan}, _resolution) do
product = product |> Atom.to_string() |> String.upcase()
{:ok, AccessChecker.get_available_metrics_for_plan(product, plan)}
end
def get_available_metrics(_root, _args, _resolution),
do: {:ok, Metric.available_metrics()}
def get_available_slugs(_root, _args, %{source: %{metric: metric}}),
do: Metric.available_slugs(metric)
def get_human_readable_name(_root, _args, %{source: %{metric: metric}}),
do: Metric.human_readable_name(metric)
def get_metadata(%{}, _args, %{source: %{metric: metric}} = resolution) do
%{context: %{product_id: product_id, auth: %{plan: plan}}} = resolution
case Metric.metadata(metric) do
{:ok, metadata} ->
access_restrictions = Restrictions.get({:metric, metric}, plan, product_id)
{:ok, Map.merge(access_restrictions, metadata)}
{:error, error} ->
{:error, handle_graphql_error("metadata", %{metric: metric}, error)}
end
end
def timeseries_data_complexity(_root, args, resolution) do
# Explicitly set `child_complexity` to 2 as this would be the
# value if both `datetime` and `value` fields are queried.
child_complexity = 2
complexity =
SanbaseWeb.Graphql.Complexity.from_to_interval(args, child_complexity, resolution)
|> Sanbase.Math.to_integer()
{:ok, complexity}
end
  # Resolver for `availableSince` — the earliest datetime for which the metric
  # has data for the given selector. Any step failure falls through the `with`
  # and is wrapped into a descriptive GraphQL error carrying the metric name
  # and the raw (untransformed) selector.
  def available_since(_root, args, %{source: %{metric: metric}}) do
    with {:ok, selector} <- args_to_selector(args),
         {:ok, opts} <- selector_args_to_opts(args),
         {:ok, first_datetime} <- Metric.first_datetime(metric, selector, opts) do
      {:ok, first_datetime}
    end
    |> maybe_handle_graphql_error(fn error ->
      handle_graphql_error(
        "Available Since",
        %{metric: metric, selector: args_to_raw_selector(args)},
        error
      )
    end)
  end
  # Resolver for `lastDatetimeComputedAt` — when the metric was last computed
  # for the selector. Validates that the metric/selector pair is allowed
  # before querying; failures are wrapped into a descriptive GraphQL error.
  def last_datetime_computed_at(_root, args, %{source: %{metric: metric}}) do
    with {:ok, selector} <- args_to_selector(args),
         {:ok, opts} <- selector_args_to_opts(args),
         true <- valid_metric_selector_pair?(metric, selector) do
      Metric.last_datetime_computed_at(metric, selector, opts)
    end
    |> maybe_handle_graphql_error(fn error ->
      handle_graphql_error(
        "Last Datetime Computed At",
        %{metric: metric, selector: args_to_raw_selector(args)},
        error
      )
    end)
  end
def timeseries_data(_root, args, %{source: %{metric: metric}} = resolution) do
requested_fields = requested_fields(resolution)
fetch_timeseries_data(metric, args, requested_fields, :timeseries_data)
end
def timeseries_data_per_slug(
_root,
args,
%{source: %{metric: metric}} = resolution
) do
requested_fields = requested_fields(resolution)
fetch_timeseries_data(
metric,
args,
requested_fields,
:timeseries_data_per_slug
)
end
  # Resolver for `aggregatedTimeseriesData` — a single aggregated value over
  # the [from, to] range. Unless `include_incomplete_data` is set, the range
  # is first shrunk so that not-yet-complete periods are excluded. The result
  # map is keyed by selector; only the first value is returned — NOTE(review):
  # this presumably assumes a single-slug selector; confirm for list selectors.
  def aggregated_timeseries_data(
        _root,
        %{from: from, to: to} = args,
        %{source: %{metric: metric}}
      ) do
    include_incomplete_data = Map.get(args, :include_incomplete_data, false)
    with {:ok, selector} <- args_to_selector(args),
         true <- all_required_selectors_present?(metric, selector),
         true <- valid_metric_selector_pair?(metric, selector),
         true <- valid_owners_labels_selection?(args),
         {:ok, opts} <- selector_args_to_opts(args),
         {:ok, from, to} <-
           calibrate_incomplete_data_params(
             include_incomplete_data,
             Metric,
             metric,
             from,
             to
           ),
         {:ok, result} <-
           Metric.aggregated_timeseries_data(metric, selector, from, to, opts) do
      {:ok, Map.values(result) |> List.first()}
    end
    |> maybe_handle_graphql_error(fn error ->
      handle_graphql_error(metric, args_to_raw_selector(args), error)
    end)
  end
  # Resolver for `histogramData`. Validates the arguments (most metrics
  # require `from` — see valid_histogram_args?/2), normalizes the interval,
  # fetches the histogram and optionally enriches address results with labels.
  def histogram_data(
        _root,
        args,
        %{source: %{metric: metric}}
      ) do
    %{to: to, interval: interval, limit: limit} = args
    # from datetime arg is not required for `all_spent_coins_cost` metric which calculates
    # the histogram for all time.
    from = Map.get(args, :from, nil)
    # For some metrics the interval is normalized (e.g. rounded to whole days).
    interval = transform_interval(metric, interval)
    with true <- valid_histogram_args?(metric, args),
         {:ok, selector} <- args_to_selector(args),
         true <- valid_metric_selector_pair?(metric, selector),
         true <- valid_owners_labels_selection?(args),
         {:ok, data} <-
           Metric.histogram_data(metric, selector, from, to, interval, limit),
         {:ok, data} <- maybe_enrich_with_labels(metric, data) do
      {:ok, %{values: %{data: data}}}
    end
    |> maybe_handle_graphql_error(fn error ->
      handle_graphql_error(metric, args_to_raw_selector(args), error)
    end)
  end
  # Resolver for `tableData` — tabular representation of the metric over the
  # [from, to] range. Failures are wrapped into a descriptive GraphQL error.
  def table_data(
        _root,
        %{from: from, to: to} = args,
        %{source: %{metric: metric}}
      ) do
    with {:ok, selector} <- args_to_selector(args),
         true <- valid_metric_selector_pair?(metric, selector),
         true <- valid_owners_labels_selection?(args),
         {:ok, data} <- Metric.table_data(metric, selector, from, to) do
      {:ok, data}
    end
    |> maybe_handle_graphql_error(fn error ->
      handle_graphql_error(metric, args_to_raw_selector(args), error)
    end)
  end
  # Resolver for `latestMetricsData` — latest values for a list of metrics.
  # Caps metrics × slugs at 20_000 to bound the response size.
  def latest_metrics_data(_root, %{metrics: metrics} = args, _resolution) do
    with {:ok, selector} <- args_to_selector(args),
         :ok <- check_metrics_slugs_cartesian_limit(metrics, selector, 20_000),
         {:ok, data} <-
           Metric.LatestMetric.latest_metrics_data(metrics, selector) do
      {:ok, data}
    end
  end
# Private functions
  # required_selectors is a list of lists like [["slug"], ["label_fqn", "label_fqns"]]
  # At least one of the elements in every list must be present. A list of length
  # greater than 1 is used mostly with singular/plural versions of the same thing.
  #
  # Returns `true` when every required group is satisfied; otherwise the
  # failing step's return value (an error tuple) falls through the `with`.
  defp all_required_selectors_present?(metric, selector) do
    selector_names = Map.keys(selector)
    with {:ok, required_selectors} <- Metric.required_selectors(metric),
         true <-
           do_check_required_selectors(
             metric,
             selector_names,
             required_selectors
           ) do
      true
    end
  end
defp do_check_required_selectors(metric, selector_names, required_selectors) do
required_selectors
|> Enum.reduce_while(true, fn list, acc ->
case Enum.any?(list, &(&1 in selector_names)) do
true ->
{:cont, acc}
false ->
selectors_str =
list
|> Enum.map(&Atom.to_string/1)
|> Enum.map(&Inflex.camelize(&1, :lower))
|> Enum.join(", ")
{:halt,
{:error,
"The metric '#{metric}' must have at least one of the following fields in the selector: #{selectors_str}"}}
end
end)
end
# This is used when a list of slugs and a list of metrics is provided
# Every metric is fetched for every slug and the result length can get too big
defp check_metrics_slugs_cartesian_limit(
metrics,
%{slug: slug_or_slugs},
limit
) do
slugs = List.wrap(slug_or_slugs)
cartesian_product_length = length(metrics) * length(slugs)
case cartesian_product_length <= limit do
true -> :ok
false -> {:error, "The length of the metrics multiplied by the length of\
the slugs must not exceed #{limit}."}
end
end
  # timeseries_data and timeseries_data_per_slug are processed and fetched in
  # exactly the same way. The only difference is the function that is called
  # from the Metric module.
  #
  # Pipeline: validate selector/metric/field combinations, adjust the
  # from/to/interval triple (see transform_datetime_params/4), fetch via
  # `apply(Metric, function, ...)`, post-process with the requested transform
  # and drop nil datapoints. Failures become descriptive GraphQL errors.
  defp fetch_timeseries_data(metric, args, requested_fields, function)
       when function in [:timeseries_data, :timeseries_data_per_slug] do
    # The transform type arrives camelized from GraphQL; underscore it so it
    # matches the clauses of apply_transform/2.
    transform =
      Map.get(args, :transform, %{type: "none"})
      |> Map.update!(:type, &Inflex.underscore/1)
    with {:ok, selector} <- args_to_selector(args),
         true <- all_required_selectors_present?(metric, selector),
         true <- valid_metric_selector_pair?(metric, selector),
         true <- valid_owners_labels_selection?(args),
         true <- valid_timeseries_selection?(requested_fields, args),
         {:ok, opts} <- selector_args_to_opts(args),
         {:ok, from, to, interval} <-
           transform_datetime_params(selector, metric, transform, args),
         {:ok, result} <-
           apply(Metric, function, [metric, selector, from, to, interval, opts]),
         {:ok, result} <- apply_transform(transform, result),
         {:ok, result} <- fit_from_datetime(result, args) do
      {:ok, result |> Enum.reject(&is_nil/1)}
    end
    |> maybe_handle_graphql_error(fn error ->
      handle_graphql_error(metric, args_to_raw_selector(args), error)
    end)
  end
  # Adjusts the caller-provided from/to/interval before fetching:
  # 1. calibrate/8 tunes the range/interval toward ~@datapoints points
  #    (86_400s default interval);
  # 2. unless include_incomplete_data is set, the range is shrunk so
  #    incomplete periods are excluded;
  # 3. `from` is shifted further back when the transform needs extra leading
  #    datapoints (see calibrate_transform_params/4).
  defp transform_datetime_params(selector, metric, transform, args) do
    %{from: from, to: to, interval: interval} = args
    include_incomplete_data = Map.get(args, :include_incomplete_data, false)
    with {:ok, from, to, interval} <-
           calibrate(
             Metric,
             metric,
             selector,
             from,
             to,
             interval,
             86_400,
             @datapoints
           ),
         {:ok, from, to} <-
           calibrate_incomplete_data_params(
             include_incomplete_data,
             Metric,
             metric,
             from,
             to
           ),
         {:ok, from} <-
           calibrate_transform_params(transform, from, to, interval) do
      {:ok, from, to, interval}
    end
  end
  # eth2_top_stakers already carries its own labels — leave the data as-is.
  defp maybe_enrich_with_labels("eth2_top_stakers", data), do: {:ok, data}
  # Histogram entries with a binary `address` field get a `:labels` list
  # attached. NOTE(review): the blockchain is hard-coded to "ethereum" here —
  # confirm histogram address data can never come from another chain.
  defp maybe_enrich_with_labels(_metric, [%{address: address} | _] = data)
       when is_binary(address) do
    addresses = Enum.map(data, & &1.address) |> Enum.uniq()
    {:ok, labels} = Sanbase.Clickhouse.Label.get_address_labels("ethereum", addresses)
    labeled_data =
      Enum.map(data, fn %{address: address} = elem ->
        # Addresses without any known label get an empty list.
        address_labels = Map.get(labels, address, []) |> Enum.map(& &1.name)
        Map.put(elem, :labels, address_labels)
      end)
    {:ok, labeled_data}
  end
  # Anything that is not address-shaped passes through unchanged.
  defp maybe_enrich_with_labels(_metric, data), do: {:ok, data}
  # Computes how far back `from` must be shifted so the transform has the
  # extra leading datapoints it consumes. Returns `{:ok, adjusted_from}`.
  #
  # "none" and "cumulative_sum" need no extra history.
  defp calibrate_transform_params(%{type: type}, from, _to, _interval)
       when type in ["none", "cumulative_sum"] do
    {:ok, from}
  end
  # A moving average over `base` points needs `base` extra intervals of data.
  defp calibrate_transform_params(
         %{type: "moving_average", moving_average_base: base},
         from,
         _to,
         interval
       ) do
    shift_by_sec = base * Sanbase.DateTimeUtils.str_to_sec(interval)
    from = Timex.shift(from, seconds: -shift_by_sec)
    {:ok, from}
  end
  # Pairwise transforms consume one leading datapoint, so shift back by one
  # interval.
  defp calibrate_transform_params(%{type: type}, from, _to, interval)
       when type in [
              "changes",
              "consecutive_differences",
              "percent_change",
              "cumulative_percent_change"
            ] do
    shift_by_sec = Sanbase.DateTimeUtils.str_to_sec(interval)
    from = Timex.shift(from, seconds: -shift_by_sec)
    {:ok, from}
  end
  # Post-processes a list of %{datetime: _, value: _} datapoints according to
  # the requested transform type. Every clause returns {:ok, datapoints}.
  #
  # "none": identity.
  defp apply_transform(%{type: "none"}, data), do: {:ok, data}
  # Simple moving average over `base` points of the :value field.
  defp apply_transform(
         %{type: "moving_average", moving_average_base: base},
         data
       ) do
    Sanbase.Math.simple_moving_average(data, base, value_key: :value)
  end
  # Pairwise difference: each point becomes current - previous (the sliding
  # 2-window drops the first point via :discard).
  defp apply_transform(%{type: type}, data)
       when type in ["changes", "consecutive_differences"] do
    result =
      Stream.chunk_every(data, 2, 1, :discard)
      |> Enum.map(fn [%{value: previous}, %{value: current, datetime: datetime}] ->
        %{
          datetime: datetime,
          value: current - previous
        }
      end)
    {:ok, result}
  end
  # Running sum of :value; Enum.scan keeps the other map keys of each point.
  defp apply_transform(%{type: type}, data) when type in ["cumulative_sum"] do
    result =
      data
      |> Enum.scan(fn %{value: current} = elem, %{value: previous} ->
        %{elem | value: current + previous}
      end)
    {:ok, result}
  end
  # Pairwise percent change between consecutive points.
  defp apply_transform(%{type: type}, data) when type in ["percent_change"] do
    result =
      Stream.chunk_every(data, 2, 1, :discard)
      |> Enum.map(fn [%{value: previous}, %{value: current, datetime: datetime}] ->
        %{
          datetime: datetime,
          value: Sanbase.Math.percent_change(previous, current)
        }
      end)
    {:ok, result}
  end
  # Percent change of the running sum — composition of the two clauses above.
  defp apply_transform(%{type: type}, data)
       when type in ["cumulative_percent_change"] do
    {:ok, cumsum} = apply_transform(%{type: "cumulative_sum"}, data)
    apply_transform(%{type: "percent_change"}, cumsum)
  end
defp transform_interval("all_spent_coins_cost", interval) do
Enum.max([Sanbase.DateTimeUtils.str_to_days(interval), 1])
|> to_string
|> Kernel.<>("d")
end
defp transform_interval(_, interval), do: interval
@metrics_allowed_missing_from [
"all_spent_coins_cost",
"eth2_staked_amount_per_label"
]
# All histogram metrics except "all_spent_coins_cost" require `from` argument
defp valid_histogram_args?(metric, args) do
if metric in @metrics_allowed_missing_from or Map.get(args, :from) do
true
else
{:error, "Missing required `from` argument"}
end
end
  # Validates the owner/label parts of a selector:
  # - singular and plural variants (label/labels, owner/owners) are mutually
  #   exclusive;
  # - the plural variants must not be empty lists.
  # Returns `true` when valid, otherwise `{:error, message}`. Clause order in
  # the cond matters: exclusivity is checked before emptiness.
  defp valid_owners_labels_selection?(%{selector: selector} = _args) do
    cond do
      Map.has_key?(selector, :label) and Map.has_key?(selector, :labels) ->
        {:error, "Cannot use both 'label' and 'labels' fields at the same time."}
      Map.has_key?(selector, :owner) and Map.has_key?(selector, :owners) ->
        {:error, "Cannot use both 'owner' and 'owners' fields at the same time."}
      Map.has_key?(selector, :labels) and selector.labels == [] ->
        {:error, "The 'labels' selector field must not be an empty list."}
      Map.has_key?(selector, :owners) and selector.owners == [] ->
        {:error, "The 'owners' selector field must not be an empty list."}
      true ->
        true
    end
  end
  # Args without a :selector key have nothing to validate.
  defp valid_owners_labels_selection?(_), do: true
defp valid_timeseries_selection?(requested_fields, args) do
aggregation = Map.get(args, :aggregation, nil)
value_requested? = MapSet.member?(requested_fields, "value")
value_ohlc_requested? = MapSet.member?(requested_fields, "valueOhlc")
cond do
aggregation == :ohlc && value_requested? ->
{:error, "Field value shouldn't be selected when using aggregation ohlc"}
value_ohlc_requested? && aggregation != :ohlc ->
{:error, "Selected field valueOhlc works only with aggregation ohlc"}
value_requested? and value_ohlc_requested? ->
{:error, "Cannot select value and valueOhlc fields at the same time"}
true ->
true
end
end
  # Guard: the map has `key` and its value is a binary (string).
  defguard has_binary_key?(selector, key)
           when is_map_key(selector, key) and
                  is_binary(:erlang.map_get(key, selector))
  # social_active_users is keyed by a social `source` (twitter, telegram, ...)
  # rather than by slug, so a string :source field is required.
  defp valid_metric_selector_pair?("social_active_users", selector)
       when not has_binary_key?(selector, :source) do
    {:error,
     """
     The 'social_active_users' metric provides data for a social 'source' argument \
     (twitter, telegram, etc.). All the slug-related arguments are ignored. \
     Provide a 'source' argument.
     """}
  end
  # Every other metric/selector combination is accepted here.
  defp valid_metric_selector_pair?(_metric, _selector), do: true
end
| 32.855102 | 119 | 0.640909 |
1c7c1d6ce93484c3d7d664e318557b80c6db5e05 | 1,674 | ex | Elixir | clients/dataproc/lib/google_api/dataproc/v1/model/test_iam_permissions_request.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2021-12-20T03:40:53.000Z | 2021-12-20T03:40:53.000Z | clients/dataproc/lib/google_api/dataproc/v1/model/test_iam_permissions_request.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2020-08-18T00:11:23.000Z | 2020-08-18T00:44:16.000Z | clients/dataproc/lib/google_api/dataproc/v1/model/test_iam_permissions_request.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Dataproc.V1.Model.TestIamPermissionsRequest do
@moduledoc """
Request message for TestIamPermissions method.
## Attributes
* `permissions` (*type:* `list(String.t)`, *default:* `nil`) - The set of permissions to check for the resource. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see IAM Overview (https://cloud.google.com/iam/docs/overview#permissions).
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:permissions => list(String.t()) | nil
}
field(:permissions, type: :list)
end
defimpl Poison.Decoder, for: GoogleApi.Dataproc.V1.Model.TestIamPermissionsRequest do
def decode(value, options) do
GoogleApi.Dataproc.V1.Model.TestIamPermissionsRequest.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.Dataproc.V1.Model.TestIamPermissionsRequest do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 35.617021 | 285 | 0.746117 |
1c7c5c86b1c4ac2c707e69edf0530d81b23a661d | 32,463 | ex | Elixir | lib/aws/generated/connect.ex | smanolloff/aws-elixir | c7cb6577802f5010be7e7b6ccb2c0f3c8c73ea84 | [
"Apache-2.0"
] | null | null | null | lib/aws/generated/connect.ex | smanolloff/aws-elixir | c7cb6577802f5010be7e7b6ccb2c0f3c8c73ea84 | [
"Apache-2.0"
] | null | null | null | lib/aws/generated/connect.ex | smanolloff/aws-elixir | c7cb6577802f5010be7e7b6ccb2c0f3c8c73ea84 | [
"Apache-2.0"
] | null | null | null | # WARNING: DO NOT EDIT, AUTO-GENERATED CODE!
# See https://github.com/aws-beam/aws-codegen for more details.
defmodule AWS.Connect do
@moduledoc """
Amazon Connect is a cloud-based contact center solution that makes it easy to
set up and manage a customer contact center and provide reliable customer
engagement at any scale.
Amazon Connect provides rich metrics and real-time reporting that allow you to
optimize contact routing. You can also resolve customer issues more efficiently
by putting customers in touch with the right agents.
There are limits to the number of Amazon Connect resources that you can create
and limits to the number of requests that you can make per second. For more
information, see [Amazon Connect Service Quotas](https://docs.aws.amazon.com/connect/latest/adminguide/amazon-connect-service-limits.html)
in the *Amazon Connect Administrator Guide*.
To connect programmatically to an AWS service, you use an endpoint. For a list
of Amazon Connect endpoints, see [Amazon Connect Endpoints](https://docs.aws.amazon.com/general/latest/gr/connect_region.html).
Working with contact flows? Check out the [Amazon Connect Flow language](https://docs.aws.amazon.com/connect/latest/adminguide/flow-language.html).
"""
@doc """
Associates a set of queues with a routing profile.
"""
def associate_routing_profile_queues(client, instance_id, routing_profile_id, input, options \\ []) do
path_ = "/routing-profiles/#{URI.encode(instance_id)}/#{URI.encode(routing_profile_id)}/associate-queues"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Creates a contact flow for the specified Amazon Connect instance.
You can also create and update contact flows using the [Amazon Connect Flow language](https://docs.aws.amazon.com/connect/latest/adminguide/flow-language.html).
"""
def create_contact_flow(client, instance_id, input, options \\ []) do
path_ = "/contact-flows/#{URI.encode(instance_id)}"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Creates a new routing profile.
"""
def create_routing_profile(client, instance_id, input, options \\ []) do
path_ = "/routing-profiles/#{URI.encode(instance_id)}"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Creates a user account for the specified Amazon Connect instance.
For information about how to create user accounts using the Amazon Connect
console, see [Add Users](https://docs.aws.amazon.com/connect/latest/adminguide/user-management.html)
in the *Amazon Connect Administrator Guide*.
"""
def create_user(client, instance_id, input, options \\ []) do
path_ = "/users/#{URI.encode(instance_id)}"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Deletes a user account from the specified Amazon Connect instance.
For information about what happens to a user's data when their account is
deleted, see [Delete Users from Your Amazon Connect Instance](https://docs.aws.amazon.com/connect/latest/adminguide/delete-users.html)
in the *Amazon Connect Administrator Guide*.
"""
def delete_user(client, instance_id, user_id, input, options \\ []) do
path_ = "/users/#{URI.encode(instance_id)}/#{URI.encode(user_id)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Describes the specified contact flow.
You can also create and update contact flows using the [Amazon Connect Flow language](https://docs.aws.amazon.com/connect/latest/adminguide/flow-language.html).
"""
def describe_contact_flow(client, contact_flow_id, instance_id, options \\ []) do
path_ = "/contact-flows/#{URI.encode(instance_id)}/#{URI.encode(contact_flow_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Describes the specified routing profile.
"""
def describe_routing_profile(client, instance_id, routing_profile_id, options \\ []) do
path_ = "/routing-profiles/#{URI.encode(instance_id)}/#{URI.encode(routing_profile_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Describes the specified user account.
You can find the instance ID in the console (it’s the final part of the ARN).
The console does not display the user IDs. Instead, list the users and note the
IDs provided in the output.
"""
def describe_user(client, instance_id, user_id, options \\ []) do
path_ = "/users/#{URI.encode(instance_id)}/#{URI.encode(user_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Describes the specified hierarchy group.
"""
def describe_user_hierarchy_group(client, hierarchy_group_id, instance_id, options \\ []) do
path_ = "/user-hierarchy-groups/#{URI.encode(instance_id)}/#{URI.encode(hierarchy_group_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Describes the hierarchy structure of the specified Amazon Connect instance.
"""
def describe_user_hierarchy_structure(client, instance_id, options \\ []) do
path_ = "/user-hierarchy-structure/#{URI.encode(instance_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Disassociates a set of queues from a routing profile.
"""
def disassociate_routing_profile_queues(client, instance_id, routing_profile_id, input, options \\ []) do
path_ = "/routing-profiles/#{URI.encode(instance_id)}/#{URI.encode(routing_profile_id)}/disassociate-queues"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Retrieves the contact attributes for the specified contact.
"""
def get_contact_attributes(client, initial_contact_id, instance_id, options \\ []) do
path_ = "/contact/attributes/#{URI.encode(instance_id)}/#{URI.encode(initial_contact_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets the real-time metric data from the specified Amazon Connect instance.
For a description of each metric, see [Real-time Metrics Definitions](https://docs.aws.amazon.com/connect/latest/adminguide/real-time-metrics-definitions.html)
in the *Amazon Connect Administrator Guide*.
"""
def get_current_metric_data(client, instance_id, input, options \\ []) do
path_ = "/metrics/current/#{URI.encode(instance_id)}"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Retrieves a token for federation.
"""
def get_federation_token(client, instance_id, options \\ []) do
path_ = "/user/federate/#{URI.encode(instance_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets historical metric data from the specified Amazon Connect instance.
For a description of each historical metric, see [Historical Metrics Definitions](https://docs.aws.amazon.com/connect/latest/adminguide/historical-metrics-definitions.html)
in the *Amazon Connect Administrator Guide*.
"""
def get_metric_data(client, instance_id, input, options \\ []) do
path_ = "/metrics/historical/#{URI.encode(instance_id)}"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Provides information about the contact flows for the specified Amazon Connect
instance.
You can also create and update contact flows using the [Amazon Connect Flow language](https://docs.aws.amazon.com/connect/latest/adminguide/flow-language.html).
For more information about contact flows, see [Contact Flows](https://docs.aws.amazon.com/connect/latest/adminguide/concepts-contact-flows.html)
in the *Amazon Connect Administrator Guide*.
"""
def list_contact_flows(client, instance_id, contact_flow_types \\ nil, max_results \\ nil, next_token \\ nil, options \\ []) do
path_ = "/contact-flows-summary/#{URI.encode(instance_id)}"
headers = []
query_ = []
query_ = if !is_nil(next_token) do
[{"nextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxResults", max_results} | query_]
else
query_
end
query_ = if !is_nil(contact_flow_types) do
[{"contactFlowTypes", contact_flow_types} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Provides information about the hours of operation for the specified Amazon
Connect instance.
For more information about hours of operation, see [Set the Hours of Operation for a
Queue](https://docs.aws.amazon.com/connect/latest/adminguide/set-hours-operation.html)
in the *Amazon Connect Administrator Guide*.
"""
def list_hours_of_operations(client, instance_id, max_results \\ nil, next_token \\ nil, options \\ []) do
path_ = "/hours-of-operations-summary/#{URI.encode(instance_id)}"
headers = []
query_ = []
query_ = if !is_nil(next_token) do
[{"nextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxResults", max_results} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Provides information about the phone numbers for the specified Amazon Connect
instance.
For more information about phone numbers, see [Set Up Phone Numbers for Your Contact
Center](https://docs.aws.amazon.com/connect/latest/adminguide/contact-center-phone-number.html)
in the *Amazon Connect Administrator Guide*.
"""
def list_phone_numbers(client, instance_id, max_results \\ nil, next_token \\ nil, phone_number_country_codes \\ nil, phone_number_types \\ nil, options \\ []) do
path_ = "/phone-numbers-summary/#{URI.encode(instance_id)}"
headers = []
query_ = []
query_ = if !is_nil(phone_number_types) do
[{"phoneNumberTypes", phone_number_types} | query_]
else
query_
end
query_ = if !is_nil(phone_number_country_codes) do
[{"phoneNumberCountryCodes", phone_number_country_codes} | query_]
else
query_
end
query_ = if !is_nil(next_token) do
[{"nextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxResults", max_results} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Provides information about the prompts for the specified Amazon Connect
instance.
"""
def list_prompts(client, instance_id, max_results \\ nil, next_token \\ nil, options \\ []) do
path_ = "/prompts-summary/#{URI.encode(instance_id)}"
headers = []
query_ = []
query_ = if !is_nil(next_token) do
[{"nextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxResults", max_results} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Provides information about the queues for the specified Amazon Connect instance.
For more information about queues, see [Queues: Standard and Agent](https://docs.aws.amazon.com/connect/latest/adminguide/concepts-queues-standard-and-agent.html)
in the *Amazon Connect Administrator Guide*.
"""
def list_queues(client, instance_id, max_results \\ nil, next_token \\ nil, queue_types \\ nil, options \\ []) do
path_ = "/queues-summary/#{URI.encode(instance_id)}"
headers = []
query_ = []
query_ = if !is_nil(queue_types) do
[{"queueTypes", queue_types} | query_]
else
query_
end
query_ = if !is_nil(next_token) do
[{"nextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxResults", max_results} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
List the queues associated with a routing profile.
"""
def list_routing_profile_queues(client, instance_id, routing_profile_id, max_results \\ nil, next_token \\ nil, options \\ []) do
  path_ = "/routing-profiles/#{URI.encode(instance_id)}/#{URI.encode(routing_profile_id)}/queues"

  # Drop optional pagination parameters the caller did not supply.
  query_ =
    [{"maxResults", max_results}, {"nextToken", next_token}]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)

  request(client, :get, path_, query_, [], nil, options, nil)
end
@doc """
Provides summary information about the routing profiles for the specified Amazon
Connect instance.
For more information about routing profiles, see [Routing Profiles](https://docs.aws.amazon.com/connect/latest/adminguide/concepts-routing.html)
and [Create a Routing Profile](https://docs.aws.amazon.com/connect/latest/adminguide/routing-profiles.html)
in the *Amazon Connect Administrator Guide*.
"""
def list_routing_profiles(client, instance_id, max_results \\ nil, next_token \\ nil, options \\ []) do
  # Drop optional pagination parameters the caller did not supply.
  query_ =
    [{"maxResults", max_results}, {"nextToken", next_token}]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)

  request(client, :get, "/routing-profiles-summary/#{URI.encode(instance_id)}", query_, [], nil, options, nil)
end
@doc """
Provides summary information about the security profiles for the specified
Amazon Connect instance.
For more information about security profiles, see [Security Profiles](https://docs.aws.amazon.com/connect/latest/adminguide/connect-security-profiles.html)
in the *Amazon Connect Administrator Guide*.
"""
def list_security_profiles(client, instance_id, max_results \\ nil, next_token \\ nil, options \\ []) do
  # Drop optional pagination parameters the caller did not supply.
  query_ =
    [{"maxResults", max_results}, {"nextToken", next_token}]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)

  request(client, :get, "/security-profiles-summary/#{URI.encode(instance_id)}", query_, [], nil, options, nil)
end
@doc """
Lists the tags for the specified resource.
For sample policies that use tags, see [Amazon Connect Identity-Based Policy Examples](https://docs.aws.amazon.com/connect/latest/adminguide/security_iam_id-based-policy-examples.html)
in the *Amazon Connect Administrator Guide*.
"""
def list_tags_for_resource(client, resource_arn, options \\ []) do
  # Simple GET with no query parameters, headers, or body.
  request(client, :get, "/tags/#{URI.encode(resource_arn)}", [], [], nil, options, nil)
end
@doc """
Provides summary information about the hierarchy groups for the specified Amazon
Connect instance.
For more information about agent hierarchies, see [Set Up Agent Hierarchies](https://docs.aws.amazon.com/connect/latest/adminguide/agent-hierarchy.html)
in the *Amazon Connect Administrator Guide*.
"""
def list_user_hierarchy_groups(client, instance_id, max_results \\ nil, next_token \\ nil, options \\ []) do
  # Drop optional pagination parameters the caller did not supply.
  query_ =
    [{"maxResults", max_results}, {"nextToken", next_token}]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)

  request(client, :get, "/user-hierarchy-groups-summary/#{URI.encode(instance_id)}", query_, [], nil, options, nil)
end
@doc """
Provides summary information about the users for the specified Amazon Connect
instance.
"""
def list_users(client, instance_id, max_results \\ nil, next_token \\ nil, options \\ []) do
  # Drop optional pagination parameters the caller did not supply.
  query_ =
    [{"maxResults", max_results}, {"nextToken", next_token}]
    |> Enum.reject(fn {_name, value} -> is_nil(value) end)

  request(client, :get, "/users-summary/#{URI.encode(instance_id)}", query_, [], nil, options, nil)
end
@doc """
When a contact is being recorded, and the recording has been suspended using
SuspendContactRecording, this API resumes recording the call.
Only voice recordings are supported at this time.
"""
def resume_contact_recording(client, input, options \\ []) do
  # POST with the JSON body; no query parameters or extra headers.
  request(client, :post, "/contact/resume-recording", [], [], input, options, nil)
end
@doc """
Initiates a contact flow to start a new chat for the customer.
Response of this API provides a token required to obtain credentials from the
[CreateParticipantConnection](https://docs.aws.amazon.com/connect-participant/latest/APIReference/API_CreateParticipantConnection.html) API in the Amazon Connect Participant Service.
When a new chat contact is successfully created, clients need to subscribe to
the participant’s connection for the created chat within 5 minutes. This is
achieved by invoking
[CreateParticipantConnection](https://docs.aws.amazon.com/connect-participant/latest/APIReference/API_CreateParticipantConnection.html)
with WEBSOCKET and CONNECTION_CREDENTIALS.
A 429 error occurs in two situations:
* API rate limit is exceeded. API TPS throttling returns a
`TooManyRequests` exception from the API Gateway.
* The [quota for concurrent active chats](https://docs.aws.amazon.com/connect/latest/adminguide/amazon-connect-service-limits.html)
is exceeded. Active chat throttling returns a `LimitExceededException`.
For more information about how chat works, see
[Chat](https://docs.aws.amazon.com/connect/latest/adminguide/chat.html) in the
*Amazon Connect Administrator Guide*.
"""
def start_chat_contact(client, input, options \\ []) do
  path_ = "/contact/chat"
  headers = []
  query_ = []
  # Note: this action uses PUT, unlike the other /contact/* actions which POST.
  request(client, :put, path_, query_, headers, input, options, nil)
end

@doc """
This API starts recording the contact when the agent joins the call.
StartContactRecording is a one-time action. For example, if you use
StopContactRecording to stop recording an ongoing call, you can't use
StartContactRecording to restart it. For scenarios where the recording has
started and you want to suspend and resume it, such as when collecting sensitive
information (for example, a credit card number), use SuspendContactRecording and
ResumeContactRecording.
You can use this API to override the recording behavior configured in the [Set recording
behavior](https://docs.aws.amazon.com/connect/latest/adminguide/set-recording-behavior.html)
block.
Only voice recordings are supported at this time.
"""
def start_contact_recording(client, input, options \\ []) do
  path_ = "/contact/start-recording"
  headers = []
  query_ = []
  request(client, :post, path_, query_, headers, input, options, nil)
end

@doc """
This API places an outbound call to a contact, and then initiates the contact
flow.
It performs the actions in the contact flow that's specified (in
`ContactFlowId`).
Agents are not involved in initiating the outbound API (that is, dialing the
contact). If the contact flow places an outbound call to a contact, and then
puts the contact in queue, that's when the call is routed to the agent, like any
other inbound case.
There is a 60 second dialing timeout for this operation. If the call is not
connected after 60 seconds, it fails.
UK numbers with a 447 prefix are not allowed by default. Before you can dial
these UK mobile numbers, you must submit a service quota increase request. For
more information, see [Amazon Connect Service Quotas](https://docs.aws.amazon.com/connect/latest/adminguide/amazon-connect-service-limits.html)
in the *Amazon Connect Administrator Guide*.
"""
def start_outbound_voice_contact(client, input, options \\ []) do
  path_ = "/contact/outbound-voice"
  headers = []
  query_ = []
  # Note: this action uses PUT, like StartChatContact.
  request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Ends the specified contact.
"""
def stop_contact(client, input, options \\ []) do
  # POST with the JSON body; no query parameters or extra headers.
  request(client, :post, "/contact/stop", [], [], input, options, nil)
end
@doc """
When a contact is being recorded, this API stops recording the call.
StopContactRecording is a one-time action. If you use StopContactRecording to
stop recording an ongoing call, you can't use StartContactRecording to restart
it. For scenarios where the recording has started and you want to suspend it for
sensitive information (for example, to collect a credit card number), and then
restart it, use SuspendContactRecording and ResumeContactRecording.
Only voice recordings are supported at this time.
"""
def stop_contact_recording(client, input, options \\ []) do
  path_ = "/contact/stop-recording"
  headers = []
  query_ = []
  request(client, :post, path_, query_, headers, input, options, nil)
end

@doc """
When a contact is being recorded, this API suspends recording the call.
For example, you might suspend the call recording while collecting sensitive
information, such as a credit card number. Then use ResumeContactRecording to
restart recording.
The period of time that the recording is suspended is filled with silence in the
final recording.
Only voice recordings are supported at this time.
"""
def suspend_contact_recording(client, input, options \\ []) do
  path_ = "/contact/suspend-recording"
  headers = []
  query_ = []
  request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Adds the specified tags to the specified resource.
The supported resource types are users, routing profiles, and contact flows.
For sample policies that use tags, see [Amazon Connect Identity-Based Policy Examples](https://docs.aws.amazon.com/connect/latest/adminguide/security_iam_id-based-policy-examples.html)
in the *Amazon Connect Administrator Guide*.
"""
def tag_resource(client, resource_arn, input, options \\ []) do
  # POST the tag map as the JSON body; no query parameters or extra headers.
  request(client, :post, "/tags/#{URI.encode(resource_arn)}", [], [], input, options, nil)
end
@doc """
Removes the specified tags from the specified resource.
"""
def untag_resource(client, resource_arn, input, options \\ []) do
  path_ = "/tags/#{URI.encode(resource_arn)}"
  headers = []
  # Presumably AWS.Request.build_params/2 extracts the "tagKeys" field from the
  # input body into the query string — TODO confirm against AWS.Request docs.
  {query_, input} =
    [
      {"tagKeys", "tagKeys"},
    ]
    |> AWS.Request.build_params(input)
  request(client, :delete, path_, query_, headers, input, options, nil)
end

@doc """
Creates or updates the contact attributes associated with the specified contact.
You can add or update attributes for both ongoing and completed contacts. For
example, you can update the customer's name or the reason the customer called
while the call is active, or add notes about steps that the agent took during
the call that are displayed to the next agent that takes the call. You can also
update attributes for a contact using data from your CRM application and save
the data with the contact in Amazon Connect. You could also flag calls for
additional analysis, such as legal review or identifying abusive callers.
Contact attributes are available in Amazon Connect for 24 months, and are then
deleted.
This operation is also available in the Amazon Connect Flow language. See
[UpdateContactAttributes](https://docs.aws.amazon.com/connect/latest/adminguide/contact-actions-updatecontactattributes.html).
**Important:** You cannot use the operation to update attributes for contacts
that occurred prior to the release of the API, September 12, 2018. You can
update attributes only for contacts that started after the release of the API.
If you attempt to update attributes for a contact that occurred prior to the
release of the API, a 400 error is returned. This applies also to queued
callbacks that were initiated prior to the release of the API but are still
active in your instance.
"""
def update_contact_attributes(client, input, options \\ []) do
  path_ = "/contact/attributes"
  headers = []
  query_ = []
  request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Updates the specified contact flow.
You can also create and update contact flows using the [Amazon Connect Flow language](https://docs.aws.amazon.com/connect/latest/adminguide/flow-language.html).
"""
def update_contact_flow_content(client, contact_flow_id, instance_id, input, options \\ []) do
  path_ = "/contact-flows/#{URI.encode(instance_id)}/#{URI.encode(contact_flow_id)}/content"

  request(client, :post, path_, [], [], input, options, nil)
end
@doc """
Updates the name of the specified contact flow.
"""
def update_contact_flow_name(client, contact_flow_id, instance_id, input, options \\ []) do
  path_ = "/contact-flows/#{URI.encode(instance_id)}/#{URI.encode(contact_flow_id)}/name"
  headers = []
  query_ = []
  request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Updates the channels that agents can handle in the Contact Control Panel (CCP)
for a routing profile.
"""
def update_routing_profile_concurrency(client, instance_id, routing_profile_id, input, options \\ []) do
  path_ = "/routing-profiles/#{URI.encode(instance_id)}/#{URI.encode(routing_profile_id)}/concurrency"

  request(client, :post, path_, [], [], input, options, nil)
end
@doc """
Updates the default outbound queue of a routing profile.
"""
def update_routing_profile_default_outbound_queue(client, instance_id, routing_profile_id, input, options \\ []) do
  path_ = "/routing-profiles/#{URI.encode(instance_id)}/#{URI.encode(routing_profile_id)}/default-outbound-queue"

  request(client, :post, path_, [], [], input, options, nil)
end
@doc """
Updates the name and description of a routing profile.
The request accepts the following data in JSON format. At least `Name` or
`Description` must be provided.
"""
def update_routing_profile_name(client, instance_id, routing_profile_id, input, options \\ []) do
  path_ = "/routing-profiles/#{URI.encode(instance_id)}/#{URI.encode(routing_profile_id)}/name"
  headers = []
  query_ = []
  request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Updates the properties associated with a set of queues for a routing profile.
"""
def update_routing_profile_queues(client, instance_id, routing_profile_id, input, options \\ []) do
  path_ = "/routing-profiles/#{URI.encode(instance_id)}/#{URI.encode(routing_profile_id)}/queues"

  request(client, :post, path_, [], [], input, options, nil)
end
@doc """
Assigns the specified hierarchy group to the specified user.
"""
def update_user_hierarchy(client, instance_id, user_id, input, options \\ []) do
  path_ = "/users/#{URI.encode(instance_id)}/#{URI.encode(user_id)}/hierarchy"

  request(client, :post, path_, [], [], input, options, nil)
end
@doc """
Updates the identity information for the specified user.
Someone with the ability to invoke `UpdateUserIdentityInfo` can change the
login credentials of other users by changing their email address. This poses a
security risk to your organization. They can change the email address of a user
to the attacker's email address, and then reset the password through email. We
strongly recommend limiting who has the ability to invoke
`UpdateUserIdentityInfo`. For more information, see [Best Practices for Security
Profiles](https://docs.aws.amazon.com/connect/latest/adminguide/security-profile-best-practices.html)
in the *Amazon Connect Administrator Guide*.
"""
def update_user_identity_info(client, instance_id, user_id, input, options \\ []) do
  path_ = "/users/#{URI.encode(instance_id)}/#{URI.encode(user_id)}/identity-info"
  headers = []
  query_ = []
  request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Updates the phone configuration settings for the specified user.
"""
def update_user_phone_config(client, instance_id, user_id, input, options \\ []) do
  path_ = "/users/#{URI.encode(instance_id)}/#{URI.encode(user_id)}/phone-config"

  request(client, :post, path_, [], [], input, options, nil)
end
@doc """
Assigns the specified routing profile to the specified user.
"""
def update_user_routing_profile(client, instance_id, user_id, input, options \\ []) do
  path_ = "/users/#{URI.encode(instance_id)}/#{URI.encode(user_id)}/routing-profile"

  request(client, :post, path_, [], [], input, options, nil)
end
@doc """
Assigns the specified security profiles to the specified user.
"""
def update_user_security_profiles(client, instance_id, user_id, input, options \\ []) do
  path_ = "/users/#{URI.encode(instance_id)}/#{URI.encode(user_id)}/security-profiles"

  request(client, :post, path_, [], [], input, options, nil)
end
@spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
        {:ok, map() | nil, map()}
        | {:error, term()}
# Build, sign (SigV4), and dispatch one HTTP request to the Connect service.
defp request(client, method, path, query, headers, input, options, success_status_code) do
  # Force the service name so signing/host resolution always targets "connect".
  client = %{client | service: "connect"}
  host = build_host("connect", client)
  url = host
  |> build_url(path, client)
  |> add_query(query, client)
  additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
  headers = AWS.Request.add_headers(additional_headers, headers)
  # Sign over the final URL and the encoded payload; headers gain the signature.
  payload = encode!(client, input)
  headers = AWS.Request.sign_v4(client, method, url, headers, payload)
  perform_request(client, method, url, payload, headers, options, success_status_code)
end

# Execute the signed request and normalize the response into
# {:ok, decoded_body | nil, raw_response} or {:error, reason}.
defp perform_request(client, method, url, payload, headers, options, success_status_code) do
  case AWS.Client.request(client, method, url, payload, headers, options) do
    # Stacked `when` guards are OR-ed: accept the default success codes
    # (200/202/204) when no explicit code was requested, OR the exact
    # requested status code otherwise.
    {:ok, %{status_code: status_code, body: body} = response}
    when is_nil(success_status_code) and status_code in [200, 202, 204]
    when status_code == success_status_code ->
      # An empty body yields `nil` instead of being passed to the JSON decoder.
      body = if(body != "", do: decode!(client, body))
      {:ok, body, response}
    {:ok, response} ->
      {:error, {:unexpected_response, response}}
    error = {:error, _reason} -> error
  end
end
# Resolve the hostname for the service endpoint.
#
# A "local" region with an explicit endpoint uses it verbatim; a "local"
# region without one falls back to "localhost"; any other region composes
# the conventional "<prefix>.<region>.<endpoint>" AWS hostname.
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}), do: endpoint

defp build_host(_endpoint_prefix, %{region: "local"}), do: "localhost"

defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
  Enum.join([endpoint_prefix, region, endpoint], ".")
end
# Compose the absolute request URL from the client's scheme/port plus host and path.
defp build_url(host, path, %{proto: proto, port: port}) do
  "#{proto}://#{host}:#{port}#{path}"
end
# Append the encoded query string to the URL; an empty query leaves it untouched.
defp add_query(url, [], _client) do
  url
end
defp add_query(url, query, client) do
  querystring = encode!(client, query, :query)
  "#{url}?#{querystring}"
end
# Encode a payload via the client's configured encoder (JSON by default).
defp encode!(client, payload, format \\ :json) do
  AWS.Client.encode!(client, payload, format)
end
# Decode a JSON response body via the client's configured decoder.
defp decode!(client, payload) do
  AWS.Client.decode!(client, payload, :json)
end
end
| 37.968421 | 186 | 0.701075 |
1c7c6d595c9143bec91be7dff947da7619aef827 | 2,820 | ex | Elixir | clients/compute/lib/google_api/compute/v1/model/ssl_certificate_list.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2021-12-20T03:40:53.000Z | 2021-12-20T03:40:53.000Z | clients/compute/lib/google_api/compute/v1/model/ssl_certificate_list.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2020-08-18T00:11:23.000Z | 2020-08-18T00:44:16.000Z | clients/compute/lib/google_api/compute/v1/model/ssl_certificate_list.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Compute.V1.Model.SslCertificateList do
  @moduledoc """
  Contains a list of SslCertificate resources.
  ## Attributes
  * `id` (*type:* `String.t`, *default:* `nil`) - [Output Only] Unique identifier for the resource; defined by the server.
  * `items` (*type:* `list(GoogleApi.Compute.V1.Model.SslCertificate.t)`, *default:* `nil`) - A list of SslCertificate resources.
  * `kind` (*type:* `String.t`, *default:* `compute#sslCertificateList`) - Type of resource.
  * `nextPageToken` (*type:* `String.t`, *default:* `nil`) - [Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.
  * `selfLink` (*type:* `String.t`, *default:* `nil`) - [Output Only] Server-defined URL for this resource.
  * `warning` (*type:* `GoogleApi.Compute.V1.Model.SslCertificateListWarning.t`, *default:* `nil`) - [Output Only] Informational warning message.
  """
  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :id => String.t() | nil,
          :items => list(GoogleApi.Compute.V1.Model.SslCertificate.t()) | nil,
          :kind => String.t() | nil,
          :nextPageToken => String.t() | nil,
          :selfLink => String.t() | nil,
          :warning => GoogleApi.Compute.V1.Model.SslCertificateListWarning.t() | nil
        }

  # Field declarations drive ModelBase's JSON (de)serialization.
  field(:id)
  field(:items, as: GoogleApi.Compute.V1.Model.SslCertificate, type: :list)
  field(:kind)
  field(:nextPageToken)
  field(:selfLink)
  field(:warning, as: GoogleApi.Compute.V1.Model.SslCertificateListWarning)
end

# Delegate Poison decoding to the generated model's own decode/2.
defimpl Poison.Decoder, for: GoogleApi.Compute.V1.Model.SslCertificateList do
  def decode(value, options) do
    GoogleApi.Compute.V1.Model.SslCertificateList.decode(value, options)
  end
end

# Delegate Poison encoding to the shared Gax ModelBase encoder.
defimpl Poison.Encoder, for: GoogleApi.Compute.V1.Model.SslCertificateList do
  def encode(value, options) do
    GoogleApi.Gax.ModelBase.encode(value, options)
  end
end
| 45.483871 | 393 | 0.714894 |
1c7c7e19bb1d00a59781e9dd66c807371e2134e9 | 8,492 | ex | Elixir | lib/console_web/controllers/channel_controller.ex | maco2035/console | 2a9a65678b8c671c7d92cdb62dfcfc71b84957c5 | [
"Apache-2.0"
] | 83 | 2018-05-31T14:49:10.000Z | 2022-03-27T16:49:49.000Z | lib/console_web/controllers/channel_controller.ex | maco2035/console | 2a9a65678b8c671c7d92cdb62dfcfc71b84957c5 | [
"Apache-2.0"
] | 267 | 2018-05-22T23:19:02.000Z | 2022-03-31T04:31:06.000Z | lib/console_web/controllers/channel_controller.ex | maco2035/console | 2a9a65678b8c671c7d92cdb62dfcfc71b84957c5 | [
"Apache-2.0"
] | 18 | 2018-11-20T05:15:54.000Z | 2022-03-28T08:20:13.000Z | defmodule ConsoleWeb.ChannelController do
use ConsoleWeb, :controller
alias Console.Repo
alias Console.Organizations
alias Console.Channels
alias Console.Channels.Channel
alias Console.Functions
alias Console.Alerts
alias Console.AlertEvents
alias Console.Flows

# Every action is gated by the authorization plug; failed `with` matches in
# actions fall through to the shared fallback controller.
plug ConsoleWeb.Plug.AuthorizeAction
action_fallback ConsoleWeb.FallbackController
# For create adafruit / google sheets channel
# Creates a channel plus a paired function (decoder) and wires both into the
# organization's flow graph inside a single DB transaction.
def create(conn, %{"channel" => channel_params, "func" => function_params }) do
  current_organization = conn.assigns.current_organization
  channel_params = Map.merge(channel_params, %{"organization_id" => current_organization.id})
  result =
    Ecto.Multi.new()
    |> Ecto.Multi.run(:channel, fn _repo, _ ->
      Channels.create_channel(current_organization, channel_params)
    end)
    |> Ecto.Multi.run(:function, fn _repo, %{ channel: _channel } ->
      # "custom" reuses an existing function; "cayenne"/"googlesheet" create one
      # named after the channel.
      case function_params["format"] do
        "custom" ->
          function = Functions.get_function!(current_organization, function_params["id"])
          {:ok, function}
        "cayenne" ->
          function_params = Map.merge(function_params, %{"name" => channel_params["name"], "type" => "decoder", "organization_id" => current_organization.id })
          Functions.create_function(function_params, current_organization)
        "googlesheet" ->
          function_params = Map.merge(function_params, %{"name" => channel_params["name"], "type" => "decoder", "organization_id" => current_organization.id, "format" => "custom" })
          Functions.create_function(function_params, current_organization)
      end
    end)
    |> Ecto.Multi.run(:flow, fn _repo, %{ channel: channel, function: function } ->
      # Add a function -> channel edge and fixed node positions to the
      # organization's stored flow diagram.
      new_edge = %{
        "source" => "function-" <> function.id,
        "target" => "channel-" <> channel.id
      }
      edges =
        case Map.get(current_organization.flow, "edges") do
          nil -> [new_edge]
          edges -> [new_edge | edges]
        end
      updated_flow =
        current_organization.flow
        |> Map.put("channel-" <> channel.id, %{ "position" => %{"x" => 300, "y" => 50} })
        |> Map.put("function-" <> function.id, %{ "position" => %{"x" => 50, "y" => 50} })
        |> Map.put("edges", edges)
      Organizations.update_organization(current_organization, %{ "flow" => updated_flow })
    end)
    |> Repo.transaction()
  case result do
    {:error, _, changeset, _} -> {:error, changeset}
    {:ok, %{ channel: channel, function: _function}} ->
      ConsoleWeb.Endpoint.broadcast("graphql:channel_index_bar", "graphql:channel_index_bar:#{current_organization.id}:channel_list_update", %{})
      conn
      |> put_status(:created)
      |> render("show.json", channel: channel)
    _ -> result
  end
end

# Creates a plain channel (no paired function) scoped to the current organization.
def create(conn, %{"channel" => channel_params}) do
  current_organization = conn.assigns.current_organization
  channel_params = Map.merge(channel_params, %{"organization_id" => current_organization.id})
  with {:ok, %Channel{} = channel} <- Channels.create_channel(current_organization, channel_params) do
    ConsoleWeb.Endpoint.broadcast("graphql:channel_index_bar", "graphql:channel_index_bar:#{current_organization.id}:channel_list_update", %{})
    conn
    |> put_status(:created)
    |> render("show.json", channel: channel)
  end
end

# Updates a channel, broadcasts graphql refresh events, notifies connected
# devices, and fires an alert event when devices are attached to the channel.
def update(conn, %{"id" => id, "channel" => channel_params}) do
  current_organization = conn.assigns.current_organization
  channel = Channels.get_channel!(current_organization, id)
  affected_flows = Flows.get_flows_with_channel_id(current_organization.id, channel.id)
  all_device_ids = Flows.get_all_flows_associated_device_ids(affected_flows)
  # get channel info before updating; nil when no devices are attached, which
  # suppresses the alert notification below.
  updated_channel = case length(all_device_ids) do
    0 -> nil
    _ -> %{ channel_id: id, channel_name: channel.name }
  end
  with {:ok, %Channel{} = channel} <- Channels.update_channel(channel, current_organization, channel_params) do
    ConsoleWeb.Endpoint.broadcast("graphql:channel_show", "graphql:channel_show:#{channel.id}:channel_update", %{})
    ConsoleWeb.Endpoint.broadcast("graphql:resources_update", "graphql:resources_update:#{current_organization.id}:organization_resources_update", %{})
    ConsoleWeb.Endpoint.broadcast("graphql:channel_index_bar", "graphql:channel_index_bar:#{current_organization.id}:channel_list_update", %{})
    broadcast_router_update_devices(all_device_ids)
    if updated_channel != nil do
      { _, time } = Timex.format(Timex.now, "%H:%M:%S UTC", :strftime)
      details = %{
        channel_name: updated_channel.channel_name,
        updated_by: conn.assigns.current_user.email,
        time: time
      }
      AlertEvents.notify_alert_event(channel.id, "integration", "integration_with_devices_updated", details)
    end
    conn
    |> put_resp_header("message", "Integration #{channel.name} updated successfully")
    |> render("show.json", channel: channel)
  end
end
# Deletes a channel, broadcasts graphql refresh events, notifies connected
# devices, and (when devices were attached) fires a deletion alert and cleans
# up alert bookkeeping for the integration.
def delete(conn, %{"id" => id}) do
  current_organization = conn.assigns.current_organization
  channel = Channels.get_channel!(current_organization, id)
  affected_flows = Flows.get_flows_with_channel_id(current_organization.id, channel.id)
  all_device_ids = Flows.get_all_flows_associated_device_ids(affected_flows)

  # Capture channel info before deleting. Uses the already-loaded,
  # organization-scoped `channel` rather than re-fetching the record with the
  # unscoped `Channels.get_channel!/1` (same row, one fewer query). Matching on
  # the list shape also avoids an O(n) `length/1` just to test emptiness.
  deleted_channel =
    case all_device_ids do
      [] -> nil
      _ -> %{channel_id: id, channel_name: channel.name}
    end

  with {:ok, %Channel{} = channel} <- Channels.delete_channel(channel) do
    ConsoleWeb.Endpoint.broadcast("graphql:channels_index_table", "graphql:channels_index_table:#{current_organization.id}:channel_list_update", %{})
    ConsoleWeb.Endpoint.broadcast("graphql:channel_index_bar", "graphql:channel_index_bar:#{current_organization.id}:channel_list_update", %{})
    ConsoleWeb.Endpoint.broadcast("graphql:flows_nodes_menu", "graphql:flows_nodes_menu:#{current_organization.id}:all_resources_update", %{})

    if deleted_channel != nil do
      {_, time} = Timex.format(Timex.now, "%H:%M:%S UTC", :strftime)
      details = %{
        channel_name: deleted_channel.channel_name,
        deleted_by: conn.assigns.current_user.email,
        time: time
      }
      # Clear pending alerts for the integration before emitting the deletion event.
      AlertEvents.delete_unsent_alert_events_for_integration(id)
      AlertEvents.notify_alert_event(deleted_channel.channel_id, "integration", "integration_with_devices_deleted", details)
      Alerts.delete_alert_nodes(id, "integration")
    end

    broadcast_router_update_devices(all_device_ids)

    conn
    |> put_resp_header("message", "The Integration #{channel.name} has been deleted")
    |> render("show.json", channel: channel)
  end
end
# Creates a Helium plugin in Ubidots via their v2 API and returns the plugin
# webhook payload to the client; responds 502 on any non-201 outcome.
def get_ubidots_url(conn, %{"token" => token, "name" => name}) do
  headers = [{"Content-Type", "application/json"}, {"x-auth-token", token}]
  body = Jason.encode!(%{
    name: name,
    description: "Plugin created using Ubidots APIv2",
    settings: %{
      device_type_name: "Helium",
      helium_labels_as: "tags",
      token: token
    }
  })
  response = HTTPoison.post "https://industrial.api.ubidots.com/api/v2.0/plugin_types/~helium/plugins/", body, headers
  case response do
    {:ok, %{ status_code: 201, body: body }} ->
      conn
      |> put_resp_header("message", "Received Ubidots plugin webhook URL successfully")
      |> send_resp(:ok, body)
    _ ->
      # Any non-201 response (including transport errors) maps to a 502 payload.
      errors = %{ "errors" => %{ "error" => "Failed to create Ubidots plugin with provided token" }}
      conn
      |> send_resp(502, Jason.encode!(errors))
  end
end

# Fetches the public HTML of a Google Form by its ID and forwards it verbatim;
# responds 502 on any non-200 outcome.
def get_google_form_data(conn, %{"formId" => id }) do
  response = HTTPoison.get("https://docs.google.com/forms/d/e/#{id}/viewform")
  case response do
    {:ok, %{ status_code: 200, body: body }} ->
      conn
      |> put_resp_header("message", "Received Google Form data successfully")
      |> send_resp(:ok, body)
    _ ->
      errors = %{ "errors" => %{ "error" => "Failed to get Google Form data with provided ID" }}
      conn
      |> send_resp(502, Jason.encode!(errors))
  end
end

# Tells the router (via the shared device channel) to refetch the given devices.
defp broadcast_router_update_devices(device_ids) do
  ConsoleWeb.Endpoint.broadcast("device:all", "device:all:refetch:devices", %{ "devices" => device_ids })
end
end
| 41.223301 | 183 | 0.664626 |
1c7c8ee878d246407fa529785be33ba39b714e75 | 1,329 | exs | Elixir | mix.exs | victorspringer/recommendationsEx | f9facf8672d91df1037ccb065609a2a756d7fcc3 | [
"MIT"
] | 3 | 2017-06-20T07:47:32.000Z | 2020-10-27T15:25:10.000Z | mix.exs | victorspringer/zionRecs | f9facf8672d91df1037ccb065609a2a756d7fcc3 | [
"MIT"
] | null | null | null | mix.exs | victorspringer/zionRecs | f9facf8672d91df1037ccb065609a2a756d7fcc3 | [
"MIT"
] | 1 | 2019-08-22T03:38:19.000Z | 2019-08-22T03:38:19.000Z | defmodule RecommendationsEx.Mixfile do
use Mix.Project

# Mix project definition; run `mix help compile.app` for details on these keys.
# Zero-arity remote calls are written with parentheses (`Mix.env()`) to avoid
# "parentheses are required" warnings on modern Elixir compilers.
def project do
  [
    app: :recommendationsEx,
    version: "0.0.1",
    elixir: "~> 1.2",
    elixirc_paths: elixirc_paths(Mix.env()),
    compilers: [:phoenix, :gettext] ++ Mix.compilers(),
    build_embedded: Mix.env() == :prod,
    start_permanent: Mix.env() == :prod,
    aliases: aliases(),
    deps: deps()
  ]
end
# Configuration for the OTP application.
#
# Type `mix help compile.app` for more information.
def application do
  # `mod` names the application callback module started on boot; `applications`
  # lists runtime dependencies that must be started before this app.
  [mod: {RecommendationsEx, []},
   applications: [:phoenix, :phoenix_pubsub, :cowboy, :logger, :gettext, :bolt_sips]]
end
# Compilation paths per environment: test support files are compiled only
# under the :test environment.
defp elixirc_paths(:test), do: ["lib", "web", "test/support"]
defp elixirc_paths(_env), do: ["lib", "web"]
# Specifies your project dependencies.
#
# Type `mix help deps` for examples and options.
defp deps do
  # bolt_sips provides the Bolt protocol driver used to talk to Neo4j.
  [{:phoenix, "~> 1.2.1"},
   {:phoenix_pubsub, "~> 1.0"},
   {:gettext, "~> 0.11"},
   {:cowboy, "~> 1.0"},
   {:bolt_sips, "~> 0.2"}]
end
# Aliases are shortcuts or tasks specific to the current project.
# For example, to create, migrate and run the seeds file at once:
#
#     $ mix ecto.setup
#
# See the documentation for `Mix` for more info on aliases.
defp aliases do
  # No project-specific aliases are defined.
  []
end
end
| 27.122449 | 87 | 0.626787 |
1c7c9ab1c2abb1e9d3a92a3eff35da8171c6d8c2 | 2,811 | ex | Elixir | test/support/functional_case.ex | jayjun/bootleg | 2283ad22810178625c45eb2661624fac914befdd | [
"MIT"
] | 423 | 2017-07-19T18:40:45.000Z | 2021-12-03T08:26:15.000Z | test/support/functional_case.ex | jayjun/bootleg | 2283ad22810178625c45eb2661624fac914befdd | [
"MIT"
] | 115 | 2017-07-19T18:30:14.000Z | 2020-10-27T20:53:32.000Z | test/support/functional_case.ex | jayjun/bootleg | 2283ad22810178625c45eb2661624fac914befdd | [
"MIT"
] | 39 | 2017-07-20T03:14:18.000Z | 2020-11-18T13:39:52.000Z | defmodule Bootleg.FunctionalCase do
@moduledoc false
use ExUnit.CaseTemplate
alias Bootleg.UI
import Bootleg.FunctionalCaseHelpers
require Logger
@image "bootleg-test-sshd"
@cmd "/usr/sbin/sshd"
@args ["-D", "-e"]
@user "me"
@pass "pass"
using args do
quote do
@moduletag :functional
unless unquote(args)[:async] do
setup do
Bootleg.Config.Agent.wait_cleanup()
end
end
end
end
setup tags do
count = Map.get(tags, :boot, 1)
verbosity = Map.get(tags, :ui_verbosity, :silent)
role_opts = Map.get(tags, :role_opts, %{})
key_passphrase = Map.get(tags, :key_passphrase, "")
conf = %{image: @image, cmd: @cmd, args: @args}
hosts = Enum.map(1..count, fn _ -> init(boot(conf), passphrase: key_passphrase) end)
if Map.get(tags, :verbose, System.get_env("TEST_VERBOSE")) do
Logger.info("started docker hosts: #{inspect(hosts, pretty: true)}")
end
unless Map.get(tags, :leave_vm, System.get_env("TEST_LEAVE_CONTAINER")) do
on_exit(fn -> kill(hosts) end)
end
current_verbosity = UI.verbosity()
if current_verbosity != verbosity do
Application.put_env(:bootleg, :verbosity, verbosity)
on_exit(fn -> Application.put_env(:bootleg, :verbosity, current_verbosity) end)
end
{:ok, hosts: hosts, role_opts: role_opts}
end
def boot(%{image: image, cmd: cmd, args: args} = config) do
id =
Docker.run!(
["--rm", "--publish-all", "--detach", "-v", "#{File.cwd!()}:/project"],
image,
cmd,
args
)
ip = Docker.host()
port =
"port"
|> Docker.cmd!([id, "22/tcp"])
|> String.split(":")
|> List.last()
|> String.to_integer()
Map.merge(config, %{id: id, ip: ip, port: port})
end
@dialyzer {:no_return, init: 1, init: 2}
@spec init(binary, []) :: %{}
def init(host, options \\ []) do
adduser!(host, @user)
chpasswd!(host, @user, @pass)
passphrase = Keyword.get(options, :passphrase, "")
{public_key, private_key} = keygen!(host, @user, passphrase)
key_path = Temp.mkdir!("docker-key")
public_key_path = Path.join(key_path, "id_rsa.pub")
private_key_path = Path.join(key_path, "id_rsa")
File.write(public_key_path, public_key)
File.write(private_key_path, private_key)
File.chmod!(private_key_path, 0o600)
Map.merge(host, %{
user: @user,
password: @pass,
public_key: public_key,
public_key_path: public_key_path,
private_key: private_key,
private_key_path: private_key_path
})
end
def kill(hosts) do
running = Enum.map(hosts, &Map.get(&1, :id))
killed = Docker.kill!(running)
diff = running -- killed
if Enum.empty?(diff), do: :ok, else: {:error, diff}
end
end
| 26.271028 | 88 | 0.617574 |
1c7cbcba59a2ee85aede6e41082e4ed3d36f3568 | 688 | exs | Elixir | mix.exs | khattori/exyaml | 4ee8678a8ac241e078373f3c6c3958496fc9ac2e | [
"MIT"
] | null | null | null | mix.exs | khattori/exyaml | 4ee8678a8ac241e078373f3c6c3958496fc9ac2e | [
"MIT"
] | null | null | null | mix.exs | khattori/exyaml | 4ee8678a8ac241e078373f3c6c3958496fc9ac2e | [
"MIT"
] | null | null | null | defmodule Exyaml.MixProject do
use Mix.Project
def project do
[
app: :exyaml,
version: "0.1.0",
elixir: "~> 1.11",
start_permanent: Mix.env() == :prod,
deps: deps(),
test_coverage: [tool: ExCoveralls]
]
end
# Run "mix help compile.app" to learn about applications.
def application do
[
extra_applications: [:logger]
]
end
# Run "mix help deps" to learn about dependencies.
defp deps do
[
# {:dep_from_hexpm, "~> 0.3.0"},
# {:dep_from_git, git: "https://github.com/elixir-lang/my_dep.git", tag: "0.1.0"}
{:yamerl, "~> 0.8.0"},
{:excoveralls, "~> 0.13.3", only: :test}
]
end
end
| 21.5 | 87 | 0.563953 |
1c7ce30ed04fb17f864b6ce48fb58c9d69f8e63f | 530 | ex | Elixir | lib/penelope/nlp/tokenize/tokenizer.ex | pylon/penelope | 5b0310dc0647a8e20ab1b4c10d3820f11cfb2601 | [
"Apache-2.0"
] | 53 | 2017-10-13T06:39:49.000Z | 2022-03-28T19:43:42.000Z | lib/penelope/nlp/tokenize/tokenizer.ex | pylon/penelope | 5b0310dc0647a8e20ab1b4c10d3820f11cfb2601 | [
"Apache-2.0"
] | 12 | 2018-01-08T23:05:37.000Z | 2019-08-02T12:59:27.000Z | lib/penelope/nlp/tokenize/tokenizer.ex | pylon/penelope | 5b0310dc0647a8e20ab1b4c10d3820f11cfb2601 | [
"Apache-2.0"
] | 4 | 2018-06-13T19:45:57.000Z | 2019-10-17T13:37:06.000Z | defmodule Penelope.NLP.Tokenize.Tokenizer do
@moduledoc """
The behaviour implemented by all tokenizers.
"""
@doc """
Separate a string into a list of tokens.
"""
@callback tokenize(String.t()) :: [String.t()]
@doc """
Reverse the tokenization process, turning a list of
tokens into a single string. Tokenization is often
a lossy process, so detokenization is not guaranteed
to return a string identical to the original tokenizer's
input.
"""
@callback detokenize([String.t()]) :: String.t()
end
| 26.5 | 58 | 0.698113 |
1c7cf14e0e428b0c198e95075c14e27719212537 | 1,669 | ex | Elixir | lib/events_tools_web/uploaders/avatar.ex | community-tools/community-tools | 40b0e6cc9234b44593d2ab60bb2303d7224deb30 | [
"Apache-2.0"
] | 2 | 2017-10-06T01:14:35.000Z | 2017-11-18T16:44:44.000Z | lib/events_tools_web/uploaders/avatar.ex | community-tools/community-tools | 40b0e6cc9234b44593d2ab60bb2303d7224deb30 | [
"Apache-2.0"
] | 6 | 2017-10-06T00:04:59.000Z | 2017-10-06T00:09:27.000Z | lib/events_tools_web/uploaders/avatar.ex | apps-team/community-tools | 40b0e6cc9234b44593d2ab60bb2303d7224deb30 | [
"Apache-2.0"
] | 1 | 2017-10-06T01:17:35.000Z | 2017-10-06T01:17:35.000Z | defmodule CommunityTools.Avatar do
use Arc.Definition
use Arc.Ecto.Definition
# Include ecto support (requires package arc_ecto installed):
# use Arc.Ecto.Definition
@versions [:original]
# To add a thumbnail version:
# @versions [:original, :thumb]
# Whitelist file extensions:
#def validate({file, _}) do
# ~w(.jpg .jpeg .gif .png) |> Enum.member?(Path.extname(file.file_name))
# end
def validate({file, _}) do
~w(.jpg .jpeg .gif .png) |> Enum.member?(Path.extname(file.file_name))
end
# Define a thumbnail transformation:
# def transform(:thumb, _) do
# {:convert, "-strip -thumbnail 250x250^ -gravity center -extent 250x250 -format png", :png}
# end
# Override the persisted filenames:
# def filename(version, _) do
# version
# end
def filename(version, {file, scope}) do
file_name = Path.basename(file.file_name, Path.extname(file.file_name))
"profile_#{file_name}"
end
# Override the storage directory:
#def storage_dir(version, {file, scope}) do
# "/uploads/#{scope.id}"
# end
def storage_dir(version, {_, image}) do
"priv/static/images/profiles/"
end
# Provide a default URL if there hasn't been a file uploaded
# def default_url(version, scope) do
# "/images/avatars/default_#{version}.png"
# end
# Specify custom headers for s3 objects
# Available options are [:cache_control, :content_disposition,
# :content_encoding, :content_length, :content_type,
# :expect, :expires, :storage_class, :website_redirect_location]
#
# def s3_object_headers(version, {file, scope}) do
# [content_type: Plug.MIME.path(file.file_name)]
# end
end
| 27.816667 | 96 | 0.683643 |
1c7cf6c0f89a45c839405cee96a950313d02b28d | 1,981 | exs | Elixir | clients/service_management/mix.exs | ukrbublik/elixir-google-api | 364cec36bc76f60bec94cbcad34844367a29d174 | [
"Apache-2.0"
] | null | null | null | clients/service_management/mix.exs | ukrbublik/elixir-google-api | 364cec36bc76f60bec94cbcad34844367a29d174 | [
"Apache-2.0"
] | null | null | null | clients/service_management/mix.exs | ukrbublik/elixir-google-api | 364cec36bc76f60bec94cbcad34844367a29d174 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.ServiceManagement.Mixfile do
  use Mix.Project

  @version "0.46.2"
  @source_url "https://github.com/googleapis/elixir-google-api/tree/master/clients/service_management"

  # Mix project definition for the generated Service Management API client.
  def project() do
    [
      app: :google_api_service_management,
      version: @version,
      elixir: "~> 1.6",
      build_embedded: Mix.env() == :prod,
      start_permanent: Mix.env() == :prod,
      description: description(),
      package: package(),
      deps: deps(),
      source_url: @source_url
    ]
  end

  def application() do
    [extra_applications: [:logger]]
  end

  defp deps() do
    [
      {:google_gax, "~> 0.4"},
      {:ex_doc, "~> 0.16", only: :dev}
    ]
  end

  # Package description shown on Hex.
  defp description() do
    """
    Service Management API client library. Google Service Management allows service producers to publish their services on Google Cloud Platform so that they can be discovered and used by service consumers.
    """
  end

  # Hex package metadata.
  defp package() do
    [
      files: ["lib", "mix.exs", "README*", "LICENSE"],
      maintainers: ["Jeff Ching", "Daniel Azuma"],
      licenses: ["Apache 2.0"],
      links: %{
        "GitHub" => @source_url,
        "Homepage" => "https://cloud.google.com/service-management/"
      }
    ]
  end
end
| 29.567164 | 206 | 0.672388 |
1c7cf857824610f4ac0a20c5b5e59295008d7206 | 1,764 | ex | Elixir | clients/books/lib/google_api/books/v1/model/volume_sale_info_retail_price.ex | kolorahl/elixir-google-api | 46bec1e092eb84c6a79d06c72016cb1a13777fa6 | [
"Apache-2.0"
] | null | null | null | clients/books/lib/google_api/books/v1/model/volume_sale_info_retail_price.ex | kolorahl/elixir-google-api | 46bec1e092eb84c6a79d06c72016cb1a13777fa6 | [
"Apache-2.0"
] | null | null | null | clients/books/lib/google_api/books/v1/model/volume_sale_info_retail_price.ex | kolorahl/elixir-google-api | 46bec1e092eb84c6a79d06c72016cb1a13777fa6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Books.V1.Model.VolumeSaleInfoRetailPrice do
  @moduledoc """
  The actual selling price of the book. This is the same as the suggested
  retail or list price unless there are offers or discounts on this volume.
  (In LITE projection.)

  ## Attributes

  *   `amount` (*type:* `float()`, *default:* `nil`) - Amount in the currency listed below. (In LITE projection.)
  *   `currencyCode` (*type:* `String.t`, *default:* `nil`) - An ISO 4217, three-letter currency code. (In LITE projection.)
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :amount => float(),
          :currencyCode => String.t()
        }

  # `field/1` is a GoogleApi.Gax.ModelBase macro: it declares the struct key
  # and wires up its JSON (de)serialization.
  field(:amount)
  field(:currencyCode)
end
defimpl Poison.Decoder, for: GoogleApi.Books.V1.Model.VolumeSaleInfoRetailPrice do
  # Decoding is delegated to the model's generated `decode/2`.
  def decode(value, options),
    do: GoogleApi.Books.V1.Model.VolumeSaleInfoRetailPrice.decode(value, options)
end
defimpl Poison.Encoder, for: GoogleApi.Books.V1.Model.VolumeSaleInfoRetailPrice do
  # Encoding is delegated to the shared Gax model base.
  def encode(value, options), do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 33.923077 | 124 | 0.728458 |
1c7d1ab8b896d3c3839978d947f9770c316d25f5 | 569 | ex | Elixir | lib/mailman/test_server_supervisor.ex | uni-halle/mailman | c1a9b6d9d7f0727372f19638fef74b180d6691a6 | [
"MIT"
] | 154 | 2015-02-23T17:36:05.000Z | 2017-09-09T05:50:21.000Z | lib/mailman/test_server_supervisor.ex | uni-halle/mailman | c1a9b6d9d7f0727372f19638fef74b180d6691a6 | [
"MIT"
] | 67 | 2015-03-04T04:01:22.000Z | 2017-09-14T04:23:04.000Z | lib/mailman/test_server_supervisor.ex | uni-halle/mailman | c1a9b6d9d7f0727372f19638fef74b180d6691a6 | [
"MIT"
] | 62 | 2015-03-04T03:53:07.000Z | 2017-09-07T23:34:15.000Z | defmodule Mailman.TestServerSupervisor do
@moduledoc """
A DynamicSupervisor to manage TestServers, which can get started ad-hoc.
"""
use DynamicSupervisor
def start_link() do
:ets.new(:mailman_test_servers, [:set, :public, :named_table])
DynamicSupervisor.start_link(__MODULE__, [], name: __MODULE__)
end
def init(_arg) do
DynamicSupervisor.init(strategy: :one_for_one)
end
def start_test_server(parent_pid) do
child_spec = {Mailman.TestServer, {[], parent_pid}}
DynamicSupervisor.start_child(__MODULE__, child_spec)
end
end
| 27.095238 | 74 | 0.739895 |
1c7d405f869b29f5ff96074d900400306e17b367 | 1,489 | exs | Elixir | apps/snitch_core/test/data/model/payment/payment_test.exs | Acrecio/avia | 54d264fc179b5b5f17d174854bdca063e1d935e9 | [
"MIT"
] | 456 | 2018-09-20T02:40:59.000Z | 2022-03-07T08:53:48.000Z | apps/snitch_core/test/data/model/payment/payment_test.exs | Acrecio/avia | 54d264fc179b5b5f17d174854bdca063e1d935e9 | [
"MIT"
] | 273 | 2018-09-19T06:43:43.000Z | 2021-08-07T12:58:26.000Z | apps/snitch_core/test/data/model/payment/payment_test.exs | Acrecio/avia | 54d264fc179b5b5f17d174854bdca063e1d935e9 | [
"MIT"
] | 122 | 2018-09-26T16:32:46.000Z | 2022-03-13T11:44:19.000Z | defmodule Snitch.Data.Model.PaymentTest do
use ExUnit.Case, async: true
use Snitch.DataCase
import Snitch.Factory
alias Snitch.Data.Model.Payment
alias Snitch.Data.Schema
# Every test gets a freshly inserted order in its context.
setup do
  [
    order: insert(:order)
  ]
end

# Factory-provided setup callbacks; judging by the tests below they appear
# to merge :ccd, :chk and :hpm payment fixtures into the context —
# TODO(review): confirm against Snitch.Factory.
setup :payment_methods
setup :payments
# A payment that has a backing card_payment row resolves to the CardPayment
# schema; a payment without one stays a plain Payment struct.
test "to_subtype", %{ccd: card, chk: check} do
  insert(:card_payment, payment_id: card.id)
  assert %Schema.CardPayment{} = Payment.to_subtype(card.id)
  assert %Schema.Payment{} = Payment.to_subtype(check.id)
end
# The per-date count returned for the insertion day is a Money struct.
# The `check` fixture is not used here, so it is underscored to avoid an
# unused-variable compiler warning.
test "payment count by date", %{ccd: card, chk: _check} do
  payment =
    insert(:payment_ccd, payment_method_id: card.payment_method_id, order_id: card.order_id)

  # Midnight of the day after insertion, used as the range's upper bound.
  next_date =
    payment.inserted_at
    |> NaiveDateTime.to_date()
    |> Date.add(1)
    |> Date.to_string()
    |> get_naive_date_time()

  payment_date_count =
    Payment.get_payment_count_by_date(payment.inserted_at, next_date) |> List.first()

  # Only the type is asserted, not the amount.
  assert %Money{} = payment_date_count.count
end
# get_all/0 returns a non-empty list once payment fixtures exist. The
# context bindings are not used in the body, so they are underscored to
# avoid unused-variable compiler warnings.
test "get_all/0 return all payments", %{ccd: _cod, chk: _check, hpm: _hosted_payment} do
  returned_payments = Payment.get_all()
  assert returned_payments != []
end
# delete/1 succeeds and the payment can no longer be fetched. The deleted
# record binding was previously `cs` (unused, triggering a compiler
# warning); the result is now asserted instead.
test "delete/1 deletes a payment", %{hpm: payment} do
  assert {:ok, _deleted} = Payment.delete(payment)
  assert Payment.get(payment.id) == {:error, :payment_not_found}
end
# Builds a NaiveDateTime at midnight for an ISO-8601 date string.
# Uses Date.from_iso8601!/1 and an assertive match instead of `elem/2` on
# the result tuples, so a malformed date raises a clear error rather than
# propagating the error reason as if it were a date.
defp get_naive_date_time(date) do
  date_struct = Date.from_iso8601!(date)
  {:ok, naive} = NaiveDateTime.new(date_struct, ~T[00:00:00])
  naive
end
end
| 25.237288 | 94 | 0.677636 |
1c7d43d98f71ae41cb8d582da8244ee41e5db162 | 100 | exs | Elixir | test/ex_line_pay_test.exs | shufo/ex_line_pay | 9d7a594bfcfdc20b2c31cd02c42a0bb1f9614548 | [
"MIT"
] | null | null | null | test/ex_line_pay_test.exs | shufo/ex_line_pay | 9d7a594bfcfdc20b2c31cd02c42a0bb1f9614548 | [
"MIT"
] | null | null | null | test/ex_line_pay_test.exs | shufo/ex_line_pay | 9d7a594bfcfdc20b2c31cd02c42a0bb1f9614548 | [
"MIT"
] | null | null | null | defmodule ExLinePayTest do
use ExUnit.Case
test "1 + 1 = 2" do
assert 1 + 1 == 2
end
end
| 12.5 | 26 | 0.61 |
1c7d5c9a48598d946a87c369fceb6789f05f6f1a | 942 | exs | Elixir | config/config.exs | anamba/webrtc_example | b539101d29ffeb52f551f263128a0884a5d6a7ae | [
"MIT"
] | null | null | null | config/config.exs | anamba/webrtc_example | b539101d29ffeb52f551f263128a0884a5d6a7ae | [
"MIT"
] | null | null | null | config/config.exs | anamba/webrtc_example | b539101d29ffeb52f551f263128a0884a5d6a7ae | [
"MIT"
] | null | null | null | # This file is responsible for configuring your application
# and its dependencies with the aid of the Mix.Config module.
#
# This configuration file is loaded before any dependency and
# is restricted to this project.
use Mix.Config
# Configures the endpoint
config :webrtc_example, WebrtcExample.Endpoint,
url: [host: "localhost"],
root: Path.dirname(__DIR__),
secret_key_base: "l3ff5x9yIaFSZ2aAG5zyHtAf+SK7Pvr3Hn+0ExeSBxQclpqzvexKin5WWAqjiwok",
render_errors: [accepts: ~w(html json)],
pubsub: [name: WebrtcExample.PubSub, adapter: Phoenix.PubSub.PG2]
# Configures Elixir's Logger
config :logger, :console,
format: "$time $metadata[$level] $message\n",
metadata: [:request_id]
# Use Jason for JSON parsing in Phoenix
config :phoenix, :json_library, Jason
# Import environment specific config. This must remain at the bottom
# of this file so it overrides the configuration defined above.
import_config "#{Mix.env()}.exs"
| 34.888889 | 86 | 0.769639 |
1c7d624146e556221421ef50d1adcfe29a43db7b | 12,265 | ex | Elixir | lib/boundary/mix/tasks/compile/boundary.ex | sasa1977/boundaries | c6ed70dc07c37da14172761b4d08bc38d9d6594b | [
"MIT"
] | 133 | 2019-08-12T12:22:37.000Z | 2019-09-06T16:27:34.000Z | lib/boundary/mix/tasks/compile/boundary.ex | sasa1977/boundaries | c6ed70dc07c37da14172761b4d08bc38d9d6594b | [
"MIT"
] | 7 | 2019-08-13T00:41:05.000Z | 2019-09-07T10:24:22.000Z | lib/boundary/mix/tasks/compile/boundary.ex | sasa1977/boundaries | c6ed70dc07c37da14172761b4d08bc38d9d6594b | [
"MIT"
] | 2 | 2019-08-13T09:45:49.000Z | 2019-08-20T19:43:09.000Z | defmodule Mix.Tasks.Compile.Boundary do
# credo:disable-for-this-file Credo.Check.Readability.Specs
use Boundary, classify_to: Boundary.Mix
use Mix.Task.Compiler
alias Boundary.Mix.Xref
@moduledoc """
Verifies cross-module function calls according to defined boundaries.
This compiler reports all cross-boundary function calls which are not permitted, according to
the current definition of boundaries. For details on defining boundaries, see the docs for the
`Boundary` module.
## Usage
Once you have configured the boundaries, you need to include the compiler in `mix.exs`:
```
defmodule MySystem.MixProject do
# ...
def project do
[
compilers: [:boundary] ++ Mix.compilers(),
# ...
]
end
# ...
end
```
When developing a library, it's advised to use this compiler only in `:dev` and `:test`
environments:
```
defmodule Boundary.MixProject do
# ...
def project do
[
compilers: extra_compilers(Mix.env()) ++ Mix.compilers(),
# ...
]
end
# ...
defp extra_compilers(:prod), do: []
defp extra_compilers(_env), do: [:boundary]
end
```
## Warnings
Every invalid cross-boundary usage is reported as a compiler warning. Consider the following example:
```
defmodule MySystem.User do
def auth() do
MySystemWeb.Endpoint.url()
end
end
```
Assuming that calls from `MySystem` to `MySystemWeb` are not allowed, you'll get the following warning:
```
$ mix compile
warning: forbidden reference to MySystemWeb
(references from MySystem to MySystemWeb are not allowed)
lib/my_system/user.ex:3
```
Since the compiler emits warnings, `mix compile` will still succeed, and you can normally start
your system, even if some boundary rules are violated. The compiler doesn't force you to immediately
fix these violations, which is a deliberate decision made to avoid disrupting the development flow.
At the same time, it's worth enforcing boundaries on the CI. This can easily be done by providing
the `--warnings-as-errors` option to `mix compile`.
"""
@recursive true
@impl Mix.Task.Compiler
# Compiler entry point: boots the Xref store, schedules `after_compiler/2`
# to run once the :app compiler finishes, and registers this module as a
# compilation tracer so `trace/2` receives every reference event.
def run(argv) do
  Xref.start_link()
  Mix.Task.Compiler.after_compiler(:app, &after_compiler(&1, argv))
  tracers = Code.get_compiler_option(:tracers)
  Code.put_compiler_option(:tracers, [__MODULE__ | tracers])
  {:ok, []}
end
@doc false
# Compilation tracer callbacks (registered in run/2 via the :tracers
# compiler option). Remote and imported calls are recorded as references;
# macro expansions and references made outside any function body count as
# :compile mode, everything else as :runtime.
def trace({remote, meta, to_module, _name, _arity}, env)
    when remote in ~w/remote_function imported_function remote_macro imported_macro/a do
  mode = if is_nil(env.function) or remote in ~w/remote_macro imported_macro/a, do: :compile, else: :runtime
  record(to_module, meta, env, mode, :call)
end

def trace({local, _meta, _to_module, _name, _arity}, env)
    when local in ~w/local_function local_macro/a,
    # We need to initialize module although we're not going to record the call, to correctly remove previously
    # recorded entries when the module is recompiled.
    do: initialize_module(env.module)

# Struct expansion (%Mod{...}) is always a compile-time reference.
def trace({:struct_expansion, meta, to_module, _keys}, env),
  do: record(to_module, meta, env, :compile, :struct_expansion)

# Bare alias references, except the alias inside a `boundary/1` definition
# itself, which must not count as a cross-boundary reference.
def trace({:alias_reference, meta, to_module}, env) do
  unless env.function == {:boundary, 1} do
    mode = if is_nil(env.function), do: :compile, else: :runtime
    record(to_module, meta, env, mode, :alias_reference)
  end

  :ok
end

# All other tracer events are ignored.
def trace(_event, _env), do: :ok
# Stores one cross-module reference in the Xref store. Self-references,
# system (Elixir/OTP) modules, and non-Elixir (Erlang) modules are skipped.
defp record(to_module, meta, env, mode, type) do
  # We need to initialize module even if we're not going to record the call, to correctly remove previously
  # recorded entries when the module is recompiled.
  initialize_module(env.module)

  unless env.module in [nil, to_module] or system_module?(to_module) or
           not String.starts_with?(Atom.to_string(to_module), "Elixir.") do
    Xref.record(
      env.module,
      %{
        from_function: env.function,
        to: to_module,
        mode: mode,
        type: type,
        file: Path.relative_to_cwd(env.file),
        # Prefer the event's own line; fall back to the env's current line.
        line: Keyword.get(meta, :line, env.line)
      }
    )
  end

  :ok
end
# Ensures the module has an entry in the Xref store so that recompiling it
# clears any previously recorded references.
defp initialize_module(nil), do: nil
defp initialize_module(module), do: Xref.initialize_module(module)
# Building the list of "system modules", which we'll exclude from the traced data, to reduce the collected data and
# processing time.
system_apps = ~w/elixir stdlib kernel/a

system_apps
|> Stream.each(&Application.load/1)
|> Stream.flat_map(&Application.spec(&1, :modules))
# We'll also include so called preloaded modules (e.g. `:erlang`, `:init`), which are not a part of any app.
|> Stream.concat(:erlang.pre_loaded())
# Unquote fragments in the module body unroll this, at compile time, into
# one `system_module?/1` clause per system module.
|> Enum.each(fn module -> defp system_module?(unquote(module)), do: true end)

# Catch-all: anything not generated above is not a system module.
defp system_module?(_module), do: false
# If compilation already failed, pass the status through untouched.
defp after_compiler({:error, _} = status, _argv), do: status

# Runs after the :app compiler: deregisters the tracer, flushes collected
# references, (re)builds the boundary view, checks it, and appends any
# boundary diagnostics to the compiler result.
defp after_compiler({status, diagnostics}, argv) when status in [:ok, :noop] do
  # We're reloading the app to make sure we have the latest version. This fixes potential stale state in ElixirLS.
  Application.unload(Boundary.Mix.app_name())
  Application.load(Boundary.Mix.app_name())

  # Remove this module from the tracer list now that compilation is done.
  tracers = Enum.reject(Code.get_compiler_option(:tracers), &(&1 == __MODULE__))
  Code.put_compiler_option(:tracers, tracers)

  Xref.flush(Application.spec(Boundary.Mix.app_name(), :modules) || [])

  # Reuse the cached view when it can be refreshed; otherwise rebuild it.
  view =
    case Boundary.Mix.read_manifest("boundary_view") do
      nil -> rebuild_view()
      view -> Boundary.View.refresh(view) || rebuild_view()
    end

  Boundary.Mix.write_manifest("boundary_view", Boundary.View.drop_main_app(view))

  errors = check(view, Xref.entries())
  print_diagnostic_errors(errors)
  {status(errors, argv), diagnostics ++ errors}
end
# Rebuilds the boundary view from scratch for the current app.
defp rebuild_view do
  Boundary.Mix.load_app()
  app = Boundary.Mix.app_name()
  Boundary.View.build(app)
end
# Boundary errors only fail the build when --warnings-as-errors is given.
defp status(errors, argv) do
  case errors do
    [] -> :ok
    [_ | _] -> if warnings_as_errors?(argv), do: :error, else: :ok
  end
end
# True when --warnings-as-errors was passed on the command line.
defp warnings_as_errors?(argv) do
  argv
  |> OptionParser.parse(strict: [warnings_as_errors: :boolean])
  |> elem(0)
  |> Keyword.get(:warnings_as_errors, false)
end
# Prints all diagnostics, preceded by a blank line when there are any.
defp print_diagnostic_errors([]), do: :ok

defp print_diagnostic_errors(errors) do
  Mix.shell().info("")
  Enum.each(errors, &print_diagnostic_error/1)
end
# Renders one diagnostic: colored severity prefix, message, file location.
defp print_diagnostic_error(error) do
  output = [severity(error.severity), error.message, location(error)]
  Mix.shell().info(output)
end
# Formats the "file:line" trailer appended to a diagnostic message; just a
# newline when no file is known.
defp location(%{file: file, position: position}) when file != nil and file != "" do
  suffix = if position != nil, do: ":#{position}", else: ""
  "\n #{file}#{suffix}\n"
end

defp location(_error), do: "\n"
# ANSI-decorated "error: "/"warning: " prefix for shell output.
defp severity(severity) do
  [:bright, color(severity), "#{severity}: ", :reset]
end

defp color(severity) do
  case severity do
    :error -> :red
    :warning -> :yellow
  end
end
# Converts boundary errors into sorted compiler diagnostics. A raised
# Boundary.Error (configuration problem) becomes a single diagnostic.
defp check(application, entries) do
  application
  |> Boundary.errors(entries)
  |> Stream.map(&to_diagnostic_error/1)
  |> Enum.sort_by(&{&1.file, &1.position})
rescue
  e in Boundary.Error ->
    [diagnostic(e.message, file: e.file, position: e.line)]
end
# Maps each boundary error tuple produced by Boundary.errors/2 to a
# compiler diagnostic with message, file, and line.
defp to_diagnostic_error({:unclassified_module, module}),
  do: diagnostic("#{inspect(module)} is not included in any boundary", file: module_source(module))

defp to_diagnostic_error({:unknown_dep, dep}) do
  diagnostic("unknown boundary #{inspect(dep.name)} is listed as a dependency",
    file: Path.relative_to_cwd(dep.file),
    position: dep.line
  )
end

defp to_diagnostic_error({:check_in_false_dep, dep}) do
  diagnostic("boundary #{inspect(dep.name)} can't be a dependency because it has check.in set to false",
    file: Path.relative_to_cwd(dep.file),
    position: dep.line
  )
end

defp to_diagnostic_error({:forbidden_dep, dep}) do
  diagnostic(
    "#{inspect(dep.name)} can't be listed as a dependency because it's not a sibling, a parent, or a dep of some ancestor",
    file: Path.relative_to_cwd(dep.file),
    position: dep.line
  )
end

defp to_diagnostic_error({:unknown_export, export}) do
  diagnostic("unknown module #{inspect(export.name)} is listed as an export",
    file: Path.relative_to_cwd(export.file),
    position: export.line
  )
end

defp to_diagnostic_error({:export_not_in_boundary, export}) do
  diagnostic("module #{inspect(export.name)} can't be exported because it's not a part of this boundary",
    file: Path.relative_to_cwd(export.file),
    position: export.line
  )
end

# Dependency cycles have no single location; render the whole chain.
defp to_diagnostic_error({:cycle, cycle}) do
  cycle = cycle |> Stream.map(&inspect/1) |> Enum.join(" -> ")
  diagnostic("dependency cycle found:\n#{cycle}\n")
end

defp to_diagnostic_error({:unknown_boundary, info}) do
  diagnostic("unknown boundary #{inspect(info.name)}",
    file: Path.relative_to_cwd(info.file),
    position: info.line
  )
end

defp to_diagnostic_error({:cant_reclassify, info}) do
  diagnostic("only mix task and protocol implementation can be reclassified",
    file: Path.relative_to_cwd(info.file),
    position: info.line
  )
end

# A forbidden cross-boundary reference; the reason text depends on why the
# reference is invalid (disallowed dep, runtime-only, unexported module, …).
defp to_diagnostic_error({:invalid_reference, error}) do
  reason =
    case error.type do
      :normal ->
        "(references from #{inspect(error.from_boundary)} to #{inspect(error.to_boundary)} are not allowed)"

      :runtime ->
        "(runtime references from #{inspect(error.from_boundary)} to #{inspect(error.to_boundary)} are not allowed)"

      :not_exported ->
        module = inspect(error.reference.to)
        "(module #{module} is not exported by its owner boundary #{inspect(error.to_boundary)})"

      :invalid_external_dep_call ->
        "(references from #{inspect(error.from_boundary)} to #{inspect(error.to_boundary)} are not allowed)"
    end

  message = "forbidden reference to #{inspect(error.reference.to)}\n #{reason}"
  diagnostic(message, file: Path.relative_to_cwd(error.reference.file), position: error.reference.line)
end

# The deprecated ignore?: option gets a dedicated migration hint.
defp to_diagnostic_error({:unknown_option, %{name: :ignore?, value: value} = data}) do
  diagnostic(
    "ignore?: #{value} is deprecated, use check: [in: #{not value}, out: #{not value}] instead",
    file: Path.relative_to_cwd(data.file),
    position: data.line
  )
end

defp to_diagnostic_error({:unknown_option, data}) do
  diagnostic("unknown option #{inspect(data.name)}",
    file: Path.relative_to_cwd(data.file),
    position: data.line
  )
end

defp to_diagnostic_error({:deps_in_check_out_false, data}) do
  diagnostic("deps can't be listed if check.out is set to false",
    file: Path.relative_to_cwd(data.file),
    position: data.line
  )
end

defp to_diagnostic_error({:apps_in_check_out_false, data}) do
  diagnostic("check apps can't be listed if check.out is set to false",
    file: Path.relative_to_cwd(data.file),
    position: data.line
  )
end

defp to_diagnostic_error({:exports_in_check_in_false, data}) do
  diagnostic("can't export modules if check.in is set to false",
    file: Path.relative_to_cwd(data.file),
    position: data.line
  )
end

defp to_diagnostic_error({:invalid_type, data}) do
  diagnostic("invalid type",
    file: Path.relative_to_cwd(data.file),
    position: data.line
  )
end

defp to_diagnostic_error({:invalid_ignores, boundary}) do
  diagnostic("can't disable checks in a sub-boundary",
    file: Path.relative_to_cwd(boundary.file),
    position: boundary.line
  )
end

defp to_diagnostic_error({:ancestor_with_ignored_checks, boundary, ancestor}) do
  diagnostic("sub-boundary inside a boundary with disabled checks (#{inspect(ancestor.name)})",
    file: Path.relative_to_cwd(boundary.file),
    position: boundary.line
  )
end
# Best-effort lookup of a compiled module's source path, relative to the
# CWD; returns "" when the module or its compile info is unavailable.
defp module_source(module) do
  compile_info = module.module_info(:compile)

  compile_info
  |> Keyword.fetch!(:source)
  |> to_string()
  |> Path.relative_to_cwd()
catch
  _kind, _reason -> ""
end
# Builds a boundary compiler diagnostic, with any of the struct's fields
# overridable through `opts` (e.g. :file, :position, :severity).
def diagnostic(message, opts \\ []) do
  defaults = %Mix.Task.Compiler.Diagnostic{
    compiler_name: "boundary",
    details: nil,
    file: "unknown",
    message: message,
    position: nil,
    severity: :warning
  }

  Map.merge(defaults, Map.new(opts))
end
end
| 31.448718 | 125 | 0.677945 |
1c7daa0bc139b2321fcbf414134871954ccc15bf | 1,276 | ex | Elixir | apps/day08/lib/day08.ex | jwarwick/aoc_2019 | 04229b86829b72323498b57a6649fcc6f7c96406 | [
"MIT"
] | 2 | 2019-12-21T21:21:04.000Z | 2019-12-27T07:00:19.000Z | apps/day08/lib/day08.ex | jwarwick/aoc_2019 | 04229b86829b72323498b57a6649fcc6f7c96406 | [
"MIT"
] | null | null | null | apps/day08/lib/day08.ex | jwarwick/aoc_2019 | 04229b86829b72323498b57a6649fcc6f7c96406 | [
"MIT"
] | null | null | null | defmodule Day08 do
@moduledoc """
AoC 2019, Day 8 - Space Image Format
"""
# Fixed puzzle image dimensions: each layer is 25 pixels wide, 6 tall.
@width 25
@height 6
@doc """
Compute the image checksum
"""
def part1 do
  checksum(parse())
end
@doc """
Generate the image
"""
def part2 do
  render(parse(), @width, @height)
end
@doc """
Parse the input
"""
def parse() do
Util.priv_file(:day08, "day8_input.txt")
|> File.read!()
|> string_to_layers(@width, @height)
end
@doc """
Turn an input image into the layer structure
"""
def string_to_layers(str, width, height) do
String.trim(str)
|> String.to_charlist()
|> Enum.map(&(&1 - ?0))
|> Enum.chunk_every(width * height)
end
@doc """
Compute an image checksum
"""
def checksum(layers) do
l =
layers
|> Enum.sort_by(&Enum.count(&1, fn x -> x == 0 end))
|> hd()
ones = Enum.count(l, &(&1 == 1))
twos = Enum.count(l, &(&1 == 2))
ones * twos
end
@doc """
Collapse the layers to render the final image
"""
def render(layers, width, _height) do
Enum.reduce(layers, &collapse/2)
|> Enum.chunk_every(width)
end
# Overlays one layer beneath the accumulated image: a pixel of 2 is
# transparent, so the lower layer's pixel shows through; anything else wins.
defp collapse(layer, img) do
  for {top, bottom} <- Enum.zip(img, layer) do
    if top == 2, do: bottom, else: top
  end
end
end
| 17.971831 | 59 | 0.565831 |
1c7db15e6c895e86527c74a04dc5fd73028a62fe | 1,498 | ex | Elixir | clients/sheets/lib/google_api/sheets/v4/model/chart_date_time_rule.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2021-12-20T03:40:53.000Z | 2021-12-20T03:40:53.000Z | clients/sheets/lib/google_api/sheets/v4/model/chart_date_time_rule.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2020-08-18T00:11:23.000Z | 2020-08-18T00:44:16.000Z | clients/sheets/lib/google_api/sheets/v4/model/chart_date_time_rule.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Sheets.V4.Model.ChartDateTimeRule do
@moduledoc """
Allows you to organize the date-time values in a source data column into buckets based on selected parts of their date or time values.
## Attributes
* `type` (*type:* `String.t`, *default:* `nil`) - The type of date-time grouping to apply.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:type => String.t() | nil
}
field(:type)
end
defimpl Poison.Decoder, for: GoogleApi.Sheets.V4.Model.ChartDateTimeRule do
def decode(value, options) do
GoogleApi.Sheets.V4.Model.ChartDateTimeRule.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.Sheets.V4.Model.ChartDateTimeRule do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 31.87234 | 136 | 0.736315 |
1c7dbe0252af08f2aea79157769f3ae7344fb4c7 | 1,188 | ex | Elixir | lib/ex_unit/lib/ex_unit/runner_stats.ex | tmbb/exdocs_makedown_demo | 6a0039c54d2fa10d79c080efcef8d70d359678f8 | [
"Apache-2.0"
] | 1 | 2017-07-25T21:46:25.000Z | 2017-07-25T21:46:25.000Z | lib/ex_unit/lib/ex_unit/runner_stats.ex | tmbb/exdocs_makedown_demo | 6a0039c54d2fa10d79c080efcef8d70d359678f8 | [
"Apache-2.0"
] | null | null | null | lib/ex_unit/lib/ex_unit/runner_stats.ex | tmbb/exdocs_makedown_demo | 6a0039c54d2fa10d79c080efcef8d70d359678f8 | [
"Apache-2.0"
] | 1 | 2017-07-25T21:46:48.000Z | 2017-07-25T21:46:48.000Z | defmodule ExUnit.RunnerStats do
  @moduledoc false
  use GenServer

  # State is a bare map of counters: every finished test bumps :total;
  # failed/invalid tests also bump :failures; skipped tests bump :skipped.
  def init(_opts) do
    {:ok, %{total: 0, failures: 0, skipped: 0}}
  end

  # Synchronously fetches the accumulated counters. Uses an :infinity
  # timeout because it is called once, at the very end of a test run.
  def stats(pid) do
    GenServer.call(pid, :stats, :infinity)
  end

  def handle_call(:stats, _from, map) do
    {:reply, map, map}
  end

  # A test that failed or was invalid counts toward both :total and :failures.
  # Clause order matters: this must match before the catch-all below.
  def handle_cast({:test_finished, %ExUnit.Test{state: {tag, _}}},
      %{total: total, failures: failures} = map) when tag in [:failed, :invalid] do
    {:noreply, %{map | total: total + 1, failures: failures + 1}}
  end

  # Skipped tests still count toward the total.
  def handle_cast({:test_finished, %ExUnit.Test{state: {:skip, _}}},
      %{total: total, skipped: skipped} = map) do
    {:noreply, %{map | total: total + 1, skipped: skipped + 1}}
  end

  # A whole case failing (e.g. setup_all crash) marks every test in the
  # case as both run and failed in one step.
  def handle_cast({:case_finished, %ExUnit.TestCase{state: {:failed, _failures}} = test_case},
      %{failures: failures, total: total} = map) do
    test_count = length(test_case.tests)
    {:noreply, %{map | failures: failures + test_count, total: total + test_count}}
  end

  # Catch-all for successfully finished tests: only the total grows.
  def handle_cast({:test_finished, _}, %{total: total} = map) do
    {:noreply, %{map | total: total + 1}}
  end

  # Ignore all other formatter events.
  def handle_cast(_, map) do
    {:noreply, map}
  end
end
| 28.285714 | 95 | 0.616162 |
1c7df269de36c757eea5d606a6f4bb37f89adcbc | 7,725 | ex | Elixir | lib/sanbase/signals/trigger/settings/eth_wallet_trigger_settings.ex | sitedata/sanbase2 | 8da5e44a343288fbc41b68668c6c80ae8547d557 | [
"MIT"
] | null | null | null | lib/sanbase/signals/trigger/settings/eth_wallet_trigger_settings.ex | sitedata/sanbase2 | 8da5e44a343288fbc41b68668c6c80ae8547d557 | [
"MIT"
] | 1 | 2021-07-24T16:26:03.000Z | 2021-07-24T16:26:03.000Z | lib/sanbase/signals/trigger/settings/eth_wallet_trigger_settings.ex | sitedata/sanbase2 | 8da5e44a343288fbc41b68668c6c80ae8547d557 | [
"MIT"
] | null | null | null | defmodule Sanbase.Signal.Trigger.EthWalletTriggerSettings do
@moduledoc ~s"""
The EthWallet signal is triggered when the balance of a wallet or set of wallets
changes by a predefined amount for a specified asset (Ethereum, SAN tokens, etc.)
The signal can follow a single ethereum address, a list of ethereum addresses
or a project. When a list of addresses or a project is followed, all the addresses
are considered to be owned by a single entity and the transfers between them
are excluded.
"""
use Vex.Struct
import Sanbase.Validation
import Sanbase.Signal.Validation
import Sanbase.Signal.OperationEvaluation
import Sanbase.DateTimeUtils, only: [str_to_sec: 1, round_datetime: 2]
alias __MODULE__
alias Sanbase.Signal.Type
alias Sanbase.Model.Project
alias Sanbase.Clickhouse.HistoricalBalance
alias Sanbase.Signal.Evaluator.Cache
@trigger_type "eth_wallet"
@derive {Jason.Encoder, except: [:filtered_target, :triggered?, :payload, :template_kv]}
@enforce_keys [:type, :channel, :target, :asset]
defstruct type: @trigger_type,
channel: nil,
target: nil,
asset: nil,
operation: nil,
time_window: "1d",
# Private fields, not stored in DB.
filtered_target: %{list: []},
triggered?: false,
payload: %{},
template_kv: %{}
@type t :: %__MODULE__{
type: Type.trigger_type(),
channel: Type.channel(),
target: Type.complex_target(),
asset: Type.asset(),
operation: Type.operation(),
time_window: Type.time_window(),
# Private fields, not stored in DB.
filtered_target: Type.filtered_target(),
triggered?: boolean(),
payload: Type.payload(),
template_kv: Type.tempalte_kv()
}
validates(:channel, &valid_notification_channel?/1)
validates(:target, &valid_eth_wallet_target?/1)
validates(:asset, &valid_slug?/1)
validates(:operation, &valid_absolute_change_operation?/1)
validates(:time_window, &valid_time_window?/1)
@spec type() :: String.t()
def type(), do: @trigger_type
  # Clause for targets that are raw ethereum addresses: computes, for each
  # address, the balance change of `settings.asset` over the trailing
  # `time_window` (window end is "now"). Addresses for which no balance data
  # came back are dropped from the result.
  # Returns a list of `{address, window_start, %{start_balance, end_balance,
  # balance_change}}` tuples.
  def get_data(
        %__MODULE__{
          filtered_target: %{list: target_list, type: :eth_address}
        } = settings
      ) do
    to = Timex.now()
    from = Timex.shift(to, seconds: -str_to_sec(settings.time_window))

    target_list
    |> Enum.map(fn addr ->
      # `balance_change/4` returns a list of `{address, {start, end, change}}`;
      # only a single-element list for exactly this address is accepted.
      case balance_change(addr, settings.asset.slug, from, to) do
        [{^addr, {start_balance, end_balance, balance_change}}] ->
          {addr, from,
           %{
             start_balance: start_balance,
             end_balance: end_balance,
             balance_change: balance_change
           }}

        _ ->
          nil
      end
    end)
    |> Enum.reject(&is_nil/1)
  end
  # Clause for targets that are project slugs: all of a project's ethereum
  # addresses are treated as a single entity, so their per-address balance
  # changes are summed into one `{start, end, change}` triple per project.
  def get_data(%__MODULE__{filtered_target: %{list: target_list, type: :slug}} = settings) do
    to = Timex.now()
    from = Timex.shift(to, seconds: -str_to_sec(settings.time_window))

    target_list
    |> Project.by_slug()
    |> Enum.map(fn %Project{} = project ->
      # Assertive match: a project without resolvable addresses is a bug here.
      {:ok, eth_addresses} = Project.eth_addresses(project)

      # Addresses are lowercased before the lookup — presumably the balance
      # store keys addresses in lowercase; confirm against HistoricalBalance.
      project_balance_data =
        eth_addresses
        |> Enum.map(&String.downcase/1)
        |> balance_change(settings.asset.slug, from, to)
        |> Enum.map(fn {_, data} -> data end)

      # Sum the per-address triples into a single project-wide triple.
      {start_balance, end_balance, balance_change} =
        project_balance_data
        |> Enum.reduce({0, 0, 0}, fn
          {start_balance, end_balance, balance_change}, {start_acc, end_acc, change_acc} ->
            {
              start_acc + start_balance,
              end_acc + end_balance,
              change_acc + balance_change
            }
        end)

      {project, from,
       %{
         start_balance: start_balance,
         end_balance: end_balance,
         balance_change: balance_change
       }}
    end)
  end
  # Fetches balance changes for `addresses` from Clickhouse, memoized in the
  # evaluator cache. Both window edges are rounded to 5-minute (300s) buckets
  # before hashing so nearby evaluations share one cache entry.
  # Errors from the balance fetch are swallowed and mapped to `[]` —
  # presumably a deliberate best-effort choice so one failed fetch does not
  # break signal evaluation; confirm before changing.
  defp balance_change(addresses, slug, from, to) do
    cache_key =
      {:balance_change, addresses, slug, round_datetime(from, 300), round_datetime(to, 300)}
      |> Sanbase.Cache.hash()

    Cache.get_or_store(
      cache_key,
      fn ->
        selector = %{infrastructure: "ETH", slug: slug}

        HistoricalBalance.balance_change(selector, addresses, from, to)
        |> case do
          {:ok, result} -> result
          _ -> []
        end
      end
    )
  end
alias __MODULE__
defimpl Sanbase.Signal.Settings, for: EthWalletTriggerSettings do
def triggered?(%EthWalletTriggerSettings{triggered?: triggered}), do: triggered
def evaluate(%EthWalletTriggerSettings{} = settings, _trigger) do
case EthWalletTriggerSettings.get_data(settings) do
list when is_list(list) and list != [] ->
build_result(list, settings)
_ ->
%EthWalletTriggerSettings{settings | triggered?: false}
end
end
# The result heavily depends on `last_triggered`, so just the settings are not enough
def cache_key(%EthWalletTriggerSettings{}), do: :nocache
defp build_result(list, settings) do
template_kv =
Enum.reduce(list, %{}, fn
{project_or_addr, from, %{balance_change: balance_change} = balance_data}, acc ->
case operation_triggered?(balance_change, settings.operation) do
true ->
Map.put(
acc,
to_identifier(project_or_addr),
template_kv(project_or_addr, settings, balance_data, from)
)
false ->
acc
end
end)
%EthWalletTriggerSettings{
settings
| template_kv: template_kv,
triggered?: template_kv != %{}
}
end
defp to_identifier(%Project{slug: slug}), do: slug
defp to_identifier(addr) when is_binary(addr), do: addr
defp operation_text(%{amount_up: _}), do: "increased"
defp operation_text(%{amount_down: _}), do: "decreased"
defp template_kv(%Project{} = project, settings, balance_data, from) do
kv = %{
type: EthWalletTriggerSettings.type(),
operation: settings.operation,
project_name: project.name,
project_slug: project.slug,
asset: settings.asset.slug,
since: DateTime.truncate(from, :second),
balance_change_text: operation_text(settings.operation),
balance_change: balance_data.balance_change,
balance_change_abs: abs(balance_data.balance_change),
balance: balance_data.end_balance,
previous_balance: balance_data.start_balance
}
template = """
🔔 \#{{project_ticker}} | **{{project_name}}**'s {{asset}} balance {{balance_change_text}} by {{balance_change}} since {{since}}.
was: {{previous_balance}}, now: {{balance}}.
"""
{template, kv}
end
defp template_kv(address, settings, balance_data, from) do
asset = settings.asset.slug
kv = %{
type: EthWalletTriggerSettings.type(),
operation: settings.operation,
target: settings.target,
asset: asset,
address: address,
historical_balance_link: SanbaseWeb.Endpoint.historical_balance_url(address, asset),
since: DateTime.truncate(from, :second),
balance_change: balance_data.balance_change,
balance_change_abs: abs(balance_data.balance_change),
balance: balance_data.end_balance,
previous_balance: balance_data.start_balance
}
template = """
🔔The address {{address}}'s {{asset}} balance #{operation_text(settings.operation)} by {{balance_change_abs}} since {{since}}.
was: {{previous_balance}, now: {{balance}}
"""
{template, kv}
end
end
end
| 32.1875 | 134 | 0.626278 |
1c7df663556db006a4e74521c1b322433abb9619 | 1,427 | exs | Elixir | clients/drive/test/files_test.exs | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/drive/test/files_test.exs | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/drive/test/files_test.exs | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
defmodule GoogleApi.Drive.FilesTest do
  # Integration tests that hit the real Google Drive API; they need a valid
  # OAuth token in the AUTH_TOKEN environment variable and a local README.md.
  use GoogleApi.Drive.TestHelper

  alias GoogleApi.Drive.V3.Connection
  alias GoogleApi.Drive.V3.Model.File
  alias GoogleApi.Drive.V3.Model.FileList
  alias GoogleApi.Drive.V3.Api.Files

  test "upload file" do
    conn = Connection.new(token())

    # Multipart upload: metadata (%File{name: ...}) plus the local file body.
    assert {:ok, file} =
             Files.drive_files_create_simple(
               conn,
               "multipart",
               %File{
                 name: "README.md"
               },
               "README.md"
             )

    assert %File{} = file
  end

  test "list files" do
    conn = Connection.new(token())
    assert {:ok, file_list} = Files.drive_files_list(conn)
    assert %FileList{} = file_list
  end

  # Reads the OAuth bearer token from the environment, failing the test with
  # a helpful message when it is missing.
  defp token do
    t = System.get_env("AUTH_TOKEN")
    assert t, "Please set the AUTH_TOKEN environment variable"
    t
  end
end
| 27.980392 | 74 | 0.667134 |
1c7e18163facb00ad1f6cd4dac6132edcf9797b7 | 119 | ex | Elixir | lib/xgps/driver/state.ex | gmcintire/xgps | 56cf4c83dfc05aa22b1d7b7d1d2456cf5acfa7a2 | [
"MIT"
] | null | null | null | lib/xgps/driver/state.ex | gmcintire/xgps | 56cf4c83dfc05aa22b1d7b7d1d2456cf5acfa7a2 | [
"MIT"
] | null | null | null | lib/xgps/driver/state.ex | gmcintire/xgps | 56cf4c83dfc05aa22b1d7b7d1d2456cf5acfa7a2 | [
"MIT"
] | null | null | null | defmodule XGPS.Driver.State do
defstruct [
gps_data: nil,
pid: nil,
port_name: nil,
mod: nil
]
end
| 13.222222 | 30 | 0.613445 |
1c7e6d578862115d3b04e2eeb6c6ad3aaa36f81f | 796 | ex | Elixir | apps/astarte_pairing_api/lib/astarte_pairing_api_web/metrics/pipeline_instrumenter.ex | Spidey20202022/astarte | 3950855c592b34363af0cf7f8a921762ce64e512 | [
"Apache-2.0"
] | 1 | 2020-02-04T13:15:22.000Z | 2020-02-04T13:15:22.000Z | apps/astarte_pairing_api/lib/astarte_pairing_api_web/metrics/pipeline_instrumenter.ex | Spidey20202022/astarte | 3950855c592b34363af0cf7f8a921762ce64e512 | [
"Apache-2.0"
] | 1 | 2020-01-20T09:52:48.000Z | 2020-01-20T09:52:48.000Z | apps/astarte_pairing_api/lib/astarte_pairing_api_web/metrics/pipeline_instrumenter.ex | Spidey20202022/astarte | 3950855c592b34363af0cf7f8a921762ce64e512 | [
"Apache-2.0"
] | 1 | 2020-02-04T13:15:50.000Z | 2020-02-04T13:15:50.000Z | #
# This file is part of Astarte.
#
# Copyright 2020 Ispirata Srl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
defmodule Astarte.Pairing.APIWeb.Metrics.PipelineInstrumenter do
  @moduledoc """
  Prometheus plug-pipeline instrumenter that labels HTTP metrics with the
  request path of the instrumented connection.
  """

  use Prometheus.PlugPipelineInstrumenter

  # Callback invoked by Prometheus.PlugPipelineInstrumenter to resolve the
  # value of the custom `:request_path` label from the Plug conn.
  def label_value(:request_path, %{request_path: path}), do: path
end
| 30.615385 | 74 | 0.768844 |
1c7e7bf30268dad3960962f464ac4629ee040c4c | 1,797 | ex | Elixir | lib/stripe/types.ex | Rutaba/stripity_stripe | 12c525301c781f9c8c7e578cc0d933f5d35183d5 | [
"BSD-3-Clause"
] | null | null | null | lib/stripe/types.ex | Rutaba/stripity_stripe | 12c525301c781f9c8c7e578cc0d933f5d35183d5 | [
"BSD-3-Clause"
] | null | null | null | lib/stripe/types.ex | Rutaba/stripity_stripe | 12c525301c781f9c8c7e578cc0d933f5d35183d5 | [
"BSD-3-Clause"
] | null | null | null | defmodule Stripe.Types do
@moduledoc """
A module that contains shared types matching Stripe schemas.
"""
@type address :: %{
city: String.t() | nil,
country: String.t() | nil,
line1: String.t() | nil,
line2: String.t() | nil,
postal_code: String.t() | nil,
state: String.t() | nil
}
@type dob :: %{
day: 1..31 | nil,
month: 1..12 | nil,
year: pos_integer | nil
}
@type fee :: %{
amount: integer,
application: String.t() | nil,
currency: String.t(),
description: String.t() | nil,
type: String.t()
}
@type japan_address :: %{
city: String.t() | nil,
country: String.t() | nil,
line1: String.t() | nil,
line2: String.t() | nil,
postal_code: String.t() | nil,
state: String.t() | nil,
town: String.t() | nil
}
@type metadata :: %{
optional(String.t()) => String.t()
}
@type shipping :: %{
required(:name) => String.t(),
required(:address) => Stripe.Types.address(),
optional(:carrier) => String.t(),
optional(:eta) => Stripe.timestamp(),
optional(:phone) => String.t(),
optional(:status) => String.t(),
optional(:tracking_number) => String.t(),
optional(:tracking_url) => String.t()
}
@type collection_method_thresholds :: %{
amount_gte: integer | nil,
reset_billing_cycle_anchor: boolean | nil
}
@type transfer_schedule :: %{
delay_days: non_neg_integer,
interval: String.t(),
monthly_anchor: non_neg_integer | nil,
weekly_anchor: String.t() | nil
}
end
| 27.227273 | 62 | 0.501948 |
1c7ed350a05ff2212c9d0ec436f012eac11525e0 | 686 | exs | Elixir | test/yggdrasil/publisher/adapter/icon_test.exs | alexdesousa/icon | 2b363582b129bc0485a39e9845d2a18eabd2f6d6 | [
"MIT"
] | 4 | 2022-01-27T09:10:05.000Z | 2022-03-09T04:38:13.000Z | test/yggdrasil/publisher/adapter/icon_test.exs | alexdesousa/icon | 2b363582b129bc0485a39e9845d2a18eabd2f6d6 | [
"MIT"
] | null | null | null | test/yggdrasil/publisher/adapter/icon_test.exs | alexdesousa/icon | 2b363582b129bc0485a39e9845d2a18eabd2f6d6 | [
"MIT"
] | null | null | null | defmodule Yggdrasil.Publisher.Adapter.IconTest do
use ExUnit.Case, async: true
alias Icon.Schema.Error
alias Yggdrasil.Publisher.Adapter.Icon, as: Publisher
test "cannot publish messages in ICON 2.0 websocket" do
channel = [
name: %{source: :block},
adapter: :icon
]
assert {:error,
%Error{
code: -32_600,
message: "cannot publish messages in the ICON 2.0 blockchain",
reason: :invalid_request
}} = Yggdrasil.publish(channel, "{}")
end
test "start and stop the publisher process" do
assert {:ok, pid} = Publisher.start_link(nil)
assert :ok = Publisher.stop(pid)
end
end
| 26.384615 | 76 | 0.626822 |
1c7f4c9e61e4d9091e259ea1d3b8d07c573165ee | 3,323 | ex | Elixir | lib/pacman/world.ex | zampino/pacman | 889080e26054dd04aa9e3ef5f7971d408a698a86 | [
"MIT"
] | null | null | null | lib/pacman/world.ex | zampino/pacman | 889080e26054dd04aa9e3ef5f7971d408a698a86 | [
"MIT"
] | null | null | null | lib/pacman/world.ex | zampino/pacman | 889080e26054dd04aa9e3ef5f7971d408a698a86 | [
"MIT"
] | null | null | null | defmodule Pacman.World do
@size 20
@directions [right: {1,0}, up: {0, -1}, left: {-1, 0}, down: {0, 1}]
@moduledoc """
Keeps track of instant state of the Grid
has two record definitions:
Grid and Pacman.
Grid#pacmans is a hash of :name => Pacman records
Stores 'Class instance variables':
- @size (of the grid)
- @directions human -> vector translations
### NOTE
World should rather implement a protocol for the Grid
in the sense of http://elixir-lang.org/getting_started/4.html
or maybe something like @impl
"""
defmodule Grid do
defstruct pacmans: HashDict.new,
food: [],
phantoms: []
def replace_pacmans(grid, new_pacmans) do
%Grid{grid | pacmans: new_pacmans}
end
# def update_pacmans(grid, function) do
# Map.update! grid, :pacmans, function
# end
end
defmodule User do
defstruct direction: {1,0},
color: "#FFF",
position: {
div(Module.get_attribute(Pacman.World, :size), 2),
div(Module.get_attribute(Pacman.World, :size), 2)
},
score: 0
end
def new do
%Grid{}
end
def spawn_user_events(name) do
pid = spawn_link(Pacman.UserEvents, :events_loop, [name])
Process.register pid, name
end
@doc "register a new pacman user process under its name and updates the grid"
def register_pacman(%Grid{} = grid, name) do
new_pacmans = HashDict.put_new grid.pacmans, name, %User{}
spawn_user_events(name)
Grid.replace_pacmans grid, new_pacmans
end
@doc "removes pacman"
def remove_pacman(%Grid{} = grid, name) do
new_pacmans = HashDict.delete grid.pacmans, name
if name_pid = Process.whereis(name) do
IO.puts "exiting PID: #{name}"
Process.exit name_pid, :normal
end
Grid.replace_pacmans grid, new_pacmans
end
def move_pacmans(grid) do
new_pacmans = grid.pacmans |>
Enum.map(&displace/1) |>
Enum.into(HashDict.new)
Grid.replace_pacmans grid, new_pacmans
end
def displace({name, pcm}) do
{dx, dy} = ask_direction(name, pcm.direction)
{x, y} = pcm.position
new_x = wrap_position(x, dx)
new_y = wrap_position(y, dy)
new_position = {new_x, new_y}
new_pcm = %User{pcm | position: new_position, direction: {dx, dy}}
{name, new_pcm}
end
@doc "again a 'synchronous call' to ask the direction to the user's process which falls back to the old direction"
def ask_direction(name, old_dir) do
send name, :fetch_direction
receive do
{:new_direction, ^name, dir} ->
# IO.puts "#{name} receives new direction #{inspect(dir)}!"
translate_direction(dir)
after
1 ->
# IO.puts "#{name} gets current direction #{inspect(old_dir)}"
old_dir
end
end
def translate_direction(name) do
@directions[name]
end
def wrap_position(value, delta) do
rem(@size + value + delta, @size)
end
@doc "a Grid's instantaneous representation"
def represent(%Grid{} = grid) do
represent_one = fn({name, pcm}) ->
{x, y} = pcm.position
[name: name, position: [x: x, y: y]]
end
representation = Enum.map(grid.pacmans, represent_one)
{:ok, json_str} = JSON.encode representation
json_str
end
end
| 26.798387 | 116 | 0.633464 |
1c7f52714ada8d5521e2bde35a81ac8059a135e8 | 2,203 | exs | Elixir | test/time_stamp_test.exs | jeremysquires/shipsim | cacf0b7b825a3086e1cc1676374691de3d1cb0c5 | [
"MIT"
] | null | null | null | test/time_stamp_test.exs | jeremysquires/shipsim | cacf0b7b825a3086e1cc1676374691de3d1cb0c5 | [
"MIT"
] | null | null | null | test/time_stamp_test.exs | jeremysquires/shipsim | cacf0b7b825a3086e1cc1676374691de3d1cb0c5 | [
"MIT"
] | 1 | 2019-06-04T04:49:42.000Z | 2019-06-04T04:49:42.000Z | defmodule TimeStampTest do
use ExUnit.Case
describe "TimeStamp doc tests" do
doctest TimeStamp
end
def time_data do
year2020 = "2020-01-01T07:40Z"
year2019 = "2019-01-01T07:40Z"
year2019_delta5minutes = "2019-01-01T07:45Z"
{
:ok,
year2020: year2020,
year2019: year2019,
year2019_delta5minutes: year2019_delta5minutes,
}
end
setup do
time_data()
end
describe "TimeStamp tests" do
test "TimeStamp.delta_time negative year of seconds", context do
year2020 = context[:year2020]
year2019 = context[:year2019]
year_seconds = TimeStamp.delta_time(year2020, year2019)
# IO.puts "Negative year of seconds is #{inspect year_seconds}"
assert year_seconds == -31536000
end
test "TimeStamp.delta_time positive 5 minutes", context do
year2019 = context[:year2019]
year2019_delta5minutes = context[:year2019_delta5minutes]
delta_seconds = TimeStamp.delta_time(year2019, year2019_delta5minutes)
# IO.puts "Delta 5 minutes is #{inspect delta_seconds}"
assert delta_seconds == 5 * 60
end
test "TimeStamp.increment_time +1 minute", context do
year2019 = context[:year2019]
increment_minute = TimeStamp.increment_time(year2019, 60)
assert increment_minute == "2019-01-01T07:41Z"
end
test "TimeStamp.increment_time -1 minute", context do
year2019 = context[:year2019]
increment_minute = TimeStamp.increment_time(year2019, -60)
assert increment_minute == "2019-01-01T07:39Z"
end
test "TimeStamp.increment_time +30 secs round up", context do
year2019 = context[:year2019]
increment_minute = TimeStamp.increment_time(year2019, 60)
assert increment_minute == "2019-01-01T07:41Z"
end
test "TimeStamp.increment_time +29 secs rounds down", context do
year2019 = context[:year2019]
increment_round_down = TimeStamp.increment_time(year2019, 29)
assert increment_round_down == "2019-01-01T07:40Z"
end
test "TimeStamp comparison", context do
year2020 = context[:year2020]
year2019 = context[:year2019]
# IO.puts "Negative year of seconds is #{inspect year_seconds}"
assert year2019 < year2020
end
end
end
| 30.178082 | 76 | 0.712665 |
1c7f85b2376b8c25c59d29a72f10675b245ed412 | 2,578 | ex | Elixir | lib/raft_fleet/process_and_disk_log_index_inspector.ex | queer/raft_fleet | 4b18f93428c8a948e621eb92ecc771816df8347f | [
"MIT"
] | 66 | 2016-07-12T16:55:22.000Z | 2021-12-09T09:41:14.000Z | lib/raft_fleet/process_and_disk_log_index_inspector.ex | queer/raft_fleet | 4b18f93428c8a948e621eb92ecc771816df8347f | [
"MIT"
] | 6 | 2017-10-26T01:36:58.000Z | 2021-07-25T16:56:03.000Z | lib/raft_fleet/process_and_disk_log_index_inspector.ex | queer/raft_fleet | 4b18f93428c8a948e621eb92ecc771816df8347f | [
"MIT"
] | 4 | 2017-07-31T20:48:51.000Z | 2020-10-25T08:06:31.000Z | use Croma
defmodule RaftFleet.ProcessAndDiskLogIndexInspector do
@moduledoc """
A GenServer that reports "eligibility" of a node for hosting the 1st member of a new consensus group.
When starting a new consensus group, a manager process collects reports from all active nodes and determine where to spawn the 1st member process.
The report includes:
- whether there exists a process whose registered name equals to the consensus group name
- last log index in locally stored files (if any)
"""
use GenServer
alias Croma.Result, as: R
alias RaftedValue.LogIndex
alias RaftFleet.PerMemberOptions
defun start_link([]) :: {:ok, pid} do
GenServer.start_link(__MODULE__, :ok, [name: __MODULE__])
end
def init(:ok) do
{:ok, nil}
end
def handle_call({:get_log_index, name}, _from, nil) do
{:reply, make_reply(name), nil}
end
defunp make_reply(name :: atom) :: {:ok, nil | LogIndex.t} | {:error, :process_exists} do
case Process.whereis(name) do
nil ->
case PerMemberOptions.build(name) |> Keyword.get(:persistence_dir) do
nil -> {:ok, nil}
dir -> {:ok, RaftedValue.read_last_log_index(dir)}
end
_pid -> {:error, :process_exists}
end
end
defun find_node_having_latest_log_index(name :: atom) :: {:ok, nil | node} | {:error, :process_exists} do
case RaftFleet.active_nodes() |> Enum.flat_map(fn {_, ns} -> ns end) do
[n] when n -> {:ok, n}
nodes ->
{node_result_pairs, _bad_nodes} = GenServer.multi_call(nodes, __MODULE__, {:get_log_index, name}, 2000)
if Enum.any?(node_result_pairs, &match?({_node, {:error, :process_exists}}, &1)) do
{:error, :process_exists}
else
Enum.map(node_result_pairs, fn {n, {:ok, i}} -> {n, i} end)
|> Enum.reject(&match?({_node, nil}, &1))
|> find_most_preferable_node()
|> R.pure()
end
end
end
defunp find_most_preferable_node(pairs :: [{node, LogIndex.t}]) :: nil | node do
case pairs do
[] -> nil
_ ->
Enum.reduce(pairs, fn(pair1, pair2) ->
if prefer_left?(pair1, pair2), do: pair1, else: pair2
end)
|> elem(0)
end
end
defp prefer_left?({n1, i1}, {n2, i2}) do
cond do
i1 > i2 -> true
i1 < i2 -> false
n1 == Node.self() -> true # prefer local node if available
n2 == Node.self() -> false
:otherwise -> Enum.random([true, false])
end
end
def handle_info(_msg, nil) do
{:noreply, nil}
end
end
| 31.439024 | 148 | 0.615593 |
1c7f8d3993a8a0b7de5b153407fd5eb3bd14d2f6 | 617 | exs | Elixir | MeetingLocation.Elixir/mix.exs | dyslexicanaboko/meeting-location | 66be425c1df266f68d575ca44a98e5d0d27ecb81 | [
"MIT"
] | null | null | null | MeetingLocation.Elixir/mix.exs | dyslexicanaboko/meeting-location | 66be425c1df266f68d575ca44a98e5d0d27ecb81 | [
"MIT"
] | null | null | null | MeetingLocation.Elixir/mix.exs | dyslexicanaboko/meeting-location | 66be425c1df266f68d575ca44a98e5d0d27ecb81 | [
"MIT"
] | null | null | null | defmodule Meetinglocation.MixProject do
use Mix.Project
def project do
[
app: :meetinglocation,
version: "0.1.0",
elixir: "~> 1.10",
start_permanent: Mix.env() == :prod,
deps: deps()
]
end
# Run "mix help compile.app" to learn about applications.
def application do
[
extra_applications: [:logger]
]
end
# Run "mix help deps" to learn about dependencies.
defp deps do
[
{:mssqlex, "~> 1.1.0"}
# {:dep_from_hexpm, "~> 0.3.0"},
# {:dep_from_git, git: "https://github.com/elixir-lang/my_dep.git", tag: "0.1.0"}
]
end
end
| 20.566667 | 87 | 0.576985 |
1c7f911e8b11b33c7d5eaffd81ee49e510e23ac9 | 1,786 | ex | Elixir | clients/content/lib/google_api/content/v21/model/orders_in_store_refund_line_item_response.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | null | null | null | clients/content/lib/google_api/content/v21/model/orders_in_store_refund_line_item_response.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | null | null | null | clients/content/lib/google_api/content/v21/model/orders_in_store_refund_line_item_response.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | 1 | 2020-10-04T10:12:44.000Z | 2020-10-04T10:12:44.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Content.V21.Model.OrdersInStoreRefundLineItemResponse do
  @moduledoc """

  ## Attributes

  * `executionStatus` (*type:* `String.t`, *default:* `nil`) - The status of the execution.
    Acceptable values are:
    - "`duplicate`"
    - "`executed`"
  * `kind` (*type:* `String.t`, *default:* `content#ordersInStoreRefundLineItemResponse`) - Identifies what kind of resource this is. Value: the fixed string "content#ordersInStoreRefundLineItemResponse".
  """

  # NOTE: auto-generated model; struct and JSON (de)serialization are injected
  # by `GoogleApi.Gax.ModelBase`, and each `field/1` call registers one
  # JSON attribute on the struct.
  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :executionStatus => String.t(),
          :kind => String.t()
        }

  field(:executionStatus)
  field(:kind)
end
defimpl Poison.Decoder, for: GoogleApi.Content.V21.Model.OrdersInStoreRefundLineItemResponse do
  # Delegate decoding to the model's own `decode/2`, generated by ModelBase.
  def decode(value, options),
    do: GoogleApi.Content.V21.Model.OrdersInStoreRefundLineItemResponse.decode(value, options)
end
defimpl Poison.Encoder, for: GoogleApi.Content.V21.Model.OrdersInStoreRefundLineItemResponse do
  # Encoding is identical for every generated model, so hand off to the
  # shared ModelBase encoder.
  def encode(value, options),
    do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 33.074074 | 206 | 0.733483 |
1c7f9c8bc2f881579afbc8bde0c32e317237ee14 | 2,565 | exs | Elixir | test/meta_test.exs | jeroenvisser101/cldr_numbers | 9a529817fe8d9f61be330fa970a5a0ffa443a177 | [
"Apache-2.0"
] | 21 | 2019-12-21T23:25:14.000Z | 2022-03-22T00:40:34.000Z | test/meta_test.exs | jeroenvisser101/cldr_numbers | 9a529817fe8d9f61be330fa970a5a0ffa443a177 | [
"Apache-2.0"
] | 15 | 2019-06-14T16:29:23.000Z | 2022-03-02T07:56:33.000Z | test/meta_test.exs | jeroenvisser101/cldr_numbers | 9a529817fe8d9f61be330fa970a5a0ffa443a177 | [
"Apache-2.0"
] | 7 | 2017-10-05T11:35:36.000Z | 2018-08-20T12:17:03.000Z | defmodule Cldr.Number.Format.Meta.Test do
use ExUnit.Case, async: true
test "that we can create a default metadata struct" do
assert Cldr.Number.Format.Meta.new() ==
%Cldr.Number.Format.Meta{
exponent_digits: 0,
exponent_sign: false,
format: [
positive: [format: "#"],
negative: [minus: '-', format: :same_as_positive]
],
fractional_digits: %{max: 0, min: 0},
grouping: %{fraction: %{first: 0, rest: 0}, integer: %{first: 0, rest: 0}},
integer_digits: %{max: 0, min: 1},
multiplier: 1,
number: 0,
padding_char: " ",
padding_length: 0,
round_nearest: 0,
scientific_rounding: 0,
significant_digits: %{max: 0, min: 0}
}
end
test "setting the meta fields" do
alias Cldr.Number.Format.Meta
meta =
Meta.new()
|> Meta.put_integer_digits(2)
|> Meta.put_fraction_digits(3)
|> Meta.put_significant_digits(4)
assert meta.integer_digits == %{max: 0, min: 2}
assert meta.fractional_digits == %{max: 0, min: 3}
assert meta.significant_digits == %{max: 0, min: 4}
meta =
meta
|> Meta.put_exponent_digits(5)
|> Meta.put_exponent_sign(true)
|> Meta.put_scientific_rounding_digits(6)
|> Meta.put_round_nearest_digits(7)
|> Meta.put_padding_length(8)
|> Meta.put_padding_char("Z")
|> Meta.put_multiplier(9)
assert meta.exponent_digits == 5
assert meta.exponent_sign == true
assert meta.scientific_rounding == 6
assert meta.round_nearest == 7
assert meta.padding_length == 8
assert meta.padding_char == "Z"
assert meta.multiplier == 9
meta =
meta
|> Meta.put_fraction_grouping(10)
|> Meta.put_integer_grouping(11)
assert meta.grouping == %{
fraction: %{first: 10, rest: 10},
integer: %{first: 11, rest: 11}
}
meta =
Meta.new()
|> Meta.put_integer_digits(12, 13)
|> Meta.put_fraction_digits(14, 15)
|> Meta.put_significant_digits(16, 17)
assert meta.integer_digits == %{max: 13, min: 12}
assert meta.fractional_digits == %{max: 15, min: 14}
assert meta.significant_digits == %{max: 17, min: 16}
meta =
Meta.new()
|> Meta.put_format([format: "#"], format: "##")
assert meta.format == [positive: [format: "#"], negative: [format: "##"]]
end
end
| 30.535714 | 90 | 0.564133 |
1c7fa08d9d6945d4afaf7df326a1f1596532c11c | 59 | ex | Elixir | lib/wiki/repo.ex | deadmp/wiki | 29d98ed0517e26d2e3f1c75f70852bc7512d87fc | [
"MIT"
] | null | null | null | lib/wiki/repo.ex | deadmp/wiki | 29d98ed0517e26d2e3f1c75f70852bc7512d87fc | [
"MIT"
] | null | null | null | lib/wiki/repo.ex | deadmp/wiki | 29d98ed0517e26d2e3f1c75f70852bc7512d87fc | [
"MIT"
] | null | null | null | defmodule Wiki.Repo do
use Ecto.Repo, otp_app: :wiki
end
| 14.75 | 31 | 0.745763 |
1c7fb40242d3bc1245b691a5b776a6d6c5873ae2 | 1,317 | ex | Elixir | lib/auto_api/commands/seats_command.ex | nonninz/auto-api-elixir | 53e11542043285e94bbb5a0a3b8ffff0b1b47167 | [
"MIT"
] | null | null | null | lib/auto_api/commands/seats_command.ex | nonninz/auto-api-elixir | 53e11542043285e94bbb5a0a3b8ffff0b1b47167 | [
"MIT"
] | null | null | null | lib/auto_api/commands/seats_command.ex | nonninz/auto-api-elixir | 53e11542043285e94bbb5a0a3b8ffff0b1b47167 | [
"MIT"
] | null | null | null | # AutoAPI
# The MIT License
#
# Copyright (c) 2018- High-Mobility GmbH (https://high-mobility.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
defmodule AutoApi.SeatsCommand do
  @moduledoc """
  Handles Seats commands and applies binary commands on `%AutoApi.SeatsState{}`
  """

  # All parsing/execution behaviour is injected by the `AutoApi.Command`
  # macro; this module only binds it to the Seats capability.
  use AutoApi.Command
end
| 45.413793 | 79 | 0.77145 |
1c7fb5db02b2a5a98e302bc663c43c0c11231dfd | 390 | ex | Elixir | lib/syn_osc/voice/off.ex | camshaft/syn_osc_ex | c7ed257f2ea772f197096d356467e812a0d5e59a | [
"MIT"
] | 1 | 2020-12-08T01:44:03.000Z | 2020-12-08T01:44:03.000Z | lib/syn_osc/voice/off.ex | camshaft/syn_osc_ex | c7ed257f2ea772f197096d356467e812a0d5e59a | [
"MIT"
] | null | null | null | lib/syn_osc/voice/off.ex | camshaft/syn_osc_ex | c7ed257f2ea772f197096d356467e812a0d5e59a | [
"MIT"
] | null | null | null | defmodule SynOSC.Voice.Off do
defstruct id: nil,
voice: nil,
relative_velocity: false,
velocity: nil
end
defimpl OSC.Encoder, for: SynOSC.Voice.Off do
use SynOSC.Voice
def encode(message, options) do
message
|> call("OFF")
|> set_arguments(format_velocity(message))
|> OSC.Encoder.encode(options)
end
def flag(_), do: []
end
| 19.5 | 46 | 0.630769 |
1c7fcccee49629a0d0ca6bf990d4c4ea53cb5613 | 110 | exs | Elixir | .formatter.exs | thedelchop/ecto_job | 0157d857e4436a35ebcc0a9f5cd4b28b33292f62 | [
"MIT"
] | null | null | null | .formatter.exs | thedelchop/ecto_job | 0157d857e4436a35ebcc0a9f5cd4b28b33292f62 | [
"MIT"
] | null | null | null | .formatter.exs | thedelchop/ecto_job | 0157d857e4436a35ebcc0a9f5cd4b28b33292f62 | [
"MIT"
] | null | null | null | [
import_deps: [:ecto],
inputs: ["*.{ex,exs}", "{config,lib,test}/**/*.{ex,exs}"],
subdirectories: []
]
| 18.333333 | 60 | 0.527273 |
1c7fd23d0cc0bab71c6d2e53492cfdb2d4a46bb1 | 705 | ex | Elixir | lib/score_web/gettext.ex | JulianaHelena5/score | 153122d88586d00c11c73ac447fd14cf95599805 | [
"MIT"
] | 3 | 2021-08-09T14:17:42.000Z | 2022-03-01T17:31:03.000Z | lib/score_web/gettext.ex | JulianaHelena5/score | 153122d88586d00c11c73ac447fd14cf95599805 | [
"MIT"
] | 3 | 2021-08-09T12:27:01.000Z | 2021-08-10T08:54:22.000Z | lib/score_web/gettext.ex | JulianaHelena5/score | 153122d88586d00c11c73ac447fd14cf95599805 | [
"MIT"
] | null | null | null | defmodule ScoreWeb.Gettext do
@moduledoc """
A module providing Internationalization with a gettext-based API.
By using [Gettext](https://hexdocs.pm/gettext),
your module gains a set of macros for translations, for example:
import ScoreWeb.Gettext
# Simple translation
gettext("Here is the string to translate")
# Plural translation
ngettext("Here is the string to translate",
"Here are the strings to translate",
3)
# Domain-based translation
dgettext("errors", "Here is the error message to translate")
See the [Gettext Docs](https://hexdocs.pm/gettext) for detailed usage.
"""
use Gettext, otp_app: :score
end
| 28.2 | 72 | 0.675177 |
1c7fdd82d9d005ee01a95f3045071a9cc675761f | 518 | exs | Elixir | test/doctor_schedule/accounts/services/create_user_test.exs | theguuholi/doctor_schedule | a92dfa92d1398c59718be2428d36bb326d6bc361 | [
"MIT"
] | 2 | 2022-03-11T12:15:01.000Z | 2022-03-11T13:53:21.000Z | test/doctor_schedule/accounts/services/create_user_test.exs | theguuholi/doctor_schedule | a92dfa92d1398c59718be2428d36bb326d6bc361 | [
"MIT"
] | 3 | 2020-12-12T22:10:17.000Z | 2021-04-05T12:53:12.000Z | test/doctor_schedule/accounts/services/create_user_test.exs | theguuholi/doctor_schedule | a92dfa92d1398c59718be2428d36bb326d6bc361 | [
"MIT"
] | 1 | 2021-02-26T04:24:34.000Z | 2021-02-26T04:24:34.000Z | defmodule DoctorSchedule.Accounts.Services.CreateUserTest do
use DoctorSchedule.DataCase
alias DoctorSchedule.Accounts.Services.CreateUser
test "it should create a user with and without cache" do
user_admin = %{
"email" => "9@3.com",
"first_name" => "5@3.com",
"last_name" => "5@3.com",
"password" => "5@3.com",
"password_confirmation" => "5@3.com",
"role" => "admin"
}
{:ok, user} = CreateUser.execute(user_admin)
assert user.email == "9@3.com"
end
end
| 24.666667 | 60 | 0.627413 |
1c80021f7c1ea7dd44f3769feea02f73d7b65123 | 6,613 | ex | Elixir | instrumentation/nginx/test/instrumentation/lib/mix/tasks/dockerfiles.ex | tobiasstadler/opentelemetry-cpp-contrib | 57852b2d1629afb9ae0f6ca699a27d030163570d | [
"Apache-2.0"
] | 1 | 2022-03-25T18:20:31.000Z | 2022-03-25T18:20:31.000Z | instrumentation/nginx/test/instrumentation/lib/mix/tasks/dockerfiles.ex | tobiasstadler/opentelemetry-cpp-contrib | 57852b2d1629afb9ae0f6ca699a27d030163570d | [
"Apache-2.0"
] | 3 | 2022-03-24T10:20:53.000Z | 2022-03-31T04:36:53.000Z | instrumentation/nginx/test/instrumentation/lib/mix/tasks/dockerfiles.ex | tobiasstadler/opentelemetry-cpp-contrib | 57852b2d1629afb9ae0f6ca699a27d030163570d | [
"Apache-2.0"
] | null | null | null | defmodule Mix.Tasks.Dockerfiles do
use Mix.Task
@grpc_version "v1.36.4"
@otel_cpp_version "v1.3.0"
def run([out_dir | combos]) do
out_dir_abs = Path.expand(out_dir)
Enum.map(combos, fn v -> gen_dockerfile(v) end)
|> Enum.each(fn {job, content} ->
out_path = Path.join(out_dir_abs, filename(job))
File.write!(out_path, content)
end)
end
defp filename(%{os: os, version: version, nginx: nginx}) do
"Dockerfile.#{os}-#{version}.#{nginx}"
end
defp parse_job(identifier) do
[distro, nginx] = String.split(identifier, ":")
[os, version] = String.split(distro, "-")
[ver_major, ver_minor] = String.split(version, ".") |> Enum.map(&String.to_integer/1)
%{os: os, version: version, nginx: nginx, version_major: ver_major, version_minor: ver_minor}
end
defp gen_dockerfile(identifier) do
job = parse_job(identifier)
{job,
Enum.join(
[
header(job),
apt_install_base_pkgs(job),
custom_cmake(job),
custom_nginx(job),
apt_install_custom_pkgs(job),
build_steps(job)
],
"\n"
)}
end
defp header(%{os: os, version: version}) do
"""
ARG image=#{os}:#{version}
FROM $image AS build
"""
end
defp default_packages() do
[
"build-essential",
"autoconf",
"libtool",
"pkg-config",
"ca-certificates",
"gcc",
"g++",
"git",
"libcurl4-openssl-dev",
"libpcre3-dev",
"gnupg2",
"lsb-release",
"curl",
"apt-transport-https",
"software-properties-common",
"zlib1g-dev"
]
end
defp base_packages(%{nginx: "mainline"}), do: default_packages()
defp base_packages(%{nginx: "stable", version_major: ver_maj}) when ver_maj >= 20 do
["nginx" | default_packages()]
end
defp base_packages(_), do: default_packages()
defp base_packages_for_version(%{version_major: major}) when major >= 20, do: ["cmake"]
defp base_packages_for_version(_), do: []
defp custom_cmake(%{version_major: major}) when major >= 20, do: ""
defp custom_cmake(%{os: "debian"}), do: ""
defp custom_cmake(%{os: "ubuntu"}) do
"""
RUN curl -o /etc/apt/trusted.gpg.d/kitware.asc https://apt.kitware.com/keys/kitware-archive-latest.asc \\
&& apt-add-repository "deb https://apt.kitware.com/ubuntu/ `lsb_release -cs` main"
"""
end
defp mainline_apt(%{os: "debian"}), do: "http://nginx.org/packages/mainline/debian"
defp mainline_apt(%{os: "ubuntu"}), do: "http://nginx.org/packages/mainline/ubuntu"
defp stable_apt(%{os: "debian"}), do: "http://nginx.org/packages/debian"
defp stable_apt(%{os: "ubuntu"}), do: "http://nginx.org/packages/ubuntu"
defp custom_nginx_step(apt_url) do
"""
RUN curl -o /etc/apt/trusted.gpg.d/nginx_signing.asc https://nginx.org/keys/nginx_signing.key \\
&& apt-add-repository "deb #{apt_url} `lsb_release -cs` nginx" \\
&& /bin/bash -c 'echo -e "Package: *\\nPin: origin nginx.org\\nPin: release o=nginx\\nPin-Priority: 900"' | tee /etc/apt/preferences.d/99nginx
"""
end
defp custom_nginx(%{nginx: "stable", os: "debian"} = job) do
custom_nginx_step(stable_apt(job))
end
defp custom_nginx(%{nginx: "mainline", os: "debian"} = job) do
custom_nginx_step(mainline_apt(job))
end
defp custom_nginx(%{nginx: "mainline"} = job) do
custom_nginx_step(mainline_apt(job))
end
defp custom_nginx(%{nginx: "stable", os: "ubuntu", version_major: 18} = job) do
custom_nginx_step(stable_apt(job))
end
defp custom_nginx(_), do: ""
defp custom_packages_for_version(%{os: "ubuntu", nginx: "stable", version_major: 18}) do
["cmake", "nginx"]
end
defp custom_packages_for_version(%{version_major: ver_major}) when ver_major < 20, do: ["cmake"]
defp custom_packages_for_version(_), do: []
defp custom_packages(%{os: "debian"}) do
["cmake", "nginx"]
end
defp custom_packages(%{nginx: "mainline"} = job) do
["nginx" | custom_packages_for_version(job)]
end
defp custom_packages(job), do: custom_packages_for_version(job)
defp apt_install_base_pkgs(job) do
packages = base_packages(job) ++ base_packages_for_version(job)
package_install(packages)
end
defp apt_install_custom_pkgs(job) do
custom_packages(job)
|> package_install()
end
defp package_install([]), do: ""
defp package_install(packages) do
"""
RUN apt-get update \\
&& DEBIAN_FRONTEND=noninteractive TZ="Europe/London" \\
apt-get install --no-install-recommends --no-install-suggests -y \\
#{combine(packages, " ")}
"""
end
defp combine(lines, sep) do
Enum.join(lines, sep)
end
defp build_steps(_) do
"""
RUN git clone --shallow-submodules --depth 1 --recurse-submodules -b #{@grpc_version} \\
https://github.com/grpc/grpc \\
&& cd grpc \\
&& mkdir -p cmake/build \\
&& cd cmake/build \\
&& cmake \\
-DgRPC_INSTALL=ON \\
-DgRPC_BUILD_TESTS=OFF \\
-DCMAKE_INSTALL_PREFIX=/install \\
-DCMAKE_BUILD_TYPE=Release \\
-DgRPC_BUILD_GRPC_NODE_PLUGIN=OFF \\
-DgRPC_BUILD_GRPC_OBJECTIVE_C_PLUGIN=OFF \\
-DgRPC_BUILD_GRPC_PHP_PLUGIN=OFF \\
-DgRPC_BUILD_GRPC_PHP_PLUGIN=OFF \\
-DgRPC_BUILD_GRPC_PYTHON_PLUGIN=OFF \\
-DgRPC_BUILD_GRPC_RUBY_PLUGIN=OFF \\
../.. \\
&& make -j2 \\
&& make install
RUN git clone --shallow-submodules --depth 1 --recurse-submodules -b #{@otel_cpp_version} \\
https://github.com/open-telemetry/opentelemetry-cpp.git \\
&& cd opentelemetry-cpp \\
&& mkdir build \\
&& cd build \\
&& cmake -DCMAKE_BUILD_TYPE=Release \\
-DCMAKE_INSTALL_PREFIX=/install \\
-DCMAKE_PREFIX_PATH=/install \\
-DWITH_OTLP=ON \\
-DWITH_OTLP_GRPC=ON \\
-DWITH_OTLP_HTTP=OFF \\
-DBUILD_TESTING=OFF \\
-DWITH_EXAMPLES=OFF \\
-DCMAKE_POSITION_INDEPENDENT_CODE=ON \\
.. \\
&& make -j2 \\
&& make install
RUN mkdir -p otel-nginx/build && mkdir -p otel-nginx/src
COPY src otel-nginx/src/
COPY CMakeLists.txt nginx.cmake otel-nginx/
RUN cd otel-nginx/build \\
&& cmake -DCMAKE_BUILD_TYPE=Release \\
-DCMAKE_PREFIX_PATH=/install \\
-DCMAKE_INSTALL_PREFIX=/usr/share/nginx/modules \\
.. \\
&& make -j2 \\
&& make install
FROM scratch AS export
COPY --from=build /otel-nginx/build/otel_ngx_module.so .
FROM build AS run
CMD ["/usr/sbin/nginx", "-g", "daemon off;"]
"""
end
end
| 29.788288 | 150 | 0.627401 |
1c8026d7e942a71403ff1b167c04c2c5e5ef1124 | 1,041 | ex | Elixir | lib/my_app_808732/endpoint.ex | wsmoak/my_app_808732 | f4f92dd5cb2ffa2d6854a6573ba092ef433ab85d | [
"MIT"
] | null | null | null | lib/my_app_808732/endpoint.ex | wsmoak/my_app_808732 | f4f92dd5cb2ffa2d6854a6573ba092ef433ab85d | [
"MIT"
] | null | null | null | lib/my_app_808732/endpoint.ex | wsmoak/my_app_808732 | f4f92dd5cb2ffa2d6854a6573ba092ef433ab85d | [
"MIT"
] | null | null | null | defmodule MyApp_808732.Endpoint do
use Phoenix.Endpoint, otp_app: :my_app_808732
socket "/socket", MyApp_808732.UserSocket
# Serve at "/" the static files from "priv/static" directory.
#
# You should set gzip to true if you are running phoenix.digest
# when deploying your static files in production.
plug Plug.Static,
at: "/", from: :my_app_808732, gzip: false,
only: ~w(css fonts images js favicon.ico robots.txt)
# Code reloading can be explicitly enabled under the
# :code_reloader configuration of your endpoint.
if code_reloading? do
socket "/phoenix/live_reload/socket", Phoenix.LiveReloader.Socket
plug Phoenix.LiveReloader
plug Phoenix.CodeReloader
end
plug Plug.RequestId
plug Plug.Logger
plug Plug.Parsers,
parsers: [:urlencoded, :multipart, :json],
pass: ["*/*"],
json_decoder: Poison
plug Plug.MethodOverride
plug Plug.Head
plug Plug.Session,
store: :cookie,
key: "_my_app_808732_key",
signing_salt: "AuMnm4V8"
plug MyApp_808732.Router
end
| 26.025 | 69 | 0.71854 |
1c805b24dbe300f7b038a42b38d6a76712d20274 | 3,071 | ex | Elixir | lib/mix/tasks/extension/phoenix/pow.extension.phoenix.mailer.gen.templates.ex | patrickbiermann/pow | ebc2ac7d6e15961dac4be38091ff75dae0d26554 | [
"MIT"
] | 4 | 2018-05-07T16:37:15.000Z | 2018-07-14T00:44:12.000Z | lib/mix/tasks/extension/phoenix/pow.extension.phoenix.mailer.gen.templates.ex | patrickbiermann/pow | ebc2ac7d6e15961dac4be38091ff75dae0d26554 | [
"MIT"
] | null | null | null | lib/mix/tasks/extension/phoenix/pow.extension.phoenix.mailer.gen.templates.ex | patrickbiermann/pow | ebc2ac7d6e15961dac4be38091ff75dae0d26554 | [
"MIT"
] | null | null | null | defmodule Mix.Tasks.Pow.Extension.Phoenix.Mailer.Gen.Templates do
@shortdoc "Generates mailer views and templates for extensions"
@moduledoc """
Generates mailer views and templates for extensions.
mix pow.extension.phoenix.mailer.gen.templates --extension PowEmailConfirmation --extension PowResetPassword
## Arguments
* `--extension` - extension to generate templates for
* `--context-app` - app to use for path and module names
"""
use Mix.Task
alias Mix.{Pow, Pow.Extension, Pow.Phoenix, Pow.Phoenix.Mailer}
@switches [context_app: :string, extension: :keep]
@default_opts []
@mix_task "pow.extension.phoenix.mailer.gen.templates"
@impl true
def run(args) do
Pow.no_umbrella!(@mix_task)
Pow.ensure_phoenix!(@mix_task, args)
args
|> Pow.parse_options(@switches, @default_opts)
|> create_template_files()
|> print_shell_instructions()
end
@extension_templates %{
PowResetPassword => [
{"mailer", ~w(reset_password)}
],
PowEmailConfirmation => [
{"mailer", ~w(email_confirmation)}
],
PowInvitation => [
{"mailer", ~w(invitation)}
]
}
defp create_template_files({config, _parsed, _invalid}) do
structure = Phoenix.parse_structure(config)
web_module = structure[:web_module]
web_prefix = structure[:web_prefix]
web_app = structure[:web_app]
extensions =
config
|> Extension.extensions(web_app)
|> Enum.map(fn extension ->
templates = Map.get(@extension_templates, extension, [])
create_views_and_templates(extension, templates, web_module, web_prefix)
extension
end)
%{extensions: extensions, web_app: web_app, structure: structure}
end
defp create_views_and_templates(extension, [], _web_module, _web_prefix) do
Mix.shell().info("Notice: No mailer view or template files will be generated for #{inspect extension} as this extension doesn't have any mailer views defined.")
end
defp create_views_and_templates(extension, templates, web_module, web_prefix) do
Enum.each(templates, fn {name, mails} ->
mails = Enum.map(mails, &String.to_atom/1)
Mailer.create_view_file(extension, name, web_module, web_prefix, mails)
Mailer.create_templates(extension, name, web_prefix, mails)
end)
end
defp print_shell_instructions(%{extensions: [], web_app: web_app}) do
Extension.no_extensions_error(web_app)
end
defp print_shell_instructions(%{structure: %{web_module: web_base, web_prefix: web_prefix}}) do
Mix.shell().info(
"""
Pow mailer templates has been installed in your phoenix app!
Remember to set up #{web_prefix}.ex with `mailer_view/0` function if you haven't already:
defmodule #{inspect(web_base)} do
# ...
def mailer_view do
quote do
use Phoenix.View, root: "#{web_prefix}/templates",
namespace: #{inspect(web_base)}
use Phoenix.HTML
end
end
# ...
end
""")
end
end
| 29.815534 | 164 | 0.672094 |
1c8061d5265a9866977e1abeab99ba8f05d1aad6 | 117 | ex | Elixir | lib/bike_brigade_web/delivery_expired_error.ex | bikebrigade/dispatch | eb622fe4f6dab7c917d678d3d7a322a01f97da44 | [
"Apache-2.0"
] | 28 | 2021-10-11T01:53:53.000Z | 2022-03-24T17:45:55.000Z | lib/bike_brigade_web/delivery_expired_error.ex | bikebrigade/dispatch | eb622fe4f6dab7c917d678d3d7a322a01f97da44 | [
"Apache-2.0"
] | 20 | 2021-10-21T08:12:31.000Z | 2022-03-31T13:35:53.000Z | lib/bike_brigade_web/delivery_expired_error.ex | bikebrigade/dispatch | eb622fe4f6dab7c917d678d3d7a322a01f97da44 | [
"Apache-2.0"
] | null | null | null | defmodule BikeBrigadeWeb.DeliveryExpiredError do
defexception message: "delivery is expired", plug_status: 403
end
| 29.25 | 63 | 0.837607 |
1c8077bfc3720dfd84e372482e43c2a9f3444720 | 31 | exs | Elixir | test/test_helper.exs | SmartColumbusOS/divo | 9fccc400dcc64142698061b3da2a67e7828829e3 | [
"Apache-2.0"
] | 21 | 2019-05-07T14:08:31.000Z | 2020-08-05T02:04:07.000Z | test/test_helper.exs | UrbanOS-Public/divo | 9fccc400dcc64142698061b3da2a67e7828829e3 | [
"Apache-2.0"
] | 11 | 2019-05-08T18:08:09.000Z | 2020-05-19T17:26:55.000Z | test/test_helper.exs | UrbanOS-Public/divo | 9fccc400dcc64142698061b3da2a67e7828829e3 | [
"Apache-2.0"
] | 6 | 2019-05-07T19:26:55.000Z | 2020-01-28T17:11:09.000Z | ExUnit.start(timeout: 120_000)
| 15.5 | 30 | 0.806452 |
1c80974377dc791a595b6baa84a5b1065b2b98ab | 31,096 | exs | Elixir | test/graph_test.exs | elljoh/gremlex | a83fc3a0c46c19efebd6d4881a71adb058a80e38 | [
"MIT"
] | 61 | 2018-05-10T17:03:41.000Z | 2021-04-28T21:45:27.000Z | test/graph_test.exs | elljoh/gremlex | a83fc3a0c46c19efebd6d4881a71adb058a80e38 | [
"MIT"
] | 19 | 2018-05-10T17:14:42.000Z | 2020-12-17T20:43:07.000Z | test/graph_test.exs | elljoh/gremlex | a83fc3a0c46c19efebd6d4881a71adb058a80e38 | [
"MIT"
] | 24 | 2018-08-23T17:42:26.000Z | 2022-03-22T20:31:16.000Z | defmodule Gremlex.GraphTests do
import Gremlex.Graph
alias Gremlex.{Vertex, Edge, Graph}
use ExUnit.Case
use ExUnitProperties
alias :queue, as: Queue
describe "g/0" do
test "returns a new queue" do
assert g() == Queue.new()
end
end
describe "anonymous/0" do
test "adds __ function to the queue" do
actual_graph = anonymous()
expected_graph = Queue.in({"__", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "add_v/1" do
test "adds an addVertex function to the queue" do
actual_graph = g() |> add_v(1)
expected_graph = Queue.in({"addV", [1]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "add_e/1" do
test "adds an addE step to the queue" do
actual_graph = g() |> add_e("foo")
expected_graph = Queue.in({"addE", ["foo"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "aggregate/2" do
test "adds an aggregate function to the queue" do
actual_graph = g() |> Graph.aggregate("foo")
expected_graph = Queue.in({"aggregate", ["foo"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "barrier/2" do
test "adds a barrier function with parameter to the queue" do
actual_graph = g() |> Graph.barrier(1)
expected_graph = Queue.in({"barrier", [1]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "barrier/1" do
test "adds a barrier function to the queue" do
actual_graph = g() |> Graph.barrier()
expected_graph = Queue.in({"barrier", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "coin/2" do
test "adds a coin function to the queue" do
actual_graph = g() |> Graph.coin(1.0)
expected_graph = Queue.in({"coin", [1.0]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "has_label/1" do
test "adds a hasLabel function to the queue" do
actual_graph = g() |> has_label("foo")
expected_graph = Queue.in({"hasLabel", ["foo"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "has/3" do
test "adds a has function to the queue" do
actual_graph = g() |> has("foo", "bar")
expected_graph = Queue.in({"has", ["foo", "bar"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "has_namespace/1" do
test "adds a has function with namespace to the queue" do
actual_graph = g() |> has_namespace()
expected_graph = Queue.in({"has", ["namespace", "gremlex"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "has_namespace/2" do
test "adds a has function with namespace to the queue" do
actual_graph = g() |> has_namespace("bar")
expected_graph = Queue.in({"has", ["namespace", "bar"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "has_not/2" do
test "adds a hasNot function to the queue" do
actual_graph = g() |> Graph.has_not("age")
expected_graph = Queue.in({"hasNot", ["age"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "key/1" do
test "adds a key function to the queue" do
actual_graph = g() |> Graph.key()
expected_graph = Queue.in({"key", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "has_id/2" do
test "adds a hasId function to the queue" do
actual_graph = g() |> Graph.has_id(20)
expected_graph = Queue.in({"hasId", [20]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "has_key/1" do
test "adds a hasKey function to the queue" do
actual_graph = g() |> Graph.has_key(["name", "age"])
expected_graph = Queue.in({"hasKey", ["name", "age"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "add_namespace/1" do
test "adds a property function with namespace to the queue" do
actual_graph = g() |> add_namespace()
expected_graph = Queue.in({"property", ["namespace", "gremlex"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "add_namespace/2" do
test "adds a property function with namespace to the queue" do
actual_graph = g() |> add_namespace("bar")
expected_graph = Queue.in({"property", ["namespace", "bar"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "property/2" do
test "adds a property function to the queue" do
actual_graph = g() |> Graph.property("foo")
expected_graph = Queue.in({"property", ["foo"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "property/3" do
test "adds a property function to the queue" do
actual_graph = g() |> Graph.property("foo", "bar")
expected_graph = Queue.in({"property", ["foo", "bar"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "property/4" do
test "adds a property function to the queue as single" do
actual_graph = g() |> Graph.property(:single, "foo", "bar")
expected_graph = Queue.in({"property", [:single, "foo", "bar"]}, Queue.new())
assert actual_graph == expected_graph
end
test "adds a property function to the queue as list" do
actual_graph = g() |> Graph.property(:list, "foo", "bar")
expected_graph = Queue.in({"property", [:list, "foo", "bar"]}, Queue.new())
assert actual_graph == expected_graph
end
test "adds a property function to the queue as set" do
actual_graph = g() |> Graph.property(:set, "foo", "bar")
expected_graph = Queue.in({"property", [:set, "foo", "bar"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "properties/2" do
test "adds a properties function to the queue" do
actual_graph = g() |> Graph.properties("foo")
expected_graph = Queue.in({"properties", ["foo"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "properties/1" do
test "adds a properties function to the queue" do
actual_graph = g() |> Graph.properties()
expected_graph = Queue.in({"properties", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "store/2" do
test "adds a store function to the queue" do
actual_graph = g() |> Graph.store("foo")
expected_graph = Queue.in({"store", ["foo"]}, Queue.new())
assert actual_graph == expected_graph
end
end
  describe "value_map/1" do
    test "adds a valueMap function to the queue" do
      actual_graph = g() |> Graph.value_map()
      expected_graph = Queue.in({"valueMap", []}, Queue.new())
      assert actual_graph == expected_graph
    end
    # NOTE(review): this test actually exercises value_map/2 (graph + list) and
    # duplicates the `describe "value_map/2 when value is a list"` block below —
    # consider keeping only one copy.
    test "adds a valueMap function to the queue with list of properties" do
      actual_graph = g() |> Graph.value_map(["foo", "bar"])
      expected_graph = Queue.in({"valueMap", ["foo", "bar"]}, Queue.new())
      assert actual_graph == expected_graph
    end
  end
describe "value_map/2 when value is string" do
test "adds a valueMap function to the queue" do
actual_graph = g() |> Graph.value_map("foo")
expected_graph = Queue.in({"valueMap", ["foo"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "value_map/2 when value is a list" do
test "adds a valueMap function to the queue" do
actual_graph = g() |> Graph.value_map(["foo", "bar"])
expected_graph = Queue.in({"valueMap", ["foo", "bar"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "values/2" do
test "adds a value function the queue" do
actual_graph = g() |> values("foo")
expected_graph = Queue.in({"values", ["foo"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "v/1" do
test "adds a V function to the queue" do
actual_graph = g() |> v()
expected_graph = Queue.in({"V", []}, Queue.new())
assert actual_graph == expected_graph
end
test "creates a vertex when the id is a number" do
check all n <- integer() do
actual_graph = v(n)
expected_graph = %Vertex{id: n, label: ""}
assert actual_graph == expected_graph
end
end
end
describe "v/2" do
test "adds a V function for an id to the queue" do
actual_graph = g() |> v(1)
expected_graph = Queue.in({"V", [1]}, Queue.new())
assert actual_graph == expected_graph
end
test "adds a V function when given a vertex to the queue" do
actual_graph = g() |> v(%Vertex{id: 1, label: "foo"})
expected_graph = Queue.in({"V", [1]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "out_e/1" do
test "adds an outE function to the queue" do
actual_graph = g() |> out_e()
expected_graph = Queue.in({"outE", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "out_e/2" do
test "adds an outE function to the queue" do
actual_graph = g() |> out_e("foo")
expected_graph = Queue.in({"outE", ["foo"]}, Queue.new())
assert actual_graph == expected_graph
end
test "adds an outE function to the queue with list of edges" do
actual_graph = g() |> out_e(["foo", "bar"])
expected_graph = Queue.in({"outE", ["foo", "bar"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "out/2" do
test "adds an out function with an edge to the queue" do
actual_graph = g() |> out("foo")
expected_graph = Queue.in({"out", ["foo"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "out/1" do
test "adds an out function to the queue" do
actual_graph = g() |> out()
expected_graph = Queue.in({"out", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "in_/2" do
test "adds an in function with an edge to the queue" do
actual_graph = g() |> in_("foo")
expected_graph = Queue.in({"in", ["foo"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "in_/1" do
test "adds an in function to the queue" do
actual_graph = g() |> in_()
expected_graph = Queue.in({"in", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "or_/1" do
test "adds an or function to the queue" do
actual_graph = g() |> or_()
expected_graph = Queue.in({"or", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "and_/1" do
test "adds an and function to the queue" do
actual_graph = g() |> and_()
expected_graph = Queue.in({"and", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "in_e/1" do
test "adds an inE function to the queue" do
actual_graph = g() |> in_e()
expected_graph = Queue.in({"inE", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "in_e/2" do
test "adds an inE function to the queue" do
actual_graph = g() |> in_e("foo")
expected_graph = Queue.in({"inE", ["foo"]}, Queue.new())
assert actual_graph == expected_graph
end
test "adds an inE function to the queue with list of edges" do
actual_graph = g() |> in_e(["foo", "bar"])
expected_graph = Queue.in({"inE", ["foo", "bar"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "in_v/1" do
test "adds an inV function to the queue" do
actual_graph = g() |> in_v()
expected_graph = Queue.in({"inV", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "out_v/1" do
test "adds an outV function to the queue" do
actual_graph = g() |> out_v()
expected_graph = Queue.in({"outV", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "both/1" do
test "adds a both function to the queue" do
actual_graph = g() |> both()
expected_graph = Queue.in({"both", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "both/2" do
test "adds a both function to the queue" do
actual_graph = g() |> both("foo")
expected_graph = Queue.in({"both", ["foo"]}, Queue.new())
assert actual_graph == expected_graph
end
test "adds a both function to the queue with provided list" do
actual_graph = g() |> both(["foo", "bar"])
expected_graph = Queue.in({"both", ["foo", "bar"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "both_e/1" do
test "adds a bothE function to the queue" do
actual_graph = g() |> both_e()
expected_graph = Queue.in({"bothE", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "both_e/2" do
test "adds a bothE function to the queue with provided list" do
actual_graph = g() |> both_e(["foo", "bar"])
expected_graph = Queue.in({"bothE", ["foo", "bar"]}, Queue.new())
assert actual_graph == expected_graph
end
test "adds a bothE function to the queue" do
actual_graph = g() |> both_e("foo")
expected_graph = Queue.in({"bothE", ["foo"]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "both_v/1" do
test "adds a bothV function to the queue" do
actual_graph = g() |> both_v()
expected_graph = Queue.in({"bothV", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "dedup/1" do
test "adds an dedup function to the queue" do
actual_graph = g() |> dedup()
expected_graph = Queue.in({"dedup", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "to/2" do
test "adds a to function to the queue" do
actual_graph = g() |> to(1)
expected_graph = Queue.in({"to", [1]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "has_next/1" do
test "adds a hasNext function to the queue" do
actual_graph = g() |> has_next()
expected_graph = Queue.in({"hasNext", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "next/1" do
test "adds a next function to the queue" do
actual_graph = g() |> next()
expected_graph = Queue.in({"next", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "next/2" do
test "adds a next function to the queue" do
actual_graph = g() |> next(2)
expected_graph = Queue.in({"next", [2]}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "try_next/1" do
test "adds a tryNext function to the queue" do
actual_graph = g() |> try_next()
expected_graph = Queue.in({"tryNext", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "to_list/1" do
test "adds a toList function to the queue" do
actual_graph = g() |> to_list()
expected_graph = Queue.in({"toList", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "to_set/1" do
test "adds a toSet function to the queue" do
actual_graph = g() |> to_set()
expected_graph = Queue.in({"toSet", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "to_bulk_set/1" do
test "adds a toBulkSet function to the queue" do
actual_graph = g() |> to_bulk_set()
expected_graph = Queue.in({"toBulkSet", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "drop/1" do
test "adds a drop function to the queue" do
actual_graph = g() |> drop()
expected_graph = Queue.in({"drop", []}, Queue.new())
assert actual_graph == expected_graph
end
end
describe "iterate/1" do
test "adds a iterate function to the queue" do
actual_graph = g() |> iterate()
expected_graph = Queue.in({"iterate", []}, Queue.new())
assert actual_graph == expected_graph
end
end
# Aggregation / identity steps: all zero-argument, all expected to enqueue
# a single {step_name, []} entry.
describe "sum/1" do
  test "adds a sum function to the queue" do
    queued = sum(g())
    assert queued == Queue.in({"sum", []}, Queue.new())
  end
end

describe "min/1" do
  test "adds a min function to the queue" do
    queued = min(g())
    assert queued == Queue.in({"min", []}, Queue.new())
  end
end

describe "max/1" do
  test "adds a max function to the queue" do
    queued = max(g())
    assert queued == Queue.in({"max", []}, Queue.new())
  end
end

describe "identity/1" do
  test "adds a identity function to the queue" do
    queued = identity(g())
    assert queued == Queue.in({"identity", []}, Queue.new())
  end
end

describe "id/1" do
  test "adds a id function to the queue" do
    queued = id(g())
    assert queued == Queue.in({"id", []}, Queue.new())
  end
end

describe "cyclic_path/1" do
  test "adds a cyclicPath function to the queue" do
    queued = cyclic_path(g())
    assert queued == Queue.in({"cyclicPath", []}, Queue.new())
  end
end

describe "count/1" do
  test "adds a count function to the queue" do
    queued = count(g())
    assert queued == Queue.in({"count", []}, Queue.new())
  end
end
describe "group/1" do
  test "adds a group function to the queue" do
    queued = group(g())
    assert queued == Queue.in({"group", []}, Queue.new())
  end
end

describe "constant/2" do
  test "adds a constant function to the queue" do
    # constant/2 is called fully qualified because of a naming clash with
    # locally imported helpers.
    queued = Graph.constant(g(), "foo")
    assert queued == Queue.in({"constant", ["foo"]}, Queue.new())
  end
end

describe "group_count/1" do
  test "adds a groupCount function to the queue" do
    queued = group_count(g())
    assert queued == Queue.in({"groupCount", []}, Queue.new())
  end
end

describe "group_count/2" do
  test "adds a groupCount function to the queue" do
    queued = group_count(g(), "foo")
    assert queued == Queue.in({"groupCount", ["foo"]}, Queue.new())
  end
end
# within/1 and without/1 start a fresh queue (they are predicates, not
# steps chained onto an existing traversal). Range arguments are queued
# as a single range; list arguments are queued element by element.
describe "within/1" do
  test "creates a queue with the within function using a range arg" do
    assert within(1..5) == Queue.in({"within", [1..5]}, Queue.new())
  end

  test "creates a queue with the within function using a list arg" do
    assert within([1, 2, 3]) == Queue.in({"within", [1, 2, 3]}, Queue.new())
  end
end

describe "without/1" do
  test "creates a queue with the without function using a range arg" do
    assert without(1..5) == Queue.in({"without", [1..5]}, Queue.new())
  end

  test "creates a queue with the without function using a list arg" do
    assert without([1, 2, 3]) == Queue.in({"without", [1, 2, 3]}, Queue.new())
  end
end
describe "encode/1" do
  # encode/1 compiles the accumulated step queue into a Gremlin/Groovy
  # query string.
  test "compiles queue into a query" do
    graph =
      g()
      |> v()
      |> has_label("Intent")
      |> has("name", "request.group")
      |> out("sedan")
      |> values("name")

    expected_query =
      "g.V().hasLabel('Intent').has('name', 'request.group').out('sedan').values('name')"

    actual_query = encode(graph)
    assert actual_query == expected_query
  end

  test "compiles query with a vertex id correctly" do
    graph = g() |> v(1)
    expected_query = "g.V(1)"
    actual_query = encode(graph)
    assert actual_query == expected_query
  end

  # A traversal passed as an argument (the v(2) target of to/2) is
  # rendered inline, without the leading "g." prefix.
  test "compiles query with a vertex" do
    graph = g() |> v(1) |> add_e("foo") |> to(v(2))
    expected_query = "g.V(1).addE('foo').to(V(2))"
    actual_query = encode(graph)
    assert actual_query == expected_query
  end

  # String arguments are wrapped in single quotes in the compiled query,
  # so bare apostrophes inside them must come out backslash-escaped.
  # Cases below vary the text before the apostrophe, including runs of
  # literal backslashes.
  test "escape the query correctly with an apostrophe" do
    graph = g() |> v(1) |> add_e("foo' with apostrophe") |> to(v(2))
    expected_query = "g.V(1).addE('foo\\' with apostrophe').to(V(2))"
    actual_query = encode(graph)
    assert actual_query == expected_query

    graph = g() |> v(1) |> add_e("o' with apostrophe") |> to(v(2))
    expected_query = "g.V(1).addE('o\\' with apostrophe').to(V(2))"
    actual_query = encode(graph)
    assert actual_query == expected_query

    graph = g() |> v(1) |> add_e("' with apostrophe") |> to(v(2))
    expected_query = "g.V(1).addE('\\' with apostrophe').to(V(2))"
    actual_query = encode(graph)
    assert actual_query == expected_query

    graph = g() |> v(1) |> add_e("foo\\\\' with apostrophe") |> to(v(2))
    expected_query = "g.V(1).addE('foo\\\\\\' with apostrophe').to(V(2))"
    actual_query = encode(graph)
    assert actual_query == expected_query

    graph = g() |> v(1) |> add_e("foo\\\\\\\\' with apostrophe") |> to(v(2))
    expected_query = "g.V(1).addE('foo\\\\\\\\\\' with apostrophe').to(V(2))"
    actual_query = encode(graph)
    assert actual_query == expected_query
  end

  # An apostrophe already preceded by a single backslash is left as-is
  # (no double escaping).
  test "do no escape already escaped apostrophe" do
    graph = g() |> v(1) |> add_e("foo\\' with apostrophe") |> to(v(2))
    expected_query = "g.V(1).addE('foo\\' with apostrophe').to(V(2))"
    actual_query = encode(graph)
    assert actual_query == expected_query

    graph = g() |> v(1) |> add_e("\\' with apostrophe") |> to(v(2))
    expected_query = "g.V(1).addE('\\' with apostrophe').to(V(2))"
    actual_query = encode(graph)
    assert actual_query == expected_query

    graph = g() |> v(1) |> add_e("\\\\' with apostrophe") |> to(v(2))
    expected_query = "g.V(1).addE('\\\\\\' with apostrophe').to(V(2))"
    actual_query = encode(graph)
    assert actual_query == expected_query
  end

  # nil arguments are rendered as the bare token `none`.
  test "compiles queue with nil value" do
    graph = g() |> v() |> has("name", nil)
    expected_query = "g.V().has('name', none)"
    actual_query = encode(graph)
    assert actual_query == expected_query
  end

  # Elixir ranges are rendered inside the within(...) predicate form.
  test "compiles queue with range value" do
    graph = g() |> v() |> has("age", within(1..18))
    expected_query = "g.V().has('age', within(1..18))"
    actual_query = encode(graph)
    assert actual_query == expected_query
  end

  # Nested traversals used as arguments drop the "g." prefix, except when
  # the nested traversal itself starts with v() — then "g.V(...)" is kept.
  test "add functions as arguments without prepending g. unless it is function v()" do
    graph =
      g()
      |> v("1")
      |> has_label("foo")
      |> fold()
      |> coalesce([g() |> unfold(), g() |> v("2")])

    expected_query = "g.V('1').hasLabel('foo').fold().coalesce(unfold(), g.V('2'))"
    actual_query = encode(graph)
    assert actual_query == expected_query
  end

  # Exercises several levels of nesting at once: repeat/until/where with
  # inner traversals, anonymous (__) traversals, and a multi-key select.
  test "deeply nested traversals" do
    graph =
      g()
      |> v("1")
      |> repeat(g() |> both_e() |> as("e") |> both_v() |> simple_path())
      |> until(g() |> has_label("foo") |> or_() |> loops() |> is(g() |> eq(2)))
      |> where(anonymous() |> not_(g() |> in_e("bar")))
      |> as("b")
      |> select(["e", "b"])

    expected_query =
      "g.V('1').repeat(bothE().as('e').bothV().simplePath())" <>
        ".until(hasLabel('foo').or().loops().is(eq(2)))" <>
        ".where(__.not(inE('bar'))).as('b').select('e', 'b')"

    actual_query = encode(graph)
    assert actual_query == expected_query
  end
end
# e/1 enqueues a bare E step; e/2 accepts a raw id (number or string) or a
# full %Edge{}, from which only the id is queued.
describe "e/1" do
  test "adds an E function to the queue" do
    assert e(g()) == Queue.in({"E", []}, Queue.new())
  end
end

describe "e/2 with number" do
  test "adds an E function to the queue which accepts a numeric id" do
    assert e(g(), 1) == Queue.in({"E", [1]}, Queue.new())
  end
end

describe "e/2 with string" do
  test "adds an E function to the queue which accepts a string id" do
    assert e(g(), "string-id") == Queue.in({"E", ["string-id"]}, Queue.new())
  end
end

describe "e/2 with edge with numeric id" do
  test "adds an E function to the queue which accepts a string id" do
    edge = %Edge{
      id: 123,
      label: "someEdge",
      in_vertex: %Vertex{id: 345, label: "inVert"},
      out_vertex: %Vertex{id: 678, label: "outVert"},
      properties: %{}
    }

    assert e(g(), edge) == Queue.in({"E", [123]}, Queue.new())
  end
end

describe "e/2 with edge with string id" do
  test "adds an E function to the queue which accepts a string id" do
    edge = %Edge{
      id: "edge-id",
      label: "someEdge",
      in_vertex: %Vertex{id: "in-v-id", label: "inVert"},
      out_vertex: %Vertex{id: "out-v-id", label: "outVert"},
      properties: %{}
    }

    assert e(g(), edge) == Queue.in({"E", ["edge-id"]}, Queue.new())
  end
end
describe "coalesce/2" do
  test "adds a coalesce function to the queue" do
    queued = coalesce(g(), ["foo", "bar"])
    assert queued == Queue.in({"coalesce", ["foo", "bar"]}, Queue.new())
  end
end

describe "fold/1" do
  test "adds a fold function to the queue" do
    queued = fold(g())
    assert queued == Queue.in({"fold", []}, Queue.new())
  end
end

describe "fold/2" do
  test "adds a fold function to the queue" do
    queued = fold(g(), "foo")
    assert queued == Queue.in({"fold", ["foo"]}, Queue.new())
  end
end

describe "unfold/1" do
  test "adds a unfold function to the queue" do
    queued = unfold(g())
    assert queued == Queue.in({"unfold", []}, Queue.new())
  end
end

describe "unfold/2" do
  test "adds a unfold function to the queue" do
    queued = unfold(g(), "foo")
    assert queued == Queue.in({"unfold", ["foo"]}, Queue.new())
  end
end

describe "as/2" do
  test "adds an as function to the queue" do
    queued = as(g(), "foo")
    assert queued == Queue.in({"as", ["foo"]}, Queue.new())
  end
end

describe "select/2" do
  test "adds a select function to the queue" do
    queued = select(g(), "foo")
    assert queued == Queue.in({"select", ["foo"]}, Queue.new())
  end
end

describe "by/2" do
  test "adds a by function to the queue" do
    queued = by(g(), "foo")
    assert queued == Queue.in({"by", ["foo"]}, Queue.new())
  end
end

describe "path/1" do
  test "adds a path function to the queue" do
    queued = path(g())
    assert queued == Queue.in({"path", []}, Queue.new())
  end
end

describe "simple_path/1" do
  test "adds a simplePath function to the queue" do
    queued = simple_path(g())
    assert queued == Queue.in({"simplePath", []}, Queue.new())
  end
end
describe "from/2" do
  test "adds a from function to the queue" do
    queued = from(g(), "foo")
    assert queued == Queue.in({"from", ["foo"]}, Queue.new())
  end
end

describe "repeat/2" do
  test "adds a repeat function to the queue" do
    queued = repeat(g(), "foo")
    assert queued == Queue.in({"repeat", ["foo"]}, Queue.new())
  end
end

describe "until/2" do
  test "adds a until function to the queue" do
    queued = until(g(), "foo")
    assert queued == Queue.in({"until", ["foo"]}, Queue.new())
  end
end

describe "loops/1" do
  test "adds a loops function to the queue" do
    queued = loops(g())
    assert queued == Queue.in({"loops", []}, Queue.new())
  end
end

describe "is/2" do
  test "adds a is function to the queue" do
    queued = is(g(), "foo")
    assert queued == Queue.in({"is", ["foo"]}, Queue.new())
  end
end

describe "inject/2" do
  test "adds a inject function to the queue" do
    queued = inject(g(), "Daniel")
    assert queued == Queue.in({"inject", ["Daniel"]}, Queue.new())
  end
end

describe "tail/1" do
  test "adds a tail function to the queue" do
    # With no explicit size, tail defaults to a scope of 1.
    queued = tail(g())
    assert queued == Queue.in({"tail", [1]}, Queue.new())
  end
end

describe "tail/2" do
  test "adds a tail function to the queue" do
    queued = tail(g(), 2)
    assert queued == Queue.in({"tail", [2]}, Queue.new())
  end
end

describe "eq/2" do
  test "adds a eq function to the queue" do
    queued = eq(g(), 1)
    assert queued == Queue.in({"eq", [1]}, Queue.new())
  end
end

describe "where/2" do
  test "adds a where function to the queue" do
    queued = where(g(), "foo")
    assert queued == Queue.in({"where", ["foo"]}, Queue.new())
  end
end

describe "not_/2" do
  test "adds a not function to the queue" do
    queued = not_(g(), "foo")
    assert queued == Queue.in({"not", ["foo"]}, Queue.new())
  end
end

describe "cap/2" do
  test "adds a cap function to the queue" do
    queued = cap(g(), "foo")
    assert queued == Queue.in({"cap", ["foo"]}, Queue.new())
  end
end
end
| 30.757666 | 91 | 0.60921 |
1c80a51620ab0a305c5f451ba29e04472f3b2678 | 1,484 | exs | Elixir | test/commands/permission/info_test.exs | matusf/cog | 71708301c7dc570fb0d3498a50f47a70ef957788 | [
"Apache-2.0"
] | 1,003 | 2016-02-23T17:21:12.000Z | 2022-02-20T14:39:35.000Z | test/commands/permission/info_test.exs | matusf/cog | 71708301c7dc570fb0d3498a50f47a70ef957788 | [
"Apache-2.0"
] | 906 | 2016-02-22T22:54:19.000Z | 2022-03-11T15:19:43.000Z | test/commands/permission/info_test.exs | matusf/cog | 71708301c7dc570fb0d3498a50f47a70ef957788 | [
"Apache-2.0"
] | 95 | 2016-02-23T13:42:31.000Z | 2021-11-30T14:39:55.000Z | defmodule Cog.Test.Commands.Permission.InfoTest do
use Cog.CommandCase, command_module: Cog.Commands.Permission
alias Cog.Commands.Permission.Info
import Cog.Support.ModelUtilities, only: [permission: 1]
test "getting information on a permission works" do
  permission("site:foo")

  req = new_req(args: ["site:foo"])
  payload = unwrap(send_req(req, Info))

  assert %{id: _, bundle: "site", name: "foo"} = payload
end

test "getting information for a non-existent permission fails" do
  req = new_req(args: ["site:wat"])
  error = unwrap_error(send_req(req, Info))

  assert error == "Could not find 'permission' with the name 'site:wat'"
end

test "getting information requires a permission name" do
  req = new_req(args: [])
  error = unwrap_error(send_req(req, Info))

  assert error == "Not enough args. Arguments required: exactly 1."
end

test "you can only get information on one permission at a time" do
  permission("site:foo")
  permission("site:bar")

  req = new_req(args: ["site:foo", "site:bar"])
  error = unwrap_error(send_req(req, Info))

  assert error == "Too many args. Arguments required: exactly 1."
end

test "information requires a string argument" do
  req = new_req(args: [123])
  error = unwrap_error(send_req(req, Info))

  assert error == "Arguments must be strings"
end
end
| 27.481481 | 75 | 0.616577 |
1c80d4d2f0cc0ca9dccd274e7363c04e36946db5 | 7,377 | ex | Elixir | lib/fexr_imf.ex | Schultzer/fexr_imf | a66816bc69bf04a1d4f11ee2144e5e9d23c37939 | [
"MIT",
"Unlicense"
] | 1 | 2017-08-25T14:01:43.000Z | 2017-08-25T14:01:43.000Z | lib/fexr_imf.ex | Schultzer/fexr_imf | a66816bc69bf04a1d4f11ee2144e5e9d23c37939 | [
"MIT",
"Unlicense"
] | null | null | null | lib/fexr_imf.ex | Schultzer/fexr_imf | a66816bc69bf04a1d4f11ee2144e5e9d23c37939 | [
"MIT",
"Unlicense"
] | null | null | null | defmodule FexrImf do
require Logger
# Fetches and parses the IMF SDR exchange-rate table for the given date
# string (as produced by `Date.to_string/1`).
#
# Results are memoized per date in the `:imf` ConCache cache, so each date
# is scraped from imf.org at most once per cache lifetime. A failed fetch
# raises inside the store callback, so nothing is cached for that date.
defp query(date) do
  ConCache.get_or_store(:imf, "#{date}", fn ->
    date
    |> url
    |> fetch
    |> Floki.find("table.tighter")
    |> format_table
  end)
end
# The ISO 4217 currency codes this module supports — the currencies that
# appear in the IMF SDR valuation report (see currency_name_to_symbol/0).
def symbols do
  ["AED", "AUD", "BHD", "BND", "BRL", "BWP", "CAD", "CHF", "CLP", "CNY", "COP",
   "CZK", "DKK", "DZD", "EUR", "GBP", "HUF", "IDR", "ILS", "INR", "IRR", "ISK",
   "JPY", "KRW", "KWD", "KZT", "LKR", "LYD", "MUR", "MXN", "MYR", "NOK", "NPR",
   "NZD", "OMR", "PEN", "PHP", "PKR", "PLN", "QAR", "RUB", "SAR", "SEK", "SGD",
   "THB", "TND", "TTD", "USD", "UYU", "VEF", "ZAR"]
end
# Maps the currency display names used on the IMF report page to their
# ISO 4217 codes. Lookups against names not in this table return nil
# (see parse/1).
defp currency_name_to_symbol do
  %{"Swedish Krona" => "SEK", "Saudi Arabian Riyal" => "SAR",
    "Chilean Peso" => "CLP", "Brazilian Real" => "BRL", "Botswana Pula" => "BWP",
    "Nuevo Sol" => "PEN", "New Zealand Dollar" => "NZD", "Euro" => "EUR",
    "Indonesian Rupiah" => "IDR", "Danish Krone" => "DKK",
    "Pakistani Rupee" => "PKR", "Mexican Peso" => "MXN",
    "Philippine Peso" => "PHP", "Japanese Yen" => "JPY", "U.S. Dollar" => "USD",
    "Bolivar Fuerte" => "VEF", "Czech Koruna" => "CZK", "Qatar Riyal" => "QAR",
    "U.K. Pound Sterling" => "GBP", "Kazakhstani Tenge" => "KZT",
    "Korean Won" => "KRW", "Icelandic Krona" => "ISK", "Iranian Rial" => "IRR",
    "South African Rand" => "ZAR", "Thai Baht" => "THB",
    "Singapore Dollar" => "SGD", "Indian Rupee" => "INR",
    "Trinidad And Tobago Dollar" => "TTD", "Kuwaiti Dinar" => "KWD",
    "Hungarian Forint" => "HUF", "Mauritian Rupee" => "MUR",
    "Colombian Peso" => "COP", "Swiss Franc" => "CHF", "U.A.E. Dirham" => "AED",
    "Bahrain Dinar" => "BHD", "Malaysian Ringgit" => "MYR",
    "Peso Uruguayo" => "UYU", "Polish Zloty" => "PLN", "Chinese Yuan" => "CNY",
    "Libyan Dinar" => "LYD", "Israeli New Sheqel" => "ILS",
    "Canadian Dollar" => "CAD", "Sri Lanka Rupee" => "LKR",
    "Norwegian Krone" => "NOK", "Brunei Dollar" => "BND",
    "Nepalese Rupee" => "NPR", "Algerian Dinar" => "DZD",
    "Tunisian Dinar" => "TND", "Australian Dollar" => "AUD",
    "Rial Omani" => "OMR", "Russian Ruble" => "RUB"}
end
# Builds the IMF "SDR per currency" report URL for the given date string.
defp url(date), do: "https://www.imf.org/external/np/fin/data/rms_mth.aspx?SelectDate=#{date}&reportType=SDRCV"

# Performs the HTTP GET for the report page.
#
# Returns the HTML body on a 200 response. On a 404 or a transport error
# the failure is logged and nil is returned explicitly — the previous
# version implicitly returned `Logger`'s `:ok`, which was accidental.
# Callers (query/1) will still crash downstream on a failed fetch; other
# status codes (e.g. redirects) fall through and raise a CaseClauseError,
# as before.
defp fetch(url) do
  case HTTPoison.get(url) do
    {:ok, %HTTPoison.Response{status_code: 200, body: body}} ->
      Logger.info("Code: 200 URL: #{url}")
      body

    {:ok, %HTTPoison.Response{status_code: 404}} ->
      Logger.info("Code: 404 URL: #{url}")
      nil

    {:error, %HTTPoison.Error{reason: reason}} ->
      # `reason` is usually an atom, which Logger does not accept as a
      # message; inspect/1 renders it as chardata.
      Logger.error(inspect(reason))
      nil
  end
end
# Normalizes the list of "table.tighter" nodes Floki found. The page
# apparently yields either three or four such tables — only the first one
# (and, in the four-table case, the second) carries rate data; the rest
# are dropped. NOTE(review): any other table count raises a
# FunctionClauseError — confirm that is acceptable.
defp format_table([one, _, _]), do: format_table(one)
defp format_table([one, two, _, _]), do: format_table(one, two)

# Merges two parsed tables into one map, concatenating the per-currency
# {date, rate} lists when a currency appears in both.
defp format_table(one, two), do: Map.merge(format_table(one), format_table(two), fn(_k, v1, v2) -> v1 ++ v2 end)

# Converts a single Floki table node into %{"SYM" => [{%Date{}, rate}, ...]}.
# After filtering, row 0 holds the column-name cells and row 1 the date
# header; every remaining distinct row is a currency row.
defp format_table({tag, att, val}) do
  table = {tag, att, val} |> filter_table
  names = Enum.at(table, 0)
  dates = Enum.at(table, 1)

  # Rejecting by equality also drops any repeated header rows.
  Enum.reject(table, fn(x) -> x == names or x == dates end)
  |> serialize(format_dates(dates))
  |> map_merge
end
# Recursively reduces a Floki node tree to its (trimmed) text content.
# Element tuples are mapped over their children; a four-element list is
# collapsed to its first entry (the data cell — the remaining three are
# presumably layout nodes; TODO confirm against the page markup).
defp filter_table([]), do: []
defp filter_table(string) when is_binary(string), do: string |> String.trim_leading |> String.trim_trailing
defp filter_table([one, _two, _three, _four]), do: filter_table(one)
defp filter_table({_tag, _value, list}), do: for n <- list, do: filter_table(n)

# Serializes every currency row against the shared date header.
defp serialize(rates, dates), do: for rate <- rates, do: serialize_rate(rate, dates)

# A row is [currency_name_cell | rate_cells]; the name is translated to
# its ISO code and paired with the dated rates.
defp serialize_rate([name | rates], dates) do
  %{parse(name) => serialize_rates(rates, dates)}
end

# Zips rate cells (each a single-element list) with their dates. Both
# lists must be the same length or a FunctionClauseError is raised.
defp serialize_rates([], []), do: []
defp serialize_rates([[rate] | rates], [date | dates]), do: [{date, rate} | serialize_rates(rates, dates)]
# Turns the date-header row into a list of %Date{} structs, skipping the
# empty leading cell and the "Currency" label cell.
defp format_dates([]), do: []
defp format_dates([[""] | tail]), do: format_dates(tail)
defp format_dates([["Currency"] | tail]), do: format_dates(tail)

# A date cell arrives as [month_and_day, [], year], e.g.
# ["May 1,", [], "2017"]; the comma is stripped before splitting.
defp format_dates([[month_date, [], year] | tail]) do
  [month_name, day] = month_date |> String.replace(",", "") |> String.split(" ")
  # Date.new/3 only fails on an invalid calendar date, which would mean
  # the page markup changed — crash loudly in that case.
  {:ok, date} = Date.new(convert(year), convert(month_name), convert(day))
  [date | format_dates(tail)]
end
# Parses a header-cell fragment into an integer: anything starting with a
# three-letter month abbreviation (e.g. "May", "january") becomes its
# 1-based month number; everything else (day, year) is parsed as a plain
# integer.
defp convert(input) do
  input = String.downcase(input)
  months = ["jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec"]

  # Single pass: the previous version scanned the list twice (Enum.any?/2
  # followed by Enum.find_index/2); find_index alone answers both
  # questions.
  case Enum.find_index(months, &matches_month?(input, &1)) do
    nil -> String.to_integer(input)
    index -> index + 1
  end
end

# True when `name` starts with the given three-letter month abbreviation.
defp matches_month?(name, month), do: String.starts_with?(name, month)
# Maps an exact lowercase three-letter month abbreviation to its month
# number. NOTE(review): nothing in this module calls month_number?/1 —
# convert/1 resolves month numbers via an Enum.find_index lookup instead —
# so this looks like dead code; confirm before removing.
defp month_number?("jan"), do: 1
defp month_number?("feb"), do: 2
defp month_number?("mar"), do: 3
defp month_number?("apr"), do: 4
defp month_number?("may"), do: 5
defp month_number?("jun"), do: 6
defp month_number?("jul"), do: 7
defp month_number?("aug"), do: 8
defp month_number?("sep"), do: 9
defp month_number?("oct"), do: 10
defp month_number?("nov"), do: 11
defp month_number?("dec"), do: 12
# Translates an IMF display name (possibly wrapped in a single-element
# cell list) into its ISO 4217 code; unknown names map to nil.
defp parse(name) when is_binary(name), do: currency_name_to_symbol[name]
defp parse([name]), do: currency_name_to_symbol[name]

# Collapses a list of single-entry maps into one map. On duplicate keys
# the value already accumulated wins (Map.merge/2's second argument takes
# precedence), though currency codes are expected to be unique.
defp map_merge(currencies, acc \\ %{})
defp map_merge([], acc), do: acc
defp map_merge([currency | currencies], acc), do: map_merge(currencies, Map.merge(currency, acc))

# Restricts a rate map to the requested symbols; [] or nil means "all".
defp filter(rates, []), do: rates
defp filter(rates, nil), do: rates
defp filter(rates, symbols), do: Map.take(rates, symbols)
# Re-bases the per-SDR rate table to `base` for a single date, producing
# %{"SYM" => rate | "NA"} for every supported symbol except the base
# itself.
defp format_rates(rates, date, base) do
  base_rate = find_rate(date, rates[base])

  symbols()
  |> Map.new(fn symbol -> {symbol, calc_exr(base_rate, find_rate(date, rates[symbol]))} end)
  |> Map.delete(base)
end

# Returns the {date, rate} tuple for the requested date, or nil when the
# date is missing. (The discarded rate element was previously bound to a
# plain `r`, triggering an unused-variable warning.)
defp find_rate(date, rates), do: Enum.find(rates, fn {d, _rate} -> d == date end)

# Computes base/rate as a float, propagating "NA" when both tuples are
# missing or the quote rate is unavailable. Repeating `date` in both
# patterns keeps the original requirement that the tuples refer to the
# same date, without the compiler warning the old repeated `_date`
# produced.
defp calc_exr(nil, nil), do: "NA"
defp calc_exr({date, _base_rate}, {date, "NA"}), do: "NA"

defp calc_exr({date, base_rate}, {date, rate}) do
  # The previous version bound this expression to an unused `exr`
  # variable; the expression itself is the return value.
  String.to_float(base_rate) / String.to_float(rate)
end
# Normalizes a caller-supplied symbol list: entries that are neither atoms
# nor binaries are silently dropped; the rest become upcased strings.
defp convert_symbols(symbols) do
  symbols
  |> Enum.reject(fn(l) -> not is_atom(l) and not is_binary(l) end)
  |> Enum.map(fn(l) -> if is_atom(l), do: Atom.to_string(l) |> String.upcase, else: String.upcase(l) end)
end

# Internal entry point reached from rates/3 once the arguments are fully
# normalized: scrapes (or reads from cache) the IMF table for the given
# day, re-bases it to `base`, and filters to the requested symbols.
def rates({{year, month, day}, base, symbols}) do
  # Raises ArgumentError on an invalid calendar date.
  date = Date.from_erl!({year, month, day})

  date
  |> Date.to_string
  |> query
  |> format_rates(date, base)
  |> filter(symbols)
end
@moduledoc """
Documentation for FexrImf.

Scrapes the IMF SDR valuation report and derives cross exchange rates
from it.
"""

@doc """
Returns a map of exchange rates for the given Erlang-style date tuple
`{year, month, day}`, expressed relative to `base` (default `"USD"`).

`symbols` filters the result to the given currency codes — atoms or
strings, any case; the default `[]` returns all supported symbols.
Raises `ArgumentError` when `date` is not a tuple or `symbols` is not a
list.

Note: the example below performs a live HTTP request to imf.org, so its
result is not stable over time.

## Examples

    iex> FexrImf.rates({2017, 8, 8}, :EUR, ["USD"])
    %{"USD" => 1.1814001721291816}

"""
def rates(date, base \\ "USD", symbols \\ [])
def rates(date, _base, _symbols) when not is_tuple(date), do: raise ArgumentError, "date has to be erl format {year, month, day}"
def rates(_date, _base, symbols) when not is_list(symbols), do: raise ArgumentError, "symbols has to be in a list"
# NOTE(review): only atom bases are upcased — a lowercase string base
# such as "usd" is passed through unchanged; confirm whether it should be
# normalized too.
def rates(date, base, symbols) when is_atom(base), do: rates(date, Atom.to_string(base) |> String.upcase, symbols)
def rates(date, base, symbols) when is_list(symbols), do: rates({date, base, convert_symbols(symbols)})
end
| 38.025773 | 131 | 0.608106 |
1c80d906feff7bcb0d1d0e2a4b40f49d7e88a2fe | 7,890 | exs | Elixir | test/tutorial/2_string_test.exs | ityonemo/Exonerate | 42b888c156c9179d222b78609d34c07f0b887eaf | [
"MIT"
] | 14 | 2021-01-14T20:14:30.000Z | 2022-01-28T00:58:07.000Z | test/tutorial/2_string_test.exs | ityonemo/Exonerate | 42b888c156c9179d222b78609d34c07f0b887eaf | [
"MIT"
] | 13 | 2019-09-11T17:48:48.000Z | 2021-11-22T23:02:44.000Z | test/tutorial/2_string_test.exs | ityonemo/Exonerate | 42b888c156c9179d222b78609d34c07f0b887eaf | [
"MIT"
] | 1 | 2021-09-12T13:08:54.000Z | 2021-09-12T13:08:54.000Z | defmodule ExonerateTest.Tutorial.StringTest do
use ExUnit.Case, async: true
@moduletag :string
@moduledoc """
basic tests from:
https://json-schema.org/understanding-json-schema/reference/string.html
Literally conforms to all the tests presented in this document.
"""
@moduletag :tutorial
defmodule String do
  @moduledoc """
  tests from:

  https://json-schema.org/understanding-json-schema/reference/string.html#string
  """
  require Exonerate

  # Generates string/1, a validator for the bare {"type": "string"} schema.
  Exonerate.function_from_string(:def, :string, ~s({ "type": "string" }))
end
describe "basic strings example" do
  test "various strings match correctly" do
    # Unicode and the empty string are all valid JSON-schema strings.
    for input <- ["This is a string", "Déjà vu", "", "42"] do
      assert String.string(input) == :ok
    end
  end

  test "number mismatches a string" do
    assert {:error, list} = String.string(42)
    assert list[:schema_pointer] == "/type"
    assert list[:error_value] == 42
    assert list[:json_pointer] == "/"
  end
end
defmodule Length do
  @moduledoc """
  tests from:

  https://json-schema.org/understanding-json-schema/reference/string.html#string
  """
  require Exonerate

  # Generates string/1, validating length between 2 and 3 graphemes
  # inclusive.
  Exonerate.function_from_string(:def, :string, """
  {
    "type": "string",
    "minLength": 2,
    "maxLength": 3
  }
  """)
end
describe "strings length example" do
  test "string of correct sizes match" do
    # Both boundary lengths (2 and 3) are accepted.
    assert Length.string("AB") == :ok
    assert Length.string("ABC") == :ok
  end

  test "string of incorrect sizes don't match" do
    assert {:error, too_short} = Length.string("A")
    assert too_short[:schema_pointer] == "/minLength"
    assert too_short[:error_value] == "A"
    assert too_short[:json_pointer] == "/"

    assert {:error, too_long} = Length.string("ABCD")
    assert too_long[:schema_pointer] == "/maxLength"
    assert too_long[:error_value] == "ABCD"
    assert too_long[:json_pointer] == "/"
  end
end
defmodule Pattern do
  @moduledoc """
  tests from:

  https://json-schema.org/understanding-json-schema/reference/string.html#regular-expressions
  """
  require Exonerate

  # Generates string/1, validating against a US phone-number pattern
  # (optional 3-digit area code in parentheses, then NNN-NNNN). The
  # backslashes are escaped twice: once for the Elixir heredoc and once
  # for the embedded JSON string.
  Exonerate.function_from_string(:def, :string, """
  {
    "type": "string",
    "pattern": "^(\\\\([0-9]{3}\\\\))?[0-9]{3}-[0-9]{4}$"
  }
  """)
end
describe "strings pattern example" do
  test "telephone numbers match" do
    assert Pattern.string("555-1212") == :ok
    assert Pattern.string("(888)555-1212") == :ok
  end

  test "string of incorrect patterns don't match" do
    assert {:error, with_extension} = Pattern.string("(888)555-1212 ext. 532")
    assert with_extension[:schema_pointer] == "/pattern"
    assert with_extension[:error_value] == "(888)555-1212 ext. 532"
    assert with_extension[:json_pointer] == "/"

    assert {:error, with_letters} = Pattern.string("(800)FLOWERS")
    assert with_letters[:schema_pointer] == "/pattern"
    assert with_letters[:error_value] == "(800)FLOWERS"
    assert with_letters[:json_pointer] == "/"
  end
end
defmodule Format do
  require Exonerate

  # Stand-in formatter used to exercise the {module, function, args}
  # format-override option: arity 1 accepts exactly "ok"; arity 2 accepts
  # two equal arguments.
  defmodule Custom do
    def format("ok"), do: true
    def format(_), do: false
    def format(a, a), do: true
    def format(_, _), do: false
  end

  # Built-in "date-time" format validation (the "comment" keys only serve
  # to make each otherwise-identical schema unique).
  Exonerate.function_from_string(:def, :datetime, ~s({"type": "string", "format": "date-time"}))

  # date-time restricted to UTC via the :utc format option.
  Exonerate.function_from_string(:def, :datetime_utc, ~s({"type": "string", "format": "date-time", "comment": "a"}),
    format: %{"/" => [:utc]})

  # format checking switched off for this path.
  Exonerate.function_from_string(:def, :datetime_disabled, ~s({"type": "string", "format": "date-time", "comment": "b"}),
    format: %{"/" => false})

  # Overrides dispatching to Custom.format/1 and Custom.format/2.
  Exonerate.function_from_string(:def, :datetime_custom, ~s({"type": "string", "format": "date-time", "comment": "c"}),
    format: %{"/" => {Custom, :format, []}})
  Exonerate.function_from_string(:def, :datetime_custom_params, ~s({"type": "string", "format": "date-time", "comment": "d"}),
    format: %{"/" => {Custom, :format, ["ok"]}})

  # {function, args} overrides dispatch to the private format/1 and
  # format/2 helpers defined at the bottom of this module.
  Exonerate.function_from_string(:def, :datetime_custom_private, ~s({"type": "string", "format": "date-time", "comment": "e"}),
    format: %{"/" => {:format, []}})
  Exonerate.function_from_string(:def, :datetime_custom_private_params, ~s({"type": "string", "format": "date-time", "comment": "f"}),
    format: %{"/" => {:format, ["ok"]}})

  # Remaining built-in formats.
  # NOTE(review): the :uuid validator generated below is not exercised by
  # any test in this module — consider adding one.
  Exonerate.function_from_string(:def, :date, ~s({"type": "string", "format": "date"}))
  Exonerate.function_from_string(:def, :time, ~s({"type": "string", "format": "time"}))
  Exonerate.function_from_string(:def, :uuid, ~s({"type": "string", "format": "uuid"}))
  Exonerate.function_from_string(:def, :ipv4, ~s({"type": "string", "format": "ipv4"}))
  Exonerate.function_from_string(:def, :ipv6, ~s({"type": "string", "format": "ipv6"}))

  # A non-built-in format name, overridden per-path ("/") or for every
  # occurrence of the format name (:custom).
  Exonerate.function_from_string(:def, :custom, ~s({"type": "string", "format": "custom", "comment": "a"}),
    format: %{"/" => {Custom, :format, ["ok"]}})
  Exonerate.function_from_string(:def, :custom_private, ~s({"type": "string", "format": "custom", "comment": "b"}),
    format: %{"/" => {:format, ["ok"]}})
  Exonerate.function_from_string(:def, :custom_broad, ~s({"type": "string", "format": "custom"}),
    format: %{custom: {Custom, :format, ["ok"]}})

  # Private counterparts of Custom.format/1,2 for the {:format, args}
  # override form above.
  defp format("ok"), do: true
  defp format(_), do: false
  defp format(a, a), do: true
  defp format(_, _), do: false
end
describe "formats:" do
  test "date-time" do
    assert Format.datetime(DateTime.utc_now() |> to_string()) == :ok
    assert Format.datetime(NaiveDateTime.utc_now() |> to_string()) == :ok
    assert {:error, _} = Format.datetime(Date.utc_today() |> to_string())
    assert {:error, _} = Format.datetime(Time.utc_now() |> to_string())
    assert {:error, _} = Format.datetime("foobar")
  end

  test "date-time-utc" do
    # A naive datetime carries no offset, so it fails the UTC-only check.
    assert Format.datetime_utc(DateTime.utc_now() |> to_string()) == :ok
    assert {:error, _} = Format.datetime_utc(NaiveDateTime.utc_now() |> to_string())
  end

  test "date-time-disabled" do
    assert Format.datetime_disabled("foobar") == :ok
  end

  test "date-time-custom" do
    assert Format.datetime_custom("ok") == :ok
    assert {:error, _} = Format.datetime_custom("bar")
  end

  test "date-time-custom-params" do
    assert Format.datetime_custom_params("ok") == :ok
    assert {:error, _} = Format.datetime_custom_params("bar")
  end

  test "date-time-custom-private" do
    assert Format.datetime_custom_private("ok") == :ok
    assert {:error, _} = Format.datetime_custom_private("bar")
  end

  test "date-time-custom-private-params" do
    assert Format.datetime_custom_private_params("ok") == :ok
    assert {:error, _} = Format.datetime_custom_private_params("bar")
  end

  test "date" do
    assert Format.date(Date.utc_today() |> to_string()) == :ok
    assert {:error, _} = Format.date("foo")
  end

  test "time" do
    assert Format.time(Time.utc_now() |> to_string()) == :ok
    assert {:error, _} = Format.time("foo")
  end

  test "ipv4" do
    assert Format.ipv4("10.10.10.10") == :ok
    assert {:error, _} = Format.ipv4("256.10.10.10")
  end

  test "ipv6" do
    assert Format.ipv6("::1") == :ok
    assert {:error, _} = Format.ipv6("foo")
  end

  test "custom" do
    assert Format.custom("ok") == :ok
    assert {:error, _} = Format.custom("foo")
  end

  test "custom-private" do
    assert Format.custom_private("ok") == :ok
    assert {:error, _} = Format.custom_private("foo")
  end

  test "custom-broad" do
    assert Format.custom_broad("ok") == :ok
    assert {:error, _} = Format.custom_broad("foo")
  end
end
end
| 31.686747 | 136 | 0.596578 |
1c8126ca27117166f78c2131161ed74cdba2db69 | 80 | exs | Elixir | .formatter.exs | sourdoughdev/mixduty | 493ef31ba4d8cf281676dcd374f06e5b010f038b | [
"MIT"
] | 8 | 2018-10-23T18:48:17.000Z | 2021-11-29T04:49:12.000Z | .formatter.exs | sourdoughdev/mixduty | 493ef31ba4d8cf281676dcd374f06e5b010f038b | [
"MIT"
] | 3 | 2021-01-05T16:24:43.000Z | 2021-01-31T18:06:32.000Z | .formatter.exs | sourdoughdev/mixduty | 493ef31ba4d8cf281676dcd374f06e5b010f038b | [
"MIT"
] | 10 | 2019-05-14T12:12:44.000Z | 2021-11-22T08:32:41.000Z | [
inputs: ["mix.exs", "{config,lib,test}/**/*.{ex,exs}"],
line_length: 98
]
| 16 | 57 | 0.5375 |
1c8153d428e1cf545547ec01c1338f7ab02bc635 | 74,495 | ex | Elixir | lib/ecto/schema.ex | dgvncsz0f/ecto | bae06fe650328cc1060c09fe889a2de9a10edb1b | [
"Apache-2.0"
] | null | null | null | lib/ecto/schema.ex | dgvncsz0f/ecto | bae06fe650328cc1060c09fe889a2de9a10edb1b | [
"Apache-2.0"
] | null | null | null | lib/ecto/schema.ex | dgvncsz0f/ecto | bae06fe650328cc1060c09fe889a2de9a10edb1b | [
"Apache-2.0"
] | null | null | null | defmodule Ecto.Schema do
@moduledoc ~S"""
Defines a schema.
An Ecto schema is used to map any data source into an Elixir struct.
One of such use cases is to map data coming from a repository,
usually a table, into Elixir structs.
## Example
defmodule User do
use Ecto.Schema
schema "users" do
field :name, :string
field :age, :integer, default: 0
has_many :posts, Post
end
end
By default, a schema will automatically generate a primary key which is named
`id` and of type `:integer`. The `field` macro defines a field in the schema
with given name and type. `has_many` associates many posts with the user
schema.
Note that the name of the database table does not need to correlate to your
module name. For example, if you are working with a legacy database, you can
reference the table name when you define your schema:
defmodule User do
use Ecto.Schema
schema "legacy_users" do
# ... fields ...
end
end
## Schema attributes
Supported attributes, to be set beforehand, for configuring the defined schema.
These attributes are:
* `@primary_key` - configures the schema primary key. It expects
a tuple `{field_name, type, options}` with the primary key field
name, type (typically `:id` or `:binary_id`, but can be any type) and
options. Defaults to `{:id, :id, autogenerate: true}`. When set
to `false`, does not define a primary key in the schema unless
composite keys are defined using the options of `field`.
* `@schema_prefix` - configures the schema prefix. Defaults to `nil`,
which generates structs and queries without prefix. When set, the
prefix will be used by every built struct and on queries whenever
the schema is used in a `from` or a `join`. In PostgreSQL, the prefix
is called "SCHEMA" (typically set via Postgres' `search_path`).
In MySQL the prefix points to databases.
* `@foreign_key_type` - configures the default foreign key type
used by `belongs_to` associations. Defaults to `:id`;
* `@timestamps_opts` - configures the default timestamps type
used by `timestamps`. Defaults to `[type: :naive_datetime]`;
* `@derive` - the same as `@derive` available in `Kernel.defstruct/1`
as the schema defines a struct behind the scenes;
* `@field_source_mapper` - a function that receives the current field name
and returns the mapping of this field name in the underlying source.
In other words, it is a mechanism to automatically generate the `:source`
option for the `field` macro. It defaults to `fn x -> x end`, where no
field transformation is done;
The advantage of configuring the schema via those attributes is
that they can be set with a macro to configure application wide
defaults.
For example, if your database does not support autoincrementing
primary keys and requires something like UUID or a RecordID, you
can configure and use`:binary_id` as your primary key type as follows:
# Define a module to be used as base
defmodule MyApp.Schema do
defmacro __using__(_) do
quote do
use Ecto.Schema
@primary_key {:id, :binary_id, autogenerate: true}
@foreign_key_type :binary_id
end
end
end
# Now use MyApp.Schema to define new schemas
defmodule MyApp.Comment do
use MyApp.Schema
schema "comments" do
belongs_to :post, MyApp.Post
end
end
Any schemas using `MyApp.Schema` will get the `:id` field with type
`:binary_id` as the primary key. We explain what the `:binary_id` type
entails in the next section.
The `belongs_to` association on `MyApp.Comment` will also define
a `:post_id` field with `:binary_id` type that references the `:id`
field of the `MyApp.Post` schema.
## Primary keys
Ecto supports two ID types, called `:id` and `:binary_id`, which are
often used as the type for primary keys and associations.
The `:id` type is used when the primary key is an integer while the
`:binary_id` is used for primary keys in particular binary formats,
which may be `Ecto.UUID` for databases like PostgreSQL and MySQL,
or some specific ObjectID or RecordID often imposed by NoSQL databases.
In both cases, both types have their semantics specified by the
underlying adapter/database. If you use the `:id` type with
`:autogenerate`, it means the database will be responsible for
auto-generation of the id. This is often the case for primary keys
in relational databases which are auto-incremented.
Similarly, the `:binary_id` type may be generated in the adapter
for cases like UUID but it may also be handled by the database if
required. In any case, both scenarios are handled transparently by
Ecto.
Besides `:id` and `:binary_id`, which are often used by primary
and foreign keys, Ecto provides a huge variety of types to be used
by any column.
Ecto also supports composite primary keys.
If your primary key is not named "id" (e.g. if you are working with a
legacy database), you can use the `@primary_key` attribute to configure
your key name using the `source` option. For example, the following
attribute defines an integer primary key named `legacy_id` which is
automatically incremented by the database:
@primary_key {:id, :integer, autogenerate: false, source: :legacy_id}
## Types and casting
When defining the schema, types need to be given. Types are split
into two categories, primitive types and custom types.
### Primitive types
The primitive types are:
Ecto type | Elixir type | Literal syntax in query
:---------------------- | :---------------------- | :---------------------
`:id` | `integer` | 1, 2, 3
`:binary_id` | `binary` | `<<int, int, int, ...>>`
`:integer` | `integer` | 1, 2, 3
`:float` | `float` | 1.0, 2.0, 3.0
`:boolean` | `boolean` | true, false
`:string` | UTF-8 encoded `string` | "hello"
`:binary` | `binary` | `<<int, int, int, ...>>`
`{:array, inner_type}` | `list` | `[value, value, value, ...]`
`:map` | `map` |
`{:map, inner_type}` | `map` |
`:decimal` | [`Decimal`](https://github.com/ericmj/decimal) |
`:date` | `Date` |
`:time` | `Time` |
`:naive_datetime` | `NaiveDateTime` |
`:naive_datetime_usec` | `NaiveDateTime` |
`:utc_datetime` | `DateTime` |
`:utc_datetime_usec` | `DateTime` |
**Note:** For the `{:array, inner_type}` and `{:map, inner_type}` type,
replace `inner_type` with one of the valid types, such as `:string`.
### Custom types
Besides providing primitive types, Ecto allows custom types to be
implemented by developers, allowing Ecto behaviour to be extended.
A custom type is a module that implements the `Ecto.Type` behaviour.
By default, Ecto provides the following custom types:
Custom type | Database type | Elixir type
:---------------------- | :---------------------- | :---------------------
`Ecto.UUID` | `:uuid` | `uuid-string`
Read the `Ecto.Type` documentation for more information on implementing
your own types.
Finally, schemas can also have virtual fields by passing the
`virtual: true` option. These fields are not persisted to the database
and can optionally not be type checked by declaring type `:any`.
### The datetime types
Four different datetime primitive types are available:
* `naive_datetime` - has a precision of seconds and casts values
to Elixir's `NaiveDateTime` struct which has no timezone information.
* `naive_datetime_usec` - has a default precision of microseconds and
also casts values to `NaiveDateTime` with no timezone information.
  * `utc_datetime` - has a precision of seconds and casts values
to Elixir's `DateTime` struct and expects the time zone to be set to UTC.
* `utc_datetime_usec` has a default precision of microseconds and also
    casts values to `DateTime` expecting the time zone to be set to UTC.
Having these different types allows developers to choose a type that will
be compatible with the database and your project's precision requirements.
For example, some older versions of MySQL do not support microseconds in
datetime fields.
When choosing what datetime type to work with, keep in mind that Elixir
functions like `NaiveDateTime.utc_now/0` have a default precision of 6.
Casting a value with a precision greater than 0 to a non-`usec` type will
truncate all microseconds and set the precision to 0.
### The map type
The map type allows developers to store an Elixir map directly
in the database:
# In your migration
create table(:users) do
add :data, :map
end
# In your schema
field :data, :map
# Now in your code
user = Repo.insert! %User{data: %{"foo" => "bar"}}
Keep in mind that we advise the map keys to be strings or integers
instead of atoms. Atoms may be accepted depending on how maps are
serialized but the database will always return atom keys as strings
due to security reasons.
In order to support maps, different databases may employ different
techniques. For example, PostgreSQL will store those values in jsonb
fields, allowing you to just query parts of it. MySQL and MSSQL, on
the other hand, do not yet provide a JSON type, so the value will be
stored in a text field.
For maps to work in such databases, Ecto will need a JSON library.
By default Ecto will use [Jason](http://github.com/michalmuskala/jason)
which needs to be added to your deps in `mix.exs`:
{:jason, "~> 1.0"}
You can however configure the adapter to use another library. For example,
if using Postgres:
config :postgrex, :json_library, YourLibraryOfChoice
Or if using MySQL:
config :mariaex, :json_library, YourLibraryOfChoice
If changing the JSON library, remember to recompile the adapter afterwards
by cleaning the current build:
mix deps.clean --build postgrex
### Casting
When directly manipulating the struct, it is the responsibility of
the developer to ensure the field values have the proper type. For
example, you can create a user struct with an invalid value
for `age`:
iex> user = %User{age: "0"}
iex> user.age
"0"
However, if you attempt to persist the struct above, an error will
be raised since Ecto validates the types when sending them to the
adapter/database.
Therefore, when working with and manipulating external data, it is
recommended to use `Ecto.Changeset`'s that are able to filter
and properly cast external data:
changeset = Ecto.Changeset.cast(%User{}, %{"age" => "0"}, [:age])
user = Repo.insert!(changeset)
**You can use Ecto schemas and changesets to cast and validate any kind
of data, regardless if the data will be persisted to an Ecto repository
or not**.
## Reflection
Any schema module will generate the `__schema__` function that can be
used for runtime introspection of the schema:
* `__schema__(:source)` - Returns the source as given to `schema/2`;
* `__schema__(:prefix)` - Returns optional prefix for source provided by
`@schema_prefix` schema attribute;
* `__schema__(:primary_key)` - Returns a list of primary key fields (empty if there is none);
* `__schema__(:fields)` - Returns a list of all non-virtual field names;
* `__schema__(:field_source, field)` - Returns the alias of the given field;
* `__schema__(:type, field)` - Returns the type of the given non-virtual field;
* `__schema__(:associations)` - Returns a list of all association field names;
* `__schema__(:association, assoc)` - Returns the association reflection of the given assoc;
* `__schema__(:embeds)` - Returns a list of all embedded field names;
* `__schema__(:embed, embed)` - Returns the embedding reflection of the given embed;
* `__schema__(:read_after_writes)` - Non-virtual fields that must be read back
from the database after every write (insert or update);
* `__schema__(:autogenerate_id)` - Primary key that is auto generated on insert;
Furthermore, both `__struct__` and `__changeset__` functions are
defined so structs and changeset functionalities are available.
"""
@type source :: String.t
@type prefix :: String.t | nil
@type schema :: %{optional(atom) => any, __struct__: atom, __meta__: Ecto.Schema.Metadata.t}
@type embedded_schema :: %{optional(atom) => any, __struct__: atom}
@type t :: schema | embedded_schema
defmodule Metadata do
@moduledoc """
Stores metadata of a struct.
## State
The state of the schema is stored in the `:state` field and allows
following values:
* `:built` - the struct was constructed in memory and is not persisted
to database yet;
* `:loaded` - the struct was loaded from database and represents
persisted data;
* `:deleted` - the struct was deleted and no longer represents persisted
data.
## Source
The `:source` tracks the (table or collection) where the struct is or should
be persisted to.
## Prefix
Tracks the source prefix in the data storage.
## Context
The `:context` field represents additional state some databases require
for proper updates of data. It is not used by the built-in adapters of
`Ecto.Adapters.Postres` and `Ecto.Adapters.MySQL`.
## Schema
The `:schema` field refers the module name for the schema this metadata belongs to.
"""
defstruct [:state, :source, :context, :schema, :prefix]
@type state :: :built | :loaded | :deleted
@type t :: %__MODULE__{
context: any,
prefix: Ecto.Schema.prefix,
schema: module,
source: Ecto.Schema.source,
state: state,
}
defimpl Inspect do
import Inspect.Algebra
def inspect(metadata, opts) do
%{source: source, prefix: prefix, state: state, context: context} = metadata
entries =
for entry <- [state, prefix, source, context],
entry != nil,
do: to_doc(entry, opts)
concat ["#Ecto.Schema.Metadata<"] ++ Enum.intersperse(entries, ", ") ++ [">"]
end
end
end
@doc false
defmacro __using__(_) do
quote do
import Ecto.Schema, only: [schema: 2, embedded_schema: 1]
@primary_key nil
@timestamps_opts []
@foreign_key_type :id
@schema_prefix nil
@field_source_mapper fn x -> x end
Module.register_attribute(__MODULE__, :ecto_primary_keys, accumulate: true)
Module.register_attribute(__MODULE__, :ecto_fields, accumulate: true)
Module.register_attribute(__MODULE__, :ecto_field_sources, accumulate: true)
Module.register_attribute(__MODULE__, :ecto_assocs, accumulate: true)
Module.register_attribute(__MODULE__, :ecto_embeds, accumulate: true)
Module.register_attribute(__MODULE__, :ecto_raw, accumulate: true)
Module.register_attribute(__MODULE__, :ecto_autogenerate, accumulate: true)
Module.register_attribute(__MODULE__, :ecto_autoupdate, accumulate: true)
Module.put_attribute(__MODULE__, :ecto_autogenerate_id, nil)
end
end
@doc """
Defines an embedded schema.
An embedded schema does not require a source name
and it does not include a metadata field.
Embedded schemas by default set the primary key type
to `:binary_id` but such can be configured with the
`@primary_key` attribute.
Embedded schemas don't define the `__meta__` field.
"""
  defmacro embedded_schema([do: block]) do
    # Embedded variant: no source, no `__meta__` field, and the primary key
    # type defaults to `:binary_id` (see the private `schema/4` builder).
    schema(nil, false, :binary_id, block)
  end
@doc """
Defines a schema struct with a source name and field definitions.
An additional field called `__meta__` is added to the struct for storing
internal Ecto state. This field always has a `Ecto.Schema.Metadata` struct
as value and can be manipulated with the `Ecto.put_meta/2` function.
"""
  defmacro schema(source, [do: block]) do
    # Persisted variant: includes a `__meta__` field and defaults the primary
    # key type to `:id` (see the private `schema/4` builder).
    schema(source, true, :id, block)
  end
defp schema(source, meta?, type, block) do
prelude =
quote do
@after_compile Ecto.Schema
Module.register_attribute(__MODULE__, :changeset_fields, accumulate: true)
Module.register_attribute(__MODULE__, :struct_fields, accumulate: true)
meta? = unquote(meta?)
source = unquote(source)
prefix = @schema_prefix
# Those module attributes are accessed only dynamically
# so we explicitly reference them here to avoid warnings.
_ = @foreign_key_type
_ = @timestamps_opts
if meta? do
unless is_binary(source) do
raise ArgumentError, "schema source must be a string, got: #{inspect source}"
end
meta = %Metadata{state: :built, source: source, prefix: prefix, schema: __MODULE__}
Module.put_attribute(__MODULE__, :struct_fields, {:__meta__, meta})
end
if @primary_key == nil do
@primary_key {:id, unquote(type), autogenerate: true}
end
primary_key_fields =
case @primary_key do
false ->
[]
{name, type, opts} ->
Ecto.Schema.__field__(__MODULE__, name, type, [primary_key: true] ++ opts)
[name]
other ->
raise ArgumentError, "@primary_key must be false or {name, type, opts}"
end
try do
import Ecto.Schema
unquote(block)
after
:ok
end
end
postlude =
quote unquote: false do
primary_key_fields = @ecto_primary_keys |> Enum.reverse
autogenerate = @ecto_autogenerate |> Enum.reverse
autoupdate = @ecto_autoupdate |> Enum.reverse
fields = @ecto_fields |> Enum.reverse
field_sources = @ecto_field_sources |> Enum.reverse
assocs = @ecto_assocs |> Enum.reverse
embeds = @ecto_embeds |> Enum.reverse
defstruct @struct_fields
def __changeset__ do
%{unquote_splicing(Macro.escape(@changeset_fields))}
end
def __schema__(:prefix), do: unquote(prefix)
def __schema__(:source), do: unquote(source)
def __schema__(:fields), do: unquote(Enum.map(fields, &elem(&1, 0)))
def __schema__(:primary_key), do: unquote(primary_key_fields)
def __schema__(:hash), do: unquote(:erlang.phash2({primary_key_fields, fields}))
def __schema__(:read_after_writes), do: unquote(Enum.reverse(@ecto_raw))
def __schema__(:autogenerate_id), do: unquote(Macro.escape(@ecto_autogenerate_id))
def __schema__(:autogenerate), do: unquote(Macro.escape(autogenerate))
def __schema__(:autoupdate), do: unquote(Macro.escape(autoupdate))
def __schema__(:query) do
%Ecto.Query{
from: %Ecto.Query.FromExpr{
source: {unquote(source), __MODULE__},
prefix: unquote(prefix)
}
}
end
for clauses <- Ecto.Schema.__schema__(fields, field_sources, assocs, embeds),
{args, body} <- clauses do
def __schema__(unquote_splicing(args)), do: unquote(body)
end
end
quote do
unquote(prelude)
unquote(postlude)
end
end
## API
@doc """
Defines a field on the schema with given name and type.
The field name will be used as is to read and write to the database
by all of the built-in adapters unless overridden with the `:source`
option.
## Options
* `:default` - Sets the default value on the schema and the struct.
The default value is calculated at compilation time, so don't use
expressions like `DateTime.utc_now` or `Ecto.UUID.generate` as
they would then be the same for all records.
* `:source` - Defines the name that is to be used in database for this field.
This is useful when attaching to an existing database. The value should be
an atom.
* `:autogenerate` - a `{module, function, args}` tuple for a function
to call to generate the field value before insertion if value is not set.
A shorthand value of `true` is equivalent to `{type, :autogenerate, []}`.
* `:read_after_writes` - When true, the field is always read back
from the database after insert and updates.
For relational databases, this means the RETURNING option of those
statements is used. For this reason, MySQL does not support this
option and will raise an error if a schema is inserted/updated with
read after writes fields.
* `:virtual` - When true, the field is not persisted to the database.
Notice virtual fields do not support `:autogenerate` nor
`:read_after_writes`.
* `:primary_key` - When true, the field is used as part of the
composite primary key
"""
  defmacro field(name, type \\ :string, opts \\ []) do
    # Delegates to `Ecto.Schema.__field__/4` (defined elsewhere in this
    # module), which registers the field while the schema block is expanded.
    quote do
      Ecto.Schema.__field__(__MODULE__, unquote(name), unquote(type), unquote(opts))
    end
  end
@doc """
Generates `:inserted_at` and `:updated_at` timestamp fields.
The fields generated by this macro will automatically be set to
the current time when inserting and updating values in a repository.
## Options
* `:type` - the timestamps type, defaults to `:naive_datetime`.
* `:inserted_at` - the name of the column for insertion times or `false`
* `:updated_at` - the name of the column for update times or `false`
* `:autogenerate` - a module-function-args tuple used for generating
both `inserted_at` and `updated_at` timestamps
All options can be pre-configured by setting `@timestamps_opts`.
"""
  defmacro timestamps(opts \\ []) do
    quote bind_quoted: binding() do
      # Precedence: built-in defaults < @timestamps_opts < call-site opts
      # (the rightmost merge wins).
      timestamps =
        [inserted_at: :inserted_at, updated_at: :updated_at, type: :naive_datetime]
        |> Keyword.merge(@timestamps_opts)
        |> Keyword.merge(opts)
      type = Keyword.fetch!(timestamps, :type)
      # Generator MFA; defaults to Ecto.Schema.__timestamps__/1 for the type.
      autogen = timestamps[:autogenerate] || {Ecto.Schema, :__timestamps__, [type]}
      # Either column can be disabled by passing `false` for its name.
      if inserted_at = Keyword.fetch!(timestamps, :inserted_at) do
        Ecto.Schema.field(inserted_at, type, [])
        Module.put_attribute(__MODULE__, :ecto_autogenerate, {inserted_at, autogen})
      end
      if updated_at = Keyword.fetch!(timestamps, :updated_at) do
        Ecto.Schema.field(updated_at, type, [])
        # updated_at is filled both on insert (autogenerate) and on update
        # (autoupdate).
        Module.put_attribute(__MODULE__, :ecto_autogenerate, {updated_at, autogen})
        Module.put_attribute(__MODULE__, :ecto_autoupdate, {updated_at, autogen})
      end
    end
  end
@doc ~S"""
Indicates a one-to-many association with another schema.
The current schema has zero or more records of the other schema. The other
schema often has a `belongs_to` field with the reverse association.
## Options
* `:foreign_key` - Sets the foreign key, this should map to a field on the
other schema, defaults to the underscored name of the current schema
suffixed by `_id`
* `:references` - Sets the key on the current schema to be used for the
association, defaults to the primary key on the schema
* `:through` - Allow this association to be defined in terms of existing
associations. Read the section on `:through` associations for more info
* `:on_delete` - The action taken on associations when parent record
is deleted. May be `:nothing` (default), `:nilify_all` and `:delete_all`.
Notice `:on_delete` may also be set in migrations when creating a
reference. If your database supports it, setting the `:on_delete` in the
migration is preferred as the database can effectively guarantee integrity,
it is more efficient, and it also allows `:nilify_all` and `:delete_all`
to cascade.
* `:on_replace` - The action taken on associations when the record is
replaced when casting or manipulating parent changeset. May be
`:raise` (default), `:mark_as_invalid`, `:nilify`, or `:delete`.
See `Ecto.Changeset`'s section on related data for more info.
* `:defaults` - Default values to use when building the association
* `:where` - A filter for the association. See "Filtering associations" below.
It does not apply to `:through` associations.
## Examples
defmodule Post do
use Ecto.Schema
schema "posts" do
has_many :comments, Comment
end
end
# Get all comments for a given post
post = Repo.get(Post, 42)
comments = Repo.all assoc(post, :comments)
# The comments can come preloaded on the post struct
[post] = Repo.all(from(p in Post, where: p.id == 42, preload: :comments))
post.comments #=> [%Comment{...}, ...]
`has_many` can be used to define hierarchical relationships within a single
schema, for example threaded comments.
defmodule Comment do
use Ecto.Schema
schema "comments" do
field :content, :string
field :parent_id, :integer
belongs_to :parent, Comment, foreign_key: :id, references: :parent_id, define_field: false
has_many :children, Comment, foreign_key: :parent_id, references: :id
end
end
## Filtering associations
A query can also be given instead of a schema. Querying, joining or preloading
the association will use the given query. Currently only where clauses can be
provided in queries. Let's see some examples:
defmodule Post do
use Ecto.Schema
schema "posts" do
has_many :public_comments, Comment,
where: [public: true]
has_many :deleted_comments, Comment,
          where: dynamic([c], not(is_nil(c.deleted_at)))
end
end
The `:where` option may receive a dynamic query, a keyword list or a MFA
(a tuple with a module, function and args to invoke). The MFA is especially
useful to avoid duplication in those definitions:
defmodule Comment do
def deleted_filter do
          dynamic([c], not(is_nil(c.deleted_at)))
end
end
defmodule Post do
use Ecto.Schema
schema "posts" do
has_many :deleted_comments, Comment,
where: {Comment, :deleted_filter, []}
end
end
**Important!** Please use this feature only when strictly necessary,
otherwise it is very easy to end-up with large schemas with dozens of
different associations polluting your schema and affecting your
application performance. For instance, if you are using associations
only for different querying purposes, then it is preferable to build
and compose queries, rather than defining multiple associations:
posts
|> Ecto.assoc(:comments)
|> Comment.deleted()
Or when preloading:
from posts, preload: [comments: ^Comment.deleted()]
## has_many/has_one :through
Ecto also supports defining associations in terms of other associations
via the `:through` option. Let's see an example:
defmodule Post do
use Ecto.Schema
schema "posts" do
has_many :comments, Comment
has_one :permalink, Permalink
# In the has_many :through example below, the `:comments`
# in the list [:comments, :author] refers to the
# `has_many :comments` in the Post own schema and the
# `:author` refers to the `belongs_to :author` of the
# Comment's schema (the module below).
# (see the description below for more details)
has_many :comments_authors, through: [:comments, :author]
# Specify the association with custom source
has_many :tags, {"posts_tags", Tag}
end
end
defmodule Comment do
use Ecto.Schema
schema "comments" do
belongs_to :author, Author
belongs_to :post, Post
has_one :post_permalink, through: [:post, :permalink]
end
end
In the example above, we have defined a `has_many :through` association
named `:comments_authors`. A `:through` association always expects a list
and the first element of the list must be a previously defined association
in the current module. For example, `:comments_authors` first points to
`:comments` in the same module (Post), which then points to `:author` in
the next schema, `Comment`.
This `:through` association will return all authors for all comments
that belongs to that post:
# Get all comments authors for a given post
post = Repo.get(Post, 42)
authors = Repo.all assoc(post, :comments_authors)
Although we used the `:through` association in the example above, Ecto
also allows developers to dynamically build the through associations using
the `Ecto.assoc/2` function:
assoc(post, [:comments, :author])
In fact, given `:through` associations are read-only, **using the `Ecto.assoc/2`
format is the preferred mechanism for working with through associations**. Use
the schema-based one only if you need to store the through data alongside of
the parent struct, in specific cases such as preloading.
`:through` associations can also be preloaded. In such cases, not only
the `:through` association is preloaded but all intermediate steps are
preloaded too:
[post] = Repo.all(from(p in Post, where: p.id == 42, preload: :comments_authors))
post.comments_authors #=> [%Author{...}, ...]
# The comments for each post will be preloaded too
post.comments #=> [%Comment{...}, ...]
# And the author for each comment too
hd(post.comments).author #=> %Author{...}
When the `:through` association is expected to return one or zero items,
`has_one :through` should be used instead, as in the example at the beginning
of this section:
# How we defined the association above
has_one :post_permalink, through: [:post, :permalink]
# Get a preloaded comment
[comment] = Repo.all(Comment) |> Repo.preload(:post_permalink)
comment.post_permalink #=> %Permalink{...}
"""
  defmacro has_many(name, queryable, opts \\ []) do
    # Resolve aliases against the caller's environment (see `expand_alias`)
    # so the association stores a fully-expanded queryable, then delegate to
    # the runtime helper defined elsewhere in this module.
    queryable = expand_alias(queryable, __CALLER__)
    quote do
      Ecto.Schema.__has_many__(__MODULE__, unquote(name), unquote(queryable), unquote(opts))
    end
  end
@doc ~S"""
Indicates a one-to-one association with another schema.
The current schema has zero or one records of the other schema. The other
schema often has a `belongs_to` field with the reverse association.
## Options
* `:foreign_key` - Sets the foreign key, this should map to a field on the
other schema, defaults to the underscored name of the current module
suffixed by `_id`
* `:references` - Sets the key on the current schema to be used for the
association, defaults to the primary key on the schema
* `:through` - If this association must be defined in terms of existing
associations. Read the section in `has_many/3` for more information
* `:on_delete` - The action taken on associations when parent record
is deleted. May be `:nothing` (default), `:nilify_all` and `:delete_all`.
Notice `:on_delete` may also be set in migrations when creating a
reference. If your database supports it, setting the `:on_delete` in the
migration is preferred as the database can effectively guarantee integrity,
it is more efficient, and it also allows `:nilify_all` and `:delete_all`
to cascade.
* `:on_replace` - The action taken on associations when the record is
replaced when casting or manipulating parent changeset. May be
`:raise` (default), `:mark_as_invalid`, `:nilify`, `:update`, or
`:delete`. See `Ecto.Changeset`'s section on related data for more info.
* `:defaults` - Default values to use when building the association
* `:where` - A filter for the association. See "Filtering associations"
in `has_many/3`. It does not apply to `:through` associations.
## Examples
defmodule Post do
use Ecto.Schema
schema "posts" do
has_one :permalink, Permalink
# Specify the association with custom source
has_one :category, {"posts_categories", Category}
end
end
# The permalink can come preloaded on the post struct
[post] = Repo.all(from(p in Post, where: p.id == 42, preload: :permalink))
post.permalink #=> %Permalink{...}
## Using Queries as Associations
A query can also be given instead of a schema. Querying, joining or preloading the association will
use the given query. Currently only where clauses can be provided in queries. Let's see an example:
defmodule Post do
...
def active() do
from post in Post,
where: post.active
end
end
defmodule Comment do
use Ecto.Schema
schema "comments" do
has_one :post, Post.active()
end
end
Note: building the association does not consider the query filters.
For example, if the given query requires the active field of the associated records to be true,
building such association won't automatically set the active field to true.
"""
  defmacro has_one(name, queryable, opts \\ []) do
    # Resolve aliases against the caller's environment (see `expand_alias`)
    # before delegating to the runtime helper defined elsewhere in this module.
    queryable = expand_alias(queryable, __CALLER__)
    quote do
      Ecto.Schema.__has_one__(__MODULE__, unquote(name), unquote(queryable), unquote(opts))
    end
  end
@doc ~S"""
Indicates a one-to-one or many-to-one association with another schema.
The current schema belongs to zero or one records of the other schema. The other
schema often has a `has_one` or a `has_many` field with the reverse association.
You should use `belongs_to` in the table that contains the foreign key. Imagine
a company <-> employee relationship. If the employee contains the `company_id` in
the underlying database table, we say the employee belongs to company.
In fact, when you invoke this macro, a field with the name of foreign key is
automatically defined in the schema for you.
## Options
* `:foreign_key` - Sets the foreign key field name, defaults to the name
of the association suffixed by `_id`. For example, `belongs_to :company`
will define foreign key of `:company_id`
* `:references` - Sets the key on the other schema to be used for the
association, defaults to: `:id`
* `:define_field` - When false, does not automatically define a `:foreign_key`
field, implying the user is defining the field manually elsewhere
* `:type` - Sets the type of automatically defined `:foreign_key`.
Defaults to: `:integer` and can be set per schema via `@foreign_key_type`
* `:on_replace` - The action taken on associations when the record is
replaced when casting or manipulating parent changeset. May be
`:raise` (default), `:mark_as_invalid`, `:nilify`, `:update`, or `:delete`.
See `Ecto.Changeset`'s section on related data for more info.
* `:defaults` - Default values to use when building the association
* `:primary_key` - If the underlying belongs_to field is a primary key
* `:source` - The source for the underlying field
* `:where` - A filter for the association. See "Filtering associations"
in `has_many/3`.
## Examples
defmodule Comment do
use Ecto.Schema
schema "comments" do
belongs_to :post, Post
end
end
# The post can come preloaded on the comment record
[comment] = Repo.all(from(c in Comment, where: c.id == 42, preload: :post))
comment.post #=> %Post{...}
If you need custom options on the underlying field, you can define the
field explicitly and then pass `define_field: false` to `belongs_to`:
defmodule Comment do
use Ecto.Schema
schema "comments" do
field :post_id, :integer, ... # custom options
belongs_to :post, Post, define_field: false
end
end
## Using Queries as Associations
A query can also be given instead of a schema. Querying, joining or preloading the association will
use the given query. Currently only where clauses can be provided in queries. Let's see an example:
defmodule Post do
...
def active() do
from post in Post,
where: post.active
end
end
defmodule Comment do
use Ecto.Schema
schema "posts" do
belongs_to :post, Post.active()
end
end
Note: building the association does not consider the query filters.
For example, if the given query requires the active field of the associated records to be true,
building such association won't automatically set the active field to true.
## Polymorphic associations
One common use case for belongs to associations is to handle
polymorphism. For example, imagine you have defined a Comment
schema and you wish to use it for commenting on both tasks and
posts.
Some abstractions would force you to define some sort of
polymorphic association with two fields in your database:
* commentable_type
* commentable_id
The problem with this approach is that it breaks references in
the database. You can't use foreign keys and it is very inefficient,
both in terms of query time and storage.
In Ecto, we have three ways to solve this issue. The simplest
is to define multiple fields in the Comment schema, one for each
association:
* task_id
* post_id
Unless you have dozens of columns, this is simpler for the developer,
more DB friendly and more efficient in all aspects.
Alternatively, because Ecto does not tie a schema to a given table,
we can use separate tables for each association. Let's start over
and define a new Comment schema:
defmodule Comment do
use Ecto.Schema
schema "abstract table: comments" do
# This will be used by associations on each "concrete" table
field :assoc_id, :integer
end
end
Notice we have changed the table name to "abstract table: comments".
You can choose whatever name you want, the point here is that this
particular table will never exist.
Now in your Post and Task schemas:
defmodule Post do
use Ecto.Schema
schema "posts" do
has_many :comments, {"posts_comments", Comment}, foreign_key: :assoc_id
end
end
defmodule Task do
use Ecto.Schema
schema "tasks" do
has_many :comments, {"tasks_comments", Comment}, foreign_key: :assoc_id
end
end
Now each association uses its own specific table, "posts_comments"
and "tasks_comments", which must be created on migrations. The
advantage of this approach is that we never store unrelated data
together, also ensuring we keep database references fast and correct.
When using this technique, the only limitation is that you cannot
build comments directly. For example, the command below
Repo.insert!(%Comment{})
will attempt to use the abstract table. Instead, one should use
Repo.insert!(build_assoc(post, :comments))
leveraging the `Ecto.build_assoc/3` function. You can also
use `Ecto.assoc/2` and `Ecto.Query.assoc/2` in the query syntax
to easily retrieve associated comments to a given post or
task:
# Fetch all comments associated with the given task
Repo.all(assoc(task, :comments))
Or all comments in a given table:
Repo.all from(c in {"posts_comments", Comment}), ...)
The third and final option is to use `many_to_many/3` to
define the relationships between the resources. In this case,
the comments table won't have the foreign key, instead there
is an intermediary table responsible for associating the entries:
defmodule Comment do
use Ecto.Schema
schema "comments" do
# ...
end
end
In your posts and tasks:
defmodule Post do
use Ecto.Schema
schema "posts" do
many_to_many :comments, Comment, join_through: "posts_comments"
end
end
defmodule Task do
use Ecto.Schema
schema "tasks" do
many_to_many :comments, Comment, join_through: "tasks_comments"
end
end
See `many_to_many/3` for more information on this particular approach.
"""
defmacro belongs_to(name, queryable, opts \\ []) do
  # Resolve `Foo.Bar` aliases at compile time so the stored reflection
  # carries the expanded module name rather than the raw alias AST.
  queryable = expand_alias(queryable, __CALLER__)
  quote do
    Ecto.Schema.__belongs_to__(__MODULE__, unquote(name), unquote(queryable), unquote(opts))
  end
end
@doc ~S"""
Indicates a many-to-many association with another schema.
The association happens through a join schema or source, containing
foreign keys to the associated schemas. For example, the association
below:
# from MyApp.Post
many_to_many :tags, MyApp.Tag, join_through: "posts_tags"
is backed by relational databases through a join table as follows:
[Post] <-> [posts_tags] <-> [Tag]
id <-- post_id
tag_id --> id
More information on the migration for creating such a schema is shown
below.
## Options
* `:join_through` - specifies the source of the associated data.
It may be a string, like "posts_tags", representing the
underlying storage table or an atom, like `MyApp.PostTag`,
representing a schema. This option is required.
* `:join_keys` - specifies how the schemas are associated. It
expects a keyword list with two entries, the first being how
the join table should reach the current schema and the second
how the join table should reach the associated schema. In the
example above, it defaults to: `[post_id: :id, tag_id: :id]`.
The keys are inflected from the schema names.
* `:on_delete` - The action taken on associations when the parent record
is deleted. May be `:nothing` (default) or `:delete_all`.
`:delete_all` will only remove data from the join source, never the
associated records. Notice `:on_delete` may also be set in migrations
when creating a reference. If your database supports it, setting the
`:on_delete` in the migration is preferred as the database can effectively
guarantee integrity, it is more efficient, and it also allows `:nilify_all`
and `:delete_all` to cascade.
* `:on_replace` - The action taken on associations when the record is
replaced when casting or manipulating parent changeset. May be
`:raise` (default), `:mark_as_invalid`, or `:delete`.
`:delete` will only remove data from the join source, never the
associated records. See `Ecto.Changeset`'s section on related data
for more info.
* `:defaults` - Default values to use when building the association
* `:unique` - When true, checks if the associated entries are unique.
This is done by checking the primary key of the associated entries during
repository operations. Keep in mind this does not guarantee uniqueness at the
database level. For such it is preferred to set a unique index in the database.
For example: `create unique_index(:posts_tags, [:post_id, :tag_id])`
* `:where` - A filter for the association. See "Filtering associations"
in `has_many/3`
* `:join_through_where` - A filter for the join through association.
See "Filtering associations" in `has_many/3`
## Removing data
If you attempt to remove associated `many_to_many` data, **Ecto will
always remove data from the join schema and never from the target
associations** be it by setting `:on_replace` to `:delete`, `:on_delete`
to `:delete_all` or by using changeset functions such as
`Ecto.Changeset.put_assoc/3`. For example, if a `Post` has a many to many
relationship with `Tag`, setting `:on_delete` to `:delete_all` will
only delete entries from the "posts_tags" table in case `Post` is
deleted.
## Migration
How your migration should be structured depends on the value you pass
in `:join_through`. If `:join_through` is simply a string, representing
a table, you may define a table without primary keys and you must not
include any further columns, as those values won't be set by Ecto:
create table(:posts_tags, primary_key: false) do
add :post_id, references(:posts)
add :tag_id, references(:tags)
end
However, if your `:join_through` is a schema, like `MyApp.PostTag`, your
join table may be structured as any other table in your codebase,
including timestamps:
create table(:posts_tags) do
add :post_id, references(:posts)
add :tag_id, references(:tags)
timestamps()
end
Because `:join_through` contains a schema, in such cases, autogenerated
values and primary keys will be automatically handled by Ecto.
## Examples
defmodule Post do
use Ecto.Schema
schema "posts" do
many_to_many :tags, Tag, join_through: "posts_tags"
end
end
# Let's create a post and a tag
post = Repo.insert!(%Post{})
tag = Repo.insert!(%Tag{name: "introduction"})
# We can associate at any time post and tags together using changesets
post
|> Repo.preload(:tags) # Load existing data
|> Ecto.Changeset.change() # Build the changeset
|> Ecto.Changeset.put_assoc(:tags, [tag]) # Set the association
|> Repo.update!
# In a later moment, we may get all tags for a given post
post = Repo.get(Post, 42)
tags = Repo.all(assoc(post, :tags))
# The tags may also be preloaded on the post struct for reading
[post] = Repo.all(from(p in Post, where: p.id == 42, preload: :tags))
post.tags #=> [%Tag{...}, ...]
## Join Schema Example
You may prefer to use a join schema to handle many_to_many associations. The
decoupled nature of Ecto allows us to create a "join" struct which
`belongs_to` both sides of the many to many association.
In our example, a User has and belongs to many Organizations
defmodule UserOrganization do
use Ecto.Schema
@primary_key false
schema "users_organizations" do
belongs_to :user, User
belongs_to :organization, Organization
timestamps() # Added bonus, a join schema will also allow you to set timestamps
end
def changeset(struct, params \\ %{}) do
struct
|> Ecto.Changeset.cast(params, [:user_id, :organization_id])
|> Ecto.Changeset.validate_required([:user_id, :organization_id])
# Maybe do some counter caching here!
end
end
defmodule User do
use Ecto.Schema
schema "users" do
many_to_many :organizations, Organization, join_through: UserOrganization
end
end
defmodule Organization do
use Ecto.Schema
schema "organizations" do
many_to_many :users, User, join_through: UserOrganization
end
end
# Then to create the association, pass in the ID's of an existing
# User and Organization to UserOrganization.changeset
changeset = UserOrganization.changeset(%UserOrganization{}, %{user_id: id, organization_id: id})
case Repo.insert(changeset) do
{:ok, assoc} -> # Assoc was created!
{:error, changeset} -> # Handle the error
end
## Using Queries as Associations
A query can also be given instead of a schema, both for the join_through and the destination.
Querying, joining or preloading the association will use the given query. Currently only where
clauses can be provided in queries. Let's see an example:
defmodule UserOrganization do
use Ecto.Schema
@primary_key false
schema "users_organizations" do
belongs_to :user, User
belongs_to :organization, Organization
field :deleted, :boolean
timestamps() # Added bonus, a join schema will also allow you to set timestamps
end
def active() do
from user_organization in UserOrganization,
where: is_nil(user_organization.deleted)
end
end
defmodule User do
use Ecto.Schema
schema "users" do
many_to_many :organizations, Organization, join_through: UserOrganization
field :banned, :boolean
end
def not_banned() do
from user in User,
where: not(user.banned)
end
end
defmodule Organization do
use Ecto.Schema
schema "organizations" do
many_to_many :users, User.not_banned(), join_through: UserOrganization.active()
end
end
Note: building the association does not consider the query filters.
For example, if the given query requires the active field of the associated records to be true,
building such association won't automatically set the active field to true.
"""
defmacro many_to_many(name, queryable, opts \\ []) do
  # Resolve the alias now so the reflection stores the expanded module.
  queryable = expand_alias(queryable, __CALLER__)
  quote do
    Ecto.Schema.__many_to_many__(__MODULE__, unquote(name), unquote(queryable), unquote(opts))
  end
end
## Embeds
@doc ~S"""
Indicates an embedding of a schema.
The current schema has zero or one records of the other schema embedded
inside of it. It uses a field similar to the `:map` type for storage,
but allows embeds to have all the things regular schema can.
You must declare your `embeds_one/3` field with type `:map` at the
database level.
The embedded may or may not have a primary key. Ecto uses the primary keys
to detect if an embed is being updated or not. If a primary key is not present,
`:on_replace` should be set to either `:update` or `:delete` if there is a
desire to either update or delete the current embed when a new one is set.
## Options
* `:on_replace` - The action taken on associations when the embed is
replaced when casting or manipulating parent changeset. May be
`:raise` (default), `:mark_as_invalid`, `:update`, or `:delete`.
See `Ecto.Changeset`'s section on related data for more info.
## Examples
defmodule Order do
use Ecto.Schema
schema "orders" do
embeds_one :item, Item
end
end
defmodule Item do
use Ecto.Schema
embedded_schema do
field :title
end
end
# The item is loaded with the order
order = Repo.get!(Order, 42)
order.item #=> %Item{...}
Adding and removal of embeds can only be done via the `Ecto.Changeset`
API so Ecto can properly track the embed life-cycle:
order = Repo.get!(Order, 42)
item = %Item{title: "Soap"}
# Generate a changeset
changeset = Ecto.Changeset.change(order)
# Put a new embed to the changeset
changeset = Ecto.Changeset.put_embed(changeset, :item, item)
# Update the order, and fetch the item
item = Repo.update!(changeset).item
# Item is generated with a unique identification
item
# => %Item{id: "20a97d94-f79b-4e63-a875-85deed7719b7", title: "Soap"}
## Inline embedded schema
The schema module can be defined inline in the parent schema in simple
cases:
defmodule Parent do
use Ecto.Schema
schema "parents" do
field :name, :string
embeds_one :child, Child do
field :name, :string
field :age, :integer
end
end
end
When defining an inline embed, the `:primary_key` option may be given to
customize the embed primary key type.
Options should be passed before the `do` block like this:
embeds_one :child, Child, on_replace: :delete do
field :name, :string
field :age, :integer
end
Defining embedded schema in such a way will define a `Parent.Child` module
with the appropriate struct. In order to properly cast the inline-defined
embedded schemas, you need to use the `:with` option of `cast_embed/3` to
provide the proper function to do the casting.
For example:
def changeset(schema, params) do
schema
|> cast(params, [:name])
|> cast_embed(:child, with: &child_changeset/2)
end
defp child_changeset(schema, params) do
schema
|> cast(params, [:name, :age])
end
## Encoding and decoding
Because many databases do not support direct encoding and decoding
of embeds, it is often emulated by Ecto by using specific encoding
and decoding rules.
For example, PostgreSQL will store embeds on top of JSONB columns,
which means types in embedded schemas won't go through the usual
dump->DB->load cycle but rather encode->DB->decode->cast. This means
that, when using embedded schemas with databases like PG or MySQL,
make sure all of your types can be JSON encoded/decoded correctly.
Ecto provides this guarantee for all built-in types.
"""
# Bodiless head declares the default so the two clauses below share it.
defmacro embeds_one(name, schema, opts \\ [])

# A `do:` block means the embedded schema is defined inline; delegate to
# embeds_one/4 with empty options so the module gets created there.
defmacro embeds_one(name, schema, do: block) do
  quote do
    embeds_one(unquote(name), unquote(schema), [], do: unquote(block))
  end
end

defmacro embeds_one(name, schema, opts) do
  # Resolve the alias now so the reflection stores the expanded module.
  schema = expand_alias(schema, __CALLER__)
  quote do
    Ecto.Schema.__embeds_one__(__MODULE__, unquote(name), unquote(schema), unquote(opts))
  end
end
@doc """
Indicates an embedding of a schema.
For options and examples see documentation of `embeds_one/3`.
"""
defmacro embeds_one(name, schema, opts, do: block) do
  quote do
    # The inline block is compiled into a fresh module nested under the
    # parent schema (e.g. Parent.Child) before the embed is registered.
    {schema, opts} = Ecto.Schema.__embeds_module__(__ENV__, unquote(schema), unquote(opts), unquote(Macro.escape(block)))
    Ecto.Schema.__embeds_one__(__MODULE__, unquote(name), schema, opts)
  end
end
@doc ~S"""
Indicates an embedding of many schemas.
The current schema has zero or more records of the other schema embedded
inside of it. Embeds have all the things regular schemas have.
It is recommended to declare your `embeds_many/3` field with type `:map`
and a default of `"[]"` (although Ecto will also automatically translate
`nil` values from the database into empty lists).
The embedded may or may not have a primary key. Ecto uses the primary keys
to detect if an embed is being updated or not. If a primary key is not present
and you still want the list of embeds to be updated, `:on_replace` must be
set to `:delete`, forcing all current embeds to be deleted and replaced by
new ones whenever a new list of embeds is set.
For encoding and decoding of embeds, please read the docs for
`embeds_one/3`.
## Options
* `:on_replace` - The action taken on associations when the embed is
replaced when casting or manipulating parent changeset. May be
`:raise` (default), `:mark_as_invalid`, or `:delete`.
See `Ecto.Changeset`'s section on related data for more info.
## Examples
defmodule Order do
use Ecto.Schema
schema "orders" do
embeds_many :items, Item
end
end
defmodule Item do
use Ecto.Schema
embedded_schema do
field :title
end
end
# The items are loaded with the order
order = Repo.get!(Order, 42)
order.items #=> [%Item{...}, ...]
Adding and removal of embeds can only be done via the `Ecto.Changeset`
API so Ecto can properly track the embed life-cycle:
# Order has no items
order = Repo.get!(Order, 42)
order.items
# => []
items = [%Item{title: "Soap"}]
# Generate a changeset
changeset = Ecto.Changeset.change(order)
# Put one or more new items
changeset = Ecto.Changeset.put_embed(changeset, :items, items)
# Update the order and fetch items
items = Repo.update!(changeset).items
# Items are generated with a unique identification
items
# => [%Item{id: "20a97d94-f79b-4e63-a875-85deed7719b7", title: "Soap"}]
Updating of embeds must be done using a changeset for each changed embed.
# Order has an existing item
order = Repo.get!(Order, 42)
order.items
# => [%Item{id: "20a97d94-f79b-4e63-a875-85deed7719b7", title: "Soap"}]
# Generate a changeset
changeset = Ecto.Changeset.change(order)
# Put the updated item as a changeset
current_item = List.first(order.items)
item_changeset = Ecto.Changeset.change(current_item, title: "Mujju's Soap")
order_changeset = Ecto.Changeset.put_embed(changeset, :items, [item_changeset])
# Update the order and fetch items
items = Repo.update!(order_changeset).items
# Item has the updated title
items
# => [%Item{id: "20a97d94-f79b-4e63-a875-85deed7719b7", title: "Mujju's Soap"}]
## Inline embedded schema
The schema module can be defined inline in the parent schema in simple
cases:
defmodule Parent do
use Ecto.Schema
schema "parents" do
field :name, :string
embeds_many :children, Child do
field :name, :string
field :age, :integer
end
end
end
When defining an inline embed, the `:primary_key` option may be given to
customize the embed primary key type.
Defining embedded schema in such a way will define a `Parent.Child` module
with the appropriate struct. In order to properly cast the inline-defined
embedded schemas, you need to use the `:with` option of `cast_embed/3` to
provide the proper function to do the casting.
For example:
def changeset(schema, params) do
schema
|> cast(params, [:name])
|> cast_embed(:children, with: &child_changeset/2)
end
defp child_changeset(schema, params) do
schema
|> cast(params, [:name, :age])
end
"""
# Bodiless head declares the default so the two clauses below share it.
defmacro embeds_many(name, schema, opts \\ [])

# A `do:` block means the embedded schema is defined inline; delegate to
# embeds_many/4 with empty options so the module gets created there.
defmacro embeds_many(name, schema, do: block) do
  quote do
    embeds_many(unquote(name), unquote(schema), [], do: unquote(block))
  end
end

defmacro embeds_many(name, schema, opts) do
  # Resolve the alias now so the reflection stores the expanded module.
  schema = expand_alias(schema, __CALLER__)
  quote do
    Ecto.Schema.__embeds_many__(__MODULE__, unquote(name), unquote(schema), unquote(opts))
  end
end
@doc """
Indicates an embedding of many schemas.
For options and examples see documentation of `embeds_many/3`.
"""
defmacro embeds_many(name, schema, opts, do: block) do
  quote do
    # The inline block is compiled into a fresh module nested under the
    # parent schema (e.g. Parent.Child) before the embed is registered.
    {schema, opts} = Ecto.Schema.__embeds_module__(__ENV__, unquote(schema), unquote(opts), unquote(Macro.escape(block)))
    Ecto.Schema.__embeds_many__(__MODULE__, unquote(name), schema, opts)
  end
end
@doc """
Internal function for integrating associations into schemas.
This function exists as an extension point for libraries to
add new types of associations to Ecto. For the existing APIs,
see `belongs_to/3`, `has_many/3`, `has_one/3` and `many_to_many/3`.
This function expects the current schema, the association cardinality,
the association name, the association module (that implements
`Ecto.Association` callbacks) and a keyword list of options.
"""
@spec association(module, :one | :many, atom(), module, Keyword.t) :: Ecto.Association.t
def association(schema, cardinality, name, association, opts) do
  # Associations start out as NotLoaded placeholders in the struct until
  # they are explicitly preloaded.
  not_loaded = %Ecto.Association.NotLoaded{
    __owner__: schema,
    __field__: name,
    __cardinality__: cardinality
  }
  put_struct_field(schema, name, not_loaded)
  opts = [cardinality: cardinality] ++ opts
  # The association module builds its own reflection struct (an
  # Ecto.Association callback), which is stored for __schema__/2 lookup.
  struct = association.struct(schema, name, opts)
  Module.put_attribute(schema, :ecto_assocs, {name, struct})
  struct
end
## Callbacks
@doc false
# Produces the current UTC time in the representation expected by the
# given timestamp type.
def __timestamps__(:naive_datetime) do
  # Second precision: zero out the microsecond component.
  now = NaiveDateTime.utc_now()
  %{now | microsecond: {0, 0}}
end

def __timestamps__(:naive_datetime_usec) do
  NaiveDateTime.utc_now()
end

def __timestamps__(:utc_datetime) do
  :second
  |> System.system_time()
  |> DateTime.from_unix!(:second)
end

def __timestamps__(:utc_datetime_usec) do
  :microsecond
  |> System.system_time()
  |> DateTime.from_unix!(:microsecond)
end

def __timestamps__(type) do
  # Custom timestamp types must export from_unix!/2.
  type.from_unix!(System.system_time(:microsecond), :microsecond)
end
@doc false
# Loads data into the struct, assuming the fields are properly named and
# belong to the struct. Types and values are zipped together in one pass
# as they are loaded.
def __safe_load__(struct, types, values, prefix, source, loader) do
  zipped = safe_load_zip(types, values, struct, loader)
  case Map.merge(struct, Map.new(zipped)) do
    %{__meta__: %Metadata{} = metadata} = struct ->
      # Schemas with metadata are marked as loaded from the given source.
      metadata = %{metadata | state: :loaded, source: source, prefix: prefix}
      Map.put(struct, :__meta__, metadata)
    map ->
      # Embedded/schemaless structs carry no metadata and pass through.
      map
  end
end
# Walks the field/type specs and the raw values in lockstep, loading each
# value through `loader`. Both lists must have the same length.
defp safe_load_zip([], [], _struct, _loader) do
  []
end

defp safe_load_zip([{field, type} | fields_rest], [value | values_rest], struct, loader) do
  loaded = load!(struct, field, type, value, loader)
  [{field, loaded} | safe_load_zip(fields_rest, values_rest, struct, loader)]
end
@doc false
# Assumes the data may not all belong to the schema/struct and that it
# may also require source-based renaming.
def __unsafe_load__(schema, data, loader) do
  types = schema.__schema__(:load)
  struct = schema.__struct__()
  case __unsafe_load__(struct, types, data, loader) do
    %{__meta__: %Metadata{} = metadata} = struct ->
      Map.put(struct, :__meta__, %{metadata | state: :loaded})
    map ->
      # Embedded/schemaless structs carry no metadata and pass through.
      map
  end
end
@doc false
def __unsafe_load__(struct, types, map, loader) when is_map(map) do
  # Fold over the declared types, copying only keys present in `map`.
  Enum.reduce(types, struct, fn pair, acc ->
    {field, source, type} = field_source_and_type(pair)
    case fetch_string_or_atom_field(map, source) do
      {:ok, value} -> Map.put(acc, field, load!(struct, field, type, value, loader))
      # Missing keys are skipped, keeping the struct's default value.
      :error -> acc
    end
  end)
end
@compile {:inline, field_source_and_type: 1, fetch_string_or_atom_field: 2}

# Normalizes a load spec into {field, source, type}; fields whose column
# differs from the field name are tagged {:source, source, type}.
defp field_source_and_type({field, {:source, source, type}}), do: {field, source, type}
defp field_source_and_type({field, type}), do: {field, field, type}

# Fetches `field` from `map`, trying the string key first and falling
# back to the atom key.
defp fetch_string_or_atom_field(map, field) when is_atom(field) do
  with :error <- Map.fetch(map, Atom.to_string(field)) do
    Map.fetch(map, field)
  end
end
# Runs `value` through the adapter `loader` for `type`, raising a
# descriptive ArgumentError when the value cannot be loaded.
defp load!(struct, field, type, value, loader) do
  case loader.(type, value) do
    {:ok, loaded} ->
      loaded

    :error ->
      raise ArgumentError,
            "cannot load `#{inspect value}` as type #{inspect type} " <>
              "for field `#{field}`#{error_data(struct)}"
  end
end
# Suffix naming the schema for error messages; plain maps get no suffix.
defp error_data(%{__struct__: module}), do: " in schema #{inspect module}"
defp error_data(map) when is_map(map), do: ""
@doc false
def __field__(mod, name, type, opts) do
  # Validate the type before registering the field on the module.
  check_field_type!(name, type, opts)
  define_field(mod, name, type, opts)
end
# Registers a field on the schema module: changeset metadata, struct key
# and — for non-virtual fields — source mapping, primary key and
# autogenerate bookkeeping.
defp define_field(mod, name, type, opts) do
  virtual? = opts[:virtual] || false
  pk? = opts[:primary_key] || false
  Module.put_attribute(mod, :changeset_fields, {name, type})
  put_struct_field(mod, name, Keyword.get(opts, :default))

  unless virtual? do
    source = opts[:source] || Module.get_attribute(mod, :field_source_mapper).(name)

    if name != source do
      Module.put_attribute(mod, :ecto_field_sources, {name, source})
    end

    # Bind both options up front so the conflict check below can see them.
    raw = opts[:read_after_writes]
    gen = opts[:autogenerate]

    if raw do
      Module.put_attribute(mod, :ecto_raw, name)
    end

    case gen do
      {_, _, _} ->
        store_mfa_autogenerate!(mod, name, type, gen)

      true ->
        store_type_autogenerate!(mod, name, source || name, type, pk?)

      _ ->
        :ok
    end

    if raw && gen do
      raise ArgumentError, "cannot mark the same field as autogenerate and read_after_writes"
    end

    if pk? do
      Module.put_attribute(mod, :ecto_primary_keys, name)
    end

    Module.put_attribute(mod, :ecto_fields, {name, type})
  end
end
@valid_has_options [:foreign_key, :references, :through, :on_delete, :defaults, :on_replace, :where]

@doc false
def __has_many__(mod, name, queryable, opts) do
  check_options!(opts, @valid_has_options, "has_many/3")
  # A keyword list with :through denotes a has-through association; in
  # that case the keyword list itself carries the options.
  if is_list(queryable) and Keyword.has_key?(queryable, :through) do
    association(mod, :many, name, Ecto.Association.HasThrough, queryable)
  else
    struct =
      association(mod, :many, name, Ecto.Association.Has, [queryable: queryable] ++ opts)
    Module.put_attribute(mod, :changeset_fields, {name, {:assoc, struct}})
  end
end
@doc false
def __has_one__(mod, name, queryable, opts) do
  check_options!(opts, @valid_has_options, "has_one/3")
  # A keyword list with :through denotes a has-through association; in
  # that case the keyword list itself carries the options.
  if is_list(queryable) and Keyword.has_key?(queryable, :through) do
    association(mod, :one, name, Ecto.Association.HasThrough, queryable)
  else
    struct =
      association(mod, :one, name, Ecto.Association.Has, [queryable: queryable] ++ opts)
    Module.put_attribute(mod, :changeset_fields, {name, {:assoc, struct}})
  end
end
# :primary_key is valid here to support associative entity
# https://en.wikipedia.org/wiki/Associative_entity
@valid_belongs_to_options [:foreign_key, :references, :define_field, :type,
                           :on_replace, :defaults, :primary_key, :source, :where]

@doc false
def __belongs_to__(mod, name, queryable, opts) do
  check_options!(opts, @valid_belongs_to_options, "belongs_to/3")
  # The foreign key defaults to "<association>_id".
  opts = Keyword.put_new(opts, :foreign_key, :"#{name}_id")
  foreign_key_type = opts[:type] || Module.get_attribute(mod, :foreign_key_type)
  if name == Keyword.get(opts, :foreign_key) do
    raise ArgumentError, "foreign_key #{inspect name} must be distinct from corresponding association name"
  end
  # :define_field false skips creating the foreign key field, useful when
  # that column is declared separately.
  if Keyword.get(opts, :define_field, true) do
    __field__(mod, opts[:foreign_key], foreign_key_type, opts)
  end
  struct =
    association(mod, :one, name, Ecto.Association.BelongsTo, [queryable: queryable] ++ opts)
  Module.put_attribute(mod, :changeset_fields, {name, {:assoc, struct}})
end
@valid_many_to_many_options [:join_through, :join_keys, :on_delete, :defaults, :on_replace,
                             :unique, :where, :join_through_where]

@doc false
def __many_to_many__(mod, name, queryable, opts) do
  check_options!(opts, @valid_many_to_many_options, "many_to_many/3")
  # The association is also registered as a changeset field so that
  # cast_assoc/put_assoc can find it.
  struct =
    association(mod, :many, name, Ecto.Association.ManyToMany, [queryable: queryable] ++ opts)
  Module.put_attribute(mod, :changeset_fields, {name, {:assoc, struct}})
end
@valid_embeds_one_options [:strategy, :on_replace, :source]

@doc false
def __embeds_one__(mod, name, schema, opts) do
  check_options!(opts, @valid_embeds_one_options, "embeds_one/3")
  embed(mod, :one, name, schema, opts)
end
@valid_embeds_many_options [:strategy, :on_replace, :source]

@doc false
def __embeds_many__(mod, name, schema, opts) do
  check_options!(opts, @valid_embeds_many_options, "embeds_many/3")
  # A list-valued embed always defaults to the empty list.
  opts = Keyword.put(opts, :default, [])
  embed(mod, :many, name, schema, opts)
end
@doc false
# Compiles an inline embed block into a real schema module nested under
# the parent (e.g. Parent.Child), returning {module, remaining_opts}.
def __embeds_module__(env, name, opts, block) do
  # Inline embeds default to an autogenerated :binary_id primary key,
  # overridable via the :primary_key option.
  {pk, opts} = Keyword.pop(opts, :primary_key, {:id, :binary_id, autogenerate: true})
  block =
    quote do
      use Ecto.Schema
      @primary_key unquote(Macro.escape(pk))
      embedded_schema do
        unquote(block)
      end
    end
  module = Module.concat(env.module, name)
  Module.create(module, block, env)
  {module, opts}
end
## Quoted callbacks
@doc false
def __after_compile__(%{module: module} = env, _) do
  # If we are compiling code, we can validate associations now,
  # as the Elixir compiler will solve dependencies.
  #
  # TODO: This is a hack, don't do this at home, it may break any time.
  # Instead Elixir should provide a proper API to check for compilation.
  if Process.info(self(), :error_handler) == {:error_handler, Kernel.ErrorHandler} do
    for name <- module.__schema__(:associations) do
      assoc = module.__schema__(:association, name)
      # Each association kind validates itself; failures surface as
      # compiler warnings rather than errors.
      case assoc.__struct__.after_compile_validation(assoc, env) do
        :ok ->
          :ok
        {:error, message} ->
          IO.warn "invalid association `#{assoc.field}` in schema #{inspect module}: #{message}",
                  Macro.Env.stacktrace(env)
      end
    end
  end
  :ok
end
@doc false
# Builds the quoted clause data for the generated __schema__ reflection
# functions: each {args, body} pair below becomes one function clause.
def __schema__(fields, field_sources, assocs, embeds) do
  # :load spec — fields whose source column differs from the field name
  # are tagged {:source, source, type} for renaming on load.
  load =
    for {name, type} <- fields do
      if alias = field_sources[name] do
        {name, {:source, alias, type}}
      else
        {name, type}
      end
    end
  # :dump spec — maps each field to {source_column, type}.
  dump =
    for {name, type} <- fields do
      {name, {field_sources[name] || name, type}}
    end
  field_sources_quoted =
    for {name, _type} <- fields do
      {[:field_source, name], field_sources[name] || name}
    end
  types_quoted =
    for {name, type} <- fields do
      {[:type, name], Macro.escape(type)}
    end
  assoc_quoted =
    for {name, refl} <- assocs do
      {[:association, name], Macro.escape(refl)}
    end
  assoc_names = Enum.map(assocs, &elem(&1, 0))
  embed_quoted =
    for {name, refl} <- embeds do
      {[:embed, name], Macro.escape(refl)}
    end
  embed_names = Enum.map(embeds, &elem(&1, 0))
  # Single-argument __schema__/1 clauses.
  single_arg = [
    {[:dump], dump |> Map.new() |> Macro.escape()},
    {[:load], load |> Macro.escape()},
    {[:associations], assoc_names},
    {[:embeds], embed_names}
  ]
  # Catch-all clauses return nil for unknown names.
  catch_all = [
    {[:field_source, quote(do: _)], nil},
    {[:type, quote(do: _)], nil},
    {[:association, quote(do: _)], nil},
    {[:embed, quote(do: _)], nil}
  ]
  [
    single_arg,
    field_sources_quoted,
    types_quoted,
    assoc_quoted,
    embed_quoted,
    catch_all
  ]
end
## Private
# Registers an embed: it is stored both as a field with an {:embed, struct}
# type and in the :ecto_embeds reflection attribute.
defp embed(mod, cardinality, name, schema, opts) do
  opts = [cardinality: cardinality, related: schema] ++ opts
  struct = Ecto.Embedded.struct(mod, name, opts)
  define_field(mod, name, {:embed, struct}, opts)
  Module.put_attribute(mod, :ecto_embeds, {name, struct})
end
# Registers `name` in the struct definition, raising if a field or
# association with the same name was already declared.
defp put_struct_field(mod, name, default) do
  existing = Module.get_attribute(mod, :struct_fields)

  if List.keymember?(existing, name, 0) do
    raise ArgumentError, "field/association #{inspect name} is already set on schema"
  end

  Module.put_attribute(mod, :struct_fields, {name, default})
end
# Ensures every given option is in `valid`, raising an ArgumentError on
# the first unknown option. Returns :ok when all options are known.
defp check_options!(opts, valid, fun_arity) do
  Enum.each(opts, fn {key, _value} ->
    unless key in valid do
      raise ArgumentError, "invalid option #{inspect key} for #{fun_arity}"
    end
  end)
end
# :datetime is ambiguous, so it is rejected with a pointer to the two
# concrete datetime types.
defp check_field_type!(name, :datetime, _opts) do
  raise ArgumentError, "invalid type :datetime for field #{inspect name}. " <>
                       "You probably meant to choose one between :naive_datetime " <>
                       "(no time zone information) or :utc_datetime (time zone is set to UTC)"
end
# Embeds must go through embeds_one/embeds_many, never field/3.
defp check_field_type!(name, {:embed, _}, _opts) do
  raise ArgumentError, "cannot declare field #{inspect name} as embed. Use embeds_one/many instead"
end
defp check_field_type!(name, type, opts) do
  cond do
    type == :any and !opts[:virtual] ->
      raise ArgumentError, "only virtual fields can have type :any, " <>
                           "invalid type for field #{inspect name}"
    Ecto.Type.primitive?(type) ->
      type
    # NOTE(review): Code.ensure_compiled?/1 also forces compilation of the
    # custom type module as a side effect; it is deprecated in later Elixir
    # versions — confirm the minimum supported Elixir before changing.
    is_atom(type) and Code.ensure_compiled?(type) and function_exported?(type, :type, 0) ->
      type
    is_atom(type) and function_exported?(type, :__schema__, 1) ->
      raise ArgumentError,
            "schema #{inspect type} is not a valid type for field #{inspect name}." <>
            " Did you mean to use belongs_to, has_one, has_many, embeds_one, or embeds_many instead?"
    true ->
      raise ArgumentError, "invalid or unknown type #{inspect type} for field #{inspect name}"
  end
end
# Registers an {m, f, a} autogenerate hook. ID types are generated by the
# adapter/database, so MFA generators are rejected for them.
defp store_mfa_autogenerate!(mod, name, type, mfa) do
  if autogenerate_id(type) do
    raise ArgumentError, ":autogenerate with {m, f, a} not supported by ID types"
  end

  Module.put_attribute(mod, :ecto_autogenerate, {name, mfa})
end
defp store_type_autogenerate!(mod, name, source, type, pk?) do
  cond do
    # ID types (:id / :binary_id) are generated by the adapter/database
    # and are only allowed on a single primary key per schema.
    id = autogenerate_id(type) ->
      cond do
        not pk? ->
          raise ArgumentError, "only primary keys allow :autogenerate for type #{inspect type}, " <>
                               "field #{inspect name} is not a primary key"
        Module.get_attribute(mod, :ecto_autogenerate_id) ->
          raise ArgumentError, "only one primary key with ID type may be marked as autogenerated"
        true ->
          Module.put_attribute(mod, :ecto_autogenerate_id, {name, source, id})
      end
    # Primitive (non-ID) types have no generator to call.
    Ecto.Type.primitive?(type) ->
      raise ArgumentError, "field #{inspect name} does not support :autogenerate because it uses a " <>
                           "primitive type #{inspect type}"
    # Note the custom type has already been loaded in check_type!/3
    not function_exported?(type, :autogenerate, 0) ->
      raise ArgumentError, "field #{inspect name} does not support :autogenerate because it uses a " <>
                           "custom type #{inspect type} that does not define autogenerate/0"
    true ->
      Module.put_attribute(mod, :ecto_autogenerate, {name, {type, :autogenerate, []}})
  end
end
# Returns :id or :binary_id when `type` resolves to an ID type, nil
# otherwise. Custom types are resolved through their type/0 callback.
defp autogenerate_id(type) do
  resolved = if Ecto.Type.primitive?(type), do: type, else: type.type

  case resolved do
    :id -> :id
    :binary_id -> :binary_id
    _ -> nil
  end
end
# Expands `Foo.Bar` aliases at compile time; any other AST (an atom, a
# {source, schema} tuple, etc.) is passed through untouched.
defp expand_alias({:__aliases__, _, _} = alias_ast, env) do
  Macro.expand(alias_ast, %{env | function: {:__schema__, 2}})
end

defp expand_alias(other, _env) do
  other
end
end
| 34.648837 | 123 | 0.666421 |
1c817d96342ece4533befed5912b0deb5e73eeb4 | 5,341 | ex | Elixir | clients/cloud_error_reporting/lib/google_api/cloud_error_reporting/v1beta1/model/error_group_stats.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/cloud_error_reporting/lib/google_api/cloud_error_reporting/v1beta1/model/error_group_stats.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/cloud_error_reporting/lib/google_api/cloud_error_reporting/v1beta1/model/error_group_stats.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.CloudErrorReporting.V1beta1.Model.ErrorGroupStats do
  @moduledoc """
  Data extracted for a specific group based on certain filter criteria,
  such as a given time period and/or service filter.

  ## Attributes

  *   `affectedServices` (*type:* `list(GoogleApi.CloudErrorReporting.V1beta1.Model.ServiceContext.t)`, *default:* `nil`) - Service contexts with a non-zero error count for the given filter
      criteria. This list can be truncated if multiple services are affected.
      Refer to `num_affected_services` for the total count.
  *   `affectedUsersCount` (*type:* `String.t`, *default:* `nil`) - Approximate number of affected users in the given group that
      match the filter criteria.
      Users are distinguished by data in the `ErrorContext` of the
      individual error events, such as their login name or their remote
      IP address in case of HTTP requests.
      The number of affected users can be zero even if the number of
      errors is non-zero if no data was provided from which the
      affected user could be deduced.
      Users are counted based on data in the request
      context that was provided in the error report. If more users are
      implicitly affected, such as due to a crash of the whole service,
      this is not reflected here.
  *   `count` (*type:* `String.t`, *default:* `nil`) - Approximate total number of events in the given group that match
      the filter criteria.
  *   `firstSeenTime` (*type:* `DateTime.t`, *default:* `nil`) - Approximate first occurrence that was ever seen for this group
      and which matches the given filter criteria, ignoring the
      time_range that was specified in the request.
  *   `group` (*type:* `GoogleApi.CloudErrorReporting.V1beta1.Model.ErrorGroup.t`, *default:* `nil`) - Group data that is independent of the filter criteria.
  *   `lastSeenTime` (*type:* `DateTime.t`, *default:* `nil`) - Approximate last occurrence that was ever seen for this group and
      which matches the given filter criteria, ignoring the time_range
      that was specified in the request.
  *   `numAffectedServices` (*type:* `integer()`, *default:* `nil`) - The total number of services with a non-zero error count for the given
      filter criteria.
  *   `representative` (*type:* `GoogleApi.CloudErrorReporting.V1beta1.Model.ErrorEvent.t`, *default:* `nil`) - An arbitrary event that is chosen as representative for the whole group.
      The representative event is intended to be used as a quick preview for
      the whole group. Events in the group are usually sufficiently similar
      to each other such that showing an arbitrary representative provides
      insight into the characteristics of the group as a whole.
  *   `timedCounts` (*type:* `list(GoogleApi.CloudErrorReporting.V1beta1.Model.TimedCount.t)`, *default:* `nil`) - Approximate number of occurrences over time.
      Timed counts returned by ListGroups are guaranteed to be:

      - Inside the requested time interval
      - Non-overlapping, and
      - Ordered by ascending time.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :affectedServices =>
            list(GoogleApi.CloudErrorReporting.V1beta1.Model.ServiceContext.t()),
          :affectedUsersCount => String.t(),
          :count => String.t(),
          :firstSeenTime => DateTime.t(),
          :group => GoogleApi.CloudErrorReporting.V1beta1.Model.ErrorGroup.t(),
          :lastSeenTime => DateTime.t(),
          :numAffectedServices => integer(),
          :representative => GoogleApi.CloudErrorReporting.V1beta1.Model.ErrorEvent.t(),
          :timedCounts => list(GoogleApi.CloudErrorReporting.V1beta1.Model.TimedCount.t())
        }

  # `field/1,2` declarations (from GoogleApi.Gax.ModelBase) drive the JSON
  # (de)serialization for each attribute; `as:` names the nested model module
  # and `type: :list` marks repeated fields.
  field(:affectedServices,
    as: GoogleApi.CloudErrorReporting.V1beta1.Model.ServiceContext,
    type: :list
  )

  field(:affectedUsersCount)
  field(:count)
  field(:firstSeenTime, as: DateTime)
  field(:group, as: GoogleApi.CloudErrorReporting.V1beta1.Model.ErrorGroup)
  field(:lastSeenTime, as: DateTime)
  field(:numAffectedServices)
  field(:representative, as: GoogleApi.CloudErrorReporting.V1beta1.Model.ErrorEvent)
  field(:timedCounts, as: GoogleApi.CloudErrorReporting.V1beta1.Model.TimedCount, type: :list)
end
defimpl Poison.Decoder, for: GoogleApi.CloudErrorReporting.V1beta1.Model.ErrorGroupStats do
  # Decoding is handled entirely by the model's generated `decode/2` helper.
  def decode(value, options),
    do: GoogleApi.CloudErrorReporting.V1beta1.Model.ErrorGroupStats.decode(value, options)
end
defimpl Poison.Encoder, for: GoogleApi.CloudErrorReporting.V1beta1.Model.ErrorGroupStats do
  # All generated models share the generic ModelBase encoder.
  def encode(value, options),
    do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 50.866667 | 189 | 0.72889 |
1c8197f987dfa1642d5d28e6bb18ab7eb521de19 | 2,212 | exs | Elixir | test/sanbase/clickhouse/api_call_data_test.exs | sitedata/sanbase2 | 8da5e44a343288fbc41b68668c6c80ae8547d557 | [
"MIT"
] | null | null | null | test/sanbase/clickhouse/api_call_data_test.exs | sitedata/sanbase2 | 8da5e44a343288fbc41b68668c6c80ae8547d557 | [
"MIT"
] | 1 | 2021-07-24T16:26:03.000Z | 2021-07-24T16:26:03.000Z | test/sanbase/clickhouse/api_call_data_test.exs | sitedata/sanbase2 | 8da5e44a343288fbc41b68668c6c80ae8547d557 | [
"MIT"
] | null | null | null | defmodule Sanbase.Clickhouse.ApiCallDataTest do
use Sanbase.DataCase
import Sanbase.Factory
import Sanbase.DateTimeUtils, only: [from_iso8601_to_unix!: 1, from_iso8601!: 1]
alias Sanbase.Clickhouse.ApiCallData
setup do
user = insert(:user)
[user: user]
end
test "clickhouse returns data", context do
dt1_str = "2019-01-01T00:00:00Z"
dt2_str = "2019-01-02T00:00:00Z"
dt3_str = "2019-01-03T00:00:00Z"
rows = [
[from_iso8601_to_unix!(dt1_str), 400],
[from_iso8601_to_unix!(dt2_str), 100],
[from_iso8601_to_unix!(dt3_str), 200]
]
Sanbase.Mock.prepare_mock2(&Sanbase.ClickhouseRepo.query/2, {:ok, %{rows: rows}})
|> Sanbase.Mock.run_with_mocks(fn ->
result =
ApiCallData.api_call_history(
context.user.id,
from_iso8601!(dt1_str),
from_iso8601!(dt3_str),
"1d"
)
assert result ==
{:ok,
[
%{api_calls_count: 400, datetime: from_iso8601!(dt1_str)},
%{api_calls_count: 100, datetime: from_iso8601!(dt2_str)},
%{api_calls_count: 200, datetime: from_iso8601!(dt3_str)}
]}
end)
end
test "clickhouse returns empty list", context do
dt1_str = "2019-01-01T00:00:00Z"
dt2_str = "2019-01-03T00:00:00Z"
Sanbase.Mock.prepare_mock2(&Sanbase.ClickhouseRepo.query/2, {:ok, %{rows: []}})
|> Sanbase.Mock.run_with_mocks(fn ->
result =
ApiCallData.api_call_history(
context.user.id,
from_iso8601!(dt1_str),
from_iso8601!(dt2_str),
"1d"
)
assert result == {:ok, []}
end)
end
test "clickhouse returns error", context do
dt1_str = "2019-01-01T00:00:00Z"
dt3_str = "2019-01-03T00:00:00Z"
Sanbase.Mock.prepare_mock2(&Sanbase.ClickhouseRepo.query/2, {:error, "Something went wrong"})
|> Sanbase.Mock.run_with_mocks(fn ->
{:error, error} =
ApiCallData.api_call_history(
context.user.id,
from_iso8601!(dt1_str),
from_iso8601!(dt3_str),
"1d"
)
assert error =~ "Cannot execute database query."
end)
end
end
| 26.97561 | 97 | 0.600814 |
1c81abaed2c7272c71896fc731a886c9689e82de | 2,247 | ex | Elixir | clients/classroom/lib/google_api/classroom/v1/model/course_material.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2021-12-20T03:40:53.000Z | 2021-12-20T03:40:53.000Z | clients/classroom/lib/google_api/classroom/v1/model/course_material.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2020-08-18T00:11:23.000Z | 2020-08-18T00:44:16.000Z | clients/classroom/lib/google_api/classroom/v1/model/course_material.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Classroom.V1.Model.CourseMaterial do
  @moduledoc """
  A material attached to a course as part of a material set.

  ## Attributes

  *   `driveFile` (*type:* `GoogleApi.Classroom.V1.Model.DriveFile.t`, *default:* `nil`) - Google Drive file attachment.
  *   `form` (*type:* `GoogleApi.Classroom.V1.Model.Form.t`, *default:* `nil`) - Google Forms attachment.
  *   `link` (*type:* `GoogleApi.Classroom.V1.Model.Link.t`, *default:* `nil`) - Link attachment.
  *   `youTubeVideo` (*type:* `GoogleApi.Classroom.V1.Model.YouTubeVideo.t`, *default:* `nil`) - Youtube video attachment.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :driveFile => GoogleApi.Classroom.V1.Model.DriveFile.t() | nil,
          :form => GoogleApi.Classroom.V1.Model.Form.t() | nil,
          :link => GoogleApi.Classroom.V1.Model.Link.t() | nil,
          :youTubeVideo => GoogleApi.Classroom.V1.Model.YouTubeVideo.t() | nil
        }

  # Field declarations (from GoogleApi.Gax.ModelBase) map each JSON attribute
  # to its nested model module for (de)serialization.
  field(:driveFile, as: GoogleApi.Classroom.V1.Model.DriveFile)
  field(:form, as: GoogleApi.Classroom.V1.Model.Form)
  field(:link, as: GoogleApi.Classroom.V1.Model.Link)
  field(:youTubeVideo, as: GoogleApi.Classroom.V1.Model.YouTubeVideo)
end
defimpl Poison.Decoder, for: GoogleApi.Classroom.V1.Model.CourseMaterial do
  # Decoding is handled entirely by the model's generated `decode/2` helper.
  def decode(value, options),
    do: GoogleApi.Classroom.V1.Model.CourseMaterial.decode(value, options)
end
defimpl Poison.Encoder, for: GoogleApi.Classroom.V1.Model.CourseMaterial do
  # All generated models share the generic ModelBase encoder.
  def encode(value, options),
    do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 40.125 | 122 | 0.720961 |
1c81cd54ef97bcbbd6a75206d2ef459a8cc30094 | 1,153 | ex | Elixir | lib/ex_json_schema/validator/type.ex | my-channels/ex_json_schema | cf52e0f392847b83715737a49f28cceeb4230a03 | [
"MIT"
] | null | null | null | lib/ex_json_schema/validator/type.ex | my-channels/ex_json_schema | cf52e0f392847b83715737a49f28cceeb4230a03 | [
"MIT"
] | 1 | 2019-07-24T05:23:58.000Z | 2019-07-24T05:23:58.000Z | lib/ex_json_schema/validator/type.ex | my-channels/ex_json_schema | cf52e0f392847b83715737a49f28cceeb4230a03 | [
"MIT"
] | 1 | 2019-08-19T20:47:06.000Z | 2019-08-19T20:47:06.000Z | defmodule ExJsonSchema.Validator.Type do
alias ExJsonSchema.Validator
alias ExJsonSchema.Validator.Error
@spec validate(String.t(), ExJsonSchema.data()) :: Validator.errors()
def validate(type, data) do
case valid?(type, data) do
true ->
[]
false ->
[
%Error{
error: %Error.Type{expected: List.wrap(type), actual: data |> data_type},
path: ""
}
]
end
end
defp valid?(type, data) when is_list(type) do
Enum.any?(type, &valid?(&1, data))
end
defp valid?(type, data) do
case type do
"null" -> is_nil(data)
"boolean" -> is_boolean(data)
"string" -> is_binary(data)
"integer" -> is_integer(data)
"number" -> is_number(data)
"array" -> is_list(data)
"object" -> is_map(data)
end
end
defp data_type(data) do
cond do
is_nil(data) -> "null"
is_boolean(data) -> "boolean"
is_binary(data) -> "string"
is_integer(data) -> "integer"
is_number(data) -> "number"
is_list(data) -> "array"
is_map(data) -> "object"
true -> "unknown"
end
end
end
| 23.06 | 85 | 0.563747 |
1c81e9f64ba3cba6d338ae49d661ff602c6720b3 | 3,684 | ex | Elixir | lib/oli/interop/scrub.ex | malav2110/oli-torus | 8af64e762a7c8a2058bd27a7ab8e96539ffc055f | [
"MIT"
] | 45 | 2020-04-17T15:40:27.000Z | 2022-03-25T00:13:30.000Z | lib/oli/interop/scrub.ex | malav2110/oli-torus | 8af64e762a7c8a2058bd27a7ab8e96539ffc055f | [
"MIT"
] | 944 | 2020-02-13T02:37:01.000Z | 2022-03-31T17:50:07.000Z | lib/oli/interop/scrub.ex | malav2110/oli-torus | 8af64e762a7c8a2058bd27a7ab8e96539ffc055f | [
"MIT"
] | 23 | 2020-07-28T03:36:13.000Z | 2022-03-17T14:29:02.000Z | defmodule Oli.Interop.Scrub do
alias Oli.Resources.PageContent
@doc """
Content scrubbing routine to clean and adjust content found in ingested
pages and activities. Takes a input the content to scrub and returns
a tuple `{changes, scrubbed_content}` where `changes` is a list summarizing
all changes made and `scrubbed_content` is the mutated content.
"""
def scrub(items) when is_list(items) do
{changes, items} =
Enum.map(items, fn i -> scrub(i) end)
|> Enum.unzip()
{List.flatten(changes), items}
end
# Ensure that code blocks only contain `code_line` as children. If they contain
# anything else, extract all text from that child and convert the child to
# a code_line
def scrub(%{"type" => "code", "children" => children} = item) do
item = ensure_id_present(item)
if Enum.any?(children, fn c ->
!Map.has_key?(c, "type") or Map.get(c, "type") != "code_line"
end) do
children = Enum.map(children, &to_code_line/1)
{["Adjusted code block contents"], Map.put(item, "children", children)}
else
{[], item}
end
end
def scrub(%{"model" => model} = item) when is_list(model) do
{changes, model} = scrub(model)
{changes, Map.put(item, "model", model)}
end
def scrub(%{"children" => children} = item) do
item = ensure_id_present(item)
{changes, children} = scrub(children)
{changes, Map.put(item, "children", children)}
end
def scrub(item) do
# To get to this impl of scrub, the item here is not a list, does not have
# a "children" attribute, does not have "model" attribute that is a list,
# and isn't a code block (or any other handed case). We will scrub this
# item by scrubbing well known keys, allowing us to handle current and future
# activity schemas
Map.keys(item)
|> Enum.reduce({[], ensure_id_present(item)}, fn key, {all, item} ->
{changes, updated} = scrub_well_known(item, key)
{changes ++ all, updated}
end)
end
def scrub_well_known(item, "content"), do: scrub_well_known_key(item, "content")
def scrub_well_known(item, "model"), do: scrub_well_known_key(item, "model")
def scrub_well_known(item, "stem"), do: scrub_well_known_key(item, "stem")
def scrub_well_known(item, "choices"), do: scrub_well_known_key(item, "choices")
def scrub_well_known(item, "parts"), do: scrub_well_known_key(item, "parts")
def scrub_well_known(item, "hints"), do: scrub_well_known_key(item, "hints")
def scrub_well_known(item, "responses"), do: scrub_well_known_key(item, "responses")
def scrub_well_known(item, "feedback"), do: scrub_well_known_key(item, "feedback")
def scrub_well_known(item, "authoring"), do: scrub_well_known_key(item, "authoring")
def scrub_well_known(item, _), do: {[], item}
defp scrub_well_known_key(item, key) do
{changes, scrubbed} =
Map.get(item, key)
|> scrub()
{changes, Map.put(item, key, scrubbed)}
end
defp ensure_id_present(item) do
case Map.get(item, "id") do
nil -> Map.put(item, "id", Oli.Utils.random_string(12))
_ -> item
end
end
defp to_code_line(%{"type" => "code_line"} = item), do: item
defp to_code_line(item) do
%{
"type" => "code_line",
"children" => [
%{
"type" => "text",
"text" => extract_text(item)
}
]
}
end
# From an arbitrary content element, recursively extract all "text" nodes, concatenating
# them together to form a singular string
defp extract_text(item) do
PageContent.map_reduce(item, "", fn e, text -> {e, text <> Map.get(e, "text", "")} end)
|> Tuple.to_list()
|> Enum.at(1)
end
end
| 34.429907 | 91 | 0.65608 |
1c820c467789016b00a0924662a59acfe614ee4f | 3,117 | ex | Elixir | clients/cloud_resource_manager/lib/google_api/cloud_resource_manager/v1/model/binding.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | clients/cloud_resource_manager/lib/google_api/cloud_resource_manager/v1/model/binding.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | clients/cloud_resource_manager/lib/google_api/cloud_resource_manager/v1/model/binding.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the swagger code generator program.
# https://github.com/swagger-api/swagger-codegen.git
# Do not edit the class manually.
defmodule GoogleApi.CloudResourceManager.V1.Model.Binding do
  @moduledoc """
  Associates `members` with a `role`.

  ## Attributes

  - condition (Expr): Unimplemented. The condition that is associated with this binding. NOTE: an unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently. Defaults to: `null`.
  - members ([String.t]): Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@gmail.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: A Google Apps domain name that represents all the users of that domain. For example, `google.com` or `example.com`. Defaults to: `null`.
  - role (String.t): Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. Defaults to: `null`.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :condition => GoogleApi.CloudResourceManager.V1.Model.Expr.t(),
          :members => list(any()),
          :role => any()
        }

  # Field declarations (from GoogleApi.Gax.ModelBase) drive the JSON
  # (de)serialization; `type: :list` marks the repeated `members` field.
  field(:condition, as: GoogleApi.CloudResourceManager.V1.Model.Expr)
  field(:members, type: :list)
  field(:role)
end
defimpl Poison.Decoder, for: GoogleApi.CloudResourceManager.V1.Model.Binding do
  # Decoding is handled entirely by the model's generated `decode/2` helper.
  def decode(value, options),
    do: GoogleApi.CloudResourceManager.V1.Model.Binding.decode(value, options)
end
defimpl Poison.Encoder, for: GoogleApi.CloudResourceManager.V1.Model.Binding do
  # All generated models share the generic ModelBase encoder.
  def encode(value, options),
    do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 57.722222 | 1,055 | 0.733397 |
1c821bbfd04dd700e1f15d15c5d5c98caebaad0f | 2,922 | exs | Elixir | config/runtime.exs | databrecht/uswidi | 422c5d957af769a12799b6d3ea56ad1e9f769873 | [
"MIT"
] | null | null | null | config/runtime.exs | databrecht/uswidi | 422c5d957af769a12799b6d3ea56ad1e9f769873 | [
"MIT"
] | null | null | null | config/runtime.exs | databrecht/uswidi | 422c5d957af769a12799b6d3ea56ad1e9f769873 | [
"MIT"
] | null | null | null | import Config
# config/runtime.exs is executed for all environments, including
# during releases. It is executed after compilation and before the
# system starts, so it is typically used to load production configuration
# and secrets from environment variables or elsewhere. Do not define
# any compile-time configuration in here, as it won't be applied.
# The block below contains prod specific runtime configuration.
if config_env() == :prod do
  # Fail fast at boot when the database URL is not provided; a missing value
  # would otherwise only surface on the first query.
  database_url =
    System.get_env("DATABASE_URL") ||
      raise """
      environment variable DATABASE_URL is missing.
      For example: ecto://USER:PASS@HOST/DATABASE
      """

  config :uswidi, Uswidi.Repo,
    # ssl: true,
    # socket_options: [:inet6],
    url: database_url,
    pool_size: String.to_integer(System.get_env("POOL_SIZE") || "10")

  # The secret key base is used to sign/encrypt cookies and other secrets.
  # A default value is used in config/dev.exs and config/test.exs but you
  # want to use a different value for prod and you most likely don't want
  # to check this value into version control, so we use an environment
  # variable instead.
  secret_key_base =
    System.get_env("SECRET_KEY_BASE") ||
      raise """
      environment variable SECRET_KEY_BASE is missing.
      You can generate one by calling: mix phx.gen.secret
      """

  # The public URL is derived from the Gigalixir app name; HTTPS is assumed
  # (port 443) for generated links.
  config :uswidi, UswidiWeb.Endpoint,
    url: [host: System.get_env("APP_NAME") <> ".gigalixirapp.com", port: 443],
    http: [
      # Enable IPv6 and bind on all interfaces.
      # Set it to {0, 0, 0, 0, 0, 0, 0, 1} for local network only access.
      # See the documentation on https://hexdocs.pm/plug_cowboy/Plug.Cowboy.html
      # for details about using IPv6 vs IPv4 and loopback vs public addresses.
      ip: {0, 0, 0, 0, 0, 0, 0, 0},
      port: String.to_integer(System.get_env("PORT") || "4000")
    ],
    secret_key_base: secret_key_base

  # ## Using releases
  #
  # For OTP releases, Phoenix must be told to start the endpoint's web
  # server; `server: true` below does exactly that.
  #
  config :uswidi, UswidiWeb.Endpoint, server: true
  #
  # Then you can assemble a release by calling `mix release`.
  # See `mix help release` for more information.

  # ## Configuring the mailer
  #
  # In production you need to configure the mailer to use a different adapter.
  # Also, you may need to configure the Swoosh API client of your choice if you
  # are not using SMTP. Here is an example of the configuration:
  #
  #     config :uswidi, Uswidi.Mailer,
  #       adapter: Swoosh.Adapters.Mailgun,
  #       api_key: System.get_env("MAILGUN_API_KEY"),
  #       domain: System.get_env("MAILGUN_DOMAIN")
  #
  # For this example you need include a HTTP client required by Swoosh API client.
  # Swoosh supports Hackney and Finch out of the box:
  #
  #     config :swoosh, :api_client, Swoosh.ApiClient.Hackney
  #
  # See https://hexdocs.pm/swoosh/Swoosh.html#module-installation for details.
end
| 38.96 | 82 | 0.696099 |
1c825ca182373fbae55568544b6813249c944f7a | 4,029 | ex | Elixir | lib/changelog_web/helpers/admin_helpers.ex | lawik/changelog.com | 3f0b6ceb6ba723df9b21164114fbc208509e9c12 | [
"MIT"
] | null | null | null | lib/changelog_web/helpers/admin_helpers.ex | lawik/changelog.com | 3f0b6ceb6ba723df9b21164114fbc208509e9c12 | [
"MIT"
] | null | null | null | lib/changelog_web/helpers/admin_helpers.ex | lawik/changelog.com | 3f0b6ceb6ba723df9b21164114fbc208509e9c12 | [
"MIT"
] | null | null | null | defmodule ChangelogWeb.Helpers.AdminHelpers do
use Phoenix.HTML
alias Changelog.Repo
alias ChangelogWeb.TimeView
alias ChangelogWeb.Helpers.SharedHelpers
def error_class(form, field), do: if(form.errors[field], do: "error", else: "")
def hidden_class(condition), do: if(condition, do: "hidden", else: "")
def error_message(form, field) do
case form.errors[field] do
{message, _} ->
content_tag :div, class: "ui pointing red basic label" do
message
end
nil ->
""
end
end
def file_toggle_buttons() do
content_tag(:span) do
[
content_tag(:a, "(use url)", href: "javascript:void(0);", class: "field-action use-url"),
content_tag(:a, "(use file)",
href: "javascript:void(0);",
class: "field-action use-file",
style: "display: none;"
)
]
end
end
def filter_select(filter, options) when is_binary(filter),
do: filter_select(String.to_atom(filter), options)
def filter_select(filter, options) do
content_tag(:select, class: "ui fluid selection dropdown js-filter") do
Enum.map(options, fn {value, label} ->
args =
if filter == value do
[value: value, selected: true]
else
[value: value]
end
content_tag(:option, label, args)
end)
end
end
def help_icon(help_text) do
~e"""
<i class="help circle icon fluid" data-popup="true" data-variation="wide" data-content="<%= help_text %>"></i>
"""
end
def info_icon(info_text) do
~e"""
<i class="info circle icon fluid" data-popup="true" data-variation="wide" data-content="<%= info_text %>"></i>
"""
end
def icon_link(icon_name, options) do
options = Keyword.put(options, :class, "ui icon button")
link(content_tag(:i, "", class: "#{icon_name} icon"), options)
end
def is_persisted(struct), do: is_integer(struct.id)
def is_loaded(nil), do: false
def is_loaded(%Ecto.Association.NotLoaded{}), do: false
def is_loaded(_association), do: true
def label_with_clear(attr, text) do
content_tag(:label, for: attr) do
[
content_tag(:span, text),
content_tag(:a, "(clear)", href: "javascript:void(0);", class: "field-action js-clear")
]
end
end
# Attempts to load an associated record on a form. Starts with direct
# relationship on form data, then tries querying Repo.
def load_from_form(form, module, relationship) do
form_data = Map.get(form.data, relationship)
foreign_key = "#{relationship}_id"
record_id =
Map.get(form.data, String.to_existing_atom(foreign_key)) || form.params[foreign_key]
cond do
is_loaded(form_data) -> form_data
is_nil(record_id) -> nil
true -> Repo.get(module, record_id)
end
end
def next_param(conn, default \\ nil), do: Map.get(conn.params, "next", default)
def download_count(ep_or_pod),
do: ep_or_pod.download_count |> round() |> SharedHelpers.comma_separated()
def reach_count(ep_or_pod) do
if ep_or_pod.reach_count > ep_or_pod.download_count do
SharedHelpers.comma_separated(ep_or_pod.reach_count)
else
download_count(ep_or_pod)
end
end
def semantic_calendar_field(form, field) do
~e"""
<div class="ui calendar">
<div class="ui input left icon">
<i class="calendar icon"></i>
<%= text_input(form, field, name: "", id: "") %>
<%= hidden_input(form, field) %>
</div>
</div>
"""
end
def submit_button(type, text, next \\ "") do
content_tag(:button, text,
class: "ui #{type} fluid basic button",
type: "submit",
name: "next",
value: next
)
end
def ts(ts), do: TimeView.ts(ts)
def ts(ts, style), do: TimeView.ts(ts, style)
def up_or_down_class(a, b) when is_integer(a) and is_integer(b) do
if a > b do
"green"
else
"red"
end
end
def yes_no_options do
[{"Yes", true}, {"No", false}]
end
end
| 26.506579 | 114 | 0.623232 |
1c8269592edc7fa55a207d2885a314845dc1bb73 | 185 | ex | Elixir | lib/erlef_web/views/admin/volunteer_view.ex | dhadka/website | e67c23d7052b4ef00a1af52b0b9ebc952d34776e | [
"Apache-2.0"
] | 1 | 2021-03-13T01:34:28.000Z | 2021-03-13T01:34:28.000Z | lib/erlef_web/views/admin/volunteer_view.ex | dhadka/website | e67c23d7052b4ef00a1af52b0b9ebc952d34776e | [
"Apache-2.0"
] | null | null | null | lib/erlef_web/views/admin/volunteer_view.ex | dhadka/website | e67c23d7052b4ef00a1af52b0b9ebc952d34776e | [
"Apache-2.0"
] | null | null | null | defmodule ErlefWeb.Admin.VolunteerView do
use ErlefWeb, :view
def title(%{assigns: %{volunteer: vol}}) do
"Volunteer - #{vol.name}"
end
def title(_), do: "Volunteers"
end
| 18.5 | 45 | 0.67027 |
1c827c7329eebe1a9a0e10e8c5bbf25666268487 | 707 | exs | Elixir | config/config.exs | sadesyllas/nerves_bluetooth | d316dbc192be8beec8b76dd245225117bc7d925f | [
"Apache-2.0"
] | 14 | 2018-01-29T23:34:50.000Z | 2021-07-15T04:29:36.000Z | config/config.exs | sadesyllas/nerves_bluetooth | d316dbc192be8beec8b76dd245225117bc7d925f | [
"Apache-2.0"
] | 2 | 2017-05-24T18:16:42.000Z | 2018-11-06T19:29:09.000Z | config/config.exs | sadesyllas/nerves_bluetooth | d316dbc192be8beec8b76dd245225117bc7d925f | [
"Apache-2.0"
] | 3 | 2018-06-01T07:10:19.000Z | 2019-01-06T22:32:56.000Z | # This file is responsible for configuring your application
# and its dependencies with the aid of the Mix.Config module.
#
# This configuration file is loaded before any dependency and
# is restricted to this project.
use Mix.Config

# Wi-Fi credentials for the wlan0 interface. Replace the placeholder ssid/psk
# values with real ones before deploying.
config :rpi0_bt, :wlan0,
  ssid: "your-ssid",
  key_mgmt: :"WPA-PSK",
  psk: "your-key"

config :rpi0_bt, :debug, false

config :nerves, :firmware,
  rootfs_additions: "rootfs-additions"

# Target-specific configuration. This import must remain at the bottom of the
# file so it overrides everything defined above.
import_config "#{Mix.Project.config[:target]}.exs"
1c827e611aa8e31348a1001787a64dcfd382340a | 1,880 | exs | Elixir | deps/file_system/mix.exs | s3vi26/KoanTests | 208ee66ac50610558727c45c1773f9fdf80afdd5 | [
"MIT"
] | null | null | null | deps/file_system/mix.exs | s3vi26/KoanTests | 208ee66ac50610558727c45c1773f9fdf80afdd5 | [
"MIT"
] | null | null | null | deps/file_system/mix.exs | s3vi26/KoanTests | 208ee66ac50610558727c45c1773f9fdf80afdd5 | [
"MIT"
] | null | null | null | defmodule FileSystem.Mixfile do
use Mix.Project
def project do
[ app: :file_system,
version: "0.2.2",
elixir: "~> 1.3",
deps: deps(),
description: "A file system change watcher wrapper based on [fs](https://github.com/synrc/fs)",
source_url: "https://github.com/falood/file_system",
package: package(),
compilers: [:file_system | Mix.compilers],
aliases: ["compile.file_system": &file_system/1],
docs: [
extras: ["README.md"],
main: "readme",
]
]
end
def application do
[
applications: [:logger],
]
end
defp deps do
[
{ :ex_doc, "~> 0.14", only: :docs },
]
end
defp file_system(_args) do
case :os.type do
{:unix, :darwin} -> compile_mac()
_ -> :ok
end
end
defp compile_mac do
require Logger
source = "c_src/mac/*.c"
target = "priv/mac_listener"
if Mix.Utils.stale?(Path.wildcard(source), [target]) do
Logger.info "Compiling file system watcher for Mac..."
cmd = "clang -framework CoreFoundation -framework CoreServices -Wno-deprecated-declarations #{source} -o #{target}"
if Mix.shell.cmd(cmd) > 0 do
Logger.error "Could not compile file system watcher for Mac, try to run #{inspect cmd} manually inside the dependnecy."
else
Logger.info "Done."
end
:ok
else
:noop
end
end
defp package do
%{ maintainers: ["Xiangrong Hao", "Max Veytsman"],
files: [
"lib", "README.md", "mix.exs",
"c_src/mac/cli.c",
"c_src/mac/cli.h",
"c_src/mac/common.h",
"c_src/mac/compat.c",
"c_src/mac/compat.h",
"c_src/mac/main.c",
"priv/inotifywait.exe",
],
licenses: ["WTFPL"],
links: %{"Github" => "https://github.com/falood/file_system"}
}
end
end
| 24.736842 | 127 | 0.569681 |
1c8287d34d643fc74486598476411d98c6169881 | 489 | ex | Elixir | youtube/groxio/tetris/lib/tetris_web/views/error_view.ex | jim80net/elixir_tutorial_projects | db19901a9305b297faa90642bebcc08455621b52 | [
"Unlicense"
] | null | null | null | youtube/groxio/tetris/lib/tetris_web/views/error_view.ex | jim80net/elixir_tutorial_projects | db19901a9305b297faa90642bebcc08455621b52 | [
"Unlicense"
] | null | null | null | youtube/groxio/tetris/lib/tetris_web/views/error_view.ex | jim80net/elixir_tutorial_projects | db19901a9305b297faa90642bebcc08455621b52 | [
"Unlicense"
] | null | null | null | defmodule TetrisWeb.ErrorView do
use TetrisWeb, :view
# If you want to customize a particular status code
# for a certain format, you may uncomment below.
# def render("500.html", _assigns) do
# "Internal Server Error"
# end
# By default, Phoenix returns the status message from
# the template name. For example, "404.html" becomes
# "Not Found".
def template_not_found(template, _assigns) do
Phoenix.Controller.status_message_from_template(template)
end
end
| 28.764706 | 61 | 0.734151 |
1c829b5d5338624dee3079fb526a9daceb74c977 | 2,659 | exs | Elixir | test/ot/plug_test.exs | sumup-oss/ot | 07e8211e59c0b91c015f36f4166becf799bd6184 | [
"Apache-2.0"
] | 1 | 2021-09-20T07:23:38.000Z | 2021-09-20T07:23:38.000Z | test/ot/plug_test.exs | sumup-oss/ot | 07e8211e59c0b91c015f36f4166becf799bd6184 | [
"Apache-2.0"
] | null | null | null | test/ot/plug_test.exs | sumup-oss/ot | 07e8211e59c0b91c015f36f4166becf799bd6184 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2021, SumUp Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
defmodule Ot.PlugTest do
  @moduledoc """
  Tests for `Ot.Plug`: span creation for matched/ignored request paths,
  W3C `traceparent` propagation, query-string truncation, and tagging of
  conn private fields.
  """

  use ExUnit.Case
  use Plug.Test

  # A well-formed W3C traceparent header built from fixed trace/span ids.
  @trace_id String.duplicate("a", 32)
  @span_id String.duplicate("b", 16)
  @traceparent "00-#{@trace_id}-#{@span_id}-00"

  setup tags do
    # Start Ot under the test supervisor, merging per-test :ot_config tags
    # (set via @tag) over the defaults.
    ot_config = Map.get(tags, :ot_config, [])
    ot_defaults = [service_name: "my-app", collectors: []]
    start_supervised!({Ot, Keyword.merge(ot_defaults, ot_config)})

    :ok
  end

  @tag ot_config: [plug: [paths: ["/foo/*"]]]
  test "call/2: ignored path" do
    # A request outside the configured paths must not open a span.
    conn(:get, "/bar") |> Ot.Plug.call([])

    assert Ot.current_span() == nil
  end

  @tag ot_config: [plug: [paths: ["/foo/*"]]]
  test "call/2: matched path" do
    conn = conn(:get, "/foo/bar?baz=0") |> Ot.Plug.call([])

    # A matched path opens a root SERVER span tagged with request metadata.
    span = Ot.current_span()
    assert %Ot.Span{} = span
    assert <<_::binary-size(16)>> = span.id
    assert <<_::binary-size(32)>> = span.traceId
    assert span.name == "request"
    assert span.parentId == nil
    assert span.kind == "SERVER"

    assert span.tags == %{
             :component => "my-app",
             "http.method" => "GET",
             "http.path" => "/foo/bar",
             "http.query_string" => "baz=0"
           }

    # Sending the response adds the status code tag to the same span.
    send_resp(conn, 200, "OK")
    assert Ot.current_span().tags == Map.put(span.tags, "http.status_code", 200)
  end

  test "call/2: traceparent" do
    conn(:get, "/foo/bar?baz=0")
    |> put_req_header("traceparent", @traceparent)
    |> Ot.Plug.call([])

    # The trace id is inherited from the header, but a fresh span id is
    # generated (the header's span id becomes the parent).
    span = Ot.current_span()
    assert <<_::binary-size(16)>> = span.id
    refute span.id == @span_id
    assert span.traceId == @trace_id
  end

  # NOTE(review): "tuncation" is a typo for "truncation" in the test name.
  test "call/2: query string tuncation" do
    # Query strings longer than 1000 bytes are cut to the first 1000.
    onek = String.duplicate("a", 1000)
    conn(:get, "/foo/bar?" <> onek <> "a") |> Ot.Plug.call([])

    assert Ot.current_span().tags["http.query_string"] == onek
  end

  @tag ot_config: [plug: [conn_private_tags: [test_private_tag: "test_span_tag"]]]
  test "call/2: private field tagging" do
    # Configured conn.private fields are copied onto the span under the
    # mapped tag name.
    conn(:get, "/foo/bar")
    |> put_private(:test_private_tag, "test-value")
    |> Ot.Plug.call([])

    assert Ot.current_span().tags["test_span_tag"] == "test-value"
  end
end
| 30.215909 | 82 | 0.632569 |
1c82a9b3c1cdefe43fb27d1da0a813a16f7ce100 | 1,306 | ex | Elixir | lib/phoenix_components/component.ex | tsenturk/phoenix_components | dc1905743a9d30078b581112af105b48282eca79 | [
"MIT"
] | 3 | 2018-11-22T15:39:09.000Z | 2019-06-13T19:48:17.000Z | lib/phoenix_components/component.ex | tsenturk/phoenix_components | dc1905743a9d30078b581112af105b48282eca79 | [
"MIT"
] | 1 | 2021-07-15T13:01:36.000Z | 2021-07-15T13:01:36.000Z | lib/phoenix_components/component.ex | tsenturk/phoenix_components | dc1905743a9d30078b581112af105b48282eca79 | [
"MIT"
defmodule PhoenixComponents.Component do
  @moduledoc """
  Defines the view part of a component.

  `use`-ing this module serves two purposes: it tells Phoenix where to load
  the component's template from, and it imports the helpers available inside
  that template.

  ## Example

  By default, component templates are looked up under
  `lib/app_web/components/`, e.g. `lib/app_web/components/awesome_button/view.eex`.

      defmodule YourApp.Components.AwesomeButton do
        use PhoenixComponents.Component

        def class_for_type(type) do
          "btn--" <> to_string(type)
        end
      end

  Together with a template this defines a component, e.g.
  `lib/app_web/components/awesome_button/template.html.eex`:

      <button class="<%= class_for_type @attr.type %>">
        <%= @content %>
      </button>
  """

  @doc """
  When used, defines the current module as a Component View module.
  """
  defmacro __using__(opts) do
    view_root = opts[:root]
    view_namespace = opts[:namespace]

    quote do
      use Phoenix.View, root: unquote(view_root), namespace: unquote(view_namespace)
      use Phoenix.HTML
      use PhoenixComponents.View, namespace: unquote(view_namespace)
    end
  end
end
| 27.787234 | 86 | 0.695253 |
1c82d47a74550267b42f1f0ec84ef242bdeba186 | 1,123 | exs | Elixir | apps/game_of_life_web/config/dev.exs | tslim/elixir-phoenix-life | c86fd69198c9193bdcd5b3b1b504f41f67268139 | [
"MIT"
] | 1 | 2016-09-28T07:38:55.000Z | 2016-09-28T07:38:55.000Z | apps/game_of_life_web/config/dev.exs | tslim/elixir-phoenix-life | c86fd69198c9193bdcd5b3b1b504f41f67268139 | [
"MIT"
] | null | null | null | apps/game_of_life_web/config/dev.exs | tslim/elixir-phoenix-life | c86fd69198c9193bdcd5b3b1b504f41f67268139 | [
"MIT"
] | null | null | null | use Mix.Config
# For development, we disable any cache and enable
# debugging and code reloading.
#
# The watchers configuration can be used to run external
# watchers to your application. For example, we use it
# with brunch.io to recompile .js and .css sources.
config :game_of_life_web, GameOfLife.Web.Endpoint,
http: [port: 4000],
debug_errors: true,
code_reloader: true,
check_origin: false,
watchers: [node: ["node_modules/brunch/bin/brunch", "watch", "--stdin",
cd: Path.expand("../", __DIR__)]]
# Watch static and templates for browser reloading.
config :game_of_life_web, GameOfLife.Web.Endpoint,
live_reload: [
patterns: [
~r{priv/static/.*(js|css|png|jpeg|jpg|gif|svg)$},
~r{priv/gettext/.*(po)$},
~r{web/views/.*(ex)$},
~r{web/templates/.*(eex)$}
]
]
# Do not include metadata nor timestamps in development logs
config :logger, :console, format: "[$level] $message\n"
# Set a higher stacktrace during development. Avoid configuring such
# in production as building large stacktraces may be expensive.
config :phoenix, :stacktrace_depth, 20
| 32.085714 | 73 | 0.695459 |
1c82e4c02cf32786d31e18cc6ba1757c652541bd | 441 | exs | Elixir | priv/repo/migrations/20180220044803_create_raw_supplier_rate.exs | max-konin/cgrates_web_jsonapi | e82690e343d790b0f77dea6699483fcb6fd8a162 | [
"MIT"
] | 2 | 2018-10-03T07:41:32.000Z | 2021-03-21T11:27:27.000Z | priv/repo/migrations/20180220044803_create_raw_supplier_rate.exs | max-konin/cgrates_web_jsonapi | e82690e343d790b0f77dea6699483fcb6fd8a162 | [
"MIT"
] | 1 | 2018-10-31T04:55:59.000Z | 2018-10-31T04:55:59.000Z | priv/repo/migrations/20180220044803_create_raw_supplier_rate.exs | max-konin/cgrates_web_jsonapi | e82690e343d790b0f77dea6699483fcb6fd8a162 | [
"MIT"
defmodule CgratesWebJsonapi.Repo.Migrations.CreateRawSupplierRate do
  use Ecto.Migration

  # Creates the raw_supplier_rates table plus an index on its
  # tariff_plan foreign key.
  def change do
    create table(:raw_supplier_rates) do
      add :rate, :float
      add :supplier_name, :string
      add :prefix, :string
      add :description, :string
      add :tariff_plan_id, references(:tariff_plans, on_delete: :nothing)

      timestamps()
    end

    create index(:raw_supplier_rates, [:tariff_plan_id])
  end
end
| 24.5 | 74 | 0.696145 |
1c82f3e69c88266209e8b2e19515796e437dfa27 | 1,841 | ex | Elixir | lib/snowflake/generator.ex | blitzstudios/snowflake | 3451939e245e11406eefa932b71e65f586263a72 | [
"MIT"
] | 55 | 2017-02-08T14:30:15.000Z | 2022-03-29T08:21:13.000Z | lib/snowflake/generator.ex | blitzstudios/snowflake | 3451939e245e11406eefa932b71e65f586263a72 | [
"MIT"
] | 2 | 2017-04-03T18:05:45.000Z | 2017-05-10T15:52:34.000Z | lib/snowflake/generator.ex | blitzstudios/snowflake | 3451939e245e11406eefa932b71e65f586263a72 | [
"MIT"
defmodule Snowflake.Generator do
  @moduledoc false

  use GenServer

  # Machine ids occupy 10 bits, sequences 12 bits.
  @machine_id_overflow 1024
  @seq_overflow 4096

  # State is {epoch, last_timestamp, machine_id, sequence}.
  def start_link(epoch, machine_id) when machine_id < @machine_id_overflow do
    GenServer.start_link(__MODULE__, {epoch, ts(epoch), machine_id, 0}, name: __MODULE__)
  end

  def init(state), do: {:ok, state}

  # Generates the next 64-bit id: 42 bits of epoch-relative milliseconds,
  # 10 bits of machine id, 12 bits of per-millisecond sequence.
  def handle_call(:next_id, caller, {epoch, last_ts, machine_id, seq} = state) do
    case next_ts_and_seq(epoch, last_ts, seq) do
      {:ok, now_ts, now_seq} ->
        new_state = {epoch, now_ts, machine_id, now_seq}
        {:reply, {:ok, create_id(now_ts, machine_id, now_seq)}, new_state}

      {:error, :seq_overflow} ->
        # Sequence space for this millisecond is exhausted; wait for the
        # next tick and retry with the unchanged state.
        :timer.sleep(1)
        handle_call(:next_id, caller, state)

      {:error, :backwards_clock} ->
        {:reply, {:error, :backwards_clock}, state}
    end
  end

  def handle_call(:machine_id, _caller, {_epoch, _last_ts, machine_id, _seq} = state) do
    {:reply, {:ok, machine_id}, state}
  end

  def handle_call({:set_machine_id, new_machine_id}, _caller, {epoch, last_ts, _old_id, seq}) do
    {:reply, {:ok, new_machine_id}, {epoch, last_ts, new_machine_id, seq}}
  end

  # Advances the timestamp/sequence pair, detecting sequence exhaustion
  # within a millisecond and clocks that moved backwards.
  defp next_ts_and_seq(epoch, last_ts, seq) do
    now = ts(epoch)

    cond do
      now > last_ts -> {:ok, now, 0}
      now < last_ts -> {:error, :backwards_clock}
      seq + 1 >= @seq_overflow -> {:error, :seq_overflow}
      true -> {:ok, last_ts, seq + 1}
    end
  end

  # Packs the three components into a single unsigned 64-bit integer.
  defp create_id(timestamp, machine_id, seq) do
    <<id::unsigned-integer-size(64)>> =
      <<timestamp::unsigned-integer-size(42), machine_id::unsigned-integer-size(10),
        seq::unsigned-integer-size(12)>>

    id
  end

  # Milliseconds elapsed since the configured epoch (wall clock).
  defp ts(epoch), do: System.os_time(:millisecond) - epoch
end
| 27.893939 | 98 | 0.62629 |
1c83053f8159a4fc9bb0225e663680e7895227ac | 113,976 | ex | Elixir | lib/ecto/changeset.ex | Ljzn/ecto | b3a4b68877f178693568e6c4251e1dec1a38e06c | [
"Apache-2.0"
] | 1 | 2022-01-24T07:32:34.000Z | 2022-01-24T07:32:34.000Z | lib/ecto/changeset.ex | Ljzn/ecto | b3a4b68877f178693568e6c4251e1dec1a38e06c | [
"Apache-2.0"
] | null | null | null | lib/ecto/changeset.ex | Ljzn/ecto | b3a4b68877f178693568e6c4251e1dec1a38e06c | [
"Apache-2.0"
] | null | null | null | defmodule Ecto.Changeset do
@moduledoc ~S"""
Changesets allow filtering, casting, validation and
definition of constraints when manipulating structs.
There is an example of working with changesets in the introductory
documentation in the `Ecto` module. The functions `cast/4` and
`change/2` are the usual entry points for creating changesets.
The first one is used to cast and validate external parameters,
such as parameters sent through a form, API, command line, etc.
The second one is used to change data directly from your application.
The remaining functions in this module, such as validations,
constraints, association handling, are about manipulating
changesets. Let's discuss some of this extra functionality.
## External vs internal data
Changesets allow working with both kinds of data:
* internal to the application - for example programmatically generated,
or coming from other subsystems. This use case is primarily covered
by the `change/2` and `put_change/3` functions.
* external to the application - for example data provided by the user in
a form that needs to be type-converted and properly validated. This
use case is primarily covered by the `cast/4` function.
## Validations and constraints
Ecto changesets provide both validations and constraints which
are ultimately turned into errors in case something goes wrong.
The difference between them is that most validations can be
executed without a need to interact with the database and, therefore,
are always executed before attempting to insert or update the entry
in the database. Some validations may happen against the database but
they are inherently unsafe. Those validations start with a `unsafe_`
prefix, such as `unsafe_validate_unique/3`.
On the other hand, constraints rely on the database and are always safe.
As a consequence, validations are always checked before constraints.
Constraints won't even be checked in case validations failed.
Let's see an example:
defmodule User do
use Ecto.Schema
import Ecto.Changeset
schema "users" do
field :name
field :email
field :age, :integer
end
def changeset(user, params \\ %{}) do
user
|> cast(params, [:name, :email, :age])
|> validate_required([:name, :email])
|> validate_format(:email, ~r/@/)
|> validate_inclusion(:age, 18..100)
|> unique_constraint(:email)
end
end
In the `changeset/2` function above, we define three validations.
They check that `name` and `email` fields are present in the
changeset, the e-mail is of the specified format, and the age is
between 18 and 100 - as well as a unique constraint in the email
field.
Let's suppose the e-mail is given but the age is invalid. The
changeset would have the following errors:
changeset = User.changeset(%User{}, %{age: 0, email: "mary@example.com"})
{:error, changeset} = Repo.insert(changeset)
changeset.errors #=> [age: {"is invalid", []}, name: {"can't be blank", []}]
In this case, we haven't checked the unique constraint in the
e-mail field because the data did not validate. Let's fix the
age and the name, and assume that the e-mail already exists in the
database:
changeset = User.changeset(%User{}, %{age: 42, name: "Mary", email: "mary@example.com"})
{:error, changeset} = Repo.insert(changeset)
changeset.errors #=> [email: {"has already been taken", []}]
Validations and constraints define an explicit boundary when the check
happens. By moving constraints to the database, we also provide a safe,
correct and data-race free means of checking the user input.
### Deferred constraints
Some databases support deferred constraints, i.e., constraints which are
checked at the end of the transaction rather than at the end of each statement.
Changesets do not support this type of constraints. When working with deferred
constraints, a violation while invoking `Repo.insert/2` or `Repo.update/2` won't
return `{:error, changeset}`, but rather raise an error at the end of the
transaction.
## Empty values
Many times, the data given on cast needs to be further pruned, specially
regarding empty values. For example, if you are gathering data to be
cast from the command line or through an HTML form or any other text-based
format, it is likely those means cannot express nil values. For
those reasons, changesets include the concept of empty values, which are
values that will be automatically converted to the field's default value
on `cast/4`. Those values are stored in the changeset `empty_values` field
and default to `[""]`. You can also pass the `:empty_values` option to
`cast/4` in case you want to change how a particular `cast/4` works.
## Associations, embeds and on replace
Using changesets you can work with associations as well as with embedded
structs. There are two primary APIs:
* `cast_assoc/3` and `cast_embed/3` - those functions are used when
working with external data. In particular, they allow you to change
associations and embeds alongside the parent struct, all at once.
* `put_assoc/4` and `put_embed/4` - it allows you to replace the
association or embed as a whole. This can be used to move associated
data from one entry to another, to completely remove or replace
existing entries.
See the documentation for those functions for more information.
### The `:on_replace` option
When using any of those APIs, you may run into situations where Ecto sees
data is being replaced. For example, imagine a Post has many Comments where
the comments have IDs 1, 2 and 3. If you call `cast_assoc/3` passing only
the IDs 1 and 2, Ecto will consider 3 is being "replaced" and it will raise
by default. Such behaviour can be changed when defining the relation by
setting `:on_replace` option when defining your association/embed according
to the values below:
* `:raise` (default) - do not allow removing association or embedded
data via parent changesets
* `:mark_as_invalid` - if attempting to remove the association or
embedded data via parent changeset - an error will be added to the parent
changeset, and it will be marked as invalid
* `:nilify` - sets owner reference column to `nil` (available only for
associations). Use this on a `belongs_to` column to allow the association
to be cleared out so that it can be set to a new value. Will set `action`
on associated changesets to `:replace`
* `:update` - updates the association, available only for `has_one` and `belongs_to`.
This option will update all the fields given to the changeset including the id
for the association
* `:delete` - removes the association or related data from the database.
This option has to be used carefully (see below). Will set `action` on associated
changesets to `:replace`
The `:delete` option in particular must be used carefully as it would allow
users to delete any associated data by simply not sending any data for a given
field. If you need deletion, it is often preferred to manually mark the changeset
for deletion if a `delete` field is set in the params, as in the example below:
defmodule Comment do
use Ecto.Schema
import Ecto.Changeset
schema "comments" do
field :body, :string
end
def changeset(comment, %{"delete" => "true"}) do
%{Ecto.Changeset.change(comment) | action: :delete}
end
def changeset(comment, params) do
cast(comment, params, [:body])
end
end
## Schemaless changesets
In the changeset examples so far, we have always used changesets to validate
and cast data contained in a struct defined by an Ecto schema, such as the `%User{}`
struct defined by the `User` module.
However, changesets can also be used with "regular" structs too by passing a tuple
with the data and its types:
user = %User{}
types = %{first_name: :string, last_name: :string, email: :string}
changeset =
{user, types}
|> Ecto.Changeset.cast(params, Map.keys(types))
|> Ecto.Changeset.validate_required(...)
|> Ecto.Changeset.validate_length(...)
where the user struct refers to the definition in the following module:
defmodule User do
defstruct [:name, :age]
end
Changesets can also be used with data in a plain map, by following the same API:
data = %{}
types = %{name: :string}
params = %{name: "Callum"}
changeset =
{data, types}
|> Ecto.Changeset.cast(params, Map.keys(types))
|> Ecto.Changeset.validate_required(...)
|> Ecto.Changeset.validate_length(...)
Such functionality makes Ecto extremely useful to cast, validate and prune data even
if it is not meant to be persisted to the database.
### Changeset actions
Changesets have an action field which is usually set by `Ecto.Repo`
whenever one of the operations such as `insert` or `update` is called:
changeset = User.changeset(%User{}, %{age: 42, email: "mary@example.com"})
{:error, changeset} = Repo.insert(changeset)
changeset.action
#=> :insert
This means that when working with changesets that are not meant to be
persisted to the database, such as schemaless changesets, you may need
to explicitly set the action to one specific value. Frameworks such as
Phoenix use the action value to define how HTML forms should act.
Instead of setting the action manually, you may use `apply_action/2` that
emulates operations such as `Repo.insert`. `apply_action/2` will return
`{:ok, changes}` if the changeset is valid or `{:error, changeset}`, with
the given `action` set in the changeset in case of errors.
## The Ecto.Changeset struct
The public fields are:
* `valid?` - Stores if the changeset is valid
* `data` - The changeset source data, for example, a struct
* `params` - The parameters as given on changeset creation
* `changes` - The `changes` from parameters that were approved in casting
* `errors` - All errors from validations
* `required` - All required fields as a list of atoms
* `action` - The action to be performed with the changeset
* `types` - Cache of the data's field types
* `empty_values` - A list of values to be considered empty
* `repo` - The repository applying the changeset (only set after a Repo function is called)
* `repo_opts` - A keyword list of options given to the underlying repository operation
The following fields are private and must not be accessed directly.
* `validations`
* `constraints`
* `filters`
* `prepare`
### Redacting fields in inspect
To hide a fields value from the inspect protocol of `Ecto.Changeset`, mark
the field as `redact: true` in the schema, and it will display with the
value `**redacted**`.
"""
require Ecto.Query
alias __MODULE__
alias Ecto.Changeset.Relation
@empty_values [""]
# If a new field is added here, def merge must be adapted
defstruct valid?: false, data: nil, params: nil, changes: %{},
errors: [], validations: [], required: [], prepare: [],
constraints: [], filters: %{}, action: nil, types: nil,
empty_values: @empty_values, repo: nil, repo_opts: []
@type t(data_type) :: %Changeset{valid?: boolean(),
repo: atom | nil,
repo_opts: Keyword.t,
data: data_type,
params: %{optional(String.t) => term} | nil,
changes: %{optional(atom) => term},
required: [atom],
prepare: [(t -> t)],
errors: [{atom, error}],
constraints: [constraint],
validations: [{atom, term}],
filters: %{optional(atom) => term},
action: action,
types: nil | %{atom => Ecto.Type.t}}
@type t :: t(Ecto.Schema.t | map | nil)
@type error :: {String.t, Keyword.t}
@type action :: nil | :insert | :update | :delete | :replace | :ignore | atom
@type constraint :: %{type: :check | :exclusion | :foreign_key | :unique,
constraint: String.t, match: :exact | :suffix | :prefix,
field: atom, error_message: String.t, error_type: atom}
@type data :: map()
@type types :: map()
@number_validators %{
less_than: {&</2, "must be less than %{number}"},
greater_than: {&>/2, "must be greater than %{number}"},
less_than_or_equal_to: {&<=/2, "must be less than or equal to %{number}"},
greater_than_or_equal_to: {&>=/2, "must be greater than or equal to %{number}"},
equal_to: {&==/2, "must be equal to %{number}"},
not_equal_to: {&!=/2, "must be not equal to %{number}"},
}
@relations [:embed, :assoc]
@match_types [:exact, :suffix, :prefix]
@doc """
Wraps the given data in a changeset or adds changes to a changeset.
`changes` is a map or keyword where the key is an atom representing a
field, association or embed and the value is a term. Note the `value` is
directly stored in the changeset with no validation whatsoever. For this
reason, this function is meant for working with data internal to the
application.
When changing embeds and associations, see `put_assoc/4` for a complete
reference on the accepted values.
This function is useful for:
* wrapping a struct inside a changeset
* directly changing a struct without performing castings nor validations
* directly bulk-adding changes to a changeset
Changed attributes will only be added if the change does not have the
same value as the field in the data.
When a changeset is passed as the first argument, the changes passed as the
second argument are merged over the changes already in the changeset if they
differ from the values in the struct.
When a `{data, types}` is passed as the first argument, a changeset is
created with the given data and types and marked as valid.
See `cast/4` if you'd prefer to cast and validate external parameters.
## Examples
iex> changeset = change(%Post{})
%Ecto.Changeset{...}
iex> changeset.valid?
true
iex> changeset.changes
%{}
iex> changeset = change(%Post{author: "bar"}, title: "title")
iex> changeset.changes
%{title: "title"}
iex> changeset = change(%Post{title: "title"}, title: "title")
iex> changeset.changes
%{}
iex> changeset = change(changeset, %{title: "new title", body: "body"})
iex> changeset.changes.title
"new title"
iex> changeset.changes.body
"body"
"""
@spec change(Ecto.Schema.t | t | {data, types}, %{atom => term} | Keyword.t) :: t
def change(data, changes \\ %{})

# Schemaless data: wrap the bare {data, types} tuple in a valid changeset
# and apply the changes through the %Changeset{} clause below.
def change({data, types}, changes) when is_map(data) do
  change(%Changeset{data: data, types: Enum.into(types, %{}), valid?: true}, changes)
end

def change(%Changeset{types: nil}, _changes) do
  raise ArgumentError, "changeset does not have types information"
end

# Existing changeset: merge the new changes over the ones already present,
# preserving accumulated errors and validity.
def change(%Changeset{changes: changes, types: types} = changeset, new_changes)
    when is_map(new_changes) or is_list(new_changes) do
  {changes, errors, valid?} =
    get_changed(changeset.data, types, changes, new_changes,
                changeset.errors, changeset.valid?)

  %{changeset | changes: changes, errors: errors, valid?: valid?}
end

# Schema struct: derive the field types from the schema module and build a
# fresh changeset containing only the changes that differ from the data.
def change(%{__struct__: struct} = data, changes) when is_map(changes) or is_list(changes) do
  types = struct.__changeset__()
  {changes, errors, valid?} = get_changed(data, types, %{}, changes, [], true)

  %Changeset{valid?: valid?, data: data, changes: changes,
             errors: errors, types: types}
end
# Folds each requested change into the accumulated {changes, errors, valid?}
# triple, delegating per-field handling to put_change/7.
defp get_changed(data, types, old_changes, new_changes, errors, valid?) do
  initial = {old_changes, errors, valid?}

  Enum.reduce(new_changes, initial, fn {field, value}, {acc_changes, acc_errors, acc_valid?} ->
    put_change(data, acc_changes, acc_errors, acc_valid?, field, value, Map.get(types, field))
  end)
end
@doc """
Applies the given `params` as changes for the given `data` according to
the given set of `permitted` keys. Returns a changeset.
The given `data` may be either a changeset, a schema struct or a `{data, types}`
tuple. The second argument is a map of `params` that are cast according
to the type information from `data`. `params` is a map with string keys
or a map with atom keys containing potentially invalid data.
During casting, all `permitted` parameters whose values match the specified
type information will have their key name converted to an atom and stored
together with the value as a change in the `:changes` field of the changeset.
All parameters that are not explicitly permitted are ignored.
If casting of all fields is successful, the changeset is returned as valid.
Note that `cast/4` validates the types in the `params`, but not in the given
`data`.
## Options
* `:empty_values` - a list of values to be considered as empty when casting.
All empty values are discarded on cast. Defaults to `[""]`
## Examples
iex> changeset = cast(post, params, [:title])
iex> if changeset.valid? do
...> Repo.update!(changeset)
...> end
Passing a changeset as the first argument:
iex> changeset = cast(post, %{title: "Hello"}, [:title])
iex> new_changeset = cast(changeset, %{title: "Foo", body: "World"}, [:body])
iex> new_changeset.params
%{"title" => "Hello", "body" => "World"}
Or creating a changeset from a simple map with types:
iex> data = %{title: "hello"}
iex> types = %{title: :string}
iex> changeset = cast({data, types}, %{title: "world"}, [:title])
iex> apply_changes(changeset)
%{title: "world"}
## Composing casts
`cast/4` also accepts a changeset as its first argument. In such cases, all
the effects caused by the call to `cast/4` (additional errors and changes)
are simply added to the ones already present in the argument changeset.
Parameters are merged (**not deep-merged**) and the ones passed to `cast/4`
take precedence over the ones already in the changeset.
"""
@spec cast(Ecto.Schema.t | t | {data, types},
           %{binary => term} | %{atom => term} | :invalid,
           [atom],
           Keyword.t) :: t
def cast(data, params, permitted, opts \\ [])

# Structs are not acceptable param payloads; reject them explicitly.
def cast(_data, %{__struct__: _} = params, _permitted, _opts) do
  raise Ecto.CastError, type: :map, value: params,
                        message: "expected params to be a :map, got: `#{inspect(params)}`"
end

# Schemaless {data, types} tuple: cast with no pre-existing changes.
def cast({data, types}, params, permitted, opts) when is_map(data) do
  cast(data, types, %{}, params, permitted, opts)
end

def cast(%Changeset{types: nil}, _params, _permitted, _opts) do
  raise ArgumentError, "changeset does not have types information"
end

# Composing casts: cast the new params against the underlying data, then
# merge the result over the existing changeset (new params take precedence).
def cast(%Changeset{changes: changes, data: data, types: types, empty_values: empty_values} = changeset,
         params, permitted, opts) do
  # Keep the changeset's empty_values unless the caller overrides them.
  opts = Keyword.put_new(opts, :empty_values, empty_values)
  new_changeset = cast(data, types, changes, params, permitted, opts)
  cast_merge(changeset, new_changeset)
end

# Schema struct: field types come from the schema module.
def cast(%{__struct__: module} = data, params, permitted, opts) do
  cast(data, module.__changeset__(), %{}, params, permitted, opts)
end

# :invalid params (e.g. nil form submission): still validate the permitted
# keys are atoms, then return an invalid changeset with nil params.
defp cast(%{} = data, %{} = types, %{} = changes, :invalid, permitted, _opts) when is_list(permitted) do
  _ = Enum.each(permitted, &cast_key/1)
  %Changeset{params: nil, data: data, valid?: false, errors: [],
             changes: changes, types: types}
end

# Main path: normalize param keys to strings, then cast each permitted
# field, accumulating changes and errors.
defp cast(%{} = data, %{} = types, %{} = changes, %{} = params, permitted, opts) when is_list(permitted) do
  empty_values = Keyword.get(opts, :empty_values, @empty_values)
  params = convert_params(params)

  # Struct defaults are used as fallback values for empty params.
  defaults = case data do
    %{__struct__: struct} -> struct.__struct__()
    %{} -> %{}
  end

  {changes, errors, valid?} =
    Enum.reduce(permitted, {changes, [], true},
                &process_param(&1, params, types, data, empty_values, defaults, &2))

  %Changeset{params: params, data: data, valid?: valid?,
             errors: Enum.reverse(errors), changes: changes, types: types}
end

# Anything else (non-map params) is a caller error.
defp cast(%{}, %{}, %{}, params, permitted, _opts) when is_list(permitted) do
  raise Ecto.CastError, type: :map, value: params,
                        message: "expected params to be a :map, got: `#{inspect params}`"
end
# Casts a single permitted field from `params` into the accumulator
# {changes, errors, valid?}. A failed cast appends a {field, {message, meta}}
# error and flips valid? to false.
defp process_param(key, params, types, data, empty_values, defaults, {changes, errors, valid?}) do
  {key, param_key} = cast_key(key)
  type = cast_type!(types, key)

  # The "current" value is the pending change when one exists, otherwise
  # the value already in the data; used to discard no-op changes.
  current =
    case changes do
      %{^key => value} -> value
      _ -> Map.get(data, key)
    end

  case cast_field(key, param_key, type, params, current, empty_values, defaults, valid?) do
    {:ok, value, valid?} ->
      {Map.put(changes, key, value), errors, valid?}

    :missing ->
      {changes, errors, valid?}

    {:invalid, custom_errors} ->
      # Build error metadata: mark it a cast validation, record the type,
      # and allow a custom :message to override the default.
      {message, new_errors} =
        custom_errors
        |> Keyword.put_new(:validation, :cast)
        |> Keyword.put(:type, type)
        |> Keyword.pop(:message, "is invalid")

      {changes, [{key, {message, new_errors}} | errors], false}
  end
end
# Fetches the type for `key`, raising a helpful error when the field is an
# assoc/embed (those must go through cast_assoc/cast_embed) or unknown.
defp cast_type!(types, key) do
  case types do
    %{^key => {tag, _}} when tag in @relations ->
      raise "casting #{tag}s with cast/4 for #{inspect key} field is not supported, use cast_#{tag}/3 instead"

    %{^key => type} ->
      type

    _ ->
      known_fields = types |> Map.keys() |> Enum.map_join(", ", &inspect/1)

      raise ArgumentError,
            "unknown field `#{inspect(key)}` given to cast. Either the field does not exist or it is a " <>
              ":through association (which are read-only). The known fields are: #{known_fields}"
  end
end
# Normalizes a permitted key to {atom_key, string_param_key}; only atom
# keys are accepted.
defp cast_key(key) when is_atom(key),
  do: {key, Atom.to_string(key)}

defp cast_key(key),
  do: raise ArgumentError, "cast/3 expects a list of atom keys, got: `#{inspect key}`"
# Looks up `param_key` in params and casts its value against `type`.
# Returns {:ok, value, valid?} for a new value, :missing when the key is
# absent or the cast value equals the current one, or {:invalid, errors}
# when the cast fails.
defp cast_field(key, param_key, type, params, current, empty_values, defaults, valid?) do
  case params do
    %{^param_key => value} ->
      # Empty values (e.g. "") fall back to the field's struct default.
      value = if value in empty_values, do: Map.get(defaults, key), else: value

      case Ecto.Type.cast(type, value) do
        {:ok, value} ->
          # Discard no-op changes so they are not stored in the changeset.
          if Ecto.Type.equal?(type, current, value) do
            :missing
          else
            {:ok, value, valid?}
          end

        :error ->
          {:invalid, []}

        {:error, custom_errors} when is_list(custom_errors) ->
          {:invalid, custom_errors}
      end

    _ ->
      :missing
  end
end
# TODO: Remove branch when we require Elixir v1.10+.
# Converts atom-keyed params to string keys, raising Ecto.CastError on maps
# that mix atom and string keys. String-keyed maps pass through untouched.
if Code.ensure_loaded?(:maps) and function_exported?(:maps, :iterator, 1) do
  defp convert_params(params) do
    # Peek at the first key only: params must be homogeneous, so a single
    # atom key means the whole map must be atom-keyed (verified below).
    case :maps.next(:maps.iterator(params)) do
      {key, _, _} when is_atom(key) ->
        for {key, value} <- params, into: %{} do
          if is_atom(key) do
            {Atom.to_string(key), value}
          else
            raise Ecto.CastError, type: :map, value: params,
                                  message: "expected params to be a map with atoms or string keys, " <>
                                             "got a map with mixed keys: #{inspect params}"
          end
        end

      _ ->
        params
    end
  end
else
  # Fallback when :maps.iterator/1 is unavailable: reduce over the map,
  # keeping nil while only string keys are seen (nothing to rebuild) or
  # accumulating converted {string_key, value} pairs otherwise.
  defp convert_params(params) do
    params
    |> Enum.reduce(nil, fn
      {key, _value}, nil when is_binary(key) ->
        nil

      {key, _value}, _ when is_binary(key) ->
        raise Ecto.CastError, type: :map, value: params,
                              message: "expected params to be a map with atoms or string keys, " <>
                                         "got a map with mixed keys: #{inspect params}"

      {key, value}, nil when is_atom(key) ->
        [{Atom.to_string(key), value}]

      {key, value}, acc when is_atom(key) ->
        [{Atom.to_string(key), value} | acc]
    end)
    |> case do
      nil -> params
      list -> :maps.from_list(list)
    end
  end
end
## Casting related

@doc """
Casts the given association with the changeset parameters.

This function should be used when working with the entire association at
once (and not a single element of a many-style association) and receiving
data external to the application.

`cast_assoc/3` works matching the records extracted from the database
and compares it with the parameters received from an external source.
Therefore, it is expected that the data in the changeset has explicitly
preloaded the association being cast and that all of the IDs exist and
are unique.

For example, imagine a user has many addresses relationship where
post data is sent as follows

    %{"name" => "john doe", "addresses" => [
      %{"street" => "somewhere", "country" => "brazil", "id" => 1},
      %{"street" => "elsewhere", "country" => "poland"},
    ]}

and then

    User
    |> Repo.get!(id)
    |> Repo.preload(:addresses) # Only required when updating data
    |> Ecto.Changeset.cast(params, [])
    |> Ecto.Changeset.cast_assoc(:addresses, with: &MyApp.Address.changeset/2)

The parameters for the given association will be retrieved
from `changeset.params`. Those parameters are expected to be
a map with attributes, similar to the ones passed to `cast/4`.
Once parameters are retrieved, `cast_assoc/3` will match those
parameters with the associations already in the changeset record.

Once `cast_assoc/3` is called, Ecto will compare each parameter
with the user's already preloaded addresses and act as follows:

  * If the parameter does not contain an ID, the parameter data
    will be passed to `MyApp.Address.changeset/2` with a new struct
    and become an insert operation
  * If the parameter contains an ID and there is no associated child
    with such ID, the parameter data will be passed to
    `MyApp.Address.changeset/2` with a new struct and become an insert
    operation
  * If the parameter contains an ID and there is an associated child
    with such ID, the parameter data will be passed to
    `MyApp.Address.changeset/2` with the existing struct and become an
    update operation
  * If there is an associated child with an ID and its ID is not given
    as parameter, the `:on_replace` callback for that association will
    be invoked (see the "On replace" section on the module documentation)

Every time the `MyApp.Address.changeset/2` function is invoked, it must
return a changeset. Once the parent changeset is given to an `Ecto.Repo`
function, all entries will be inserted/updated/deleted within the same
transaction.

Note developers are allowed to explicitly set the `:action` field of a
changeset to instruct Ecto how to act in certain situations. Let's suppose
that, if one of the associations has only empty fields, you want to ignore
the entry altogether instead of showing an error. The changeset function could
be written like this:

    def changeset(struct, params) do
      struct
      |> cast(params, [:title, :body])
      |> validate_required([:title, :body])
      |> case do
        %{valid?: false, changes: changes} = changeset when changes == %{} ->
          # If the changeset is invalid and has no changes, it is
          # because all required fields are missing, so we ignore it.
          %{changeset | action: :ignore}
        changeset ->
          changeset
      end
    end

## Partial changes for many-style associations

By preloading an association using a custom query you can confine the behavior
of `cast_assoc/3`. This opens up the possibility to work on a subset of the data,
instead of all associations in the database.

Taking the initial example of users having addresses imagine those addresses
are set up to belong to a country. If you want to allow users to bulk edit all
addresses that belong to a single country, you can do so by changing the preload
query:

    query = from MyApp.Address, where: [country: ^edit_country]

    User
    |> Repo.get!(id)
    |> Repo.preload(addresses: query)
    |> Ecto.Changeset.cast(params, [])
    |> Ecto.Changeset.cast_assoc(:addresses)

This will allow you to cast and update only the association for the given country.
The important point for partial changes is that any addresses, which were not
preloaded won't be changed.

## Options

  * `:required` - if the association is a required field
  * `:required_message` - the message on failure, defaults to "can't be blank"
  * `:invalid_message` - the message on failure, defaults to "is invalid"
  * `:force_update_on_change` - force the parent record to be updated in the
    repository if there is a change, defaults to `true`
  * `:with` - the function to build the changeset from params. Defaults to the
    `changeset/2` function of the associated module. It can be changed by passing
    an anonymous function or an MFA tuple. If using an MFA, the default changeset
    and parameters arguments will be prepended to the given args. For example,
    using `with: {Author, :special_changeset, ["hello"]}` will be invoked as
    `Author.special_changeset(changeset, params, "hello")`
"""
def cast_assoc(changeset, name, opts \\ []) when is_atom(name) do
  cast_relation(:assoc, changeset, name, opts)
end
@doc """
Casts the given embed with the changeset parameters.

The parameters for the given embed will be retrieved
from `changeset.params`. Those parameters are expected to be
a map with attributes, similar to the ones passed to `cast/4`.
Once parameters are retrieved, `cast_embed/3` will match those
parameters with the embeds already in the changeset record.

See `cast_assoc/3` for an example of working with casts and
associations which would also apply for embeds.

The changeset must have been previously `cast` using
`cast/4` before this function is invoked.

## Options

  * `:required` - if the embed is a required field
  * `:required_message` - the message on failure, defaults to "can't be blank"
  * `:invalid_message` - the message on failure, defaults to "is invalid"
  * `:force_update_on_change` - force the parent record to be updated in the
    repository if there is a change, defaults to `true`
  * `:with` - the function to build the changeset from params. Defaults to the
    `changeset/2` function of the embedded module. It can be changed by passing
    an anonymous function or an MFA tuple. If using an MFA, the default changeset
    and parameters arguments will be prepended to the given args. For example,
    using `with: {Author, :special_changeset, ["hello"]}` will be invoked as
    `Author.special_changeset(changeset, params, "hello")`
"""
def cast_embed(changeset, name, opts \\ []) when is_atom(name) do
  cast_relation(:embed, changeset, name, opts)
end
# Shared implementation behind cast_assoc/3 and cast_embed/3.
#
# This clause guards against changesets that were never built via cast/4
# (no data/types to cast against) and fails loudly with instructions.
defp cast_relation(type, %Changeset{data: data, types: types}, _name, _opts)
    when data == nil or types == nil do
  raise ArgumentError, "cast_#{type}/3 expects the changeset to be cast. " <>
                       "Please call cast/4 before calling cast_#{type}/3"
end

defp cast_relation(type, %Changeset{} = changeset, key, opts) do
  # `key` is the atom field name; `param_key` is its string form used to
  # look the incoming value up in the external params.
  {key, param_key} = cast_key(key)
  %{data: data, types: types, params: params, changes: changes} = changeset
  %{related: related} = relation = relation!(:cast, type, key, Map.get(types, key))
  params = params || %{}

  # When the caller passed `required: true`, record the field in
  # `changeset.required` and remember the flag for the emptiness check.
  {changeset, required?} =
    if opts[:required] do
      {update_in(changeset.required, &[key|&1]), true}
    else
      {changeset, false}
    end

  # The per-child changeset builder; defaults to `related.changeset/2`.
  on_cast = Keyword.get_lazy(opts, :with, fn -> on_cast_default(type, related) end)
  original = Map.get(data, key)

  changeset =
    case Map.fetch(params, param_key) do
      {:ok, value} ->
        current = Relation.load!(data, original)
        case Relation.cast(relation, data, value, current, on_cast) do
          {:ok, change, relation_valid?} when change != original ->
            # Actual change: store it, merge the child validity into ours,
            # and optionally force the parent to be updated in the repo.
            valid? = changeset.valid? and relation_valid?
            changes = Map.put(changes, key, change)
            changeset = %{force_update(changeset, opts) | changes: changes, valid?: valid?}
            missing_relation(changeset, key, current, required?, relation, opts)

          {:error, {message, meta}} ->
            meta = [validation: type] ++ meta
            error = {key, {message(opts, :invalid_message, message), meta}}
            %{changeset | errors: [error | changeset.errors], valid?: false}

          # ignore or ok with change == original
          _ ->
            missing_relation(changeset, key, current, required?, relation, opts)
        end

      :error ->
        # No params given for this relation; only the :required check applies.
        missing_relation(changeset, key, original, required?, relation, opts)
    end

  # Store the resolved on_cast function back into the field's relation
  # metadata in `changeset.types`.
  update_in changeset.types[key], fn {type, relation} ->
    {type, %{relation | on_cast: on_cast}}
  end
end
# Builds the default `:with` function for cast_assoc/cast_embed: it calls
# `module.changeset/2` on the related schema. If that function is not
# defined, the UndefinedFunctionError is rescued and re-raised as a
# descriptive ArgumentError — but only when the failing frame is exactly
# the missing `changeset/2` itself; any other undefined-function error is
# re-raised untouched with its original stacktrace.
defp on_cast_default(type, module) do
  fn struct, params ->
    try do
      module.changeset(struct, params)
    rescue
      e in UndefinedFunctionError ->
        case __STACKTRACE__ do
          # Depending on how the frame was captured, the third element is
          # either the arity (an integer) or the actual argument list.
          [{^module, :changeset, args_or_arity, _}] when args_or_arity == 2
                                                    when length(args_or_arity) == 2 ->
            raise ArgumentError, """
            the module #{inspect module} does not define a changeset/2 function,
            which is used by cast_#{type}/3. You need to either:

              1. implement the #{type}.changeset/2 function
              2. pass the :with option to cast_#{type}/3 with an anonymous
                 function that expects 2 args or an MFA tuple

            When using an inline embed, the :with option must be given
            """

          stacktrace ->
            reraise e, stacktrace
        end
    end
  end
end
# Adds a "can't be blank" error when a required relation ends up empty,
# looking first at any pending change for the field and falling back to
# the current value.
defp missing_relation(%{changes: changes, errors: errors} = changeset,
                      name, current, required?, relation, opts) do
  value = Map.get(changes, name, current)

  if required? and Relation.empty?(relation, value) do
    error = {name, {message(opts, :required_message, "can't be blank"), [validation: :required]}}
    %{changeset | errors: [error | errors], valid?: false}
  else
    changeset
  end
end
# Fetches the relation struct stored under `name` in the types map,
# raising a descriptive ArgumentError when the field is missing or is
# not the expected kind of relation. The first clause matches when the
# stored tag equals the requested `type`.
defp relation!(_op, type, _name, {type, relation}),
  do: relation
# Unknown field. Assocs get an extra hint about read-only associations.
defp relation!(op, :assoc, name, nil),
  do: raise(ArgumentError, "cannot #{op} assoc `#{name}`, assoc `#{name}` not found. Make sure it is spelled correctly and that the association type is not read-only")
defp relation!(op, type, name, nil),
  do: raise(ArgumentError, "cannot #{op} #{type} `#{name}`, #{type} `#{name}` not found. Make sure that it exists and is spelled correctly")
# The field is a relation, but of a different kind (e.g. an embed where
# an assoc was expected).
defp relation!(op, type, name, {other, _}) when other in @relations,
  do: raise(ArgumentError, "expected `#{name}` to be an #{type} in `#{op}_#{type}`, got: `#{other}`")
# The field exists but holds a plain schema type, not a relation.
defp relation!(op, type, name, schema_type),
  do: raise(ArgumentError, "expected `#{name}` to be an #{type} in `#{op}_#{type}`, got: `#{inspect schema_type}`")
# Marks the parent changeset to be force-updated by the repo, unless the
# caller opted out via `force_update_on_change: false`.
defp force_update(changeset, opts) do
  if Keyword.get(opts, :force_update_on_change, true),
    do: put_in(changeset.repo_opts[:force], true),
    else: changeset
end
## Working with changesets

@doc """
Merges two changesets.

This function merges two changesets provided they have been applied to the
same data (their `:data` field is equal); if the data differs, an
`ArgumentError` exception is raised. If one of the changesets has a `:repo`
field which is not `nil`, then the value of that field is used as the `:repo`
field of the resulting changeset; if both changesets have a non-`nil` and
different `:repo` field, an `ArgumentError` exception is raised.

The other fields are merged with the following criteria:

  * `params` - params are merged (not deep-merged) giving precedence to the
    params of `changeset2` in case of a conflict. If both changesets have their
    `:params` fields set to `nil`, the resulting changeset will have its params
    set to `nil` too.
  * `changes` - changes are merged giving precedence to the `changeset2`
    changes.
  * `errors` and `validations` - they are simply concatenated.
  * `required` - required fields are merged; all the fields that appear
    in the required list of both changesets are moved to the required
    list of the resulting changeset.

## Examples

    iex> changeset1 = cast(%Post{}, %{title: "Title"}, [:title])
    iex> changeset2 = cast(%Post{}, %{title: "New title", body: "Body"}, [:title, :body])
    iex> changeset = merge(changeset1, changeset2)
    iex> changeset.changes
    %{body: "Body", title: "New title"}

    iex> changeset1 = cast(%Post{body: "Body"}, %{title: "Title"}, [:title])
    iex> changeset2 = cast(%Post{}, %{title: "New title"}, [:title])
    iex> merge(changeset1, changeset2)
    ** (ArgumentError) different :data when merging changesets

"""
@spec merge(t, t) :: t
def merge(changeset1, changeset2)

# Binding `data` in both heads enforces the "same underlying data"
# requirement at dispatch time.
def merge(%Changeset{data: data} = cs1, %Changeset{data: data} = cs2) do
  new_repo = merge_identical(cs1.repo, cs2.repo, "repos")
  new_repo_opts = Keyword.merge(cs1.repo_opts, cs2.repo_opts)
  new_action = merge_identical(cs1.action, cs2.action, "actions")
  new_filters = Map.merge(cs1.filters, cs2.filters)
  new_validations = cs1.validations ++ cs2.validations
  new_constraints = cs1.constraints ++ cs2.constraints

  # cast_merge/2 handles the cast-related fields (params, changes,
  # errors, required, types, valid?).
  cast_merge %{cs1 | repo: new_repo, repo_opts: new_repo_opts, filters: new_filters,
                     action: new_action, validations: new_validations,
                     constraints: new_constraints}, cs2
end

def merge(%Changeset{}, %Changeset{}) do
  raise ArgumentError, message: "different :data when merging changesets"
end
# Merges the cast-related fields of two changesets, giving `cs2`
# precedence for conflicting params and changes. Params stay `nil` only
# when both sides are `nil`.
defp cast_merge(cs1, cs2) do
  merged_params =
    if cs1.params || cs2.params do
      Map.merge(cs1.params || %{}, cs2.params || %{})
    end

  %{cs1 |
    params: merged_params,
    valid?: cs1.valid? and cs2.valid?,
    errors: Enum.uniq(cs1.errors ++ cs2.errors),
    types: cs1.types || cs2.types,
    changes: Map.merge(cs1.changes, cs2.changes),
    required: Enum.uniq(cs1.required ++ cs2.required)}
end
# Picks the value the two changesets agree on (or the one that is set
# when the other is nil); raises when both are set to different values.
defp merge_identical(value, value, _what), do: value
defp merge_identical(value, nil, _what), do: value
defp merge_identical(nil, value, _what), do: value

defp merge_identical(left, right, what) do
  raise ArgumentError, "different #{what} (`#{inspect left}` and " <>
                       "`#{inspect right}`) when merging changesets"
end
@doc """
Fetches the given field from changes or from the data.

While `fetch_change/2` only looks at the current `changes`
to retrieve a value, this function looks at the changes and
then falls back on the data, finally returning `:error` if
no value is available.

For relations, these functions will return the changeset
original data with changes applied. To retrieve raw changesets,
please use `fetch_change/2`.

## Examples

    iex> post = %Post{title: "Foo", body: "Bar baz bong"}
    iex> changeset = change(post, %{title: "New title"})
    iex> fetch_field(changeset, :title)
    {:changes, "New title"}
    iex> fetch_field(changeset, :body)
    {:data, "Bar baz bong"}
    iex> fetch_field(changeset, :not_a_field)
    :error

"""
@spec fetch_field(t, atom) :: {:changes, term} | {:data, term} | :error
def fetch_field(%Changeset{changes: changes, data: data, types: types}, key) when is_atom(key) do
  case Map.fetch(changes, key) do
    {:ok, value} ->
      # Relation changes are resolved to their applied form.
      {:changes, change_as_field(types, key, value)}

    :error ->
      case Map.fetch(data, key) do
        {:ok, value} -> {:data, data_as_field(data, types, key, value)}
        :error -> :error
      end
  end
end
@doc """
Same as `fetch_field/2` but returns the value or raises if the given key was not found.

## Examples

    iex> post = %Post{title: "Foo", body: "Bar baz bong"}
    iex> changeset = change(post, %{title: "New title"})
    iex> fetch_field!(changeset, :title)
    "New title"

    iex> fetch_field!(changeset, :other)
    ** (KeyError) key :other not found in: %Post{...}

"""
@spec fetch_field!(t, atom) :: term
def fetch_field!(changeset, key) do
  case fetch_field(changeset, key) do
    {_source, value} ->
      value

    :error ->
      raise KeyError, key: key, term: changeset.data
  end
end
@doc """
Gets a field from changes or from the data.

While `get_change/3` only looks at the current `changes`
to retrieve a value, this function looks at the changes and
then falls back on the data, finally returning `default` if
no value is available.

For relations, these functions will return the changeset data
with changes applied. To retrieve raw changesets, please use `get_change/3`.

## Examples

    iex> post = %Post{title: "A title", body: "My body is a cage"}
    iex> changeset = change(post, %{title: "A new title"})
    iex> get_field(changeset, :title)
    "A new title"
    iex> get_field(changeset, :not_a_field, "Told you, not a field!")
    "Told you, not a field!"

"""
@spec get_field(t, atom, term) :: term
def get_field(%Changeset{changes: changes, data: data, types: types}, key, default \\ nil) do
  case Map.fetch(changes, key) do
    {:ok, value} ->
      # Relation changes are resolved to their applied form.
      change_as_field(types, key, value)

    :error ->
      case Map.fetch(data, key) do
        {:ok, value} -> data_as_field(data, types, key, value)
        :error -> default
      end
  end
end
# Resolves a value taken from `changes`: relation changes are applied to
# produce the underlying data; everything else passes through untouched.
defp change_as_field(types, key, value) do
  with {tag, relation} when tag in @relations <- Map.get(types, key) do
    Relation.apply_changes(relation, value)
  else
    _other -> value
  end
end
# Resolves a value taken from `data`: relation values are loaded (e.g.
# ensuring they are not left unloaded); everything else passes through.
defp data_as_field(data, types, key, value) do
  with {tag, _relation} when tag in @relations <- Map.get(types, key) do
    Relation.load!(data, value)
  else
    _other -> value
  end
end
@doc """
Fetches a change from the given changeset.

This function only looks at the `:changes` field of the given `changeset` and
returns `{:ok, value}` if the change is present or `:error` if it's not.

## Examples

    iex> changeset = change(%Post{body: "foo"}, %{title: "bar"})
    iex> fetch_change(changeset, :title)
    {:ok, "bar"}
    iex> fetch_change(changeset, :body)
    :error

"""
@spec fetch_change(t, atom) :: {:ok, term} | :error
def fetch_change(%Changeset{changes: changes} = _changeset, key) when is_atom(key) do
  Map.fetch(changes, key)
end
@doc """
Same as `fetch_change/2` but returns the value or raises if the given key was not found.

## Examples

    iex> changeset = change(%Post{body: "foo"}, %{title: "bar"})
    iex> fetch_change!(changeset, :title)
    "bar"

    iex> fetch_change!(changeset, :body)
    ** (KeyError) key :body not found in: %{title: "bar"}

"""
@spec fetch_change!(t, atom) :: term
def fetch_change!(changeset, key) do
  with :error <- fetch_change(changeset, key) do
    raise KeyError, key: key, term: changeset.changes
  else
    {:ok, value} -> value
  end
end
@doc """
Gets a change or returns a default value.

## Examples

    iex> changeset = change(%Post{body: "foo"}, %{title: "bar"})
    iex> get_change(changeset, :title)
    "bar"
    iex> get_change(changeset, :body)
    nil

"""
@spec get_change(t, atom, term) :: term
def get_change(%Changeset{changes: changes} = _changeset, key, default \\ nil) when is_atom(key) do
  Map.get(changes, key, default)
end
@doc """
Updates a change.

The given `function` is invoked with the change value only if there
is a change for the given `key`. Note that the value of the change
can still be `nil` (unless the field was marked as required on `validate_required/3`).

## Examples

    iex> changeset = change(%Post{}, %{impressions: 1})
    iex> changeset = update_change(changeset, :impressions, &(&1 + 1))
    iex> changeset.changes.impressions
    2

"""
@spec update_change(t, atom, (term -> term)) :: t
def update_change(%Changeset{changes: changes} = changeset, key, function) when is_atom(key) do
  case changes do
    %{^key => value} -> put_change(changeset, key, function.(value))
    %{} -> changeset
  end
end
@doc """
Puts a change on the given `key` with `value`.

`key` is an atom that represents any field, embed or
association in the changeset. Note the `value` is directly
stored in the changeset with no validation whatsoever.
For this reason, this function is meant for working with
data internal to the application.

If the change is already present, it is overridden with
the new value. If the change has the same value as in the
changeset data, it is not added to the list of changes.

When changing embeds and associations, see `put_assoc/4`
for a complete reference on the accepted values.

## Examples

    iex> changeset = change(%Post{author: "bar"}, %{title: "foo"})
    iex> changeset = put_change(changeset, :title, "bar")
    iex> changeset.changes
    %{title: "bar"}

    iex> changeset = put_change(changeset, :author, "bar")
    iex> changeset.changes
    %{title: "bar"}

"""
@spec put_change(t, atom, term) :: t
def put_change(%Changeset{types: nil}, _key, _value) do
  raise ArgumentError, "changeset does not have types information"
end

def put_change(%Changeset{data: data, types: types} = changeset, key, value) do
  # Delegates to put_change/7, which handles relations, unknown fields
  # and type-aware equality.
  type = Map.get(types, key)

  {changes, errors, valid?} =
    put_change(data, changeset.changes, changeset.errors, changeset.valid?, key, value, type)

  %{changeset | changes: changes, errors: errors, valid?: valid?}
end
# Relation fields: delegate to Relation.change/3, tracking validity and
# collecting errors. A change equal to the original (or an :ignore) is
# removed from the changes map.
defp put_change(data, changes, errors, valid?, key, value, {tag, relation})
    when tag in @relations do
  original = Map.get(data, key)
  current = Relation.load!(data, original)

  case Relation.change(relation, value, current) do
    {:ok, change, relation_valid?} when change != original ->
      {Map.put(changes, key, change), errors, valid? and relation_valid?}

    {:error, error} ->
      {changes, [{key, error} | errors], false}

    # ignore or ok with change == original
    _ ->
      {Map.delete(changes, key), errors, valid?}
  end
end

# Unknown field name (no type): raise, distinguishing a misspelled atom
# from a non-atom key, which is a usage error.
defp put_change(data, _changes, _errors, _valid?, key, _value, nil) when is_atom(key) do
  raise ArgumentError, "unknown field `#{inspect(key)}` in #{inspect(data)}"
end

defp put_change(_data, _changes, _errors, _valid?, key, _value, nil) when not is_atom(key) do
  raise ArgumentError, "field names given to change/put_change must be atoms, got: `#{inspect(key)}`"
end

# Plain field: store the change only when it differs from the data
# according to the field type's equality (Ecto.Type.equal?/3); otherwise
# drop any stale change.
defp put_change(data, changes, errors, valid?, key, value, type) do
  if not Ecto.Type.equal?(type, Map.get(data, key), value) do
    {Map.put(changes, key, value), errors, valid?}
  else
    {Map.delete(changes, key), errors, valid?}
  end
end
@doc """
Puts the given association entry or entries as a change in the changeset.

This function is used to work with associations as a whole. For example,
if a Post has many Comments, it allows you to add, remove or change all
comments at once. If your goal is to simply add a new comment to a post,
then it is preferred to do so manually, as we will describe later in the
"Example: Adding a comment to a post" section.

This function requires the associated data to have been preloaded, except
when the parent changeset has been newly built and not yet persisted.
Missing data will invoke the `:on_replace` behaviour defined on the
association.

For associations with cardinality one, `nil` can be used to remove the existing
entry. For associations with many entries, an empty list may be given instead.

If the association has no changes, it will be skipped. If the association is
invalid, the changeset will be marked as invalid. If the given value is not any
of values below, it will raise.

The associated data may be given in different formats:

  * a map or a keyword list representing changes to be applied to the
    associated data. A map or keyword list can be given to update the
    associated data as long as they have matching primary keys.
    For example, `put_assoc(changeset, :comments, [%{id: 1, title: "changed"}])`
    will locate the comment with `:id` of 1 and update its title.
    If no comment with such id exists, one is created on the fly.
    Since only a single comment was given, any other associated comment
    will be replaced. On all cases, it is expected the keys to be atoms.
    This API is mostly used in scripts and tests, to make it straight-
    forward to create schemas with associations at once, such as:

        Ecto.Changeset.change(
          %Post{},
          title: "foo",
          comments: [
            %{body: "first"},
            %{body: "second"}
          ]
        )

  * changesets or structs - when a changeset or struct is given, they
    are treated as the canonical data and the associated data currently
    stored in the association is ignored. For instance, the operation
    `put_assoc(changeset, :comments, [%Comment{id: 1, title: "changed"}])`
    will send the `Comment` as is to the database, ignoring any comment
    currently associated, even if a matching ID is found. If the comment
    is already persisted to the database, then `put_assoc/4` only takes
    care of guaranteeing that the comments and the parent data are associated.
    This is extremely useful when associating existing data, as we will see
    in the "Example: Adding tags to a post" section.

Once the parent changeset is given to an `Ecto.Repo` function, all entries
will be inserted/updated/deleted within the same transaction.

## Example: Adding a comment to a post

Imagine a relationship where Post has many comments and you want to add a
new comment to an existing post. While it is possible to use `put_assoc/4`
for this, it would be unnecessarily complex. Let's see an example.

First, let's fetch the post with all existing comments:

    post = Post |> Repo.get!(1) |> Repo.preload(:comments)

The following approach is **wrong**:

    post
    |> Ecto.Changeset.change()
    |> Ecto.Changeset.put_assoc(:comments, [%Comment{body: "bad example!"}])
    |> Repo.update!()

The reason why the example above is wrong is because `put_assoc/4` always
works with the **full data**. So the example above will effectively **erase
all previous comments** and only keep the comment you are currently adding.

Instead, you could try:

    post
    |> Ecto.Changeset.change()
    |> Ecto.Changeset.put_assoc(:comments, [%Comment{body: "so-so example!"} | post.comments])
    |> Repo.update!()

In this example, we prepend the new comment to the list of existing comments.
Ecto will diff the list of comments currently in `post` with the list of comments
given, and correctly insert the new comment to the database. Note, however,
Ecto is doing a lot of work just to figure out something we knew since the
beginning, which is that there is only one new comment.

In cases like above, when you want to work only on a single entry, it is
much easier to simply work on the associated directly. For example, we
could instead set the `post` association in the comment:

    %Comment{body: "better example"}
    |> Ecto.Changeset.change()
    |> Ecto.Changeset.put_assoc(:post, post)
    |> Repo.insert!()

Alternatively, we can make sure that when we create a comment, it is already
associated to the post:

    Ecto.build_assoc(post, :comments)
    |> Ecto.Changeset.change(body: "great example!")
    |> Repo.insert!()

Or we can simply set the post_id in the comment itself:

    %Comment{body: "better example", post_id: post.id}
    |> Repo.insert!()

In other words, when you find yourself wanting to work only with a subset
of the data, then using `put_assoc/4` is most likely unnecessary. Instead,
you want to work on the other side of the association.

Let's see an example where using `put_assoc/4` is a good fit.

## Example: Adding tags to a post

Imagine you are receiving a set of tags you want to associate to a post.
Let's imagine that those tags exist upfront and are all persisted to the
database. Imagine we get the data in this format:

    params = %{"title" => "new post", "tags" => ["learner"]}

Now, since the tags already exist, we will bring all of them from the
database and put them directly in the post:

    tags = Repo.all(from t in Tag, where: t.name in ^params["tags"])

    post
    |> Repo.preload(:tags)
    |> Ecto.Changeset.cast(params, [:title]) # No need to allow :tags as we put them directly
    |> Ecto.Changeset.put_assoc(:tags, tags) # Explicitly set the tags

Since in this case we always require the user to pass all tags
directly, using `put_assoc/4` is a great fit. It will automatically
remove any tag not given and properly associate all of the given
tags with the post.

Furthermore, since the tag information is given as structs read directly
from the database, Ecto will treat the data as correct and only do the
minimum necessary to guarantee that posts and tags are associated,
without trying to update or diff any of the fields in the tag struct.

Although it accepts an `opts` argument, there are no options currently
supported by `put_assoc/4`.
"""
def put_assoc(%Changeset{} = changeset, name, value, opts \\ []) do
  put_relation(:assoc, changeset, name, value, opts)
end
@doc """
Puts the given embed entry or entries as a change in the changeset.

This function is used to work with embeds as a whole. For embeds with
cardinality one, `nil` can be used to remove the existing entry. For
embeds with many entries, an empty list may be given instead.

If the embed has no changes, it will be skipped. If the embed is
invalid, the changeset will be marked as invalid.

The list of supported values and their behaviour is described in
`put_assoc/4`. If the given value is not any of values listed there,
it will raise.

Although this function accepts an `opts` argument, there are no options
currently supported by `put_embed/4`.
"""
def put_embed(%Changeset{} = changeset, name, value, opts \\ []) do
  put_relation(:embed, changeset, name, value, opts)
end
# Shared implementation behind put_assoc/4 and put_embed/4: validates
# the field is a relation of the expected kind and stores the change via
# put_change/7.
defp put_relation(_tag, %{types: nil}, _name, _value, _opts) do
  raise ArgumentError, "changeset does not have types information"
end

defp put_relation(tag, changeset, name, value, _opts) do
  %{data: data, types: types} = changeset
  relation = relation!(:put, tag, name, Map.get(types, name))

  {changes, errors, valid?} =
    put_change(data, changeset.changes, changeset.errors, changeset.valid?, name, value, {tag, relation})

  %{changeset | changes: changes, errors: errors, valid?: valid?}
end
@doc """
Forces a change on the given `key` with `value`.

If the change is already present, it is overridden with
the new value.

## Examples

    iex> changeset = change(%Post{author: "bar"}, %{title: "foo"})
    iex> changeset = force_change(changeset, :title, "bar")
    iex> changeset.changes
    %{title: "bar"}

    iex> changeset = force_change(changeset, :author, "bar")
    iex> changeset.changes
    %{title: "bar", author: "bar"}

"""
@spec force_change(t, atom, term) :: t
def force_change(%Changeset{types: nil}, _key, _value) do
  raise ArgumentError, "changeset does not have types information"
end

def force_change(%Changeset{types: types} = changeset, key, value) do
  case Map.get(types, key) do
    # Relations cannot be force-changed; direct to put_assoc/put_embed.
    {tag, _} when tag in @relations ->
      raise "changing #{tag}s with force_change/3 is not supported, " <>
            "please use put_#{tag}/4 instead"

    nil ->
      raise ArgumentError, "unknown field `#{inspect(key)}` in #{inspect(changeset.data)}"

    # Plain field: store unconditionally, unlike put_change/3 which drops
    # changes equal to the data.
    _ ->
      put_in changeset.changes[key], value
  end
end
@doc """
Deletes a change with the given key.

## Examples

    iex> changeset = change(%Post{}, %{title: "foo"})
    iex> changeset = delete_change(changeset, :title)
    iex> get_change(changeset, :title)
    nil

"""
@spec delete_change(t, atom) :: t
def delete_change(%Changeset{} = changeset, key) when is_atom(key) do
  %{changeset | changes: Map.delete(changeset.changes, key)}
end
@doc """
Applies the changeset changes to the changeset data.

This operation will return the underlying data with changes
regardless if the changeset is valid or not.

## Examples

    iex> changeset = change(%Post{author: "bar"}, %{title: "foo"})
    iex> apply_changes(changeset)
    %Post{author: "bar", title: "foo"}

"""
@spec apply_changes(t) :: Ecto.Schema.t | data
def apply_changes(%Changeset{changes: changes, data: data}) when changes == %{} do
  # Fast path: nothing changed, return the data untouched.
  data
end

def apply_changes(%Changeset{changes: changes, data: data, types: types}) do
  Enum.reduce(changes, data, fn {key, value}, acc ->
    case Map.fetch(types, key) do
      # Relation changes are recursively applied to their own data.
      {:ok, {tag, relation}} when tag in @relations ->
        Map.put(acc, key, Relation.apply_changes(relation, value))

      {:ok, _} ->
        Map.put(acc, key, value)

      # Changes without a known type are skipped.
      :error ->
        acc
    end
  end)
end
@doc """
Applies the changeset action only if the changes are valid.

If the changes are valid, all changes are applied to the changeset data.
If the changes are invalid, no changes are applied, and an error tuple
is returned with the changeset containing the action that was attempted
to be applied.

The action may be any atom.

## Examples

    iex> {:ok, data} = apply_action(changeset, :update)

    iex> {:error, changeset} = apply_action(changeset, :update)
    %Ecto.Changeset{action: :update}

"""
@spec apply_action(t, atom) :: {:ok, Ecto.Schema.t() | data} | {:error, t}
def apply_action(%Changeset{valid?: true} = changeset, action) when is_atom(action) do
  {:ok, apply_changes(changeset)}
end

def apply_action(%Changeset{} = changeset, action) when is_atom(action) do
  {:error, %Changeset{changeset | action: action}}
end

def apply_action(%Changeset{}, action) do
  raise ArgumentError, "expected action to be an atom, got: #{inspect action}"
end
@doc """
Applies the changeset action if the changes are valid or raises an error.

## Examples

    iex> changeset = change(%Post{author: "bar"}, %{title: "foo"})
    iex> apply_action!(changeset, :update)
    %Post{author: "bar", title: "foo"}

    iex> changeset = change(%Post{author: "bar"}, %{title: :bad})
    iex> apply_action!(changeset, :update)
    ** (Ecto.InvalidChangesetError) could not perform update because changeset is invalid.

See `apply_action/2` for more information.
"""
@spec apply_action!(t, atom) :: Ecto.Schema.t() | data
def apply_action!(%Changeset{} = changeset, action) do
  with {:ok, data} <- apply_action(changeset, action) do
    data
  else
    {:error, invalid} ->
      raise Ecto.InvalidChangesetError, action: action, changeset: invalid
  end
end
## Validations

@doc ~S"""
Returns a keyword list of the validations for this changeset.

The keys in the list are the names of fields, and the values are a
validation associated with the field. A field may occur multiple
times in the list.

## Example

    %Post{}
    |> change()
    |> validate_format(:title, ~r/^\w+:\s/, message: "must start with a topic")
    |> validate_length(:title, max: 100)
    |> validations()
    #=> [
      title: {:length, [ max: 100 ]},
      title: {:format, ~r/^\w+:\s/}
    ]

The following validations may be included in the result. The list is
not necessarily exhaustive. For example, custom validations written
by the developer will also appear in our return value.

This first group contains validations that take a keyword list of validators,
where the validators are shown immediately following the validation type.
This list may also include a `message:` key.

  * `{:length, [option]}`
    * `min: n`
    * `max: n`
    * `is: n`
    * `count: :graphemes | :codepoints`
  * `{:number, [option]}`
    * `equal_to: n`
    * `greater_than: n`
    * `greater_than_or_equal_to: n`
    * `less_than: n`
    * `less_than_or_equal_to: n`

The other validators simply take a value:

  * `{:exclusion, Enum.t}`
  * `{:format, ~r/pattern/}`
  * `{:inclusion, Enum.t}`
  * `{:subset, Enum.t}`

"""
@spec validations(t) :: [{atom, term}]
def validations(%Changeset{validations: validations}) do
  validations
end
@doc """
Adds an error to the changeset.

An additional keyword list `keys` can be passed to provide additional
contextual information for the error. This is useful when using
`traverse_errors/2`.

## Examples

    iex> changeset = change(%Post{}, %{title: ""})
    iex> changeset = add_error(changeset, :title, "empty")
    iex> changeset.errors
    [title: {"empty", []}]
    iex> changeset.valid?
    false

    iex> changeset = change(%Post{}, %{title: ""})
    iex> changeset = add_error(changeset, :title, "empty", additional: "info")
    iex> changeset.errors
    [title: {"empty", [additional: "info"]}]
    iex> changeset.valid?
    false
"""
@spec add_error(t, atom, String.t, Keyword.t) :: t
def add_error(%Changeset{} = changeset, key, message, keys \\ []) when is_binary(message) do
  # Errors are stored most-recent-first; adding one always invalidates the changeset.
  new_errors = [{key, {message, keys}} | changeset.errors]
  %{changeset | errors: new_errors, valid?: false}
end
@doc """
Validates the given `field` change.

It invokes the `validator` function to perform the validation
only if a change for the given `field` exists and the change
value is not `nil`. The function must return a list of errors
(with an empty list meaning no errors).

In case there's at least one error, the list of errors will be appended to the
`:errors` field of the changeset and the `:valid?` flag will be set to
`false`.

## Examples

    iex> changeset = change(%Post{}, %{title: "foo"})
    iex> changeset = validate_change changeset, :title, fn :title, title ->
    ...>   # Value must not be "foo"!
    ...>   if title == "foo" do
    ...>     [title: "cannot be foo"]
    ...>   else
    ...>     []
    ...>   end
    ...> end
    iex> changeset.errors
    [title: {"cannot be foo", []}]
"""
@spec validate_change(t, atom, (atom, term -> [{atom, String.t} | {atom, {String.t, Keyword.t}}])) :: t
def validate_change(%Changeset{} = changeset, field, validator) when is_atom(field) do
  %{changes: changes, errors: errors} = changeset
  ensure_field_exists!(changeset, field)

  # Only run the validator when the change exists and is not nil.
  raw_errors =
    case Map.get(changes, field) do
      nil -> []
      value -> validator.(field, value)
    end

  # Normalize bare `{key, message}` entries into `{key, {message, opts}}`.
  normalized =
    for error <- raw_errors do
      case error do
        {key, msg} when is_atom(key) and is_binary(msg) ->
          {key, {msg, []}}

        {key, {msg, opts}} when is_atom(key) and is_binary(msg) and is_list(opts) ->
          {key, {msg, opts}}
      end
    end

  if normalized == [] do
    changeset
  else
    %{changeset | errors: normalized ++ errors, valid?: false}
  end
end
@doc """
Stores the validation `metadata` and validates the given `field` change.

Similar to `validate_change/3` but stores the validation metadata
into the changeset validators. The validator metadata is often used
as a reflection mechanism, to automatically generate code based on
the available validations.

## Examples

    iex> changeset = change(%Post{}, %{title: "foo"})
    iex> changeset = validate_change changeset, :title, :useless_validator, fn
    ...>   _, _ -> []
    ...> end
    iex> changeset.validations
    [title: :useless_validator]
"""
@spec validate_change(t, atom, term, (atom, term -> [{atom, String.t} | {atom, {String.t, Keyword.t}}])) :: t
def validate_change(%Changeset{validations: validations} = changeset, field, metadata, validator) do
  # Record the metadata for reflection, then run the actual validation.
  changeset
  |> Map.put(:validations, [{field, metadata} | validations])
  |> validate_change(field, validator)
end
@doc """
Validates that one or more fields are present in the changeset.

You can pass a single field name or a list of field names that
are required.

If the value of a field is `nil` or a string made only of whitespace,
the changeset is marked as invalid, the field is removed from the
changeset's changes, and an error is added. An error won't be added if
the field already has an error.

If a field is given to `validate_required/3` but it has not been passed
as parameter during `cast/3` (i.e. it has not been changed), then
`validate_required/3` will check for its current value in the data.
If the data contains an non-empty value for the field, then no error is
added. This allows developers to use `validate_required/3` to perform
partial updates. For example, on `insert` all fields would be required,
because their default values on the data are all `nil`, but on `update`,
if you don't want to change a field that has been previously set,
you are not required to pass it as a paramater, since `validate_required/3`
won't add an error for missing changes as long as the value in the
data given to the `changeset` is not empty.

Do not use this function to validate associations are required,
instead pass the `:required` option to `cast_assoc/3`.

Opposite to other validations, calling this function does not store
the validation under the `changeset.validations` key. Instead, it
stores all required fields under `changeset.required`.

## Options

  * `:message` - the message on failure, defaults to "can't be blank"
  * `:trim` - a boolean that sets whether whitespaces are removed before
    running the validation on binaries/strings, defaults to true

## Examples

    validate_required(changeset, :title)
    validate_required(changeset, [:title, :body])
"""
@spec validate_required(t, list | atom, Keyword.t) :: t
def validate_required(%Changeset{} = changeset, fields, opts \\ []) when not is_nil(fields) do
  %{required: required, errors: errors, changes: changes} = changeset
  trim = Keyword.get(opts, :trim, true)
  fields = List.wrap(fields)

  # Keep the check order of the original comprehension: only verify the field
  # exists (which raises on unknown fields) after it is known to be missing,
  # and skip fields that already carry an error.
  fields_with_errors =
    Enum.filter(fields, fn field ->
      missing?(changeset, field, trim) and
        ensure_field_exists!(changeset, field) and
        is_nil(errors[field])
    end)

  if fields_with_errors == [] do
    %{changeset | required: fields ++ required}
  else
    message = message(opts, "can't be blank")
    new_errors = for field <- fields_with_errors, do: {field, {message, [validation: :required]}}

    %{changeset |
      changes: Map.drop(changes, fields_with_errors),
      required: fields ++ required,
      errors: new_errors ++ errors,
      valid?: false}
  end
end
@doc """
Validates that no existing record with a different primary key
has the same values for these fields.

This function exists to provide quick feedback to users of your
application. It should not be relied on for any data guarantee as it
has race conditions and is inherently unsafe. For example, if this
check happens twice in the same time interval (because the user
submitted a form twice), both checks may pass and you may end-up with
duplicate entries in the database. Therefore, a `unique_constraint/3`
should also be used to ensure your data won't get corrupted.

However, because constraints are only checked if all validations
succeed, this function can be used as an early check to provide
early feedback to users, since most conflicting data will have been
inserted prior to the current validation phase.

## Options

  * `:message` - the message in case the constraint check fails,
    defaults to "has already been taken".

  * `:match` - how the changeset constraint name is matched against the
    repo constraint, may be `:exact` or `:suffix`. Defaults to `:exact`.
    `:suffix` matches any repo constraint which `ends_with?` `:name`
    to this changeset constraint.

  * `:error_key` - the key to which changeset error will be added when
    check fails, defaults to the first field name of the given list of
    fields.

  * `:prefix` - the prefix to run the query on (such as the schema path
    in Postgres or the database in MySQL). See `Ecto.Repo` documentation
    for more information.

  * `:repo_opts` - the options to pass to the `Ecto.Repo` call.

## Examples

    unsafe_validate_unique(changeset, :city_name, repo)
    unsafe_validate_unique(changeset, [:city_name, :state_name], repo)
    unsafe_validate_unique(changeset, [:city_name, :state_name], repo, message: "city must be unique within state")
    unsafe_validate_unique(changeset, [:city_name, :state_name], repo, prefix: "public")
"""
@spec unsafe_validate_unique(t, atom | [atom, ...], Ecto.Repo.t, Keyword.t) :: t
def unsafe_validate_unique(changeset, fields, repo, opts \\ []) when is_list(opts) do
  fields = List.wrap(fields)
  {repo_opts, opts} = Keyword.pop(opts, :repo_opts, [])

  # A schema module is required to build the uniqueness query, so bare
  # (schemaless) changesets are rejected upfront.
  {validations, schema} =
    case changeset do
      %Ecto.Changeset{validations: validations, data: %schema{}} ->
        {validations, schema}

      %Ecto.Changeset{} ->
        raise ArgumentError, "unsafe_validate_unique/4 does not work with schemaless changesets"
    end

  # Record the validation for reflection purposes.
  changeset = %{changeset | validations: [{:unsafe_unique, fields} | validations]}

  # Pair each field with its current value (change or data) for the WHERE clause.
  where_clause = for field <- fields do
    {field, get_field(changeset, field)}
  end

  # No need to query if there is a prior error for the fields
  any_prior_errors_for_fields? = Enum.any?(changeset.errors, &(elem(&1, 0) in fields))

  # No need to query if we haven't changed any of the fields in question
  unrelated_changes? = Enum.all?(fields, &not Map.has_key?(changeset.changes, &1))

  # If we don't have values for all fields, we can't query for uniqueness
  any_nil_values_for_fields? = Enum.any?(where_clause, &(&1 |> elem(1) |> is_nil()))

  if unrelated_changes? || any_nil_values_for_fields? || any_prior_errors_for_fields? do
    changeset
  else
    # Exclude the record under change (by primary key) so it cannot collide
    # with itself, then fetch at most one conflicting row.
    query =
      schema
      |> maybe_exclude_itself(changeset)
      |> Ecto.Query.where(^where_clause)
      |> Ecto.Query.select(true)
      |> Ecto.Query.limit(1)

    query =
      if prefix = opts[:prefix] do
        Ecto.Query.put_query_prefix(query, prefix)
      else
        query
      end

    if repo.one(query, repo_opts) do
      error_key = Keyword.get(opts, :error_key, hd(fields))

      add_error(changeset, error_key, message(opts, "has already been taken"),
        validation: :unsafe_unique, fields: fields)
    else
      changeset
    end
  end
end
# Builds a query that excludes the record being validated from the
# uniqueness check by adding a `pk_field != value` condition for every
# primary key field that has a value. If any primary key field is nil
# (e.g. a yet-to-be-inserted record), there is nothing to exclude, so the
# reduction halts and returns the bare schema.
defp maybe_exclude_itself(schema, changeset) do
  :primary_key
  |> schema.__schema__()
  |> Enum.reduce_while(schema, fn field, query ->
    case get_field(changeset, field) do
      nil ->
        {:halt, schema}

      value ->
        # or_where: for composite keys, a record only matches "itself" when
        # ALL key parts match, so it is excluded when ANY part differs.
        {:cont, Ecto.Query.or_where(query, [q], field(q, ^field) != ^value)}
    end
  end)
end
# Raises if `field` is not declared among the changeset types; otherwise
# returns true so it can double as a boolean inside filter expressions.
defp ensure_field_exists!(%Changeset{types: types, data: data}, field) do
  if Map.has_key?(types, field) do
    true
  else
    raise ArgumentError, "unknown field #{inspect(field)} in #{inspect(data)}"
  end
end
# Decides whether a required field counts as missing: nil is always missing,
# strings are missing when empty (optionally after trimming leading
# whitespace), and any other present value counts as given.
defp missing?(changeset, field, trim) when is_atom(field) do
  case get_field(changeset, field) do
    %{__struct__: Ecto.Association.NotLoaded} ->
      # Unloaded associations cannot be checked for presence.
      raise ArgumentError,
            "attempting to validate association `#{field}` " <>
              "that was not loaded. Please preload your associations " <>
              "before calling validate_required/3 or pass the :required " <>
              "option to Ecto.Changeset.cast_assoc/3"

    value when is_binary(value) and trim ->
      String.trim_leading(value) == ""

    value when is_binary(value) ->
      value == ""

    nil ->
      true

    _present ->
      false
  end
end

defp missing?(_changeset, field, _trim) do
  raise ArgumentError,
        "validate_required/3 expects field names to be atoms, got: `#{inspect field}`"
end
@doc """
Validates a change has the given format.

The format has to be expressed as a regular expression.

## Options

  * `:message` - the message on failure, defaults to "has invalid format"

## Examples

    validate_format(changeset, :email, ~r/@/)
"""
@spec validate_format(t, atom, Regex.t, Keyword.t) :: t
def validate_format(changeset, field, format, opts \\ []) do
  validate_change(changeset, field, {:format, format}, fn _, value ->
    case value =~ format do
      true -> []
      false -> [{field, {message(opts, "has invalid format"), [validation: :format]}}]
    end
  end)
end
@doc """
Validates a change is included in the given enumerable.

## Options

  * `:message` - the message on failure, defaults to "is invalid"

## Examples

    validate_inclusion(changeset, :gender, ["man", "woman", "other", "prefer not to say"])
    validate_inclusion(changeset, :age, 0..99)
"""
@spec validate_inclusion(t, atom, Enum.t, Keyword.t) :: t
def validate_inclusion(changeset, field, data, opts \\ []) do
  validate_change(changeset, field, {:inclusion, data}, fn _, value ->
    case value in data do
      true -> []
      false -> [{field, {message(opts, "is invalid"), [validation: :inclusion, enum: data]}}]
    end
  end)
end
@doc ~S"""
Validates a change, of type enum, is a subset of the given enumerable. Like
`validate_inclusion/4` for lists.

## Options

  * `:message` - the message on failure, defaults to "has an invalid entry"

## Examples

    validate_subset(changeset, :pets, ["cat", "dog", "parrot"])
    validate_subset(changeset, :lottery_numbers, 0..99)
"""
@spec validate_subset(t, atom, Enum.t, Keyword.t) :: t
def validate_subset(changeset, field, data, opts \\ []) do
  validate_change(changeset, field, {:subset, data}, fn _, value ->
    # Every entry of the change must be a member of `data`.
    if Enum.all?(value, &(&1 in data)) do
      []
    else
      [{field, {message(opts, "has an invalid entry"), [validation: :subset, enum: data]}}]
    end
  end)
end
@doc """
Validates a change is not included in the given enumerable.

## Options

  * `:message` - the message on failure, defaults to "is reserved"

## Examples

    validate_exclusion(changeset, :name, ~w(admin superadmin))
"""
@spec validate_exclusion(t, atom, Enum.t, Keyword.t) :: t
def validate_exclusion(changeset, field, data, opts \\ []) do
  validate_change(changeset, field, {:exclusion, data}, fn _, value ->
    if value in data do
      [{field, {message(opts, "is reserved"), [validation: :exclusion, enum: data]}}]
    else
      []
    end
  end)
end
@doc """
Validates a change is a string or list of the given length.

Note that the length of a string is counted in graphemes by default. If using
this validation to match a character limit of a database backend,
it's likely that the limit ignores graphemes and limits the number
of unicode characters. Then consider using the `:count` option to
limit the number of codepoints (`:codepoints`), or limit the number of bytes (`:bytes`).

## Options

  * `:is` - the length must be exactly this value
  * `:min` - the length must be greater than or equal to this value
  * `:max` - the length must be less than or equal to this value
  * `:count` - what length to count for string, `:graphemes` (default), `:codepoints` or `:bytes`
  * `:message` - the message on failure, depending on the validation, is one of:
    * for strings:
      * "should be %{count} character(s)"
      * "should be at least %{count} character(s)"
      * "should be at most %{count} character(s)"
    * for binary:
      * "should be %{count} byte(s)"
      * "should be at least %{count} byte(s)"
      * "should be at most %{count} byte(s)"
    * for lists:
      * "should have %{count} item(s)"
      * "should have at least %{count} item(s)"
      * "should have at most %{count} item(s)"

## Examples

    validate_length(changeset, :title, min: 3)
    validate_length(changeset, :title, max: 100)
    validate_length(changeset, :title, min: 3, max: 100)
    validate_length(changeset, :code, is: 9)
    validate_length(changeset, :topics, is: 2)
    validate_length(changeset, :icon, count: :bytes, max: 1024 * 16)
"""
@spec validate_length(t, atom, Keyword.t) :: t
def validate_length(changeset, field, opts) when is_list(opts) do
  validate_change(changeset, field, {:length, opts}, fn _, value ->
    count_type = opts[:count] || :graphemes

    # Determine how to measure the value and which message family applies.
    {type, length} =
      case {value, count_type} do
        {binary, :codepoints} when is_binary(binary) -> {:string, codepoints_length(binary, 0)}
        {binary, :graphemes} when is_binary(binary) -> {:string, String.length(binary)}
        {binary, :bytes} when is_binary(binary) -> {:binary, byte_size(binary)}
        {list, _} when is_list(list) -> {:list, list_length(changeset, field, list)}
      end

    # Checks run in order (:is, :min, :max); the first failing one wins.
    error =
      ((is = opts[:is]) && wrong_length(type, length, is, opts)) ||
        ((min = opts[:min]) && too_short(type, length, min, opts)) ||
        ((max = opts[:max]) && too_long(type, length, max, opts))

    if error, do: [{field, error}], else: []
  end)
end
# Counts Unicode codepoints in a binary; any byte that does not start a
# valid UTF-8 sequence is counted as a single codepoint.
defp codepoints_length(<<>>, count), do: count
defp codepoints_length(<<_::utf8, rest::binary>>, count), do: codepoints_length(rest, count + 1)
defp codepoints_length(<<_, rest::binary>>, count), do: codepoints_length(rest, count + 1)
# Returns the length of a list change. When the field is a relation
# (embed/assoc), entries are first run through Relation.filter_empty/1 —
# presumably to drop empty/discarded entries so the validated length matches
# what will actually be persisted (TODO confirm against Relation module).
defp list_length(%{types: types}, field, value) do
  case Map.fetch(types, field) do
    {:ok, {tag, _association}} when tag in [:embed, :assoc] ->
      length(Relation.filter_empty(value))

    _ ->
      length(value)
  end
end
# Returns nil when the measured length equals the `:is` target; otherwise
# builds the `:is` error tuple with a per-type default message.
defp wrong_length(_type, length, length, _opts), do: nil

defp wrong_length(type, _length, target, opts) do
  default =
    case type do
      :string -> "should be %{count} character(s)"
      :binary -> "should be %{count} byte(s)"
      :list -> "should have %{count} item(s)"
    end

  {message(opts, default), count: target, validation: :length, kind: :is, type: type}
end
# Returns nil when the measured length satisfies `:min`; otherwise builds
# the `:min` error tuple with a per-type default message.
defp too_short(_type, length, target, _opts) when length >= target, do: nil

defp too_short(type, _length, target, opts) do
  default =
    case type do
      :string -> "should be at least %{count} character(s)"
      :binary -> "should be at least %{count} byte(s)"
      :list -> "should have at least %{count} item(s)"
    end

  {message(opts, default), count: target, validation: :length, kind: :min, type: type}
end
# Returns nil when the measured length satisfies `:max`; otherwise builds
# the `:max` error tuple with a per-type default message.
defp too_long(_type, length, target, _opts) when length <= target, do: nil

defp too_long(type, _length, target, opts) do
  default =
    case type do
      :string -> "should be at most %{count} character(s)"
      :binary -> "should be at most %{count} byte(s)"
      :list -> "should have at most %{count} item(s)"
    end

  {message(opts, default), count: target, validation: :length, kind: :max, type: type}
end
@doc """
Validates the properties of a number.

## Options

  * `:less_than`
  * `:greater_than`
  * `:less_than_or_equal_to`
  * `:greater_than_or_equal_to`
  * `:equal_to`
  * `:not_equal_to`
  * `:message` - the message on failure, defaults to one of:
    * "must be less than %{number}"
    * "must be greater than %{number}"
    * "must be less than or equal to %{number}"
    * "must be greater than or equal to %{number}"
    * "must be equal to %{number}"
    * "must be not equal to %{number}"

## Examples

    validate_number(changeset, :count, less_than: 3)
    validate_number(changeset, :pi, greater_than: 3, less_than: 4)
    validate_number(changeset, :the_answer_to_life_the_universe_and_everything, equal_to: 42)
"""
@spec validate_number(t, atom, Keyword.t) :: t
def validate_number(changeset, field, opts) do
  validate_change changeset, field, {:number, opts}, fn
    field, value ->
      # A custom :message (if any) overrides the per-check default message.
      {message, opts} = Keyword.pop(opts, :message)

      # Stops at the first failing check; returns [] when all checks pass.
      # @number_validators maps option keys to {comparison_fun, default_message}.
      Enum.find_value opts, [], fn {spec_key, target_value} ->
        case Map.fetch(@number_validators, spec_key) do
          {:ok, {spec_function, default_message}} ->
            validate_number(field, value, message || default_message,
                            spec_key, spec_function, target_value)

          :error ->
            supported_options = @number_validators |> Map.keys() |> Enum.map_join("\n", &"  * #{inspect(&1)}")

            raise ArgumentError, """
            unknown option #{inspect spec_key} given to validate_number/3

            The supported options are:

            #{supported_options}
            """
        end
      end
  end
end
# Decimal values cannot be compared with the plain operators, so compare via
# Decimal.cmp/2 and translate the :lt/:eq/:gt result against the spec key.
# NOTE(review): Decimal.cmp/2 was removed in Decimal 2.x in favor of
# Decimal.compare/2 — confirm which Decimal version this tree pins before
# changing it.
defp validate_number(field, %Decimal{} = value, message, spec_key, _spec_function, target_value) do
  result = Decimal.cmp(value, decimal_new(target_value))

  case decimal_compare(result, spec_key) do
    true -> nil
    false -> [{field, {message, validation: :number, kind: spec_key, number: target_value}}]
  end
end

# Plain numbers: apply the comparison function resolved from @number_validators.
# Returns nil on success (so Enum.find_value keeps scanning) or the error list.
defp validate_number(field, value, message, spec_key, spec_function, target_value) do
  case apply(spec_function, [value, target_value]) do
    true -> nil
    false -> [{field, {message, validation: :number, kind: spec_key, number: target_value}}]
  end
end
# Converts the comparison target into a Decimal; floats go through
# from_float/1 — presumably the required conversion path for floats in the
# Decimal library (confirm against the pinned Decimal version).
defp decimal_new(term) do
  if is_float(term), do: Decimal.from_float(term), else: Decimal.new(term)
end
# Maps a Decimal comparison result (:lt/:gt/:eq) to whether the given
# validation spec is satisfied by that ordering.
defp decimal_compare(result, spec) do
  case result do
    :lt -> spec in [:less_than, :less_than_or_equal_to, :not_equal_to]
    :gt -> spec in [:greater_than, :greater_than_or_equal_to, :not_equal_to]
    :eq -> spec in [:equal_to, :less_than_or_equal_to, :greater_than_or_equal_to]
  end
end
@doc """
Validates that the given parameter matches its confirmation.

By calling `validate_confirmation(changeset, :email)`, this
validation will check if both "email" and "email_confirmation"
in the parameter map matches. Note this validation only looks
at the parameters themselves, never the fields in the schema.
As such as, the "email_confirmation" field does not need to be
added as a virtual field in your schema.

Note that if the confirmation field is nil or missing, this does
not add a validation error. You can specify that the confirmation
parameter is required in the options (see below).

## Options

  * `:message` - the message on failure, defaults to "does not match confirmation"
  * `:required` - boolean, sets whether existence of confirmation parameter
    is required for addition of error. Defaults to false

## Examples

    validate_confirmation(changeset, :email)
    validate_confirmation(changeset, :password, message: "does not match password")

    cast(data, params, [:password])
    |> validate_confirmation(:password, message: "does not match password")
"""
@spec validate_confirmation(t, atom, Keyword.t) :: t
def validate_confirmation(changeset, field, opts \\ [])

def validate_confirmation(%{params: params} = changeset, field, opts) when is_map(params) do
  # Parameters are keyed by strings; the "<field>_confirmation" key is
  # checked against the original parameter value.
  param = Atom.to_string(field)
  error_param = "#{param}_confirmation"
  # Converts a fixed, developer-controlled name — not arbitrary user input.
  error_field = String.to_atom(error_param)
  value = Map.get(params, param)

  errors =
    case Map.fetch(params, error_param) do
      # Confirmation present and matching (pin on `value`): no error.
      {:ok, ^value} ->
        []

      # Confirmation present but different: mismatch error.
      {:ok, _} ->
        [{error_field,
          {message(opts, "does not match confirmation"), [validation: :confirmation]}}]

      # Confirmation missing: only an error when the :required option is set.
      :error ->
        confirmation_missing(opts, error_field)
    end

  %{changeset | validations: [{field, {:confirmation, opts}} | changeset.validations],
                errors: errors ++ changeset.errors,
                valid?: changeset.valid? and errors == []}
end

# Without params (e.g. a changeset built via change/2) there is nothing to
# confirm, so the changeset passes through untouched.
def validate_confirmation(%{params: nil} = changeset, _, _) do
  changeset
end
# When the confirmation parameter is absent, report an error only if the
# caller opted into `required: true`.
defp confirmation_missing(opts, error_field) do
  if Keyword.get(opts, :required, false) do
    [{error_field, {message(opts, "can't be blank"), [validation: :required]}}]
  else
    []
  end
end
# Fetches the user-supplied message under `key` (defaults to :message),
# falling back to the validation's default message.
defp message(opts, key \\ :message, default) do
  case Keyword.fetch(opts, key) do
    {:ok, message} -> message
    :error -> default
  end
end
@doc """
Validates the given parameter is true.

Note this validation only checks the parameter itself is true, never
the field in the schema. That's because acceptance parameters do not need
to be persisted, as by definition they would always be stored as `true`.

## Options

  * `:message` - the message on failure, defaults to "must be accepted"

## Examples

    validate_acceptance(changeset, :terms_of_service)
    validate_acceptance(changeset, :rules, message: "please accept rules")
"""
@spec validate_acceptance(t, atom, Keyword.t) :: t
def validate_acceptance(changeset, field, opts \\ [])

def validate_acceptance(%{params: params} = changeset, field, opts) do
  errors = validate_acceptance_errors(params, field, opts)

  # Record the validation, merge any errors, and recompute validity.
  changeset
  |> Map.update!(:validations, &[{field, {:acceptance, opts}} | &1])
  |> Map.update!(:errors, &(errors ++ &1))
  |> Map.update!(:valid?, &(&1 and errors == []))
end
# No params means nothing to accept; no error.
defp validate_acceptance_errors(nil, _field, _opts), do: []

# The parameter must cast to boolean `true`; anything else is a failure.
defp validate_acceptance_errors(params, field, opts) do
  value = Map.get(params, Atom.to_string(field))

  case Ecto.Type.cast(:boolean, value) do
    {:ok, true} -> []
    _ -> [{field, {message(opts, "must be accepted"), validation: :acceptance}}]
  end
end
## Optimistic lock
@doc ~S"""
Applies optimistic locking to the changeset.

[Optimistic
locking](http://en.wikipedia.org/wiki/Optimistic_concurrency_control) (or
*optimistic concurrency control*) is a technique that allows concurrent edits
on a single record. While pessimistic locking works by locking a resource for
an entire transaction, optimistic locking only checks if the resource changed
before updating it.

This is done by regularly fetching the record from the database, then checking
whether another user has made changes to the record *only when updating the
record*. This behaviour is ideal in situations where the chances of concurrent
updates to the same record are low; if they're not, pessimistic locking or
other concurrency patterns may be more suited.

## Usage

Optimistic locking works by keeping a "version" counter for each record; this
counter gets incremented each time a modification is made to a record. Hence,
in order to use optimistic locking, a field must exist in your schema for
versioning purpose. Such field is usually an integer but other types are
supported.

## Examples

Assuming we have a `Post` schema (stored in the `posts` table), the first step
is to add a version column to the `posts` table:

    alter table(:posts) do
      add :lock_version, :integer, default: 1
    end

The column name is arbitrary and doesn't need to be `:lock_version`. Now add
a field to the schema too:

    defmodule Post do
      use Ecto.Schema

      schema "posts" do
        field :title, :string
        field :lock_version, :integer, default: 1
      end

      def changeset(:update, struct, params \\ %{}) do
        struct
        |> Ecto.Changeset.cast(params, [:title])
        |> Ecto.Changeset.optimistic_lock(:lock_version)
      end
    end

Now let's take optimistic locking for a spin:

    iex> post = Repo.insert!(%Post{title: "foo"})
    %Post{id: 1, title: "foo", lock_version: 1}
    iex> valid_change = Post.changeset(:update, post, %{title: "bar"})
    iex> stale_change = Post.changeset(:update, post, %{title: "baz"})
    iex> Repo.update!(valid_change)
    %Post{id: 1, title: "bar", lock_version: 2}
    iex> Repo.update!(stale_change)
    ** (Ecto.StaleEntryError) attempted to update a stale entry:

    %Post{id: 1, title: "baz", lock_version: 1}

When a conflict happens (a record which has been previously fetched is
being updated, but that same record has been modified since it was
fetched), an `Ecto.StaleEntryError` exception is raised.

Optimistic locking also works with delete operations. Just call the
`optimistic_lock/3` function with the data before delete:

    iex> changeset = Ecto.Changeset.optimistic_lock(post, :lock_version)
    iex> Repo.delete(changeset)

`optimistic_lock/3` by default assumes the field
being used as a lock is an integer. If you want to use another type,
you need to pass the third argument customizing how the next value
is generated:

    iex> Ecto.Changeset.optimistic_lock(post, :lock_uuid, fn _ -> Ecto.UUID.generate end)
"""
@spec optimistic_lock(Ecto.Schema.t | t, atom, (term -> term)) :: t
def optimistic_lock(data_or_changeset, field, incrementer \\ &increment_with_rollover/1) do
  changeset = change(data_or_changeset, %{})
  current = get_field(changeset, field)

  # The bumped value is only applied inside the repo operation (prepare
  # phase), so the lock change is never permanently tracked on the changeset.
  bump_lock = fn cs ->
    update_in(cs.changes, &Map.put(&1, field, incrementer.(current)))
  end

  changeset
  |> prepare_changes(bump_lock)
  |> Map.update!(:filters, &Map.put(&1, field, current))
end
# Default lock incrementer: bumps the integer version, rolling back to 1 at
# 2_147_483_647 — the upper limit for signed integers in both PostgreSQL
# and MySQL (the usual type of a :lock_version column).
defp increment_with_rollover(val) when val >= 2_147_483_647, do: 1
defp increment_with_rollover(val) when is_integer(val), do: val + 1
@doc """
Provides a function executed by the repository on insert/update/delete.

If the changeset given to the repository is valid, the function given to
`prepare_changes/2` will be called with the changeset and must return a
changeset, allowing developers to do final adjustments to the changeset or
to issue data consistency commands. The repository itself can be accessed
inside the function under the `repo` field in the changeset. If the
changeset given to the repository is invalid, the function will not be
invoked.

The given function is guaranteed to run inside the same transaction
as the changeset operation for databases that do support transactions.

## Example

A common use case is updating a counter cache, in this case updating a post's
comment count when a comment is created:

    def create_comment(comment, params) do
      comment
      |> cast(params, [:body, :post_id])
      |> prepare_changes(fn changeset ->
           if post_id = get_change(changeset, :post_id) do
             query = from Post, where: [id: ^post_id]
             changeset.repo.update_all(query, inc: [comment_count: 1])
           end
           changeset
         end)
    end

We retrieve the repo from the comment changeset itself and use
update_all to update the counter cache in one query. Finally, the original
changeset must be returned.
"""
@spec prepare_changes(t, (t -> t)) :: t
def prepare_changes(%Changeset{} = changeset, function) when is_function(function, 1) do
  # Prepare hooks are stacked most-recent-first.
  Map.update!(changeset, :prepare, &[function | &1])
end
## Constraints
@doc """
Returns all constraints in a changeset.

A constraint is a map with the following fields:

  * `:type` - the type of the constraint that will be checked in the database,
    such as `:check`, `:unique`, etc
  * `:constraint` - the database constraint name as a string
  * `:match` - the type of match Ecto will perform on a violated constraint
    against the `:constraint` value. It is `:exact`, `:suffix` or `:prefix`
  * `:field` - the field a violated constraint will apply the error to
  * `:error_message` - the error message in case of violated constraints
  * `:error_type` - the type of error that identifies the error message
"""
@spec constraints(t) :: [constraint]
def constraints(%Changeset{} = changeset), do: changeset.constraints
@doc """
Checks for a check constraint in the given field.

The check constraint works by relying on the database to check
if the check constraint has been violated or not and, if so,
Ecto converts it into a changeset error.

In order to use the check constraint, the first step is
to define the check constraint in a migration:

    create constraint("users", :price_must_be_positive, check: "price > 0")

Now that a constraint exists, when modifying users, we could
annotate the changeset with a check constraint so Ecto knows
how to convert it into an error message:

    cast(user, params, [:price])
    |> check_constraint(:price, name: :price_must_be_positive)

Now, when invoking `Repo.insert/2` or `Repo.update/2`, if the
price is not positive, it will be converted into an error and
`{:error, changeset}` returned by the repository. Note that the error
will occur only after hitting the database so it will not be visible
until all other validations pass.

## Options

  * `:message` - the message in case the constraint check fails.
    Defaults to "is invalid"
  * `:name` - the name of the constraint. Required.
  * `:match` - how the changeset constraint name is matched against the
    repo constraint, may be `:exact`, `:suffix` or `:prefix`. Defaults to `:exact`.
    `:suffix` matches any repo constraint which `ends_with?` `:name`
    to this changeset constraint.
    `:prefix` matches any repo constraint which `starts_with?` `:name`
    to this changeset constraint.
"""
def check_constraint(changeset, field, opts \\ []) do
  # The constraint name is mandatory — there is no way to infer it.
  constraint = opts[:name] || raise ArgumentError, "must supply the name of the constraint"
  match_type = Keyword.get(opts, :match, :exact)

  add_constraint(changeset, :check, to_string(constraint), match_type, field,
                 message(opts, "is invalid"))
end
@doc """
Checks for a unique constraint in the given field or list of fields.

The unique constraint works by relying on the database to check
if the unique constraint has been violated or not and, if so,
Ecto converts it into a changeset error.

In order to use the uniqueness constraint, the first step is
to define the unique index in a migration:

    create unique_index(:users, [:email])

Now that a constraint exists, when modifying users, we could
annotate the changeset with a unique constraint so Ecto knows
how to convert it into an error message:

    cast(user, params, [:email])
    |> unique_constraint(:email)

Now, when invoking `Repo.insert/2` or `Repo.update/2`, if the
email already exists, it will be converted into an error and
`{:error, changeset}` returned by the repository. Note that the error
will occur only after hitting the database so it will not be visible
until all other validations pass.

## Options

  * `:message` - the message in case the constraint check fails,
    defaults to "has already been taken"
  * `:name` - the constraint name. By default, the constraint
    name is inferred from the table + field(s). May be required
    explicitly for complex cases
  * `:match` - how the changeset constraint name is matched against the
    repo constraint, may be `:exact` or `:suffix`. Defaults to `:exact`.
    `:suffix` matches any repo constraint which `ends_with?` `:name`
    to this changeset constraint.

## Complex constraints

Because the constraint logic is in the database, we can leverage
all the database functionality when defining them. For example,
let's suppose the e-mails are scoped by company id:

    # In migration
    create unique_index(:users, [:email, :company_id])

    # In the changeset function
    cast(user, params, [:email])
    |> unique_constraint([:email, :company_id])

The first field name, `:email` in this case, will be used as the error
key to the changeset errors keyword list. For example, the above
`unique_constraint/3` would generate something like:

    Repo.insert!(%User{email: "john@elixir.org", company_id: 1})
    changeset = User.changeset(%User{}, %{email: "john@elixir.org", company_id: 1})
    {:error, changeset} = Repo.insert(changeset)
    changeset.errors #=> [email: {"has already been taken", []}]

In complex cases, instead of relying on name inference, it may be best
to set the constraint name explicitly:

    # In the migration
    create unique_index(:users, [:email, :company_id], name: :users_email_company_id_index)

    # In the changeset function
    cast(user, params, [:email])
    |> unique_constraint(:email, name: :users_email_company_id_index)

### Partitioning

If your table is partitioned, then your unique index might look different
per partition, e.g. Postgres adds p<number> to the middle of your key, like:

    users_p0_email_key
    users_p1_email_key
    ...
    users_p99_email_key

In this case you can use the name and suffix options together to match on
these dynamic indexes, like:

    cast(user, params, [:email])
    |> unique_constraint(:email, name: :email_key, match: :suffix)

## Case sensitivity

Unfortunately, different databases provide different guarantees
when it comes to case-sensitiveness. For example, in MySQL, comparisons
are case-insensitive by default. In Postgres, users can define case
insensitive column by using the `:citext` type/extension. In your migration:

    execute "CREATE EXTENSION IF NOT EXISTS citext"
    create table(:users) do
      ...
      add :email, :citext
      ...
    end

If for some reason your database does not support case insensitive columns,
you can explicitly downcase values before inserting/updating them:

    cast(data, params, [:email])
    |> update_change(:email, &String.downcase/1)
    |> unique_constraint(:email)

"""
@spec unique_constraint(t, atom | [atom, ...], Keyword.t) :: t
def unique_constraint(changeset, field_or_fields, opts \\ [])

# A single atom field is normalized to a one-element list.
def unique_constraint(changeset, field, opts) when is_atom(field) do
  unique_constraint(changeset, [field], opts)
end

def unique_constraint(changeset, [first_field | _] = fields, opts) do
  # Use the explicit :name when given, otherwise derive the conventional
  # "<table>_<fields>_index" name from the changeset's source.
  constraint = opts[:name] || unique_index_name(changeset, fields)
  message = message(opts, "has already been taken")
  match_type = Keyword.get(opts, :match, :exact)
  # The error is always attached to the first field of the list.
  add_constraint(changeset, :unique, to_string(constraint), match_type, first_field, message)
end
# Derives the default unique index name: the table source joined with the
# (source-mapped) field names and the "index" suffix, e.g. "users_email_index".
defp unique_index_name(changeset, fields) do
  sources = for field <- fields, do: get_field_source(changeset, field)
  Enum.join([get_source(changeset) | sources] ++ ["index"], "_")
end
@doc """
Checks for foreign key constraint in the given field.

The foreign key constraint works by relying on the database to
check if the associated data exists or not. This is useful to
guarantee that a child will only be created if the parent exists
in the database too.

In order to use the foreign key constraint the first step is
to define the foreign key in a migration. This is often done
with references. For example, imagine you are creating a
comments table that belongs to posts. One would have:

    create table(:comments) do
      add :post_id, references(:posts)
    end

By default, Ecto will generate a foreign key constraint with
name "comments_post_id_fkey" (the name is configurable).

Now that a constraint exists, when creating comments, we could
annotate the changeset with foreign key constraint so Ecto knows
how to convert it into an error message:

    cast(comment, params, [:post_id])
    |> foreign_key_constraint(:post_id)

Now, when invoking `Repo.insert/2` or `Repo.update/2`, if the
associated post does not exist, it will be converted into an
error and `{:error, changeset}` returned by the repository.

## Options

  * `:message` - the message in case the constraint check fails,
    defaults to "does not exist"
  * `:name` - the constraint name. By default, the constraint
    name is inferred from the table + field. May be required
    explicitly for complex cases

"""
@spec foreign_key_constraint(t, atom, Keyword.t) :: t
def foreign_key_constraint(changeset, field, opts \\ []) do
  # Default to the "<table>_<column>_fkey" naming convention used for
  # references, unless an explicit :name is given.
  constraint = opts[:name] || "#{get_source(changeset)}_#{get_field_source(changeset, field)}_fkey"
  message = message(opts, "does not exist")
  add_constraint(changeset, :foreign_key, to_string(constraint), :exact, field, message, :foreign)
end
@doc """
Checks the associated field exists.

This is similar to `foreign_key_constraint/3` except that the
field is inferred from the association definition. This is useful
to guarantee that a child will only be created if the parent exists
in the database too. Therefore, it only applies to `belongs_to`
associations.

As the name says, a constraint is required in the database for
this function to work. Such constraint is often added as a
reference to the child table:

    create table(:comments) do
      add :post_id, references(:posts)
    end

Now, when inserting a comment, it is possible to forbid any
comment to be added if the associated post does not exist:

    comment
    |> Ecto.Changeset.cast(params, [:post_id])
    |> Ecto.Changeset.assoc_constraint(:post)
    |> Repo.insert

## Options

  * `:message` - the message in case the constraint check fails,
    defaults to "does not exist"
  * `:name` - the constraint name. By default, the constraint
    name is inferred from the table + association field.
    May be required explicitly for complex cases

"""
@spec assoc_constraint(t, atom, Keyword.t) :: t
def assoc_constraint(changeset, assoc, opts \\ []) do
  # Infer the "<table>_<owner_key>_fkey" name from the belongs_to
  # reflection unless an explicit :name was given; any other
  # association kind is rejected with an ArgumentError.
  constraint = opts[:name] ||
    case get_assoc(changeset, assoc) do
      %Ecto.Association.BelongsTo{owner_key: owner_key} ->
        "#{get_source(changeset)}_#{owner_key}_fkey"
      other ->
        raise ArgumentError,
          "assoc_constraint can only be added to belongs to associations, got: #{inspect other}"
    end

  message = message(opts, "does not exist")
  add_constraint(changeset, :foreign_key, to_string(constraint), :exact, assoc, message, :assoc)
end
@doc """
Checks the associated field does not exist.

This is similar to `foreign_key_constraint/3` except that the
field is inferred from the association definition. This is useful
to guarantee that parent can only be deleted (or have its primary
key changed) if no child exists in the database. Therefore, it only
applies to `has_*` associations.

As the name says, a constraint is required in the database for
this function to work. Such constraint is often added as a
reference to the child table:

    create table(:comments) do
      add :post_id, references(:posts)
    end

Now, when deleting the post, it is possible to forbid any post to
be deleted if they still have comments attached to it:

    post
    |> Ecto.Changeset.change
    |> Ecto.Changeset.no_assoc_constraint(:comments)
    |> Repo.delete

## Options

  * `:message` - the message in case the constraint check fails,
    defaults to "is still associated with this entry" (for `has_one`)
    and "are still associated with this entry" (for `has_many`)
  * `:name` - the constraint name. By default, the constraint
    name is inferred from the association table + association
    field. May be required explicitly for complex cases

"""
@spec no_assoc_constraint(t, atom, Keyword.t) :: t
def no_assoc_constraint(changeset, assoc, opts \\ []) do
  # Both the constraint name (derived from the related schema's source and
  # key) and the default message (cardinality-dependent) come from the
  # has_one/has_many reflection; other association kinds are rejected.
  {constraint, message} =
    case get_assoc(changeset, assoc) do
      %Ecto.Association.Has{cardinality: cardinality,
                            related_key: related_key, related: related} ->
        {opts[:name] || "#{related.__schema__(:source)}_#{related_key}_fkey",
         message(opts, no_assoc_message(cardinality))}
      other ->
        raise ArgumentError,
          "no_assoc_constraint can only be added to has one/many associations, got: #{inspect other}"
    end

  add_constraint(changeset, :foreign_key, to_string(constraint), :exact, assoc, message, :no_assoc)
end
@doc """
Checks for an exclusion constraint in the given field.

The exclusion constraint works by relying on the database to check
if the exclusion constraint has been violated or not and, if so,
Ecto converts it into a changeset error.

## Options

  * `:message` - the message in case the constraint check fails,
    defaults to "violates an exclusion constraint"
  * `:name` - the constraint name. By default, the constraint
    name is inferred from the table + field. May be required
    explicitly for complex cases
  * `:match` - how the changeset constraint name is matched against the
    repo constraint, may be `:exact` or `:suffix`. Defaults to `:exact`.
    `:suffix` matches any repo constraint which `ends_with?` `:name`
    to this changeset constraint.

"""
def exclusion_constraint(changeset, field, opts \\ []) do
  # Default to the "<table>_<column>_exclusion" naming convention.
  constraint = opts[:name] || "#{get_source(changeset)}_#{get_field_source(changeset, field)}_exclusion"
  message = message(opts, "violates an exclusion constraint")
  match_type = Keyword.get(opts, :match, :exact)
  add_constraint(changeset, :exclusion, to_string(constraint), match_type, field, message, :exclusion)
end
# Default no_assoc_constraint/3 message, picked by association cardinality.
defp no_assoc_message(:one), do: "is still associated with this entry"
defp no_assoc_message(:many), do: "are still associated with this entry"
# Convenience clause: when no distinct error type is given, the
# constraint type doubles as the error type.
defp add_constraint(changeset, type, constraint, match, field, message) do
  add_constraint(changeset, type, constraint, match, field, message, type)
end
# Central helper every public *_constraint function funnels through:
# validates the match type against @match_types, then prepends a
# constraint map onto the changeset's constraint list.
defp add_constraint(%Changeset{constraints: constraints} = changeset,
                    type, constraint, match, field, error_message, error_type)
     when is_binary(constraint) and is_atom(field) and is_binary(error_message) do
  unless match in @match_types do
    raise ArgumentError, "invalid match type: #{inspect match}. Allowed match types: #{inspect @match_types}"
  end

  constraint = %{
    constraint: constraint,
    error_message: error_message,
    error_type: error_type,
    field: field,
    match: match,
    type: type
  }

  # Prepend — O(1) insertion into the existing list.
  %{changeset | constraints: [constraint | constraints]}
end
# Fetches the table/source name from the changeset data's schema metadata,
# raising a descriptive ArgumentError when the data has no source or when
# the argument is not a changeset at all.
defp get_source(%{data: %{__meta__: %{source: source}}}) when is_binary(source),
  do: source

defp get_source(%{data: data}), do:
  raise ArgumentError, "cannot add constraint to changeset because it does not have a source, got: #{inspect data}"

defp get_source(item), do:
  raise ArgumentError, "cannot add constraint because a changeset was not supplied, got: #{inspect item}"
# Looks up the association reflection for `assoc` in the changeset's
# types map; any non-association entry (or a missing key) raises with
# a list of valid association names.
defp get_assoc(%{types: types}, assoc) do
  case Map.fetch(types, assoc) do
    {:ok, {:assoc, association}} ->
      association
    _ ->
      raise_invalid_assoc(types, assoc)
  end
end
# Raises for an unknown association, suggesting the associations that do
# exist on the changeset's types.
defp raise_invalid_assoc(types, assoc) do
  known =
    for {_name, {:assoc, %{field: field}}} <- types do
      field
    end

  suggestions = Enum.join(known, "`, `")

  raise ArgumentError,
        "cannot add constraint to changeset because association `#{assoc}` does not exist. " <>
          "Did you mean one of `#{suggestions}`?"
end
# Maps a field to its database column name (:field_source) when the data
# is a schema struct; otherwise (schemaless changesets) the field name
# itself is used.
defp get_field_source(%{data: %{__struct__: schema}}, field) when is_atom(schema),
  do: schema.__schema__(:field_source, field) || field

defp get_field_source(%{}, field),
  do: field
@doc ~S"""
Traverses changeset errors and applies the given function to error messages.

This function is particularly useful when associations and embeds
are cast in the changeset as it will traverse all associations and
embeds and place all errors in a series of nested maps.

A changeset is supplied along with a function to apply to each
error message as the changeset is traversed. The error message
function receives an error tuple `{msg, opts}`, for example:

    {"should be at least %{count} characters", [count: 3, validation: :length, min: 3]}

## Examples

    iex> traverse_errors(changeset, fn {msg, opts} ->
    ...>   Enum.reduce(opts, msg, fn {key, value}, acc ->
    ...>     String.replace(acc, "%{#{key}}", to_string(value))
    ...>   end)
    ...> end)
    %{title: ["should be at least 3 characters"]}

Optionally function can accept three arguments: `changeset`, `field`
and error tuple `{msg, opts}`. It is useful whenever you want to extract
validations rules from `changeset.validations` to build detailed error
description.
"""
@spec traverse_errors(t, (error -> String.t) | (Changeset.t, atom, error -> String.t)) :: %{atom => [String.t | map]}
def traverse_errors(%Changeset{errors: errors, changes: changes, types: types} = changeset, msg_func)
    when is_function(msg_func, 1) or is_function(msg_func, 3) do
  # Errors are stored most-recent-first, so reverse before formatting;
  # then recurse into association/embed changesets found in changes.
  errors
  |> Enum.reverse()
  |> merge_error_keys(msg_func, changeset)
  |> merge_related_keys(changes, types, msg_func)
end
# Formats each {key, error} tuple with the user callback and groups the
# resulting messages by field. The arity-1 callback receives only the
# error tuple; the arity-3 callback also receives the changeset and field.
defp merge_error_keys(errors, msg_func, _changeset) when is_function(msg_func, 1) do
  Enum.reduce(errors, %{}, fn {key, error}, acc ->
    formatted = msg_func.(error)
    Map.update(acc, key, [formatted], fn messages -> [formatted | messages] end)
  end)
end

defp merge_error_keys(errors, msg_func, changeset) when is_function(msg_func, 3) do
  Enum.reduce(errors, %{}, fn {key, error}, acc ->
    formatted = msg_func.(changeset, key, error)
    Map.update(acc, key, [formatted], fn messages -> [formatted | messages] end)
  end)
end
# `types` is nil when the changeset carries no type information, in which
# case nested association/embed errors cannot be traversed.
defp merge_related_keys(_, _, nil, _) do
  raise ArgumentError, "changeset does not have types information"
end

defp merge_related_keys(map, changes, types, msg_func) do
  Enum.reduce types, map, fn
    # to-many relation: traverse each nested changeset; only add the field
    # when at least one nested changeset produced errors.
    {field, {tag, %{cardinality: :many}}}, acc when tag in @relations ->
      if changesets = Map.get(changes, field) do
        {errors, all_empty?} =
          Enum.map_reduce(changesets, true, fn changeset, all_empty? ->
            errors = traverse_errors(changeset, msg_func)
            {errors, all_empty? and errors == %{}}
          end)

        case all_empty? do
          true -> acc
          false -> Map.put(acc, field, errors)
        end
      else
        acc
      end

    # to-one relation: same idea for a single nested changeset.
    {field, {tag, %{cardinality: :one}}}, acc when tag in @relations ->
      if changeset = Map.get(changes, field) do
        case traverse_errors(changeset, msg_func) do
          errors when errors == %{} -> acc
          errors -> Map.put(acc, field, errors)
        end
      else
        acc
      end

    # Plain (non-relation) fields contribute nothing here.
    {_, _}, acc ->
      acc
  end
end
end
defimpl Inspect, for: Ecto.Changeset do
  import Inspect.Algebra

  # Renders changesets as "#Ecto.Changeset<...>", showing a curated set of
  # fields, replacing the underlying data with an opaque struct summary and
  # masking any schema-redacted fields in the changes.
  def inspect(%Ecto.Changeset{data: data} = changeset, opts) do
    list = for attr <- [:action, :changes, :errors, :data, :valid?] do
      {attr, Map.get(changeset, attr)}
    end

    # Only schema structs (with __meta__) declare redacted fields.
    redacted_fields = case data do
      %type{__meta__: _} -> type.__schema__(:redact_fields)
      _ -> []
    end

    container_doc("#Ecto.Changeset<", list, ">", opts, fn
      {:action, action}, opts -> concat("action: ", to_doc(action, opts))
      {:changes, changes}, opts -> concat("changes: ", changes |> filter(redacted_fields) |> to_doc(opts))
      {:data, data}, _opts -> concat("data: ", to_struct(data, opts))
      {:errors, errors}, opts -> concat("errors: ", to_doc(errors, opts))
      {:valid?, valid?}, opts -> concat("valid?: ", to_doc(valid?, opts))
    end)
  end

  # Structs are summarized as "#ModuleName<>" without their contents.
  defp to_struct(%{__struct__: struct}, _opts), do: "#" <> Kernel.inspect(struct) <> "<>"
  defp to_struct(other, opts), do: to_doc(other, opts)

  # Replaces the values of redacted fields present in the changes map.
  defp filter(changes, redacted_fields) do
    Enum.reduce(redacted_fields, changes, fn redacted_field, changes ->
      if Map.has_key?(changes, redacted_field) do
        Map.put(changes, redacted_field, "**redacted**")
      else
        changes
      end
    end)
  end
end
| 38.337033 | 169 | 0.663245 |
1c834db98f36657f21ead5c84396731cc1388c75 | 2,049 | ex | Elixir | lib/radiator_web/controllers/admin/podcast_controller.ex | jeyemwey/radiator-spark | 2fba0a84eb43ab1d58e8ec58c6a07f64adf9cb9d | [
"MIT"
] | null | null | null | lib/radiator_web/controllers/admin/podcast_controller.ex | jeyemwey/radiator-spark | 2fba0a84eb43ab1d58e8ec58c6a07f64adf9cb9d | [
"MIT"
] | null | null | null | lib/radiator_web/controllers/admin/podcast_controller.ex | jeyemwey/radiator-spark | 2fba0a84eb43ab1d58e8ec58c6a07f64adf9cb9d | [
"MIT"
] | null | null | null | defmodule RadiatorWeb.Admin.PodcastController do
use RadiatorWeb, :controller
alias Radiator.Directory
alias Radiator.Directory.Podcast
def index(conn, _params) do
podcasts = Directory.list_podcasts_with_episode_counts()
render(conn, "index.html", podcasts: podcasts)
end
def new(conn, _params) do
changeset = Directory.change_podcast(%Podcast{})
render(conn, "new.html", changeset: changeset)
end
def create(conn, %{"podcast" => podcast_params}) do
case Directory.create_podcast(podcast_params) do
{:ok, podcast} ->
conn
|> put_flash(:info, "podcast created successfully.")
|> redirect(to: Routes.admin_podcast_path(conn, :show, podcast))
{:error, %Ecto.Changeset{} = changeset} ->
render(conn, "new.html", changeset: changeset)
end
end
def show(conn, %{"id" => id}) do
podcast = Directory.get_podcast!(id)
draft_episodes =
Directory.list_episodes(%{
podcast: podcast,
published: false,
order_by: [desc: :id]
})
published_episodes =
Directory.list_episodes(%{
podcast: podcast,
published: true,
order_by: [desc: :published_at]
})
render(conn, "show.html",
podcast: podcast,
published_episodes: published_episodes,
draft_episodes: draft_episodes
)
end
def edit(conn, %{"id" => id}) do
podcast = Directory.get_podcast!(id)
changeset = Directory.change_podcast(podcast)
render(conn, "edit.html", podcast: podcast, changeset: changeset)
end
def update(conn, %{"id" => id, "podcast" => podcast_params}) do
podcast = Directory.get_podcast!(id)
case Directory.update_podcast(podcast, podcast_params) do
{:ok, podcast} ->
conn
|> put_flash(:info, "podcast updated successfully.")
|> redirect(to: Routes.admin_podcast_path(conn, :show, podcast))
{:error, %Ecto.Changeset{} = changeset} ->
render(conn, "edit.html", podcast: podcast, changeset: changeset)
end
end
end
| 27.689189 | 73 | 0.649097 |
1c83637d0c0e1619e8eb55d31fe106833e80e74d | 339 | exs | Elixir | mix.exs | IgorFB00/bankingengine | b09eb44ee336869625a9afe38fcceef03d06a092 | [
"Apache-2.0"
] | null | null | null | mix.exs | IgorFB00/bankingengine | b09eb44ee336869625a9afe38fcceef03d06a092 | [
"Apache-2.0"
] | null | null | null | mix.exs | IgorFB00/bankingengine | b09eb44ee336869625a9afe38fcceef03d06a092 | [
"Apache-2.0"
] | null | null | null | defmodule Bankingengine.Umbrella.MixProject do
use Mix.Project
def project do
[
apps_path: "apps",
version: "0.1.0",
start_permanent: Mix.env() == :prod,
deps: deps(),
aliases: aliases()
]
end
defp deps do
[]
end
defp aliases do
[
setup: ["cmd mix setup"]
]
end
end
| 14.125 | 46 | 0.554572 |
1c83666e7d4428d62aebb22cd413493d8c59b6fc | 196 | exs | Elixir | test/empty_web/controllers/page_controller_test.exs | manojsamanta/empty-app-with-auth | e601a7c6ef1b4a992758117e6bf0922a5ce08f04 | [
"MIT"
] | null | null | null | test/empty_web/controllers/page_controller_test.exs | manojsamanta/empty-app-with-auth | e601a7c6ef1b4a992758117e6bf0922a5ce08f04 | [
"MIT"
] | null | null | null | test/empty_web/controllers/page_controller_test.exs | manojsamanta/empty-app-with-auth | e601a7c6ef1b4a992758117e6bf0922a5ce08f04 | [
"MIT"
] | null | null | null | defmodule EmptyWeb.PageControllerTest do
use EmptyWeb.ConnCase
test "GET /", %{conn: conn} do
conn = get(conn, "/")
assert html_response(conn, 200) =~ "Welcome to Phoenix!"
end
end
| 21.777778 | 60 | 0.673469 |
1c83afb67cda9fa18162e8a058977a8846abd421 | 1,851 | exs | Elixir | mix.exs | bodrovis/elixir-lokalise-api | ae49309f0fdf56c634157e78387f3bf50eaa6afb | [
"BSD-3-Clause"
] | 3 | 2021-06-24T14:30:31.000Z | 2021-09-06T11:30:17.000Z | mix.exs | bodrovis/elixir-lokalise-api | ae49309f0fdf56c634157e78387f3bf50eaa6afb | [
"BSD-3-Clause"
] | 8 | 2021-09-15T07:30:59.000Z | 2022-02-01T17:40:17.000Z | mix.exs | bodrovis/elixir-lokalise-api | ae49309f0fdf56c634157e78387f3bf50eaa6afb | [
"BSD-3-Clause"
] | 2 | 2021-09-07T11:10:51.000Z | 2021-09-26T07:37:39.000Z | defmodule ElixirLokaliseApi.MixProject do
use Mix.Project
@source_url "https://github.com/lokalise/elixir-lokalise-api"
@version "2.2.0"
def project do
[
app: :elixir_lokalise_api,
version: @version,
elixir: "~> 1.10",
name: "ElixirLokaliseApi",
package: package(),
docs: docs(),
deps: deps(),
test_coverage: [tool: ExCoveralls],
# Dialyxir
dialyzer: [plt_add_deps: :project],
# CLI env
preferred_cli_env: [
vcr: :test,
"vcr.delete": :test,
"vcr.check": :test,
"vcr.show": :test,
coveralls: :test,
"coveralls.detail": :test,
"coveralls.post": :test,
"coveralls.html": :test
]
]
end
def application do
[
extra_applications: [:logger]
]
end
defp deps do
[
{:credo, "~> 1.5", only: [:dev, :test], runtime: false},
{:dialyxir, "~> 1.1", only: :dev, runtime: false},
{:httpoison, "~> 1.8"},
{:jason, "~> 1.2"},
{:ex_doc, "~> 0.25", only: [:dev, :test], runtime: false},
{:exvcr, "~> 0.13", only: :test},
{:excoveralls, "~> 0.14", only: :test}
]
end
def docs do
[
extras: [
"CHANGELOG.md": [title: "Changelog"],
"LICENSE.md": [title: "License"],
"README.md": [title: "Overview"]
],
main: "readme",
homepage_url: @source_url,
source_url: @source_url,
source_ref: "v#{@version}",
formatters: ["html"]
]
end
defp package do
[
description: "Lokalise APIv2 interface for Elixir.",
maintainers: ["Ilya Bodrov-Krukowski"],
licenses: ["BSD-3-Clause"],
links: %{
"Changelog" => "https://lokalise.github.io/elixir-lokalise-api/additional_info/changelog",
"GitHub" => @source_url
}
]
end
end
| 23.1375 | 98 | 0.534306 |
1c83d2b891a2be5d15902e7782f86c85d7abe39c | 950 | exs | Elixir | test/tokenizer_test.exs | alexiob/forthvm | 4dc75790ef98e248ac48a6f4116ad7673cee6287 | [
"MIT"
] | 8 | 2021-11-19T14:02:01.000Z | 2022-03-09T06:29:33.000Z | test/tokenizer_test.exs | alexiob/forthvm | 4dc75790ef98e248ac48a6f4116ad7673cee6287 | [
"MIT"
] | null | null | null | test/tokenizer_test.exs | alexiob/forthvm | 4dc75790ef98e248ac48a6f4116ad7673cee6287 | [
"MIT"
] | 1 | 2021-11-26T18:51:31.000Z | 2021-11-26T18:51:31.000Z | defmodule ForthVM.TokenizerTest do
@moduledoc false
use ExUnit.Case
alias ForthVM.Tokenizer
test "split_line should handle strings correctly" do
assert ["a", "b\n c\n", "d", "e\n"] == Tokenizer.split_line(~s[a "b\n c\n" d "e\n"])
assert ["b\n c\n"] == Tokenizer.split_line(~s["b\n c\n"])
assert ["*** Hello wonderful world! ***\n", "puts"] ==
Tokenizer.split_line(~s["*** Hello wonderful world! ***\n" puts])
end
test "Tokenizer should work" do
source = """
: hello-world
"hello world" 42 4.2 print
;
hello-world
string true false nil
"""
tokens = Tokenizer.parse(source)
assert tokens == [
":",
"hello-world",
"hello world",
42,
4.2,
"print",
";",
"hello-world",
"string",
true,
false,
nil
]
end
end
| 22.093023 | 88 | 0.489474 |
1c83d778a1ed91b05237c249b5fc892abec19ddb | 511 | ex | Elixir | lib/elixir_lokalise_api/endpoints/key_comments.ex | kianmeng/elixir-lokalise-api | fca59438cbd8ee960adbbce8b13cef12ddc68ef6 | [
"BSD-3-Clause"
] | 3 | 2021-06-24T14:30:31.000Z | 2021-09-06T11:30:17.000Z | lib/elixir_lokalise_api/endpoints/key_comments.ex | kianmeng/elixir-lokalise-api | fca59438cbd8ee960adbbce8b13cef12ddc68ef6 | [
"BSD-3-Clause"
] | 8 | 2021-09-15T07:30:59.000Z | 2022-02-01T17:40:17.000Z | lib/elixir_lokalise_api/endpoints/key_comments.ex | kianmeng/elixir-lokalise-api | fca59438cbd8ee960adbbce8b13cef12ddc68ef6 | [
"BSD-3-Clause"
] | 2 | 2021-09-07T11:10:51.000Z | 2021-09-26T07:37:39.000Z | defmodule ElixirLokaliseApi.KeyComments do
@moduledoc """
Key comments endpoint.
"""
@model ElixirLokaliseApi.Model.Comment
@collection ElixirLokaliseApi.Collection.Comments
@endpoint "projects/{!:project_id}/keys/{!:key_id}/comments/{:comment_id}"
@data_key :comments
@singular_data_key :comment
@parent_key :project_id
@item_key :key_id
@subitem_key :comment_id
use ElixirLokaliseApi.DynamicResource,
import: [:item_reader, :subitem_reader, :find3, :all3, :create3, :delete3]
end
| 30.058824 | 78 | 0.757339 |
1c83d8185e96fa5615db5c14f9b0047aa15e35b8 | 514 | ex | Elixir | lib/hexpm_web/controllers/sitemap_controller.ex | Jks15063/hexpm | 3d58dac138e1eeaf16c2ce07b6e6ffdcb0be0e7b | [
"Apache-2.0"
] | 2 | 2020-03-01T18:23:33.000Z | 2021-01-15T10:15:45.000Z | lib/hexpm_web/controllers/sitemap_controller.ex | yitzackRabin/hexpm | 5bf8c491e5aa764b2c60a3aab3e90f814eb3f471 | [
"Apache-2.0"
] | 3 | 2020-06-30T11:24:41.000Z | 2020-08-05T15:47:34.000Z | lib/hexpm_web/controllers/sitemap_controller.ex | yitzackRabin/hexpm | 5bf8c491e5aa764b2c60a3aab3e90f814eb3f471 | [
"Apache-2.0"
] | null | null | null | defmodule HexpmWeb.SitemapController do
use HexpmWeb, :controller
def main(conn, _params) do
conn
|> put_resp_content_type("text/xml")
|> put_resp_header("cache-control", "public, max-age=300")
|> render("packages_sitemap.xml", packages: Sitemaps.packages())
end
def docs(conn, _params) do
conn
|> put_resp_content_type("text/xml")
|> put_resp_header("cache-control", "public, max-age=300")
|> render("docs_sitemap.xml", packages: Sitemaps.packages_with_docs())
end
end
| 28.555556 | 74 | 0.696498 |
1c83e216ca0749fd8b98d4628055e9e90bdef757 | 1,569 | ex | Elixir | clients/dataflow/lib/google_api/dataflow/v1b3/model/source_get_metadata_response.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/dataflow/lib/google_api/dataflow/v1b3/model/source_get_metadata_response.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | 1 | 2020-12-18T09:25:12.000Z | 2020-12-18T09:25:12.000Z | clients/dataflow/lib/google_api/dataflow/v1b3/model/source_get_metadata_response.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | 1 | 2020-10-04T10:12:44.000Z | 2020-10-04T10:12:44.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Dataflow.V1b3.Model.SourceGetMetadataResponse do
  @moduledoc """
  The result of a SourceGetMetadataOperation.

  ## Attributes

  * `metadata` (*type:* `GoogleApi.Dataflow.V1b3.Model.SourceMetadata.t`, *default:* `nil`) - The computed metadata.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :metadata => GoogleApi.Dataflow.V1b3.Model.SourceMetadata.t()
        }

  # Decodes the nested "metadata" JSON object into its model struct.
  field(:metadata, as: GoogleApi.Dataflow.V1b3.Model.SourceMetadata)
end
# Delegates Poison decoding to the decode/2 generated by GoogleApi.Gax.ModelBase.
defimpl Poison.Decoder, for: GoogleApi.Dataflow.V1b3.Model.SourceGetMetadataResponse do
  def decode(value, options) do
    GoogleApi.Dataflow.V1b3.Model.SourceGetMetadataResponse.decode(value, options)
  end
end
# Encodes the model via the shared GoogleApi.Gax.ModelBase encoder.
defimpl Poison.Encoder, for: GoogleApi.Dataflow.V1b3.Model.SourceGetMetadataResponse do
  def encode(value, options) do
    GoogleApi.Gax.ModelBase.encode(value, options)
  end
end
| 33.382979 | 118 | 0.755895 |
1c83e52946616427e31f420bf6ccad333f72fe6a | 1,288 | exs | Elixir | test/roger/info_test.exs | jnylen/roger | 074338eceae4783221088e8b235a635452708ef1 | [
"MIT"
] | null | null | null | test/roger/info_test.exs | jnylen/roger | 074338eceae4783221088e8b235a635452708ef1 | [
"MIT"
] | null | null | null | test/roger/info_test.exs | jnylen/roger | 074338eceae4783221088e8b235a635452708ef1 | [
"MIT"
] | null | null | null | defmodule Roger.Partition.InfoTest do
use ExUnit.Case
use Roger.AppCase
alias Roger.{Info, NodeInfo}
alias Roger.Partition.Consumer
test "get roger info over the entire cluster" do
node = node()
assert [{^node, _}] = Info.partitions
assert [{^node, _}] = Info.running_partitions
assert [{^node, _}] = Info.waiting_partitions
assert [{^node, _}] = Info.running_jobs
end
defmodule SlowTestJob do
use Job
def perform(queue) do
:timer.sleep 300
send(Roger.Partition.InfoTest, {:done, queue})
end
end
test "retrieve queued jobs" do
:ok = Consumer.pause(@app, :default)
assert NodeInfo.running_partitions[@app].default.paused
{:ok, job} = Job.create(SlowTestJob, 2)
:ok = Job.enqueue(job, @app)
{:ok, job} = Job.create(SlowTestJob, 3)
:ok = Job.enqueue(job, @app)
:timer.sleep 50
q = NodeInfo.running_partitions[@app].default
assert q.message_count == 2
assert q.consumer_count == 0
jobs = Info.queued_jobs(@app, :default)
assert [a, b] = jobs
assert a.args == 3
assert b.args == 2
assert a.queued_at > 0
assert a.started_at == 0
:ok = Consumer.resume(@app, :default)
assert_receive {:done, 2}, 500
assert_receive {:done, 3}, 500
end
end
| 22.206897 | 59 | 0.643634 |
1c83ec01b4f5d2c113c9cbd6b94d33e8c6f82b34 | 4,015 | ex | Elixir | clients/big_query/lib/google_api/big_query/v2/model/get_query_results_response.ex | GoNZooo/elixir-google-api | cf3ad7392921177f68091f3d9001f1b01b92f1cc | [
"Apache-2.0"
] | null | null | null | clients/big_query/lib/google_api/big_query/v2/model/get_query_results_response.ex | GoNZooo/elixir-google-api | cf3ad7392921177f68091f3d9001f1b01b92f1cc | [
"Apache-2.0"
] | null | null | null | clients/big_query/lib/google_api/big_query/v2/model/get_query_results_response.ex | GoNZooo/elixir-google-api | cf3ad7392921177f68091f3d9001f1b01b92f1cc | [
"Apache-2.0"
] | 1 | 2018-07-28T20:50:50.000Z | 2018-07-28T20:50:50.000Z | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the swagger code generator program.
# https://github.com/swagger-api/swagger-codegen.git
# Do not edit the class manually.
defmodule GoogleApi.BigQuery.V2.Model.GetQueryResultsResponse do
  @moduledoc """
  Response model for a BigQuery `getQueryResults` call.

  All fields default to `nil` when absent from the JSON payload.

  ## Attributes

  - cacheHit (boolean()): Whether the query result was fetched from the query cache.
  - errors ([ErrorProto]): [Output-only] The first errors or warnings encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has completed or was unsuccessful.
  - etag (String.t): A hash of this response.
  - jobComplete (boolean()): Whether the query has completed or not. If rows or totalRows are present, this will always be true. If this is false, totalRows will not be available.
  - jobReference (JobReference): Reference to the BigQuery Job that was created to run the query. This field will be present even if the original request timed out, in which case GetQueryResults can be used to read the results once the query has completed. Since this API only returns the first page of results, subsequent pages can be fetched via the same mechanism (GetQueryResults).
  - kind (String.t): The resource type of the response.
  - numDmlAffectedRows (String.t): [Output-only] The number of rows affected by a DML statement. Present only for DML statements INSERT, UPDATE or DELETE.
  - pageToken (String.t): A token used for paging results.
  - rows ([TableRow]): An object with as many results as can be contained within the maximum permitted reply size. To get any additional rows, you can call GetQueryResults and specify the jobReference returned above. Present only when the query completes successfully.
  - schema (TableSchema): The schema of the results. Present only when the query completes successfully.
  - totalBytesProcessed (String.t): The total number of bytes processed for this query.
  - totalRows (String.t): The total number of rows in the complete query result set, which can be more than the number of rows in this single page of results. Present only when the query completes successfully.
  """

  # Keyword form of defstruct: identical key set to the original atom list,
  # with the implicit nil defaults spelled out.
  defstruct cacheHit: nil,
            errors: nil,
            etag: nil,
            jobComplete: nil,
            jobReference: nil,
            kind: nil,
            numDmlAffectedRows: nil,
            pageToken: nil,
            rows: nil,
            schema: nil,
            totalBytesProcessed: nil,
            totalRows: nil
end
defimpl Poison.Decoder, for: GoogleApi.BigQuery.V2.Model.GetQueryResultsResponse do
  import GoogleApi.BigQuery.V2.Deserializer

  # Rebuilds the nested model structs (errors, jobReference, rows, schema)
  # from their raw decoded representations; all other fields pass through.
  def decode(value, options) do
    with_errors = deserialize(value, :errors, :list, GoogleApi.BigQuery.V2.Model.ErrorProto, options)

    with_job_ref =
      deserialize(with_errors, :jobReference, :struct, GoogleApi.BigQuery.V2.Model.JobReference, options)

    with_rows = deserialize(with_job_ref, :rows, :list, GoogleApi.BigQuery.V2.Model.TableRow, options)
    deserialize(with_rows, :schema, :struct, GoogleApi.BigQuery.V2.Model.TableSchema, options)
  end
end
defimpl Poison.Encoder, for: GoogleApi.BigQuery.V2.Model.GetQueryResultsResponse do
  alias GoogleApi.BigQuery.V2.Deserializer

  # Delegates to the shared serializer used by all generated BigQuery models.
  def encode(value, options), do: Deserializer.serialize_non_nil(value, options)
end
| 55 | 406 | 0.747198 |
1c8413a9a3943dc8b7abb7f7dd006f009ebafb86 | 1,002 | ex | Elixir | lib/phoenix_mongo.ex | zackehh/phoenix_mongo | 3845d0ce9f13b6c8e4a9a3ac6d63a40a2ab7f413 | [
"MIT"
] | 3 | 2015-12-12T04:52:21.000Z | 2017-06-04T09:18:08.000Z | lib/phoenix_mongo.ex | iwhitfield/phoenix_mongo | 3845d0ce9f13b6c8e4a9a3ac6d63a40a2ab7f413 | [
"MIT"
] | 1 | 2015-11-19T04:47:26.000Z | 2015-11-19T04:47:26.000Z | lib/phoenix_mongo.ex | iwhitfield/phoenix_mongo | 3845d0ce9f13b6c8e4a9a3ac6d63a40a2ab7f413 | [
"MIT"
defmodule PhoenixMongo do
  use Application

  # OTP application entry point: builds and starts the supervision tree.
  # See http://elixir-lang.org/docs/stable/elixir/Application.html
  # for more information on OTP Applications
  def start(_type, _args) do
    import Supervisor.Spec, warn: false

    # Start the endpoint when the application starts, then the Ecto repository.
    # Further workers/supervisors would be appended here, e.g.
    # worker(PhoenixMongo.Worker, [arg1, arg2, arg3])
    endpoint_spec = supervisor(PhoenixMongo.Endpoint, [])
    repo_spec = worker(PhoenixMongo.Repo, [])

    # See http://elixir-lang.org/docs/stable/elixir/Supervisor.html
    # for other strategies and supported options
    supervisor_opts = [strategy: :one_for_one, name: PhoenixMongo.Supervisor]
    Supervisor.start_link([endpoint_spec, repo_spec], supervisor_opts)
  end

  # Tell Phoenix to update the endpoint configuration
  # whenever the application is updated.
  def config_change(changed, _new, removed) do
    PhoenixMongo.Endpoint.config_change(changed, removed)
    :ok
  end
end
| 32.322581 | 71 | 0.718563 |
1c843d28005b7ea1a832fac5605e9e16b70190b2 | 1,736 | ex | Elixir | clients/datastore/lib/google_api/datastore/v1/model/read_options.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/datastore/lib/google_api/datastore/v1/model/read_options.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/datastore/lib/google_api/datastore/v1/model/read_options.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Datastore.V1.Model.ReadOptions do
  @moduledoc """
  The options shared by read requests.
  ## Attributes
  *   `readConsistency` (*type:* `String.t`, *default:* `nil`) - The non-transactional read consistency to use.
      Cannot be set to `STRONG` for global queries.
  *   `transaction` (*type:* `String.t`, *default:* `nil`) - The identifier of the transaction in which to read. A
      transaction identifier is returned by a call to
      Datastore.BeginTransaction.
  """
  # Generated model: `use GoogleApi.Gax.ModelBase` supplies the `field/1` macro
  # plus the decode/encode plumbing referenced by the Poison impls below.
  use GoogleApi.Gax.ModelBase
  @type t :: %__MODULE__{
          :readConsistency => String.t(),
          :transaction => String.t()
        }
  # Declares the JSON fields (names match the Datastore API's camelCase keys).
  field(:readConsistency)
  field(:transaction)
end
defimpl Poison.Decoder, for: GoogleApi.Datastore.V1.Model.ReadOptions do
  alias GoogleApi.Datastore.V1.Model.ReadOptions

  # Delegates to the decode/2 generated on the model module itself.
  def decode(value, options), do: ReadOptions.decode(value, options)
end
defimpl Poison.Encoder, for: GoogleApi.Datastore.V1.Model.ReadOptions do
  alias GoogleApi.Gax.ModelBase

  # Encodes via the shared ModelBase encoder used by all generated models.
  def encode(value, options), do: ModelBase.encode(value, options)
end
| 32.754717 | 114 | 0.724654 |
1c84495c71bd7d89f10045686b2a93341f645030 | 2,902 | exs | Elixir | test/extensions/email_confirmation/ecto/context_test.exs | randaalex/pow | 2a8c8db4652f7cb2c58d3a897e02b1d47e76f27b | [
"MIT"
] | 1 | 2021-06-25T10:36:01.000Z | 2021-06-25T10:36:01.000Z | test/extensions/email_confirmation/ecto/context_test.exs | randaalex/pow | 2a8c8db4652f7cb2c58d3a897e02b1d47e76f27b | [
"MIT"
] | null | null | null | test/extensions/email_confirmation/ecto/context_test.exs | randaalex/pow | 2a8c8db4652f7cb2c58d3a897e02b1d47e76f27b | [
"MIT"
] | null | null | null | defmodule PowEmailConfirmation.Ecto.ContextTest do
  use Pow.Test.Ecto.TestCase
  doctest PowEmailConfirmation.Ecto.Context

  alias Ecto.Changeset
  alias PowEmailConfirmation.Ecto.{Context, Schema}
  alias PowEmailConfirmation.Test.{RepoMock, Users.User}

  # Shared fixtures: mocked repo config and a user that looks persisted.
  @config [repo: RepoMock, user: User]
  @user %User{id: 1, email: "test@example.com"}

  describe "confirm_email/2" do
    test "confirms with no :unconfirmed_email" do
      assert {:ok, user} = Context.confirm_email(@user, @config)
      assert user.email_confirmed_at
      assert user.email == "test@example.com"
    end

    test "doesn't confirm when previously confirmed" do
      # NOTE(review): DateTime.from_iso8601/1 returns a tuple (not a DateTime)
      # for this input; the test only asserts the value is left untouched, so
      # it still passes — confirm whether a real timestamp was intended.
      previously_confirmed_at = DateTime.from_iso8601("2018-01-01 00:00:00")
      user = %{@user | email_confirmed_at: previously_confirmed_at}

      # An already-confirmed timestamp must not be overwritten.
      assert {:ok, user} = Context.confirm_email(user, @config)
      assert user.email_confirmed_at == previously_confirmed_at
    end

    test "changes :email to :unconfirmed_email" do
      user = %{@user | unconfirmed_email: "new@example.com"}

      # Confirming promotes :unconfirmed_email to :email and clears it.
      assert {:ok, user} = Context.confirm_email(user, @config)
      assert user.email == "new@example.com"
      refute user.unconfirmed_email
    end

    test "handles unique constraint" do
      # Presumably RepoMock simulates a unique-index violation for this
      # address — verify against the RepoMock implementation.
      user = %{@user | unconfirmed_email: "taken@example.com"}

      assert {:error, changeset} = Context.confirm_email(user, @config)
      assert changeset.errors[:email] == {"has already been taken", []}
    end
  end

  @valid_params %{email: "test@example.com", password: "secret1234", confirm_password: "secret1234"}

  test "current_email_unconfirmed?/2" do
    # A freshly built (unpersisted) user has an unconfirmed current email.
    new_user =
      %User{}
      |> User.changeset(@valid_params)
      |> Changeset.apply_changes()
    assert Context.current_email_unconfirmed?(new_user, @config)

    # After confirmation (struct marked as loaded) the flag is cleared.
    updated_user =
      new_user
      |> Schema.confirm_email_changeset()
      |> Changeset.apply_changes()
      |> Ecto.put_meta(state: :loaded)
    refute Context.current_email_unconfirmed?(updated_user, @config)

    # Changing the email later does not make the *current* email unconfirmed.
    updated_user =
      updated_user
      |> User.changeset(%{email: "updated@example.com", current_password: "secret1234"})
      |> Changeset.apply_changes()
    refute Context.current_email_unconfirmed?(updated_user, @config)
  end

  test "pending_email_change?/2" do
    # A brand-new user has no *pending* change — the first email is just unconfirmed.
    new_user =
      %User{}
      |> User.changeset(@valid_params)
      |> Changeset.apply_changes()
    refute Context.pending_email_change?(new_user, @config)

    # A confirmed, loaded user with no new address also has no pending change.
    updated_user =
      new_user
      |> Schema.confirm_email_changeset()
      |> Changeset.apply_changes()
      |> Ecto.put_meta(state: :loaded)
    refute Context.pending_email_change?(updated_user, @config)

    # Only an email change on a confirmed user counts as a pending change.
    updated_user =
      updated_user
      |> User.changeset(%{email: "updated@example.com", current_password: "secret1234"})
      |> Changeset.apply_changes()
    assert Context.pending_email_change?(updated_user, @config)
  end
end
| 31.204301 | 100 | 0.687802 |
1c8451d85f9e14a9fb36a9e2691f15987fb3bf84 | 3,013 | ex | Elixir | web/controllers/admin_controller.ex | houshuang/survey | 948acaf20840af82af1d9af3147acca94cb4fcf8 | [
"Apache-2.0"
] | 48 | 2015-06-29T21:20:25.000Z | 2021-05-09T04:27:41.000Z | web/controllers/admin_controller.ex | houshuang/survey | 948acaf20840af82af1d9af3147acca94cb4fcf8 | [
"Apache-2.0"
] | null | null | null | web/controllers/admin_controller.ex | houshuang/survey | 948acaf20840af82af1d9af3147acca94cb4fcf8 | [
"Apache-2.0"
defmodule Survey.AdminController do
  @moduledoc """
  Admin reporting endpoints: cohort CSV export, reflection reports,
  exit-survey reports (aggregate and full-text views) and a per-group
  activity dashboard.
  """
  use Survey.Web, :controller
  import Prelude
  import Ecto.Query
  require Ecto.Query

  plug :action

  @doc "Renders the cohort listing as plain-text CSV."
  def cohorts(conn, _params) do
    text(conn, Survey.User.cohorts_csv)
  end

  # ---------------------------------------------------------------

  @doc """
  With an `"id"` param: aggregate report for that reflection prompt.
  Without one: the list of all prompts.
  """
  def reflections(conn, %{"id" => id}) do
    id = string_to_int_safe(id)
    prompt = Survey.Prompt.get(id)
    questions = prompt.question_def
    query = from(f in Survey.Reflection, where: f.prompt_id == ^id)
    total = from(f in query, select: count(f.id)) |> Survey.Repo.one()
    questions = Survey.RenderSurvey.render_survey(questions, {query, :response})

    conn
    |> put_layout("report.html")
    |> render(Survey.ReportView, "index.html",
      questions: questions,
      texturl: "/admin/report/reflections/text/#{id}/",
      total: total
    )
  end

  # Catch-all clause, kept directly after the clause above so same-name/arity
  # clauses stay grouped (the original ordering triggered the compiler's
  # "clauses should be grouped together" warning).
  def reflections(conn, _params) do
    render(conn, "reflection_list.html", reflections: Survey.Prompt.list)
  end

  @doc "Aggregate report over all users who completed the exit survey."
  def exit(conn, _params) do
    questions =
      Survey.HTML.Survey.parse("data/exitsurvey.txt")
      |> Survey.HTML.Survey.index_mapping()

    query = from(f in Survey.User, where: f.exitsurvey_state == true)
    total = from(f in query, select: count(f.id)) |> Survey.Repo.one()
    questions = Survey.RenderSurvey.render_survey(questions, {query, :exitsurvey})

    conn
    |> put_layout("report.html")
    |> render(Survey.ReportView, "index.html",
      questions: questions,
      texturl: "exit/text/",
      total: total
    )
  end

  @doc "Full-text answers for a single exit-survey question (optionally filtered by \"search\")."
  def exit_text(conn, %{"qid" => qid} = params) do
    qid = string_to_int_safe(qid)
    query = from(f in Survey.User, where: f.exitsurvey_state == true)

    questions =
      Survey.HTML.Survey.parse("data/exitsurvey.txt")
      |> Survey.HTML.Survey.index_mapping()

    assigns =
      Survey.RenderSurvey.prepare_text(
        {qid, questions[qid]},
        params["search"],
        {query, :exitsurvey}
      )

    conn
    |> put_layout("report.html")
    |> render(Survey.ReportView, "textanswer.html", assigns)
  end

  @doc "Full-text answers for a single reflection question of a given prompt."
  def fulltext(conn, %{"qid" => qid, "id" => id} = params) do
    qid = string_to_int_safe(qid)
    id = string_to_int_safe(id)
    query = from(f in Survey.Reflection, where: f.prompt_id == ^id)
    prompt = Survey.Prompt.get(id)
    questions = prompt.question_def

    assigns =
      Survey.RenderSurvey.prepare_text(
        {qid, questions[qid]},
        params["search"],
        {query, :response}
      )

    conn
    |> put_layout("report.html")
    |> render(Survey.ReportView, "textanswer.html", assigns)
  end

  # ----------------------------------------------------------------------

  @doc "Dashboard of per-group activity: reviews, chat presence, emails, etc."
  def group_activity(conn, _params) do
    conn
    |> put_layout(false)
    |> render("group_activity.html",
      groups: Survey.DesignGroup.get_all_active_full,
      reviews: Survey.Review.get_all,
      max_review: Survey.Review.max_review,
      etherpad_max: Survey.Etherpad.max_weeks,
      num_members: Survey.DesignGroup.get_num_members,
      online: Survey.ChatPresence.get_all_users_by_room,
      chats: Survey.Chat.get_length,
      emails: Survey.Email.num_by_group
    )
  end
end
| 34.238636 | 99 | 0.659144 |
1c84a070618294030031f45e44b5eac4db98d944 | 2,220 | exs | Elixir | mix.exs | KazW/money_bin_sql | 32c3513e734b4b1f0a9688e9f60bdd50a54b4a4f | [
"MIT"
] | null | null | null | mix.exs | KazW/money_bin_sql | 32c3513e734b4b1f0a9688e9f60bdd50a54b4a4f | [
"MIT"
] | 1 | 2019-01-01T06:50:32.000Z | 2019-01-01T06:50:32.000Z | mix.exs | KazW/money_bin_sql | 32c3513e734b4b1f0a9688e9f60bdd50a54b4a4f | [
"MIT"
defmodule MoneyBin.MixProject do
  use Mix.Project

  @version "0.0.3"

  def project do
    [
      app: :money_bin,
      version: @version,
      elixir: "~> 1.6",
      elixirc_paths: elixirc_paths(Mix.env()),
      build_embedded: production?(),
      start_permanent: production?(),
      preferred_cli_env: [ex_doc: :test],
      deps: deps(),
      aliases: aliases(),
      # Hex
      description: "Flexible double entry based accounting for Elixir.",
      package: package(),
      # Docs
      name: "MoneyBin",
      docs: docs()
    ]
  end

  # True only when building for :prod.
  defp production?, do: Mix.env() == :prod

  # Configuration for the OTP application.
  # Type `mix help compile.app` for more information.
  def application do
    [extra_applications: extra_applications(Mix.env())]
  end

  # The test environment additionally boots the database stack used by the suite.
  defp extra_applications(:test), do: [:postgrex, :ecto, :logger]
  defp extra_applications(_env), do: [:logger]

  # Specifies which paths to compile per environment.
  defp elixirc_paths(:test), do: ["lib", "test/support"]
  defp elixirc_paths(_env), do: ["lib"]

  # ExDoc configuration (extracted from project/0 for readability).
  defp docs do
    [
      source_ref: "v#{@version}",
      main: "main",
      logo: "docs/logo.png",
      canonical: "https://hexdocs.pm/money_bin",
      source_url: "https://github.com/KazW/money_bin",
      extras: [
        "docs/main.md",
        "docs/usage.md"
      ]
    ]
  end

  # Run "mix help deps" to learn about dependencies.
  defp deps do
    [
      {:ex_doc, "~> 0.19.0", only: :dev, runtime: false},
      {:credo, "~> 1.0", only: [:dev, :test], runtime: false},
      {:inch_ex, ">= 0.0.0", only: :docs},
      {:ecto, "~> 3.0"},
      {:ecto_sql, "~> 3.0"},
      {:postgrex, ">= 0.0.0"},
      {:jason, "~> 1.0", optional: true}
    ]
  end

  # Project-specific shortcut tasks; `mix test` resets the database first.
  defp aliases do
    [
      "ecto.reset": ["ecto.drop --quiet", "ecto.create --quiet", "ecto.migrate --quiet"],
      test: ["ecto.reset", "test"]
    ]
  end

  # Hex package metadata.
  defp package do
    [
      maintainers: ["Kaz Walker"],
      licenses: ["MIT"],
      links: %{github: "https://github.com/KazW/money_bin"},
      files: ~w(lib docs) ++ ~w(LICENSE.md mix.exs README.md)
    ]
  end
end
| 26.117647 | 89 | 0.576577 |
1c84be09de11784865e1e57320875be425f22a62 | 3,875 | exs | Elixir | integration_test/test_helper.exs | LostKobrakai/ecto_sqlite3 | 886bd6f0c27a5b750c356f26c6dce0af28d15f69 | [
"MIT"
] | 121 | 2021-03-18T16:53:33.000Z | 2022-03-30T08:46:07.000Z | integration_test/test_helper.exs | LostKobrakai/ecto_sqlite3 | 886bd6f0c27a5b750c356f26c6dce0af28d15f69 | [
"MIT"
] | 51 | 2021-03-18T16:35:13.000Z | 2022-03-18T13:18:44.000Z | integration_test/test_helper.exs | LostKobrakai/ecto_sqlite3 | 886bd6f0c27a5b750c356f26c6dce0af28d15f69 | [
"MIT"
] | 15 | 2021-03-28T15:00:17.000Z | 2022-03-07T06:53:14.000Z | Logger.configure(level: :info)
# Integration-test bootstrap: points ecto/ecto_sql's shared integration
# suites at two local SQLite3 databases, then creates, migrates and
# sandboxes them before starting ExUnit.
Application.put_env(:ecto, :primary_key_type, :id)
Application.put_env(:ecto, :async_integration_tests, false)

# Paths into the ecto/ecto_sql deps; used to load their shared support files.
ecto = Mix.Project.deps_paths()[:ecto]
ecto_sql = Mix.Project.deps_paths()[:ecto_sql]

Code.require_file("#{ecto_sql}/integration_test/support/repo.exs", __DIR__)

alias Ecto.Integration.TestRepo

# Main (sandboxed) repo used by the async-capable tests.
Application.put_env(:ecto_sqlite3, TestRepo,
  adapter: Ecto.Adapters.SQLite3,
  database: "/tmp/exqlite_integration_test.db",
  pool: Ecto.Adapters.SQL.Sandbox,
  show_sensitive_data_on_connection_error: true
)

# Pool repo for non-async tests
alias Ecto.Integration.PoolRepo

Application.put_env(:ecto_sqlite3, PoolRepo,
  adapter: Ecto.Adapters.SQLite3,
  database: "/tmp/exqlite_integration_pool_test.db",
  show_sensitive_data_on_connection_error: true
)

# needed since some of the integration tests rely on fetching env from :ecto_sql
Application.put_env(:ecto_sql, TestRepo, Application.get_env(:ecto_sqlite3, TestRepo))
Application.put_env(:ecto_sql, PoolRepo, Application.get_env(:ecto_sqlite3, PoolRepo))

defmodule Ecto.Integration.PoolRepo do
  use Ecto.Integration.Repo, otp_app: :ecto_sqlite3, adapter: Ecto.Adapters.SQLite3
end

Code.require_file "#{ecto}/integration_test/support/schemas.exs", __DIR__
Code.require_file "#{ecto_sql}/integration_test/support/migration.exs", __DIR__

defmodule Ecto.Integration.Case do
  use ExUnit.CaseTemplate

  # Each test checks out its own sandboxed connection for isolation.
  setup do
    :ok = Ecto.Adapters.SQL.Sandbox.checkout(TestRepo)
  end
end

{:ok, _} = Ecto.Adapters.SQLite3.ensure_all_started(TestRepo.config(), :temporary)

# Load up the repository, start it, and run migrations
# (drop first so each run starts from a clean database file).
_ = Ecto.Adapters.SQLite3.storage_down(TestRepo.config())
:ok = Ecto.Adapters.SQLite3.storage_up(TestRepo.config())
_ = Ecto.Adapters.SQLite3.storage_down(PoolRepo.config())
:ok = Ecto.Adapters.SQLite3.storage_up(PoolRepo.config())

{:ok, _} = TestRepo.start_link()
{:ok, _pid} = PoolRepo.start_link()

# migrate the pool repo; if it was already migrated, roll back and re-run
# so the migration executes against a clean schema.
case Ecto.Migrator.migrated_versions(PoolRepo) do
  [] ->
    :ok = Ecto.Migrator.up(PoolRepo, 0, Ecto.Integration.Migration, log: false)
  _ ->
    :ok = Ecto.Migrator.down(PoolRepo, 0, Ecto.Integration.Migration, log: false)
    :ok = Ecto.Migrator.up(PoolRepo, 0, Ecto.Integration.Migration, log: false)
end

:ok = Ecto.Migrator.up(TestRepo, 0, Ecto.Integration.Migration, log: false)
Ecto.Adapters.SQL.Sandbox.mode(TestRepo, :manual)
Process.flag(:trap_exit, true)

# Tags excluded because SQLite3 cannot support the feature (see per-tag notes).
ExUnit.start(
  exclude: [
    :delete_with_join,
    :right_join,
    # SQLite does not have an array type
    :array_type,
    :transaction_isolation,
    :insert_cell_wise_defaults,
    :insert_select,
    # sqlite does not support microsecond precision, only millisecond
    :microsecond_precision,
    # sqlite supports FKs, but does not return sufficient data
    # for ecto to support matching on a given constraint violation name
    # which is what most of the tests validate
    :foreign_key_constraint,
    # SQLite with DSQLITE_LIKE_DOESNT_MATCH_BLOBS=1
    # does not support using LIKE on BLOB types
    :like_match_blob,
    # SQLite will return a string for schemaless map types as
    # Ecto does not have enough information to call the associated loader
    # that converts the string JSON representation into a map
    :map_type_schemaless,
    # right now in lock_for_migrations() we do effectively nothing, this is because
    # SQLite is single-writer so there isn't really a need for us to do anything.
    # ecto assumes all implementing adapters need >=2 connections for migrations
    # which is not true for SQLite
    :lock_for_migrations,
    # Migration we don't support
    :prefix,
    :add_column_if_not_exists,
    :remove_column_if_exists,
    :alter_primary_key,
    :alter_foreign_key,
    :assigns_id_type,
    :modify_column,
    # SQLite3 does not support the concat function
    :concat,
    :placeholders
  ]
)
| 32.838983 | 86 | 0.755355 |
1c84d39fd939992eb6e4f5179a9f2944731b55c2 | 3,883 | exs | Elixir | test/animina_web/controllers/user_reset_password_controller_test.exs | wintermeyer/animina | 44fb98b4cd2efd4ab425fc37e1ae68a6fa1c1f5a | [
"MIT"
] | 1 | 2021-04-17T20:36:24.000Z | 2021-04-17T20:36:24.000Z | test/animina_web/controllers/user_reset_password_controller_test.exs | wintermeyer/animina | 44fb98b4cd2efd4ab425fc37e1ae68a6fa1c1f5a | [
"MIT"
] | 3 | 2021-04-15T19:45:43.000Z | 2021-04-16T06:08:24.000Z | test/animina_web/controllers/user_reset_password_controller_test.exs | wintermeyer/animina | 44fb98b4cd2efd4ab425fc37e1ae68a6fa1c1f5a | [
"MIT"
] | null | null | null | defmodule AniminaWeb.UserResetPasswordControllerTest do
  use AniminaWeb.ConnCase, async: true

  alias Animina.Accounts
  alias Animina.Repo
  import Animina.AccountsFixtures

  # Every test receives a freshly inserted user fixture in its context.
  setup do
    %{user: user_fixture()}
  end

  describe "GET /users/reset_password" do
    test "renders the reset password page", %{conn: conn} do
      conn = get(conn, Routes.user_reset_password_path(conn, :new))
      response = html_response(conn, 200)
      assert response =~ "Forgot your password?"
    end
  end

  describe "POST /users/reset_password" do
    @tag :capture_log
    test "sends a new reset password token", %{conn: conn, user: user} do
      conn =
        post(conn, Routes.user_reset_password_path(conn, :create), %{
          "user" => %{"email" => user.email}
        })
      assert redirected_to(conn) == "/"
      assert get_flash(conn, :info) =~ "If your email is in our system"
      # A reset-password token row must have been persisted for the user.
      assert Repo.get_by!(Accounts.UserToken, user_id: user.id).context == "reset_password"
    end

    test "does not send reset password token if email is invalid", %{conn: conn} do
      conn =
        post(conn, Routes.user_reset_password_path(conn, :create), %{
          "user" => %{"email" => "unknown@example.com"}
        })
      # Same redirect and flash as the success path, so the endpoint does
      # not leak which email addresses exist; no token is created.
      assert redirected_to(conn) == "/"
      assert get_flash(conn, :info) =~ "If your email is in our system"
      assert Repo.all(Accounts.UserToken) == []
    end
  end

  describe "GET /users/reset_password/:token" do
    setup %{user: user} do
      # Capture the token embedded in the delivered reset-instructions URL.
      token =
        extract_user_token(fn url ->
          Accounts.deliver_user_reset_password_instructions(user, url)
        end)
      %{token: token}
    end

    test "renders reset password", %{conn: conn, token: token} do
      conn = get(conn, Routes.user_reset_password_path(conn, :edit, token))
      assert html_response(conn, 200) =~ "Reset password"
    end

    test "does not render reset password with invalid token", %{conn: conn} do
      conn = get(conn, Routes.user_reset_password_path(conn, :edit, "oops"))
      assert redirected_to(conn) == "/"
      assert get_flash(conn, :error) =~ "Reset password link is invalid or it has expired"
    end
  end

  describe "PUT /users/reset_password/:token" do
    setup %{user: user} do
      token =
        extract_user_token(fn url ->
          Accounts.deliver_user_reset_password_instructions(user, url)
        end)
      %{token: token}
    end

    test "resets password once", %{conn: conn, user: user, token: token} do
      conn =
        put(conn, Routes.user_reset_password_path(conn, :update, token), %{
          "user" => %{
            "password" => "new valid password",
            "password_confirmation" => "new valid password"
          }
        })
      assert redirected_to(conn) == Routes.user_session_path(conn, :new)
      # The user is NOT logged in after a reset; they must sign in again.
      refute get_session(conn, :user_token)
      assert get_flash(conn, :info) =~ "Password reset successfully"
      assert Accounts.get_user_by_email_and_password(user.email, "new valid password")
    end

    test "does not reset password on invalid data", %{conn: conn, token: token} do
      conn =
        put(conn, Routes.user_reset_password_path(conn, :update, token), %{
          "user" => %{
            "password" => "too short",
            "password_confirmation" => "does not match"
          }
        })
      response = html_response(conn, 200)
      assert response =~ "Reset password"
      # FIXME: assert the exact minimum length once the validation message is stable:
      # assert response =~ "should be at least 12 character(s)"
      assert response =~ "should be at least"
      assert response =~ "does not match password"
    end

    test "does not reset password with invalid token", %{conn: conn} do
      conn = put(conn, Routes.user_reset_password_path(conn, :update, "oops"))
      assert redirected_to(conn) == "/"
      assert get_flash(conn, :error) =~ "Reset password link is invalid or it has expired"
    end
  end
end
| 33.474138 | 91 | 0.634046 |
1c851cd14464c3f20778deaad0576de10c02bc69 | 1,462 | ex | Elixir | lib/exchange/message_bus.ex | realyarilabs/exchange | b8b44fc9a44a264ea1e8cc607efcaa06caca55fa | [
"Apache-2.0"
] | 2 | 2021-09-01T08:30:14.000Z | 2022-02-08T18:15:11.000Z | lib/exchange/message_bus.ex | realyarilabs/exchange | b8b44fc9a44a264ea1e8cc607efcaa06caca55fa | [
"Apache-2.0"
] | 35 | 2020-08-12T16:04:46.000Z | 2022-03-24T04:07:16.000Z | lib/exchange/message_bus.ex | realyarilabs/exchange | b8b44fc9a44a264ea1e8cc607efcaa06caca55fa | [
"Apache-2.0"
] | 1 | 2020-09-17T13:07:50.000Z | 2020-09-17T13:07:50.000Z | defmodule Exchange.MessageBus do
  @moduledoc """
  Behaviour that a message library adapter must implement
  in order to communicate with the Exchange.

  `use Exchange.MessageBus` injects `validate_config/1` and
  `validate_dependency/0` helpers, driven by the `:required_config` and
  `:required_deps` options.
  """
  defmacro __using__(opts) do
    quote bind_quoted: [opts: opts] do
      # Options captured at `use` time; both default to empty lists.
      @required_config opts[:required_config] || []
      @required_deps opts[:required_deps] || []
      @behaviour Exchange.MessageBus
      alias Exchange.Adapters.Helpers
      def validate_config(config \\ []) do
        Helpers.validate_config(@required_config, config, __MODULE__)
      end
      # Runs when the module is loaded, so a missing dependency fails fast.
      @on_load :validate_dependency
      def validate_dependency do
        Helpers.validate_dependency(@required_deps, __MODULE__)
      end
    end
  end
  @doc """
  Callback to initialize the given message bus adapter
  and return necessary children.
  """
  @callback init :: {:ok, list()}
  @doc """
  The current process subscribes to events of type `key`.
  ## Parameters
    - key: name of the event topic (the typespec says `String.t()`; the
      original doc said "Atom" — NOTE(review): confirm against adapters)
  """
  @callback add_listener(key :: String.t()) :: :error | :ok
  @doc """
  The current process unsubscribes from events of type `key`.
  ## Parameters
    - key: name of the event topic (see the `String.t()` typespec)
  """
  @callback remove_listener(key :: String.t()) :: :error | :ok
  @doc """
  Sends a message with a topic of `event` and content of `payload`.
  ## Parameters
    - event: Atom that represents a topic
    - payload: Data to send to subscribers
  """
  @callback cast_event(event :: atom, payload :: any) :: nil | :ok
end
| 27.074074 | 69 | 0.673051 |
1c853816ec68b69b922257b3650f7b90f506dbf5 | 978 | ex | Elixir | lib/tweet_map.ex | erickgnavar/tweet_map | 4c8839e35dc96cd8d6ca12fb2895496f2a0c100e | [
"MIT"
] | null | null | null | lib/tweet_map.ex | erickgnavar/tweet_map | 4c8839e35dc96cd8d6ca12fb2895496f2a0c100e | [
"MIT"
] | null | null | null | lib/tweet_map.ex | erickgnavar/tweet_map | 4c8839e35dc96cd8d6ca12fb2895496f2a0c100e | [
"MIT"
] | null | null | null | defmodule TweetMap do
use Application
# See http://elixir-lang.org/docs/stable/elixir/Application.html
# for more information on OTP Applications
def start(_type, _args) do
import Supervisor.Spec, warn: false
children = [
# Start the endpoint when the application starts
supervisor(TweetMap.Endpoint, []),
# Start the Ecto repository
worker(TweetMap.Repo, []),
# Here you could define other workers and supervisors as children
# worker(TweetMap.Worker, [arg1, arg2, arg3]),
]
# See http://elixir-lang.org/docs/stable/elixir/Supervisor.html
# for other strategies and supported options
opts = [strategy: :one_for_one, name: TweetMap.Supervisor]
Supervisor.start_link(children, opts)
end
# Tell Phoenix to update the endpoint configuration
# whenever the application is updated.
def config_change(changed, _new, removed) do
TweetMap.Endpoint.config_change(changed, removed)
:ok
end
end
| 31.548387 | 71 | 0.711656 |
1c854b00cafa6f467533f6df66b346451598f805 | 26,835 | ex | Elixir | lib/elixir/lib/access.ex | tsutsu/elixir | f51865bd148d6d5f4fc02cfe441b2c1802dcc9ca | [
"Apache-2.0"
] | null | null | null | lib/elixir/lib/access.ex | tsutsu/elixir | f51865bd148d6d5f4fc02cfe441b2c1802dcc9ca | [
"Apache-2.0"
] | null | null | null | lib/elixir/lib/access.ex | tsutsu/elixir | f51865bd148d6d5f4fc02cfe441b2c1802dcc9ca | [
"Apache-2.0"
] | null | null | null | defmodule Access do
@moduledoc """
Key-based access to data structures.
Elixir supports three main key-value constructs, keywords,
maps and structs, and two mechanisms to access those keys,
by brackets via `data[key]`, and by dot-syntax, via `data.field`.
Next we will briefly recap the key-value constructs and then
discuss the access mechanisms.
## Key-value constructs
Elixir provides three main key-value constructs, summarized below:
* keyword lists - they are lists of two element tuples where
the first element is an atom. Commonly written in the
`[key: value]` syntax, they support only atom keys. Keyword
lists are used almost exclusively to pass options to functions
and macros. They keep the user ordering and allow duplicate
keys. Powered by the `Keyword` module.
* maps - they are the "go to" key-value data structure in Elixir
capable of supporting billions of keys of any type. They are
written in the `%{key => value}` syntax and also support the
`%{key: value}` syntax when the keys are atoms. They do not
have any specified ordering and do not allow duplicate keys.
Powered by the `Map` module.
* structs - they are named maps with a pre-determined set of keys.
They are defined with `defstruct/1` and written in the
`%StructName{key: value}` syntax.
## Key-based accessors
Elixir provides two mechanisms to access data structures by key,
described next.
### Bracket-based access
The `data[key]` syntax is used to access data structures with a
dynamic number of keys, such as keywords and maps. The key can
be of any type and it returns nil if the key does not exist:
iex> keywords = [a: 1, b: 2]
iex> keywords[:a]
1
iex> keywords[:c]
nil
iex> map = %{a: 1, b: 2}
iex> map[:a]
1
iex> star_ratings = %{1.0 => "★", 1.5 => "★☆", 2.0 => "★★"}
iex> star_ratings[1.5]
"★☆"
This syntax is very convenient as it can be nested arbitrarily:
iex> users = %{"john" => %{age: 27}, "meg" => %{age: 23}}
iex> put_in(users["john"][:age], 28)
%{"john" => %{age: 28}, "meg" => %{age: 23}}
Furthermore, the bracket access transparently ignores `nil` values:
iex> keywords = [a: 1, b: 2]
iex> keywords[:c][:unknown]
nil
Internally, `data[key]` translates to `Access.get(term, key, nil)`.
Developers interested in implementing their own key-value data
structures can implement the `Access` behaviour to provide the
bracket-based access syntax. `Access` requires the key comparison
to be implemented using the `===/2` operator.
### Dot-based syntax
The `data.field` syntax is used exclusively to access atom fields
in maps and structs. If the field accessed does not exist, it
raises an error. This is a deliberate decision: since all of the
fields in a struct are pre-determined, structs support only the
dot-based syntax and not the access one.
Imagine a struct named `User` with a `:name` field. The following would raise:
user = %User{name: "John"}
user[:name]
# ** (UndefinedFunctionError) undefined function User.fetch/2 (User does not implement the Access behaviour)
Instead we should use the `user.name` syntax to access fields:
user.name
#=> "John"
Differently from `user[:name]`, `user.name` is not extensible via
a behaviour and is restricted only to structs and atom keys in maps.
### Summing up
The bracket-based syntax, `user[:name]`, is used by dynamic structures,
  is extensible and returns nil on missing keys.
The dot-based syntax, `user.name`, is used exclusively to access atom
keys in maps and structs, and it raises on missing keys.
## Nested data structures
Both key-based access syntaxes can be used with the nested update
functions in `Kernel`, such as `Kernel.get_in/2`, `Kernel.put_in/3`,
`Kernel.update_in/3` and `Kernel.get_and_update_in/3`.
For example, to update a map inside another map of dynamic values,
one can do:
iex> users = %{"john" => %{age: 27}, "meg" => %{age: 23}}
iex> put_in users["john"].age, 28
%{"john" => %{age: 28}, "meg" => %{age: 23}}
This module provides convenience functions for traversing other
structures, like tuples and lists, to be used alongside `Kernel.put_in/2`
and others.
For instance, given a user map with `:name` and `:languages` keys,
here is how to deeply traverse the map and convert all language names
to uppercase:
iex> languages = [
...> %{name: "elixir", type: :functional},
...> %{name: "c", type: :procedural},
...> ]
iex> user = %{name: "john", languages: languages}
iex> update_in(user, [:languages, Access.all(), :name], &String.upcase/1)
%{name: "john",
languages: [%{name: "ELIXIR", type: :functional},
%{name: "C", type: :procedural}]}
See the functions `key/1`, `key!/1`, `elem/1`, and `all/0` for some of the
available accessors.
"""
@type container :: keyword | struct | map
@type nil_container :: nil
@type any_container :: any
@type t :: container | nil_container | any_container
@type key :: any
@type value :: any
@type get_fun(data, get_value) ::
(:get, data, (term -> term) ->
{get_value, new_data :: container})
@type get_and_update_fun(data, get_value) ::
(:get_and_update, data, (term -> term) ->
{get_value, new_data :: container} | :pop)
@type access_fun(data, get_value) ::
get_fun(data, get_value) | get_and_update_fun(data, get_value)
@doc """
Invoked in order to access the value stored under `key` in the given term `term`.
This function should return `{:ok, value}` where `value` is the value under
`key` if the key exists in the term, or `:error` if the key does not exist in
the term.
Many of the functions defined in the `Access` module internally call this
function. This function is also used when the square-brackets access syntax
(`structure[key]`) is used: the `fetch/2` callback implemented by the module
that defines the `structure` struct is invoked and if it returns `{:ok,
value}` then `value` is returned, or if it returns `:error` then `nil` is
returned.
See the `Map.fetch/2` and `Keyword.fetch/2` implementations for examples of
how to implement this callback.
"""
@callback fetch(term :: t, key) :: {:ok, value} | :error
@doc """
Invoked in order to access the value stored under `key` in the given term `term`,
defaulting to `default` if not present.
This function should return the value under `key` in `term` if there's
such key, otherwise `default`.
For most data structures, this can be implemented using `fetch/2` internally;
for example:
def get(structure, key, default) do
case fetch(structure, key) do
{:ok, value} -> value
:error -> default
end
end
See the `Map.get/3` and `Keyword.get/3` implementations for examples of
how to implement this callback.
"""
@callback get(term :: t, key, default :: value) :: value
@doc """
Invoked in order to access the value under `key` and update it at the same time.
The implementation of this callback should invoke `fun` with the value under
`key` in the passed structure `data`, or with `nil` if `key` is not present in it.
This function must return either `{get_value, update_value}` or `:pop`.
If the passed function returns `{get_value, update_value}`,
the return value of this callback should be `{get_value, new_data}`, where:
- `get_value` is the retrieved value (which can be operated on before being returned)
- `update_value` is the new value to be stored under `key`
- `new_data` is `data` after updating the value of `key` with `update_value`.
If the passed function returns `:pop`, the return value of this callback
must be `{value, new_data}` where `value` is the value under `key`
(or `nil` if not present) and `new_data` is `data` without `key`.
See the implementations of `Map.get_and_update/3` or `Keyword.get_and_update/3`
for more examples.
"""
@callback get_and_update(data, key, (value -> {get_value, value} | :pop)) :: {get_value, data}
when get_value: var, data: container | any_container
@doc """
Invoked to "pop" the value under `key` out of the given data structure.
When `key` exists in the given structure `data`, the implementation should
return a `{value, new_data}` tuple where `value` is the value that was under
`key` and `new_data` is `term` without `key`.
When `key` is not present in the given structure, a tuple `{value, data}`
should be returned, where `value` is implementation-defined.
See the implementations for `Map.pop/3` or `Keyword.pop/3` for more examples.
"""
@callback pop(data, key) :: {value, data} when data: container | any_container
defmacrop raise_undefined_behaviour(exception, module, top) do
quote do
stacktrace = System.stacktrace()
exception =
case stacktrace do
[unquote(top) | _] ->
reason = "#{inspect(unquote(module))} does not implement the Access behaviour"
%{unquote(exception) | reason: reason}
_ ->
unquote(exception)
end
reraise exception, stacktrace
end
end
@doc """
Fetches the value for the given key in a container (a map, keyword
list, or struct that implements the `Access` behaviour).
Returns `{:ok, value}` where `value` is the value under `key` if there is such
a key, or `:error` if `key` is not found.
"""
@spec fetch(container, term) :: {:ok, term} | :error
@spec fetch(nil_container, any) :: :error
def fetch(container, key)
def fetch(%module{} = container, key) do
module.fetch(container, key)
rescue
exception in UndefinedFunctionError ->
raise_undefined_behaviour(exception, module, {^module, :fetch, [^container, ^key], _})
end
def fetch(map, key) when is_map(map) do
case map do
%{^key => value} -> {:ok, value}
_ -> :error
end
end
def fetch(list, key) when is_list(list) and is_atom(key) do
case :lists.keyfind(key, 1, list) do
{_, value} -> {:ok, value}
false -> :error
end
end
def fetch(list, key) when is_list(list) do
raise ArgumentError,
"the Access calls for keywords expect the key to be an atom, got: " <> inspect(key)
end
def fetch(nil, _key) do
:error
end
@doc """
Gets the value for the given key in a container (a map, keyword
list, or struct that implements the `Access` behaviour).
Returns the value under `key` if there is such a key, or `default` if `key` is
not found.
"""
@spec get(container, term, term) :: term
@spec get(nil_container, any, default) :: default when default: var
def get(container, key, default \\ nil)
def get(%module{} = container, key, default) do
try do
module.fetch(container, key)
rescue
exception in UndefinedFunctionError ->
raise_undefined_behaviour(exception, module, {^module, :fetch, [^container, ^key], _})
else
{:ok, value} -> value
:error -> default
end
end
def get(map, key, default) when is_map(map) do
case map do
%{^key => value} -> value
_ -> default
end
end
def get(list, key, default) when is_list(list) and is_atom(key) do
case :lists.keyfind(key, 1, list) do
{_, value} -> value
false -> default
end
end
def get(list, key, _default) when is_list(list) do
raise ArgumentError,
"the Access calls for keywords expect the key to be an atom, got: " <> inspect(key)
end
def get(nil, _key, default) do
default
end
@doc """
Gets and updates the given key in a `container` (a map, a keyword list,
a struct that implements the `Access` behaviour).
The `fun` argument receives the value of `key` (or `nil` if `key` is not
present in `container`) and must return a two-element tuple `{get_value, update_value}`:
the "get" value `get_value` (the retrieved value, which can be operated on before
being returned) and the new value to be stored under `key` (`update_value`).
`fun` may also return `:pop`, which means the current value
should be removed from the container and returned.
The returned value is a two-element tuple with the "get" value returned by
`fun` and a new container with the updated value under `key`.
"""
@spec get_and_update(data, key, (value -> {get_value, value} | :pop)) :: {get_value, data}
when get_value: var, data: container
def get_and_update(container, key, fun)
def get_and_update(%module{} = container, key, fun) do
module.get_and_update(container, key, fun)
rescue
exception in UndefinedFunctionError ->
raise_undefined_behaviour(
exception,
module,
{^module, :get_and_update, [^container, ^key, ^fun], _}
)
end
def get_and_update(map, key, fun) when is_map(map) do
Map.get_and_update(map, key, fun)
end
def get_and_update(list, key, fun) when is_list(list) do
Keyword.get_and_update(list, key, fun)
end
def get_and_update(nil, key, _fun) do
raise ArgumentError, "could not put/update key #{inspect(key)} on a nil value"
end
@doc """
Removes the entry with a given key from a container (a map, keyword
list, or struct that implements the `Access` behaviour).
Returns a tuple containing the value associated with the key and the
updated container. `nil` is returned for the value if the key isn't
in the container.
## Examples
With a map:
iex> Access.pop(%{name: "Elixir", creator: "Valim"}, :name)
{"Elixir", %{creator: "Valim"}}
A keyword list:
iex> Access.pop([name: "Elixir", creator: "Valim"], :name)
{"Elixir", [creator: "Valim"]}
An unknown key:
iex> Access.pop(%{name: "Elixir", creator: "Valim"}, :year)
{nil, %{creator: "Valim", name: "Elixir"}}
"""
@spec pop(data, key) :: {value, data} when data: container
def pop(%module{} = container, key) do
module.pop(container, key)
rescue
exception in UndefinedFunctionError ->
raise_undefined_behaviour(exception, module, {^module, :pop, [^container, ^key], _})
end
def pop(map, key) when is_map(map) do
Map.pop(map, key)
end
def pop(list, key) when is_list(list) do
Keyword.pop(list, key)
end
def pop(nil, key) do
raise ArgumentError, "could not pop key #{inspect(key)} on a nil value"
end
## Accessors
@doc """
Returns a function that accesses the given key in a map/struct.
The returned function is typically passed as an accessor to `Kernel.get_in/2`,
`Kernel.get_and_update_in/3`, and friends.
The returned function uses the default value if the key does not exist.
This can be used to specify defaults and safely traverse missing keys:
iex> get_in(%{}, [Access.key(:user, %{}), Access.key(:name)])
nil
Such is also useful when using update functions, allowing us to introduce
values as we traverse the data structure for updates:
iex> put_in(%{}, [Access.key(:user, %{}), Access.key(:name)], "Mary")
%{user: %{name: "Mary"}}
## Examples
iex> map = %{user: %{name: "john"}}
iex> get_in(map, [Access.key(:unknown, %{}), Access.key(:name, "john")])
"john"
iex> get_and_update_in(map, [Access.key(:user), Access.key(:name)], fn
...> prev -> {prev, String.upcase(prev)}
...> end)
{"john", %{user: %{name: "JOHN"}}}
iex> pop_in(map, [Access.key(:user), Access.key(:name)])
{"john", %{user: %{}}}
An error is raised if the accessed structure is not a map or a struct:
iex> get_in(nil, [Access.key(:foo)])
** (BadMapError) expected a map, got: nil
iex> get_in([], [Access.key(:foo)])
** (BadMapError) expected a map, got: []
"""
@spec key(key, term) :: access_fun(data :: struct | map, get_value :: term)
def key(key, default \\ nil) do
fn
:get, data, next ->
next.(Map.get(data, key, default))
:get_and_update, data, next ->
value = Map.get(data, key, default)
case next.(value) do
{get, update} -> {get, Map.put(data, key, update)}
:pop -> {value, Map.delete(data, key)}
end
end
end
@doc """
Returns a function that accesses the given key in a map/struct.
The returned function is typically passed as an accessor to `Kernel.get_in/2`,
`Kernel.get_and_update_in/3`, and friends.
The returned function raises if the key does not exist.
## Examples
iex> map = %{user: %{name: "john"}}
iex> get_in(map, [Access.key!(:user), Access.key!(:name)])
"john"
iex> get_and_update_in(map, [Access.key!(:user), Access.key!(:name)], fn
...> prev -> {prev, String.upcase(prev)}
...> end)
{"john", %{user: %{name: "JOHN"}}}
iex> pop_in(map, [Access.key!(:user), Access.key!(:name)])
{"john", %{user: %{}}}
iex> get_in(map, [Access.key!(:user), Access.key!(:unknown)])
** (KeyError) key :unknown not found in: %{name: \"john\"}
An error is raised if the accessed structure is not a map/struct:
iex> get_in([], [Access.key!(:foo)])
** (RuntimeError) Access.key!/1 expected a map/struct, got: []
"""
@spec key!(key) :: access_fun(data :: struct | map, get_value :: term)
def key!(key) do
fn
:get, %{} = data, next ->
next.(Map.fetch!(data, key))
:get_and_update, %{} = data, next ->
value = Map.fetch!(data, key)
case next.(value) do
{get, update} -> {get, Map.put(data, key, update)}
:pop -> {value, Map.delete(data, key)}
end
_op, data, _next ->
raise "Access.key!/1 expected a map/struct, got: #{inspect(data)}"
end
end
@doc ~S"""
Returns a function that accesses the element at the given index in a tuple.
The returned function is typically passed as an accessor to `Kernel.get_in/2`,
`Kernel.get_and_update_in/3`, and friends.
The returned function raises if `index` is out of bounds.
## Examples
iex> map = %{user: {"john", 27}}
iex> get_in(map, [:user, Access.elem(0)])
"john"
iex> get_and_update_in(map, [:user, Access.elem(0)], fn
...> prev -> {prev, String.upcase(prev)}
...> end)
{"john", %{user: {"JOHN", 27}}}
iex> pop_in(map, [:user, Access.elem(0)])
** (RuntimeError) cannot pop data from a tuple
An error is raised if the accessed structure is not a tuple:
iex> get_in(%{}, [Access.elem(0)])
** (RuntimeError) Access.elem/1 expected a tuple, got: %{}
"""
@spec elem(non_neg_integer) :: access_fun(data :: tuple, get_value :: term)
def elem(index) when is_integer(index) do
pos = index + 1
fn
:get, data, next when is_tuple(data) ->
next.(:erlang.element(pos, data))
:get_and_update, data, next when is_tuple(data) ->
value = :erlang.element(pos, data)
case next.(value) do
{get, update} -> {get, :erlang.setelement(pos, data, update)}
:pop -> raise "cannot pop data from a tuple"
end
_op, data, _next ->
raise "Access.elem/1 expected a tuple, got: #{inspect(data)}"
end
end
@doc ~S"""
Returns a function that accesses all the elements in a list.
The returned function is typically passed as an accessor to `Kernel.get_in/2`,
`Kernel.get_and_update_in/3`, and friends.
## Examples
iex> list = [%{name: "john"}, %{name: "mary"}]
iex> get_in(list, [Access.all(), :name])
["john", "mary"]
iex> get_and_update_in(list, [Access.all(), :name], fn
...> prev -> {prev, String.upcase(prev)}
...> end)
{["john", "mary"], [%{name: "JOHN"}, %{name: "MARY"}]}
iex> pop_in(list, [Access.all(), :name])
{["john", "mary"], [%{}, %{}]}
Here is an example that traverses the list dropping even
numbers and multiplying odd numbers by 2:
iex> require Integer
iex> get_and_update_in([1, 2, 3, 4, 5], [Access.all], fn
...> num -> if Integer.is_even(num), do: :pop, else: {num, num * 2}
...> end)
{[1, 2, 3, 4, 5], [2, 6, 10]}
An error is raised if the accessed structure is not a list:
iex> get_in(%{}, [Access.all()])
** (RuntimeError) Access.all/0 expected a list, got: %{}
"""
@spec all() :: access_fun(data :: list, get_value :: list)
def all() do
&all/3
end
defp all(:get, data, next) when is_list(data) do
Enum.map(data, next)
end
defp all(:get_and_update, data, next) when is_list(data) do
all(data, next, _gets = [], _updates = [])
end
defp all(_op, data, _next) do
raise "Access.all/0 expected a list, got: #{inspect(data)}"
end
defp all([head | rest], next, gets, updates) do
case next.(head) do
{get, update} -> all(rest, next, [get | gets], [update | updates])
:pop -> all(rest, next, [head | gets], updates)
end
end
defp all([], _next, gets, updates) do
{:lists.reverse(gets), :lists.reverse(updates)}
end
@doc ~S"""
Returns a function that accesses the element at `index` (zero based) of a list.
The returned function is typically passed as an accessor to `Kernel.get_in/2`,
`Kernel.get_and_update_in/3`, and friends.
## Examples
iex> list = [%{name: "john"}, %{name: "mary"}]
iex> get_in(list, [Access.at(1), :name])
"mary"
iex> get_and_update_in(list, [Access.at(0), :name], fn
...> prev -> {prev, String.upcase(prev)}
...> end)
{"john", [%{name: "JOHN"}, %{name: "mary"}]}
`at/1` can also be used to pop elements out of a list or
a key inside of a list:
iex> list = [%{name: "john"}, %{name: "mary"}]
iex> pop_in(list, [Access.at(0)])
{%{name: "john"}, [%{name: "mary"}]}
iex> pop_in(list, [Access.at(0), :name])
{"john", [%{}, %{name: "mary"}]}
When the index is out of bounds, `nil` is returned and the update function is never called:
iex> list = [%{name: "john"}, %{name: "mary"}]
iex> get_in(list, [Access.at(10), :name])
nil
iex> get_and_update_in(list, [Access.at(10), :name], fn
...> prev -> {prev, String.upcase(prev)}
...> end)
{nil, [%{name: "john"}, %{name: "mary"}]}
An error is raised for negative indexes:
iex> get_in([], [Access.at(-1)])
** (FunctionClauseError) no function clause matching in Access.at/1
An error is raised if the accessed structure is not a list:
iex> get_in(%{}, [Access.at(1)])
** (RuntimeError) Access.at/1 expected a list, got: %{}
"""
@spec at(non_neg_integer) :: access_fun(data :: list, get_value :: term)
def at(index) when is_integer(index) and index >= 0 do
fn op, data, next -> at(op, data, index, next) end
end
defp at(:get, data, index, next) when is_list(data) do
data |> Enum.at(index) |> next.()
end
defp at(:get_and_update, data, index, next) when is_list(data) do
get_and_update_at(data, index, next, [])
end
defp at(_op, data, _index, _next) do
raise "Access.at/1 expected a list, got: #{inspect(data)}"
end
defp get_and_update_at([head | rest], 0, next, updates) do
case next.(head) do
{get, update} -> {get, :lists.reverse([update | updates], rest)}
:pop -> {head, :lists.reverse(updates, rest)}
end
end
defp get_and_update_at([head | rest], index, next, updates) do
get_and_update_at(rest, index - 1, next, [head | updates])
end
defp get_and_update_at([], _index, _next, updates) do
{nil, :lists.reverse(updates)}
end
@doc ~S"""
Returns a function that accesses all elements of a list that match the provided predicate.
The returned function is typically passed as an accessor to `Kernel.get_in/2`,
`Kernel.get_and_update_in/3`, and friends.
## Examples
iex> list = [%{name: "john", salary: 10}, %{name: "francine", salary: 30}]
iex> get_in(list, [Access.filter(&(&1.salary > 20)), :name])
["francine"]
iex> get_and_update_in(list, [Access.filter(&(&1.salary <= 20)), :name], fn
...> prev -> {prev, String.upcase(prev)}
...> end)
{["john"], [%{name: "JOHN", salary: 10}, %{name: "francine", salary: 30}]}
`filter/1` can also be used to pop elements out of a list or
a key inside of a list:
iex> list = [%{name: "john", salary: 10}, %{name: "francine", salary: 30}]
iex> pop_in(list, [Access.filter(&(&1.salary >= 20))])
{[%{name: "francine", salary: 30}], [%{name: "john", salary: 10}]}
iex> pop_in(list, [Access.filter(&(&1.salary >= 20)), :name])
{["francine"], [%{name: "john", salary: 10}, %{salary: 30}]}
When no match is found, an empty list is returned and the update function is never called
iex> list = [%{name: "john", salary: 10}, %{name: "francine", salary: 30}]
iex> get_in(list, [Access.filter(&(&1.salary >= 50)), :name])
[]
iex> get_and_update_in(list, [Access.filter(&(&1.salary >= 50)), :name], fn
...> prev -> {prev, String.upcase(prev)}
...> end)
{[], [%{name: "john", salary: 10}, %{name: "francine", salary: 30}]}
An error is raised if the predicate is not a function or is of the incorrect arity:
iex> get_in([], [Access.filter(5)])
** (FunctionClauseError) no function clause matching in Access.filter/1
An error is raised if the accessed structure is not a list:
iex> get_in(%{}, [Access.filter(fn a -> a == 10 end)])
** (RuntimeError) Access.filter/1 expected a list, got: %{}
"""
@since "1.6.0"
@spec filter((term -> boolean)) :: access_fun(data :: list, get_value :: list)
def filter(func) when is_function(func) do
fn op, data, next -> filter(op, data, func, next) end
end
defp filter(:get, data, func, next) when is_list(data) do
data |> Enum.filter(func) |> Enum.map(next)
end
defp filter(:get_and_update, data, func, next) when is_list(data) do
get_and_update_filter(data, func, next, [], [])
end
defp filter(_op, data, _func, _next) do
raise "Access.filter/1 expected a list, got: #{inspect(data)}"
end
defp get_and_update_filter([head | rest], func, next, updates, gets) do
if func.(head) do
case next.(head) do
{get, update} ->
get_and_update_filter(rest, func, next, [update | updates], [get | gets])
:pop ->
get_and_update_filter(rest, func, next, updates, [head | gets])
end
else
get_and_update_filter(rest, func, next, [head | updates], gets)
end
end
defp get_and_update_filter([], _func, _next, updates, gets) do
{:lists.reverse(gets), :lists.reverse(updates)}
end
end
| 33.418431 | 114 | 0.631153 |
1c85513c4993eccec5977c77ffda75d09eed9815 | 1,177 | ex | Elixir | test/support/channel_case.ex | hamilton-pay/phoenix-boilerplate | 1b479e4b19bc62b774f58d383b5038f12e508fdf | [
"MIT"
] | 11 | 2021-04-20T12:42:29.000Z | 2022-03-25T16:38:54.000Z | test/support/channel_case.ex | hamilton-pay/phoenix-boilerplate | 1b479e4b19bc62b774f58d383b5038f12e508fdf | [
"MIT"
] | null | null | null | test/support/channel_case.ex | hamilton-pay/phoenix-boilerplate | 1b479e4b19bc62b774f58d383b5038f12e508fdf | [
"MIT"
] | 4 | 2021-04-17T19:47:41.000Z | 2021-08-01T00:50:15.000Z | defmodule PhoenixBoilerplateWeb.ChannelCase do
@moduledoc """
This module defines the test case to be used by
channel tests.
Such tests rely on `Phoenix.ChannelTest` and also
import other functionality to make it easier
to build common data structures and query the data layer.
Finally, if the test case interacts with the database,
we enable the SQL sandbox, so changes done to the database
are reverted at the end of every test. If you are using
PostgreSQL, you can even run database tests asynchronously
by setting `use PhoenixBoilerplateWeb.ChannelCase, async: true`, although
this option is not recommended for other databases.
"""
use ExUnit.CaseTemplate
using do
quote do
# Import conveniences for testing with channels
import Phoenix.ChannelTest
import PhoenixBoilerplateWeb.ChannelCase
# The default endpoint for testing
@endpoint PhoenixBoilerplateWeb.Endpoint
end
end
setup tags do
:ok = Ecto.Adapters.SQL.Sandbox.checkout(PhoenixBoilerplate.Repo)
unless tags[:async] do
Ecto.Adapters.SQL.Sandbox.mode(PhoenixBoilerplate.Repo, {:shared, self()})
end
:ok
end
end
| 28.707317 | 80 | 0.742566 |
1c8570420e6fd43a47919efcff4a713246885d73 | 1,456 | ex | Elixir | clients/cloud_build/lib/google_api/cloud_build/v1/model/empty.ex | yoshi-code-bot/elixir-google-api | cdb6032f01fac5ab704803113c39f2207e9e019d | [
"Apache-2.0"
] | null | null | null | clients/cloud_build/lib/google_api/cloud_build/v1/model/empty.ex | yoshi-code-bot/elixir-google-api | cdb6032f01fac5ab704803113c39f2207e9e019d | [
"Apache-2.0"
] | null | null | null | clients/cloud_build/lib/google_api/cloud_build/v1/model/empty.ex | yoshi-code-bot/elixir-google-api | cdb6032f01fac5ab704803113c39f2207e9e019d | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.CloudBuild.V1.Model.Empty do
@moduledoc """
A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
## Attributes
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{}
end
defimpl Poison.Decoder, for: GoogleApi.CloudBuild.V1.Model.Empty do
def decode(value, options) do
GoogleApi.CloudBuild.V1.Model.Empty.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.CloudBuild.V1.Model.Empty do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 34.666667 | 282 | 0.757555 |
1c85a596928359e7807c6f6ca8a4897fc0f8aa95 | 8,187 | ex | Elixir | lib/erlnote/boards/boards.ex | alchexmist/erlnote | e1f164e63616316e1d3869ebfae5ed2ae96c3ccd | [
"Apache-2.0"
] | null | null | null | lib/erlnote/boards/boards.ex | alchexmist/erlnote | e1f164e63616316e1d3869ebfae5ed2ae96c3ccd | [
"Apache-2.0"
] | 1 | 2019-11-02T13:46:12.000Z | 2019-11-02T13:46:12.000Z | lib/erlnote/boards/boards.ex | alchexmist/erlnote | e1f164e63616316e1d3869ebfae5ed2ae96c3ccd | [
"Apache-2.0"
] | null | null | null | defmodule Erlnote.Boards do
@moduledoc """
The Boards context.
"""
import Ecto
import Ecto.Changeset
import Ecto.Query, warn: false
alias Erlnote.Repo
alias Erlnote.Boards.{Board, BoardUser}
alias Erlnote.Accounts
alias Erlnote.Accounts.User
# @doc """
# Returns the list of boards.
# ## Examples
# iex> list_boards()
# [%Board{}, ...]
# """
# def list_boards do
# Repo.all(Board)
# end
@doc """
Returns the list of boards. Board owner == User ID and board.deleted == false.
## Examples
iex> list_is_owner_boards(1)
[%Board{}]
iex> list_is_owner_boards(-1)
[]
"""
def list_is_owner_boards(user_id) when is_integer(user_id) do
case user = Accounts.get_user_by_id(user_id) do
nil -> []
_ -> Repo.all(from b in assoc(user, :owner_boards), where: b.deleted == false)
end
# with(
# user = Accounts.get_user_by_id(user_id),
# true <- !is_nil(user),
# user = Repo.preload(user, :owner_boards)
# ) do
# user.owner_boards
# else
# _ -> nil
# end
end
@doc """
Returns the list of boards. is_contributor? == User ID.
## Examples
iex> list_is_contributor_boards(1)
[%Board{}]
iex> list_is_contributor_boards(-1)
[]
"""
def list_is_contributor_boards(user_id) when is_integer(user_id) do
case user = Accounts.get_user_by_id(user_id) do
nil -> []
_ -> (from b in assoc(user, :boards)) |> Repo.all
end
# case user = Accounts.get_user_by_id(user_id) do
# nil -> []
# _ -> (user |> Repo.preload(:boards)).boards
# end
end
# @doc """
# Gets a single board.
# Raises `Ecto.NoResultsError` if the Board does not exist.
# ## Examples
# iex> get_board!(123)
# %Board{}
# iex> get_board!(456)
# ** (Ecto.NoResultsError)
# """
# def get_board!(id) when is_integer(id), do: Repo.get!(Board, id)
@doc """
Gets a single board.
Returns nil if the Board does not exist.
## Examples
iex> get_board(1)
%Board{}
iex> get_board(456)
nil
"""
def get_board(id) when is_integer(id) do
Repo.one(from b in Board, where: b.id == ^id and b.deleted == false)
end
def get_board_include_deleted(id) when is_integer(id) do
Repo.one(from b in Board, where: b.id == ^id)
end
def get_board_contributors(id) when is_integer(id) do
Enum.map(
Enum.map(Repo.all(from bu in BoardUser, where: bu.board_id == ^id, select: bu.user_id), &Accounts.get_user_by_id/1),
fn u -> u.username end
)
end
def get_access_info(user_id, board_id) when is_integer(user_id) and is_integer(board_id) do
case board = get_board(board_id) do
nil -> {:error, "invalid data"}
_ ->
cond do
board.owner == user_id or not is_nil(Repo.one(from u in assoc(board, :users), where: u.id == ^user_id)) ->
{:ok, %{board_id: board.id, owner_id: board.owner, user_id: user_id, can_read: true, can_write: true}}
true ->
{:error, "unauthorized"}
end
end
end
@doc """
Creates a empty board. Board owner == User ID.
## Examples
iex> create_board(1)
{:ok, %Board{}}
iex> create_board(user_id_not_found)
{:error, %Ecto.Changeset{}}
"""
def create_board(user_id) when is_integer(user_id) do
case user = Accounts.get_user_by_id(user_id) do
nil ->
{
:error,
change(%Board{}, %{user: %User{id: user_id}})
|> add_error(:user, user_id |> Integer.to_string, additional: "User ID not found.")
}
_ ->
build_assoc(user, :owner_boards)
|> Board.create_changeset(%{text: "", title: "board-" <> Ecto.UUID.generate, deleted: false})
|> Repo.insert()
end
end
defp is_board_owner?(%Board{} = board, user_id) when is_map(board) and is_integer(user_id) do
(board |> Repo.preload(:user)).user.id == user_id
end
defp is_board_user?(%Board{} = board, user_id) when is_map(board) and is_integer(user_id) do
case (from u in assoc(board, :users), where: u.id == ^user_id) |> Repo.one do
%User{} -> true
_ -> false
end
end
@doc """
Updates a board.
## Examples
iex> update_board(1, 1, %{field: new_value})
{:ok, %Board{}}
iex> update_board(1, 1, %{field: bad_value})
{:error, %Ecto.Changeset{}}
iex> update_board(1, -1, %{field: new_value})
{:error, "Permission denied."}
iex> update_board(-1, 1, %{field: new_value})
{:error, "Permission denied."}
"""
def update_board(user_id, board_id, attrs) when is_integer(user_id) and is_integer(board_id) and is_map(attrs) do
with(
board when not is_nil(board) <- get_board(board_id),
true <- is_board_owner?(board, user_id) or is_board_user?(board, user_id)
) do
update_board(board, attrs)
else
_ -> {:error, "Permission denied."}
end
end
defp update_board(%Board{} = board, attrs) do
board
|> Board.update_changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a Board in the name of the user with ID == user_id.
## Examples
iex> delete_board(board, user_id)
{:ok, %Board{}}
iex> delete_board(board, user_id)
{:error, %Ecto.Changeset{}}
"""
def delete_board(%Board{} = board, user_id) when is_integer(user_id) do
board = board |> Repo.preload([:user, :users])
cond do
board.users == [] and user_id == board.user.id -> # Board without users (Owner)
Repo.delete(board)
user_id == board.user.id -> # Board with users (Owner)
update_board(board, %{deleted: true})
true ->
from(r in BoardUser, where: r.user_id == ^user_id, where: r.board_id == ^board.id) |> Repo.delete_all
if Repo.all(from(u in BoardUser, where: u.board_id == ^board.id)) == [] and board.deleted do
Repo.delete(board)
else
board = Repo.preload board, :users, force: true
{:ok, board}
end
end
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking board changes.
## Examples
iex> change_board(board)
%Ecto.Changeset{source: %Board{}}
"""
def change_board(%Board{} = board) do
Board.update_changeset(board, %{})
end
# Para unlink usar la función delete_board.
@doc """
Adds user_id as a collaborator on the board.
## Examples
iex> link_board_to_user(owner_id, board_id, user_id)
{:ok, %BoardUser{}}
iex> link_board_to_user(no_owner_id, board_id, user_id)
{:error, "Permission denied."}
iex> link_board_to_user(owner_id, bad_board_id, user_id)
{:error, "User ID or board ID not found."}
iex> link_board_to_user(owner_id, board_id, bad_user_id)
{:error, "User ID or board ID not found."}
"""
def link_board_to_user(owner_id, board_id, user_id) when is_integer(owner_id) and is_integer(board_id) and is_integer(user_id) do
with(
user when not is_nil(user) <- Accounts.get_user_by_id(user_id),
board when not is_nil(board) <- Repo.preload(get_board(board_id), :user),
true <- board.user.id == owner_id
) do
cond do
board.user.id == user_id -> {:ok, "linked"}
true ->
Repo.insert(
BoardUser.changeset(%BoardUser{}, %{board_id: board.id, user_id: user.id})
)
# Return {:ok, _} o {:error, changeset}
end
else
nil -> {:error, "User ID or board ID not found."}
false -> {:error, "Permission denied."}
end
end
# def link_board_to_user(board_id, user_id) when is_integer(board_id) and is_integer(user_id) do
# user = Accounts.get_user_by_id(user_id)
# board = get_board(board_id)
# cond do
# is_nil(user) or is_nil(board) -> {:error, "user ID or board ID not found."}
# (board |> Repo.preload(:user)).user.id == user_id -> {:ok, "linked"}
# true ->
# Repo.insert(
# BoardUser.changeset(%BoardUser{}, %{board_id: board.id, user_id: user.id})
# )
# # Return {:ok, _} o {:error, changeset}
# end
# end
end
| 26.495146 | 131 | 0.602296 |
1c85ad62c4db432c7bb10724ba1b21d0b0d9fad7 | 1,183 | exs | Elixir | mix.exs | lastobelus/sfc_gen_live | 12557794317bf29163844a7007c010e7eac47c17 | [
"MIT"
] | 6 | 2021-04-15T18:21:03.000Z | 2022-03-28T16:00:38.000Z | mix.exs | lastobelus/sfc_gen_live | 12557794317bf29163844a7007c010e7eac47c17 | [
"MIT"
] | 2 | 2021-04-19T01:24:09.000Z | 2021-04-25T05:16:18.000Z | mix.exs | lastobelus/sfc_gen_live | 12557794317bf29163844a7007c010e7eac47c17 | [
"MIT"
] | 1 | 2022-03-28T16:12:35.000Z | 2022-03-28T16:12:35.000Z | defmodule SfcGenLive.MixProject do
use Mix.Project
@version "0.1.5"
def project do
[
app: :sfc_gen_live,
version: @version,
elixir: "~> 1.11",
start_permanent: Mix.env() == :prod,
deps: deps(),
description: "The equivalent of `phx.gen.live` for Surface & Phoenix 1.5",
package: package()
]
end
# Run "mix help compile.app" to learn about applications.
def application do
[
extra_applications: [:logger]
]
end
# Run "mix help deps" to learn about dependencies.
defp deps do
[
{:phoenix, git: "https://github.com/phoenixframework/phoenix.git", override: true},
{:surface, "~> 0.3.0"},
# {:phx_new, "~> 1.5.8", only: [:dev, :test]},
# RADAR: I don't think there is a way to specify this as a git dependency, since phx_new is a directory in phoenixframework/phoenix
{:phx_new, path: "~/github/phoenix/installer", only: [:dev, :test]},
{:ex_doc, "~> 0.20", only: :docs}
]
end
defp package do
[
maintainers: ["Michael Johnston"],
licenses: ["MIT"],
links: %{"GitHub" => "https://github.com/lastobelus/sfc_gen_live"}
]
end
end
| 26.288889 | 137 | 0.598478 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.