| code (stringlengths 114-1.05M) | path (stringlengths 3-312) | quality_prob (float64 0.5-0.99) | learning_prob (float64 0.2-1) | filename (stringlengths 3-168) | kind (stringclasses, 1 value) |
|---|---|---|---|---|---|
defmodule CoursePlannerWeb.AttendanceController do
@moduledoc false
use CoursePlannerWeb, :controller
alias CoursePlanner.{Attendances, Attendances.Attendance, Courses.OfferedCourse, Repo}
import Canary.Plugs
plug :authorize_controller
action_fallback CoursePlannerWeb.FallbackController
def index(%{assigns: %{current_user: %{id: _id, role: "Coordinator"}}} = conn, _params) do
offered_courses = Attendances.get_all_offered_courses()
render(conn, "index_coordinator.html", offered_courses: offered_courses)
end
def index(%{assigns: %{current_user: %{id: _id, role: "Supervisor"}}} = conn, _params) do
offered_courses = Attendances.get_all_offered_courses()
render(conn, "index_supervisor.html", offered_courses: offered_courses)
end
def index(%{assigns: %{current_user: %{id: id, role: "Teacher"}}} = conn, _params) do
offered_courses = Attendances.get_all_teacher_offered_courses(id)
render(conn, "index_teacher.html", offered_courses: offered_courses)
end
def index(%{assigns: %{current_user: %{id: id, role: "Student"}}} = conn, _params) do
offered_courses = Attendances.get_all_student_offered_courses(id)
render(conn, "index_student.html", offered_courses: offered_courses)
end
def show(%{assigns: %{current_user: %{id: _id, role: role}}} = conn,
%{"id" => offered_course_id}) when role in ["Coordinator", "Supervisor"] do
case Attendances.get_course_attendances(offered_course_id) do
nil -> {:error, :not_found}
offered_course ->
render(conn, "show_coordinator.html", offered_course: offered_course)
end
end
def show(%{assigns: %{current_user: %{id: id, role: "Teacher"}}} = conn,
%{"id" => offered_course_id}) do
case Attendances.get_teacher_course_attendances(offered_course_id, id) do
nil -> {:error, :not_found}
offered_course ->
render(conn, "show_teacher.html", offered_course: offered_course)
end
end
def show(%{assigns: %{current_user: %{id: id, role: "Student"}}} = conn,
%{"id" => offered_course_id}) do
offered_course =
OfferedCourse
|> Repo.get(offered_course_id)
|> Repo.preload([:term, :course, :teachers])
case Attendances.get_student_attendances(offered_course_id, id) do
[] -> {:error, :not_found}
attendances ->
render(conn, "show_student.html", attendances: attendances, offered_course: offered_course)
end
end
def fill_course(%{assigns: %{current_user: %{role: role}}} = conn, %{"attendance_id" => id})
when role in ["Coordinator", "Teacher"] do
offered_course = Attendances.get_course_attendances(id)
changeset = OfferedCourse.changeset(offered_course)
render(conn, "fill_course_attendance.html", offered_course: offered_course,
changeset: changeset)
end
def update_fill(%{assigns: %{current_user: %{role: role}}} = conn,
%{"offered_course" => %{"classes" => classes}, "attendance_id" => offered_course_id})
when role in ["Coordinator", "Teacher"] do
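# `classes` arrives as a map keyed by form index; each value carries an
# "attendances" map whose values are the individual attendance params.
# Flatten them into a single list before building changesets.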
attendances_data =
classes
|> Map.values()
|> Enum.flat_map(fn(class_value) ->
Map.values(class_value["attendances"])
end)
attendance_changeset_list =
attendances_data
|> Enum.map(fn(attendance_params) ->
attendance = Repo.get!(Attendance, attendance_params["id"])
Attendance.changeset(attendance, attendance_params)
end)
case Attendances.update_multiple_attendances(attendance_changeset_list) do
{:ok, _data} ->
conn
|> put_flash(:info, "Attendances updated successfully.")
|> redirect(to: attendance_path(conn, :show, offered_course_id))
{:error, _failed_operation, _failed_value, _changes_so_far} ->
offered_course = Attendances.get_course_attendances(offered_course_id)
changeset = OfferedCourse.changeset(offered_course)
conn
|> put_flash(:error, "Something went wrong.")
|> render("fill_course_attendance.html", offered_course: offered_course,
changeset: changeset)
end
end
end
| lib/course_planner_web/controllers/attendance_controller.ex | 0.612657 | 0.515864 | attendance_controller.ex | starcoder |
defmodule SudokuSolver.CPSCandidates do
@moduledoc """
Implements SudokuSolver using continuation passing style
"""
@behaviour SudokuSolver
@doc """
Solve a sudoku. Returns the solved board, or `nil` if no solution exists.
"""
@impl SudokuSolver
@spec solve(SudokuBoardCandidates.t()) :: SudokuBoardCandidates.t() | nil
def solve(%SudokuBoardCandidates{size: size} = board) do
max_index = size * size - 1
board = SudokuBoardCandidates.eliminate_candidates(board)
solve_helper(board, max_index, fn -> nil end)
end
# Solves the sudoku by attempting to populate cells starting at the end of the board and moving
# to the front. solve_helper keeps track of which cell we are currently trying.
# It calls the failure continuation `fc` when it needs to backtrack.
# (A toy illustration of this failure-continuation pattern follows this module.)
@spec solve_helper(SudokuBoardCandidates.t(), integer(), fun()) ::
SudokuBoardCandidates.t() | any()
defp solve_helper(%SudokuBoardCandidates{} = board, -1, fc) do
if SudokuBoardCandidates.solved?(board), do: board, else: fc.()
end
defp solve_helper(%SudokuBoardCandidates{} = board, idx, fc) do
candidates = SudokuBoardCandidates.get_candidates(board, idx)
if Enum.count(candidates) == 1 do
solve_helper(board, idx - 1, fc)
else
try_solve(board, idx, MapSet.to_list(candidates), fc)
end
end
# try_solve attempts to solve a board by populating a cell from a list of suggestions
defp try_solve(%SudokuBoardCandidates{}, _idx, [], fc), do: fc.()
defp try_solve(%SudokuBoardCandidates{} = board, idx, [suggestion | other_suggestions], fc) do
position = SudokuBoardCandidates.index_to_position(board, idx)
{:ok, new_board} = SudokuBoardCandidates.place_number(board, position, suggestion)
new_board = SudokuBoardCandidates.eliminate_candidates(new_board)
if SudokuBoardCandidates.partial_solution?(new_board) do
solve_helper(new_board, idx - 1, fn -> try_solve(board, idx, other_suggestions, fc) end)
else
try_solve(board, idx, other_suggestions, fc)
end
end
@doc """
Finds all possible solutions to a sudoku.
## Parameters
- board: A sudoku board
"""
@impl SudokuSolver
@spec all_solutions(SudokuBoardCandidates.t()) :: [SudokuBoardCandidates.t()]
def all_solutions(%SudokuBoardCandidates{} = board) do
max_index = board.size * board.size - 1
board = SudokuBoardCandidates.eliminate_candidates(board)
find_all_solutions_helper(board, max_index, fn -> [] end)
end
defp find_all_solutions_helper(board, -1, continuation) do
if SudokuBoardCandidates.solved?(board) do
[board | continuation.()]
else
continuation.()
end
end
defp find_all_solutions_helper(board, idx, continuation) do
candidates = SudokuBoardCandidates.get_candidates(board, idx)
if Enum.count(candidates) == 1 do
find_all_solutions_helper(board, idx - 1, continuation)
else
try_find_all_solutions(board, idx, MapSet.to_list(candidates), continuation)
end
end
defp try_find_all_solutions(_board, _idx, [], continuation), do: continuation.()
defp try_find_all_solutions(board, idx, [suggestion | other_suggestions], continuation) do
position = SudokuBoardCandidates.index_to_position(board, idx)
{:ok, new_board} = SudokuBoardCandidates.place_number(board, position, suggestion)
new_board = SudokuBoardCandidates.eliminate_candidates(new_board)
if SudokuBoardCandidates.partial_solution?(new_board) do
find_all_solutions_helper(new_board, idx - 1, fn ->
try_find_all_solutions(board, idx, other_suggestions, continuation)
end)
else
try_find_all_solutions(board, idx, other_suggestions, continuation)
end
end
end
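# Illustration only: the same continuation-passing backtracking shape as
# `solve_helper`/`try_solve` above, on a toy problem ("pick one candidate per
# slot so the choices sum to a target"). This module is hypothetical and not
# part of SudokuSolver.
defmodule CPSBacktrackExample do
# `acc` accumulates the choices made so far; `fc` is the failure
# continuation, i.e. "what to do when this branch fails" (backtrack).
def pick([], 0, acc, _fc), do: Enum.reverse(acc)
def pick([], _target, _acc, fc), do: fc.()
def pick([slot | rest], target, acc, fc), do: try_cands(slot, rest, target, acc, fc)
# Try each candidate in turn; on failure the continuation retries the rest.
defp try_cands([], _rest, _target, _acc, fc), do: fc.()
defp try_cands([c | cs], rest, target, acc, fc) do
pick(rest, target - c, [c | acc], fn -> try_cands(cs, rest, target, acc, fc) end)
end
end
# CPSBacktrackExample.pick([[1, 2], [3, 4]], 6, [], fn -> nil end) #=> [2, 4]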
| lib/sudoku_solver/cps_candidates.ex | 0.645902 | 0.48438 | cps_candidates.ex | starcoder |
defmodule Slipstream.Connection.State do
@moduledoc false
alias Slipstream.TelemetryHelper
# a struct for storing the internal state of a Slipstream.Connection
# process
# the module which implements the Slipstream behaviour is hereby known
# as the implementor
defstruct [
:connection_id,
:trace_id,
:client_pid,
:client_ref,
:config,
:conn,
:websocket,
:request_ref,
:join_params,
:heartbeat_timer,
:heartbeat_ref,
:metadata,
status: :opened,
joins: %{},
leaves: %{},
current_ref: 0,
current_ref_str: "0"
]
@doc """
Creates a new State data structure
The life-cycle of a `Slipstream.Connection` matches that of a websocket
connection between client and remote websocket server, so a new `State`
data structure represents a new connection (and therefore generates a new
connection_id for telemetry purposes).
"""
@spec new(config :: Slipstream.Configuration.t(), client_pid :: pid()) ::
%__MODULE__{}
def new(config, client_pid) do
%__MODULE__{
config: config,
client_pid: client_pid,
client_ref: Process.monitor(client_pid),
trace_id: TelemetryHelper.trace_id(),
connection_id: TelemetryHelper.id()
}
end
@doc """
Gets the next ref and increments the ref counter in state
The `ref` passed between a client and phoenix server is a marker which
can be used to link pushes to their replies. E.g. a heartbeat message from
the client will include a ref which will match the associated heartbeat
reply from the server, when the heartbeat is successful.
Refs are simply strings of incrementing integers.
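For example (shape only; the struct defaults to `current_ref: 0`):
    {ref, state} = Slipstream.Connection.State.next_ref(%Slipstream.Connection.State{})
    {ref, state.current_ref} #=> {"1", 1}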
"""
def next_ref(state) do
ref = state.current_ref + 1
{to_string(ref),
%__MODULE__{state | current_ref: ref, current_ref_str: to_string(ref)}}
end
# coveralls-ignore-start
def next_heartbeat_ref(state) do
{ref, state} = next_ref(state)
%__MODULE__{state | heartbeat_ref: ref}
end
# coveralls-ignore-stop
@doc """
Resets the heartbeat ref to nil
This is used to clear out a pending heartbeat. If the
`Slipstream.Commands.SendHeartbeat` command is received and the heartbeat_ref
in state is not nil, that means we have not received a reply to our heartbeat
request and that the server is potentially stuck or otherwise not responding.
"""
def reset_heartbeat(state) do
%__MODULE__{state | heartbeat_ref: nil}
end
@doc """
Detects if a ref is one used to join a topic
"""
def join_ref?(%__MODULE__{joins: joins}, ref), do: ref in Map.values(joins)
@doc """
Detects if a ref was used to request a topic leave
"""
def leave_ref?(%__MODULE__{leaves: leaves}, ref),
do: ref in Map.values(leaves)
end
| lib/slipstream/connection/state.ex | 0.809803 | 0.529568 | state.ex | starcoder |
defmodule UriQuery do
@doc """
For cases when lists and maps are used in GET parameters.
Returns keyword list corresponding to given enumerable
that is safe to be encoded into a query string.
Keys and Values can be any term that implements the `String.Chars`
protocol. Lists cannot be used as keys.
Values can be lists or maps including nested maps.
## Examples
iex> query = %{"foo" => 1, "bar" => 2}
iex> UriQuery.params(query)
[{"bar", "2"}, {"foo", "1"}]
iex> query = %{foo: ["bar", "baz"]}
iex> UriQuery.params(query)
[{"foo[0]", "bar"}, {"foo[1]", "baz"}]
iex> query = %{foo: ["bar", "baz"]}
iex> UriQuery.params(query, add_indices_to_lists: false)
[{"foo[]", "bar"}, {"foo[]", "baz"}]
iex> query = %{"user" => %{"name" => "<NAME>", "email" => "<EMAIL>"}}
iex> UriQuery.params(query)
[{"user[email]", "<EMAIL>"}, {"user[name]", "<NAME>"}]
"""
@noKVListError "parameter for params/1 must be a list of key-value pairs"
def params(enumerable, opts \\ [])
def params(enumerable, opts) when is_list(enumerable) or is_map(enumerable) do
enumerable
|> Enum.reduce([], fn(pair, acc) -> accumulate_kv_pair("", pair, false, acc, opts) end)
|> Enum.reverse
end
def params(_, _), do: raise ArgumentError, @noKVListError
defp accumulate_kv_pair(_, {key, _}, _, _, _) when is_list(key) do
raise ArgumentError, "params/1 keys cannot be lists, got: #{inspect key}"
end
defp accumulate_kv_pair(prefix, {key, values}, _, acc, opts) when is_map(values) do
Enum.reduce(values, acc, fn (value, acc) -> accumulate_kv_pair(prefix, {key, value}, true, acc, opts) end)
end
defp accumulate_kv_pair(prefix, {key, values}, _, acc, [add_indices_to_lists: false] = opts) when is_list(values) do
Enum.reduce(values, acc, fn (value, acc) -> accumulate_kv_pair(prefix, {key, value}, true, acc, opts) end)
end
defp accumulate_kv_pair(prefix, {key, values}, _, acc, opts) when is_list(values) do
tuples =
if Keyword.keyword?(values) do
values
else
values
|> Enum.with_index
|> Enum.map(fn {value, index} -> {index, value} end)
end
tuples
|> Enum.reduce(acc, fn (value, acc) -> accumulate_kv_pair(prefix, {key, value}, true, acc, opts) end)
end
defp accumulate_kv_pair(prefix, {key, {nested_key, value}}, _, acc, opts) do
build_key(prefix, key)
|> accumulate_kv_pair({build_nested_key(nested_key), value}, false, acc, opts)
end
defp accumulate_kv_pair(prefix, {key, value}, true, acc, _opts) do
[{build_key(prefix, key, "[]"), to_string(value)} | acc]
end
defp accumulate_kv_pair(prefix, {key, value}, _false, acc, _opts) do
[{build_key(prefix, key), to_string(value)} | acc]
end
defp accumulate_kv_pair(_, _, _, _, _), do: raise ArgumentError, @noKVListError
defp build_key(prefix, key) , do: prefix <> to_string(key)
defp build_key(prefix, key, suffix), do: prefix <> to_string(key) <> suffix
defp build_nested_key(nested_key), do: "[#{to_string(nested_key)}]"
end
| lib/uri_query.ex | 0.831691 | 0.492005 | uri_query.ex | starcoder |
defmodule Xgit.Util.ObservedFile do
@moduledoc false
# Records the cached parsed state of the file and its modification date
# so that Xgit can avoid the work of re-parsing that file when we can
# be sure it is unchanged.
import Xgit.Util.ForceCoverage
@typedoc ~S"""
Cache for parsed state of the file and information about its
file system state.
## Struct Members
* `path`: path to the file
* `exists?`: `true` if the file existed last time we checked
* `last_modified_time`: POSIX file time for the file last time we checked
(`nil` if file did not exist then)
* `last_checked_time`: POSIX time stamp when file status was checked
(used to help avoid the "racy git problem")
* `parsed_state`: result from either `parse_fn` or `empty_fn`
"""
@type t :: %__MODULE__{
path: Path.t(),
exists?: boolean,
last_modified_time: integer | nil,
last_checked_time: integer | nil,
parsed_state: any
}
@typedoc ~S"""
A function that parses a file at a given path and returns a parsed state
for that file.
"""
@type parse_fn :: (Path.t() -> any)
@typedoc ~S"""
A function that can return a state for the file format when the file
doesn't exist.
"""
@type empty_fn :: (() -> any)
@enforce_keys [:path, :exists?, :parsed_state]
defstruct [
:path,
:exists?,
:last_modified_time,
:last_checked_time,
:parsed_state
]
@doc ~S"""
Record an initial observation of the contents of the file.
## Parameters
`parse_fn` is a function with one argument (path) that parses the file
if it exists and returns the content that will be stored in `parsed_state`.
`empty_fn` is a function with zero arguments that returns the desired state
for `parsed_state` in the event there is no file at this path.
"""
@spec initial_state_for_path(path :: Path.t(), parse_fn :: parse_fn, empty_fn :: empty_fn) :: t
def initial_state_for_path(path, parse_fn, empty_fn)
when is_binary(path) and is_function(parse_fn, 1) and is_function(empty_fn, 0),
do: state_from_file_stat(path, parse_fn, empty_fn, File.stat(path, time: :posix))
defp state_from_file_stat(path, parse_fn, _empty_fn, {:ok, %{type: :regular, mtime: mtime}}) do
%__MODULE__{
path: path,
exists?: true,
last_modified_time: mtime,
last_checked_time: System.os_time(:second),
parsed_state: parse_fn.(path)
}
end
defp state_from_file_stat(path, _parse_fn, _empty_fn, {:ok, %{type: file_type}}) do
raise ArgumentError,
"Xgit.Util.ObservedFile: path #{path} points to an item of type #{file_type}; should be a regular file or no file at all"
end
defp state_from_file_stat(path, _parse_fn, empty_fn, {:error, :enoent}) do
%__MODULE__{
path: path,
exists?: false,
parsed_state: empty_fn.()
}
end
@doc ~S"""
Return `true` if the file has potentially changed since the last
recorded observation. This can happen if:
* The modified time has changed since the previous observation.
* The file exists when it did not previously exist (or vice versa).
* The modified time is so recent as to be indistinguishable from
the time at which the initial snapshot was recorded. (This is often
referred to as the "racy git problem.")
This function does not update the cached state of the file.
"""
@spec maybe_dirty?(observed_file :: t) :: boolean
def maybe_dirty?(%__MODULE__{path: path} = observed_file) when is_binary(path),
do: maybe_dirty_for_file_stat?(observed_file, File.stat(path, time: :posix))
defp maybe_dirty_for_file_stat?(
%__MODULE__{
exists?: true,
last_modified_time: last_modified_time,
last_checked_time: last_checked_time
},
{:ok, %File.Stat{type: :regular, mtime: last_modified_time}}
)
when is_integer(last_modified_time) do
# File still exists and modified time is same as before. Are we in racy git state?
# Certain file systems round to the nearest few seconds, so last mod time has
# to be at least 3 seconds before we checked status for us to start believing file content.
last_modified_time >= last_checked_time - 2
end
defp maybe_dirty_for_file_stat?(
%__MODULE__{exists?: true, last_modified_time: lmt1},
{:ok, %File.Stat{type: :regular, mtime: lmt2}}
)
when is_integer(lmt1) and is_integer(lmt2) do
# File still exists but modified time doesn't match: Dirty.
cover true
end
defp maybe_dirty_for_file_stat?(%__MODULE__{exists?: false}, {:error, :enoent}) do
# File didn't exist before; still doesn't: Not dirty.
cover false
end
defp maybe_dirty_for_file_stat?(%__MODULE__{exists?: false}, {:ok, %File.Stat{type: :regular}}) do
# File didn't exist before; it does now.
cover true
end
defp maybe_dirty_for_file_stat?(%__MODULE__{exists?: true}, {:error, :enoent}) do
# File existed before; now it doesn't.
cover true
end
defp maybe_dirty_for_file_stat?(%__MODULE__{path: path}, {:ok, %{type: file_type}}) do
raise ArgumentError,
"Xgit.Util.ObservedFile: path #{path} points to an item of type #{file_type}; should be a regular file or no file at all"
end
@doc ~S"""
Update the cached state of the file if it has potentially changed since the last
observation.
As noted in `maybe_dirty?/1`, we err on the side of caution if the modification date
alone cannot be trusted to reflect changes to the file's content.
## Parameters
`parse_fn` is a function with one argument (path) that parses the file
if it exists and returns the content that will be stored in `parsed_state`.
`empty_fn` is a function with zero arguments that returns the desired state
for `parsed_state` in the event there is no file at this path.
If the file state has potentially changed (see `maybe_dirty?/1`) then either
`parse_fn` or `empty_fn` will be called to generate a new value for `parsed_state`.
## Return Value
Returns an `ObservedFile` struct which may have been updated via either `parse_fn/1`
or `empty_fn/0` as appropriate.
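For example (`File.read!/1` and the path are illustrative stand-ins for a real
parser and file, not part of this module's API):
    of = Xgit.Util.ObservedFile.initial_state_for_path("/tmp/example", &File.read!/1, fn -> nil end)
    of = Xgit.Util.ObservedFile.update_state_if_maybe_dirty(of, &File.read!/1, fn -> nil end)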
"""
@spec update_state_if_maybe_dirty(
observed_file :: t,
parse_fn :: parse_fn,
empty_fn :: empty_fn
) :: t
def update_state_if_maybe_dirty(%__MODULE__{path: path} = observed_file, parse_fn, empty_fn)
when is_binary(path) and is_function(parse_fn, 1) and is_function(empty_fn, 0) do
file_stat = File.stat(path, time: :posix)
if maybe_dirty_for_file_stat?(observed_file, file_stat) do
state_from_file_stat(path, parse_fn, empty_fn, file_stat)
else
# We're sure the file is unchanged: Return cached state as is.
cover observed_file
end
end
end
| lib/xgit/util/observed_file.ex | 0.887263 | 0.638849 | observed_file.ex | starcoder |
defmodule Main do
@moduledoc """
This module is the main one, either called directly when starting the physical elevator, or indirectly by the Simulation node (below).
The main purpose is to start a supervisor and initialize the child processes from it.
"""
use Supervisor
@strategy :one_for_one
@doc """
For running the actual elevator
"""
def start :normal do
Supervisor.start_link(__MODULE__, :normal, name: __MODULE__)
end
@doc """
For simulation
"""
def start mode, n \\ 1, elevator_number \\ 1 do
Supervisor.start_link(__MODULE__, [mode, n, elevator_number], name: __MODULE__)
end
@doc """
Normal project init
"""
def init :normal do
spawn(:os, :cmd, ['xterm -e ElevatorServer'])
Process.sleep(1000)
start_children :normal
end
@doc """
Simulation in Real-time lab init
"""
def init [:simulation, n, elevator_number] do
port = 20000 + elevator_number
spawn(:os, :cmd, [
'xterm -e "path to SimElevatorServer" --port #{port}'
])
Process.sleep(1000)
start_children :simulation, port
end
@doc """
Normal procedure
Starts all the child processes of the application supervisor. We maintain a simple one-layered supervision tree with a "one for one" strategy, i.e. if a child process terminates, only that process is restarted.
"""
def start_children :normal do
children = [
ElevatorDriver,
Poller,
Network,
Distro,
ElevatorState
]
Supervisor.init(children, strategy: @strategy)
# other possible options for Supervisor.init/2:
# [max_restarts: 3] (default: 3)
# [max_seconds: 5] (default: 5; the time frame for max_restarts)
end
@doc """
Simulator procedure
Starts all the child processes of the application supervisor. We maintain a simple one-layered supervision tree with a "one for one" strategy, i.e. if a child process terminates, only that process is restarted.
"""
def start_children :simulation, port do
ElevatorDriver.start_link port
children = [
#{ElevatorDriver, [port]},
#Poller,
#Distro,
#ElevatorState
]
Supervisor.init(children, strategy: @strategy)
# other possible options for Supervisor.init/2:
# [max_restarts: 3] (default: 3)
# [max_seconds: 5] (default: 5; the time frame for max_restarts)
end
end
defmodule Simulation do
@moduledoc """
Only used for spawning n*2 xterm windows, one for each process and its elevator simulator.
Each elevator is named after its port number, which is 20000 + elevator_number,
i.e. elevator_number = 1, n = 2 -> ports 20001, 20002, ... 2000n.
Run "$ epmd -daemon" if the Erlang network doesn't work properly.
"""
def start mode \\ :simulation, n \\ 1, elevator_number \\ 1 do
init_elevators mode, n, elevator_number
end
@doc """
Init n elevators on a computer in the Real-time lab
"""
def init_elevators :simulation, n, elevator_number do
if elevator_number <= n do
#spawn(:os, :cmd, ['xterm -hold -e iex -S mix run -e "Main.start :simulator, n, elevator_number"'])
spawn(:os, :cmd, ['start iex -S mix run -e "Main.start :simulation, #{n}, #{elevator_number}"'])
init_elevators(:simulation, n, elevator_number + 1)
end
{:ok, :init_simulation}
end
end
| lib/main.ex | 0.608129 | 0.574454 | main.ex | starcoder |
defmodule Ecto.Query.Validator do
@moduledoc false
# This module does validation on the query checking that it's in a correct
# format, raising if it's not.
alias Ecto.Query.Util
alias Ecto.Query.Query
alias Ecto.Query.QueryExpr
alias Ecto.Query.JoinExpr
alias Ecto.Query.AssocJoinExpr
defrecord State, sources: [], vars: [], grouped: [], grouped?: false,
in_agg?: false, apis: nil, from: nil, query: nil
# Adds type, file and line metadata to the exception
defmacrop rescue_metadata(type, file, line, block) do
quote location: :keep do
try do
unquote(block)
rescue e in [Ecto.InvalidQuery] ->
stacktrace = System.stacktrace
raise Ecto.InvalidQuery, [reason: e.reason, type: unquote(type),
file: unquote(file), line: unquote(line)], stacktrace
end
end
end
def validate(Query[] = query, apis, opts) do
if query.from == nil do
raise Ecto.InvalidQuery, reason: "a query must have a from expression"
end
grouped = group_by_sources(query.group_bys, query.sources)
is_grouped = query.group_bys != [] or query.havings != []
state = State[sources: query.sources, grouped: grouped, grouped?: is_grouped,
apis: apis, from: query.from, query: query]
validate_joins(query.joins, state)
validate_wheres(query.wheres, state)
validate_order_bys(query.order_bys, state)
validate_group_bys(query.group_bys, state)
validate_havings(query.havings, state)
validate_preloads(query.preloads, state)
unless opts[:skip_select] do
validate_select(query.select, state)
preload_selected(query)
end
end
def validate_update(Query[] = query, apis, values) do
validate_only_where(query)
if values == [] do
raise Ecto.InvalidQuery, reason: "no values to update given"
end
if entity = Util.entity(query.from) do
Enum.each(values, fn { field, expr } ->
expected_type = entity.__entity__(:field_type, field)
unless expected_type do
raise Ecto.InvalidQuery, reason: "field `#{field}` is not on the " <>
"entity `#{inspect entity}`"
end
# TODO: Check if entity field allows nil
state = State[sources: query.sources, apis: apis]
type = type_check(expr, state)
format_expected_type = Util.type_to_ast(expected_type) |> Macro.to_string
format_type = Util.type_to_ast(type) |> Macro.to_string
unless expected_type == type do
raise Ecto.InvalidQuery, reason: "expected_type `#{format_expected_type}` " <>
" on `#{inspect entity}.#{field}` doesn't match type `#{format_type}`"
end
end)
end
validate(query, apis, skip_select: true)
end
def validate_delete(query, apis) do
validate_only_where(query)
validate(query, apis, skip_select: true)
end
def validate_get(query, apis) do
validate_only_where(query)
validate(query, apis, skip_select: true)
end
defp validate_only_where(query) do
# Update validation check if assertion fails
unquote(unless size(Query[]) == 12, do: raise "Ecto.Query.Query out of date")
# TODO: File and line metadata
unless match?(Query[joins: [], select: nil, order_bys: [], limit: nil,
offset: nil, group_bys: [], havings: [], preloads: []], query) do
raise Ecto.InvalidQuery, reason: "update query can only have a single `where` expression"
end
end
defp validate_joins(joins, state) do
state = state.grouped?(false)
{ joins, assocs } = Enum.partition(joins, fn
JoinExpr[on: nil] ->
raise Ecto.InvalidQuery, reason: "an `on` query expression have to " <>
"follow a `join` unless it's an association join"
JoinExpr[] -> true
AssocJoinExpr[] -> false
end)
ons = Enum.map(joins, &(&1.on))
validate_booleans(:join_on, ons, state)
validate_assoc_joins(assocs, state)
end
defp validate_wheres(wheres, state) do
state = state.grouped?(false)
validate_booleans(:where, wheres, state)
end
defp validate_havings(havings, state) do
validate_booleans(:having, havings, state)
end
defp validate_booleans(type, query_exprs, state) do
Enum.each(query_exprs, fn(QueryExpr[] = expr) ->
rescue_metadata(type, expr.file, expr.line) do
expr_type = type_check(expr.expr, state)
unless expr_type in [:unknown, :boolean] do
format_expr_type = Util.type_to_ast(expr_type) |> Macro.to_string
raise Ecto.InvalidQuery, reason: "#{type} expression `#{Macro.to_string(expr.expr)}` " <>
"is of type `#{format_expr_type}`, has to be of boolean type"
end
end
end)
end
defp validate_assoc_joins(joins, State[] = state) do
Enum.each(joins, fn AssocJoinExpr[] = expr ->
rescue_metadata(:join, expr.file, expr.line) do
{ :., _, [left, right] } = expr.expr
entity = Util.find_source(state.sources, left) |> Util.entity
if nil?(entity) do
raise Ecto.InvalidQuery, reason: "association join can only be performed " <>
"on fields from an entity"
end
refl = entity.__entity__(:association, right)
unless refl do
raise Ecto.InvalidQuery, reason: "association join can only be performed " <>
"on assocation fields"
end
end
end)
end
defp validate_order_bys(order_bys, state) do
validate_field_list(:order_by, order_bys, state)
end
defp validate_group_bys(group_bys, state) do
validate_field_list(:group_by, group_bys, state)
end
defp validate_field_list(type, query_exprs, state) do
Enum.each(query_exprs, fn(QueryExpr[] = expr) ->
rescue_metadata(type, expr.file, expr.line) do
Enum.map(expr.expr, fn expr ->
validate_field(expr, state)
end)
end
end)
end
# order_by field
defp validate_field({ _, var, field }, state) do
validate_field({ var, field }, state)
end
# group_by field
defp validate_field({ var, field }, State[] = state) do
entity = Util.find_source(state.sources, var) |> Util.entity
if entity, do: do_validate_field(entity, field)
end
defp do_validate_field(entity, field) do
type = entity.__entity__(:field_type, field)
unless type do
raise Ecto.InvalidQuery, reason: "unknown field `#{field}` on `#{inspect entity}`"
end
end
defp validate_preloads(preloads, State[] = state) do
entity = Util.entity(state.from)
if preloads != [] and nil?(entity) do
raise Ecto.InvalidQuery, reason: "can only preload on fields from an entity"
end
Enum.each(preloads, fn(QueryExpr[] = expr) ->
rescue_metadata(:preload, expr.file, expr.line) do
Enum.map(expr.expr, fn field ->
type = entity.__entity__(:association, field)
unless type do
raise Ecto.InvalidQuery, reason: "`#{inspect entity}.#{field}` is not an association field"
end
end)
end
end)
end
defp validate_select(QueryExpr[] = expr, State[] = state) do
rescue_metadata(:select, expr.file, expr.line) do
select_clause(expr.expr, state)
end
end
defp preload_selected(Query[select: select, preloads: preloads]) do
unless preloads == [] do
rescue_metadata(:select, select.file, select.line) do
pos = Util.locate_var(select.expr, { :&, [], [0] })
if nil?(pos) do
raise Ecto.InvalidQuery, reason: "source in from expression " <>
"needs to be selected when using preload query"
end
end
end
end
# var.x
defp type_check({ { :., _, [{ :&, _, [_] } = var, field] }, _, [] }, State[] = state) do
source = Util.find_source(state.sources, var)
check_grouped({ source, field }, state)
if entity = Util.entity(source) do
type = entity.__entity__(:field_type, field)
unless type do
raise Ecto.InvalidQuery, reason: "unknown field `#{field}` on `#{inspect entity}`"
end
type
else
:unknown
end
end
# var
defp type_check({ :&, _, [_] } = var, State[] = state) do
source = Util.find_source(state.sources, var)
if entity = Util.entity(source) do
fields = entity.__entity__(:field_names)
Enum.each(fields, &check_grouped({ source, &1 }, state))
entity
else
source = Util.source(source)
raise Ecto.InvalidQuery, reason: "cannot select on source, `#{inspect source}`, with no entity"
end
end
# ops & functions
defp type_check({ name, _, args } = expr, state) when is_atom(name) and is_list(args) do
length_args = length(args)
api = Enum.find(state.apis, &function_exported?(&1, name, length_args))
unless api do
raise Ecto.InvalidQuery, reason: "function `#{name}/#{length_args}` not defined in query API"
end
is_agg = api.aggregate?(name, length_args)
if is_agg and state.in_agg? do
raise Ecto.InvalidQuery, reason: "aggregate function calls cannot be nested"
end
state = state.in_agg?(is_agg)
arg_types = Enum.map(args, &type_check(&1, state))
if Enum.any?(arg_types, &(&1 == :unknown)) do
:unknown
else
case apply(api, name, arg_types) do
{ :ok, type } ->
type
{ :error, allowed } ->
raise Ecto.TypeCheckError, expr: expr, types: arg_types, allowed: allowed
end
end
end
# list
defp type_check(list, state) when is_list(list) do
types = Enum.map(list, &type_check(&1, state))
case types do
[] ->
{ :list, :any }
[type|rest] ->
unless Enum.all?(rest, &Util.type_eq?(type, &1)) do
raise Ecto.InvalidQuery, reason: "all elements in list has to be of same type"
end
{ :list, type }
end
end
# atom
defp type_check(literal, _vars) when is_atom(literal) and not (literal in [true, false, nil]) do
raise Ecto.InvalidQuery, reason: "atoms are not allowed in queries `#{literal}`"
end
# values
defp type_check(value, _state) do
case Util.value_to_type(value) do
{ :ok, type } -> type
{ :error, reason } ->
raise Ecto.InvalidQuery, reason: reason
end
end
# Handle top level select cases
defp select_clause({ :assoc, _, [parent, child] }, State[] = state) do
entity = Util.find_source(state.sources, parent) |> Util.entity
unless entity == Util.entity(state.from) do
raise Ecto.InvalidQuery, reason: "can only associate on the from entity"
end
expr = Util.find_expr(state.query, child)
unless match?(AssocJoinExpr[qual: qual] when qual in [:inner, :left], expr) do
raise Ecto.InvalidQuery, reason: "can only associate on an inner or left association join"
end
end
# Some two-tuples may be records (ex. Ecto.Binary[]), so check for records
# explicitly. We can do this because we don't allow atoms in queries.
defp select_clause({ atom, _ } = record, state) when is_atom(atom) do
type_check(record, state)
end
defp select_clause({ left, right }, state) do
select_clause(left, state)
select_clause(right, state)
end
defp select_clause({ :{}, _, list }, state) do
Enum.each(list, &select_clause(&1, state))
end
defp select_clause(list, state) when is_list(list) do
Enum.each(list, &select_clause(&1, state))
end
defp select_clause(other, state) do
type_check(other, state)
end
defp group_by_sources(group_bys, sources) do
Enum.map(group_bys, fn(expr) ->
Enum.map(expr.expr, fn({ var, field }) ->
source = Util.find_source(sources, var)
{ source, field }
end)
end) |> Enum.concat |> Enum.uniq
end
defp check_grouped({ source, field } = source_field, state) do
if state.grouped? and not state.in_agg? and not (source_field in state.grouped) do
entity = Util.entity(source) || Util.source(source)
raise Ecto.InvalidQuery, reason: "`#{inspect entity}.#{field}` must appear in `group_by` " <>
"or be used in an aggregate function"
end
end
end
| lib/ecto/query/validator.ex | 0.542379 | 0.513363 | validator.ex | starcoder |
defmodule Multiverses.Application do
@moduledoc """
This module is intended to be a drop-in replacement for `Application`.
When you drop this module in, functions relating to runtime environment
variables have been substituted with equivalent macros that respect the
Multiverse pattern.
## Warning
This module is as dangerous as it is useful, so **please make sure you know
what you're doing before you use this**. Always test your system in a
staging system that has `:use_multiverses` unset, before deploying code
that uses this module.
The functions calls which take options (`:timeout`, `:persist`) are not
supported, since it's likely that if you're using these options, you're
probably not in a situation where you need multiverses.
For the same reason, `:get_all_env` and `:put_all_env` are not supported
and will default to the standard Elixir behavior.
## How it works
This module works by substituting the `app` atom with the
`{multiverse, app}` tuple. If that tuple isn't found, it falls back
on the `app` atom for its ETS table lookup.
"""
use Multiverses.Clone,
module: Application,
except: [delete_env: 2,
fetch_env!: 2, fetch_env: 2,
get_env: 2, get_env: 3,
put_env: 3]
# we're abusing the application key format, which works because ETS tables
# are what back the application environment variables, and the system
# tolerates "other terms", even though that's now how they are typespecced
# out.
@dialyzer {:nowarn_function,
delete_env: 2,
fetch_env: 2,
fetch_env!: 2,
get_env: 2,
get_env: 3,
put_env: 3}
defp universe(key) do
require Multiverses
{Multiverses.self(), key}
end
@doc "See `Application.delete_env/2`."
def delete_env(app, key) do
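# If a universe-local value exists, drop it; otherwise write a tombstone so
# lookups in this universe stop falling through to the global value
# (fetch_env/2 below treats :"$tombstone" as :error without falling back).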
case Application.fetch_env(app, universe(key)) do
{:ok, _} ->
Application.delete_env(app, universe(key))
:error ->
Application.put_env(app, universe(key), :"$tombstone")
end
end
@doc "See `Application.fetch_env/2`."
def fetch_env(app, key) do
case Application.fetch_env(app, universe(key)) do
{:ok, :"$tombstone"} -> :error
result = {:ok, _} -> result
:error ->
Application.fetch_env(app, key)
end
end
@doc "See `Application.fetch_env!/2`."
def fetch_env!(app, key) do
case Application.fetch_env(app, universe(key)) do
{:ok, env} -> env
:error ->
Application.fetch_env!(app, key)
end
end
@doc "See `Application.get_env/2`."
def get_env(app, key) do
Multiverses.Application.get_env(app, key, nil)
end
@doc "See `Application.get_env/3`."
def get_env(app, key, default) do
case Multiverses.Application.fetch_env(app, key) do
{:ok, env} -> env
:error ->
# fall back to the global value.
Application.get_env(app, key, default)
end
end
@doc "See `Application.put_env/3`."
def put_env(app, key, value) do
Application.put_env(app, universe(key), value)
end
end
| lib/multiverses/application.ex | 0.73412 | 0.413566 | application.ex | starcoder |
defmodule AdventOfCode.Day17 do
import AdventOfCode.Utils
@typep int_pair :: {integer, integer}
@typep area :: {int_pair, int_pair}
# An arbitrary range that is suited for the dataset
# Due to the large step size, the solution space may be
# discontinuous. Brute-force is relatively fast and a lot simpler.
@velocity_x_range 0..200
@velocity_y_range -130..150
def part1(args) do
area = parse_args(args)
for(vx <- @velocity_x_range, vy <- @velocity_y_range, do: {vx, vy})
|> Enum.map(&intersects?(area, &1))
|> Enum.filter(&(&1 != nil))
|> Enum.max()
end
def part2(args) do
area = parse_args(args)
for(vx <- @velocity_x_range, vy <- @velocity_y_range, do: {vx, vy})
|> Enum.map(&intersects?(area, &1))
|> Enum.filter(& &1)
|> Enum.count()
end
@spec intersects?(area, int_pair, int_pair, integer) :: integer | nil
defp intersects?(area, position \\ {0, 0}, velocity, max_y \\ 0)
# Projectile has exceeded the target area
defp intersects?({{t_x1, t_x2}, {t_y1, t_y2}}, {p_x, p_y}, _, _)
when (p_x > t_x1 and p_x > t_x2) or (p_y < t_y1 and p_y < t_y2),
do: nil
# Projectile is inside the target area
defp intersects?({{t_x1, t_x2}, {t_y1, t_y2}}, {p_x, p_y}, _, max_y)
when p_x in t_x1..t_x2 and p_y in t_y1..t_y2,
do: max_y
defp intersects?(area, {x, y}, {vx, vy}, max_y) do
{x, y} = {x + vx, y + vy}
velocity = next_velocity({vx, vy})
intersects?(area, {x, y}, velocity, max(max_y, y))
end
@spec next_velocity(int_pair) :: int_pair
defp next_velocity({vx, vy}), do: {closer_to_zero(vx), vy - 1}
defp closer_to_zero(0), do: 0
defp closer_to_zero(n) when n > 0, do: n - 1
defp closer_to_zero(n) when n < 0, do: n + 1
@spec parse_args([binary]) :: area
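# The 13 skipped bytes are the "target area: " prefix; the remainder has the
# shape "x=<a>..<b>, y=<c>..<d>".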
defp parse_args([<<_::binary-size(13), data::binary>>]) do
data
|> String.split(", ")
|> Enum.map(&parse_range/1)
|> List.to_tuple()
end
defp parse_range(range) do
range
|> String.slice(2..-1)
|> String.split("..")
|> Enum.map(&parse_int!/1)
|> List.to_tuple()
end
end
| lib/advent_of_code/day_17.ex | 0.798501 | 0.520923 | day_17.ex | starcoder |
defmodule Elixush.Instructions.Vectors do
@moduledoc "Instructions that operate on various vector stacks."
import Elixush.PushState
import Elixush.Instructions.Common
import Elixush.Globals.Agent, only: [get_globals: 1]
def vector_integer_pop(state), do: popper(:vector_integer, state)
def vector_float_pop(state), do: popper(:vector_float, state)
def vector_boolean_pop(state), do: popper(:vector_boolean, state)
def vector_string_pop(state), do: popper(:vector_string, state)
def vector_integer_dup(state), do: duper(:vector_integer, state)
def vector_float_dup(state), do: duper(:vector_float, state)
def vector_boolean_dup(state), do: duper(:vector_boolean, state)
def vector_string_dup(state), do: duper(:vector_string, state)
def vector_integer_swap(state), do: swapper(:vector_integer, state)
def vector_float_swap(state), do: swapper(:vector_float, state)
def vector_boolean_swap(state), do: swapper(:vector_boolean, state)
def vector_string_swap(state), do: swapper(:vector_string, state)
def vector_integer_rot(state), do: rotter(:vector_integer, state)
def vector_float_rot(state), do: rotter(:vector_float, state)
def vector_boolean_rot(state), do: rotter(:vector_boolean, state)
def vector_string_rot(state), do: rotter(:vector_string, state)
def vector_integer_flush(state), do: flusher(:vector_integer, state)
def vector_float_flush(state), do: flusher(:vector_float, state)
def vector_boolean_flush(state), do: flusher(:vector_boolean, state)
def vector_string_flush(state), do: flusher(:vector_string, state)
def vector_integer_eq(state), do: eqer(:vector_integer, state)
def vector_float_eq(state), do: eqer(:vector_float, state)
def vector_boolean_eq(state), do: eqer(:vector_boolean, state)
def vector_string_eq(state), do: eqer(:vector_string, state)
def vector_integer_stackdepth(state), do: stackdepther(:vector_integer, state)
def vector_float_stackdepth(state), do: stackdepther(:vector_float, state)
def vector_boolean_stackdepth(state), do: stackdepther(:vector_boolean, state)
def vector_string_stackdepth(state), do: stackdepther(:vector_string, state)
def vector_integer_yank(state), do: yanker(:vector_integer, state)
def vector_float_yank(state), do: yanker(:vector_float, state)
def vector_boolean_yank(state), do: yanker(:vector_boolean, state)
def vector_string_yank(state), do: yanker(:vector_string, state)
def vector_integer_yankdup(state), do: yankduper(:vector_integer, state)
def vector_float_yankdup(state), do: yankduper(:vector_float, state)
def vector_boolean_yankdup(state), do: yankduper(:vector_boolean, state)
def vector_string_yankdup(state), do: yankduper(:vector_string, state)
def vector_integer_shove(state), do: shover(:vector_integer, state)
def vector_float_shove(state), do: shover(:vector_float, state)
def vector_boolean_shove(state), do: shover(:vector_boolean, state)
def vector_string_shove(state), do: shover(:vector_string, state)
def vector_integer_empty(state), do: emptyer(:vector_integer, state)
def vector_float_empty(state), do: emptyer(:vector_float, state)
def vector_boolean_empty(state), do: emptyer(:vector_boolean, state)
def vector_string_empty(state), do: emptyer(:vector_string, state)
### common instructions for vectors
@doc "Takes a type and a state and concats two vectors on the type stack."
def concater(type, state) do
if not(Enum.empty?(Enum.drop(state[type], 1))) do
first_item = stack_ref(type, 0, state)
second_item = stack_ref(type, 1, state)
if get_globals(:max_vector_length) >= Enum.count(first_item) + Enum.count(second_item) do
second_item
|> Enum.concat(first_item)
|> push_item(type, pop_item(type, pop_item(type, state)))
else
state
end
else
state
end
end
def vector_integer_concat(state), do: concater(:vector_integer, state)
def vector_float_concat(state), do: concater(:vector_float, state)
def vector_boolean_concat(state), do: concater(:vector_boolean, state)
def vector_string_concat(state), do: concater(:vector_string, state)
@doc """
Takes a vec_type, a lit_type, and a state and conj's an item onto the
type stack.
"""
def conjer(vec_type, lit_type, state) do
if not(Enum.empty?(state[vec_type])) and not(Enum.empty?(state[lit_type])) do
result = vec_type
|> top_item(state)
|> List.insert_at(-1, top_item(lit_type, state))
if get_globals(:max_vector_length) >= Enum.count(result) do
result
|> push_item(vec_type, pop_item(lit_type, pop_item(vec_type, state)))
else
state
end
else
state
end
end
def vector_integer_conj(state), do: conjer(:vector_integer, :integer, state)
def vector_float_conj(state), do: conjer(:vector_float, :float, state)
def vector_boolean_conj(state), do: conjer(:vector_boolean, :boolean, state)
def vector_string_conj(state), do: conjer(:vector_string, :string, state)
@doc """
Takes a type and a state and takes the first N items from the type stack,
where N is from the integer stack.
"""
def taker(type, state) do
if not(Enum.empty?(state[type])) and not(Enum.empty?(state[:integer])) do
type
|> top_item(state)
|> Enum.take(top_item(:integer, state))
|> push_item(type, pop_item(type, pop_item(:integer, state)))
else
state
end
end
def vector_integer_take(state), do: taker(:vector_integer, state)
def vector_float_take(state), do: taker(:vector_float, state)
def vector_boolean_take(state), do: taker(:vector_boolean, state)
def vector_string_take(state), do: taker(:vector_string, state)
@doc """
Takes a type and a state and takes the subvec of the top item on the
type stack.
"""
def subvecer(type, state) do
if not(Enum.empty?(state[type])) and not(Enum.empty?(Enum.drop(state[:integer], 1))) do
vect = top_item(type, state)
first_index = vect
|> Enum.count
|> min(max(0, stack_ref(:integer, 1, state)))
second_index = vect
|> Enum.count
|> min(max(first_index, stack_ref(:integer, 0, state)))
vect
|> Enum.slice(first_index, second_index - first_index)
|> push_item(type, pop_item(type, pop_item(:integer, pop_item(:integer, state))))
else
state
end
end
def vector_integer_subvec(state), do: subvecer(:vector_integer, state)
def vector_float_subvec(state), do: subvecer(:vector_float, state)
def vector_boolean_subvec(state), do: subvecer(:vector_boolean, state)
def vector_string_subvec(state), do: subvecer(:vector_string, state)
@doc "Takes a type and a state and gets the first item from the type stack."
def firster(type, lit_type, state) do
if not(Enum.empty?(state[type])) and not(Enum.empty?(List.first(state[type]))) do
type
|> top_item(state)
|> List.first
|> push_item(lit_type, pop_item(type, state))
else
state
end
end
def vector_integer_first(state), do: firster(:vector_integer, :integer, state)
def vector_float_first(state), do: firster(:vector_float, :float, state)
def vector_boolean_first(state), do: firster(:vector_boolean, :boolean, state)
def vector_string_first(state), do: firster(:vector_string, :string, state)
@doc "Takes a type and a state and gets the last item from the type stack."
def laster(type, lit_type, state) do
if not(Enum.empty?(state[type])) and not(Enum.empty?(List.first(state[type]))) do
type
|> top_item(state)
|> List.last
|> push_item(lit_type, pop_item(type, state))
else
state
end
end
def vector_integer_last(state), do: laster(:vector_integer, :integer, state)
def vector_float_last(state), do: laster(:vector_float, :float, state)
def vector_boolean_last(state), do: laster(:vector_boolean, :boolean, state)
def vector_string_last(state), do: laster(:vector_string, :string, state)
@doc "Takes a type and a state and gets the nth item from the type stack."
def nther(type, lit_type, state) do
if (not(Enum.empty?(state[type])) and not(Enum.empty?(List.first(state[type])))) and not(Enum.empty?(state[:integer])) do
vect = stack_ref(type, 0, state)
index = rem(stack_ref(:integer, 0, state), Enum.count(vect))
vect |> Enum.at(index) |> push_item(lit_type, pop_item(:integer, pop_item(type, state)))
else
state
end
end
def vector_integer_nth(state), do: nther(:vector_integer, :integer, state)
def vector_float_nth(state), do: nther(:vector_float, :float, state)
def vector_boolean_nth(state), do: nther(:vector_boolean, :boolean, state)
def vector_string_nth(state), do: nther(:vector_string, :string, state)
@doc """
Takes a type and a state and takes the rest of the top item on the type stack.
"""
def rester(type, state) do
if (not(Enum.empty?(state[type]))) do
type |> top_item(state) |> Enum.drop(1) |> push_item(type, pop_item(type, state))
else
state
end
end
def vector_integer_rest(state), do: rester(:vector_integer, state)
def vector_float_rest(state), do: rester(:vector_float, state)
def vector_boolean_rest(state), do: rester(:vector_boolean, state)
def vector_string_rest(state), do: rester(:vector_string, state)
@doc """
Takes a type and a state and takes the butlast of the top item on the
type stack.
"""
def butlaster(type, state) do
if (not(Enum.empty?(state[type]))) do
type |> top_item(state) |> Enum.drop(-1) |> push_item(type, pop_item(type, state))
else
state
end
end
def vector_integer_butlast(state), do: butlaster(:vector_integer, state)
def vector_float_butlast(state), do: butlaster(:vector_float, state)
def vector_boolean_butlast(state), do: butlaster(:vector_boolean, state)
def vector_string_butlast(state), do: butlaster(:vector_string, state)
@doc """
Takes a type and a state and takes the length of the top item on the
type stack.
"""
def lengther(type, state) do
if (not(Enum.empty?(state[type]))) do
type |> top_item(state) |> Enum.count |> push_item(:integer, pop_item(type, state))
else
state
end
end
def vector_integer_length(state), do: lengther(:vector_integer, state)
def vector_float_length(state), do: lengther(:vector_float, state)
def vector_boolean_length(state), do: lengther(:vector_boolean, state)
def vector_string_length(state), do: lengther(:vector_string, state)
@doc """
Takes a type and a state and takes the reverse of the top item on the
type stack.
"""
def reverser(type, state) do
if (not(Enum.empty?(state[type]))) do
type |> top_item(state) |> Enum.reverse |> push_item(type, pop_item(type, state))
else
state
end
end
def vector_integer_reverse(state), do: reverser(:vector_integer, state)
def vector_float_reverse(state), do: reverser(:vector_float, state)
def vector_boolean_reverse(state), do: reverser(:vector_boolean, state)
def vector_string_reverse(state), do: reverser(:vector_string, state)
@doc """
Takes a type and a state and pushes every item from the first vector
onto the appropriate stack.
"""
def pushaller(type, lit_type, state) do
if Enum.empty?(state[type]) do
state
else
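# Anonymous functions cannot recurse by name, so the loop function receives
# itself as `f` and calls `f.(f, ...)` to walk the vector's items.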
loop = fn(f, lit_list, loop_state) ->
if Enum.empty?(lit_list) do
loop_state
else
f.(f, Enum.drop(lit_list, 1), push_item(List.first(lit_list), lit_type, loop_state))
end
end
loop.(loop, Enum.reverse(top_item(type, state)), pop_item(type, state))
end
end
def vector_integer_pushall(state), do: pushaller(:vector_integer, :integer, state)
def vector_float_pushall(state), do: pushaller(:vector_float, :float, state)
def vector_boolean_pushall(state), do: pushaller(:vector_boolean, :boolean, state)
def vector_string_pushall(state), do: pushaller(:vector_string, :string, state)
@doc """
Takes a type and a state and pushes a boolean of whether the top vector
is empty.
"""
def emptyvectorer(type, state) do
if (not(Enum.empty?(state[type]))) do
type |> top_item(state) |> Enum.empty? |> push_item(:boolean, pop_item(type, state))
else
state
end
end
def vector_integer_emptyvector(state), do: emptyvectorer(:vector_integer, state)
def vector_float_emptyvector(state), do: emptyvectorer(:vector_float, state)
def vector_boolean_emptyvector(state), do: emptyvectorer(:vector_boolean, state)
def vector_string_emptyvector(state), do: emptyvectorer(:vector_string, state)
@doc """
Takes a type and a state and tells whether the top lit_type item is in the top
type vector.
"""
def containser(type, lit_type, state) do
if Enum.empty?(state[type]) or Enum.empty?(state[lit_type]) do
state
else
item = top_item(lit_type, state)
vect = top_item(type, state)
vect |> Enum.member?(item) |> push_item(:boolean, pop_item(lit_type, pop_item(type, state)))
end
end
def vector_integer_contains(state), do: containser(:vector_integer, :integer, state)
def vector_float_contains(state), do: containser(:vector_float, :float, state)
def vector_boolean_contains(state), do: containser(:vector_boolean, :boolean, state)
def vector_string_contains(state), do: containser(:vector_string, :string, state)
@doc """
Takes a type and a state and finds the index of the top lit_type item
in the top type vector.
"""
def indexofer(type, lit_type, state) do
if Enum.empty?(state[type]) or Enum.empty?(state[lit_type]) do
state
else
item = top_item(lit_type, state)
vect = top_item(type, state)
result = Enum.find_index(vect, &(&1 == item))
result
|> (fn(x) -> x || -1 end).()
|> push_item(:integer, pop_item(lit_type, pop_item(type, state)))
end
end
def vector_integer_indexof(state), do: indexofer(:vector_integer, :integer, state)
def vector_float_indexof(state), do: indexofer(:vector_float, :float, state)
def vector_boolean_indexof(state), do: indexofer(:vector_boolean, :boolean, state)
def vector_string_indexof(state), do: indexofer(:vector_string, :string, state)
@doc """
Takes a type and a state and counts the occurrences of the top lit_type item
in the top type vector.
"""
def occurrencesofer(type, lit_type, state) do
if Enum.empty?(state[type]) or Enum.empty?(state[lit_type]) do
state
else
item = top_item(lit_type, state)
vect = top_item(type, state)
result = vect |> Enum.filter(&(&1 == item)) |> Enum.count
result |> push_item(:integer, pop_item(lit_type, pop_item(type, state)))
end
end
def vector_integer_occurrencesof(state), do: occurrencesofer(:vector_integer, :integer, state)
def vector_float_occurrencesof(state), do: occurrencesofer(:vector_float, :float, state)
def vector_boolean_occurrencesof(state), do: occurrencesofer(:vector_boolean, :boolean, state)
def vector_string_occurrencesof(state), do: occurrencesofer(:vector_string, :string, state)
@doc """
Takes a type and a state and replaces, in the top type vector, item at index
(from integer stack) with the first lit_type item.
"""
def seter(type, lit_type, state) do
if (Enum.empty?(state[type]) or Enum.empty?(state[lit_type])) or (Enum.empty?(state[:integer]) or (lit_type == :integer and Enum.empty?(Enum.drop(state[:integer], 1)))) do
state
else
vect = top_item(type, state)
item = if lit_type == :integer, do: stack_ref(:integer, 1, state), else: top_item(lit_type, state)
index = if Enum.empty?(vect), do: 0, else: rem(top_item(:integer, state), Enum.count(vect))
result = if Enum.empty?(vect), do: vect, else: List.replace_at(vect, index, item)
result
|> push_item(type, pop_item(lit_type, pop_item(:integer, pop_item(type, state))))
end
end
def vector_integer_set(state), do: seter(:vector_integer, :integer, state)
def vector_float_set(state), do: seter(:vector_float, :float, state)
def vector_boolean_set(state), do: seter(:vector_boolean, :boolean, state)
def vector_string_set(state), do: seter(:vector_string, :string, state)
@doc """
Takes a type and a state and replaces all occurrences of the second lit_type
item with the first lit_type item in the top type vector.
"""
def replaceer(type, lit_type, state) do
if Enum.empty?(state[type]) or Enum.empty?(Enum.drop(state[lit_type], 1)) do
state
else
replace_with = stack_ref(lit_type, 0, state)
to_replace = stack_ref(lit_type, 1, state)
vect = top_item(type, state)
result = vect |> Enum.map(&(if &1 == to_replace, do: replace_with, else: &1))
result
|> push_item(type, pop_item(lit_type, pop_item(lit_type, pop_item(type, state))))
end
end
def vector_integer_replace(state), do: replaceer(:vector_integer, :integer, state)
def vector_float_replace(state), do: replaceer(:vector_float, :float, state)
def vector_boolean_replace(state), do: replaceer(:vector_boolean, :boolean, state)
def vector_string_replace(state), do: replaceer(:vector_string, :string, state)
@doc """
Takes a type and a state and replaces the first occurrence of the second
lit_type item with the first lit_type item in the top type vector.
"""
def replacefirster(type, lit_type, state) do
if Enum.empty?(state[type]) or Enum.empty?(Enum.drop(state[lit_type], 1)) do
state
else
replace_with = stack_ref(lit_type, 0, state)
to_replace = stack_ref(lit_type, 1, state)
vect = top_item(type, state)
index = Enum.find_index(vect, &(&1 == to_replace))
result = if index == nil, do: vect, else: List.replace_at(vect, index, replace_with)
result |> push_item(type, pop_item(lit_type, pop_item(lit_type, pop_item(type, state))))
end
end
def vector_integer_replacefirst(state), do: replacefirster(:vector_integer, :integer, state)
def vector_float_replacefirst(state), do: replacefirster(:vector_float, :float, state)
def vector_boolean_replacefirst(state), do: replacefirster(:vector_boolean, :boolean, state)
def vector_string_replacefirst(state), do: replacefirster(:vector_string, :string, state)
@doc """
Takes a type and a state and removes all occurrences of the first lit_type
item in the top type vector.
"""
def removeer(type, lit_type, state) do
if Enum.empty?(state[type]) or Enum.empty?(state[lit_type]) do
state
else
vect = top_item(type, state)
item = top_item(lit_type, state)
result = vect |> Enum.filter(&(&1 != item))
result |> push_item(type, pop_item(lit_type, pop_item(type, state)))
end
end
def vector_integer_remove(state), do: removeer(:vector_integer, :integer, state)
def vector_float_remove(state), do: removeer(:vector_float, :float, state)
def vector_boolean_remove(state), do: removeer(:vector_boolean, :boolean, state)
def vector_string_remove(state), do: removeer(:vector_string, :string, state)
@doc """
Takes a type and a state and iterates over the type vector using the code on
the exec stack. If the vector isn't empty, expands to:
((first vector) (top-item :exec state) (rest vector) exec_do*vector_type (top-item :exec state) rest_of_program)
"""
def iterateer(type, lit_type, instr, state) do
if Enum.empty?(state[type]) or Enum.empty?(state[:exec]) do
state
else
vect = top_item(type, state)
cond do
Enum.empty?(vect) -> pop_item(:exec, pop_item(type, state))
Enum.empty?(Enum.drop(vect, 1)) -> push_item(List.first(vect), lit_type, pop_item(type, state))
true -> push_item(List.first(vect), lit_type, push_item(top_item(:exec, state), :exec, push_item(Enum.drop(vect, 1), :exec, push_item(instr, :exec, pop_item(type, state)))))
end
end
end
def exec_do_star_vector_integer(state), do: iterateer(:vector_integer, :integer, :exec_do_star_vector_integer, state)
def exec_do_star_vector_float(state), do: iterateer(:vector_float, :float, :exec_do_star_vector_float, state)
def exec_do_star_vector_boolean(state), do: iterateer(:vector_boolean, :boolean, :exec_do_star_vector_boolean, state)
def exec_do_star_vector_string(state), do: iterateer(:vector_string, :string, :exec_do_star_vector_string, state)
end
| lib/instructions/vectors.ex | 0.76207 | 0.635024 | vectors.ex | starcoder |
defmodule Cardanoex.Key do
alias Cardanoex.Backend
@moduledoc """
The Key module helps you to work with public keys for a wallet.
"""
@spec get_account_public_key(String.t()) :: {:error, String.t()} | {:ok, String.t()}
@doc """
Retrieve the account public key of this wallet.
### Options
* `wallet_id` - hex based string. 40 characters
"""
def get_account_public_key(wallet_id) do
case Backend.get_account_public_key(wallet_id) do
{:ok, account_public_key} -> {:ok, account_public_key}
{:error, message} -> {:error, message}
end
end
@spec get_public_key(String.t(), String.t(), String.t()) ::
{:error, String.t()} | {:ok, String.t()}
@doc """
Return a public key for a given role and derivation index.
### Options
* `wallet_id` - hex based string. 40 characters
* `role` - utxo_external, utxo_internal or mutable_account
* `index` - Example: 1852H. An individual segment within a derivation path. The H suffix indicates a Hardened child private key, which means that children of this key cannot be derived from the public key. Indices without a H suffix are called Soft.
"""
def get_public_key(wallet_id, role, index) do
case Backend.get_public_key(wallet_id, role, index) do
{:ok, public_key} -> {:ok, public_key}
{:error, message} -> {:error, message}
end
end
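# Hypothetical usage (the wallet id below is illustrative, not a real wallet):
#
#   {:ok, public_key} =
#     Cardanoex.Key.get_public_key(
#       "2512a00e9653fe49a44a5886202e24d77eeb998f",
#       "utxo_external",
#       "1852H"
#     )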
@spec create_account_public_key(String.t(), map()) ::
{:error, String.t()} | {:ok, String.t()}
@doc """
Derive an account public key for any account index. For this key derivation to be possible, the wallet must have been created from mnemonic.
It is possible to use the optional `purpose` field to override that branch of the derivation path with different hardened derivation index.
If that field is omitted, the default purpose for Cardano wallets (`1852H`) will be used.
*Note:* Only Hardened indexes are supported by this endpoint.
### Options
* `wallet_id` - hex based string. 40 characters
* `passphrase` - the wallet's master passphrase (read from the `options` map)
* `index` - Example: 1852H. An individual segment within a derivation path. The `H` suffix indicates a Hardened child private key, which means that children of this key cannot be derived from the public key. Indices without a `H` suffix are called Soft.
* `format` - Enum: "extended" "non_extended". Determines whether extended (with chain code) or normal (without chain code) key is requested.
* `purpose` - An individual segment within a derivation path. The `H` suffix indicates a Hardened child private key, which means that children of this key cannot be derived from the public key. Indices without a H suffix are called Soft.
"""
def create_account_public_key(wallet_id, options) do
passphrase = options[:passphrase]
index = options[:index]
format = options[:format]
purpose = options[:purpose]
case Backend.create_account_public_key(wallet_id, passphrase, index, format, purpose) do
{:ok, public_key} -> {:ok, public_key}
{:error, message} -> {:error, message}
end
end
end
|
lib/key.ex
| 0.923212
| 0.451447
|
key.ex
|
starcoder
|
defmodule EdgeDB.Object do
@moduledoc """
An immutable representation of an object instance returned from a query.
`EdgeDB.Object` implements `Access` behavior to access properties by key.
```elixir
iex(1)> {:ok, pid} = EdgeDB.start_link()
iex(2)> %EdgeDB.Object{} = object =
iex(2)> EdgeDB.query_required_single!(pid, "
...(2)> SELECT schema::ObjectType{
...(2)> name
...(2)> }
...(2)> FILTER .name = 'std::Object'
...(2)> LIMIT 1
...(2)> ")
#EdgeDB.Object<name := "std::Object">
iex(3)> object[:name]
"std::Object"
iex(4)> object["name"]
"std::Object"
```
### Links and links properties
In EdgeDB, objects can have links to other objects or a set of objects.
You can use the same syntax to access links values as for object properties.
Links can also have their own properties (denoted as `@<link_prop_name>` in EdgeQL syntax).
You can use the same property name as in the query to access them from the links.
```elixir
iex(1)> {:ok, pid} = EdgeDB.start_link()
iex(2)> %EdgeDB.Object{} = object =
iex(2)> EdgeDB.query_required_single!(pid, "
...(2)> SELECT schema::Property {
...(2)> name,
...(2)> annotations: {
...(2)> name,
...(2)> @value
...(2)> }
...(2)> }
...(2)> FILTER .name = 'listen_port' AND .source.name = 'cfg::Config'
...(2)> LIMIT 1
...(2)> ")
#EdgeDB.Object<name := "listen_port", annotations := #EdgeDB.Set<{#EdgeDB.Object<name := "cfg::system", @value := "true">}>>
iex(3)> annotations = object[:annotations]
#EdgeDB.Set<{#EdgeDB.Object<name := "cfg::system", @value := "true">}>
iex(4)> link = Enum.at(annotations, 0)
#EdgeDB.Object<name := "cfg::system", @value := "true">
iex(5)> link["@value"]
"true"
```
"""
@behaviour Access
alias EdgeDB.Object.Field
defstruct [
:__fields__,
:__tid__,
:id
]
@typedoc """
UUID value.
"""
@type uuid() :: String.t()
@typedoc since: "0.2.0"
@typedoc """
Options for `EdgeDB.Object.fields/2`
Supported options:
* `:properties` - flag to include object properties in returning list. The default is `true`.
* `:links` - flag to include object links in returning list. The default is `true`.
* `:link_properties` - flag to include object link properties in returning list. The default is `true`.
* `:id` - flag to include implicit `:id` in returning list. The default is `false`.
* `:implicit` - flag to include implicit fields (like `:id` or `:__tid__`) in returning list.
The default is `false`.
"""
@type fields_option() ::
{:properties, boolean()}
| {:links, boolean()}
| {:link_properties, boolean()}
| {:id, boolean()}
| {:implicit, boolean()}
@typedoc since: "0.2.0"
@typedoc """
Options for `EdgeDB.Object.properties/2`
Supported options:
* `:id` - flag to include implicit `:id` in returning list. The default is `false`.
* `:implicit` - flag to include implicit properties (like `:id` or `:__tid__`) in returning list.
The default is `false`.
"""
@type properties_option() ::
{:id, boolean()}
| {:implicit, boolean()}
@typedoc """
An immutable representation of an object instance returned from a query.
Fields:
* `:id` - a unique ID of the object instance in the database.
"""
@type t() :: %{
__struct__: __MODULE__,
id: uuid() | nil
}
@typedoc since: "0.2.0"
@typedoc """
An immutable representation of an object instance returned from a query.
"""
@opaque object :: %__MODULE__{
id: uuid() | nil,
__tid__: uuid() | nil,
__fields__: list(Field.t())
}
defmodule Field do
@moduledoc false
defstruct [
:name,
:value,
:is_link,
:is_link_property,
:is_implicit
]
@type t() :: %__MODULE__{
name: String.t(),
value: any(),
is_link: boolean(),
is_link_property: boolean(),
is_implicit: boolean()
}
end
@doc since: "0.2.0"
@doc """
Get object field names (properties, links and link properties) as a list of strings.
See `t:fields_option/0` for supported options.
"""
@spec fields(object(), list(fields_option())) :: list(String.t())
def fields(%__MODULE__{} = object, opts \\ []) do
include_properties? = Keyword.get(opts, :properties, true)
include_links? = Keyword.get(opts, :links, true)
include_link_properties? = Keyword.get(opts, :link_properties, true)
include_id? = Keyword.get(opts, :id, false)
include_implicits? = Keyword.get(opts, :implicit, false)
object.__fields__
|> Enum.filter(fn
%Field{name: "id", is_implicit: true} ->
include_id? or include_implicits?
%Field{is_implicit: true} ->
include_implicits?
%Field{is_link: true} ->
include_links?
%Field{is_link_property: true} ->
include_link_properties?
_field ->
include_properties?
end)
|> Enum.map(fn %Field{name: name} ->
name
end)
end
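# Sketch, using the object queried in the moduledoc above (whether implicit
# fields such as "id" and "__tid__" are present depends on the protocol
# adding them to the result; the ordering shown is illustrative):
#
#   EdgeDB.Object.fields(object)
#   #=> ["name"]
#   EdgeDB.Object.fields(object, implicit: true)
#   #=> ["__tid__", "id", "name"]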
@doc since: "0.2.0"
@doc """
Get object property names as a list.
See `t:properties_option/0` for supported options.
"""
@spec properties(object(), list(properties_option())) :: list(String.t())
def properties(%__MODULE__{} = object, opts \\ []) do
fields(object, Keyword.merge(opts, links: false, link_properties: false))
end
@doc since: "0.2.0"
@doc """
Get object link names as a list.
"""
@spec links(object()) :: list(String.t())
def links(%__MODULE__{} = object) do
fields(object, properties: false, link_properties: false)
end
@doc since: "0.2.0"
@doc """
Get object link property names as a list.
"""
@spec link_properties(object()) :: list(String.t())
def link_properties(%__MODULE__{} = object) do
fields(object, properties: false, links: false)
end
@impl Access
def fetch(%__MODULE__{} = object, key) when is_atom(key) do
fetch(object, Atom.to_string(key))
end
@impl Access
def fetch(%__MODULE__{__fields__: fields}, key) do
case find_field(fields, key) do
nil ->
:error
field ->
{:ok, field.value}
end
end
@impl Access
def get_and_update(%__MODULE__{}, _key, _function) do
raise EdgeDB.Error.interface_error("objects can't be mutated")
end
@impl Access
def pop(%__MODULE__{}, _key) do
raise EdgeDB.Error.interface_error("objects can't be mutated")
end
defp find_field(fields, name_to_find) do
Enum.find(fields, fn %{name: name} ->
name == name_to_find
end)
end
end
defimpl Inspect, for: EdgeDB.Object do
import Inspect.Algebra
@impl Inspect
def inspect(%EdgeDB.Object{__fields__: fields}, opts) do
visible_fields =
Enum.reject(fields, fn %EdgeDB.Object.Field{is_implicit: implicit?} ->
implicit?
end)
fields_count = Enum.count(visible_fields)
elements_docs =
visible_fields
|> Enum.with_index(1)
|> Enum.map(fn
{%EdgeDB.Object.Field{name: name, value: value}, ^fields_count} ->
concat([name, " := ", Inspect.inspect(value, opts)])
{%EdgeDB.Object.Field{name: name, value: value}, _index} ->
concat([name, " := ", Inspect.inspect(value, opts), ", "])
end)
concat(["#EdgeDB.Object<", concat(elements_docs), ">"])
end
end
|
lib/edgedb/types/object.ex
| 0.914157
| 0.695592
|
object.ex
|
starcoder
|
defmodule Guachiman.Auth0.Sandbox.JWTToken do
@moduledoc """
JWT token sandbox to use in tests.
In your `config/test.exs` file add the following:
```
config :guachiman,
update_module: Guachiman.Auth0.Sandbox.JWTToken,
audience: "some_audience"
```
and then in your tests request a valid token using the function
`create_token/1` (without claims) or `create_token/2` (with claims) e.g:
```
alias Guachiman.Auth0.Sandbox.JWTToken
(...)
test "some test" do
{:ok, token, claims} =
JWTToken.create_token("some_resource_id", some_optional_claims)
(...)
end
```
### Build token on different steps
You can build a %JWTToken{} struct using `build_token/2`, and then add
claims with `put_claim/3`.
```
test "building token step by step" do
{:ok, token, claims} = build_token("some_resource_id")
|> add_claim("aud", "my_auth0_audience_api")
|> create()
assert claims["aud"] == "my_auth0_audience_api"
end
```
"""
@enforce_keys [:resource_id]
defstruct [:resource_id, claims: %{}, algo: "RS256", secret: nil]
@type t :: %__MODULE__{resource_id: binary(), claims: map(), algo: binary(), secret: term()}
### PUBLIC API
@doc """
Fetches the public key from the `"priv/jwt_keys"` folder.
"""
@spec fetch_public_key() :: struct()
def fetch_public_key do
filename = "#{:code.priv_dir(:guachiman)}/jwt_keys/jwtRS256_pub.pem"
JOSE.JWK.from_pem_file(filename)
end
@doc """
Fetches the private key from the `"priv/jwt_keys"` folder.
"""
@spec fetch_private_key() :: struct()
def fetch_private_key do
filename = "#{:code.priv_dir(:guachiman)}/jwt_keys/jwtRS256_key.pem"
JOSE.JWK.from_pem_file(filename)
end
@doc """
Fetches the audience from the configuration.
"""
@spec fetch_audience() :: binary() | list(binary())
def fetch_audience do
Application.get_env(:guachiman, :audience, nil)
end
@doc """
Creates a new token for the given `resource_id` with some optional `claims`
as the JWT's `"sub"` claim.
"""
@spec create_token(binary(), map()) ::
{:ok, Guardian.Token.token(), Guardian.Token.claims()}
| {:error, any()}
def create_token(resource_id, claims \\ %{}) do
# new_claims = Map.put_new(claims, "aud", fetch_audience())
# options = [allowed_algos: ["RS256"], secret: fetch_private_key()]
# Guachiman.Guardian.encode_and_sign(%{id: resource_id}, new_claims, options)
build_token(resource_id, claims)
|> create()
end
@doc """
Build a new JWTToken struct with the given resource id
and claims.
"""
@spec build_token(binary(), map()) :: __MODULE__.t()
def build_token(resource_id, claims \\ %{}) do
%__MODULE__{resource_id: resource_id, claims: claims}
end
@doc """
Puts a claim to the given JWTToken struct
"""
@spec put_claim(__MODULE__.t(), atom(), any()) :: __MODULE__.t()
def put_claim(%__MODULE__{claims: claims} = token, key, value) do
new_claims = Map.put(claims, key, value)
%{token | claims: new_claims}
end
@doc """
Defines the secret to encode and sign this token.
"""
@spec put_secret(__MODULE__.t(), term()) :: __MODULE__.t()
def put_secret(%__MODULE__{secret: nil} = token, secret),
do: token |> Map.put(:secret, secret)
@doc """
Puts token signing algorithm.
"""
@spec put_algorithm(__MODULE__.t(), term()) :: __MODULE__.t()
def put_algorithm(token, algo) do
token
|> Map.put(:algo, algo)
end
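# Sketch of the whole pipeline (audience and algorithm values are
# illustrative):
#
#   {:ok, token, claims} =
#     build_token("some_resource_id")
#     |> put_claim("aud", "my_auth0_audience_api")
#     |> put_algorithm("RS256")
#     |> create()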
@doc """
Encodes and signs a new token using `Guachiman.Guardian.encode_and_sign`
given a `JWTToken` struct.
"""
@spec create(__MODULE__.t()) ::
{:ok, Guardian.Token.token(), Guardian.Token.claims()}
| {:error, term()}
def create(token) do
%__MODULE__{resource_id: id, claims: claims, algo: algo, secret: secret} =
token
|> check_claims
|> check_secret
# Guardian expects a list as allowed_algos
options = [allowed_algos: [algo], secret: secret]
Guachiman.Guardian.encode_and_sign(%{id: id}, claims, options)
end
@doc """
Updates file in the specified `table_name`.
"""
@spec update_file(reference() | atom()) :: :ok | {:error, term()}
def update_file(table_name) do
with true <- :ets.insert(table_name, {:key, fetch_public_key()}) do
:ok
else
_ ->
{:error, "Cannot update key"}
end
end
### HELPERS
# when secret is not set, use `fetch_private_key`
defp check_secret(%__MODULE__{secret: nil} = token),
do: token |> Map.put(:secret, fetch_private_key())
defp check_secret(token), do: token
# check for default claims
defp check_claims(%__MODULE__{claims: claims} = token) do
# add `aud` when it hasn't been set
new_claims = Map.put_new(claims, "aud", fetch_audience())
token
|> Map.put(:claims, new_claims)
end
end
|
lib/guachiman/auth0/sandbox/jwt_token.ex
| 0.820326
| 0.864882
|
jwt_token.ex
|
starcoder
|
defmodule BattleCity.Position do
@moduledoc """
Position
"""
@type direction :: :up | :down | :left | :right
import Integer, only: [is_even: 1]
@size 12
@border 4
@atom 2
@width @border * 2
@xmin 0
@xmax @size * @atom
@ymin 0
@ymax @size * @atom
@xmid round((@xmin + @xmax) * 0.5)
@x_player_1 round(@xmid - @atom - 0.5 * @atom)
@rxmin @xmin * @width
@rxmax @xmax * @width
@rymin @ymin * @width
@rymax @ymax * @width
@x_range @xmin..@xmax
@y_range @ymin..@ymax
# @diretions [:up, :down, :left, :right]
@type speed :: 1..10
@type x :: unquote(@xmin)..unquote(@xmax)
@type y :: unquote(@ymin)..unquote(@ymax)
@type rx :: unquote(@rxmin)..unquote(@rxmax)
@type ry :: unquote(@rymin)..unquote(@rymax)
@type coordinate :: {x, y}
@type width :: 0..unquote(@atom)
@type height :: 0..unquote(@atom)
@typep x_or_y :: x | y
@typep rx_or_ry :: rx | ry
@type path :: [coordinate]
@type t :: %__MODULE__{
direction: direction(),
x: x(),
y: y(),
rx: rx(),
ry: ry(),
rt: rx_or_ry(),
t: x_or_y(),
path: path()
}
@keys [:direction, :x, :y, :rx, :ry]
@enforce_keys [:direction, :x, :y, :rx, :ry]
defstruct @keys ++ [:rt, :t, path: []]
@objects for x <- @x_range,
rem(x, 2) == 0,
y <- @y_range,
rem(y, 2) == 0,
do: {{x, y}, %{}},
into: %{}
defguard is_on_border(p)
when is_struct(p, __MODULE__) and
((p.rx == @rxmin and p.direction == :left) or
(p.rx == @rxmax and p.direction == :right) or
(p.ry == @rymin and p.direction == :up) or
(p.ry == @rymax and p.direction == :down))
defguard is_on_border_xy(p)
when is_struct(p, __MODULE__) and
((p.x == @xmin and p.direction == :left) or
(p.x == @xmax and p.direction == :right) or
(p.y == @ymin and p.direction == :up) or
(p.y == @ymax and p.direction == :down))
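# The guards above are intended for function heads. A hypothetical movement
# rule that stops at the map edge could look like:
#
#   def move(p, _speed) when is_on_border(p), do: p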
def objects, do: @objects
def size, do: @size
def atom, do: @atom
def speed, do: 1
def bullet_speed, do: @atom
def width, do: @width
def quadrant, do: @size + 1
def real_quadrant, do: quadrant() * @atom * @width
def real_width, do: @width * @atom
@spec init(map) :: t()
def init(map \\ %{})
def init(%{x: x} = map) when is_atom(x), do: init(%{map | x: fetch_x(x)})
def init(%{y: y} = map) when is_atom(y), do: init(%{map | y: fetch_y(y)})
def init(%{x: x, y: y} = map) do
map =
map
|> Map.merge(%{rx: x * @width, ry: y * @width})
|> Map.take(@keys)
struct!(__MODULE__, map) |> normalize()
end
defp fetch_x(:x_player_1), do: @x_player_1
defp fetch_x(:x_random_enemy), do: Enum.random([@xmin, @xmid, @xmax])
defp fetch_x(:x_random), do: Enum.random(@x_range)
defp fetch_y(:y_player_1), do: @ymax
defp fetch_y(:y_random_enemy), do: @ymin
defp fetch_y(:y_random), do: Enum.random(@y_range)
@spec destination(t(), speed) :: t()
def destination(%{direction: direction, x: x, y: y} = p, speed) do
rt = target(p, speed)
t = normalize_number(direction, div_even(rt + direction_border(direction)))
if direction in [:up, :down] do
%{p | ry: rt, rt: rt, t: t, path: for(i <- y..t, rem(i, 2) == 0, do: {x, i})}
else
%{p | rx: rt, rt: rt, t: t, path: for(i <- x..t, rem(i, 2) == 0, do: {i, y})}
end
end
@doc """
iex> #{__MODULE__}.div_even(#{@width * 3 + 0.1})
4
iex> #{__MODULE__}.div_even(#{@width * 3 - 0.1})
2
iex> #{__MODULE__}.div_even(#{@width * 4 + 0.1})
4
iex> #{__MODULE__}.div_even(#{@width * 5 + 0.1})
6
iex> #{__MODULE__}.div_even(#{@width * 5 - 0.1})
4
"""
@spec div_even(float) :: x_or_y
def div_even(rt), do: round(rt / @width / @atom) * @atom
defp direction_border(:up), do: -0.1
defp direction_border(:down), do: 0.1
defp direction_border(:right), do: 0.1
defp direction_border(:left), do: -0.1
@spec normalize(t()) :: t()
def normalize(%__MODULE__{x: x, y: y, direction: direction} = p) do
%{p | x: normalize_number(direction, x, :x), y: normalize_number(direction, y, :y)}
end
@doc """
iex> #{__MODULE__}.normalize_number(:up, 2, :x)
2
iex> #{__MODULE__}.normalize_number(:up, 3)
4
iex> #{__MODULE__}.normalize_number(:down, 3)
2
"""
@spec normalize_number(direction(), x_or_y(), :x | :y | nil) :: x_or_y()
def normalize_number(direction, n, x_or_y \\ nil)
def normalize_number(_, n, _) when is_even(n), do: n
def normalize_number(:right, n, :x), do: n + 1
def normalize_number(:right, n, nil), do: n + 1
def normalize_number(:up, n, :y), do: n + 1
def normalize_number(:up, n, nil), do: n + 1
def normalize_number(_, n, _), do: n - 1
@doc """
iex> #{__MODULE__}.target(%{direction: :right, rx: 0}, 2)
2
iex> #{__MODULE__}.target(%{direction: :left, rx: 1}, 2)
0
iex> #{__MODULE__}.target(%{direction: :up, ry: 3}, 2)
1
"""
@spec target(t(), speed) :: rx_or_ry
def target(%{direction: :right, rx: rx}, speed) when rx + speed <= @rxmax, do: rx + speed
def target(%{direction: :right}, _), do: @rxmax
def target(%{direction: :left, rx: rx}, speed) when rx - speed >= 0, do: rx - speed
def target(%{direction: :left}, _), do: 0
def target(%{direction: :up, ry: ry}, speed) when ry - speed >= 0, do: ry - speed
def target(%{direction: :up}, _), do: 0
def target(%{direction: :down, ry: ry}, speed) when ry + speed <= @rymax, do: ry + speed
def target(%{direction: :down}, _), do: @rymax
@spec target_path(t(), direction) :: path
def target_path(%{x: x, y: y}, :left),
do: for(i <- x..@xmin, i != x, rem(i, 2) == 0, do: {i, y})
def target_path(%{x: x, y: y}, :right),
do: for(i <- x..@xmax, i != x, rem(i, 2) == 0, do: {i, y})
def target_path(%{x: x, y: y}, :up),
do: for(i <- y..@ymin, i != y, rem(i, 2) == 0, do: {x, i})
def target_path(%{x: x, y: y}, :down),
do: for(i <- y..@ymax, i != y, rem(i, 2) == 0, do: {x, i})
end
|
lib/battle_city/position.ex
| 0.791499
| 0.610976
|
position.ex
|
starcoder
|
defmodule Sponsorly.DataCase do
@moduledoc """
This module defines the setup for tests requiring
access to the application's data layer.
You may define functions here to be used as helpers in
your tests.
Finally, if the test case interacts with the database,
we enable the SQL sandbox, so changes done to the database
are reverted at the end of every test. If you are using
PostgreSQL, you can even run database tests asynchronously
by setting `use Sponsorly.DataCase, async: true`, although
this option is not recommended for other databases.
"""
use ExUnit.CaseTemplate
using do
quote do
alias Sponsorly.Repo
import Ecto
import Ecto.Changeset
import Ecto.Query
import Sponsorly.DataCase
import Sponsorly.Factory
end
end
setup tags do
:ok = Ecto.Adapters.SQL.Sandbox.checkout(Sponsorly.Repo)
unless tags[:async] do
Ecto.Adapters.SQL.Sandbox.mode(Sponsorly.Repo, {:shared, self()})
end
:ok
end
@doc """
A helper that transforms changeset errors into a map of messages.
assert {:error, changeset} = Accounts.create_user(%{password: "short"})
assert "password is too short" in errors_on(changeset).password
assert %{password: ["password is too short"]} = errors_on(changeset)
"""
def errors_on(changeset) do
Ecto.Changeset.traverse_errors(changeset, fn {message, opts} ->
Regex.replace(~r"%{(\w+)}", message, fn _, key ->
opts |> Keyword.get(String.to_existing_atom(key), key) |> to_string()
end)
end)
end
@doc """
Helper to unload assocs that are not preloaded when fetching records, since
ex_machina sets them on `insert/1`, which makes them look "preloaded".
"""
def unload_assocs(data, assocs) do
Enum.reduce(assocs, data, fn
{field, fields}, acc when is_list(fields) ->
new_field_value =
Map.get(data, field)
|> unload_assocs(fields)
Map.put(acc, field, new_field_value)
field, acc ->
cardinality =
case Map.get(data, field) do
value when is_list(value) -> :many
_value -> :one
end
unloaded_field = %Ecto.Association.NotLoaded{
__field__: field,
__owner__: data.__struct__,
__cardinality__: cardinality
}
Map.put(acc, field, unloaded_field)
end)
end
@doc """
Helper to check timestamps and give a margin of error (1 second by default) due to latency.
Instead of doing
```
assert time1 == time2
```
Replace with
```
assert check_datetime(time1, time2)
```
"""
def check_datetime(expected, actual, margin \\ 1) do
DateTime.add(expected, margin, :second)
|> DateTime.compare(actual)
|> case do
:gt -> true
_ -> expected == actual
end
end
end
|
test/support/data_case.ex
| 0.837121
| 0.711732
|
data_case.ex
|
starcoder
|
defmodule Day20 do
@regex_all ~r/p=<(-?\d+),(-?\d+),(-?\d+)>, v=<(-?\d+),(-?\d+),(-?\d+)>, a=<(-?\d+),(-?\d+),(-?\d+)>/
@regex_accelerations ~r/p=<-?\d+,-?\d+,-?\d+>, v=<-?\d+,-?\d+,-?\d+>, a=<(-?\d+),(-?\d+),(-?\d+)>/
def solveA(filename) do
# Position at time t for a particle is:
# Pt = P0 + x * V0 + y * A0
# x is the velocity factor, x(t) = t
# y is the acceleration factor, y(t) = t + y(t - 1) and y(1) = 1
# so in the long term x < y
# The position at t for a particle is thus governed by its initial acceleration.
# We must find the particle with the lowest acceleration.
accels =
filename
|> File.stream!([:utf8], :line)
|> Enum.map(fn l -> parse_line(l, @regex_accelerations) end)
min_accel =
accels
|> Enum.min_by(fn {ax, ay, az} -> abs(ax) + abs(ay) + abs(az) end)
accels
|> Enum.find_index(fn a -> a == min_accel end)
end
def solveB(filename) do
particules =
filename
|> File.stream!([:utf8], :line)
|> Enum.map(fn l -> parse_line(l, @regex_all) end)
run(particules, false)
end
def parse_line(line, regex) do
regex
|> Regex.run(line, capture: :all_but_first)
|> Enum.map(&String.to_integer/1)
|> List.to_tuple
end
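# Illustrative parse of a puzzle input line:
#
#   parse_line("p=<1,2,3>, v=<4,5,6>, a=<7,8,9>", @regex_all)
#   #=> {1, 2, 3, 4, 5, 6, 7, 8, 9}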
def run(particules, true) do
# once all particles are diverging
length particules
end
def run(particules, false) do
IO.puts :stderr, Integer.to_string(length(particules))
a =
particules
|> Enum.map(&Tuple.to_list/1)
|> Enum.map(fn [px, py, pz | _] -> {px, py, pz} end)
if a != Enum.uniq a do
IO.puts :stderr, "some collisions"
end
# remove collided particles
current = remove_collisions(particules, MapSet.new, [])
# compute next step
next = step(current)
run(next, all_diverging?(true, current, next))
end
def remove_collisions([], _collisions, acc), do: acc
def remove_collisions(particules, collisions, acc) do
positions =
particules
|> Enum.map(&to_pos/1)
[pos | tail] = positions
[p | particules] = particules
{acc, collisions} =
if MapSet.member?(collisions, pos) or pos in tail do
{acc, MapSet.put(collisions, pos)}
else
{[ p | acc ], collisions}
end
remove_collisions(particules, collisions, acc)
end
def step(particules) do
particules
|> Enum.map(fn {px, py, pz, vx, vy, vz, ax, ay, az} ->
{px + vx + ax,
py + vy + ay,
pz + vz + az,
vx + ax,
vy + ay,
vz + az,
ax,
ay,
az}
end)
end
def all_diverging?(res, [], []), do: res
def all_diverging?(false, _prev, _current), do: false
def all_diverging?(true, prev, current) do
[pa_prev | tail_prev] = prev
[pa_current | tail_current] = current
diverges_one_many?(pa_prev, pa_current, tail_prev, tail_current)
|> all_diverging?(tail_prev, tail_current)
end
def diverges_one_many?(pa_prev, pa_current, tail_prev, tail_current) do
Enum.zip(tail_prev, tail_current)
|> Enum.any?(fn {pb_prev, pb_current} ->
diverges_one_one? pa_prev, pa_current, pb_prev, pb_current
end)
end
def diverges_one_one?(pa_prev, pa_current, pb_prev, pb_current) do
{pax, pay, paz} = to_pos pa_prev
{pbx, pby, pbz} = to_pos pb_prev
dist_prev = abs(pax - pbx) + abs(pay - pby) + abs(paz - pbz)
{pax, pay, paz} = to_pos pa_current
{pbx, pby, pbz} = to_pos pb_current
dist_next = abs(pax - pbx) + abs(pay - pby) + abs(paz - pbz)
dist_prev < dist_next
end
def print(particules) do
particules
|> Enum.map(&to_pos/1)
|> Enum.reduce("", fn {px, py, _}, acc ->
acc <> " " <> Integer.to_string(px) <> " " <> Integer.to_string(py)
end)
|> IO.puts
end
def to_pos({px, py, pz, _, _, _, _, _, _}) do
{px, py, pz}
end
end
|
2017/elixir/day20/lib/day20.ex
| 0.621196
| 0.730386
|
day20.ex
|
starcoder
|
defmodule MapRasterizer do
require Logger
def rasterize(map_data, grid_width, grid_height, resolution) do
polygons = Stream.map(map_data, &process_map_data/1)
|> Stream.filter(fn p -> p != nil end)
|> Enum.to_list
rasterize_grids(polygons, grid_width, grid_height, resolution)
end
defp process_map_data(%MapTileRenderer.MapData.Area{type: type, vertices: vertices, bbox: bbox}) do
%MapTileRenderer.Polygon{areas: [{type, vertices, bbox}], bbox: bbox}
end
defp process_map_data(%MapTileRenderer.MapData.MultiArea{areas: areas}) do
poly_bbox = Enum.reduce(areas, hd(areas).bbox, fn %{bbox: {area_min, area_max}}, poly_box ->
MapTileRenderer.Intersection.box_add_point(poly_box, area_min)
|> MapTileRenderer.Intersection.box_add_point(area_max)
end)
%MapTileRenderer.Polygon{bbox: poly_bbox, areas: Enum.map(areas, fn area -> {area.type, area.vertices, area.bbox} end)}
end
defp process_map_data(_) do
nil
end
defp rasterize_grids(polygons, width, height, resolution) do
start_lat = 57.6621
start_lon = 11.8934
(for col <- 0..4, row <- 0..4, do: {col, row})
|> Stream.chunk_every(8, 8, [])
|> Stream.map(fn chunk ->
Enum.map(chunk, fn {col, row} ->
Task.async(fn ->
Logger.info("Rasterizing tile #{col}, #{row}")
lat = MapTileRenderer.Coordinates.move_lat({start_lat, start_lon}, -resolution, height * row)
lon = MapTileRenderer.Coordinates.move_lon({start_lat, start_lon}, resolution, width * col)
{:ok, grid} = MapTileRenderer.TileGrid.start_link(width, height, lat, lon, resolution, :land)
grid_bbox = MapTileRenderer.TileGrid.get_bbox(grid)
Enum.filter(polygons, fn %{bbox: bbox} -> MapTileRenderer.Intersection.box_vs_box?(grid_bbox, bbox) end)
|> Enum.each(fn polygon ->
MapTileRenderer.TileGrid.render_polygon(grid, polygon)
end)
tiles = MapTileRenderer.TileGrid.get_tiles(grid)
MapTileRenderer.TileGrid.stop(grid)
tiles
end)
end)
|> Enum.map(&Task.await(&1, :infinity))
end)
|> Stream.concat
end
end
|
lib/map_tile_renderer/map_rasterizer.ex
| 0.721743
| 0.57818
|
map_rasterizer.ex
|
starcoder
|
defmodule Nebulex.Adapters.Partitioned do
@moduledoc ~S"""
Built-in adapter for partitioned cache topology.
## Overall features
* Partitioned cache topology (Sharding Distribution Model).
* Configurable primary storage adapter.
* Configurable keyslot to distribute the keys across the cluster members.
* Support for transactions via Erlang global name registration facility.
* Stats support relies on the primary storage adapter.
## Partitioned Cache Topology
There are several key points to consider about a partitioned cache:
* _**Partitioned**_: The data in a distributed cache is spread out over
all the servers in such a way that no two servers are responsible for
the same piece of cached data. This means that the size of the cache
and the processing power associated with the management of the cache
can grow linearly with the size of the cluster. Also, it means that
operations against data in the cache can be accomplished with a
"single hop," in other words, involving at most one other server.
* _**Load-Balanced**_: Since the data is spread out evenly over the
servers, the responsibility for managing the data is automatically
load-balanced across the cluster.
* _**Ownership**_: Exactly one node in the cluster is responsible for each
piece of data in the cache.
* _**Point-To-Point**_: The communication for the partitioned cache is all
point-to-point, enabling linear scalability.
* _**Location Transparency**_: Although the data is spread out across
cluster nodes, the exact same API is used to access the data, and the
same behavior is provided by each of the API methods. This is called
location transparency, which means that the developer does not have to
code based on the topology of the cache, since the API and its behavior
will be the same with a local cache, a replicated cache, or a distributed
cache.
* _**Failover**_: Failover of a distributed cache involves promoting backup
data to be primary storage. When a cluster node fails, all remaining
cluster nodes determine what data each holds in backup that the failed
cluster node was primarily responsible for when it died. That data becomes
the responsibility of whatever cluster node was the backup for the data.
However, this adapter does not provide a fault-tolerance implementation;
each piece of data is kept in a single node/machine (via sharding), so
if a node fails, the data kept by this node won't be available to the
rest of the cluster members.
> Based on **"Distributed Caching Essential Lessons"** by **<NAME>**
and [Coherence Partitioned Cache Service][oracle-pcs].
[oracle-pcs]: https://docs.oracle.com/cd/E13924_01/coh.340/e13819/partitionedcacheservice.htm
## Additional implementation notes
`:pg2` or `:pg` (>= OTP 23) is used under-the-hood by the adapter to manage
the cluster nodes. When the partitioned cache is started in a node, it creates
a group and joins it (the cache supervisor PID is joined to the group). Then,
when a function is invoked, the adapter picks a node from the group members,
and then the function is executed on that specific node. In the same way,
when a partitioned cache supervisor dies (the cache is stopped or killed for
some reason), the PID of that process is automatically removed from the PG
group; this is why it's recommended to use consistent hashing for distributing
the keys across the cluster nodes.
> **NOTE:** `pg2` will be replaced by `pg` in future, since the `pg2` module
is deprecated as of OTP 23 and scheduled for removal in OTP 24.
This adapter depends on a local cache adapter (the primary storage); it adds
a thin layer on top of it in order to distribute requests across a group
of nodes, where the local cache is assumed to be running already. However,
you don't need to define any additional cache module for the primary
storage; instead, the adapter initializes it automatically (it adds the
primary storage as part of the supervision tree) based on the given
options within the `primary_storage_adapter:` argument.
## Usage
When used, the Cache expects the `:otp_app` and `:adapter` as options.
The `:otp_app` should point to an OTP application that has the cache
configuration. For example:
defmodule MyApp.PartitionedCache do
use Nebulex.Cache,
otp_app: :my_app,
adapter: Nebulex.Adapters.Partitioned
end
Optionally, you can configure the desired primary storage adapter with the
option `:primary_storage_adapter`; defaults to `Nebulex.Adapters.Local`.
defmodule MyApp.PartitionedCache do
use Nebulex.Cache,
otp_app: :my_app,
adapter: Nebulex.Adapters.Partitioned,
primary_storage_adapter: Nebulex.Adapters.Local
end
Also, you can provide a custom keyslot function:
defmodule MyApp.PartitionedCache do
use Nebulex.Cache,
otp_app: :my_app,
adapter: Nebulex.Adapters.Partitioned,
primary_storage_adapter: Nebulex.Adapters.Local
@behaviour Nebulex.Adapter.Keyslot
@impl true
def hash_slot(key, range) do
key
|> :erlang.phash2()
|> :jchash.compute(range)
end
end
Where the configuration for the cache must be in your application environment,
usually defined in your `config/config.exs`:
config :my_app, MyApp.PartitionedCache,
keyslot: MyApp.PartitionedCache,
primary: [
gc_interval: 3_600_000,
backend: :shards
]
If your application was generated with a supervisor (by passing `--sup`
to `mix new`) you will have a `lib/my_app/application.ex` file containing
the application start callback that defines and starts your supervisor.
You just need to edit the `start/2` function to start the cache as a
supervisor on your application's supervisor:
def start(_type, _args) do
children = [
{MyApp.PartitionedCache, []},
...
]
See `Nebulex.Cache` for more information.
## Options
This adapter supports the following options and all of them can be given via
the cache configuration:
* `:primary` - The options that will be passed to the adapter associated
with the local primary storage. These options will depend on the local
adapter to use.
* `:keyslot` - Defines the module implementing `Nebulex.Adapter.Keyslot`
behaviour.
* `:task_supervisor_opts` - Start-time options passed to
`Task.Supervisor.start_link/1` when the adapter is initialized.
## Shared options
Almost all of the cache functions outlined in `Nebulex.Cache` module
accept the following options:
* `:timeout` - The time-out value in milliseconds for the command that
will be executed. If the timeout is exceeded, then the current process
will exit. For executing a command on remote nodes, this adapter uses
`Task.await/2` internally for receiving the result, so this option tells
how much time the adapter should wait for it. If the timeout is exceeded,
the task is shut down but the current process doesn't exit, only the
result associated with that task is skipped in the reduce phase.
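For example, a per-command timeout might be given as (illustrative):

    MyCache.get("mykey", timeout: 10_000)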
## Stats
This adapter depends on the primary storage adapter for the stats support.
Therefore, it is important to ensure the underlying primary storage adapter
does support stats, otherwise, you may get unexpected errors.
## Extended API
This adapter provides some additional convenience functions to the
`Nebulex.Cache` API.
Retrieving the primary storage or local cache module:
MyCache.__primary__()
Retrieving the cluster nodes associated with the given cache `name`:
MyCache.nodes()
Get a cluster node based on the given `key`:
MyCache.get_node("mykey")
Joining the cache to the cluster:
MyCache.join_cluster()
Leaving the cluster (removes the cache from the cluster):
MyCache.leave_cluster()
## Caveats of partitioned adapter
`c:Nebulex.Cache.get_and_update/3` and `c:Nebulex.Cache.update/4` both take
an anonymous function as a parameter. An anonymous function is compiled
into the module where it is created, which means it doesn't necessarily
exist on remote nodes. To ensure these callbacks work as expected, you must
provide functions from modules that exist on all nodes of the group.
"""
# Provide Cache Implementation
@behaviour Nebulex.Adapter
@behaviour Nebulex.Adapter.Entry
@behaviour Nebulex.Adapter.Queryable
@behaviour Nebulex.Adapter.Stats
# Inherit default transaction implementation
use Nebulex.Adapter.Transaction
# Inherit default persistence implementation
use Nebulex.Adapter.Persistence
# Inherit default keyslot implementation
use Nebulex.Adapter.Keyslot
import Nebulex.Adapter
import Nebulex.Helpers
alias Nebulex.Cache.Cluster
alias Nebulex.RPC
## Nebulex.Adapter
@impl true
defmacro __before_compile__(env) do
otp_app = Module.get_attribute(env.module, :otp_app)
opts = Module.get_attribute(env.module, :opts)
primary = Keyword.get(opts, :primary_storage_adapter, Nebulex.Adapters.Local)
quote do
defmodule Primary do
@moduledoc """
This is the cache for the primary storage.
"""
use Nebulex.Cache,
otp_app: unquote(otp_app),
adapter: unquote(primary)
end
@doc """
A convenience function for getting the primary storage cache.
"""
def __primary__, do: Primary
@doc """
A convenience function for getting the cluster nodes.
"""
def nodes do
Cluster.get_nodes(get_dynamic_cache())
end
@doc """
A convenience function to get the node of the given `key`.
"""
def get_node(key) do
with_meta(get_dynamic_cache(), fn _adapter, %{name: name, keyslot: keyslot} ->
Cluster.get_node(name, key, keyslot)
end)
end
@doc """
A convenience function for joining the cache to the cluster.
"""
def join_cluster do
Cluster.join(get_dynamic_cache())
end
@doc """
A convenience function for removing the cache from the cluster.
"""
def leave_cluster do
Cluster.leave(get_dynamic_cache())
end
end
end
@impl true
def init(opts) do
# Required options
telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix)
telemetry = Keyword.fetch!(opts, :telemetry)
cache = Keyword.fetch!(opts, :cache)
name = opts[:name] || cache
# Maybe use stats
stats = get_boolean_option(opts, :stats)
# Primary cache options
primary_opts =
Keyword.merge(
[telemetry_prefix: telemetry_prefix ++ [:primary], telemetry: telemetry, stats: stats],
Keyword.get(opts, :primary, [])
)
# Maybe put a name to primary storage
primary_opts =
if opts[:name],
do: [name: normalize_module_name([name, Primary])] ++ primary_opts,
else: primary_opts
# Keyslot module for selecting nodes
keyslot =
opts
|> Keyword.get(:keyslot, __MODULE__)
|> assert_behaviour(Nebulex.Adapter.Keyslot, "keyslot")
# Maybe task supervisor for distributed tasks
{task_sup_name, children} = sup_child_spec(name, opts)
# Prepare metadata
adapter_meta = %{
telemetry_prefix: telemetry_prefix,
telemetry: telemetry,
name: name,
primary_name: primary_opts[:name],
task_sup: task_sup_name,
keyslot: keyslot,
stats: stats
}
# Prepare child_spec
child_spec =
Nebulex.Adapters.Supervisor.child_spec(
name: normalize_module_name([name, Supervisor]),
strategy: :rest_for_one,
# children: [{cache.__primary__, primary_opts}] ++ children,
children: [
{cache.__primary__, primary_opts},
{__MODULE__.Bootstrap, Map.put(adapter_meta, :cache, cache)}
| children
]
)
{:ok, child_spec, adapter_meta}
end
if Code.ensure_loaded?(:erpc) do
defp sup_child_spec(_name, _opts) do
{nil, []}
end
else
defp sup_child_spec(name, opts) do
# task supervisor to execute parallel and/or remote commands
task_sup_name = normalize_module_name([name, TaskSupervisor])
task_sup_opts = Keyword.get(opts, :task_supervisor_opts, [])
children = [
{Task.Supervisor, [name: task_sup_name] ++ task_sup_opts}
]
{task_sup_name, children}
end
end
## Nebulex.Adapter.Entry
@impl true
defspan get(adapter_meta, key, opts) do
call(adapter_meta, key, :get, [key, opts], opts)
end
@impl true
defspan get_all(adapter_meta, keys, opts) do
map_reduce(
keys,
adapter_meta,
:get_all,
[opts],
Keyword.get(opts, :timeout),
{
%{},
fn
{:ok, res}, _, acc when is_map(res) ->
Map.merge(acc, res)
_, _, acc ->
acc
end
}
)
end
@impl true
defspan put(adapter_meta, key, value, _ttl, on_write, opts) do
case on_write do
:put ->
:ok = call(adapter_meta, key, :put, [key, value, opts], opts)
true
:put_new ->
call(adapter_meta, key, :put_new, [key, value, opts], opts)
:replace ->
call(adapter_meta, key, :replace, [key, value, opts], opts)
end
end
@impl true
defspan put_all(adapter_meta, entries, _ttl, on_write, opts) do
case on_write do
:put ->
do_put_all(:put_all, adapter_meta, entries, opts)
:put_new ->
do_put_all(:put_new_all, adapter_meta, entries, opts)
end
end
def do_put_all(action, adapter_meta, entries, opts) do
reducer = {
{true, []},
fn
{:ok, :ok}, {_, {_, _, [_, _, [kv, _]]}}, {bool, acc} ->
{bool, Enum.reduce(kv, acc, &[elem(&1, 0) | &2])}
{:ok, true}, {_, {_, _, [_, _, [kv, _]]}}, {bool, acc} ->
{bool, Enum.reduce(kv, acc, &[elem(&1, 0) | &2])}
{:ok, false}, _, {_, acc} ->
{false, acc}
{:error, _}, _, {_, acc} ->
{false, acc}
end
}
entries
|> map_reduce(
adapter_meta,
action,
[opts],
Keyword.get(opts, :timeout),
reducer
)
|> case do
{true, _} ->
true
{false, keys} ->
:ok = Enum.each(keys, &delete(adapter_meta, &1, []))
action == :put_all
end
end
@impl true
defspan delete(adapter_meta, key, opts) do
call(adapter_meta, key, :delete, [key, opts], opts)
end
@impl true
defspan take(adapter_meta, key, opts) do
call(adapter_meta, key, :take, [key, opts], opts)
end
@impl true
defspan has_key?(adapter_meta, key) do
call(adapter_meta, key, :has_key?, [key])
end
@impl true
defspan update_counter(adapter_meta, key, amount, _ttl, _default, opts) do
call(adapter_meta, key, :incr, [key, amount, opts], opts)
end
@impl true
defspan ttl(adapter_meta, key) do
call(adapter_meta, key, :ttl, [key])
end
@impl true
defspan expire(adapter_meta, key, ttl) do
call(adapter_meta, key, :expire, [key, ttl])
end
@impl true
defspan touch(adapter_meta, key) do
call(adapter_meta, key, :touch, [key])
end
## Nebulex.Adapter.Queryable
@impl true
defspan execute(adapter_meta, operation, query, opts) do
reducer =
case operation do
:all -> &List.flatten/1
_ -> &Enum.sum/1
end
adapter_meta.task_sup
|> RPC.multi_call(
Cluster.get_nodes(adapter_meta.name),
__MODULE__,
:with_dynamic_cache,
[adapter_meta, operation, [query, opts]],
opts
)
|> handle_rpc_multi_call(operation, reducer)
end
@impl true
defspan stream(adapter_meta, query, opts) do
Stream.resource(
fn ->
Cluster.get_nodes(adapter_meta.name)
end,
fn
[] ->
{:halt, []}
[node | nodes] ->
elements =
rpc_call(
adapter_meta.task_sup,
node,
__MODULE__,
:eval_stream,
[adapter_meta, query, opts],
opts
)
{elements, nodes}
end,
& &1
)
end
## Nebulex.Adapter.Persistence
@impl true
defspan dump(adapter_meta, path, opts) do
super(adapter_meta, path, opts)
end
@impl true
defspan load(adapter_meta, path, opts) do
super(adapter_meta, path, opts)
end
## Nebulex.Adapter.Transaction
@impl true
defspan transaction(adapter_meta, opts, fun) do
super(adapter_meta, Keyword.put(opts, :nodes, Cluster.get_nodes(adapter_meta.name)), fun)
end
@impl true
defspan in_transaction?(adapter_meta) do
super(adapter_meta)
end
## Nebulex.Adapter.Stats
@impl true
defspan stats(adapter_meta) do
with_dynamic_cache(adapter_meta, :stats, [])
end
## Helpers
@doc """
Helper function to use dynamic cache for internal primary cache storage
when needed.
"""
def with_dynamic_cache(%{cache: cache, primary_name: nil}, action, args) do
apply(cache.__primary__, action, args)
end
def with_dynamic_cache(%{cache: cache, primary_name: primary_name}, action, args) do
cache.__primary__.with_dynamic_cache(primary_name, fn ->
apply(cache.__primary__, action, args)
end)
end
@doc """
Helper to perform `stream/3` locally.
"""
def eval_stream(meta, query, opts) do
meta
|> with_dynamic_cache(:stream, [query, opts])
|> Enum.to_list()
end
## Private Functions
defp get_node(%{name: name, keyslot: keyslot}, key) do
Cluster.get_node(name, key, keyslot)
end
defp call(adapter_meta, key, action, args, opts \\ []) do
adapter_meta
|> get_node(key)
|> rpc_call(adapter_meta, action, args, opts)
end
defp rpc_call(node, %{task_sup: task_sup} = meta, fun, args, opts) do
rpc_call(task_sup, node, __MODULE__, :with_dynamic_cache, [meta, fun, args], opts)
end
if Code.ensure_loaded?(:erpc) do
defp rpc_call(supervisor, node, mod, fun, args, opts) do
RPC.call(supervisor, node, mod, fun, args, opts[:timeout] || 5000)
end
else
defp rpc_call(supervisor, node, mod, fun, args, opts) do
case RPC.call(supervisor, node, mod, fun, args, opts[:timeout] || 5000) do
{:badrpc, remote_ex} ->
raise remote_ex
response ->
response
end
end
end
defp group_keys_by_node(enum, adapter_meta) do
Enum.reduce(enum, %{}, fn
{key, _} = entry, acc ->
node = get_node(adapter_meta, key)
Map.put(acc, node, [entry | Map.get(acc, node, [])])
key, acc ->
node = get_node(adapter_meta, key)
Map.put(acc, node, [key | Map.get(acc, node, [])])
end)
end
defp map_reduce(
enum,
%{task_sup: task_sup} = meta,
action,
args,
timeout,
reducer
) do
groups =
enum
|> group_keys_by_node(meta)
|> Enum.map(fn {node, group} ->
{node, {__MODULE__, :with_dynamic_cache, [meta, action, [group | args]]}}
end)
RPC.multi_call(task_sup, groups, timeout: timeout, reducer: reducer)
end
defp handle_rpc_multi_call({res, []}, _action, fun) do
fun.(res)
end
defp handle_rpc_multi_call({responses, errors}, action, _) do
raise Nebulex.RPCMultiCallError, action: action, responses: responses, errors: errors
end
end
defmodule Nebulex.Adapters.Partitioned.Bootstrap do
@moduledoc false
use GenServer
import Nebulex.Helpers
alias Nebulex.Cache.Cluster
## API
@doc false
def start_link(%{name: name} = adapter_meta) do
GenServer.start_link(
__MODULE__,
adapter_meta,
name: normalize_module_name([name, Bootstrap])
)
end
## GenServer Callbacks
@impl true
def init(adapter_meta) do
# Trap exit signals to run cleanup job
_ = Process.flag(:trap_exit, true)
# Ensure joining the cluster only when the cache supervision tree is started
:ok = Cluster.join(adapter_meta.name)
# Start bootstrap process
{:ok, adapter_meta}
end
@impl true
def terminate(_reason, adapter_meta) do
# Ensure leaving the cluster when the cache stops
:ok = Cluster.leave(adapter_meta.name)
end
end
|
lib/nebulex/adapters/partitioned.ex
| 0.904531
| 0.732185
|
partitioned.ex
|
starcoder
|
defmodule Madari.Accounts.UserToken do
use Ecto.Schema
import Ecto.Query
@hash_algorithm :sha256
@rand_size 32
# It is very important to keep the reset password token expiry short,
# since someone with access to the email may take over the account.
@reset_password_validity_in_days 1
@confirm_validity_in_days 7
@change_email_validity_in_days 7
@session_validity_in_days 60
@primary_key {:id, :binary_id, autogenerate: true}
@foreign_key_type :binary_id
schema "users_tokens" do
field :token, :binary
field :context, :string
field :sent_to, :string
belongs_to :user, Madari.Accounts.User
timestamps(updated_at: false)
end
@doc """
Generates a token that will be stored in a signed place,
such as session or cookie. As they are signed, those
tokens do not need to be hashed.
The reason why we store session tokens in the database, even
though Phoenix already provides a session cookie, is because
Phoenix' default session cookies are not persisted, they are
simply signed and potentially encrypted. This means they are
valid indefinitely, unless you change the signing/encryption
salt.
Therefore, storing them allows individual user
sessions to be expired. The token system can also be extended
to store additional data, such as the device used for logging in.
You could then use this information to display all valid sessions
and devices in the UI and allow users to explicitly expire any
session they deem invalid.
"""
def build_session_token(user) do
token = :crypto.strong_rand_bytes(@rand_size)
{token, %Madari.Accounts.UserToken{token: token, context: "session", user_id: user.id}}
end
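# Sketch of typical usage (persisting the token and the session handling are
# the caller's responsibility; `Madari.Repo` is an assumption here):
#
#   {token, user_token} = UserToken.build_session_token(user)
#   Madari.Repo.insert!(user_token)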
@doc """
Checks if the token is valid and returns its underlying lookup query.
The query returns the user found by the token, if any.
The token is valid if it matches the value in the database and it has
not expired (after @session_validity_in_days).
"""
def verify_session_token_query(token) do
query =
from token in token_and_context_query(token, "session"),
join: user in assoc(token, :user),
where: token.inserted_at > ago(@session_validity_in_days, "day"),
select: user
{:ok, query}
end
@doc """
Builds a token and its hash to be delivered to the user's email.
The non-hashed token is sent to the user email while the
hashed part is stored in the database. The original token cannot be reconstructed,
which means anyone with read-only access to the database cannot directly use
the token in the application to gain access. Furthermore, if the user changes
their email in the system, the tokens sent to the previous email are no longer
valid.
Users can easily adapt the existing code to provide other types of delivery methods,
for example, by phone numbers.
"""
def build_email_token(user, context) do
build_hashed_token(user, context, user.email)
end
defp build_hashed_token(user, context, sent_to) do
token = :crypto.strong_rand_bytes(@rand_size)
hashed_token = :crypto.hash(@hash_algorithm, token)
{Base.url_encode64(token, padding: false),
%Madari.Accounts.UserToken{
token: hashed_token,
context: context,
sent_to: sent_to,
user_id: user.id
}}
end
@doc """
Checks if the token is valid and returns its underlying lookup query.
The query returns the user found by the token, if any.
The given token is valid if it matches its hashed counterpart in the
database and the user email has not changed. This function also checks
if the token is being used within a certain period, depending on the
context. The default contexts supported by this function are either
"confirm", for account confirmation emails, and "reset_password",
for resetting the password. For verifying requests to change the email,
see `verify_change_email_token_query/2`.
"""
def verify_email_token_query(token, context) do
case Base.url_decode64(token, padding: false) do
{:ok, decoded_token} ->
hashed_token = :crypto.hash(@hash_algorithm, decoded_token)
days = days_for_context(context)
query =
from token in token_and_context_query(hashed_token, context),
join: user in assoc(token, :user),
where: token.inserted_at > ago(^days, "day") and token.sent_to == user.email,
select: user
{:ok, query}
:error ->
:error
end
end
defp days_for_context("confirm"), do: @confirm_validity_in_days
defp days_for_context("reset_password"), do: @reset_password_validity_in_days
@doc """
Checks if the token is valid and returns its underlying lookup query.
The query returns the user found by the token, if any.
This is used to validate requests to change the user
email. It is different from `verify_email_token_query/2` precisely because
`verify_email_token_query/2` validates the email has not changed, which is
the starting point by this function.
The given token is valid if it matches its hashed counterpart in the
database and if it has not expired (after @change_email_validity_in_days).
The context must always start with "change:".
"""
def verify_change_email_token_query(token, "change:" <> _ = context) do
case Base.url_decode64(token, padding: false) do
{:ok, decoded_token} ->
hashed_token = :crypto.hash(@hash_algorithm, decoded_token)
query =
from token in token_and_context_query(hashed_token, context),
where: token.inserted_at > ago(@change_email_validity_in_days, "day")
{:ok, query}
:error ->
:error
end
end
@doc """
Returns the token struct for the given token value and context.
"""
def token_and_context_query(token, context) do
from Madari.Accounts.UserToken, where: [token: ^token, context: ^context]
end
@doc """
Gets all tokens for the given user for the given contexts.
"""
def user_and_contexts_query(user, :all) do
from t in Madari.Accounts.UserToken, where: t.user_id == ^user.id
end
def user_and_contexts_query(user, [_ | _] = contexts) do
from t in Madari.Accounts.UserToken, where: t.user_id == ^user.id and t.context in ^contexts
end
end
|
lib/madari/accounts/user_token.ex
| 0.722625
| 0.488344
|
user_token.ex
|
starcoder
|
defmodule AWS.FSx do
@moduledoc """
Amazon FSx is a fully managed service that makes it easy for storage and
application administrators to launch and use shared file storage.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2018-03-01",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "fsx",
global?: false,
protocol: "json",
service_id: "FSx",
signature_version: "v4",
signing_name: "fsx",
target_prefix: "AWSSimbaAPIService_v20180301"
}
end
@doc """
Use this action to associate one or more Domain Name Server (DNS) aliases with
an existing Amazon FSx for Windows File Server file system.
A file system can have a maximum of 50 DNS aliases associated with it at any one
time. If you try to associate a DNS alias that is already associated with the
file system, FSx takes no action on that alias in the request. For more
information, see [Working with DNS Aliases](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html)
and [Walkthrough 5: Using DNS aliases to access your file system](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/walkthrough05-file-system-custom-CNAME.html),
including additional steps you must take to be able to access your file system
using a DNS alias.
The system response shows the DNS aliases that Amazon FSx is attempting to
associate with the file system. Use the API operation to monitor the status of
the aliases Amazon FSx is associating with the file system.
"""
def associate_file_system_aliases(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AssociateFileSystemAliases", input, options)
end
@doc """
Cancels an existing Amazon FSx for Lustre data repository task if that task is
in either the `PENDING` or `EXECUTING` state.
When you cancel a task, Amazon FSx does the following.
* Any files that FSx has already exported are not reverted.
* FSx continues to export any files that are "in-flight" when the
cancel operation is received.
* FSx does not export any files that have not yet been exported.
"""
def cancel_data_repository_task(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CancelDataRepositoryTask", input, options)
end
@doc """
Copies an existing backup within the same Amazon Web Services account to another
Amazon Web Services Region (cross-Region copy) or within the same Amazon Web
Services Region (in-Region copy).
You can have up to five backup copy requests in progress to a single destination
Region per account.
You can use cross-Region backup copies for cross-Region disaster recovery. You
can periodically take backups and copy them to another Region so that in the
event of a disaster in the primary Region, you can restore from backup and
recover availability quickly in the other Region. You can make cross-Region
copies only within your Amazon Web Services partition. A partition is a grouping
of Regions. Amazon Web Services currently has three partitions: `aws` (Standard
Regions), `aws-cn` (China Regions), and `aws-us-gov` (Amazon Web Services
GovCloud [US] Regions). You can also use backup copies to clone your file dataset to another Region or
within the same Region.
You can use the `SourceRegion` parameter to specify the Amazon Web Services
Region from which the backup will be copied. For example, if you make the call
from the `us-west-1` Region and want to copy a backup from the `us-east-2`
Region, you specify `us-east-2` in the `SourceRegion` parameter to make a
cross-Region copy. If you don't specify a Region, the backup copy is created in
the same Region where the request is sent from (in-Region copy).
For more information about creating backup copies, see [ Copying
backups](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/using-backups.html#copy-backups)
in the *Amazon FSx for Windows User Guide*, [Copying backups](https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html#copy-backups)
in the *Amazon FSx for Lustre User Guide*, and [Copying backups](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/using-backups.html#copy-backups)
in the *Amazon FSx for OpenZFS User Guide*.
"""
def copy_backup(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CopyBackup", input, options)
end
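# Illustrative call (the client struct fields follow the AWS.Client pattern
# used by this library; all values below are placeholders):
#
#   client = %AWS.Client{
#     access_key_id: "AKIA...",
#     secret_access_key: "...",
#     region: "us-east-1"
#   }
#   AWS.FSx.copy_backup(client, %{"SourceBackupId" => "backup-0123456789abcdef0"})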
@doc """
Creates a backup of an existing Amazon FSx for Windows File Server file system,
Amazon FSx for Lustre file system, Amazon FSx for NetApp ONTAP volume, or Amazon
FSx for OpenZFS file system.
We recommend creating regular backups so that you can restore a file system or
volume from a backup if an issue arises with the original file system or volume.
For Amazon FSx for Lustre file systems, you can create a backup only for file
systems that have the following configuration:
* A Persistent deployment type
* Are *not* linked to a data repository
For more information about backups, see the following:
* For Amazon FSx for Lustre, see [Working with FSx for Lustre backups](https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html).
* For Amazon FSx for Windows, see [Working with FSx for Windows backups](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/using-backups.html).
* For Amazon FSx for NetApp ONTAP, see [Working with FSx for NetApp ONTAP
backups](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/using-backups.html).
* For Amazon FSx for OpenZFS, see [Working with FSx for OpenZFS backups](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/using-backups.html).
If a backup with the specified client request token exists and the parameters
match, this operation returns the description of the existing backup. If a
backup with the specified client request token exists and the parameters don't
match, this operation returns `IncompatibleParameterError`. If a backup with the
specified client request token doesn't exist, `CreateBackup` does the following:
* Creates a new Amazon FSx backup with an assigned ID, and an
initial lifecycle state of `CREATING`.
* Returns the description of the backup.
By using the idempotent operation, you can retry a `CreateBackup` operation
without the risk of creating an extra backup. This approach can be useful when
an initial call fails in a way that makes it unclear whether a backup was
created. If you use the same client request token and the initial call created a
backup, the operation returns a successful result because all the parameters are
the same.
The `CreateBackup` operation returns while the backup's lifecycle state is still
`CREATING`. You can check the backup creation status by calling the
[DescribeBackups](https://docs.aws.amazon.com/fsx/latest/APIReference/API_DescribeBackups.html)
operation, which returns the backup state along with other information.
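## Example
A hedged sketch of an idempotent backup creation, assuming the aws-elixir
`AWS.FSx` module and a `client` built with `AWS.Client.create/3`; the IDs and
token are placeholders:
    # Reusing the same ClientRequestToken makes retries safe: a retry either
    # returns the already-created backup or an IncompatibleParameterError.
    {:ok, %{"Backup" => _backup}, _http_response} =
      AWS.FSx.create_backup(client, %{
        "FileSystemId" => "fs-0123456789abcdef0",
        "ClientRequestToken" => "create-backup-attempt-1"
      })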
"""
def create_backup(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateBackup", input, options)
end
@doc """
Creates an Amazon FSx for Lustre data repository association (DRA).
A data repository association is a link between a directory on the file system
and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository
associations on a file system. Data repository associations are supported only
for file systems with the `Persistent_2` deployment type.
Each data repository association must have a unique Amazon FSx file system
directory and a unique S3 bucket or prefix associated with it. You can configure
a data repository association for automatic import only, for automatic export
only, or for both. To learn more about linking a data repository to your file
system, see [Linking your file system to an S3 bucket](https://docs.aws.amazon.com/fsx/latest/LustreGuide/create-dra-linked-data-repo.html).
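## Example
A sketch of linking a file-system directory to an S3 prefix, under the same
aws-elixir `AWS.FSx` client assumption; all identifiers are placeholders:
    {:ok, _result, _http_response} =
      AWS.FSx.create_data_repository_association(client, %{
        "FileSystemId" => "fs-0123456789abcdef0",
        "FileSystemPath" => "/ns1",
        "DataRepositoryPath" => "s3://amzn-s3-demo-bucket/prefix/",
        "S3" => %{
          "AutoImportPolicy" => %{"Events" => ["NEW", "CHANGED", "DELETED"]},
          "AutoExportPolicy" => %{"Events" => ["NEW", "CHANGED", "DELETED"]}
        }
      })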
"""
def create_data_repository_association(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDataRepositoryAssociation", input, options)
end
@doc """
Creates an Amazon FSx for Lustre data repository task.
You use data repository tasks to perform bulk operations between your Amazon FSx
file system and its linked data repositories. An example of a data repository
task is exporting any data and metadata changes, including POSIX metadata, to
files, directories, and symbolic links (symlinks) from your FSx file system to a
linked data repository. A `CreateDataRepositoryTask` operation will fail if a
data repository is not linked to the FSx file system. To learn more about data
repository tasks, see [Data Repository Tasks](https://docs.aws.amazon.com/fsx/latest/LustreGuide/data-repository-tasks.html).
To learn more about linking a data repository to your file system, see [Linking your file system to an S3
bucket](https://docs.aws.amazon.com/fsx/latest/LustreGuide/create-dra-linked-data-repo.html).
"""
def create_data_repository_task(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDataRepositoryTask", input, options)
end
@doc """
Creates a new, empty Amazon FSx file system.
You can create the following supported Amazon FSx file systems using the
`CreateFileSystem` API operation:
* Amazon FSx for Lustre
* Amazon FSx for NetApp ONTAP
* Amazon FSx for OpenZFS
* Amazon FSx for Windows File Server
This operation requires a client request token in the request that Amazon FSx
uses to ensure idempotent creation. This means that calling the operation
multiple times with the same client request token has no effect. By using the
idempotent operation, you can retry a `CreateFileSystem` operation without the
risk of creating an extra file system. This approach can be useful when an
initial call fails in a way that makes it unclear whether a file system was
created. Examples are if a transport level timeout occurred, or your connection
was reset. If you use the same client request token and the initial call created
a file system, the client receives success as long as the parameters are the
same.
If a file system with the specified client request token exists and the
parameters match, `CreateFileSystem` returns the description of the existing
file system. If a file system with the specified client request token exists and
the parameters don't match, this call returns `IncompatibleParameterError`. If a
file system with the specified client request token doesn't exist,
`CreateFileSystem` does the following:
* Creates a new, empty Amazon FSx file system with an assigned ID,
and an initial lifecycle state of `CREATING`.
* Returns the description of the file system.
The `CreateFileSystem` call returns while the file system's lifecycle state is
still `CREATING`. You can check the file-system creation status by calling the
[DescribeFileSystems](https://docs.aws.amazon.com/fsx/latest/APIReference/API_DescribeFileSystems.html)
operation, which returns the file system state along with other information.
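## Example
A sketch of an idempotent Lustre file-system creation (same aws-elixir
`AWS.FSx` client assumption; the subnet ID and token are placeholders):
    {:ok, %{"FileSystem" => _file_system}, _http_response} =
      AWS.FSx.create_file_system(client, %{
        "FileSystemType" => "LUSTRE",
        "StorageCapacity" => 1200,
        "SubnetIds" => ["subnet-0123456789abcdef0"],
        # Retrying with the same token cannot create a second file system.
        "ClientRequestToken" => "create-fs-attempt-1",
        "LustreConfiguration" => %{
          "DeploymentType" => "PERSISTENT_2",
          "PerUnitStorageThroughput" => 125
        }
      })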
"""
def create_file_system(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateFileSystem", input, options)
end
@doc """
Creates a new Amazon FSx for Lustre, Amazon FSx for Windows File Server, or
Amazon FSx for OpenZFS file system from an existing Amazon FSx backup.
If a file system with the specified client request token exists and the
parameters match, this operation returns the description of the file system. If
a file system with the specified client request token exists and the
parameters don't match, this call returns `IncompatibleParameterError`. If a
file system with the specified client request token doesn't exist, this
operation does the following:
* Creates a new Amazon FSx file system from backup with an assigned
ID, and an initial lifecycle state of `CREATING`.
* Returns the description of the file system.
Parameters like the Active Directory, default share name, automatic backup, and
backup settings default to the parameters of the file system that was backed up,
unless overridden. You can explicitly supply other settings.
By using the idempotent operation, you can retry a `CreateFileSystemFromBackup`
call without the risk of creating an extra file system. This approach can be
useful when an initial call fails in a way that makes it unclear whether a file
system was created. Examples are if a transport level timeout occurred, or your
connection was reset. If you use the same client request token and the initial
call created a file system, the client receives a success message as long as the
parameters are the same.
The `CreateFileSystemFromBackup` call returns while the file system's lifecycle
state is still `CREATING`. You can check the file-system creation status by
calling the
[DescribeFileSystems](https://docs.aws.amazon.com/fsx/latest/APIReference/API_DescribeFileSystems.html)
operation, which returns the file system state along with other information.
"""
def create_file_system_from_backup(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateFileSystemFromBackup", input, options)
end
@doc """
Creates a snapshot of an existing Amazon FSx for OpenZFS file system.
With snapshots, you can easily undo file changes and compare file versions by
restoring the volume to a previous version.
If a snapshot with the specified client request token exists, and the parameters
match, this operation returns the description of the existing snapshot. If a
snapshot with the specified client request token exists, and the parameters
don't match, this operation returns `IncompatibleParameterError`. If a snapshot
with the specified client request token doesn't exist, `CreateSnapshot` does the
following:
* Creates a new OpenZFS snapshot with an assigned ID, and an initial
lifecycle state of `CREATING`.
* Returns the description of the snapshot.
By using the idempotent operation, you can retry a `CreateSnapshot` operation
without the risk of creating an extra snapshot. This approach can be useful when
an initial call fails in a way that makes it unclear whether a snapshot was
created. If you use the same client request token and the initial call created a
snapshot, the operation returns a successful result because all the parameters
are the same.
The `CreateSnapshot` operation returns while the snapshot's lifecycle state is
still `CREATING`. You can check the snapshot creation status by calling the
[DescribeSnapshots](https://docs.aws.amazon.com/fsx/latest/APIReference/API_DescribeSnapshots.html)
operation, which returns the snapshot state along with other information.
"""
def create_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateSnapshot", input, options)
end
@doc """
Creates a storage virtual machine (SVM) for an Amazon FSx for ONTAP file system.
"""
def create_storage_virtual_machine(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateStorageVirtualMachine", input, options)
end
@doc """
Creates an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS storage volume.
"""
def create_volume(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateVolume", input, options)
end
@doc """
Creates a new Amazon FSx for NetApp ONTAP volume from an existing Amazon FSx
volume backup.
"""
def create_volume_from_backup(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateVolumeFromBackup", input, options)
end
@doc """
Deletes an Amazon FSx backup.
After deletion, the backup no longer exists, and its data is gone.
The `DeleteBackup` call returns instantly. The backup won't show up in later
`DescribeBackups` calls.
The data in a deleted backup is also deleted and can't be recovered by any
means.
"""
def delete_backup(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteBackup", input, options)
end
@doc """
Deletes a data repository association on an Amazon FSx for Lustre file system.
Deleting the data repository association unlinks the file system from the Amazon
S3 bucket. When deleting a data repository association, you have the option of
deleting the data in the file system that corresponds to the data repository
association. Data repository associations are supported only for file systems
with the `Persistent_2` deployment type.
"""
def delete_data_repository_association(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDataRepositoryAssociation", input, options)
end
@doc """
Deletes a file system.
After deletion, the file system no longer exists, and its data is gone. Any
existing automatic backups and snapshots are also deleted.
To delete an Amazon FSx for NetApp ONTAP file system, first delete all the
volumes and storage virtual machines (SVMs) on the file system. Then provide a
`FileSystemId` value to the `DeleteFileSystem` operation.
By default, when you delete an Amazon FSx for Windows File Server file system, a
final backup is created upon deletion. This final backup isn't subject to the
file system's retention policy, and must be manually deleted.
The `DeleteFileSystem` operation returns while the file system has the
`DELETING` status. You can check the file system deletion status by calling the
[DescribeFileSystems](https://docs.aws.amazon.com/fsx/latest/APIReference/API_DescribeFileSystems.html)
operation, which returns a list of file systems in your account. If you pass the
file system ID for a deleted file system, the `DescribeFileSystems` operation
returns a `FileSystemNotFound` error.
If a data repository task is in a `PENDING` or `EXECUTING` state, deleting an
Amazon FSx for Lustre file system will fail with an HTTP status code 400 (Bad
Request).
The data in a deleted file system is also deleted and can't be recovered by any
means.
"""
def delete_file_system(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteFileSystem", input, options)
end
@doc """
Deletes the Amazon FSx snapshot.
After deletion, the snapshot no longer exists, and its data is gone. Deleting a
snapshot doesn't affect snapshots stored in a file system backup.
The `DeleteSnapshot` operation returns instantly. The snapshot appears with the
lifecycle status of `DELETING` until the deletion is complete.
"""
def delete_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteSnapshot", input, options)
end
@doc """
Deletes an existing Amazon FSx for ONTAP storage virtual machine (SVM).
Prior to deleting an SVM, you must delete all non-root volumes in the SVM,
otherwise the operation will fail.
"""
def delete_storage_virtual_machine(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteStorageVirtualMachine", input, options)
end
@doc """
Deletes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.
"""
def delete_volume(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteVolume", input, options)
end
@doc """
Returns the description of a specific Amazon FSx backup, if a `BackupIds` value
is provided for that backup.
Otherwise, it returns all backups owned by your Amazon Web Services account in
the Amazon Web Services Region of the endpoint that you're calling.
When retrieving all backups, you can optionally specify the `MaxResults`
parameter to limit the number of backups in a response. If more backups remain,
Amazon FSx returns a `NextToken` value in the response. In this case, send a
later request with the `NextToken` request parameter set to the value of the
`NextToken` from the last response.
This operation is used in an iterative process to retrieve a list of your
backups. `DescribeBackups` is called first without a `NextToken` value. Then the
operation continues to be called with the `NextToken` parameter set to the value
of the last `NextToken` value until a response has no `NextToken` value.
When using this operation, keep the following in mind:
* The operation might return fewer than the `MaxResults` value of
backup descriptions while still including a `NextToken` value.
* The order of the backups returned in the response of one
`DescribeBackups` call and the order of the backups returned across the
responses of a multi-call iteration is unspecified.
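## Example
A sketch of the iterative `NextToken` flow described above (same aws-elixir
`AWS.FSx` client assumption; `BackupPager` is a hypothetical helper module):
    defmodule BackupPager do
      @max_results 50
      # Follows NextToken until the service stops returning one.
      def all_backups(client, token \\ nil, acc \\ []) do
        input = %{"MaxResults" => @max_results}
        input = if token, do: Map.put(input, "NextToken", token), else: input
        {:ok, result, _http_response} = AWS.FSx.describe_backups(client, input)
        acc = acc ++ Map.get(result, "Backups", [])
        case Map.get(result, "NextToken") do
          nil -> acc
          next_token -> all_backups(client, next_token, acc)
        end
      end
    end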
"""
def describe_backups(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeBackups", input, options)
end
@doc """
Returns the description of specific Amazon FSx for Lustre data repository
associations, if one or more `AssociationIds` values are provided in the
request, or if filters are used in the request.
Data repository associations are supported only for file systems with the
`Persistent_2` deployment type.
You can use filters to narrow the response to include just data repository
associations for specific file systems (use the `file-system-id` filter with the
ID of the file system) or data repository associations for a specific repository
type (use the `data-repository-type` filter with a value of `S3`). If you don't
use filters, the response returns all data repository associations owned by your
Amazon Web Services account in the Amazon Web Services Region of the endpoint
that you're calling.
When retrieving all data repository associations, you can paginate the response
by using the optional `MaxResults` parameter to limit the number of data
repository associations returned in a response. If more data repository
associations remain, Amazon FSx returns a `NextToken` value in the response. In
this case, send a later request with the `NextToken` request parameter set to
the value of `NextToken` from the last response.
"""
def describe_data_repository_associations(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDataRepositoryAssociations", input, options)
end
@doc """
Returns the description of specific Amazon FSx for Lustre data repository tasks,
if one or more `TaskIds` values are provided in the request, or if filters are
used in the request.
You can use filters to narrow the response to include just tasks for specific
file systems, or tasks in a specific lifecycle state. Otherwise, it returns all
data repository tasks owned by your Amazon Web Services account in the Amazon
Web Services Region of the endpoint that you're calling.
When retrieving all tasks, you can paginate the response by using the optional
`MaxResults` parameter to limit the number of tasks returned in a response. If
more tasks remain, Amazon FSx returns a `NextToken` value in the response. In
this case, send a later request with the `NextToken` request parameter set to
the value of `NextToken` from the last response.
"""
def describe_data_repository_tasks(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDataRepositoryTasks", input, options)
end
@doc """
Returns the DNS aliases that are associated with the specified Amazon FSx for
Windows File Server file system.
A history of all DNS aliases that have been associated with and disassociated
from the file system is available in the list of `AdministrativeAction` provided
in the `DescribeFileSystems` operation response.
"""
def describe_file_system_aliases(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeFileSystemAliases", input, options)
end
@doc """
Returns the description of specific Amazon FSx file systems, if a
`FileSystemIds` value is provided for that file system.
Otherwise, it returns descriptions of all file systems owned by your Amazon Web
Services account in the Amazon Web Services Region of the endpoint that you're
calling.
When retrieving all file system descriptions, you can optionally specify the
`MaxResults` parameter to limit the number of descriptions in a response. If
more file system descriptions remain, Amazon FSx returns a `NextToken` value in
the response. In this case, send a later request with the `NextToken` request
parameter set to the value of `NextToken` from the last response.
This operation is used in an iterative process to retrieve a list of your file
system descriptions. `DescribeFileSystems` is called first without a
`NextToken` value. Then the operation continues to be called with the `NextToken`
parameter set to the value of the last `NextToken` value until a response has no
`NextToken`.
When using this operation, keep the following in mind:
* The implementation might return fewer than `MaxResults` file
system descriptions while still including a `NextToken` value.
* The order of file systems returned in the response of one
`DescribeFileSystems` call and the order of file systems returned across the
responses of a multi-call iteration is unspecified.
"""
def describe_file_systems(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeFileSystems", input, options)
end
@doc """
Returns the description of specific Amazon FSx snapshots, if a `SnapshotIds`
value is provided.
Otherwise, this operation returns all snapshots owned by your Amazon Web
Services account in the Amazon Web Services Region of the endpoint that you're
calling.
When retrieving all snapshots, you can optionally specify the `MaxResults`
parameter to limit the number of snapshots in a response. If more snapshots
remain, Amazon FSx returns a `NextToken` value in the response. In this case,
send a later request with the `NextToken` request parameter set to the value of
`NextToken` from the last response.
Use this operation in an iterative process to retrieve a list of your snapshots.
`DescribeSnapshots` is called first without a `NextToken` value. Then the
operation continues to be called with the `NextToken` parameter set to the value
of the last `NextToken` value until a response has no `NextToken` value.
When using this operation, keep the following in mind:
* The operation might return fewer than the `MaxResults` value of
snapshot descriptions while still including a `NextToken` value.
* The order of snapshots returned in the response of one
`DescribeSnapshots` call and the order of snapshots returned across the responses
of a multi-call iteration is unspecified.
"""
def describe_snapshots(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeSnapshots", input, options)
end
@doc """
Describes one or more Amazon FSx for NetApp ONTAP storage virtual machines
(SVMs).
"""
def describe_storage_virtual_machines(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeStorageVirtualMachines", input, options)
end
@doc """
Describes one or more Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS
volumes.
"""
def describe_volumes(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeVolumes", input, options)
end
@doc """
Use this action to disassociate, or remove, one or more Domain Name Service
(DNS) aliases from an Amazon FSx for Windows File Server file system.
If you attempt to disassociate a DNS alias that is not associated with the file
system, Amazon FSx responds with a 400 Bad Request. For more information, see
[Working with DNS Aliases](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html).
The system response shows the DNS aliases that Amazon FSx is attempting to
disassociate from the file system. You can monitor the status of these
aliases with the `DescribeFileSystemAliases` operation.
"""
def disassociate_file_system_aliases(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DisassociateFileSystemAliases", input, options)
end
@doc """
Lists tags for Amazon FSx file systems and, in the case of Amazon FSx for
Windows File Server, backups.
When retrieving all tags, you can optionally specify the `MaxResults` parameter
to limit the number of tags in a response. If more tags remain, Amazon FSx
returns a `NextToken` value in the response. In this case, send a later request
with the `NextToken` request parameter set to the value of `NextToken` from the
last response.
This action is used in an iterative process to retrieve a list of your tags.
`ListTagsForResource` is called first without a `NextToken` value. Then the
action continues to be called with the `NextToken` parameter set to the value of
the last `NextToken` value until a response has no `NextToken`.
When using this action, keep the following in mind:
* The implementation might return fewer than `MaxResults` tags
while still including a `NextToken` value.
* The order of tags returned in the response of one
`ListTagsForResource` call and the order of tags returned across the responses
of a multi-call iteration is unspecified.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
Releases the file system lock from an Amazon FSx for OpenZFS file system.
"""
def release_file_system_nfs_v3_locks(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ReleaseFileSystemNfsV3Locks", input, options)
end
@doc """
Returns an Amazon FSx for OpenZFS volume to the state saved by the specified
snapshot.
"""
def restore_volume_from_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RestoreVolumeFromSnapshot", input, options)
end
@doc """
Tags an Amazon FSx resource.
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
@doc """
This action removes a tag from an Amazon FSx resource.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
@doc """
Updates the configuration of an existing data repository association on an
Amazon FSx for Lustre file system.
Data repository associations are supported only for file systems with the
`Persistent_2` deployment type.
"""
def update_data_repository_association(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateDataRepositoryAssociation", input, options)
end
@doc """
Use this operation to update the configuration of an existing Amazon FSx file
system.
You can update multiple properties in a single request; a request sketch
follows the property lists below.
For Amazon FSx for Windows File Server file systems, you can update the
following properties:
* `AuditLogConfiguration`
* `AutomaticBackupRetentionDays`
* `DailyAutomaticBackupStartTime`
* `SelfManagedActiveDirectoryConfiguration`
* `StorageCapacity`
* `ThroughputCapacity`
* `WeeklyMaintenanceStartTime`
For FSx for Lustre file systems, you can update the following properties:
* `AutoImportPolicy`
* `AutomaticBackupRetentionDays`
* `DailyAutomaticBackupStartTime`
* `DataCompressionType`
* `StorageCapacity`
* `WeeklyMaintenanceStartTime`
For FSx for ONTAP file systems, you can update the following properties:
* `AutomaticBackupRetentionDays`
* `DailyAutomaticBackupStartTime`
* `FsxAdminPassword`
* `WeeklyMaintenanceStartTime`
For the Amazon FSx for OpenZFS file systems, you can update the following
properties:
* `AutomaticBackupRetentionDays`
* `CopyTagsToBackups`
* `CopyTagsToVolumes`
* `DailyAutomaticBackupStartTime`
* `DiskIopsConfiguration`
* `ThroughputCapacity`
* `WeeklyMaintenanceStartTime`
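## Example
The request sketch referenced above, updating two Windows properties at once
(same aws-elixir `AWS.FSx` client assumption; the file system ID is a
placeholder):
    {:ok, _result, _http_response} =
      AWS.FSx.update_file_system(client, %{
        "FileSystemId" => "fs-0123456789abcdef0",
        "WindowsConfiguration" => %{
          "AutomaticBackupRetentionDays" => 14,
          "ThroughputCapacity" => 64
        }
      })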
"""
def update_file_system(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateFileSystem", input, options)
end
@doc """
Updates the name of a snapshot.
"""
def update_snapshot(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateSnapshot", input, options)
end
@doc """
Updates an Amazon FSx for ONTAP storage virtual machine (SVM).
"""
def update_storage_virtual_machine(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateStorageVirtualMachine", input, options)
end
@doc """
Updates the configuration of an Amazon FSx for NetApp ONTAP or Amazon FSx for
OpenZFS volume.
"""
def update_volume(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateVolume", input, options)
end
end
| lib/aws/generated/fsx.ex | 0.872849 | 0.490602 | fsx.ex | starcoder |
defmodule Placebo do
@moduledoc """
Placebo is a mocking library based on [meck](http://eproxus.github.io/meck/).
It is inspired by [RSpec](http://rspec.info/) and [Mock](https://github.com/jjh42/mock).
All the functionality covered below is provided by meck.
Placebo is just a pretty wrapper around the features of meck.
To enable it, just `use Placebo` in your ExUnit tests:
```
defmodule SomeTests do
use ExUnit.Case
use Placebo
## some awesome tests
end
```
## Stubbing
```
allow Some.Module.hello("world"), return: "some value"
or
allow(Some.Module.hello("world")) |> return("some value")
```
This will mock the module `Some.Module` and stub the `hello` function so that a call with the argument `"world"` returns `"some value"`.
Any Elixir term can be returned using this syntax.
If you want more dynamic behavior you can have a function executed when the mock is called.
```
allow Some.Module.hello(any()), exec: &String.upcase/1
allow Some.Module.hello(any()), exec: fn arg -> String.upcase(arg) end
or
allow(Some.Module.hello(any())) |> exec(&String.upcase/1)
allow(Some.Module.hello(any())) |> exec(fn arg -> String.upcase(arg) end)
```
To return different values on subsequent calls, use `seq` or `loop`:
```
allow Some.Module.hello(any()), seq: [1,2,3,4]
allow Some.Module.hello(any()), loop: [1,2,3,4]
or
allow(Some.Module.hello(any())) |> seq([1,2,3,4])
allow(Some.Module.hello(any())) |> loop([1,2,3,4])
```
`seq` returns the last value for every call after the list is exhausted.
`loop` cycles back to the beginning of the list after the last value is returned.
# Argument Matching
Any term passed in as an argument will be matched exactly.
`Placebo.Matchers` provides several argument matchers to be used for more dynamic scenarios.
If you need something custom, you can pass a function to the `is/1` matcher.
```
allow Some.Module.hello(is(fn arg -> rem(arg,2) == 0 end)), return: "Even"
or
allow(Some.Module.hello(is(fn arg -> rem(arg,2) == 0 end))) |> return("Even")
```
All mocks created by Placebo are automatically created with meck's `:merge_expects` option,
so multiple `allow` statements can be made for the same function and will match in the order defined.
```
allow Some.Module.hello(is(fn arg -> rem(arg,2) == 0 end)), return: "Even"
allow Some.Module.hello(any()), return: "Odd"
or
allow(Some.Module.hello(is(fn arg -> rem(arg,2) == 0 end))) |> return("Even")
allow(Some.Module.hello(any())) |> return("Odd")
```
# Verification
Verification is done with assert_called and refute_called.
All argument matchers also work in the verification step.
```
assert_called Some.Module.hello("world")
assert_called Some.Module.hello("world"), once()
assert_called Some.Module.hello("world"), times(2)
```
If you use `expect` instead of `allow` for any scenario, the interaction will automatically be verified at the end of the test.
```
expect Some.Module.hello("world"), return: "some value"
or
expect(Some.Module.hello("world")) |> return("some value")
```
Failed verifications will automatically print out all recorded interactions with the mocked module.
# Asynchronous Testing
Placebo supports asynchronous testing by using `$callers` and making the mock active for only the processes in the calling hierarchy.
If the test is marked as `async: false`, the mock is available globally.
"""
defmacro __using__(_args) do
quote do
import Placebo
import Placebo.Matchers
import Placebo.Validators
import Placebo.Actions
require Placebo.Macros
setup(context) do
async? = Map.get(context, :async, false)
unless async? do
Placebo.Server.clear()
end
Placebo.Server.set_async(async?)
test_pid = self()
on_exit(fn ->
try do
Placebo.Server.expects(test_pid)
|> Enum.each(fn stub ->
assert Placebo.Server.num_calls(stub.module, stub.function, stub.args, test_pid) > 0,
Placebo.Helpers.failure_message(
stub.module,
stub.function,
stub.args,
test_pid
)
end)
after
unless async? do
Placebo.Server.clear()
end
end
end)
:ok
end
end
end
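@doc """
Captures an argument from a recorded call to a mocked function.
`capture/2` returns the argument at position `arg_num` from the first call
matching the given invocation; `capture/3` additionally takes a history
position (such as `:first`) to pick which matching call to inspect. Raises
if no matching call was recorded.
"""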
defmacro capture({{:., _, [module, f]}, _, args}, arg_num) do
quote do
case Placebo.Server.capture(:first, unquote(module), unquote(f), unquote(args), unquote(arg_num)) do
{:ok, capture} -> capture
{:error, reason} -> raise "Unable to find capture: #{inspect(reason)}"
end
end
end
defmacro capture(history_position, {{:., _, [module, f]}, _, args}, arg_num) do
quote do
case Placebo.Server.capture(
unquote(history_position),
unquote(module),
unquote(f),
unquote(args),
unquote(arg_num)
) do
{:ok, capture} -> capture
{:error, reason} -> raise "Unable to find capture: #{inspect(reason)}"
end
end
end
defmacro allow({{:., _, [module, f]}, _, args}, opts \\ []) do
record_expectation(module, f, args, opts, false)
end
defmacro expect({{:., _, [module, f]}, _, args}, opts \\ []) do
record_expectation(module, f, args, opts, true)
end
defp record_expectation(module, function, args, opts, expect?) do
quote bind_quoted: [
module: module,
function: function,
args: args,
arity: length(args),
opts: opts,
expect?: expect?
] do
meck_options = Keyword.get(opts, :meck_options, [])
mock_config = {module, function, args, expect?, meck_options}
setup_mock(mock_config, Map.new(opts))
mock_config
end
end
defmacro assert_called({{:., _, [module, f]}, _, args}, validator \\ :any) do
quote bind_quoted: [module: module, f: f, args: args, validator: validator] do
Placebo.Helpers.called?(module, f, args, validator)
|> assert(Placebo.Helpers.failure_message(module, f, args))
end
end
defmacro called?({{:., _, [module, f]}, _, args}, validator \\ :any) do
quote bind_quoted: [module: module, f: f, args: args, validator: validator] do
Placebo.Helpers.called?(module, f, args, validator)
end
end
defmacro num_calls({{:., _, [module, f]}, _, args}) do
quote bind_quoted: [module: module, f: f, args: args] do
Placebo.Server.num_calls(module, f, args)
end
end
defmacro refute_called({{:., _, [module, f]}, _, args}, validator \\ :any) do
quote bind_quoted: [module: module, f: f, args: args, validator: validator] do
Placebo.Helpers.called?(module, f, args, validator)
|> refute(Placebo.Helpers.failure_message(module, f, args))
end
end
def setup_mock(mock_config, %{return: value}), do: Placebo.Actions.return(mock_config, value)
def setup_mock(mock_config, %{exec: function}), do: Placebo.Actions.exec(mock_config, function)
def setup_mock(mock_config, %{seq: values}), do: Placebo.Actions.seq(mock_config, values)
def setup_mock(mock_config, %{loop: values}), do: Placebo.Actions.loop(mock_config, values)
def setup_mock(_, _), do: :ok
defdelegate unstub, to: Placebo.Server, as: :clear
end
| lib/placebo.ex | 0.776199 | 0.934335 | placebo.ex | starcoder |
defmodule Expug.Compiler do
@moduledoc """
Compiles tokens into an AST.
## How it works
Nodes are maps with a `:type` key. They are then filled up using a function
with the same name as the type:
node = %{type: :document}
document({node, tokens})
This function returns another `{node, tokens}` tuple, where `node` is the
updated node, and `tokens` are the rest of the tokens to parse.
The functions (`document/1`) here can do 1 of these things:
- Spawn a child, say, `%{type: :element}`, then delegate to its function (e.g., `element()`).
- Simply return a `{node, tokens}` tuple, with no transformation.
The functions `indent()` and `statement()` are a little different: they can
give you an element, a text node, or some other node type.
## Also see
- `Expug.Tokenizer` is used to build the tokens used by this compiler.
- `Expug.Builder` uses the AST made by this compiler.
"""
require Logger
@doc """
Compiles tokens into an AST. On success, it returns the root document node.
On failure, it throws a map such as `%{type: type, position: {line, col}, token_type: token_type}`.
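A usage sketch (the tokenizer entry point and its `{:ok, tokens}` return
shape are assumptions; see `Expug.Tokenizer`):
    # Hypothetical tokenizer call; compile/2 reverses the token list itself.
    {:ok, tokens} = Expug.Tokenizer.tokenize("div")
    ast = Expug.Compiler.compile(tokens)
    ast.type
    #=> :document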
"""
def compile(tokens, _opts \\ []) do
tokens = Enum.reverse(tokens)
node = %{type: :document}
try do
{node, _tokens} = document({node, tokens})
node = Expug.Transformer.transform(node)
node
catch
{:compile_error, type, [{pos, token, _} | _]} ->
# TODO: create an EOF token
throw %{ type: type, position: pos, token_type: token }
{:compile_error, type, []} ->
throw %{ type: type }
end
end
@doc """
A document.
"""
def document({node, [{_, :doctype, type} = t | tokens]}) do
node = Map.put(node, :doctype, %{
type: :doctype,
value: type,
token: t
})
indent({node, tokens}, [0])
end
def document({node, tokens}) do
indent({node, tokens}, [0]) # optional
end
@doc """
Indentation. Called with `depth` which is the current level its at.
"""
def indent({node, [{_, :indent, subdepth} | [_|_] = tokens]}, [d | _] = depths)
when subdepth > d do
if node[:children] == nil do
throw {:compile_error, :unexpected_indent, hd(tokens)}
end
# Found children, start a new subtree.
[child | rest] = Enum.reverse(node[:children] || [])
{child, tokens} = statement({child, tokens}, [ subdepth | depths ])
|> indent([ subdepth | depths ])
# Go back to our tree.
children = Enum.reverse([child | rest])
node = Map.put(node, :children, children)
{node, tokens}
|> indent(depths)
end
def indent({node, [{_, :indent, subdepth} | [_|_] = tokens]}, [d | _] = depths)
when subdepth == d do
{node, tokens}
|> statement(depths)
|> indent(depths)
end
def indent({node, [{_, :indent, subdepth} | [_|_]] = tokens}, [d | _])
when subdepth < d do
# throw {:compile_error, :ambiguous_indentation, token}
{node, tokens}
end
# End of file, no tokens left.
def indent({node, []}, _depth) do
{node, []}
end
def indent({_node, tokens}, _depth) do
throw {:compile_error, :unexpected_token, tokens}
end
@doc """
A statement after an `:indent`.
Can consume these:
:element_name
:element_class
:element_id
[:attribute_open [...] :attribute_close]
[:buffered_text | :unescaped_text | :raw_text | :block_text]
"""
def statement({node, [{_, :line_comment, _} | [{_, :subindent, _} | _] = tokens]}, _depths) do
# Pretend to be an element and capture stuff into it; discard it afterwards.
# This is wrong anyway; it should be tokenized differently.
subindent({node, tokens})
end
def statement({node, [{_, :line_comment, _} | tokens]}, _depths) do
{node, tokens}
end
def statement({node, [{_, :html_comment, value} = t | tokens]}, _depths) do
child = %{type: :html_comment, value: value, token: t}
{child, tokens} = append_subindent({child, tokens})
node = add_child(node, child)
{node, tokens}
end
def statement({node, [{_, :element_name, _} = t | _] = tokens}, depths) do
add_element(node, t, tokens, depths)
end
def statement({node, [{_, :element_class, _} = t | _] = tokens}, depths) do
add_element(node, t, tokens, depths)
end
def statement({node, [{_, :element_id, _} = t | _] = tokens}, depths) do
add_element(node, t, tokens, depths)
end
def statement({node, [{_, :raw_text, value} = t | tokens]}, _depth) do
child = %{type: :raw_text, value: value, token: t}
node = add_child(node, child)
{node, tokens}
end
def statement({node, [{_, :buffered_text, value} = t | tokens]}, _depth) do
child = %{type: :buffered_text, value: value, token: t}
{child, tokens} = append_subindent({child, tokens})
node = add_child(node, child)
{node, tokens}
end
def statement({node, [{_, :unescaped_text, value} = t | tokens]}, _depth) do
child = %{type: :unescaped_text, value: value, token: t}
{child, tokens} = append_subindent({child, tokens})
node = add_child(node, child)
{node, tokens}
end
def statement({node, [{_, :statement, value} = t | tokens]}, _depth) do
child = %{type: :statement, value: value, token: t}
{child, tokens} = append_subindent({child, tokens})
node = add_child(node, child)
{node, tokens}
end
def statement({_node, tokens}, _depths) do
throw {:compile_error, :unexpected_token, tokens}
end
@doc """
Consumes `:subindent` tokens and adds them to the `value` of `node`.
"""
def append_subindent({node, [{_, :subindent, value} | tokens]}) do
node = node
|> Map.update(:value, value, &(&1 <> "\n#{value}"))
{node, tokens}
|> append_subindent()
end
def append_subindent({node, tokens}) do
{node, tokens}
end
def add_element(node, t, tokens, depth) do
child = %{type: :element, name: "div", token: t}
{child, rest} = element({child, tokens}, node, depth)
node = add_child(node, child)
{node, rest}
end
@doc """
Parses an element.
Returns a `%{type: :element}` node.
"""
def element({node, tokens}, parent, depths) do
case tokens do
[{_, :element_name, value} | rest] ->
node = Map.put(node, :name, value)
element({node, rest}, parent, depths)
[{_, :element_id, value} | rest] ->
attr_list = add_attribute(node[:attributes] || %{}, "id", {:text, value})
node = Map.put(node, :attributes, attr_list)
element({node, rest}, parent, depths)
[{_, :element_class, value} | rest] ->
attr_list = add_attribute(node[:attributes] || %{}, "class", {:text, value})
node = Map.put(node, :attributes, attr_list)
element({node, rest}, parent, depths)
[{_, :raw_text, value} = t | rest] ->
# should be in children
child = %{type: :raw_text, value: value, token: t}
node = add_child(node, child)
element({node, rest}, parent, depths)
[{_, :buffered_text, value} = t | rest] ->
child = %{type: :buffered_text, value: value, token: t}
{child, rest} = append_subindent({child, rest})
node = add_child(node, child)
element({node, rest}, parent, depths)
[{_, :unescaped_text, value} = t | rest] ->
child = %{type: :unescaped_text, value: value, token: t}
{child, rest} = append_subindent({child, rest})
node = add_child(node, child)
element({node, rest}, parent, depths)
[{_, :block_text, _} | rest] ->
t = hd(rest)
{rest, lines} = subindent_capture(rest)
child = %{type: :block_text, value: Enum.join(lines, "\n"), token: t}
node = add_child(node, child)
element({node, rest}, parent, depths)
[{_, :attribute_open, _} | rest] ->
{attr_list, rest} = attribute({node[:attributes] || %{}, rest})
node = Map.put(node, :attributes, attr_list)
element({node, rest}, parent, depths)
tokens ->
{node, tokens}
end
end
@doc """
Returns a list of `[type: :attribute]` items.
"""
def attribute({attr_list, tokens}) do
case tokens do
[{_, :attribute_key, key}, {_, :attribute_value, value} | rest] ->
attr_list = add_attribute(attr_list, key, {:eval, value})
{attr_list, rest}
|> attribute()
[{_, :attribute_key, key} | rest] ->
attr_list = add_attribute(attr_list, key, {:eval, true})
{attr_list, rest}
|> attribute()
[{_, :attribute_close, _} | rest] ->
{attr_list, rest}
rest ->
{attr_list, rest}
end
end
def add_attribute(list, key, value) do
Map.update(list, key, [value], &(&1 ++ [value]))
end
@doc """
Adds a child to a Node.
iex> Expug.Compiler.add_child(%{}, %{type: :a})
%{children: [%{type: :a}]}
iex> src = %{children: [%{type: :a}]}
...> Expug.Compiler.add_child(src, %{type: :b})
%{children: [%{type: :a}, %{type: :b}]}
"""
def add_child(node, child) do
Map.update(node, :children, [child], &(&1 ++ [child]))
end
@doc """
Matches `:subindent` tokens and discards them. Used for line comments (`-#`).
"""
def subindent({node, [{_, :subindent, _} | rest]}) do
subindent({node, rest})
end
def subindent({node, rest}) do
{node, rest}
end
def subindent_capture(tokens, lines \\ [])
def subindent_capture([{_, :subindent, line} | rest], lines) do
lines = lines ++ [line]
subindent_capture(rest, lines)
end
def subindent_capture(rest, lines) do
{rest, lines}
end
end
| lib/expug/compiler.ex | 0.831554 | 0.647561 | compiler.ex | starcoder |
defmodule Abacus do
@moduledoc """
A math-expression parser, evaluator and formatter for Elixir.
## Features
### Supported operators
- `+`, `-`, `/`, `*`
- Exponentials with `^`
- Factorial (`n!`)
- Bitwise operators
* `<<` `>>` bitshift
* `&` bitwise and
* `|` bitwise or
* `|^` bitwise xor
* `~` bitwise not (unary)
- Boolean operators
* `&&`, `||`, `not`
* `==`, `!=`, `>`, `>=`, `<`, `<=`
* Ternary `condition ? if_true : if_false`
### Supported functions
- `sin(x)`, `cos(x)`, `tan(x)`
- `round(n, precision = 0)`, `ceil(n, precision = 0)`, `floor(n, precision = 0)`
### Reserved words
- `true`
- `false`
- `null`
### Access to variables in scope
- `a` with scope `%{"a" => 10}` would evaluate to `10`
- `a.b` with scope `%{"a" => %{"b" => 42}}` would evaluate to `42`
- `list[2]` with scope `%{"list" => [1, 2, 3]}` would evaluate to `3`
### Data types
- Boolean: `true`, `false`
- None: `null`
- Integer: `0`, `40`, `-184`
- Float: `0.2`, `12.`, `.12`
- String: `"Hello World"`, `"He said: \"Let's write a math parser\""`
If a variable is not in the scope, `eval/2` will result in `{:error, error}`.
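For example:
    Abacus.eval("1 + 2 * 3")
    #=> {:ok, 7}
    Abacus.eval("a.b * 2", %{"a" => %{"b" => 21}})
    #=> {:ok, 42}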
"""
@doc """
Evaluates the given expression with no scope.
If `expr` is a string, it will be parsed first.
"""
@spec eval(expr :: tuple | charlist | String.t()) :: {:ok, result :: number} | {:error, error :: map}
@spec eval(expr :: tuple | charlist | String.t(), scope :: map) ::
{:ok, result :: number} | {:error, error :: map}
@spec eval!(expr :: tuple | charlist | String.t()) :: result :: number
@spec eval!(expr :: tuple | charlist | String.t(), scope :: map) :: result :: number
def eval(expr) do
eval(expr, %{})
end
@doc """
Evaluates the given expression.
Raises errors when parsing or evaluating goes wrong.
"""
def eval!(expr) do
eval!(expr, %{})
end
@doc """
Evaluates the given expression with the given scope.
If `expr` is a string, it will be parsed first.
"""
def eval!(expr, scope) do
case Abacus.Eval.eval(expr, scope) do
{:ok, result} -> result
{:error, error} -> raise error
end
end
def eval(expr, scope) when is_binary(expr) or is_bitstring(expr) do
with {:ok, parsed} <- parse(expr) do
eval(parsed, scope)
end
end
def eval(expr, scope) do
Abacus.Tree.reduce(expr, &Abacus.Eval.eval(&1, scope))
end
@spec format(expr :: tuple | String.t() | charlist) :: {:ok, String.t()} | {:error, error :: map}
@doc """
Pretty-prints the given expression.
If `expr` is a string, it will be parsed first.
"""
def format(expr) when is_binary(expr) or is_bitstring(expr) do
case parse(expr) do
{:ok, expr} ->
format(expr)
{:error, _} = error ->
error
end
end
def format(expr) do
try do
{:ok, Abacus.Format.format(expr)}
rescue
error -> {:error, error}
end
end
@spec parse(expr :: String.t() | charlist) :: {:ok, expr :: tuple} | {:error, error :: map}
@doc """
Parses the given `expr` to a syntax tree.
"""
def parse(expr) do
with {:ok, tokens} <- lex(expr) do
:math_term_parser.parse(tokens)
else
{:error, error, _} -> {:error, error}
{:error, error} -> {:error, error}
end
end
def variables(expr) do
Abacus.Tree.reduce(expr, fn
{:access, variables} ->
res =
Enum.map(variables, fn
{:variable, var} -> var
{:index, index} -> variables(index)
end)
|> List.flatten()
|> Enum.uniq()
{:ok, res}
{_operator, a, b, c} ->
res =
Enum.concat([a, b, c])
|> Enum.uniq()
{:ok, res}
{_operator, a, b} ->
res =
Enum.concat(a, b)
|> Enum.uniq()
{:ok, res}
{_operator, a} ->
a
_ ->
{:ok, []}
end)
end
defp lex(string) when is_binary(string) do
string
|> String.to_charlist()
|> lex
end
defp lex(string) do
with {:ok, tokens, _} <- :math_term.string(string) do
{:ok, tokens}
end
end
end
| lib/abacus.ex | 0.741861 | 0.76145 | abacus.ex | starcoder |
defprotocol MapShaper do
@moduledoc """
Protocol to turn a map into nested structs.
"""
@fallback_to_any true
@doc """
Translates a JSON map into a structure or a list of structures.
The first argument defines the shape of nested structures like the following:
`[%Post{comments: %Comment{author: %Author{}}}]`.
The desired shape of nested structs can be also defined with the `defstruct/1`
calls of appropriate modules, then the first argument of `from_map/3`
can be shortened to `[%Post{}]`.
Calls `post_process_fn` on values that are not structs themselves
before adding them to the struct.
Define how to filter or translate JSON map keys into the struct fields
by conforming the struct to the `MapShaper.Target` protocol.
To override the key-conversion rules, conform the struct
to the `MapShaper.Target.Key` protocol.
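A minimal sketch with a hypothetical flat `User` struct (nested shapes work
the same way):
    defmodule User do
      defstruct name: nil, age: nil
    end
    MapShaper.from_map(%User{}, %{"name" => "Ada", "age" => 36})
    #=> %User{name: "Ada", age: 36}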
"""
def from_map(value, map, post_process_fn \\ & &1)
defprotocol Target do
@moduledoc false
@fallback_to_any true
@doc """
Translates the JSON map to match the target struct fields.
Should return a map with keys matching the fields of the struct.
The `MapShaper.from_map/3` function will call this one to prepare the map
before taking fields from it for building the structure.
"""
def translate_source_map(value, map)
defprotocol Key do
@moduledoc false
@fallback_to_any true
@doc """
Returns a list of keys for the given struct fields key.
`from_map/3` uses the return value from this function to get first
matching key from the JSON map that may have camel or underscore case keys.
The default implementation returns the following list of possible keys
for an `:a_key` input: `[:a_key, "a_key", "AKey", "aKey"]`.
"""
def key_variants(value, struct_key_atom)
end
end
end
defimpl MapShaper, for: Any do
def from_map(_value, nil, _post_process_fn) do
nil
end
def from_map([value], list, post_process_fn) when is_list(list) do
Enum.map(list, &MapShaper.from_map(value, &1, post_process_fn))
end
def from_map([value], not_list, _post_process_fn) do
raise "Call with the list shape [#{inspect(value)}] given as the first argument expects the list of maps as the second argument. Received instead: #{inspect(not_list)}"
end
def from_map(%struct_name{} = value, map, post_process_fn) do
alias MapShaper.Target
alias MapShaper.Target.Key
map = Target.translate_source_map(value, map)
fields =
value
|> Map.from_struct()
|> Enum.reduce([], fn {struct_key, struct_value}, fields ->
key_variants = Key.key_variants(value, struct_key)
map_value = Enum.find_value(key_variants, &Map.get(map, &1))
casted_value = MapShaper.from_map(struct_value, map_value, post_process_fn)
[{struct_key, casted_value} | fields]
end)
struct!(struct_name, fields)
end
def from_map(_value, map, post_process_fn) do
post_process_fn.(map)
end
end
defimpl MapShaper.Target, for: Any do
def translate_source_map(_value, map), do: map
end
defimpl MapShaper.Target.Key, for: Any do
def key_variants(_value, struct_key_atom) do
key_str = Atom.to_string(struct_key_atom)
underscore_str = Macro.underscore(key_str)
camel_str = Macro.camelize(key_str)
first = String.slice(camel_str, 0..0) |> String.downcase()
lf_camel_str = first <> String.slice(camel_str, 1..-1)
[
struct_key_atom,
key_str,
underscore_str,
camel_str,
lf_camel_str
]
end
end
| example_json_parse/lib/map_shaper.ex | 0.896485 | 0.612107 | map_shaper.ex | starcoder |
defmodule Membrane.RTP.SSRCRouter do
@moduledoc """
A filter separating RTP packets from different SSRCs into different outputs.
When receiving a new SSRC, it creates a new pad and notifies its parent
(`t:new_stream_notification_t/0`), which should link the new output pad.
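A sketch of how a parent might handle that notification and link the new pad
(the `Membrane.ParentSpec`-based callback and the `{:stream_handler, ssrc}`
child are assumptions, not part of this module):
    @impl true
    def handle_notification({:new_rtp_stream, ssrc, _payload_type}, :ssrc_router, _ctx, state) do
      links = [
        Membrane.ParentSpec.link(:ssrc_router)
        |> Membrane.ParentSpec.via_out(Membrane.Pad.ref(:output, ssrc))
        |> Membrane.ParentSpec.to({:stream_handler, ssrc})
      ]
      {{:ok, spec: %Membrane.ParentSpec{links: links}}, state}
    end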
"""
use Membrane.Filter
alias Membrane.RTP
def_input_pad :input, demand_unit: :buffers, caps: RTP, availability: :on_request
def_output_pad :output, caps: RTP, availability: :on_request
defmodule State do
@moduledoc false
use Bunch.Access
alias Membrane.RTP
@type t() :: %__MODULE__{
pads: %{RTP.ssrc_t() => [input_pad :: Pad.ref_t()]},
linking_buffers: %{RTP.ssrc_t() => [Membrane.Buffer.t()]}
}
defstruct pads: %{},
linking_buffers: %{}
end
@typedoc """
Notification sent when an RTP packet with a new SSRC arrives and a new output pad should be linked
"""
@type new_stream_notification_t ::
{:new_rtp_stream, RTP.ssrc_t(), RTP.payload_type_t()}
@impl true
def handle_init(_opts), do: {:ok, %State{}}
@impl true
def handle_pad_added(Pad.ref(:output, ssrc) = pad, _ctx, state) do
{buffered_actions, state} = pop_in(state, [:linking_buffers, ssrc])
{{:ok, [caps: {pad, %RTP{}}] ++ Enum.reverse(buffered_actions)}, state}
end
@impl true
def handle_pad_added(Pad.ref(:input, _id), _ctx, state) do
{:ok, state}
end
@impl true
def handle_pad_removed(Pad.ref(:input, _) = pad, _ctx, state) do
new_pads =
state.pads
|> Enum.filter(fn {_ssrc, p} -> p != pad end)
|> Enum.into(%{})
{:ok, %State{state | pads: new_pads}}
end
@impl true
def handle_prepared_to_playing(ctx, state) do
actions =
ctx.pads
|> Enum.filter(fn {_pad_ref, pad_data} -> pad_data.direction == :input end)
|> Enum.map(fn {pad_ref, _pad_data} -> {:demand, {pad_ref, 1}} end)
{{:ok, actions}, state}
end
@impl true
def handle_caps(Pad.ref(:input, _ref), _caps, _ctx, state) do
{:ok, state}
end
@impl true
def handle_demand(Pad.ref(:output, ssrc), _size, _unit, ctx, state) do
input_pad = state.pads[ssrc]
{{:ok, demand: {input_pad, &(&1 + ctx.incoming_demand)}}, state}
end
@impl true
def handle_process(Pad.ref(:input, _id) = pad, buffer, _ctx, state) do
%{ssrc: ssrc, payload_type: payload_type} = buffer.metadata.rtp
{new_stream_actions, state} = maybe_handle_new_stream(pad, ssrc, payload_type, state)
{actions, state} = maybe_add_to_linking_buffer(:buffer, buffer, ssrc, state)
{{:ok, new_stream_actions ++ actions}, state}
end
@impl true
def handle_event(Pad.ref(:input, _id), event, _ctx, state) do
{actions, state} =
Enum.flat_map_reduce(state.pads, state, fn {ssrc, _input}, state ->
maybe_add_to_linking_buffer(:event, event, ssrc, state)
end)
{{:ok, actions}, state}
end
@impl true
def handle_event(pad, event, ctx, state) do
super(pad, event, ctx, state)
end
defp maybe_handle_new_stream(pad, ssrc, payload_type, state) do
if Map.has_key?(state.pads, ssrc) do
{[], state}
else
state = state |> put_in([:pads, ssrc], pad) |> put_in([:linking_buffers, ssrc], [])
{[notify: {:new_rtp_stream, ssrc, payload_type}], state}
end
end
defp maybe_add_to_linking_buffer(type, value, ssrc, state) do
action = {type, {Pad.ref(:output, ssrc), value}}
if waiting_for_linking?(ssrc, state) do
state = update_in(state, [:linking_buffers, ssrc], &[action | &1])
actions = if type == :buffer, do: [demand: {state.pads[ssrc], &(&1 + 1)}], else: []
{actions, state}
else
{[action], state}
end
end
defp waiting_for_linking?(ssrc, %State{linking_buffers: lb}), do: Map.has_key?(lb, ssrc)
end
| lib/membrane/rtp/ssrc_router.ex | 0.832134 | 0.425665 | ssrc_router.ex | starcoder |
defmodule ABI do
@moduledoc """
Documentation for ABI, the function interface language for Solidity.
Generally, the ABI describes how to take binary Ethereum data and transform
it to or from types that Solidity understands.
"""
@doc """
Encodes the given data into the function signature or tuple signature.
In place of a signature, you can also pass one of the `ABI.FunctionSelector` structs returned from `parse_specification/1`.
## Examples
iex> ABI.encode("baz(uint,address)", [50, <<1::160>> |> :binary.decode_unsigned])
...> |> Base.encode16(case: :lower)
"a291add600000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000001"
iex> ABI.encode("baz(uint8)", [9999])
** (RuntimeError) Data overflow encoding uint, data `9999` cannot fit in 8 bits
iex> ABI.encode("(uint,address)", [{50, <<1::160>> |> :binary.decode_unsigned}])
...> |> Base.encode16(case: :lower)
"00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000001"
iex> ABI.encode("(string)", [{"Ether Token"}])
...> |> Base.encode16(case: :lower)
"0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000b457468657220546f6b656e000000000000000000000000000000000000000000"
iex> ABI.encode("(string)", [{String.duplicate("1234567890", 10)}])
...> |> Base.encode16(case: :lower)
"000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000643132333435363738393031323334353637383930313233343536373839303132333435363738393031323334353637383930313233343536373839303132333435363738393031323334353637383930313233343536373839303132333435363738393000000000000000000000000000000000000000000000000000000000"
iex> File.read!("priv/dog.abi.json")
...> |> Jason.decode!
...> |> ABI.parse_specification
...> |> Enum.find(&(&1.function == "bark")) # bark(address,bool)
...> |> ABI.encode([<<1::160>> |> :binary.decode_unsigned, true])
...> |> Base.encode16(case: :lower)
"b85d0bd200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001"
"""
def encode(function_signature, data) when is_binary(function_signature) do
encode(ABI.Parser.parse!(function_signature), data)
end
def encode(%ABI.FunctionSelector{} = function_selector, data) do
ABI.TypeEncoder.encode(data, function_selector)
end
@doc """
Decodes the given data based on the function or tuple
signature.
In place of a signature, you can also pass one of the `ABI.FunctionSelector` structs returned from `parse_specification/1`.
## Examples
iex> ABI.decode("baz(uint,address)", "00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000001" |> Base.decode16!(case: :lower))
[50, <<1::160>>]
iex> ABI.decode("(address[])", "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000" |> Base.decode16!(case: :lower))
[{[]}]
iex> ABI.decode("(string)", "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000b457468657220546f6b656e000000000000000000000000000000000000000000" |> Base.decode16!(case: :lower))
[{"Ether Token"}]
iex> File.read!("priv/dog.abi.json")
...> |> Jason.decode!
...> |> ABI.parse_specification
...> |> Enum.find(&(&1.function == "bark")) # bark(address,bool)
...> |> ABI.decode("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001" |> Base.decode16!(case: :lower))
[<<1::160>>, true]
"""
def decode(function_signature, data) when is_binary(function_signature) do
decode(ABI.Parser.parse!(function_signature), data)
end
def decode(%ABI.FunctionSelector{} = function_selector, data) do
ABI.TypeDecoder.decode(data, function_selector)
end
@doc """
Parses the given ABI specification document into an array of `ABI.FunctionSelector`s.
Non-function entries (e.g. constructors) in the ABI specification are skipped. Fallback function entries are accepted.
This function can be used in combination with a JSON parser, e.g. [`Jason`](https://hex.pm/packages/jason), to parse ABI specification JSON files.
## Examples
iex> File.read!("priv/dog.abi.json")
...> |> Jason.decode!
...> |> ABI.parse_specification
[%ABI.FunctionSelector{function: "bark", returns: nil, types: [:address, :bool]},
%ABI.FunctionSelector{function: "rollover", returns: :bool, types: []}]
iex> [%{
...> "constant" => true,
...> "inputs" => [
...> %{"name" => "at", "type" => "address"},
...> %{"name" => "loudly", "type" => "bool"}
...> ],
...> "name" => "bark",
...> "outputs" => [],
...> "payable" => false,
...> "stateMutability" => "nonpayable",
...> "type" => "function"
...> }]
...> |> ABI.parse_specification
[%ABI.FunctionSelector{function: "bark", returns: nil, types: [:address, :bool]}]
iex> [%{
...> "inputs" => [
...> %{"name" => "_numProposals", "type" => "uint8"}
...> ],
...> "payable" => false,
...> "stateMutability" => "nonpayable",
...> "type" => "constructor"
...> }]
...> |> ABI.parse_specification
[]
iex> ABI.decode("(string)", "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000643132333435363738393031323334353637383930313233343536373839303132333435363738393031323334353637383930313233343536373839303132333435363738393031323334353637383930313233343536373839303132333435363738393000000000000000000000000000000000000000000000000000000000" |> Base.decode16!(case: :lower))
[{String.duplicate("1234567890", 10)}]
iex> [%{
...> "payable" => false,
...> "stateMutability" => "nonpayable",
...> "type" => "fallback"
...> }]
...> |> ABI.parse_specification
[%ABI.FunctionSelector{function: nil, returns: nil, types: []}]
"""
def parse_specification(doc) do
doc
|> Enum.map(&ABI.FunctionSelector.parse_specification_item/1)
|> Enum.filter(& &1)
end
end
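# Usage sketch: tuple signatures round-trip without a function selector
# prefix (see the doctests above), so encode/2 and decode/2 are inverses here.
#
#     iex> encoded = ABI.encode("(string)", [{"hello"}])
#     iex> ABI.decode("(string)", encoded)
#     [{"hello"}]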
|
lib/abi.ex
| 0.859221
| 0.520801
|
abi.ex
|
starcoder
|
defexception Dynamo.Router.InvalidHookError, kind: nil, hook: nil, actual: nil do
def message(exception) do
"expected #{exception.kind} hook #{inspect exception.hook} to return " <>
"a HTTP connection, but got #{inspect exception.actual}"
end
end
defmodule Dynamo.Router.Base do
@moduledoc """
This module contains the basic structure for a `Dynamo.Router`.
It provides a set of macros to generate routes as well as
preparing and finalizing requests. For example:
defmodule HomeRouter do
use Dynamo.Router
prepare do
conn.assign :layout, "hello.html"
end
get "/hello" do
conn.resp 200, "world"
end
forward "/posts", to: PostsRouter
end
## Routes
get "/hello" do
conn.resp 200, "world"
end
In the example above, a request will only match if it is
a `GET` request to the route "/hello". The supported
verbs are `get`, `post`, `put`, `patch`, `delete` and `options`.
A route can also specify parameters which will then be
available in the function body:
get "/hello/:name" do
conn.resp 200, "hello \#{name}"
end
Routes allow globbing, which matches the remaining parts
of a route and can be made available as a parameter in the function
body. Note that a glob can't be followed by other segments:
get "/hello/*" do
conn.resp 200, "match all routes starting with /hello"
end
get "/hello/*glob" do
conn.resp 200, "route after /hello: \#{glob}"
end
Finally, a general `match` function is also supported:
match "/hello" do
conn.resp 200, "world"
end
A `match` will match any route regardless of the HTTP verb.
Check `match/3` for more information on how route compilation
works and a list of supported options.
## Forwarding
A Dynamo router can also forward a specific route to any other
router, allowing you to split a Dynamo into many routers instead of
having one big monolithic route handler:
defmodule ApplicationRouter do
use Dynamo.Router
forward "/home", to: HomeRouter
end
Now any request starting with "/home" in `ApplicationRouter` router
will be forwarded to `HomeRouter`, but without the "/home" prefix.
Therefore a request to "/home/hello" is seen by the `HomeRouter`
simply as "/hello", matching the route we defined at the beginning
of this section.
Although in the example above we forwarded to another Dynamo router,
we can forward to any module, as long as it exports the function
`service/1`. This function receives the connection as argument and
must return a (possibly updated) connection.
## Hooks
This module also provides both `prepare/1` and `finalize/1` hooks to
routers. Such hooks are useful for checking conditions, fetching data,
or updating the connection:
prepare do
conn.assign :layout, "hello.html"
end
prepare :require_authentication
defp require_authentication(conn) do
unless conn.session[:user_id] do
redirect! conn, to: "/"
end
end
Such hooks can be defined in the following formats:
* `:function_name` - the atom denotes the function name to be invoked
in the current module;
* `module_or_tuple` - a module or a tuple where `prepare/1` or `finalize/1`
will be invoked passing the connection as argument;
* `[do: block]` - a chunk of code to be executed as hook. The block
has access to the connection as `conn`.
Hooks receive the connection as argument and must return the updated
connection (if any change happens).
## Per-route hooks
Besides the hooks defined above, which run right after any route match,
per-route hooks can be added using the `@prepare` and `@finalize` attributes:
@prepare :require_authentication
get "/account/info" do
# ...
end
Annotations work similarly to the `prepare` and `finalize` macros, except
annotations do not support the `do` syntax.
Finally, Dynamo also provides `@skip_prepare` and `@skip_finalize`
attributes, which are useful for exempting specific routes from a hook:
# Require authentication for all routes
prepare :require_authentication
# Except the sign in route
@skip_prepare :require_authentication
get "/sign_in" do
# Sign in the user
end
"""
@doc false
defmacro __using__(_) do
[ quote do
Enum.each [:prepare, :finalize, :skip_prepare, :skip_finalize],
&Module.register_attribute(__MODULE__, &1, accumulate: true, persist: false)
@before_compile unquote(__MODULE__)
@dynamo_prepare []
@dynamo_finalize []
require Bitwise
import unquote(__MODULE__)
end,
quote location: :keep do
@doc false
def service(conn) do
dispatch(conn.method, conn.path_info_segments, conn)
end
defoverridable [service: 1]
end ]
end
@doc false
defmacro __before_compile__(env) do
module = env.module
prepare = compile_hooks module, :dynamo_prepare,
quote(do: var!(conn)), :prepare, &compile_prepare/4, true
finalize = compile_hooks module, :dynamo_finalize,
quote(do: var!(conn)), :finalize, &compile_finalize/4, true
[ quote location: :keep do
@doc false
def run_prepare_hooks(var!(conn), key), do: unquote(prepare)
@doc false
def run_finalize_hooks(var!(conn), key), do: unquote(finalize)
end,
quote do
@doc false
match _ do
raise Dynamo.NotFoundError, conn: var!(conn)
end
end ]
end
## Match
@doc """
Main API to define routes. It accepts an expression representing
the path and many options allowing the match to be configured.
## Examples
match "/foo/bar", via: :get do
conn.send 200, "hello world"
end
## Options
`match` accepts the following options:
* `via:` matches the route against some specific verbs
* `do:` contains the implementation to be invoked in case
the route matches
* `to:` forward the request to another module that implements
the service/1 API
## Routes compilation
All routes are compiled to a dispatch method that receives
three arguments: the verb, the request path split on "/"
and the connection. Consider this example:
match "/foo/bar", via: :get do
conn.send 200, "hello world"
end
It is compiled to:
def dispatch("GET", ["foo", "bar"], conn) do
conn.send 200, "hello world"
end
This opens up a few possibilities. First, guards can be given
to match:
match "/foo/:bar" when size(bar) <= 3, via: :get do
conn.send 200, "hello world"
end
Second, a list of split path segments (which is the compiled result)
is also allowed:
match ["foo", bar], via: :get do
conn.send 200, "hello world"
end
"""
defmacro match(expression, options, contents \\ []) do
compile(:generate_match, expression, Keyword.merge(contents, options))
end
@doc """
Forwards the given route to the specified module.
## Examples
forward "/foo/bar", to: Posts
Now all the routes that start with `/foo/bar` will automatically
be dispatched to `Posts` that needs to implement the service API.
"""
defmacro forward(expression, options) do
what = Keyword.get(options, :to, nil)
unless what, do:
raise(ArgumentError, message: "Expected to: to be given to forward")
block =
quote do
target = unquote(what)
conn = var!(conn).forward_to var!(glob), target
target.service(conn)
end
options = Keyword.put(options, :do, block)
compile(:generate_forward, expression, options)
end
@doc """
Dispatches to the path only if it is get request.
See `match/3` for more examples.
"""
defmacro get(path, contents) do
compile(:generate_match, path, Keyword.merge(contents, via: :get))
end
@doc """
Dispatches to the path only if it is post request.
See `match/3` for more examples.
"""
defmacro post(path, contents) do
compile(:generate_match, path, Keyword.merge(contents, via: :post))
end
@doc """
Dispatches to the path only if it is put request.
See `match/3` for more examples.
"""
defmacro put(path, contents) do
compile(:generate_match, path, Keyword.merge(contents, via: :put))
end
@doc """
Dispatches to the path only if it is patch request.
See `match/3` for more examples.
"""
defmacro patch(path, contents) do
compile(:generate_match, path, Keyword.merge(contents, via: :patch))
end
@doc """
Dispatches to the path only if it is delete request.
See `match/3` for more examples.
"""
defmacro delete(path, contents) do
compile(:generate_match, path, Keyword.merge(contents, via: :delete))
end
@doc """
Dispatches to the path only if it is options request.
See `match/3` for more examples.
"""
defmacro options(path, contents) do
compile(:generate_match, path, Keyword.merge(contents, via: :options))
end
## Match Helpers
# Entry point for both forward and match that is actually
# responsible to compile the route.
defp compile(generator, expression, options) do
verb = Keyword.get options, :via, nil
block = Keyword.get options, :do, nil
to = Keyword.get options, :to, nil
verb_guards = convert_verbs(List.wrap(verb))
{ path, guards } = extract_path_and_guards(expression, default_guards(verb_guards))
contents =
cond do
Keyword.has_key?(options, :do) -> block
to -> quote do: unquote(to).service(var!(conn))
true -> raise ArgumentError, message: "Expected :to or :do to be given as option"
end
{ vars, match } = apply Dynamo.Router.Utils, generator, [path]
args = quote do: [_verb, unquote(match), var!(conn)]
quote bind_quoted: [args: Macro.escape(args),
guards: Macro.escape(guards),
vars: Macro.escape(vars),
contents: Macro.escape(contents)] do
body = Dynamo.Router.Base.__hooks__(__MODULE__, vars, contents)
def dispatch(unquote_splicing(args)) when unquote(guards), do: unquote(body)
end
end
# Convert the verbs given with :via into a variable
# and guard set that can be added to the dispatch clause.
defp convert_verbs([]) do
true
end
defp convert_verbs(raw) do
[h|t] =
Enum.map raw, fn(verb) ->
verb = Dynamo.Router.Utils.normalize_verb(verb)
quote do
_verb == unquote(verb)
end
end
Enum.reduce t, h, fn(i, acc) ->
quote do
unquote(acc) or unquote(i)
end
end
end
# Extract the path and guards from the path.
defp extract_path_and_guards({ :when, _, [path, guards] }, extra_guard) do
{ path, { :and, [], [guards, extra_guard] } }
end
defp extract_path_and_guards(path, extra_guard) do
{ path, extra_guard }
end
# Generate a default guard that is meant to avoid warnings
# when the connection is not used. It automatically
# merges in the guards related to the verb.
defp default_guards(true) do
default_guard
end
defp default_guards(other) do
{ :and, [], [other, default_guard] }
end
defp default_guard do
quote do: is_tuple(var!(conn))
end
## Hooks
@doc """
Defines a prepare hook that is executed before any route matches.
"""
defmacro prepare(do: block) do
quote bind_quoted: [block: Macro.escape(block)] do
name = :"__dynamo_prepare_hook_#{length(@dynamo_prepare)}"
defp unquote(name)(var!(conn)) when is_tuple(var!(conn)), do: unquote(block)
@dynamo_prepare [{ name, nil }|@dynamo_prepare]
end
end
defmacro prepare(spec) do
quote do
@dynamo_prepare [{ unquote(spec), nil }|@dynamo_prepare]
end
end
@doc """
Defines an after hook that is executed after dispatch.
"""
defmacro finalize(do: block) do
quote bind_quoted: [block: Macro.escape(block)] do
name = :"__dynamo_finalize_hook_#{length(@dynamo_finalize)}"
defp unquote(name)(var!(conn)) when is_tuple(var!(conn)), do: unquote(block)
@dynamo_finalize [{ name, nil }|@dynamo_finalize]
end
end
defmacro finalize(spec) do
quote do
@dynamo_finalize [{ unquote(spec), nil }|@dynamo_finalize]
end
end
## Hooks helpers
# Used to retrieve hooks at function definition.
@doc false
def __hooks__(module, vars, contents) do
hooks = compile_hooks module, :finalize, quote(do: var!(conn)),
:finalize, &compile_finalize/4, false
hooks = quote do
var!(conn) = unquote(contents)
unquote(hooks)
end
unless vars == [] do
route_params = for var <- vars, do: { var, { var, [], nil } }
hooks = quote do
var!(conn) = var!(conn).route_params(unquote(route_params))
unquote(hooks)
end
end
hooks = compile_hooks module, :prepare, hooks,
:prepare, &compile_prepare/4, false
prepare_key = compile_skip_hooks module, :skip_prepare, :dynamo_prepare
finalize_key = compile_skip_hooks module, :skip_finalize, :dynamo_finalize
hooks =
quote do
var!(conn) = run_prepare_hooks(var!(conn), unquote(prepare_key))
run_finalize_hooks(unquote(hooks), unquote(finalize_key))
end
Enum.each [:prepare, :finalize, :skip_prepare, :skip_finalize],
&Module.delete_attribute(module, &1)
hooks
end
# The boolean argument is if the set of hooks are
# tuples with a mod value or not. The mod values
# are a bit system to check if a hook should be
# run or not.
defp compile_hooks(module, attr, acc, kind, fun, true) do
hooks = Module.get_attribute(module, attr)
Enum.reduce hooks, acc, fn({ hook, key }, acc) ->
compile_hook(hook, key, acc, kind, fun)
end
end
defp compile_hooks(module, attr, acc, kind, fun, false) do
hooks = Module.get_attribute(module, attr)
Enum.reduce hooks, acc, fn(hook, acc) ->
compile_hook(hook, nil, acc, kind, fun)
end
end
defp compile_hook(ref, key, acc, kind, fun) when is_atom(ref) do
case atom_to_binary(ref) do
"Elixir." <> _ ->
call = quote(do: unquote(ref).unquote(kind)(var!(conn)))
fun.(call, key, ref, acc)
_ ->
call = quote(do: unquote(ref)(var!(conn)))
fun.(call, key, ref, acc)
end
end
defp compile_hook(ref, key, acc, kind, fun) when is_tuple(ref) do
if is_mod_fun?(ref, kind) do
{ mod, modfun } = ref
call = quote(do: unquote(mod).unquote(modfun)(var!(conn)))
fun.(call, key, ref, acc)
else
ref = Macro.escape(ref)
call = quote(do: unquote(ref).unquote(kind)(var!(conn)))
fun.(call, key, ref, acc)
end
end
defp is_mod_fun?({ mod, fun } = ref, kind) when is_atom(mod) and is_atom(fun) do
not Dynamo.Router.Utils.is_function_exported?(ref, kind, 1)
end
defp is_mod_fun?(_ref, _kind), do: false
defp compile_prepare(call, key, ref, acc) do
quote do
case unquote(compile_condition(call, key)) do
var!(conn) when is_tuple(var!(conn)) -> unquote(acc)
x when x in [nil, false] -> unquote(acc)
actual -> raise Dynamo.Router.InvalidHookError, kind: :prepare, hook: unquote(ref), actual: actual
end
end
end
defp compile_finalize(call, key, ref, acc) do
quote do
case unquote(compile_condition(call, key)) do
var!(conn) when is_tuple(var!(conn)) -> unquote(acc)
x when x in [nil, false] -> unquote(acc)
actual -> raise Dynamo.Router.InvalidHookError, kind: :finalize, hook: unquote(ref), actual: actual
end
end
end
defp compile_condition(call, nil), do: call
defp compile_condition(call, key) do
quote do: Bitwise.band(unquote(key), key) == 0 and unquote(call)
end
# Compile skip hooks. If any skip hook was given, we
# search for it in the chain, add an index if there
# isn't one yet and add it to the key.
defp compile_skip_hooks(module, skip, chain) do
hooks = Module.get_attribute(module, skip)
Enum.reduce hooks, 0, fn(hook, acc) ->
acc + compile_skip_hook(module, chain, hook)
end
end
defp compile_skip_hook(module, chain, name) do
hooks = Module.get_attribute(module, chain)
hook = Enum.find(hooks, &match?({ ^name, _ }, &1))
case hook do
{ _, nil } ->
# Hook does not have a key, calculate a new one
# and update the hook chain to contain it
calculate_skip_hook_key(module, chain, hooks, name)
{ _, key } ->
key
nil ->
raise ArgumentError, message: "Could not skip hook #{inspect name} because it could not be found in this router"
end
end
defp calculate_skip_hook_key(module, chain, hooks, name) do
key =
Enum.reduce hooks, 1, fn
{ _, nil }, acc -> acc
_, acc -> acc * 2
end
hooks =
Enum.map hooks, fn hook ->
case hook do
{ ^name, _ } -> { name, key }
other -> other
end
end
Module.put_attribute(module, chain, hooks)
key
end
end
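# Usage sketch: a minimal router exercising the macros above. MyRouter and
# AdminRouter are illustrative names, not part of this module.
#
#     defmodule MyRouter do
#       use Dynamo.Router
#
#       prepare do
#         conn.assign :layout, "main.html"
#       end
#
#       get "/ping" do
#         conn.resp 200, "pong"
#       end
#
#       forward "/admin", to: AdminRouter
#     end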
|
lib/dynamo/router/base.ex
| 0.910903
| 0.464355
|
base.ex
|
starcoder
|
defmodule EpicenterWeb.Test.Pages.People do
import ExUnit.Assertions
import Phoenix.LiveViewTest
alias Epicenter.Test
alias EpicenterWeb.Test.LiveViewAssertions
alias EpicenterWeb.Test.Pages
alias Phoenix.LiveViewTest.View
def visit(%Plug.Conn{} = conn),
do: conn |> Pages.visit("/people")
def assert_here(view_or_conn_or_html),
do: view_or_conn_or_html |> Pages.assert_on_page("people")
def assign(%View{} = view, people, user) do
for person <- people do
view |> element("[data-tid=#{person.tid}]") |> render_click(%{"person-id" => person.id, "value" => "on"})
end
view |> element("#assignment-form") |> render_change(%{"user" => user.id})
view
end
def assignees(%View{} = view) do
view
|> Pages.parse()
|> Test.Table.table_contents(columns: ["Name", "Assignee"], headers: false, role: "people")
|> Enum.map(fn [name, assignee] -> {name, assignee} end)
|> Enum.into(%{})
end
def assert_assignees(%View{} = view, expected_assignees) do
assert view |> assignees() == expected_assignees
view
end
def assert_assign_dropdown_options(%View{} = view, data_role: data_role, expected: expected) do
LiveViewAssertions.assert_select_dropdown_options(view: view, data_role: data_role, expected: expected)
view
end
def assert_assignment_dropdown_enabled(%View{} = view) do
LiveViewAssertions.assert_enabled(view, "[data-role=users]")
view
end
def assert_assignment_dropdown_disabled(%View{} = view) do
LiveViewAssertions.assert_disabled(view, "[data-role=users]")
view
end
def assert_checked(%View{} = view, selector) do
LiveViewAssertions.assert_attribute(view, selector, "checked", ["checked"])
view
end
def assert_filter_selected(%View{} = view, filter_name) do
LiveViewAssertions.assert_attribute(view, "[data-role=people-filter][data-tid=#{filter_name}]", "data-active", ["true"])
view
end
def assert_table_contents(%View{} = view, expected_table_content, opts \\ []) do
assert table_contents(view, opts) == expected_table_content
view
end
def assert_unchecked(%View{} = view, selector) do
LiveViewAssertions.assert_attribute(view, selector, "checked", [])
view
end
def assert_archive_button_disabled(%View{} = view) do
LiveViewAssertions.assert_disabled(view, "[data-role=archive-button]")
view
end
def click_archive(%View{} = view) do
view
|> element("[data-role=archive-button]")
|> render_click()
view
end
def click_assigned_to_me_checkbox(%View{} = view) do
view |> element("[data-tid=assigned-to-me-checkbox]") |> render_click()
view
end
def click_person_checkbox(%View{} = view, person: person, value: value) do
view |> element("[data-tid=#{person.tid}]") |> render_click(%{"person-id" => person.id, "value" => value})
view
end
def change_form(%View{} = view, params) do
view |> element("#assignment-form") |> render_change(params)
view
end
def select_filter(%View{} = view, filter_name) do
view |> element("[data-tid=#{filter_name}]") |> render_click()
view
end
def import_button_visible?(%View{} = view), do: view |> element("[data-role=import-labs]") |> has_element?()
defp table_contents(index_live, opts),
do: index_live |> render() |> Test.Html.parse_doc() |> Test.Table.table_contents(opts |> Keyword.merge(role: "people"))
end
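# Usage sketch (hypothetical test data): every helper returns the view, so
# actions and assertions chain in a single pipeline.
#
#     alias EpicenterWeb.Test.Pages
#
#     conn
#     |> Pages.People.visit()
#     |> Pages.People.assert_here()
#     |> Pages.People.click_person_checkbox(person: alice, value: "on")
#     |> Pages.People.change_form(%{"user" => assignee.id})
#     |> Pages.People.assert_assignees(%{"Alice Ant" => "Assignee Name"})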
|
test/support/pages/people.ex
| 0.58522
| 0.558959
|
people.ex
|
starcoder
|
defmodule Wallaby.Helpers.KeyCodes do
@moduledoc """
Shortcuts for various keys.
- :null
- :cancel
- :help
- :backspace
- :tab
- :clear
- :return
- :enter
- :shift
- :control
- :alt
- :pause
- :escape
- :space
- :pageup
- :pagedown
- :end
- :home
- :left_arrow
- :up_arrow
- :right_arrow
- :down_arrow
- :insert
- :delete
- :semicolon
- :equals
- :num0
- :num1
- :num2
- :num3
- :num4
- :num5
- :num6
- :num7
- :num8
- :num9
- :multiply
- :add
- :seperator
- :subtract
- :decimal
- :divide
- :command
"""
# Encode a list of key codes to a usable JSON representation.
@spec json(list(atom)) :: String.t()
def json(keys) when is_list(keys) do
unicode =
keys
|> Enum.reduce([], fn x, acc -> acc ++ split_strings(x) end)
|> Enum.map(&"\"#{code(&1)}\"")
|> Enum.join(",")
"{\"value\": [#{unicode}]}"
end
# Ensures a list of keys is in binary form so it can be checked for local files.
@spec chars(list() | binary()) :: [binary()]
def chars(keys) do
keys
|> List.wrap()
|> Enum.map(fn
a when is_atom(a) -> code(a)
s -> s
end)
end
defp split_strings(x) when is_binary(x), do: String.graphemes(x)
defp split_strings(x), do: [x]
defp code(:null), do: "\\uE000"
defp code(:cancel), do: "\\uE001"
defp code(:help), do: "\\uE002"
defp code(:backspace), do: "\\uE003"
defp code(:tab), do: "\\uE004"
defp code(:clear), do: "\\uE005"
defp code(:return), do: "\\uE006"
defp code(:enter), do: "\\uE007"
defp code(:shift), do: "\\uE008"
defp code(:control), do: "\\uE009"
defp code(:alt), do: "\\uE00A"
defp code(:pause), do: "\\uE00B"
defp code(:escape), do: "\\uE00C"
defp code(:space), do: "\\uE00D"
defp code(:pageup), do: "\\uE00E"
defp code(:pagedown), do: "\\uE00F"
defp code(:end), do: "\\uE010"
defp code(:home), do: "\\uE011"
defp code(:left_arrow), do: "\\uE012"
defp code(:up_arrow), do: "\\uE013"
defp code(:right_arrow), do: "\\uE014"
defp code(:down_arrow), do: "\\uE015"
defp code(:insert), do: "\\uE016"
defp code(:delete), do: "\\uE017"
defp code(:semicolon), do: "\\uE018"
defp code(:equals), do: "\\uE019"
defp code(:num0), do: "\\uE01A"
defp code(:num1), do: "\\uE01B"
defp code(:num2), do: "\\uE01C"
defp code(:num3), do: "\\uE01D"
defp code(:num4), do: "\\uE01E"
defp code(:num5), do: "\\uE01F"
defp code(:num6), do: "\\uE020"
defp code(:num7), do: "\\uE021"
defp code(:num8), do: "\\uE022"
defp code(:num9), do: "\\uE023"
defp code(:multiply), do: "\\uE024"
defp code(:add), do: "\\uE025"
defp code(:seperator), do: "\\uE026"
defp code(:subtract), do: "\\uE027"
defp code(:decimal), do: "\\uE028"
defp code(:divide), do: "\\uE029"
defp code(:command), do: "\\uE03D"
defp code(char), do: char
end
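# Usage sketch: mixing named keys and plain strings. The JSON string below
# contains literal \uE008 escape sequences, which JSON parsers turn into the
# WebDriver key code characters.
#
#     iex> Wallaby.Helpers.KeyCodes.json([:shift, "a"])
#     "{\"value\": [\"\\uE008\",\"a\"]}"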
|
lib/wallaby/helpers/key_codes.ex
| 0.673406
| 0.447219
|
key_codes.ex
|
starcoder
|
defmodule Pokecards do
@moduledoc """
Provides functions to create and handle a deck of pokemons
"""
@moduledoc since: "0.1.0"
require Pokemon
@doc """
Creates a deck of pokemons
## Examples
iex> deck = Pokecards.create_deck
iex> deck
"""
@doc since: "0.1.0"
def create_deck do
pokemons = [
%Pokemon{name: "Pikachu", ability: "Static"},
%Pokemon{name: "Bulbasaur", ability: "Overgrow"},
%Pokemon{name: "Piplup", ability: "Torrent"}
]
damages = ["10", "20", "30", "40", "50"]
for pokemon <- pokemons, damage <- damages do
"Name: #{pokemon.name}, Ability: #{pokemon.ability}, Damage: #{damage}"
end
end
@doc """
Shuffles a deck of pokemons
## Examples
iex> deck = Pokecards.create_deck
iex> deck = Pokecards.shuffle(deck)
iex> deck
"""
@doc since: "0.1.0"
def shuffle(deck) do
Enum.shuffle(deck)
end
@doc """
Verifies if a `deck` contains a `pokemon`
## Examples
iex> deck = Pokecards.create_deck
iex> Pokecards.constains?(deck, "Name: Piplup, Ability: Torrent, Damage: 50")
true
"""
@doc since: "0.1.0"
def constains?(deck, pokemon) do
Enum.member?(deck, pokemon)
end
@doc """
Deal a deck of pokemons by a given a `hand_size`
## Examples
iex> deck = Pokecards.create_deck
iex> { hand, _rest_of_hand } = Pokecards.deal(deck, 2)
iex> length(hand) == 2
true
"""
@doc since: "0.1.0"
def deal(deck, hand_size) do
Enum.split(deck, hand_size)
end
@doc """
Saves a `deck` with a given `deckname` to the filesystem
Returns `:ok`
## Examples
iex> deck = Pokecards.create_deck
iex> Pokecards.save("cardname", deck)
:ok
"""
@doc since: "0.1.0"
def save(deckname, deck) do
try do
binary = :erlang.term_to_binary(deck)
File.write!("decks/#{deckname}", binary)
rescue
error in RuntimeError -> IO.puts("An error occurred: #{error.message}")
end
end
@doc """
Loads a deck with the given `deckname` from the filesystem
## Examples
iex> deck = Pokecards.load("test")
iex> deck
"""
@doc since: "0.1.0"
def load(deckname) do
try do
case File.read("decks/#{deckname}") do
{ :ok, binary } -> :erlang.binary_to_term(binary)
{ :error, _reason } -> raise "This file doesn't exist. Did you write the correct pathname?"
end
rescue
error in RuntimeError -> IO.puts("An error occurred: #{error.message}")
end
end
@doc """
Returns a hand of a pokemon deck based on a `hand_size`
## Examples
iex> hand = Pokecards.create_hand(5)
iex> length(hand) == 5
true
"""
@doc since: "0.1.0"
def create_hand(hand_size) do
if (!hand_size) do
raise "You must provide the size of the deck hand"
end
{ hand, _rest_of_hand } = Pokecards.create_deck
|> Pokecards.shuffle
|> Pokecards.deal(hand_size)
hand
end
end
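# Usage sketch (assumes a writable "decks/" directory exists alongside the
# application): saving a deck and loading it back.
#
#     deck = Pokecards.create_deck()
#     :ok = Pokecards.save("my_deck", deck)
#     ^deck = Pokecards.load("my_deck")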
|
lib/pokecards.ex
| 0.783368
| 0.47792
|
pokecards.ex
|
starcoder
|
defmodule Lily.Operator do
@moduledoc """
Creates operators using anonymous functions.
Lily operators extends Elixir operators with anonymous functions.
Instead of defining an operator using `def` or `defmacro`,
you provide an equivalent anonymous function to `operator/1` or `operator_macro/1`.
Use a function of arity one for unary operators and a function of arity two for binary operators.
The `Concat` module below creates concat operators
with Elixir operators and Lily operators:
```
#{File.read!("test/lily/support/concat.ex")}
```
```
iex> import Concat
iex> "a" >>> "b" # <- Elixir operator
"ab"
iex> "a" <<< "b" # <- Lily operator
"ab"
```
## Supported Operators
Lily supports all the Elixir operators except:
`.`, `=>`, `^`, `not in`, `when` as these are used by the Elixir parser.
| Operator | Associativity |
|:-----------:|:---------------------|
| `!` | Unary |
| `@` | Unary |
| `~~~` | Unary |
| `&` | Unary |
| `+` | Unary, Left to Right |
| `-` | Unary, Left to Right |
| `*` | Left to Right |
| `/` | Left to Right |
| `^^^` | Left to Right |
| `&&` | Left to Right |
| `&&&` | Left to Right |
| `<-` | Left to Right |
| `\\\\` | Left to Right |
| `\\|\\|` | Left to Right |
| `\\|\\|\\|` | Left to Right |
| `=~` | Left to Right |
| `==` | Left to Right |
| `===` | Left to Right |
| `!=` | Left to Right |
| `!==` | Left to Right |
| `<` | Left to Right |
| `>` | Left to Right |
| `<=` | Left to Right |
| `\\|>` | Left to Right |
| `>=` | Left to Right |
| `<\\|>` | Left to Right |
| `<~>` | Left to Right |
| `~>` | Left to Right |
| `~>>` | Left to Right |
| `>>>` | Left to Right |
| `<~` | Left to Right |
| `<<~` | Left to Right |
| `<<<` | Left to Right |
| `in` | Left to Right |
| `and` | Left to Right |
| `or` | Left to Right |
| `not` | Left to Right |
| `..` | Right to Left |
| `=` | Right to Left |
| `++` | Right to Left |
| `--` | Right to Left |
| `\\|` | Right to Left |
| `<>` | Right to Left |
"""
alias Lily.Error
import Lily.Function
@doc """
Creates operators.
Inserts operator functions for each operator in `operators`.
The key is the operator name and the value is the function that the operator should call.
The function should return an expression which results from the operator inputs.
## Examples
Math operators that work with numbers or complex numbers expressed as `{real, imaginary}`:
```
#{File.read!("test/lily/support/math.ex")}
```
Regular math:
iex> use Math
iex> +1
1
iex> 1 + 2
3
Complex math:
iex> use Math
iex> +{1, 2}
{1, 2}
iex> {1, 2} + {3, 4}
{4, 6}
"""
@spec operator(keyword(f :: Macro.t())) :: Macro.t()
defmacro operator(operators) do
operator(operators, __CALLER__)
end
@doc """
Creates operator macros.
Insert operator macros for each operator in the `operators` keyword list.
The key is the operator name and the value is the function that the operator should call.
The function should return a quoted expression.
## Examples
A tee `a ~> function_call` operator that calls `IO.inspect(a)` and then pipes `a` into `function_call`.
This could be useful for inspecting intermediate values in a pipeline.
```
#{File.read!("test/lily/support/io_inspect.ex")}
```
iex> import IO.Inspect
iex> :a ~> to_string() # => :a
"a"
"""
@spec operator_macro(keyword(f :: Macro.t())) :: Macro.t()
defmacro operator_macro(operators) do
operator_macro(operators, __CALLER__)
end
@doc """
Returns the AST to create an operator.
This is useful for programmatically creating operators.
"""
@spec operator(keyword(f :: Macro.t()), Macro.Env.t()) :: Macro.t()
def operator(operators, env) do
create(:def, operators, env)
end
@doc """
Returns the AST to create an operator macro.
This is useful for programmatically creating operator macros.
"""
@spec operator_macro(keyword(f :: Macro.t()), Macro.Env.t()) :: Macro.t()
def operator_macro(operators, env) do
create(:defmacro, operators, env)
end
defp create(type, operators, env) do
[
Enum.map(
operators,
fn {operator, f} -> create(type, operator, f, env) end
)
]
end
defp create(_, operator, _, _) when operator in [:., :"=>", :^, :"not in", :when] do
raise Error, "can't use #{operator}, because it is used by the Elixir parser."
end
defp create(type, operator, f, env) do
arity = arity(f, env)
operator_arities = arities(operator)
cond do
operator_arities == [] ->
raise Error,
"expected an operator but got #{operator}."
arity not in operator_arities ->
raise Error,
"expected an operator function for #{operator} with arity in #{
inspect(operator_arities)
}, but got an operator function with arity #{arity}."
true ->
operator(type, operator, f, arity)
end
end
defp operator(:def, operator, f, 1) do
quote do
import Kernel, except: [{unquote(operator), 1}]
def unquote(operator)(a) do
unquote(f).(a)
end
end
end
defp operator(:def, operator, f, 2) do
quote do
import Kernel, except: [{unquote(operator), 2}]
def unquote(operator)(a, b) do
unquote(f).(a, b)
end
end
end
defp operator(:defmacro, operator, f, 1) do
quote do
import Kernel, except: [{unquote(operator), 1}]
defmacro unquote(operator)(a) do
unquote(f).(a)
end
end
end
defp operator(:defmacro, operator, f, 2) do
quote do
import Kernel, except: [{unquote(operator), 2}]
import Lily.Operator
defmacro unquote(operator)(a, b) do
unquote(f).(a, b)
end
end
end
@doc """
Returns an operator macro function that calls a tap function before piping.
You provide a function of arity 1 that will be called with the input.
The operator macro function will call your function and
then pipe the left input to the function call on the right.
This can be useful for inserting effects.
## Examples
A tap operator that calls `IO.inspect/1`.
```
#{File.read!("test/lily/support/io_inspect.ex")}
```
iex> import IO.Inspect
iex> :a ~> to_string() # IO.inspect(:a) is called, :a is piped through
"a"
This could be useful for inspecting intermediate values in a pipeline.
iex> import IO.Inspect
iex> "a b"
...> ~> String.upcase() # > "a b"
...> ~> String.split() # > "A B"
["A", "B"]
Using `~>` results in the `IO.inspect/1` being called but is otherwise identical to `|>`.
"""
@spec tap((any -> any)) :: (any, function -> Macro.t())
def tap(tap) when is_function(tap, 1) do
fn a, f ->
quote do
a = unquote(a)
unquote(tap).(a)
a |> unquote(f)
end
end
end
defp arities(operator) do
Enum.filter([1, 2], fn arity -> Macro.operator?(operator, arity) end)
end
end
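# Usage sketch (illustrative module name): creating a binary operator from an
# anonymous function, following the Concat example in the moduledoc.
#
#     defmodule MyConcat do
#       import Lily.Operator
#       operator(<<<: fn a, b -> a <> b end)
#     end
#
#     import MyConcat
#     "a" <<< "b"
#     # => "ab"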
|
lib/lily/operator.ex
| 0.893117
| 0.901791
|
operator.ex
|
starcoder
|
defmodule Ockam.Hub.Service.PubSub do
@moduledoc """
PubSub service
Subscribes workers (by return route) to a string topic
Each topic can have multiple subscriptions
Each subscription has a unique name and topic
Name and topic are parsed from the payload as a BARE `string` type
with the following format "name:topic".
Name cannot contain `:` symbol
New subscriptions with the same name replace previous ones.
Topic address is created from topic prefix and topic as <prefix>_<topic>
e.g. if prefix is `pub_sub_t` and topic is `my_topic`, topic address will be: `pub_sub_t_my_topic`
Messages sent to the topic address will be forwarded to all subscribers routes
Options:
`prefix` - topic address prefix
"""
use Ockam.Worker
alias __MODULE__.Topic
alias Ockam.Message
require Logger
@impl true
def setup(options, state) do
prefix = Keyword.get(options, :prefix, state.address)
{:ok, Map.put(state, :prefix, prefix)}
end
@impl true
def handle_message(message, state) do
payload = Message.payload(message)
with {:ok, name_topic, ""} <- :bare.decode(payload, :string),
[name, topic] <- String.split(name_topic, ":") do
return_route = Message.return_route(message)
subscribe(name, topic, return_route, state)
else
err ->
Logger.error("Invalid message format: #{inspect(payload)}, reason #{inspect(err)}")
{:error, {:invalid_message, err}}
end
end
def subscribe(name, topic, route, state) do
with {:ok, worker} <- ensure_topic_worker(topic, state) do
## NOTE: Non-ockam message routing here
Topic.subscribe(worker, name, route)
{:ok, state}
end
end
def ensure_topic_worker(topic, state) do
topic_address = topic_address(topic, state)
case Ockam.Node.whereis(topic_address) do
nil -> Topic.create(topic: topic, address: topic_address)
_pid -> {:ok, topic_address}
end
end
def topic_address(topic, state) do
Map.get(state, :prefix, "") <> "_" <> topic
end
end
defmodule Ockam.Hub.Service.PubSub.Topic do
@moduledoc """
Topic subscription for pub_sub service
Forwards all messages to all subscribed routes
Subscribe API is internal, it adds a route to the subscribers set
"""
use Ockam.Worker
alias Ockam.Message
def subscribe(worker, name, route) do
## TODO: reply to the subscriber?
Ockam.Worker.call(worker, {:subscribe, name, route})
end
@impl true
def setup(options, state) do
topic = Keyword.get(options, :topic)
{:ok, Map.merge(state, %{topic: topic, routes: %{}})}
end
@impl true
def handle_call({:subscribe, name, route}, _from, state) do
{:reply, :ok,
Map.update(state, :routes, %{name => route}, fn routes -> Map.put(routes, name, route) end)}
end
@impl true
def handle_message(message, state) do
[_me | onward_route] = Message.onward_route(message)
state
|> Map.get(:routes, %{})
|> Enum.each(fn {_name, route} ->
## TODO: forward_through
Ockam.Router.route(Message.forward(message, route ++ onward_route))
end)
{:ok, state}
end
end
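# Usage sketch (hypothetical addresses; assumes :bare.encode/2 mirrors the
# :bare.decode/2 call used above). A worker subscribes by routing a message
# whose payload is a BARE-encoded "name:topic" string:
#
#     payload = :bare.encode("alice:news", :string)
#
#     Ockam.Router.route(%{
#       onward_route: ["pub_sub_service"],
#       return_route: ["alice"],
#       payload: payload
#     })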
|
implementations/elixir/ockam/ockam_hub/lib/hub/service/pub_sub.ex
| 0.782953
| 0.483039
|
pub_sub.ex
|
starcoder
|
defmodule ExDjango.Utils.Baseconv do
@moduledoc """
Functions for converting numbers from base 10 integers to base X strings and back again.
Compatible with django/utils/baseconv.py
"""
@base10_chars "0123456789"
def get_chars(base) do
case base do
:base2 -> "01"
:base10 -> @base10_chars
:base16 -> "0123456789ABCDEF"
:base36 -> "0123456789abcdefghijklmnopqrstuvwxyz"
:base56 -> "23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnpqrstuvwxyz"
:base62 -> "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
:base64 -> "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-_"
end
end
def get_sign(:base64), do: "$"
def get_sign(_base), do: "-"
def convert(number, from_digits, to_digits, sign) do
{neg, number} = case String.starts_with?(number, sign) do
false -> {false, number}
true -> {true, String.slice(number, 1..-1)}
end
res = case convert_from(String.graphemes(number), from_digits, 0) do
0 -> String.first(to_digits)
x -> convert_to(x, to_digits, "")
end
{neg, res}
end
def convert_from([], _from_digits, x), do: x
def convert_from([digit|t], from_digits, x) do
x = x * String.length(from_digits) + Enum.find_index(String.graphemes(from_digits), fn(x) -> x == digit end)
convert_from(t, from_digits, x)
end
def convert_to(x, _to_digits, res) when x <= 0, do: res
def convert_to(x, to_digits, res) when x > 0 do
digit = rem(x, String.length(to_digits))
res = String.slice(to_digits, digit, 1) <> res
x = div(x, String.length(to_digits))
convert_to(x, to_digits, res)
end
def encode(base, s), do: encode(get_chars(base), get_sign(base), s)
def encode(chars, sign, s) when is_integer(s), do: encode(chars, sign, Integer.to_string(s))
def encode(chars, sign, s) do
{neg, res} = convert(s, @base10_chars, chars, "-")
case neg do
true -> sign <> res
_ -> res
end
end
def decode(base, s), do: decode(get_chars(base), get_sign(base), s)
def decode(chars, sign, s) do
{neg, res} = convert(s, chars, @base10_chars, sign)
case neg do
true -> String.to_integer("-" <> res)
_ -> String.to_integer(res)
end
end
end
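# Usage sketch: round-tripping an integer through base62
# (255 = 4 * 62 + 7, and chars 4 and 7 of the base62 alphabet are "4" and "7").
#
#     iex> ExDjango.Utils.Baseconv.encode(:base62, 255)
#     "47"
#     iex> ExDjango.Utils.Baseconv.decode(:base62, "47")
#     255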
|
lib/utils/baseconv.ex
| 0.655667
| 0.408336
|
baseconv.ex
|
starcoder
|
defmodule GoogleMaps do
@moduledoc """
Helper functions for working with the Google Maps API.
"""
alias Util.Position
alias GoogleMaps.MapData
@host "https://maps.googleapis.com"
@host_uri URI.parse(@host)
@web "https://maps.google.com"
@web_uri URI.parse(@web)
@doc """
Given a path, returns a full URL with a signature.
Options:
* client_id: client to use for the request
* google_api_key: if no client ID is specified, a key to use
* signing_key: the private key used to sign the path.
If no options are passed, they'll be looked up out of the GoogleMaps
configuration in config.exs
"""
@spec signed_url(binary, Keyword.t()) :: binary
def signed_url(path, opts \\ []) do
opts =
default_options()
|> Keyword.merge(opts)
path
|> URI.parse()
|> do_signed_url(opts[:client_id], opts[:signing_key], opts)
end
@doc """
For URLs which don't require a signature (JS libraries), this function
returns the URL but without the signature.
"""
@spec unsigned_url(binary, Keyword.t()) :: binary
def unsigned_url(path, opts \\ []) do
opts = Keyword.merge(default_options(), opts)
parsed = URI.parse(path)
parsed =
case opts[:client_id] do
"" ->
append_api_key(parsed, opts[:google_api_key])
nil ->
append_api_key(parsed, opts[:google_api_key])
client_id ->
append_query(parsed, :client, client_id)
end
prepend_host(parsed)
end
@doc """
Returns the url to view directions to a location on https://maps.google.com.
"""
@spec direction_map_url(Position.t(), Position.t()) :: String.t()
def direction_map_url(origin, destination) do
origin_lat = Position.latitude(origin)
origin_lng = Position.longitude(origin)
dest_lat = Position.latitude(destination)
dest_lng = Position.longitude(destination)
path =
Path.join([
"/",
"maps",
"dir",
URI.encode("#{origin_lat},#{origin_lng}"),
URI.encode("#{dest_lat},#{dest_lng}")
])
URI.to_string(%{@web_uri | path: path})
end
defp default_options do
[
client_id: get_env(:client_id),
google_api_key: get_env(:google_api_key),
signing_key: get_env(:signing_key)
]
end
@doc "Given a GoogleMaps.MapData struct, returns a URL to a static map image."
@spec static_map_url(MapData.t()) :: String.t()
def static_map_url(map_data) do
map_data
|> MapData.static_query()
|> URI.encode_query()
|> (fn query -> "/maps/api/staticmap?#{query}" end).()
|> signed_url
end
defp get_env(key) do
case Application.get_env(:google_maps, key) do
"${" <> _ ->
# relx configuration that wasn't overridden; ignore
""
value ->
value
end
end
defp do_signed_url(uri, "", _, opts) do
uri
|> append_api_key(opts[:google_api_key])
|> prepend_host
end
defp do_signed_url(uri, _, "", opts) do
uri
|> append_api_key(opts[:google_api_key])
|> prepend_host
end
defp do_signed_url(uri, client_id, signing_key, _) do
uri
|> append_query(:client, client_id)
|> append_signature(signing_key)
|> prepend_host
end
defp append_query(%URI{query: nil} = uri, key, value) do
%{uri | query: "#{key}=#{value}"}
end
defp append_query(%URI{query: query} = uri, key, value) when is_binary(query) do
%{uri | query: "#{query}&#{key}=#{value}"}
end
@spec prepend_host(URI.t()) :: binary
defp prepend_host(uri) do
host = get_env(:domain) || @host_uri
host
|> URI.merge(uri)
|> URI.to_string()
end
defp append_api_key(uri, key) do
# Fallback to the existing API key for now. -ps
uri
|> append_query(:key, key)
end
defp append_signature(uri, signing_key) do
uri
|> append_query(:signature, signature(uri, signing_key))
end
defp signature(uri, key) do
de64ed_key = Base.url_decode64!(key)
uri_string = uri |> URI.to_string()
binary_hash = :crypto.hmac(:sha, de64ed_key, uri_string)
Base.url_encode64(binary_hash)
end
def scale(list_of_width_height_pairs) do
list_of_width_height_pairs
|> Enum.flat_map(fn {width, height} ->
[
{width, height, 1},
{width, height, 2}
]
end)
|> Enum.sort_by(fn {width, _, scale} -> width * scale end)
end
end
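# Usage sketch: scale/1 expands each {width, height} pair into 1x and 2x
# variants, sorted by effective width.
#
#     iex> GoogleMaps.scale([{600, 400}, {200, 200}])
#     [{200, 200, 1}, {200, 200, 2}, {600, 400, 1}, {600, 400, 2}]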
|
apps/google_maps/lib/google_maps.ex
| 0.806358
| 0.488893
|
google_maps.ex
|
starcoder
|
defmodule Statifier.Zipper.List do
@moduledoc """
An implementation of a Zipper over a list
Introductory Article: https://ferd.ca/yet-another-article-on-zippers.html
Supports being able to traverse a list both forwards and backwards while
maintaining a focus on the current element much like a doubly-linked list.
All while using functional immutable data structures and supporting O(1)
lookup and traversal from focus.
"""
@typedoc """
`left` represents items to the left of the focus. `right` is a list where the
head of it is the current focus and the rest is the items to the right of
focus.
"""
@type t(a) :: {left :: [a], right :: [a]}
@typedoc """
Error for invalid attemtps to move left or right when no element exists in
that direction.
"""
@type invalid_move :: {:error, :cannot_make_move}
@spec from_list([term()]) :: t(term())
@doc """
Creates a zipper from a list of elements
"""
def from_list([_non_empty | _rest] = nonempty_list), do: {[], nonempty_list}
@spec left(t(term())) :: {:ok, t(term())} | invalid_move()
@doc """
Moves focus to element to the left
"""
def left({[], _right}), do: {:error, :cannot_make_move}
def left({[head | left], right}), do: {:ok, {left, [head | right]}}
@spec left?(t(term())) :: boolean()
@doc """
Returns whether moving left is allowed
"""
def left?({[], _right}), do: false
def left?({_has_left_element, _right}), do: true
@spec right(t(term())) :: {:ok, t(term())} | invalid_move()
@doc """
Moves focus to element to the right
"""
def right({_left, [_focus | []]}), do: {:error, :cannot_make_move}
def right({left, [focus | right]}), do: {:ok, {[focus | left], right}}
@doc """
Returns whether moving right is allowed
"""
def right?({_left, [_focus | []]}), do: false
def right?({_left, _has_right_element}), do: true
@spec focus(t(term())) :: term()
@doc """
Returns the current element in focus
"""
def focus({_left, [focus | _rest]}), do: focus
@spec update(t(term()), term()) :: t(term())
@doc """
Updates the current element in focus
"""
def update({left, [_focus | right]}, value), do: {left, [value | right]}
end
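# Usage sketch: moving the focus right and updating the focused element.
#
#     iex> alias Statifier.Zipper.List, as: ZList
#     iex> z = ZList.from_list([1, 2, 3])
#     iex> {:ok, z} = ZList.right(z)
#     iex> ZList.focus(z)
#     2
#     iex> z |> ZList.update(20) |> ZList.focus()
#     20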
|
impl/ex/lib/zipper/list.ex
| 0.885869
| 0.751671
|
list.ex
|
starcoder
|
defmodule ExDhcp.Packet do
@moduledoc """
Provides a structure for the DHCP packet, according to the spec
(http://en.wikipedia.org/wiki/DHCP).
The fields are as follows:
- op: operation (request: 1, response: 2). This operation value allows DHCP providers
to coexist with other DHCP providers. Since DHCP packets are broadcast to 255.255.255.255,
without this feature, they might be mistaken for requests by other servers.
- htype: specifies the hardware address type. Currently only ethernet is supported.
- hlen: specifies the hardware address length. Currently only 6-byte MAC is supported.
- hops: number of relay hops the packet has traversed (set to 0 by clients).
- xid: transmission id. Allows multiple DHCP requests to be serviced concurrently. In
this library, separate servers will be spawned to handle different transmissions.
- secs: seconds elapsed since the client began the address acquisition process.
- flags: flags field; the most significant bit is the broadcast flag.
- ciaddr: "client internet address"
- yiaddr: "your internet address"
- siaddr: "server internet address"
- giaddr: "gateway internet address"
- options: a list of {integer, binary} pairs. Supported opcodes will be translated
into {atom, value} pairs.
"""
alias ExDhcp.BasicOptions
alias ExDhcp.Options
alias ExDhcp.Utils
@magic_cookie <<0x63, 0x82, 0x53, 0x63>>
@op_response 2
@htype_ethernet 1
@hlen_macaddr 6
defstruct op: @op_response,
htype: @htype_ethernet,
hlen: @hlen_macaddr,
hops: 0,
xid: 0,
secs: 0,
flags: 0,
ciaddr: {0, 0, 0, 0},
yiaddr: {0, 0, 0, 0},
siaddr: {0, 0, 0, 0},
giaddr: {0, 0, 0, 0},
chaddr: {0, 0, 0, 0, 0, 0},
options: []
@type option :: {non_neg_integer, binary} | {atom, any}
@type t(v) :: %__MODULE__{
op: v,
htype: 1,
hlen: 6,
hops: non_neg_integer,
xid: non_neg_integer,
secs: non_neg_integer,
flags: non_neg_integer,
ciaddr: Utils.ip4,
yiaddr: Utils.ip4,
siaddr: Utils.ip4,
giaddr: Utils.ip4,
chaddr: Utils.mac,
options: %{
optional(non_neg_integer) => binary,
optional(atom) => any
}
}
@type t :: t(1) | t(2)
@type request :: t(1)
@type response :: t(2)
@typedoc """
erlang's internal representation of an active udp packet.
"""
@type udp_packet :: {
:udp,
:gen_udp.socket,
ExDhcp.ip4,
:inet.ancillary_data,
binary}
@bootp_octets 192 * 8
@doc """
Decodes the contents of a UDP packet (or raw binary) into a `Packet` struct.
"""
@spec decode(udp_packet | binary, [module]) :: t | udp_packet
def decode(udp_packet, option_parsers \\ [BasicOptions])
def decode({:udp, _, _, _, binary = <<_::1888>> <> @magic_cookie <> _}, option_parsers) do
decode(binary, option_parsers)
end
def decode(
<<op, htype, @hlen_macaddr, hops, xid::size(32), secs::size(16),
flags::size(16), ciaddr::binary-size(4), yiaddr::binary-size(4),
siaddr::binary-size(4), giaddr::binary-size(4), chaddr::binary-size(6),
_::80, 0::@bootp_octets, @magic_cookie>> <> options,
option_parsers) do
%__MODULE__{
op: op,
htype: htype,
hops: hops,
xid: xid,
secs: secs,
flags: flags,
ciaddr: Utils.bin2ip(ciaddr),
yiaddr: Utils.bin2ip(yiaddr),
siaddr: Utils.bin2ip(siaddr),
giaddr: Utils.bin2ip(giaddr),
chaddr: Utils.bin2mac(chaddr),
options: Options.decode(options, option_parsers)
}
end
@doc """
Encode a message so that it can be put in a UDP packet
"""
@spec encode(t) :: iolist
def encode(message, modules \\ [BasicOptions]) do
options = Options.encode(message.options, modules)
ciaddr = Utils.ip2bin(message.ciaddr)
yiaddr = Utils.ip2bin(message.yiaddr)
siaddr = Utils.ip2bin(message.siaddr)
giaddr = Utils.ip2bin(message.giaddr)
chaddr = Utils.mac2bin(message.chaddr)
[message.op, message.htype, message.hlen, message.hops, <<message.xid::32>>,
<<message.secs::16>>, <<message.flags::16>>, ciaddr, yiaddr, siaddr, giaddr,
chaddr, <<0::80>>, <<0::@bootp_octets>>, @magic_cookie | options]
end
@builtin_options [:op, :htype, :hlen, :hops, :xid, :secs, :flags,
:ciaddr, :yiaddr, :siaddr, :giaddr, :chaddr]
@message_type 53
@message_map %{discover: <<1>>,
offer: <<2>>,
request: <<3>>,
decline: <<4>>,
ack: <<5>>,
nak: <<6>>,
release: <<7>>,
inform: <<8>>}
def respond(packet = %__MODULE__{}, type, opts) do
builtins = opts
|> Keyword.take(@builtin_options)
|> Enum.into(%{op: 2})
extras = opts
|> Keyword.drop(@builtin_options)
|> Enum.into(%{@message_type => @message_map[type]})
packet
|> Map.merge(builtins)
|> Map.put(:options, extras)
end
end
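# Usage sketch: answering a previously decoded request. `packet` stands for a
# decoded request struct, and the yiaddr value is illustrative. respond/3 sets
# op: 2 and the DHCP message type option (e.g. :offer -> option 53 = <<2>>).
#
#     response =
#       packet
#       |> ExDhcp.Packet.respond(:offer, yiaddr: {192, 168, 0, 50})
#       |> ExDhcp.Packet.encode()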
|
lib/ex_dhcp/packet.ex
| 0.73431
| 0.502991
|
packet.ex
|
starcoder
|
defmodule Exnoops.Mashbot do
@moduledoc """
Module to interact with Github's Noop: Mashbot
See the [official `noop` documentation](https://noopschallenge.com/challenges/mashbot) for API information including the accepted parameters.
"""
require Logger
import Exnoops.API
import Exnoops.Vexbot, only: [format_vectors: 1]
import Exnoops.Polybot, only: [format_polygons: 1]
import Exnoops.Hexbot, only: [format_colors: 1]
import Exnoops.Directbot, only: [format_directions: 1]
@noop "mashbot"
@doc """
Query Mashbot for... well, a mash of responses
+ Parameters are passed to the function as a keyword list.
+ Parameters that accept multiple values should be put into the keyword list like `{:key, [value1, value2]}`. See example below.
## Examples
iex> Exnoops.Mashbot.query()
{:ok, %{
"hexbot" => [
{"#E9B104", nil}
],
"directbot" => [
{ :right, 58, 7, nil }
],
"polybot" => [
[
{823, 349},
{812, 405},
{775, 406},
{748, 407},
{724, 427},
{673, 484},
{656, 454},
{648, 407},
{546, 419},
{604, 370},
{529, 337},
{533, 322},
{552, 276},
{581, 255},
{614, 240},
{653, 222},
{692, 186},
{732, 176},
{788, 209},
{768, 253},
{824, 286},
{820, 291}
]
],
"vexbot" => [
{
{65, 197},
{640, 879},
57
}
]
}}
"""
@spec query(keyword()) :: {atom(), map()}
def query(opts \\ []) when is_list(opts) do
Logger.debug("Calling Mashbot.query()")
case get("/" <> @noop, opts) do
{:ok, res_map} ->
{:ok, mash_map_handler(res_map)}
error ->
error
end
end
defp mash_map_handler(res_map) when is_map(res_map) do
res_map
|> Task.async_stream(&mash_handler/1)
|> Stream.map(fn {:ok, m} -> m end)
|> Enum.into(%{})
end
defp mash_handler({"hexbot" = noop, colors}) do
{noop, format_colors(colors)}
end
defp mash_handler({"directbot" = noop, directions}) do
{noop, format_directions(directions)}
end
defp mash_handler({"polybot" = noop, polygons}) do
{noop, format_polygons(polygons)}
end
defp mash_handler({"vexbot" = noop, vectors}) do
{noop, format_vectors(vectors)}
end
end
|
lib/exnoops/mashbot.ex
| 0.698946
| 0.600423
|
mashbot.ex
|
starcoder
|
defmodule Matrix do
import ExclusiveRange
@moduledoc "Matrix like things"
@type t(of) :: %{required(non_neg_integer()) => %{required(non_neg_integer()) => of}}
@spec of_dims(non_neg_integer(), non_neg_integer(), of) :: t(of) when of: var
def of_dims(x, y, init) do
col = Enum.map(erange(0..y), fn i -> {i, init} end) |> Map.new()
Enum.map(erange(0..x), fn i -> {i, col} end) |> Map.new()
end
@spec of_dims_f(
non_neg_integer(),
non_neg_integer(),
(non_neg_integer(), non_neg_integer() -> of)
) :: t(of)
when of: var
def of_dims_f(x, y, f) do
Enum.map(erange(0..x), fn ix ->
col = Enum.map(erange(0..y), fn iy -> {iy, f.(ix, iy)} end) |> Map.new()
{ix, col}
end)
|> Map.new()
end
@spec map(t(of_in), (non_neg_integer(), non_neg_integer(), of_in -> of_out)) :: t(of_out)
when of_in: var, of_out: var
def map(m, f) do
m
|> Enum.map(fn {x, col} ->
col = col |> Enum.map(fn {y, val} -> {y, f.(x, y, val)} end) |> Map.new()
{x, col}
end)
|> Map.new()
end
@spec diff(t(of), t(of)) :: [%{x: non_neg_integer(), y: non_neg_integer(), new: of}]
when of: var
def diff(a, b) do
Enum.reduce(a, [], fn {x, col}, acc -> diff_inner(x, col, b[x], acc) end)
end
@spec reduce(t(of), acc, (non_neg_integer(), non_neg_integer(), of, acc -> acc)) :: acc
when of: var, acc: var
def reduce(m, i, f) do
for {x, col} <- m,
{y, val} <- col,
reduce: i do
acc -> f.(x, y, val, acc)
end
end
@spec draw_at(t(of), non_neg_integer(), non_neg_integer(), of) :: t(of) when of: var
def draw_at(screen, x, y, val) do
put_in(screen[x][y], val)
end
@spec draw_at(t(of), {non_neg_integer(), non_neg_integer()}, of) :: t(of) when of: var
def draw_at(screen, {x, y}, val), do: draw_at(screen, x, y, val)
@spec draw_rect(
t(of),
{non_neg_integer(), non_neg_integer()},
{non_neg_integer(), non_neg_integer()},
of
) :: t(of)
when of: var
def draw_rect(screen, {x0, y0} = _top_left, {x1, y1} = _bottom_right, val) do
for x <- erange(x0..x1),
y <- erange(y0..y1),
reduce: screen do
screen -> draw_at(screen, x, y, val)
end
end
@spec at(t(of), {non_neg_integer(), non_neg_integer()}) :: of when of: var
def at(m, {x, y}), do: at(m, x, y)
@spec at(t(of), non_neg_integer(), non_neg_integer()) :: of when of: var
def at(m, x, y), do: m[x][y]
defp diff_inner(x, a, b, acc) do
Enum.reduce(a, acc, fn {y, val}, acc ->
val2 = b[y]
if val != val2, do: [%{x: x, y: y, new: val2} | acc], else: acc
end)
end
end
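# Usage sketch: build a 2x2 zero matrix, draw one cell, and diff against the
# original.
#
#     iex> m = Matrix.of_dims(2, 2, 0)
#     iex> m2 = Matrix.draw_at(m, 1, 1, 5)
#     iex> Matrix.diff(m, m2)
#     [%{x: 1, y: 1, new: 5}]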
|
web/lib/infolab_light_games/matrix.ex
| 0.68763
| 0.541348
|
matrix.ex
|
starcoder
|
defmodule AWS.Cloudsearchdomain do
@moduledoc """
You use the AmazonCloudSearch2013 API to upload documents to a search domain and
search those documents.
The endpoints for submitting `UploadDocuments`, `Search`, and `Suggest` requests
are domain-specific. To get the endpoints for your domain, use the Amazon
CloudSearch configuration service `DescribeDomains` action. The domain endpoints
are also displayed on the domain dashboard in the Amazon CloudSearch console.
You submit suggest requests to the search endpoint.
For more information, see the [Amazon CloudSearch Developer Guide](http://docs.aws.amazon.com/cloudsearch/latest/developerguide).
"""
@doc """
Retrieves a list of documents that match the specified search criteria.
How you specify the search criteria depends on which query parser you use.
Amazon CloudSearch supports four query parsers:
* `simple`: search all `text` and `text-array` fields for the
specified string. Search for phrases, individual terms, and prefixes.
* `structured`: search specific fields, construct compound queries
using Boolean operators, and use advanced features such as term boosting and
proximity searching.
* `lucene`: specify search criteria using the Apache Lucene query
parser syntax.
* `dismax`: specify search criteria using the simplified subset of
the Apache Lucene query parser syntax defined by the DisMax query parser.
For more information, see [Searching Your Data](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching.html)
in the *Amazon CloudSearch Developer Guide*.
The endpoint for submitting `Search` requests is domain-specific. You submit
search requests to a domain's search endpoint. To get the search endpoint for
your domain, use the Amazon CloudSearch configuration service `DescribeDomains`
action. A domain's endpoints are also displayed on the domain dashboard in the
Amazon CloudSearch console.
"""
def search(client, cursor \\ nil, expr \\ nil, facet \\ nil, filter_query \\ nil, highlight \\ nil, partial \\ nil, query, query_options \\ nil, query_parser \\ nil, return \\ nil, size \\ nil, sort \\ nil, start \\ nil, stats \\ nil, options \\ []) do
path_ = "/2013-01-01/search?format=sdk&pretty=true"
headers = []
query_ = []
query_ = if !is_nil(stats) do
[{"stats", stats} | query_]
else
query_
end
query_ = if !is_nil(start) do
[{"start", start} | query_]
else
query_
end
query_ = if !is_nil(sort) do
[{"sort", sort} | query_]
else
query_
end
query_ = if !is_nil(size) do
[{"size", size} | query_]
else
query_
end
query_ = if !is_nil(return) do
[{"return", return} | query_]
else
query_
end
query_ = if !is_nil(query_parser) do
[{"q.parser", query_parser} | query_]
else
query_
end
query_ = if !is_nil(query_options) do
[{"q.options", query_options} | query_]
else
query_
end
query_ = if !is_nil(query) do
[{"q", query} | query_]
else
query_
end
query_ = if !is_nil(partial) do
[{"partial", partial} | query_]
else
query_
end
query_ = if !is_nil(highlight) do
[{"highlight", highlight} | query_]
else
query_
end
query_ = if !is_nil(filter_query) do
[{"fq", filter_query} | query_]
else
query_
end
query_ = if !is_nil(facet) do
[{"facet", facet} | query_]
else
query_
end
query_ = if !is_nil(expr) do
[{"expr", expr} | query_]
else
query_
end
query_ = if !is_nil(cursor) do
[{"cursor", cursor} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieves autocomplete suggestions for a partial query string.
You can use suggestions to display likely matches before users finish
typing. In Amazon CloudSearch, suggestions are based on the contents of a
particular text field. When you request suggestions, Amazon CloudSearch finds
all of the documents whose values in the suggester field start with the
specified query string. The beginning of the field must match the query string
to be considered a match.
For more information about configuring suggesters and retrieving suggestions,
see [Getting Suggestions](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-suggestions.html)
in the *Amazon CloudSearch Developer Guide*.
The endpoint for submitting `Suggest` requests is domain-specific. You submit
suggest requests to a domain's search endpoint. To get the search endpoint for
your domain, use the Amazon CloudSearch configuration service `DescribeDomains`
action. A domain's endpoints are also displayed on the domain dashboard in the
Amazon CloudSearch console.
"""
def suggest(client, query, size \\ nil, suggester, options \\ []) do
path_ = "/2013-01-01/suggest?format=sdk&pretty=true"
headers = []
query_ = []
query_ = if !is_nil(suggester) do
[{"suggester", suggester} | query_]
else
query_
end
query_ = if !is_nil(size) do
[{"size", size} | query_]
else
query_
end
query_ = if !is_nil(query) do
[{"q", query} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Posts a batch of documents to a search domain for indexing.
A document batch is a collection of add and delete operations that represent the
documents you want to add, update, or delete from your domain. Batches can be
described in either JSON or XML. Each item that you want Amazon CloudSearch to
return as a search result (such as a product) is represented as a document.
Every document has a unique ID and one or more fields that contain the data that
you want to search and return in results. Individual documents cannot contain
more than 1 MB of data. The entire batch cannot exceed 5 MB. To get the best
possible upload performance, group add and delete operations in batches that are
close to the 5 MB limit. Submitting a large volume of single-document batches can
overload a domain's document service.
The endpoint for submitting `UploadDocuments` requests is domain-specific. To
get the document endpoint for your domain, use the Amazon CloudSearch
configuration service `DescribeDomains` action. A domain's endpoints are also
displayed on the domain dashboard in the Amazon CloudSearch console.
For more information about formatting your data for Amazon CloudSearch, see
[Preparing Your Data](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/preparing-data.html)
in the *Amazon CloudSearch Developer Guide*. For more information about
uploading data for indexing, see [Uploading Data](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/uploading-data.html)
in the *Amazon CloudSearch Developer Guide*.
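## Example
A rough sketch; the input keys follow the UploadDocuments API (a `"documents"`
payload plus a `"contentType"` that is promoted to the Content-Type header), so
treat the exact shape as an assumption:
    input = %{
      "contentType" => "application/json",
      "documents" => ~s([{"type": "add", "id": "1", "fields": {"title": "Example"}}])
    }
    {:ok, _body, _response} = upload_documents(client, input)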
"""
def upload_documents(client, input, options \\ []) do
path_ = "/2013-01-01/documents/batch?format=sdk"
{headers, input} =
[
{"contentType", "Content-Type"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@spec request(AWS.Client.t(), atom(), binary(), list(), list(), map() | nil, list(), pos_integer() | nil) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "cloudsearch"}
host = build_host("cloudsearchdomain", client)
url = host
|> build_url(path, client)
|> add_query(query, client)
additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(client, method, url, payload, headers, options, success_status_code)
end
defp perform_request(client, method, url, payload, headers, options, success_status_code) do
case AWS.Client.request(client, method, url, payload, headers, options) do
{:ok, %{status_code: status_code, body: body} = response}
when is_nil(success_status_code) and status_code in [200, 202, 204]
when status_code == success_status_code ->
body = if(body != "", do: decode!(client, body))
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, [], _client) do
url
end
defp add_query(url, query, client) do
querystring = encode!(client, query, :query)
"#{url}?#{querystring}"
end
defp encode!(client, payload, format \\ :json) do
AWS.Client.encode!(client, payload, format)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
# (end of lib/aws/generated/cloudsearchdomain.ex)
defmodule Judgment.Majority.Analysis do
@moduledoc """
An analysis of the tally of a single proposal (its merit profile).
It holds neither the score nor the rank; it is used to compute the score.
"""
defstruct [
:median_grade,
:second_group_grade,
:second_group_size,
:second_group_sign,
:adhesion_group_grade,
:adhesion_group_size,
:contestation_group_grade,
:contestation_group_size
]
def run_on(proposal_tally) do
run_on(proposal_tally, true)
end
@doc """
Run the analysis on the provided proposal tally.
Returns a `Judgment.Majority.Analysis` struct, so this function acts as a factory.
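## Example
An illustrative tally of ten judgments across five grades (worst to best):
    analysis = Judgment.Majority.Analysis.run_on([1, 2, 4, 2, 1])
    analysis.median_grade
    #=> 2 (the index of the middle grade)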
"""
def run_on(proposal_tally, favor_contestation) do
amount_of_judgments =
proposal_tally
|> Enum.sum()
median_threshold =
if favor_contestation do
amount_of_judgments - 1
else
amount_of_judgments
end
# euclidean (integer) division
|> div(2)
cumul =
proposal_tally
|> Enum.scan(fn a, b -> a + b end)
# |> Enum.scan(&(&1 + &2)) # less obvious equivalent
merit =
proposal_tally
|> Enum.with_index()
|> Enum.zip(
# add start: cumulative count before each grade (drop the final total)
[0 | cumul]
|> Enum.drop(-1)
)
# add stop
|> Enum.zip(cumul)
|> Enum.map(fn {{{grade_tally, index}, start}, stop} ->
{index, grade_tally, start, stop}
end)
|> Enum.map(fn {index, grade_tally, start, stop} ->
  cond do
    start < median_threshold && stop <= median_threshold ->
      {index, grade_tally, start, stop, :contestation}
    start <= median_threshold && stop > median_threshold ->
      {index, grade_tally, start, stop, :majority}
    start > median_threshold && stop > median_threshold ->
      {index, grade_tally, start, stop, :adhesion}
    true ->
      {index, grade_tally, start, stop, :majority}
  end
end)
median_grade =
merit
|> filter_by_type(:majority)
|> Enum.map(fn {index, _grade_tally, _start, _stop, _type} ->
index
end)
|> hd()
contestation_group =
  merit
  |> filter_by_type(:contestation)
adhesion_group =
  merit
  |> filter_by_type(:adhesion)
contestation_group_size =
  contestation_group
  |> sum()
adhesion_group_size =
  adhesion_group
  |> sum()
contestation_group_grade =
  contestation_group
  |> keep_only_with_grades()
  |> fetch_index()
  |> Enum.reverse()
  |> List.first()
  |> default(0)
adhesion_group_grade =
  adhesion_group
  |> keep_only_with_grades()
  |> fetch_index()
  |> List.first()
  |> default(0)
contestation_is_biggest =
if favor_contestation do
contestation_group_size >= adhesion_group_size
else
contestation_group_size > adhesion_group_size
end
second_group_grade =
if contestation_is_biggest do
contestation_group_grade
else
adhesion_group_grade
end
second_group_size =
if contestation_is_biggest do
contestation_group_size
else
adhesion_group_size
end
second_group_sign =
if contestation_is_biggest do
-1
else
1
end
%Judgment.Majority.Analysis{
median_grade: median_grade,
second_group_grade: second_group_grade,
second_group_size: second_group_size,
second_group_sign: second_group_sign,
adhesion_group_grade: adhesion_group_grade,
adhesion_group_size: adhesion_group_size,
contestation_group_grade: contestation_group_grade,
contestation_group_size: contestation_group_size
}
end
defp filter_by_type(merit, discriminator) do
merit
|> Enum.filter(fn {_index, _grade_tally, _start, _stop, type} ->
type == discriminator
end)
end
defp keep_only_with_grades(merit) do
merit
|> Enum.filter(fn {_index, grade_tally, _start, _stop, _type} ->
grade_tally > 0
end)
end
defp sum(merit) do
merit
|> Enum.map(fn {_index, grade_tally, _start, _stop, _type} ->
grade_tally
end)
|> Enum.sum()
end
defp fetch_index(merit) do
merit
|> Enum.map(fn {index, _grade_tally, _start, _stop, _type} ->
index
end)
end
defp default(nil, default), do: default
defp default(value, _default), do: value
end
# (end of lib/judgment/majority/analysis.ex)
defmodule VintageNetEthernet do
@behaviour VintageNet.Technology
require Logger
alias VintageNet.Interface.RawConfig
alias VintageNet.IP.{DhcpdConfig, IPv4Config}
alias VintageNetEthernet.MacAddress
@moduledoc """
Support for common wired Ethernet interface configurations
Configurations for this technology are maps with a `:type` field set to
`VintageNetEthernet`. The following additional fields are supported:
* `:ipv4` - IPv4 options. See VintageNet.IP.IPv4Config.
* `:dhcpd` - DHCP daemon options if running a static IP configuration. See
VintageNet.IP.DhcpdConfig.
* `:mac_address` - A MAC address string or an MFA tuple. VintageNet will set
the MAC address of the network interface to the value specified. If an MFA
tuple is passed, VintageNet will `apply` it and use the return value as the
address.
An example DHCP configuration is:
```elixir
%{type: VintageNetEthernet, ipv4: %{method: :dhcp}}
```
An example static IP configuration is:
```elixir
%{
type: VintageNetEthernet,
ipv4: %{
method: :static,
address: {192, 168, 0, 5},
prefix_length: 24,
gateway: {192, 168, 0, 1}
}
}
```
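An example that resolves the MAC address at runtime from an MFA tuple
(`MyApp.MacAddress.from_eeprom/0` is a hypothetical function returning a MAC
address string):
```elixir
%{
  type: VintageNetEthernet,
  ipv4: %{method: :dhcp},
  mac_address: {MyApp.MacAddress, :from_eeprom, []}
}
```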
"""
@impl VintageNet.Technology
def normalize(%{type: __MODULE__} = config) do
config
|> normalize_mac_address()
|> IPv4Config.normalize()
|> DhcpdConfig.normalize()
end
defp normalize_mac_address(%{mac_address: mac_address} = config) do
if MacAddress.valid?(mac_address) or mfa?(mac_address) do
config
else
raise ArgumentError, "Invalid MAC address #{inspect(mac_address)}"
end
end
defp normalize_mac_address(config), do: config
defp mfa?({m, f, a}) when is_atom(m) and is_atom(f) and is_list(a), do: true
defp mfa?(_), do: false
@impl VintageNet.Technology
def to_raw_config(ifname, %{type: __MODULE__} = config, opts) do
normalized_config = normalize(config)
%RawConfig{
ifname: ifname,
type: __MODULE__,
source_config: normalized_config,
required_ifnames: [ifname]
}
|> add_mac_address_config(normalized_config)
|> IPv4Config.add_config(normalized_config, opts)
|> DhcpdConfig.add_config(normalized_config, opts)
end
defp add_mac_address_config(raw_config, %{mac_address: mac_address}) do
resolved_mac = resolve_mac(mac_address)
if MacAddress.valid?(resolved_mac) do
new_up_cmds =
raw_config.up_cmds ++
[{:run, "ip", ["link", "set", raw_config.ifname, "address", resolved_mac]}]
%{raw_config | up_cmds: new_up_cmds}
else
Logger.warn("vintage_net_ethernet: ignoring invalid MAC address '#{inspect(resolved_mac)}'")
raw_config
end
end
defp add_mac_address_config(raw_config, _config) do
raw_config
end
defp resolve_mac({m, f, a}) do
apply(m, f, a)
rescue
e -> {:error, e}
end
defp resolve_mac(mac_address), do: mac_address
@impl VintageNet.Technology
def ioctl(_ifname, _command, _args) do
{:error, :unsupported}
end
@impl VintageNet.Technology
def check_system(_opts) do
# TODO
:ok
end
end
# (end of lib/vintage_net_ethernet.ex)
defmodule MoneyBin.Ledgers do
use MoneyBin, :service
@moduledoc """
This module is the primary interface for creating and retrieving ledgers.
It also contains a query that can be used for preloading ecto associations
with aggregate data.
"""
@doc """
Creates a `MoneyBin.Ledger`.
Must have at least one member account.
`credit` is set to false by default, representing an asset account that
increases in value when the account is debited. When `credit` is true,
the account increases in value when it is credited.
## Examples
iex> MoneyBin.Ledgers.create(%{
members: [
%{account_id: provider_acc_id, credit: true},
%{account_id: provider_fees_id, credit: false}
]
})
%MoneyBin.Ledger{}
"""
def create(attrs \\ %{}), do: attrs |> @schemas[:ledger].changeset |> @repo.insert! |> find()
@doc """
Retrieves a `MoneyBin.Ledger` with the aggregate data included, uses `ledger_query/0`.
## Examples
iex> MoneyBin.Ledgers.find(ledger_id)
%MoneyBin.Ledger{}
"""
def find(%_{ledger_id: id}), do: find(id)
def find(id), do: ledger_query() |> where([ledger], ledger.id == ^id) |> @repo.one
@doc """
An `Ecto.Query` to retrieve the aggregate information for a ledger.
## Examples
iex> MoneyBin.Ledgers.ledger_query()
%Ecto.Query{}
"""
def ledger_query do
query = from(ledger in @schemas[:ledger])
query
|> ledger_joins
|> group_by([ledger], ledger.id)
|> select_merge([_, _, acc, _, de, _, ce], %{
value:
fragment(
"? + ?",
fragment(
"? - ?",
coalesce(sum(de.debit_amount), 0),
coalesce(sum(de.credit_amount), 0)
),
fragment(
"? - ?",
coalesce(sum(ce.credit_amount), 0),
coalesce(sum(ce.debit_amount), 0)
)
),
entry_count:
fragment(
"? + ?",
count(fragment("DISTINCT ?", de.id)),
count(fragment("DISTINCT ?", ce.id))
),
account_count: count(fragment("DISTINCT ?", acc.id))
})
end
defp ledger_joins(query),
do:
query
|> join(:left, [ledger], mem in assoc(ledger, :members))
|> join(:left, [_, mem], acc in assoc(mem, :account))
|> join(
:left,
[_, mem],
da in ^@schemas[:account],
on: mem.account_id == da.id and mem.credit == false
)
|> join(
:left,
[_, _, _, da],
de in ^@schemas[:journal_entry],
on: de.account_id == da.id
)
|> join(
:left,
[_, mem],
ca in ^@schemas[:account],
on: mem.account_id == ca.id and mem.credit == true
)
|> join(
:left,
[_, _, _, _, _, ca],
ce in ^@schemas[:journal_entry],
on: ce.account_id == ca.id
)
end
# (end of lib/money_bin/ledgers/ledgers.ex)
defmodule Placid.Response.Helpers do
@moduledoc """
Placid bundles these response helpers with handlers to
assist in sending a response.
#### Example
defmodule Hello do
use Placid.Handler
def index(conn, []) do
render conn, "showing index handler"
end
def show(conn, args) do
forward conn, OtherHandler, :show, args
end
def create(conn, []) do
render(conn, "page created")
|> status(201)
|> send_resp
end
end
"""
import Plug.Conn
alias Placid.Response.Rendering
alias Placid.Response.StatusCode
@type status_code :: atom | 100..999
@type enumerable :: Map | List
@doc """
sets connection status
## Arguments
* `conn` - `Plug.Conn`
* `status_code` - `Integer`
## Returns
`Plug.Conn`
"""
@spec status(Plug.Conn.t, status_code) :: Plug.Conn.t
def status(conn, status_code) when is_integer(status_code)
when is_atom(status_code) do
%StatusCode{ code: code } = status_code |> StatusCode.find
%Plug.Conn{ conn | status: code,
state: :set }
end
@doc """
sets response headers
## Arguments
* `conn` - `Plug.Conn`
* `headers` - `List` of `{key, value}` tuples
## Returns
`Plug.Conn`
"""
@spec headers(Plug.Conn.t, Plug.Conn.headers) :: Plug.Conn.t
def headers(conn, headers) do
%Plug.Conn{ conn | resp_headers: headers,
state: :set }
end
@doc """
Sends response as-is. It is expected that status codes,
headers, body, etc have been set by the handler
action.
## Arguments
* `conn` - `Plug.Conn`
## Returns
`Plug.Conn`
"""
@spec raw(Plug.Conn.t) :: Plug.Conn.t
def raw(conn) do
conn |> send_resp
end
@doc """
Sends a normal response.
## Arguments
* `conn` - `Plug.Conn`
* `data` - `Enumerable`
* `opts` - `Keyword`
## Returns
`Plug.Conn`
"""
@spec render(Plug.Conn.t, enumerable, Keyword.t) :: Plug.Conn.t
def render(conn, data, opts \\ []) do
req_ct = case Plug.Conn.get_req_header(conn, "accept") do
[ct] -> ct
_ -> Application.get_env(:placid, :default_content_type, "application/json")
end
content_type = Keyword.get(opts, :content_type, req_ct)
%StatusCode{ code: code } = Keyword.get(opts, :status, :ok)
|> StatusCode.find
conn = Rendering.serialize_to_body(conn, data, content_type)
conn
|> put_status(code)
|> send_resp_if_not_sent
end
@doc """
Ends the response.
## Arguments
* `conn` - `Plug.Conn`
* `opts` - `Keyword`
## Returns
`Plug.Conn`
"""
@spec halt!(Plug.Conn.t, Keyword.t) :: Plug.Conn.t
def halt!(conn, opts \\ []) do
opts = [ status: 401] |> Keyword.merge(opts)
%StatusCode{ code: code, reason: reason } = opts[:status] |> StatusCode.find
conn
|> send_resp_if_not_sent(code, reason)
end
@doc """
Sends a 404 (Not found) response.
## Arguments
* `conn` - `Plug.Conn`
## Returns
`Plug.Conn`
"""
@spec not_found(Plug.Conn.t, binary) :: Plug.Conn.t
def not_found(conn, message \\ "Not Found") do
conn
|> send_resp_if_not_sent(404, message)
end
@doc """
Forwards the response to another handler action.
## Arguments
* `conn` - `Plug.Conn`
* `handler` - `Atom`
* `action` - `Atom`
* `args` - `Keyword`
## Returns
`Plug.Conn`
"""
@spec forward(Plug.Conn.t, atom, atom, Keyword.t) :: Plug.Conn.t
def forward(conn, handler, action, args \\ []) do
handler.call conn, [ action: action, args: args ++ conn.params ]
end
@doc """
Redirects the response.
## Arguments
* `conn` - `Plug.Conn`
* `location` - `String`
* `opts` - `Keyword`
## Returns
`Plug.Conn`
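## Example
A minimal sketch inside a handler action:
    conn |> redirect("/login", status: 301)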
"""
@spec redirect(Plug.Conn.t, binary, Keyword.t) :: Plug.Conn.t
def redirect(conn, location, opts \\ []) do
opts = [ status: 302 ] |> Keyword.merge(opts)
%StatusCode{code: code, reason: reason} = opts[:status] |> StatusCode.find
conn
|> put_resp_header_if_not_sent("location", location)
|> send_resp_if_not_sent(code, reason)
end
defp put_resp_header_if_not_sent(%Plug.Conn{ state: :sent } = conn, _, _) do
conn
end
defp put_resp_header_if_not_sent(conn, key, value) do
conn |> put_resp_header(key, value)
end
defp send_resp_if_not_sent(%Plug.Conn{ state: :sent } = conn) do
conn
end
defp send_resp_if_not_sent(%Plug.Conn{} = conn) do
conn |> send_resp
end
defp send_resp_if_not_sent(%Plug.Conn{ state: :sent } = conn, _, _) do
conn
end
defp send_resp_if_not_sent(conn, status, body) do
conn |> send_resp(status, body)
end
end
# (end of lib/placid/response/helpers.ex)
defmodule Resourceful.Type.Ecto.Query do
@moduledoc """
This module is something of hack designed to deal with the fact that Ecto does
not make it simple to:
1. Dynamically assign aliases to joins. When specifying `:as` it _must_ be
a compile-time atom. A recent reference to this issue can be found
[here](https://elixirforum.com/t/build-dynamic-bindings-for-ecto-query-order-by/1638/17).
This was learned the hard way. This is especially frustrating because
aside from assignment, the aliases can be referenced dynamically
throughout the rest of the query DSL with a `^`.
2. Preload joins if a join is already present and handle join introspection
in general.
The easiest way to solve this was to manipulate the struct because there's
nothing magical about aliases. They're just a named reference to a positional
binding. Preloads operate similarly. A tree of tuples and keyword lists
defines which associations should be preloaded.
Join introspection is also important so fields can be added to a query in an
ad-hoc manner without having to care whether the query contains joins or not.
## A Note About Intended Use Cases
It's important to remember that unless you go out of your way to mess with
data structures in fields (I say this because this module is going out of its
way to mess with data structures), relationships that support inclusion must
follow a one-to-one path from start to finish.
See `Resourceful.Type.Relationship` for a broader explanation on this design
decision.
## Is this a good idea?
It's certainly possible the underlying `Ecto.Query` structs change. This sort
of manipulation is effectively using a private API. Hopefully getting proper
support for this behavior is doable.
Additionally, it's possible to handle some (and maybe all) of this using
macros. However, that ultimately seemed even more convoluted and confusing.
At least this is mostly simple recursion.
What it does do is solve the problem it needed to solve.
"""
import Ecto.Query, warn: false
alias Resourceful.Type
@doc """
Including a field does two things:
1. It automatically joins any necessary relationships (see `join_field/3`).
2. It preloads said relationships.
This allows a single query to bring back all of the data and also allows
filtering and sorting to work on related attributes, not just those of the
root type.
For reference, an `Ecto.Query` stores preload instructions meant to happen
with joins in the `:assocs` key in the form of a tree of keyword lists and
tuples.
A single join looks like this: `[<assoc_name>: {<position>, []}]` The tuple
contains the position of the binding for the related join and a list of any
child joins which would be in the same form as the parent.
In the song/album/artist example this could look like the following:
`[album: {1, [artist: {2, []}]}]`.
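For example, as a sketch (assuming `Song` is an Ecto schema and `song_type`
its corresponding `Resourceful.Type`; `MyApp.Repo` is likewise a placeholder):
    Song
    |> include_field(song_type, "album.artist.name")
    |> MyApp.Repo.all()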
"""
@spec include_field(any(), %Type{}, String.t()) :: %Ecto.Query{}
def include_field(queryable, %Type{} = type, field_name) do
graph_path =
type
|> Type.fetch_graphed_field!(field_name)
|> graph_path()
queryable
|> join_graph(graph_path)
|> merge_preloads(graph_path)
end
defp merge_preloads(queryable, graph_path) do
%{queryable | assocs: build_new_preloads(graph_path, queryable.assocs, queryable.aliases)}
end
defp build_new_preloads([], assocs, _), do: assocs
defp build_new_preloads([graphed_rel | path], assocs, aliases) do
assoc_name = graphed_rel.field.map_to
child =
case Keyword.get(assocs, assoc_name) do
nil ->
{position(graphed_rel, aliases), build_new_preloads(path, [], aliases)}
{position, child_assocs} ->
  {position, build_new_preloads(path, child_assocs, aliases)}
end
Keyword.put(assocs, assoc_name, child)
end
defp position(%{query_alias: {alias_name, _}}, aliases) do
Map.fetch!(aliases, alias_name)
end
@doc """
Joining a field joins all associations connected to all relationships in the
field's graph. For example, if viewing a song and the field
`album.artist.name` is joined, two relationships will be joined: the song's
`album`, and the album's `artist`.
The join will be given an alias of the full relationship name. So, the album
will be aliased as `album` and the artist will be aliased as `album.artist`.
If those joins have already been made, the queryable will remain unchanged.
Fields resolving to attributes are ignored, but if they are children of a
relationship, the relationship will be included.
"""
@spec join_field(any(), %Type.GraphedField{}) :: %Ecto.Query{}
def join_field(queryable, graphed_field) do
join_graph(queryable, graph_path(graphed_field))
end
@doc """
Like `join_field/2` except it takes a type and a field name rather than the
graphed field directly.
"""
@spec join_field(any(), %Type{}, String.t()) :: %Ecto.Query{}
def join_field(queryable, %Type{} = type, field_name) do
graphed_field = Type.fetch_graphed_field!(type, field_name)
join_field(queryable, graphed_field)
end
defp graph_path(graphed_field, path \\ [])
defp graph_path(nil, path), do: path
defp graph_path(%{field: %Type.Attribute{}, parent: parent}, path) do
graph_path(parent, path)
end
defp graph_path(graphed_field, path) do
graph_path(graphed_field.parent, [graphed_field | path])
end
defp join_graph(queryable, graph_path) do
Enum.reduce(graph_path, queryable, fn graphed_rel, q ->
maybe_join(q, graphed_rel)
end)
end
defp maybe_join(queryable, %{query_alias: join_as} = graphed_field) do
case has_named_binding?(queryable, join_as) do
true -> queryable
_ -> named_join(queryable, graphed_field, join_as)
end
end
defp named_join(queryable, %{field: field, parent: nil}, join_as) do
queryable
|> join(:left, [q], assoc(q, ^field.map_to))
|> alias_last_join(join_as)
end
defp named_join(
queryable,
%{field: field, parent: %{query_alias: {parent_alias, _}}},
join_as
) do
queryable
|> join(:left, [_, {^parent_alias, p}], assoc(p, ^field.map_to))
|> alias_last_join(join_as)
end
defp alias_last_join(%{aliases: aliases, joins: joins} = query, {alias_name, _}) do
new_joins =
Enum.map(joins, fn join ->
case join do
%{as: nil} -> %{join | as: alias_name}
_ -> join
end
end)
new_aliases = Map.put(aliases, alias_name, length(new_joins))
%{query | aliases: new_aliases, joins: new_joins}
end
end
# (end of lib/resourceful/type/ecto/query.ex)
defmodule Pulsar.Dashboard do
@moduledoc """
The logic for managing a set of jobs and updating them.
"""
alias IO.ANSI
alias __MODULE__.Terminal, as: T
# jobs is a map from job id to job
# new_jobs counts the jobs added since the most recent flush, i.e., the number of new lines to print on the next flush
defstruct jobs: %{}, new_jobs: 0, active_highlight_duration: 0
@doc """
Creates a new, empty dashboard.
`active_highlight_duration` is the number of milliseconds that a newly added or updated job
should be rendered as active (bright). `clear_inactive` is used periodically to identify
jobs that should be downgraded to inactive and re-rendered.
"""
def new_dashboard(active_highlight_duration) when active_highlight_duration > 0 do
%__MODULE__{active_highlight_duration: active_highlight_duration}
end
@doc """
Updates an existing job in the dashboard, returning a modified dashboard.
If the job does not exist, or is marked completed, the dashboard is returned unchanged.
`job_data` is a keyword list of changes to make to the job. Supported keys are:
* `:message` - a string
* `:prefix` - a string
* `:status` - an atom, one of `:normal`, `:error`, or `:ok`
Returns the updated dashboard.
"""
def update_job(dashboard = %__MODULE__{}, jobid, job_data) do
job = dashboard.jobs[jobid]
if job && not(job.completed) do
new_job = Enum.into(job_data, %{job | dirty: true,
active: true,
active_until: active_until(dashboard)})
put_in(dashboard.jobs[jobid], new_job)
else
dashboard
end
end
@doc """
Add a new job to the dashboard.
Returns the dashboard unchanged if the jobid already exists.
"""
def add_job(dashboard = %__MODULE__{}, jobid) when jobid != nil do
if Map.has_key?(dashboard.jobs, jobid) do
dashboard
else
job = %{
status: :normal,
prefix: nil,
message: nil,
dirty: true,
line: 1,
active: true,
completed: false,
active_until: active_until(dashboard),
}
updater = fn (jobs) ->
jobs
|> move_each_job_up()
|> Map.put(jobid, job)
end
dashboard
|> update_jobs(updater)
|> Map.update!(:new_jobs, &(&1 + 1))
end
end
@doc """
Marks a job as completed. Completed jobs float to the top of the list, above any
non-completed jobs. Once marked as complete, a job is removed from the dashboard at the next
flush.
Returns the updated dashboard, or the input dashboard if the job doesn't exist or is already completed.
"""
def complete_job(dashboard = %__MODULE__{}, jobid) when jobid != nil do
jobs = dashboard.jobs
job = jobs[jobid]
if job && not job.completed do
  new_job = %{job | dirty: true,
                    completed: true,
                    active: true,
                    active_until: active_until(dashboard)}
  new_jobs = Map.put(jobs, jobid, new_job)
  Map.put(dashboard, :jobs, new_jobs)
else
  # job missing or already completed
  dashboard
end
end
@doc """
Invoked periodically to clear the active flag of any job that has not been updated recently.
Inactive jobs are marked dirty, to force a redisplay.
"""
def update(dashboard = %__MODULE__{}) do
now = System.system_time(:millisecond)
updater = fn (job) ->
if job.active and job.active_until <= now do
%{job | active: false, dirty: true}
else
job
end
end
update_each_job(dashboard, updater)
end
@doc """
Identify jobs that are 'dirty' (have pending updates) and redraws just those jobs
in the dashboard.
Returns a tuple of the updated dashboard and an enum of strings to send to `IO.write`.
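A typical caller looks roughly like this (sketch):
    {dashboard, output} = Pulsar.Dashboard.flush(dashboard)
    IO.write(output)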
"""
def flush(dashboard=%__MODULE__{}) do
# When there are new jobs, add blank lines to the output for those new jobs
new_job_lines = if dashboard.new_jobs == 0 do
[]
else
for _ <- 1..dashboard.new_jobs, do: "\n"
end
dirty_job_groups = for {_, job} <- dashboard.jobs do
if job.dirty do
[
T.cursor_invisible(),
T.save_cursor_position(),
T.cursor_up(job.line),
T.leftmost_column(),
(case {job.active, job.status} do
{true, :error} -> ANSI.light_red()
{_, :error} -> ANSI.red()
{true, :ok} -> ANSI.light_green()
{_, :ok} -> ANSI.green()
{true, _} -> ANSI.light_white()
_ -> nil
end),
job.prefix,
job.message,
T.clear_to_eol(),
T.restore_cursor_position(),
T.cursor_visible(),
ANSI.reset()
]
end
end
# IO.write hates nils, so we have to filter out nil groups,
# and nil chunks.
all_chunks = dirty_job_groups
|> Enum.reject(&nil?/1)
|> Enum.reduce([], &Enum.into/2)
|> Enum.reject(&nil?/1)
|> Enum.into(new_job_lines)
incomplete_line = dashboard.jobs
|> Map.values()
|> Enum.reject(fn m -> m.completed end)
|> Enum.map(fn m -> m.line end)
|> Enum.reduce(0, &max/2)
# Everything has been flushed to screen and is no longer dirty.
# Inactive, completed lines above incomplete_line are no longer
# needed.
new_jobs = Enum.reduce(dashboard.jobs,
%{},
fn {jobid, job}, m ->
if job.completed && not(job.active) && job.line > incomplete_line do
m
else
Map.put(m, jobid, %{job | dirty: false})
end
end)
new_dashboard = %{dashboard | jobs: new_jobs, new_jobs: 0}
{new_dashboard, all_chunks}
end
@doc """
A variant of flush used to temporarily shut down the dashboard before
some other output.
Returns a tuple of the updated dashboard, and output.
The output moves the cursor to the top line of the dashboard,
then clears to the end of the screen. This temporarily removes
the dashboard from visibility, so that other output can be produced.
The returned dashboard is configured so that the next call to `flush/1`
will add new lines for all jobs, and repaint all lines (e.g., as if
every job was freshly added).
"""
def pause(dashboard=%__MODULE__{}) do
lines = Enum.count(dashboard.jobs) - dashboard.new_jobs
output = if lines > 0 do
[
T.leftmost_column(),
T.cursor_up(lines),
T.clear_to_eos()
]
end
new_dashboard = dashboard
|> update_each_job(fn job -> put_in(job.dirty, true) end)
|> Map.put(:new_jobs, Enum.count(dashboard.jobs))
{new_dashboard, output}
end
# -- PRIVATE --
defp nil?(x), do: x == nil
defp move_each_job_up(jobs) do
# This shifts "up" all existing lines but *does not* mark them dirty
# (because they are on the screen just like they should be).
map_values(jobs, fn model -> update_in(model.line, &(&1 + 1)) end)
end
defp map_values(m = %{}, f) do
m
|> Enum.map(fn {k, v} -> {k, f.(v)} end)
|> Enum.into(%{})
end
defp update_jobs(dashboard = %__MODULE__{}, f) do
%{dashboard | jobs: f.(dashboard.jobs)}
end
defp update_each_job(dashboard , f) do
update_jobs(dashboard, & map_values(&1, f))
end
defp active_until(dashboard) do
System.system_time(:millisecond) + dashboard.active_highlight_duration
end
end
# (end of lib/pulsar/dashboard.ex)
defmodule Sanbase.Metric.Transform do
def transform_to_value_pairs(data, key_name \\ nil)
def transform_to_value_pairs({:ok, []}, _), do: {:ok, []}
def transform_to_value_pairs({:ok, result}, nil) do
# Deduce the key name: it is the single key other than :datetime.
# If there is more than one key besides :datetime this will fail;
# in such cases an explicit key name should be passed.
[key_name] = (result |> hd() |> Map.keys()) -- [:datetime]
result =
result
|> Enum.map(fn %{^key_name => value, datetime: datetime} ->
%{value: value, datetime: datetime}
end)
{:ok, result}
end
def transform_to_value_pairs({:ok, result}, key_name) do
result =
result
|> Enum.map(fn %{^key_name => value, datetime: datetime} ->
%{value: value, datetime: datetime}
end)
{:ok, result}
end
def transform_to_value_pairs({:error, error}, _), do: {:error, error}
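# Illustrative example (the `:active_addresses` key is an assumption; any single
# non-:datetime key works the same way):
#
#     transform_to_value_pairs({:ok, [%{datetime: dt, active_addresses: 5}]})
#     #=> {:ok, [%{value: 5, datetime: dt}]}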
@doc ~s"""
Replace all values except :slug and :datetime in every element with `nil`
if :has_changed is 0
"""
def maybe_nullify_values({:ok, data}) do
result =
Enum.map(
data,
fn
%{has_changed: 0} = elem ->
# use :maps.map/2 instead of Enum.map/2 to avoid unnecessary Map.new/1
:maps.map(
fn
key, value when key in [:slug, :datetime] -> value
_, _ -> nil
end,
Map.delete(elem, :has_changed)
)
elem ->
Map.delete(elem, :has_changed)
end
)
{:ok, result}
end
def maybe_nullify_values({:error, error}), do: {:error, error}
@doc ~s"""
Remove all elements for which :has_changed is 0
"""
def remove_missing_values({:ok, data}) do
{:ok, Enum.reject(data, &(&1.has_changed == 0))}
end
def remove_missing_values({:error, error}), do: {:error, error}
def exec_timeseries_data_query(query, args) do
Sanbase.ClickhouseRepo.query_transform(query, args, fn
[unix, value] ->
%{datetime: DateTime.from_unix!(unix), value: value}
[unix, open, high, low, close] ->
%{
datetime: DateTime.from_unix!(unix),
value_ohlc: %{
open: open,
high: high,
low: low,
close: close
}
}
end)
end
end
# (end of lib/sanbase/metric/transform.ex)
defmodule Grid do
@moduledoc """
Grid creation.
For reproducible grids, seed the random number generator first,
e.g. `:rand.seed(:exsplus, seed)`.
"""
import Util
alias Grid.Obstacle
@type grid :: %{{integer, integer} => :empty | :obstacle | :occupied}
@type coordinates :: {integer, integer}
@type t :: grid
@doc ~S"""
Example usage `Grid.generate(:disk, [17], 30, %{1 => 4, 2 => 4})`
"""
@spec generate(:disk, [non_neg_integer], non_neg_integer, %{any => pos_integer}) ::
{grid, %{any => [coordinates]}}
def generate(type, dimensions, obstacles, teams) do
grid =
apply(Grid, type, dimensions)
|> Obstacle.generate(:count, obstacles)
|> Obstacle.redimension(:square, 2, trunc(obstacles / 2))
placement_grid =
grid
|> Grid.empty_grids()
|> Enum.max_by(&Enum.count/1)
nb_participants = teams |> Map.values() |> Enum.sum()
if map_size(placement_grid) < nb_participants do
raise "Not enough free space to place participants"
end
max_tries = 500
placement = place_participants(placement_grid, teams, dimensions, max_tries)
grid =
placement
|> Map.values()
|> Enum.concat()
|> Enum.map(fn x -> {x, :occupied} end)
|> Enum.into(grid)
{grid, placement}
end
@spec disk(pos_integer) :: grid
def disk(radius) do
mk_pair = fn x, y -> {x, y} end
xs = -radius..radius
ys = xs
liftA2(mk_pair, xs, ys)
|> Enum.filter(fn {x, y} -> abs(x) + abs(y) <= radius end)
|> Enum.map(fn x -> {x, :empty} end)
|> Enum.into(%{})
end
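# Illustrative: `disk(1)` yields the five cells whose Manhattan distance from
# the origin is at most 1, each mapped to :empty:
#
#     disk(1)
#     #=> %{{-1, 0} => :empty, {0, -1} => :empty, {0, 0} => :empty,
#     #     {0, 1} => :empty, {1, 0} => :empty}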
def place_participants(grid, teams, dimensions, max_tries) do
average_dim = Enum.sum(dimensions) / Enum.count(dimensions)
fn ->
generate_individual(teams, grid)
end
|> Stream.repeatedly()
|> Enum.reduce_while({max_tries, average_dim, {-1, %{}}}, fn x, {tries, average_dim, historical_best} ->
{fit, best} =
current_best =
Enum.max_by([historical_best, {evaluate_fitness(x), x}], fn {y, _} -> y end)
cond do
fit >= average_dim ->
{:halt, best}
tries == 0 ->
{:cont, {max_tries, average_dim - 1, current_best}}
true ->
{:cont, {tries - 1, average_dim, current_best}}
end
end)
end
@spec get_adjacent_cells(coordinates, grid) :: grid
def get_adjacent_cells({x, y}, grid) do
grid
|> Map.take([
{x - 1, y},
{x + 1, y},
{x, y - 1},
{x, y + 1}
])
end
@spec empty_grid(coordinates, grid) :: grid
def empty_grid(starting_cell, grid) do
empty_grid_go([starting_cell], grid, %{})
end
defp empty_grid_go([], _, acc) do
acc
end
defp empty_grid_go(starting_cells, grid, acc) do
{current_area, grid} = Map.split(grid, starting_cells)
wave =
starting_cells
|> Enum.flat_map(fn x -> get_adjacent_cells(x, grid) end)
|> Enum.filter(&empty?/1)
|> Enum.map(&fst/1)
|> Enum.uniq()
empty_grid_go(wave, grid, Map.merge(current_area, acc))
end
@spec empty_grids(grid) :: [grid]
def empty_grids(grid) do
empty_cells =
grid
|> Enum.filter(&empty?/1)
|> Enum.map(&fst/1)
if empty_cells == [] do
[]
else
empty_grid = empty_grid(Enum.random(empty_cells), grid)
{_, unexplored_area} = Map.split(grid, Map.keys(empty_grid))
[empty_grid | empty_grids(unexplored_area)]
end
end
def empty?({_, :empty}) do
true
end
def empty?({_, _}) do
false
end
def obstacle?({_, :obstacle}) do
true
end
def obstacle?({_, _}) do
false
end
defp split_random_n(map, n) do
keys =
map
|> Map.keys()
|> Enum.take_random(n)
Map.split(map, keys)
end
defp manhattan({x, y}, {p, q}) do
abs(x - p) + abs(y - q)
end
# GENETIC ALGO
def evaluate_fitness(teams) do
teams
|> Enum.flat_map(fn {team, positions} ->
other_positions =
teams
|> Map.delete(team)
|> Map.values()
|> Enum.concat()
liftA2(&manhattan/2, positions, other_positions)
end)
|> Enum.min()
end
def generate_individual(teams, grid) do
{teams, _} =
Enum.reduce(teams, {%{}, grid}, fn {team, nb_members}, {teams, grid} ->
{positions, grid} = split_random_n(grid, nb_members)
{Map.put(teams, team, Map.keys(positions)), grid}
end)
teams
end
def mutate_individual(_) do
%{}
end
def breed_individuals(_, _) do
{%{}, %{}}
end
def print(grid, :disk, [radius]) do
for x <- -radius..radius do
for y <- -radius..radius do
case grid[{x, y}] do
nil -> " "
:empty -> "░░"
:obstacle -> "▓▓"
:occupied -> "[]"
end
end
|> IO.puts()
end
end
end
# (end of lib/grid.ex)
defmodule Resemblixir.Compare do
require EEx
use Wallaby.DSL
use GenServer
alias Resemblixir.{Scenario, Screenshot, Breakpoint}
defstruct [:test, :scenario, :breakpoint, :mismatch_percentage, :bypass, :session,
:raw_mismatch_percentage, :is_same_dimensions, :analysis_time,
dimension_difference: %{height: nil, width: nil},
diff_bounds: %{top: nil, bottom: nil, left: nil, right: nil},
images: %{ref: nil, test: nil, diff: nil}]
@type t :: %__MODULE__{
test: String.t | nil,
scenario: String.t | nil,
breakpoint: atom | nil,
images: %{required(:ref) => String.t | nil, required(:test) => String.t | nil, required(:diff) => String.t | nil},
bypass: Bypass.t | nil,
session: Wallaby.Session.t | nil,
mismatch_percentage: String.t | nil,
raw_mismatch_percentage: float | nil,
dimension_difference: %{required(:width) => integer, required(:height) => integer} | nil,
is_same_dimensions: boolean | nil
}
@type error :: {:javascript_error, String.t} | :timeout
@type success :: {:ok, __MODULE__.t}
@type failure :: {:error, error, __MODULE__.t}
@type result :: success | failure
@spec compare(%Screenshot{}, Scenario.t, breakpoint :: atom, ref_image_path :: String.t) :: result
def compare(%Screenshot{path: test_image_path}, %Scenario{name: test_name}, breakpoint, ref_image_path) when is_binary(test_image_path) do
%__MODULE__{ scenario: test_name, breakpoint: breakpoint, images: %{ref: ref_image_path, test: test_image_path} }
|> setup_bypass()
|> do_compare()
rescue
error -> {:error, error}
end
def server_name(%__MODULE__{scenario: <<scenario::binary>>, breakpoint: breakpoint}) when is_atom(breakpoint) do
%Scenario{name: scenario}
|> Breakpoint.server_name(breakpoint)
|> Module.concat(:Compare)
end
defp do_compare(%__MODULE__{bypass: %Bypass{port: port}, scenario: test_name, breakpoint: breakpoint} = state) do
# TODO: use :poolboy.transaction to run
{:ok, session} = Wallaby.start_session()
session
|> Wallaby.Browser.visit("http://localhost:" <> Integer.to_string(port) <> http_path(test_name, breakpoint))
|> Wallaby.Browser.assert_has(Wallaby.Query.css("#result"))
|> Wallaby.Browser.execute_script("return window.RESULT", &send_result(&1, state) )
|> Wallaby.end_session()
rescue
error -> {:error, error}
end
def send_result(%{"data" => %{}, "diff" => _} = response, %__MODULE__{} = state) do
result = response
|> Poison.encode!()
|> Poison.decode(keys: :atoms!)
|> format(state)
|> analyze_result()
send self(), {:result, result}
end
def container_id(ref, test) do
IO.iodata_to_binary(["test_", path_to_id(ref, "ref"), "_", path_to_id(test, "test")])
end
def path_to_id(path, type) when is_binary(path) do
IO.iodata_to_binary(["image_", type, "_", Path.basename(path, ".png")])
end
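# Illustrative (paths are placeholders):
#
#     path_to_id("/tmp/shots/home.png", "ref")
#     #=> "image_ref_home"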
defp setup_bypass(%__MODULE__{ scenario: test_name, breakpoint: breakpoint, images: %{ref: ref, test: test}} = state) do
bypass = open_bypass()
Bypass.expect(bypass, "GET", http_path(test_name, breakpoint), &render_template(&1, test_name))
Bypass.expect(bypass, "GET", IO.iodata_to_binary(["/ref/", test_name, ".png"]), &render_image(&1, ref))
Bypass.expect(bypass, "GET", IO.iodata_to_binary(["/test/", test_name, ".png"]), &render_image(&1, test))
Bypass.expect(bypass, "GET", "/resemble.js", &render_js/1)
%{state | bypass: bypass}
end
defp http_path(test_name, breakpoint) when is_binary(test_name) and is_atom(breakpoint) do
IO.iodata_to_binary(["/", test_name, "_", Atom.to_string(breakpoint)])
end
defp render_template(conn, test_name) do
Plug.Conn.resp(conn, 200, """
<html>
<head><script type="text/javascript" src="resemble.js"></script></head>
<body>
<script type="text/javascript">
window.RESULT = "not returned";
const testName = "#{test_name}";
function path(type) {
return "/" + type + "/" + testName + ".png"
}
function appendImage(type) {
const img = document.createElement("img");
img.src = path(type);
img.id = type;
document.body.appendChild(img);
return img;
}
appendImage("ref");
appendImage("test");
resemble(path("ref")).compareTo(path("test")).onComplete(function(data) {
const isDiff = (data.rawMisMatchPercentage > 0) ||
(data.dimensionDifference.height != 0) ||
(data.dimensionDifference.width != 0);
const diff = isDiff ? data.getImageDataUrl() : null;
window.RESULT = {data: data, diff: diff};
const container = document.createElement("div")
container.innerHTML = JSON.stringify(window.RESULT);
container.id = "result";
document.body.appendChild(container);
});
</script>
</body>
</html>
""")
end
defp render_image(conn, image_path) do
Plug.Conn.resp(conn, 200, File.read!(image_path))
end
defp render_js(conn) do
js = Path.join([Application.app_dir(:resemblixir), "priv", "js", "resemble.js"])
Plug.Conn.resp(conn, 200, File.read!(js))
end
defp open_bypass do
case Supervisor.start_child(Bypass.Supervisor, [[]]) do
{:ok, pid} ->
port = Bypass.Instance.call(pid, :port)
%Bypass{pid: pid, port: port}
other ->
other
end
end
def format({:ok, %{diff: diff, data: comparison}}, %__MODULE__{} = data), do: do_format(data, diff, comparison)
def format({:error, error}, %__MODULE__{} = data), do: {:error, {:json_error, error, data}}
def format(error, %__MODULE__{} = data), do: {:error, {{:unexpected_error, error}, data}}
defp do_format(%__MODULE__{} = data, diff, %{
misMatchPercentage: mismatch, rawMisMatchPercentage: raw_mismatch,
dimensionDifference: %{ height: _, width: _} = dimensions,
diffBounds: %{ top: _, bottom: _, left: _, right: _} = diff_bounds,
isSameDimensions: is_same_dim, analysisTime: time,
getImageDataUrl: _
}), do: %{data | mismatch_percentage: mismatch,
raw_mismatch_percentage: raw_mismatch,
dimension_difference: dimensions,
is_same_dimensions: is_same_dim,
diff_bounds: diff_bounds,
analysis_time: time,
images: Map.put(data.images, :diff, diff)}
defp analyze_result({:error, error}), do: {:error, error}
defp analyze_result(%__MODULE__{images: %{diff: nil}} = result), do: {:ok, result}
defp analyze_result(%__MODULE__{images: %{test: path, diff: diff} = images} = result) when is_binary(diff) do
diff_path = Path.join([Path.dirname(path), ["diff_", Path.basename(path, "")]])
File.write!(diff_path, diff)
{:error, %{result | images: %{images | diff: diff_path}}}
end
end
# (end of lib/resemblixir/compare.ex)
defmodule Oban.Plugins.Lifeline do
@moduledoc """
Naively transition jobs stuck `executing` back to `available`.
The `Lifeline` plugin periodically rescues orphaned jobs, i.e. jobs that are stuck in the
`executing` state because the node was shut down before the job could finish. Rescuing is
purely based on time, rather than any heuristic about the job's expected execution time or
whether the node is still alive.
If an executing job has exhausted all attempts, the Lifeline plugin will mark it `discarded`
rather than `available`.
_🌟 This plugin may transition jobs that are genuinely `executing` and cause duplicate
execution. For more accurate rescuing, or to rescue jobs that have exhausted retry attempts, see
the `DynamicLifeline` plugin in [Oban Pro][pro]._
[pro]: dynamic_lifeline.html
## Using the Plugin
Rescue orphaned jobs that are still `executing` after the default of 60 minutes:
config :my_app, Oban,
plugins: [Oban.Plugins.Lifeline],
...
Override the default period to rescue orphans after a more aggressive period of 5 minutes:
config :my_app, Oban,
plugins: [{Oban.Plugins.Lifeline, rescue_after: :timer.minutes(5)}],
...
## Options
* `:rescue_after` — the maximum amount of time, in milliseconds, that a job may execute before
being rescued. 60 minutes by default, and rescuing is performed once a minute.
## Instrumenting with Telemetry
The `Oban.Plugins.Lifeline` plugin adds the following metadata to the `[:oban, :plugin, :stop]`
event:
* `:rescued_count` — the number of jobs transitioned back to `available`
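For example, a minimal handler sketch that logs rescues (the handler id and
logging call are placeholders):
    :telemetry.attach(
      "oban-lifeline-logger",
      [:oban, :plugin, :stop],
      fn _event, _measurements, meta, _config ->
        with %{plugin: Oban.Plugins.Lifeline, rescued_count: count} when count > 0 <- meta do
          IO.inspect(count, label: "lifeline rescued")
        end
      end,
      nil
    )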
"""
use GenServer
import Ecto.Query, only: [update: 3, where: 3]
alias Oban.{Config, Job, Repo, Senator}
@type option ::
{:conf, Config.t()}
| {:interval, timeout()}
| {:name, GenServer.name()}
| {:rescue_after, pos_integer()}
defmodule State do
@moduledoc false
defstruct [
:conf,
:name,
:timer,
interval: :timer.minutes(1),
rescue_after: :timer.minutes(60)
]
end
defmacrop rescued_state(attempt, max_attempts) do
quote do
fragment(
"(CASE WHEN ? < ? THEN 'available' ELSE 'discarded' END)::oban_job_state",
unquote(attempt),
unquote(max_attempts)
)
end
end
@doc false
@spec start_link([option()]) :: GenServer.on_start()
def start_link(opts) do
GenServer.start_link(__MODULE__, opts, name: opts[:name])
end
@impl GenServer
def init(opts) do
state = struct!(State, opts)
{:ok, schedule_rescue(state)}
end
@impl GenServer
def terminate(_reason, %State{timer: timer}) do
if is_reference(timer), do: Process.cancel_timer(timer)
:ok
end
@impl GenServer
def handle_info(:rescue, %State{} = state) do
meta = %{conf: state.conf, plugin: __MODULE__}
:telemetry.span([:oban, :plugin], meta, fn ->
case check_leadership_and_rescue_jobs(state) do
{:ok, {rescued_count, _}} when is_integer(rescued_count) ->
{:ok, Map.put(meta, :rescued_count, rescued_count)}
error ->
{:error, Map.put(meta, :error, error)}
end
end)
{:noreply, schedule_rescue(state)}
end
defp check_leadership_and_rescue_jobs(state) do
if Senator.leader?(state.conf) do
Repo.transaction(state.conf, fn ->
time = DateTime.add(DateTime.utc_now(), -state.rescue_after, :millisecond)
query =
Job
|> where([j], j.state == "executing" and j.attempted_at < ^time)
|> update([j], set: [state: rescued_state(j.attempt, j.max_attempts)])
Repo.update_all(state.conf, query, [])
end)
else
# mirror the {count, returning} shape of Repo.update_all/3 so the
# telemetry handler's {:ok, {rescued_count, _}} clause matches
{:ok, {0, nil}}
end
end
defp schedule_rescue(state) do
timer = Process.send_after(self(), :rescue, state.interval)
%{state | timer: timer}
end
end
# (end of lib/oban/plugins/lifeline.ex)
defmodule Elastic.Index do
alias Elastic.HTTP
alias Elastic.Query
@moduledoc ~S"""
Collection of functions to work with indices.
"""
@doc """
Helper function for getting the name of an index combined with the
`index_prefix` and `use_mix_env` configuration.
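## Examples
```elixir
# With index_prefix set to 'elastic' and use_mix_env set to true,
# this returns "elastic_dev_answer" in the dev environment:
Elastic.Index.name("answer")
```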
"""
def name(index) do
[index_prefix(), mix_env(), index]
|> Enum.reject(&(&1 == nil || &1 == ""))
|> Enum.join("_")
end
@doc """
Creates the specified index.
If you've configured `index_prefix` and `use_mix_env` for Elastic, it will use those.
## Examples
```elixir
# With index_prefix set to 'elastic'
# And with `use_mix_env` set to `true`
# This will create the `elastic_dev_answer` index
Elastic.Index.create("answer")
```
"""
def create(index) do
HTTP.put(name(index))
end
@doc """
Creates the specified index with optional configuration parameters like settings,
mappings, aliases (see the ES Indices API documentation for information on what
you can pass).
If you've configured `index_prefix` and `use_mix_env` for Elastic, it will use those.
## Examples
```elixir
# With index_prefix set to 'elastic'
# And with `use_mix_env` set to `true`
# This will create the `elastic_dev_answer` index
Elastic.Index.create("answer", %{settings: {number_of_shards: 2}})
```
"""
def create(index, parameters) do
HTTP.post(name(index), body: parameters)
end
@doc """
Deletes the specified index.
If you've configured `index_prefix` and `use_mix_env` for Elastic, it will use those.
## Examples
```elixir
# With index_prefix set to 'elastic'
# And with `use_mix_env` set to `true`
# This will delete the `elastic_dev_answer` index
Elastic.Index.delete("answer")
```
"""
def delete(index) do
index |> name |> HTTP.delete
end
@doc """
Refreshes the specified index by issuing a [refresh HTTP call](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html).
"""
def refresh(index) do
HTTP.post("#{name(index)}/_refresh")
end
@doc """
Checks if the specified index exists.
The index name will be automatically prefixed as per this package's configuration.
"""
def exists?(index) do
{_, status, _} = index |> name |> HTTP.head
status == 200
end
@doc """
Opens the specified index.
"""
def open(index) do
HTTP.post("#{name(index)}/_open")
end
@doc """
Closes the specified index.
"""
def close(index) do
HTTP.post("#{name(index)}/_close")
end
@doc false
def search(%Query{index: index, body: body}) do
HTTP.get("#{name(index)}/_search", body: body)
end
@doc false
def count(%Query{index: index, body: body}) do
HTTP.get("#{name(index)}/_count", body: body)
end
defp index_prefix do
Application.get_env(:elastic, :index_prefix)
end
defp mix_env do
if Application.get_env(:elastic, :use_mix_env),
do: Mix.env(),
else: nil
end
end
# (end of lib/elastic/index.ex)
defmodule ProxerEx.Api.Info do
@moduledoc """
Contains helper methods to build requests for the info api.
"""
use ProxerEx.Api.Base, api_class: "info"
api_func "character" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Character``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.character(id: 61)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "character",
authorization: false,
extra_header: [],
get_args: %{},
method: :post,
post_args: [id: 61]
}}
""")
parameter("id", :post)
end
api_func "characters" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Characters``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.characters(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "characters",
authorization: false,
extra_header: [],
get_args: %{},
method: :post,
post_args: [id: 42]
}}
""")
parameter("id", :post)
end
api_func "comments" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Comments``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.comments(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "comments",
authorization: false,
extra_header: [],
get_args: %{id: 42},
method: :get,
post_args: []
}}
iex> ProxerEx.Api.Info.comments(id: 42, p: 6, limit: 82, sort: "rating")
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "comments",
authorization: false,
extra_header: [],
get_args: %{id: 42, p: 6, limit: 82, sort: "rating"},
method: :get,
post_args: []
}}
""")
parameter("id", :get)
parameter("sort", :get, optional: true)
paging_parameters()
end
api_func "entry" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Entry``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.entry(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "entry",
authorization: false,
extra_header: [],
get_args: %{id: 42},
method: :get,
post_args: []
}}
""")
parameter("id", :get)
end
api_func "entrygenres" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Entry Genres``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.entrygenres(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "entrygenres",
authorization: false,
extra_header: [],
get_args: %{id: 42},
method: :get,
post_args: []
}}
""")
parameter("id", :get)
end
api_func "entrytags" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Entry Tags``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.entrytags(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "entrytags",
authorization: false,
extra_header: [],
get_args: %{id: 42},
method: :get,
post_args: []
}}
""")
parameter("id", :get)
end
api_func "forum" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Forum``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.forum(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "forum",
authorization: false,
extra_header: [],
get_args: %{},
method: :post,
post_args: [id: 42]
}}
""")
parameter("id", :post)
end
api_func "fullentry" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Full Entry``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.fullentry(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "fullentry",
authorization: false,
extra_header: [],
get_args: %{id: 42},
method: :get,
post_args: []
}}
""")
parameter("id", :get)
end
api_func "gate" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Gate``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.gate(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "gate",
authorization: false,
extra_header: [],
get_args: %{id: 42},
method: :get,
post_args: []
}}
""")
parameter("id", :get)
end
api_func "groups" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Groups``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.groups(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "groups",
authorization: false,
extra_header: [],
get_args: %{id: 42},
method: :get,
post_args: []
}}
""")
parameter("id", :get)
end
api_func "industry" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Industry``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.industry(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "industry",
authorization: false,
extra_header: [],
get_args: %{id: 42},
method: :get,
post_args: []
}}
""")
parameter("id", :get)
end
api_func "lang" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Lang``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.lang(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "lang",
authorization: false,
extra_header: [],
get_args: %{id: 42},
method: :get,
post_args: []
}}
""")
parameter("id", :get)
end
api_func "listinfo" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Listinfo``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.listinfo(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "listinfo",
authorization: false,
extra_header: [],
get_args: %{id: 42},
method: :get,
post_args: []
}}
iex> ProxerEx.Api.Info.listinfo(id: 42, p: 5, limit: 12, includeNotAvailableChapters: true)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "listinfo",
authorization: false,
extra_header: [],
get_args: %{id: 42, p: 5, limit: 12, includeNotAvailableChapters: true},
method: :get,
post_args: []
}}
""")
parameter("id", :get)
parameter("includeNotAvailableChapters", :get, optional: true)
paging_parameters()
end
api_func "names" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Names``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.names(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "names",
authorization: false,
extra_header: [],
get_args: %{id: 42},
method: :get,
post_args: []
}}
""")
parameter("id", :get)
end
api_func "person" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Person``` api.
This method receives an optional keyword list as its only argument, which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information,
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.person(id: 61)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "person",
authorization: false,
extra_header: [],
get_args: %{},
method: :post,
post_args: [id: 61]
}}
""")
parameter("id", :post)
end
api_func "persons" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Persons``` api.
This method receives an optional keyword list as its only argument, which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information,
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.persons(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "persons",
authorization: false,
extra_header: [],
get_args: %{},
method: :post,
post_args: [id: 42]
}}
""")
parameter("id", :post)
end
api_func "publisher" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Publisher``` api.
This method receives an optional keyword list as its only argument, which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information,
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.publisher(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "publisher",
authorization: false,
extra_header: [],
get_args: %{id: 42},
method: :get,
post_args: []
}}
""")
parameter("id", :get)
end
api_func "recommendations" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Recommendations``` api.
This method receives an optional keyword list as its only argument, which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information,
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.recommendations(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "recommendations",
authorization: false,
extra_header: [],
get_args: %{id: 42},
method: :get,
post_args: []
}}
""")
parameter("id", :get)
end
api_func "relations" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Relations``` api.
This method receives an optional keyword list as its only argument, which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information,
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.relations(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "relations",
authorization: false,
extra_header: [],
get_args: %{id: 42},
method: :get,
post_args: []
}}
iex> ProxerEx.Api.Info.relations(id: 42, isH: true)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "relations",
authorization: false,
extra_header: [],
get_args: %{id: 42, isH: true},
method: :get,
post_args: []
}}
""")
parameter("id", :get)
parameter("isH", :get, optional: true)
end
api_func "season" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Seasons``` api.
This method receives an optional keyword list as its only argument, which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information,
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.season(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "season",
authorization: false,
extra_header: [],
get_args: %{id: 42},
method: :get,
post_args: []
}}
""")
parameter("id", :get)
end
api_func "setuserinfo", authorization: true do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Set Userinfo``` api.
This method receives an optional keyword list as its only argument, which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information,
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.setuserinfo(id: 42, type: "note")
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "setuserinfo",
authorization: true,
extra_header: [],
get_args: %{},
method: :post,
post_args: [id: 42, type: "note"]
}}
""")
parameter("id", :post)
parameter("type", :post)
end
api_func "translatorgroup" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Translatorgroup``` api.
This method receives an optional keyword list as its only argument, which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information,
take a look at the examples below.
## Examples
iex> ProxerEx.Api.Info.translatorgroup(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "translatorgroup",
authorization: false,
extra_header: [],
get_args: %{id: 42},
method: :get,
post_args: []
}}
""")
parameter("id", :get)
end
api_func "userinfo", authorization: true do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```Info/Get Userinfo``` api.
This method receives an optional keyword list as its only argument, which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information,
take a look at the examples below.
Because this function tries to view the information of the authenticated user, it is required to set
`ProxerEx.Request.authorization` to `true`.
## Examples
iex> ProxerEx.Api.Info.userinfo(id: 42)
{:ok,
%ProxerEx.Request{
api_class: "info",
api_func: "userinfo",
authorization: true,
extra_header: [],
get_args: %{},
method: :post,
post_args: [id: 42]
}}
""")
parameter("id", :post)
end
end
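# Usage sketch (not part of the original file): every function above returns
# `{:ok, %ProxerEx.Request{}}`, so a caller builds the request first and hands it
# to whatever HTTP layer it uses. `DispatchSketch.to_query/1` is hypothetical and
# only shows how the struct fields drive a GET dispatch:
defmodule DispatchSketch do
def to_query({:ok, %ProxerEx.Request{api_class: class, api_func: func, method: :get, get_args: args}}) do
# e.g. GET <base_url>/<class>/<func>?<query> -- the base URL is intentionally left out
{class, func, URI.encode_query(args)}
end
end
{"info", "groups", "id=42"} = DispatchSketch.to_query(ProxerEx.Api.Info.groups(id: 42))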
# source: lib/api_classes/info_api.ex
defmodule ESpec.Assertions.BeType do
@moduledoc """
Defines 'be_type' assertion.
it do: expect("abc").to be_binary
"""
use ESpec.Assertions.Interface
defp match(subject, :null) do
result = is_nil(subject)
{result, result}
end
defp match(subject, [:function, arity]) do
result = is_function(subject, arity)
{result, result}
end
defp match(%{__struct__: actual}, [:struct, expected]) when actual == expected, do: {true, true}
defp match(_, [:struct, _expected]), do: {false, false}
defp match(%{__struct__: _}, :struct), do: {true, true}
defp match(_, :struct), do: {false, false}
defp match(subject, type) do
result = apply(Kernel, :"is_#{type}", [subject])
{result, result}
end
defp success_message(subject, :null, _result, positive) do
to = if positive, do: "is", else: "is not"
"`#{inspect(subject)}` #{to} nil."
end
defp success_message(subject, [:function, arity], _result, positive) do
to = if positive, do: "is", else: "is not"
"`#{inspect(subject)}` #{to} `function` with arity `#{arity}`."
end
defp success_message(subject, [:struct, name], _result, positive) do
to = if positive, do: "is", else: "is not"
"`#{inspect(subject)}` #{to} struct named `#{name}`"
end
defp success_message(subject, type, _result, positive) do
to = if positive, do: "is", else: "is not"
"`#{inspect(subject)}` #{to} is `#{type}`."
end
defp error_message(subject, :null, _result, positive) do
to = if positive, do: "to", else: "not to"
but = if positive, do: "isn't", else: "is"
"Expected `#{inspect(subject)}` #{to} be nil but it #{but}."
end
defp error_message(subject, [:function, arity], _result, positive) do
to = if positive, do: "to", else: "not to"
but = if positive, do: "isn't", else: "is"
"Expected `#{inspect(subject)}` #{to} be function with arity `#{arity}` but it #{but}."
end
defp error_message(subject, [:struct, name], _result, positive) do
to = if positive, do: "to", else: "not to"
but = if positive, do: "isn't", else: "is"
"Expected `#{inspect(subject)}` #{to} be struct with name `#{name}` but it #{but}"
end
defp error_message(subject, type, _result, positive) do
to = if positive, do: "to", else: "not to"
but = if positive, do: "isn't", else: "is"
"Expected `#{inspect(subject)}` #{to} be `#{type}` but it #{but}."
end
end
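# Illustration (not part of ESpec): the catch-all `match/2` clause above builds
# the `Kernel` guard name from the type atom at runtime, which is why any
# `is_*/1` guard works with `be_type`:
true = apply(Kernel, :"is_#{:binary}", ["abc"])
false = apply(Kernel, :"is_#{:integer}", ["abc"])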
# source: lib/espec/assertions/be_type.ex
defmodule LinkedMapSet do
@moduledoc """
A `LinkedMapSet` is an extension to `MapSet` that keeps pointers to previous
and next elements based on add order.
"""
alias LinkedMapSet.{DuplicateValueError, MissingValueError}
alias LinkedMapSet.Node
@enforce_keys [:items]
defstruct head: nil, tail: nil, items: %{}
@type t :: %__MODULE__{head: any(), tail: any(), items: map()}
@doc """
Create a new `LinkedMapSet`
Returns a new empty `LinkedMapSet`.
## Examples
iex> LinkedMapSet.new()
%LinkedMapSet{head: nil, items: %{}, tail: nil}
"""
@spec new :: LinkedMapSet.t()
def new(), do: %__MODULE__{items: %{}}
@doc """
Adds an item to the linked map, or moves an existing one to tail.
Returns the updated `LinkedMapSet`.
## Examples
iex> LinkedMapSet.new() |> LinkedMapSet.add("foo")
iex> LinkedMapSet.new |> LinkedMapSet.add("foo") |> LinkedMapSet.add("bar")
%LinkedMapSet{
head: "foo",
items: %{
"bar" => %Node{next: nil, previous: "foo", value: "bar"},
"foo" => %Node{next: "bar", previous: nil, value: "foo"}
},
tail: "bar"
}
"""
@spec add(__MODULE__.t(), any) :: __MODULE__.t()
def add(linked_map_set, value)
def add(%__MODULE__{head: nil, tail: nil, items: %{}}, value) do
new_node = %Node{value: value}
%__MODULE__{head: value, tail: value, items: %{value => new_node}}
end
def add(%__MODULE__{head: head, tail: tail} = lms, value) do
if head == tail do
if head == value, do: lms, else: add_second_item(lms, value)
else
add_nth_item(lms, value)
end
end
defp add_second_item(%__MODULE__{head: head}, value) do
second_node = %Node{value: value, previous: head}
first_node = %Node{value: head, next: second_node.value}
items = %{
first_node.value => first_node,
second_node.value => second_node
}
%__MODULE__{head: first_node.value, tail: second_node.value, items: items}
end
defp add_nth_item(%__MODULE__{items: items} = lms, value) do
# Re-adding an existing value first removes it, which may also change `head`
# and `tail`, so keep the whole cleaned struct rather than only its items.
clean_lms = if Map.has_key?(items, value), do: remove(lms, value), else: lms
%__MODULE__{tail: tail, items: clean_items} = clean_lms
new_node = %Node{value: value, previous: tail}
replacement_tail = %{clean_items[tail] | next: new_node.value}
updated_items =
clean_items
|> Map.put(tail, replacement_tail)
|> Map.put(new_node.value, new_node)
%{clean_lms | tail: new_node.value, items: updated_items}
end
@doc """
Adds a new item to the linked map, unless it already exists.
Returns the updated `LinkedMapSet`.
## Examples
iex> LinkedMapSet.new() |> LinkedMapSet.add("a") |> LinkedMapSet.add_new("a")
%LinkedMapSet{
head: "a",
items: %{
"a" => %Node{next: nil, previous: nil, value: "a"}
},
tail: "a"
}
"""
@spec add_new(__MODULE__.t(), any) :: __MODULE__.t()
def add_new(linked_map_set, value)
def add_new(%__MODULE__{items: items} = lms, value) do
if Map.has_key?(items, value) do
lms
else
add(lms, value)
end
end
@doc """
Adds a new item to the linked map, or raises if `value` already exists.
Returns the updated `LinkedMapSet` or raises if `value` already exists.
Behaves the same as `add_new/2` but raises if `value` already exists.
## Examples
iex> LinkedMapSet.new() |> LinkedMapSet.add("a") |> LinkedMapSet.add_new!("a")
** (LinkedMapSet.DuplicateValueError) value "a" is already present
"""
@spec add_new!(__MODULE__.t(), any) :: __MODULE__.t()
def add_new!(linked_map_set, value)
def add_new!(%__MODULE__{items: items} = lms, value) do
if Map.has_key?(items, value) do
raise DuplicateValueError, value: value
else
add(lms, value)
end
end
@doc """
Remove an item from the linked map if it exists.
Returns the updated `LinkedMapSet`.
## Examples
iex> linked_map_set = LinkedMapSet.new |> LinkedMapSet.add("a") |> LinkedMapSet.add("b") |> LinkedMapSet.add("c")
%LinkedMapSet{
head: "a",
items: %{
"a" => %Node{next: "b", previous: nil, value: "a"},
"b" => %Node{next: "c", previous: "a", value: "b"},
"c" => %Node{next: nil, previous: "b", value: "c"}
},
tail: "c"
}
iex> LinkedMapSet.remove(linked_map_set, "b")
%LinkedMapSet{
head: "a",
items: %{
"a" => %Node{next: "c", previous: nil, value: "a"},
"c" => %Node{next: nil, previous: "a", value: "c"}
},
tail: "c"
}
"""
@spec remove(__MODULE__.t(), any) :: __MODULE__.t()
def remove(linked_map_set, value)
def remove(%__MODULE__{head: head, tail: tail, items: items} = lms, value) do
cond do
!Map.has_key?(items, value) ->
lms
head == value && tail == value ->
new()
head == value ->
remove_first_item(lms, value)
tail == value ->
remove_last_item(lms, value)
true ->
remove_nth_item(lms, value)
end
end
defp remove_first_item(%__MODULE__{items: items} = lms, value) do
next_head_node = items[items[value].next]
replacement_head_node = %{next_head_node | previous: nil}
updated_items =
items
|> Map.delete(value)
|> Map.put(replacement_head_node.value, replacement_head_node)
%{lms | head: replacement_head_node.value, items: updated_items}
end
defp remove_last_item(%__MODULE__{items: items} = lms, value) do
next_tail_node = items[items[value].previous]
replacement_tail_node = %{next_tail_node | next: nil}
updated_items =
items
|> Map.delete(value)
|> Map.put(replacement_tail_node.value, replacement_tail_node)
%{lms | tail: replacement_tail_node.value, items: updated_items}
end
defp remove_nth_item(%__MODULE__{items: items} = lms, value) do
node_to_remove = items[value]
previous_node = items[node_to_remove.previous]
next_node = items[node_to_remove.next]
replacement_previous_node = %{previous_node | next: next_node.value}
replacement_next_node = %{next_node | previous: previous_node.value}
updated_items =
items
|> Map.delete(node_to_remove.value)
|> Map.put(previous_node.value, replacement_previous_node)
|> Map.put(next_node.value, replacement_next_node)
%{lms | items: updated_items}
end
@doc """
Removes an item from the linked map, or raises if it doesn't exist.
Returns the updated `LinkedMapSet`, or raises if `value` doesn't exist.
Behaves the same as `remove/2`, but raises if `value` doesn't exist.
## Examples
iex> LinkedMapSet.new() |> LinkedMapSet.add("a") |> LinkedMapSet.remove!("b")
** (LinkedMapSet.MissingValueError) value "b" is not present
"""
@spec remove!(LinkedMapSet.t(), any) :: LinkedMapSet.t()
def remove!(linked_map_set, value)
def remove!(%__MODULE__{items: items} = lms, value) do
if Map.has_key?(items, value) do
remove(lms, value)
else
raise MissingValueError, value: value
end
end
@doc """
Returns the number of items in the `linked_map_set`.
## Examples
iex> LinkedMapSet.new() |> LinkedMapSet.add("a") |> LinkedMapSet.size()
1
"""
@spec size(LinkedMapSet.t()) :: non_neg_integer
def size(%__MODULE__{items: items}), do: map_size(items)
@doc """
Returns whether the given `value` exists in the given `linked_map_set`.
## Examples
iex> LinkedMapSet.new() |> LinkedMapSet.add("a") |> LinkedMapSet.member?("a")
true
iex> LinkedMapSet.new() |> LinkedMapSet.add("a") |> LinkedMapSet.member?("b")
false
"""
@spec member?(LinkedMapSet.t(), any) :: boolean
def member?(%__MODULE__{items: items}, value), do: Map.has_key?(items, value)
@doc """
Returns the values as a `List` in order.
## Examples
iex> LinkedMapSet.new() |> LinkedMapSet.add("a") |> LinkedMapSet.add("b") |> LinkedMapSet.to_list()
["a", "b"]
"""
@spec to_list(LinkedMapSet.t()) :: [any()]
def to_list(linked_map_set)
def to_list(%__MODULE__{head: head, items: items}) do
case map_size(items) do
0 -> []
1 -> [head]
_ -> [head] ++ remaining_items(head, items)
end
end
defp remaining_items(nil, _items), do: []
defp remaining_items(current, items) do
next = items[current].next
if next == nil do
[]
else
[next] ++ remaining_items(next, items)
end
end
defimpl Enumerable do
def count(linked_map_set) do
{:ok, LinkedMapSet.size(linked_map_set)}
end
def member?(linked_map_set, value) do
{:ok, LinkedMapSet.member?(linked_map_set, value)}
end
# Let the default reduce-based implementation be used since we
# require traversal of all items to maintain ordering.
def slice(_linked_map_set), do: {:error, __MODULE__}
def reduce(linked_map_set, acc, fun) do
Enumerable.List.reduce(LinkedMapSet.to_list(linked_map_set), acc, fun)
end
end
end
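# Usage sketch (not part of the original file): since `Enumerable` above is
# implemented in terms of `to_list/1`, the usual `Enum` functions observe
# insertion order:
lms = LinkedMapSet.new() |> LinkedMapSet.add("a") |> LinkedMapSet.add("b")
["a", "b"] = Enum.to_list(lms)
2 = Enum.count(lms)
true = Enum.member?(lms, "b")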
# source: lib/linked_map_set.ex
defmodule Roger.Partition.Retry do
@moduledoc """
Implements the retry logic for jobs.
Jobs are retried at a fixed number of delay intervals. By default,
these are the following (in seconds):
1, 3, 5, 10, 20, 35, 60, 100, 200, 400, 1000, 1800.
To override these levels, set the following configuration:
config :roger,
retry_levels: [1, 10, 60]
This sets 3 retry levels: the job is first retried after 1 second, then
after 10 seconds, and finally after 60 seconds.
## Implementation
For every retry level, a separate AMQP queue is made. The messages
in the queue have a TTL value that is set to the retry delay. After
the TTL expires, the messages are sent to the "dead letter
exchange", which for this queue points to the original queue.
The queues themselves also have an expiry so that when the queue is
empty, it will be removed.
"""
alias Roger.{Queue, Job}
@default_levels [1, 3, 5, 10, 20, 35, 60, 100, 200, 400, 1000, 1800]
@max_buried_queue_size Application.get_env(:roger, :max_buried_queue_size, 100)
@doc """
Given an AMQP channel and the partition, queues the given job for retry.
"""
def retry(channel, partition, job) do
{queue, expiration} = setup_retry_queue(channel, partition, job)
payload = Job.encode(%Job{job | retry_count: job.retry_count + 1})
opts_extra =
case expiration do
:buried -> []
_ -> [expiration: Integer.to_string(expiration * 1000)]
end
AMQP.Basic.publish(channel, "", queue, payload, Job.publish_opts(job, partition) ++ opts_extra)
{:ok, expiration}
end
defp setup_retry_queue(channel, partition, job) do
levels = Application.get_env(:roger, :retry_levels, @default_levels)
queue_type = Job.queue_type(job)
expiration =
if job.retry_count < Enum.count(levels) do
:lists.nth(job.retry_count + 1, levels)
else
:buried
end
arguments = [
{"x-dead-letter-exchange", ""},
{"x-dead-letter-routing-key", Queue.make_name(partition, queue_type)}
]
{queue_name, arguments} =
if expiration == :buried do
{Queue.make_name(partition, queue_type, ".buried"), [{"x-max-length", @max_buried_queue_size}]}
else
{Queue.make_name(partition, queue_type, ".retry.#{expiration}"),
arguments ++ [{"x-expires", expiration * 1000 + 2000}]}
end
{:ok, _stats} = AMQP.Queue.declare(channel, queue_name, durable: true, arguments: arguments)
{queue_name, expiration}
end
end
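# Sketch (not part of the original file) of the level lookup performed in
# `setup_retry_queue/3` above; `:lists.nth/2` is 1-based, so a job with
# retry_count 2 waits for the third configured level:
levels = [1, 3, 5, 10, 20, 35, 60, 100, 200, 400, 1000, 1800]
5 = :lists.nth(2 + 1, levels)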
# source: lib/roger/partition/retry.ex
defmodule AccessTokens do
@moduledoc """
A module that provides basic functionality to retrieve
an Access Token via its ID.
"""
import Ecto.Query
alias AccessToken
alias Issuer
alias Audience
@doc """
Retrieve an AccessToken schema struct, given its ID.
See query_by_id/2 for further details on how records are matched.
"""
@spec get_by_id(access_token_id :: pos_integer()) :: AccessToken.t() | nil
def get_by_id(access_token_id) do
query()
|> query_by_id(access_token_id)
|> Repo.one()
end
@doc """
Builds the base query for an AccessToken
"""
@spec query() :: Ecto.Query.t()
def query() do
from(t in AccessToken, as: :token)
end
@doc """
Given an AccessToken query, it will match records by the given ID
and also ensure that the record has not been revoked, nor expired.
Furthermore, it will validate that the issuer and audience associations are valid.
Records that have been revoked, or where the audience has been disabled
temporarily or permanently, will not be matched.
"""
@spec query_by_id(query :: Ecto.Queryable.t(), access_token_id :: pos_integer()) ::
Ecto.Queryable.t()
def query_by_id(queryable, id) do
queryable
|> where([..., t], t.id == ^id)
|> not_revoked()
|> not_expired()
|> query_with_issuer()
|> query_with_audience()
end
@doc """
Given a query, append join statements on the Issuers table,
and validate that the issuer has not been disabled temporarily or permanently.
"""
@spec query_with_issuer(query :: Ecto.Queryable.t()) :: Ecto.Queryable.t()
def query_with_issuer(queryable) do
queryable
|> join(:inner, [token: t], i in Issuer, on: t.issuer_id == i.id)
|> not_deleted()
|> not_disabled()
end
@doc """
Given a query, append join statements on the Audience table,
and validate that the audience has not been disabled temporarily or permanently.
"""
@spec query_with_audience(query :: Ecto.Queryable.t()) :: Ecto.Queryable.t()
def query_with_audience(queryable) do
queryable
|> join(:inner, [token: t], a in Audience, on: t.audience_id == a.id)
|> not_deleted()
|> not_disabled()
end
@doc """
Given a query, append a where clause to only select access tokens
that have not yet expired, by comparing CURRENT_TIMESTAMP and token.created_at + token.expires_in
"""
@spec not_expired(query :: Ecto.Queryable.t()) ::
Ecto.Queryable.t()
def not_expired(queryable) do
queryable
|> where(
[..., t],
fragment("current_timestamp <= ? + (interval '1' second * ?)", t.created_at, t.expires_in)
)
end
@doc """
Given a query, append a where clause to only select the records
that have the `revoked_at` field set to NULL.
Otherwise the record is considered to have been revoked and not valid.
"""
@spec not_revoked(query :: Ecto.Queryable.t()) ::
Ecto.Queryable.t()
def not_revoked(queryable) do
queryable
|> where([..., t], is_nil(t.revoked_at))
end
@doc """
Given a query, append a where clause to only select the records
that have the `enabled` field set to `true`.
Otherwise the record is considered to have been temporarily disabled and not valid.
"""
@spec not_disabled(queryable :: Ecto.Queryable.t()) :: Ecto.Queryable.t()
def not_disabled(queryable) do
queryable
|> where([..., r], r.enabled == true)
end
@doc """
Given a query, append a where clause to only select the records
that have the `deleted_at` field set to NULL.
Otherwise the record is considered to have been permanently disabled and not valid.
"""
@spec not_deleted(queryable :: Ecto.Queryable.t()) :: Ecto.Queryable.t()
def not_deleted(queryable) do
queryable
|> where([..., r], is_nil(r.deleted_at))
end
end
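# Usage sketch (not part of the original file): the query builders compose, so
# ad-hoc lookups can reuse the same validity rules. `Repo` is assumed to be in
# scope, as in `get_by_id/1` above:
_valid_tokens =
AccessTokens.query()
|> AccessTokens.not_revoked()
|> AccessTokens.not_expired()
|> Repo.all()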
# source: lib/access_tokens.ex
defmodule BattleBox.Games.Marooned.Error do
alias BattleBox.Games.Marooned
alias BattleBox.Games.Marooned.Logic
defmodule Template do
defmacro __using__(_opts) do
quote do
import unquote(__MODULE__)
@derive Jason.Encoder
@enforce_keys [:msg]
defstruct [:msg]
end
end
def decoration(msg, %Marooned{turn: turn, next_player: next_player}, module) do
error =
module
|> Module.split()
|> List.last()
"""
====== (Debug) Marooned - #{error} ======
Turn: #{turn}
Player: #{next_player}
Explanation:
#{String.trim(msg)}
To keep the game moving, your player will move randomly
====== (Debug) Marooned - #{error} ======
"""
end
def adjacency_info do
"""
If your bot is at 1, it can move to any space not removed/occupied/off the board
that has an x in it
0 0 0 0 0
0 x x x 0
0 x 1 x 0
0 x x x 0
0 0 0 0 0
"""
end
def game_size_info(%Marooned{rows: rows, cols: cols}) do
"""
The game board is always zero-indexed and this particular ruleset has #{rows} rows
and #{cols} cols. The range of acceptable coordinates is as follows (inclusive):
x -> 0..#{cols - 1}
y -> 0..#{rows - 1}
"""
end
end
defmodule InvalidInputFormat do
use Template
def new(%Marooned{} = game, input) do
msg =
decoration(
"""
Your bot sent the following commands with an invalid format:
#{Jason.encode!(input, pretty: true)}
All commands must be in the following format
#{Jason.encode!(%{"to" => "<location>", "remove" => "<location>"}, pretty: true)}
Where <location> is an [x, y] coordinate pair, such that x and y are integers and
within the board
If you'd like to move to the location [0, 0] and remove the location [1, 1] you'd
send the following JSON
#{Jason.encode!(%{"to" => [0, 0], "remove" => [1, 1]}, pretty: true)}
""",
game,
__MODULE__
)
%__MODULE__{msg: msg}
end
end
defmodule CannotMoveToNonAdjacentSquare do
use Template
def new(%Marooned{next_player: next_player} = game, target) do
%{^next_player => current_position} = Logic.player_positions(game)
msg =
decoration(
"""
Your bot sent the following as part of its commands
#{Jason.encode!(%{to: target})}
Your player is currently at #{inspect(current_position)} and can only move
to spaces that are adjacent to its current position
#{adjacency_info()}
""",
game,
__MODULE__
)
%__MODULE__{msg: msg}
end
end
defmodule CannotMoveToSquareYouAlreadyOccupy do
use Template
def new(game, _target) do
msg =
decoration(
"""
You cannot move to a square you already occupy
""",
game,
__MODULE__
)
%__MODULE__{msg: msg}
end
end
defmodule CannotMoveIntoOpponent do
use Template
def new(game, _target) do
msg =
decoration(
"""
You cannot move into the square your opponent occupies
""",
game,
__MODULE__
)
%__MODULE__{msg: msg}
end
end
defmodule CannotMoveOffBoard do
use Template
def new(game, target) do
msg =
decoration(
"""
Your bot sent the following as part of its commands
#{Jason.encode!(%{to: target})}
This is invalid because the square you tried to move to (#{inspect(target)})
falls outside of the field of play.
#{game_size_info(game)}
""",
game,
__MODULE__
)
%__MODULE__{msg: msg}
end
end
defmodule CannotMoveIntoRemovedSquare do
use Template
def new(game, _target) do
msg =
decoration(
"""
You cannot move into a square that's been removed
""",
game,
__MODULE__
)
%__MODULE__{msg: msg}
end
end
defmodule CannotRemoveSquareAPlayerIsOn do
use Template
def new(game, _target) do
msg =
decoration(
"""
You cannot remove a square a player is on
""",
game,
__MODULE__
)
%__MODULE__{msg: msg}
end
end
defmodule CannotRemoveASquareAlreadyRemoved do
use Template
def new(game, _target) do
msg =
decoration(
"""
You cannot remove a square that is already removed
""",
game,
__MODULE__
)
%__MODULE__{msg: msg}
end
end
defmodule CannotRemoveASquareOutsideTheBoard do
use Template
def new(game, target) do
msg =
decoration(
"""
Your bot sent the following as part of its commands
#{Jason.encode!(%{remove: target})}
This is invalid because the square you tried to remove (#{inspect(target)})
falls outside of the field of play.
#{game_size_info(game)}
""",
game,
__MODULE__
)
%__MODULE__{msg: msg}
end
end
defmodule CannotRemoveSameSquareAsMoveTo do
use Template
def new(game, target) do
msg =
decoration(
"""
Your bot sent the following
#{Jason.encode!(%{to: target, remove: target})}
The square you tried to remove, and the square you tried to
move to are the same (#{inspect(target)})
This is invalid, as you cannot move into the same square you
are removing
""",
game,
__MODULE__
)
%__MODULE__{msg: msg}
end
end
end
# source: lib/battle_box/games/marooned/error.ex
defmodule BoggleEngine.Utilities do
@moduledoc """
Utility functions for project.
"""
@doc """
Takes a string and returns a list of strings split on uppercase letters. Leading
lowercase letters are dropped. Characters without a case (such as digits) are
chunked like uppercase letters. Lowercase letters that trail a caseless character
are also dropped.
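The following doctests illustrate these rules:
## Examples
iex> BoggleEngine.Utilities.chunk_string_on_uppercase("QuAEr")
["Qu", "A", "Er"]
iex> BoggleEngine.Utilities.chunk_string_on_uppercase("abCd1e")
["Cd", "1"]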
"""
@spec chunk_string_on_uppercase(String.t) :: [String.t]
def chunk_string_on_uppercase(string) do
string
|> String.graphemes
|> Enum.chunk_while("", &handle_chunking/2, &handle_after_chunking/1)
end
# Helper function for chunk_string_on_uppercase/1 to chunk the list of
# strings.
@spec handle_chunking(String.grapheme, String.t | {nocase}) :: {:cont, String.t} | {:cont, String.t, String.t} | {:cont, {nocase}} | {:cont, String.t, {nocase}} when nocase: String.grapheme
defp handle_chunking(letter, accumulator) do
uppercase? = uppercase?(letter)
lowercase? = lowercase?(letter)
case {uppercase?, lowercase?, accumulator} do
{true, true, ""} -> {:cont, {letter}}
{true, true, {no_case_letter}} -> {:cont, no_case_letter, {letter}}
{true, true, string} -> {:cont, string, {letter}}
{true, false, ""} -> {:cont, letter}
{true, false, {no_case_letter}} -> {:cont, no_case_letter, letter}
{true, false, string} -> {:cont, string, letter}
{false, true, ""} -> {:cont, ""}
{false, true, {no_case_letter}} -> {:cont, no_case_letter, ""}
{false, true, string} -> {:cont, string <> letter}
end
end
# Helper function for chunk_string_on_uppercase/1 to handle the last
# accumulator value.
@spec handle_after_chunking(String.t | {nocase}) :: {:cont, String.t} | {:cont, String.t, String.t} | {:cont, nocase, String.t} when nocase: String.grapheme
defp handle_after_chunking(accumulator) do
case accumulator do
"" -> {:cont, ""}
{no_case_letter} -> {:cont, no_case_letter, ""}
string -> {:cont, string, ""}
end
end
@doc """
Determines if string is uppercase. A string is uppercase if it does not
contain lowercase letters.
"""
@spec uppercase?(String.t) :: boolean
def uppercase?(string) do
string == String.upcase(string)
end
@doc """
Determines if string is lowercase. A string is lowercase if it does not
contain uppercase letters.
"""
@spec lowercase?(String.t) :: boolean
def lowercase?(string) do
string == String.downcase(string)
end
@doc """
Converts integer to string representation with 0s as lead padding.
"""
@spec integer_to_string_with_padding(integer, integer) :: String.t
def integer_to_string_with_padding(integer, count) do
integer
|> Integer.to_string()
|> String.pad_leading(count, "0")
end
@doc """
Converts list to a map where key is the position of value in list.
"""
@spec list_to_map_with_index([term]) :: %{required(integer) => term}
def list_to_map_with_index(list) do
for {value, key} <- Enum.with_index(list), into: %{} do
{key, value}
end
end
@doc """
List will be truncated if longer than count. List will have filler appended
if shorter than count.
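The following doctests illustrate both cases:
## Examples
iex> BoggleEngine.Utilities.fit_list([1, 2, 3], 2, 0)
[1, 2]
iex> BoggleEngine.Utilities.fit_list([1], 3, 0)
[1, 0, 0]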
"""
@spec fit_list([term], integer, term) :: [term]
def fit_list(list, count, filler) do
fit_list(list, count, filler, [])
end
@spec fit_list([term], integer, term, [term]) :: [term]
defp fit_list(_configuration, 0, _filler, result) do
Enum.reverse(result)
end
defp fit_list([], count, filler, result) do
result =
for _i <- 1..count, reduce: result do
result -> [filler | result]
end
Enum.reverse(result)
end
defp fit_list([first | rest], count, filler, result) do
count = count - 1
result = [first | result]
fit_list(rest, count, filler, result)
end
end
# source: lib/boggle_engine/utilities.ex
defmodule Roman.Decoder do
@moduledoc false
alias Roman.Validators.{Numeral, Sequence}
@default_error {:error, {:invalid_numeral, "numeral is invalid"}}
@valid_options [:explain, :ignore_case, :strict, :zero]
@type decoded_numeral :: {Roman.numeral(), map}
@spec decode(String.t(), keyword | map) :: {:ok, integer} | Roman.error()
def decode(numeral, options \\ [])
def decode(numeral, options) when is_binary(numeral) and is_list(options) do
flags =
options
|> Keyword.take(@valid_options)
|> Enum.into(default_flags())
maybe_upcase = fn
numeral, %{ignore_case: true} -> String.upcase(numeral)
numeral, _ -> numeral
end
numeral
|> maybe_upcase.(flags)
|> decode(flags)
end
def decode("", _),
do: {:error, {:empty_string, "expected a numeral, got an empty string"}}
def decode("N", %{zero: true}),
do: {:ok, 0}
for {val, num} <- Roman.numeral_pairs() do
def decode(unquote(num), _), do: {:ok, unquote(val)}
end
def decode(numeral, %{strict: true, explain: false}) when is_binary(numeral) do
@default_error
end
# complete numeral decoder to handle "alternative forms"
# see e.g. https://en.wikipedia.org/wiki/Roman_numerals#Alternative_forms
def decode(numeral, %{explain: explain} = opts) when is_binary(numeral) do
case sequence(numeral, opts) do
{:error, _} = error ->
if explain, do: error, else: @default_error
{:ok, seq} ->
{:ok, Enum.reduce(seq, 0, fn {_, %{value: v}}, acc -> v + acc end)}
end
end
@spec default_flags() :: map
defp default_flags do
config =
:roman
|> Application.get_env(:default_flags, %{})
|> Map.take(@valid_options)
%{
explain: false,
ignore_case: false,
strict: true
}
|> Map.merge(config)
end
@spec sequence(Roman.numeral(), map) :: {:ok, [decoded_numeral]} | Roman.error()
defp sequence(numeral, %{strict: strict}) do
with {:ok, numeral} <- Numeral.validate(numeral, strict: strict),
{:ok, seq} <- decode_sections(numeral) do
if strict, do: Sequence.validate(seq), else: {:ok, seq}
else
{:error, _} = error -> error
end
end
@spec decode_sections(Roman.numeral()) :: {:ok, [decoded_numeral]} | Roman.error()
defp decode_sections(numeral) do
sequence =
numeral
|> Stream.unfold(&decode_section/1)
|> Enum.to_list()
{:ok, sequence}
end
# When decoding, we need to keep track of whether we've already encountered a
# subtractive combination, and how much the subtraction was: this will be
# necessary for further validation later
@spec decode_section(Roman.numeral()) :: {decoded_numeral, Roman.numeral()} | nil
defp decode_section("CM" <> rest),
do: {{"CM", %{value: 900, delta: 100}}, rest}
defp decode_section("CD" <> rest),
do: {{"CD", %{value: 400, delta: 100}}, rest}
defp decode_section("XC" <> rest),
do: {{"XC", %{value: 90, delta: 10}}, rest}
defp decode_section("XL" <> rest),
do: {{"XL", %{value: 40, delta: 10}}, rest}
defp decode_section("IX" <> rest),
do: {{"IX", %{value: 9, delta: 1}}, rest}
defp decode_section("IV" <> rest),
do: {{"IV", %{value: 4, delta: 1}}, rest}
defp decode_section("M" <> rest),
do: {{"M", %{value: 1_000}}, rest}
defp decode_section("D" <> rest),
do: {{"D", %{value: 500}}, rest}
defp decode_section("C" <> rest),
do: {{"C", %{value: 100}}, rest}
defp decode_section("L" <> rest),
do: {{"L", %{value: 50}}, rest}
defp decode_section("X" <> rest),
do: {{"X", %{value: 10}}, rest}
defp decode_section("V" <> rest),
do: {{"V", %{value: 5}}, rest}
defp decode_section("I" <> rest),
do: {{"I", %{value: 1}}, rest}
defp decode_section(""), do: nil
end
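# Usage sketch (not part of the original file; results follow the clauses above,
# and "N" only decodes to zero when the `zero` flag is set):
{:ok, 1994} = Roman.Decoder.decode("MCMXCIV")
{:ok, 0} = Roman.Decoder.decode("n", zero: true, ignore_case: true)
{:error, {:empty_string, _}} = Roman.Decoder.decode("")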
# source: lib/roman/decoder.ex
defmodule Dwarves.BinanceFutures do
alias Binance.Rest.HTTPClient
require Logger
# Server
@spec ping ::
{:error,
{:http_error, HTTPoison.Error.t()}
| {:poison_decode_error, Poison.ParseError.t()}
| map}
| {:ok, false | nil | true | binary | list | number | map}
@doc """
Pings the binance API. Returns `{:ok, %{}}` if successful, `{:error, reason}` otherwise
"""
def ping(is_testnet \\ false) do
HTTPClient.get_binance("/fapi/v1/ping", is_testnet)
end
# Ticker
@doc """
Get all symbols and current prices listed in binance
Returns `{:ok, [%Binance.SymbolPrice{}]}` or `{:error, reason}`.
## Example
```
{:ok,
[%Binance.SymbolPrice{price: "0.07579300", symbol: "ETHBTC"},
%Binance.SymbolPrice{price: "0.01670200", symbol: "LTCBTC"},
%Binance.SymbolPrice{price: "0.00114550", symbol: "BNBBTC"},
%Binance.SymbolPrice{price: "0.00640000", symbol: "NEOBTC"},
%Binance.SymbolPrice{price: "0.00030000", symbol: "123456"},
%Binance.SymbolPrice{price: "0.04895000", symbol: "QTUMETH"},
...]}
```
"""
def get_all_prices(is_testnet \\ false) do
case HTTPClient.get_binance("/fapi/v1/ticker/price", is_testnet) do
{:ok, data} ->
{:ok, Enum.map(data, &Binance.SymbolPrice.new(&1))}
err ->
err
end
end
# Order
@doc """
Creates a new order on binance
Returns `{:ok, %{}}` or `{:error, reason}`.
In the case of an error on binance, for example with invalid parameters, `{:error, {:binance_error, %{code: code, msg: msg}}}` will be returned.
Please read https://www.binance.com/restapipub.html#user-content-account-endpoints to understand all the parameters
"""
def create_order(
%{"symbol" => symbol, "side" => side, "type" => type, "quantity" => quantity} = params,
api_secret,
api_key,
is_testnet \\ false
) do
timestamp =
case params["timestamp"] do
# timestamp needs to be in milliseconds
nil ->
:os.system_time(:millisecond)
t ->
t
end
receiving_window =
case params["receiving_window"] do
nil ->
1000
t ->
t
end
arguments =
%{
symbol: symbol,
side: side,
type: type,
quantity: quantity,
recvWindow: receiving_window,
timestamp: timestamp
}
|> Map.merge(
unless(
is_nil(params["new_client_order_id"]),
do: %{newClientOrderId: params["new_client_order_id"]},
else: %{}
)
)
|> Map.merge(
unless(is_nil(params["stop_price"]),
do: %{stopPrice: format_price(params["stop_price"])},
else: %{}
)
)
|> Map.merge(
unless(is_nil(params["iceberg_quantity"]),
do: %{icebergQty: params["iceberg_quantity"]},
else: %{}
)
)
|> Map.merge(
unless(is_nil(params["time_in_force"]),
do: %{timeInForce: params["time_in_force"]},
else: %{}
)
)
|> Map.merge(
unless(is_nil(params["price"]), do: %{price: format_price(params["price"])}, else: %{})
)
case HTTPClient.signed_request_binance(
"/fapi/v1/order",
arguments,
:post,
api_secret,
api_key,
is_testnet
) do
{:ok, %{"code" => code, "msg" => msg}} ->
{:error, {:binance_error, %{code: code, msg: msg}}}
data ->
data
end
end
@doc """
Creates a new **limit** **buy** order
Symbol can be a binance symbol in the form of `"ETHBTC"` or `%Binance.TradePair{}`.
Returns `{:ok, %{}}` or `{:error, reason}`
"""
def order_limit_buy(
%{"symbol" => symbol, "quantity" => quantity, "price" => price} = params,
api_secret,
api_key,
is_testnet \\ false
)
when is_binary(symbol)
when is_number(quantity)
when is_number(price) do
params
|> Map.merge(%{"side" => "BUY", "type" => "LIMIT"})
|> create_order(
api_secret,
api_key,
is_testnet
)
|> parse_order_response
end
@doc """
Creates a new **limit** **sell** order
Symbol can be a binance symbol in the form of `"ETHBTC"` or `%Binance.TradePair{}`.
Returns `{:ok, %{}}` or `{:error, reason}`
"""
def order_limit_sell(
%{"symbol" => symbol, "quantity" => quantity, "price" => price} = params,
api_secret,
api_key,
is_testnet \\ false
)
when is_binary(symbol)
when is_number(quantity)
when is_number(price) do
params
|> Map.merge(%{"side" => "SELL", "type" => "LIMIT"})
|> create_order(
api_secret,
api_key,
is_testnet
)
|> parse_order_response
end
@doc """
Creates a new **market** **buy** order
Symbol can be a binance symbol in the form of `"ETHBTC"` or `%Binance.TradePair{}`.
Returns `{:ok, %{}}` or `{:error, reason}`
"""
def order_market_buy(%Binance.TradePair{from: from, to: to} = symbol, quantity)
when is_number(quantity)
when is_binary(from)
when is_binary(to) do
case find_symbol(symbol) do
{:ok, binance_symbol} -> order_market_buy(binance_symbol, quantity)
e -> e
end
end
def order_market_buy(
%{"symbol" => symbol, "quantity" => quantity} = params,
api_secret,
api_key,
is_testnet \\ false
)
when is_binary(symbol)
when is_number(quantity) do
params
|> Map.merge(%{"side" => "BUY", "type" => "MARKET"})
|> create_order(
api_secret,
api_key,
is_testnet
)
end
@doc """
Creates a new **market** **sell** order
Symbol can be a binance symbol in the form of `"ETHBTC"` or `%Binance.TradePair{}`.
Returns `{:ok, %{}}` or `{:error, reason}`
"""
def order_market_sell(%Binance.TradePair{from: from, to: to} = symbol, quantity)
when is_number(quantity)
when is_binary(from)
when is_binary(to) do
case find_symbol(symbol) do
{:ok, binance_symbol} -> order_market_sell(binance_symbol, quantity)
e -> e
end
end
def order_market_sell(
%{"symbol" => symbol, "quantity" => quantity} = params,
api_secret,
api_key,
is_testnet \\ false
)
when is_binary(symbol)
when is_number(quantity) do
params
|> Map.merge(%{"side" => "SELL", "type" => "MARKET"})
|> create_order(
api_secret,
api_key,
is_testnet
)
end
defp parse_order_response({:ok, response}) do
{:ok, Binance.OrderResponse.new(response)}
end
defp parse_order_response({
:error,
{
:binance_error,
%{code: -2010, msg: "Account has insufficient balance for requested action."} = reason
}
}) do
{:error, %Binance.InsufficientBalanceError{reason: reason}}
end
# Misc
defp format_price(num) when is_float(num), do: :erlang.float_to_binary(num, [{:decimals, 8}])
defp format_price(num) when is_integer(num), do: inspect(num)
defp format_price(num) when is_binary(num), do: num
@doc """
Searches and normalizes the symbol as it is listed on binance.
To retrieve this information, a request to the binance API is made. The result is then **cached** so the request only happens once.
Neither the order of the symbols nor their case matters.
Returns `{:ok, "SYMBOL"}` if successfully, or `{:error, reason}` otherwise.
## Examples
These 3 calls will result in the same result string:
```
find_symbol(%Binance.TradePair{from: "ETH", to: "REQ"})
```
```
find_symbol(%Binance.TradePair{from: "REQ", to: "ETH"})
```
```
find_symbol(%Binance.TradePair{from: "rEq", to: "eTH"})
```
Result: `{:ok, "REQETH"}`
"""
def find_symbol(%Binance.TradePair{from: from, to: to} = tp)
when is_binary(from)
when is_binary(to) do
case Binance.SymbolCache.get() do
# cache hit
{:ok, data} ->
from = String.upcase(from)
to = String.upcase(to)
found = Enum.filter(data, &Enum.member?([from <> to, to <> from], &1))
case Enum.count(found) do
1 -> {:ok, found |> List.first()}
0 -> {:error, :symbol_not_found}
end
# cache miss
{:error, :not_initialized} ->
case get_all_prices() do
{:ok, price_data} ->
price_data
|> Enum.map(fn x -> x.symbol end)
|> Binance.SymbolCache.store()
find_symbol(tp)
err ->
err
end
err ->
err
end
end
end
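# Usage sketch (not part of the original file): the env var names are
# placeholders, and running this performs a real signed request against the
# futures testnet:
api_key = System.fetch_env!("BINANCE_TESTNET_API_KEY")
api_secret = System.fetch_env!("BINANCE_TESTNET_API_SECRET")
_result =
Dwarves.BinanceFutures.order_limit_buy(
%{"symbol" => "BTCUSDT", "quantity" => 0.001, "price" => 20_000.0, "time_in_force" => "GTC"},
api_secret,
api_key,
true
)
# => {:ok, %Binance.OrderResponse{}} on success; otherwise an error tuple such as
# {:error, %Binance.InsufficientBalanceError{}} or {:error, {:binance_error, _}}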
# source: lib/binance_futures.ex
defmodule Exq.Redis.Connection do
@moduledoc """
The Connection module encapsulates interaction with a live Redis connection or pool.
"""
require Logger
def flushdb!() do
qa(["flushdb"])
end
def incr!(key) do
{:ok, count} = q(["INCR", key])
count
end
def get!(key) do
{:ok, val} = q(["GET", key])
val
end
def del!(key) do
q(["DEL", key])
end
def llen!(list) do
{:ok, len} = q(["LLEN", list])
len
end
def scard!(set) do
{:ok, count} = q(["SCARD", set])
count
end
def smembers!(set) do
{:ok, members} = q(["SMEMBERS", set])
members
end
def sadd!(set, member) do
{:ok, res} = q(["SADD", set, member])
res
end
def srem!(set, member) do
{:ok, res} = q(["SREM", set, member])
res
end
def lrange!(list, range_start \\ "0", range_end \\ "-1") do
{:ok, items} = q(["LRANGE", list, range_start, range_end])
items
end
def lrem!(list, value, count \\ 1) do
{:ok, res} = q(["LREM", list, count, value])
res
end
def lpop(key) do
q(["LPOP", key])
end
def rpoplpush(key, backup) do
q(["RPOPLPUSH", key, backup])
end
def zadd(set, score, member) do
q(["ZADD", set, score, member])
end
def zadd!(set, score, member) do
{:ok, res} = q(["ZADD", set, score, member])
res
end
def zcard!(set) do
{:ok, count} = q(["ZCARD", set])
count
end
def zrangebyscore!(set, min \\ "0", max \\ "+inf") do
{:ok, items} = q(["ZRANGEBYSCORE", set, min, max])
items
end
def zrangebyscorewithscore!(set, min \\ "0", max \\ "+inf") do
{:ok, items} = q(["ZRANGEBYSCORE", set, min, max, "WITHSCORES"])
items
end
def zrange!(set, range_start \\ "0", range_end \\ "-1") do
{:ok, items} = q(["ZRANGE", set, range_start, range_end])
items
end
def zrem!(set, member) do
{:ok, res} = q(["ZREM", set, member])
res
end
def zrem(set, member) do
q(["ZREM", set, member])
end
defdelegate q(command), to: :eredis_cluster
defdelegate qa(command), to: :eredis_cluster
defdelegate qp(commands), to: :eredis_cluster
defdelegate qmn(commands), to: :eredis_cluster
def qmn!(commands) do
:eredis_cluster.qmn(commands)
|> Enum.each(fn {:ok, _any} -> :ok end)
end
end
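# Usage sketch (not part of the original file; assumes a reachable
# `:eredis_cluster` setup). The bang helpers unwrap `{:ok, value}` results:
_added = Exq.Redis.Connection.sadd!("demo:set", "a")
true = "a" in Exq.Redis.Connection.smembers!("demo:set")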
# source: lib/exq/redis/connection.ex
defmodule Liaison.IP do
@moduledoc """
Allow the conversion of common IP formats to and from
* string: "1.2.3.4"
* component: {1, 2, 3, 4}
* integer: 16909060
The input format may be the same as the output format
## Example
iex> Liaison.IP.as_string("1.2.3.4")
"1.2.3.4"
"""
use Bitwise
@type ip :: integer
@type ip_comp :: {integer, integer, integer, integer}
@type ip_addr :: ip | ip_comp | String.t()
@type format :: :integer | :components | :string
@doc """
Converts any valid address into a string `a.b.c.d`
## Example
iex> Liaison.IP.as_string("1.2.3.4")
"1.2.3.4"
iex> Liaison.IP.as_string(16909060)
"1.2.3.4"
iex> Liaison.IP.as_string({1, 2, 3, 4})
"1.2.3.4"
"""
@spec as_string(ip_addr) :: String.t()
def as_string(ip_addr) when is_binary(ip_addr), do: ip_addr
def as_string(ip_addr) when is_integer(ip_addr) do
ip_addr
|> as_components()
|> as_string()
end
def as_string(ip_addr) when is_tuple(ip_addr) do
ip_addr
|> Tuple.to_list()
|> Enum.join(".")
end
@doc """
Converts any valid address into a tuple `{a, b, c, d}`
## Example
iex> Liaison.IP.as_components("1.2.3.4")
{1, 2, 3, 4}
iex> Liaison.IP.as_components(16909060)
{1, 2, 3, 4}
iex> Liaison.IP.as_components({1, 2, 3, 4})
{1, 2, 3, 4}
"""
@spec as_components(ip_addr) :: ip_comp
def as_components(ip_addr) when is_tuple(ip_addr), do: ip_addr
def as_components(ip_addr) when is_integer(ip_addr) do
a = (ip_addr &&& 0xFF000000) >>> 24
b = (ip_addr &&& 0x00FF0000) >>> 16
c = (ip_addr &&& 0x0000FF00) >>> 8
d = ip_addr &&& 0x000000FF
{a, b, c, d}
end
def as_components(ip_addr) when is_binary(ip_addr) do
ip_addr
|> String.split(".")
|> Enum.map(&String.to_integer/1)
|> List.to_tuple()
end
@doc """
Converts any valid address into an integer
## Example
iex> Liaison.IP.as_integer("1.2.3.4")
16909060
iex> Liaison.IP.as_integer(16909060)
16909060
iex> Liaison.IP.as_integer({1, 2, 3, 4})
16909060
"""
@spec as_integer(ip_addr) :: ip
def as_integer(ip_addr) when is_integer(ip_addr), do: ip_addr
def as_integer({a, b, c, d}), do: (a <<< 24) + (b <<< 16) + (c <<< 8) + d
def as_integer(ip_addr) when is_binary(ip_addr) do
as_integer(as_components(ip_addr))
end
@doc """
Creates a range of integer ip addresses between the given addresses.
Inclusivity: [from, to]
## Example
iex> Liaison.IP.to_ip_range("1.2.3.4", "1.2.3.6", :string)
["1.2.3.4", "1.2.3.5", "1.2.3.6"]
iex> Liaison.IP.to_ip_range("1.2.3.6", "1.2.3.4")
{:error, "ip1 > ip2"}
"""
@spec to_ip_range(ip_addr, ip_addr, format) :: list(ip) | {:error, String.t()}
def to_ip_range(from, to, format \\ :integer) do
ip1_int = as_integer(from)
ip2_int = as_integer(to)
case ip1_int > ip2_int do
true ->
{:error, "ip1 > ip2"}
_ ->
range = to_range(ip1_int, ip2_int)
case format do
:integer -> Enum.map(range, &as_integer/1)
:components -> Enum.map(range, &as_components/1)
:string -> Enum.map(range, &as_string/1)
end
end
end
defp to_range(ip1, ip2) do
case ip1 >= ip2 do
true -> [ip2]
false -> [ip1 | to_range(ip1 + 1, ip2)]
end
end
@doc """
Returns the local host's IP address in component form.
If the machine has more than one IP address, the nth one can be
returned, or `nil` if there is none at that position.
"""
@spec get_ipv4_addr(integer) :: ip_comp | nil
def get_ipv4_addr(pos \\ 0) do
{:ok, host} = :inet.gethostname()
{:ok, {:hostent, _name, _aliases, _type, _length, ips}} = :inet.gethostbyname(host)
Enum.at(ips, pos)
end
@doc """
Returns `true` if the given address is valid.
This checks if the format is correct, not if the address is
functionally valid or accessible
## Example
iex> Liaison.IP.is_valid?("1.2.3.4")
true
iex> Liaison.IP.is_valid?({1, 2, 3, 400})
false
"""
@spec is_valid?(any) :: boolean
def is_valid?(ip_addr) do
lowest_valid = as_integer("1.0.0.0")
highest_valid = as_integer("255.255.255.255")
try do
ip_int = as_integer(ip_addr)
{a, b, c, d} = as_components(ip_addr)
ip_int < highest_valid &&
ip_int > lowest_valid &&
between?(a, 0, 255) &&
between?(b, 0, 255) &&
between?(c, 0, 255) &&
between?(d, 0, 255)
rescue
_ -> false
end
end
defp between?(a, b, c) do
a >= b && a <= c
end
end
# source: lib/ip.ex
defmodule Estated.CastHelpers do
@moduledoc false
@moduledoc since: "0.1.0"
@doc """
Cast a `t:String.t/0` to a `t:Date.t/0`.
Passes through invalid `t:String.t/0` values and values that are not a `t:String.t/0`.
## Examples
iex> Estated.CastHelpers.cast_date("2020-04-17")
~D[2020-04-17]
iex> Estated.CastHelpers.cast_date("invalid date")
"invalid date"
iex> Estated.CastHelpers.cast_date(nil)
nil
"""
@doc since: "0.1.0"
@spec cast_date(any()) :: any()
def cast_date(binary) when is_binary(binary) do
case Date.from_iso8601(binary) do
{:ok, date} -> date
{:error, _reason} -> binary
end
end
def cast_date(date) do
date
end
@doc """
Cast a `t:String.t/0` to a `t:DateTime.t/0`.
Passes through invalid `t:String.t/0` values and values that are not a `t:String.t/0`.
## Examples
iex> Estated.CastHelpers.cast_datetime("2020-04-17T11:17:22Z")
~U[2020-04-17T11:17:22Z]
iex> Estated.CastHelpers.cast_datetime("invalid datetime")
"invalid datetime"
iex> Estated.CastHelpers.cast_datetime(nil)
nil
"""
@doc since: "0.1.0"
@spec cast_datetime(any()) :: any()
def cast_datetime(binary) when is_binary(binary) do
case DateTime.from_iso8601(binary) do
{:ok, datetime, _utc_offset} -> datetime
{:error, _reason} -> binary
end
end
def cast_datetime(datetime) do
datetime
end
@doc """
Cast a `t:String.t/0` to an `t:integer/0`.
Passes through invalid `t:String.t/0` values and values that are not a `t:String.t/0`.
## Examples
iex> Estated.CastHelpers.cast_integer("0")
0
iex> Estated.CastHelpers.cast_integer("invalid integer")
"invalid integer"
iex> Estated.CastHelpers.cast_integer(nil)
nil
"""
@doc since: "0.1.0"
@spec cast_integer(any()) :: any()
def cast_integer(binary) when is_binary(binary) do
case Integer.parse(binary) do
{integer, _remainder_of_binary} -> integer
:error -> binary
end
end
def cast_integer(integer) do
integer
end
end
# source: lib/estated/cast_helpers.ex
defmodule Neuron do
use GenServer
@moduledoc """
A neuron for neural networks. For now it only works with raw numeric data.
"""
@impl true
def init(%{
activation_function: activation_function,
learning_rate: learning_rate,
input_weights: input_weights,
bias_weight: bias_weight
})
when is_function(activation_function) and is_list(input_weights) and is_number(bias_weight) do
{:ok,
%{
activation_function: activation_function,
learning_rate: learning_rate,
last_inputs: [],
last_output: [],
input_weights: input_weights,
bias_weight: bias_weight
}}
end
@impl true
def init(%{
activation_function: activation_function,
number_of_inputs: number_of_inputs,
learning_rate: learning_rate
})
when is_function(activation_function) do
{:ok,
%{
activation_function: activation_function,
learning_rate: learning_rate,
last_inputs: [],
last_output: [],
input_weights: initialize_weights(number_of_inputs),
bias_weight: :rand.uniform() * 2 - 1
}}
end
@impl true
def handle_call({:calculate_inputs, inputs}, _from, state = %{input_weights: weights})
when is_list(inputs) and length(inputs) == length(weights) do
weighted_sum =
inputs
|> Enum.zip(weights)
|> Enum.reduce(0, fn {input, weight}, acc -> input * weight + acc end)
result = state.activation_function.(weighted_sum + state.bias_weight * -1)
{:reply, result, %{state | last_inputs: inputs, last_output: result}}
end
@impl true
def handle_call({:update_weights_from_error, delta}, _from, state) do
calculate_weights_from_delta(delta, state)
end
@impl true
def handle_call(
{:update_weights_from_expected_output, expected_output},
_from,
state = %{last_output: last_output}
) do
delta = last_output * (1 - last_output) * (expected_output - last_output)
calculate_weights_from_delta(delta, state)
end
defp calculate_weights_from_delta(
delta,
state = %{
last_inputs: last_inputs,
input_weights: weights,
bias_weight: bias_weight,
learning_rate: learning_rate
}
)
when length(last_inputs) == length(weights) do
new_bias_weight = bias_weight + learning_rate * delta * -1
new_weights =
weights
|> Enum.zip(last_inputs)
|> Enum.map(fn {weight, input} -> weight + learning_rate * delta * input end)
{:reply, {delta, new_weights},
%{state | input_weights: new_weights, bias_weight: new_bias_weight}}
end
defp initialize_weights(number_of_inputs) do
1..number_of_inputs
|> Enum.map(fn _ -> :rand.uniform() * 2 - 1 end)
end
end
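# Usage sketch (not part of the original file): the neuron is driven as a plain
# GenServer; the sigmoid here is one possible activation function:
{:ok, neuron} =
GenServer.start_link(Neuron, %{
activation_function: fn x -> 1.0 / (1.0 + :math.exp(-x)) end,
number_of_inputs: 2,
learning_rate: 0.1
})
_output = GenServer.call(neuron, {:calculate_inputs, [0.5, 0.25]})
# Backpropagate against the expected output; returns the delta and the new weights:
{_delta, _weights} = GenServer.call(neuron, {:update_weights_from_expected_output, 1.0})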
# source: lib/neuron.ex
defmodule Day9 do
defstruct [:scores, :players, :board]
alias Board
def parse(str) do
[_, players, turns] = Regex.run(~r/(\d+) players.+ (\d+) points/, str)
{String.to_integer(players), String.to_integer(turns)}
end
def new(players) do
%__MODULE__{
scores: %{},
players: players,
board: Board.new()
}
end
def play(game, marble) when rem(marble, 23) == 0 do
player = rem(marble, game.players)
{mine, board} =
game.board
|> Board.move(-7)
|> Board.pop()
scores = Map.update(game.scores, player, mine + marble, &(&1 + mine + marble))
%{game | board: board, scores: scores}
end
def play(game, marble) do
board =
game.board
|> Board.move(1)
|> Board.place(marble)
%{game | board: board}
end
def play_game(players, turns) do
Enum.reduce(1..turns, new(players), fn turn, game ->
play(game, turn)
end)
end
def high_score(%{scores: scores}) do
scores
|> Map.values()
|> Enum.max()
end
end
defmodule Board do
defstruct [:before, :after]
def new() do
%__MODULE__{
before: [0],
after: []
}
end
def place(board, marble) do
%{board | before: [marble | board.before]}
end
def pop(%{before: [marble | rest]} = board) do
{marble, move(%{board | before: rest}, 1)}
end
def pop(%{before: []} = board) do
board
|> swap()
|> pop()
end
def move(%{after: []} = board, motion) when motion > 0 do
board
|> swap()
|> move(motion)
end
def move(%{after: [h | rest]} = board, motion) when motion > 0 do
move(%{board | after: rest, before: [h | board.before]}, motion - 1)
end
def move(%{before: []} = board, motion) when motion < 0 do
board
|> swap()
|> move(motion)
end
def move(%{before: [h | rest]} = board, motion) when motion < 0 do
move(%{board | after: [h | board.after], before: rest}, motion + 1)
end
def move(board, 0) do
board
end
defp swap(board) do
%{board | before: Enum.reverse(board.after), after: Enum.reverse(board.before)}
end
defimpl Inspect, for: __MODULE__ do
def inspect(%{before: b = [c | _], after: a}, _opts) do
Kernel.inspect(%{list: Enum.reverse(b) ++ a, current: c})
end
end
end
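# Usage sketch (not part of the original file), using the well-known example
# where 9 players and 25 marbles yield a high score of 32:
{9, 25} = Day9.parse("9 players; last marble is worth 25 points")
32 = Day9.play_game(9, 25) |> Day9.high_score()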
# source: lib/day9.ex
defmodule Arb do
@moduledoc """
A NIF for controlling the ABACOM CH341A relay board.
"""
use Rustler,
otp_app: :arb,
crate: :arb
@port_definition [
type: :non_neg_integer,
doc: "The USB port to be used. Only necessary if multiple relay boards are connected."
]
@verify_definition [
type: :boolean,
doc: "Configures whether the activation should be verified.",
default: true
]
@typedoc """
The relays are labeled from 1 to 8 according to the
[data sheet](http://www.abacom-online.de/div/ABACOM_USB_LRB.pdf).
"""
@type relay_id :: 1..8
@doc """
Activates the relays that correspond to the given list of IDs. An empty
list deactivates all relays.
## Options
#{NimbleOptions.docs(port: @port_definition, verify: @verify_definition)}
## Examples
iex> Arb.activate([1, 4, 7])
:ok
"""
@spec activate([relay_id], Keyword.t()) :: :ok | {:error, term}
def activate(ids, opts \\ []) when is_list(ids) do
opts = NimbleOptions.validate!(opts, port: @port_definition, verify: @verify_definition)
__activate__(ids, opts[:verify], opts[:port]) |> to_ok()
end
@doc """
Returns the ids of active relays.
## Options
#{NimbleOptions.docs(port: @port_definition)}
## Examples
iex> Arb.get_active()
{:ok, [1, 3, 6]}
"""
@spec get_active(Keyword.t()) :: {:ok, [relay_id]} | {:error, term}
def get_active(opts \\ []) do
opts = NimbleOptions.validate!(opts, port: @port_definition)
__get_active__(opts[:port])
end
@doc """
Resets the relay board.
If, under some circumstances relay board operations fail due to a USB error
e.g. `{:error, {:usb, "Input/Output Error"}}`, this function may resolve
the issue by reseting the relay board. The effect is similar to replugging
the device.
**Note:** Previously activated relays stay active.
## Options
#{NimbleOptions.docs(port: @port_definition)}
## Examples
iex> Arb.reset()
:ok
"""
@spec reset(Keyword.t()) :: :ok | {:error, term}
def reset(opts \\ []) do
opts = NimbleOptions.validate!(opts, port: @port_definition)
__reset__(opts[:port]) |> to_ok()
end
defp __activate__(_ids, _verify, _port), do: :erlang.nif_error(:nif_not_loaded)
defp __get_active__(_port), do: :erlang.nif_error(:nif_not_loaded)
defp __reset__(_port), do: :erlang.nif_error(:nif_not_loaded)
defp to_ok({:ok, {}}), do: :ok
defp to_ok(other), do: other
end
# source: lib/arb.ex
defmodule HAP.Characteristic do
@moduledoc """
Functions to aid in the manipulation of characteristics tuples
"""
@typedoc """
Represents a single characteristic consisting of a static definition and a value source
"""
@type t :: {module(), value_source()}
@typedoc """
Represents a source for a characteristic value. May be either a static literal or
a `{mod, opts}` tuple which is consulted when reading / writing a characteristic
"""
@type value_source :: value() | {HAP.ValueStore.t(), HAP.ValueStore.opts()}
@typedoc """
The resolved value of a characteristic
"""
@type value :: any()
@doc false
def get_type({characteristic_definition, _value_source}) do
characteristic_definition.type()
end
@doc false
def get_perms({characteristic_definition, _value_source}) do
characteristic_definition.perms()
end
@doc false
def get_format({characteristic_definition, _value_source}) do
characteristic_definition.format()
end
@doc false
def get_meta({characteristic_definition, _value_source}) do
[
{:format, :format},
{:minValue, :min_value},
{:maxValue, :max_value},
{:minStep, :step_value},
{:unit, :unit},
{:maxLength, :max_length}
]
|> Enum.reduce(%{}, fn {return_key, call_key}, acc ->
if function_exported?(characteristic_definition, call_key, 0) do
Map.put(acc, return_key, apply(characteristic_definition, call_key, []))
else
acc
end
end)
end
@doc false
def get_value({characteristic_definition, value_source}) do
if "pr" in characteristic_definition.perms() do
get_value_from_source(value_source)
else
{:error, -70_405}
end
end
@doc false
def get_value!(characteristic) do
{:ok, value} = get_value(characteristic)
value
end
@doc false
def put_value({characteristic_definition, value_source}, value) do
if "pw" in characteristic_definition.perms() do
put_value_to_source(value_source, value)
else
{:error, -70_404}
end
end
@doc false
def set_change_token({_characteristic_definition, {mod, opts}}, token) do
if function_exported?(mod, :set_change_token, 2) do
mod.set_change_token(token, opts)
else
{:error, -70_406}
end
end
def set_change_token({_characteristic_definition, _value_source}, _token) do
raise "Cannot set change token on a statically defined characteristic"
end
defp get_value_from_source({mod, opts}) do
mod.get_value(opts)
end
defp get_value_from_source(value) do
{:ok, value}
end
defp put_value_to_source({mod, opts}, value) do
mod.put_value(value, opts)
end
defp put_value_to_source(_value, _new_value) do
raise "Cannot write to a statically defined characteristic"
end
end
|
lib/hap/characteristic.ex
| 0.836321
| 0.541954
|
characteristic.ex
|
starcoder
|
defmodule PolyAmory do
@moduledoc """
Creates a polymorphic embedded type
"""
alias Ecto.Changeset
@doc """
# Arguments
* `types`: Keyword list of `{name, type}`. The `name` is stored in the database in order to identify what `type` to use in runtime.
* `type_field`: Name of the field used to store the type of that particular object. Defaults to `:__type__`.
# Example
defmodule PolyAmory.TestChannelData do
use PolyAmory, types: [
sms: TestSmsChannel,
email: TestEmailChannel,
]
end
"""
defmacro __using__(opts) do
env = __CALLER__
type_field =
opts
|> Keyword.get(:type_field, :__type__)
|> Macro.expand(env)
|> Atom.to_string()
types =
opts
|> Keyword.fetch!(:types)
|> Enum.map(fn {key, value} ->
{key |> Macro.expand(env), value |> Macro.expand(env)}
end)
|> Macro.expand(env)
union_type = build_union_type(types)
quote do
use Ecto.Type
@type_field unquote(type_field)
@type t :: unquote(union_type)
alias Ecto.Changeset
def type, do: :map
PolyAmory.__casters__(unquote(types))
PolyAmory.__dumpers__(unquote(types))
PolyAmory.__loaders__(unquote(types))
def __types__, do: unquote(types)
def load(data) when is_map(data) do
name =
data
|> Map.get(@type_field)
|> String.to_existing_atom()
fields =
data
|> Map.delete(@type_field)
|> Enum.map(fn {key, value} ->
{String.to_atom(key), value}
end)
load(name, fields)
end
def cast(_), do: :error
def dump(_), do: :error
end
end
@doc false
defmacro __casters__(types) do
types
|> Enum.map(&caster/1)
end
@doc false
defmacro __dumpers__(types) do
types
|> Enum.map(&dumper/1)
end
@doc false
defmacro __loaders__(types) do
types
|> Enum.map(&loader/1)
end
@doc "Casts the given poly with the changeset parameters."
@spec cast(Changeset.t(), atom, atom) :: Changeset.t()
@spec cast(Changeset.t(), atom, atom, Keyword.t()) :: Changeset.t()
def cast(changes, key, typename, opts \\ [])
def cast(%Changeset{data: data, types: types}, _key, _typename, _opts)
when data == nil or types == nil do
raise ArgumentError,
"cast/2 expects the changeset to be cast. " <>
"Please call cast/4 before calling cast/2"
end
def cast(%Changeset{params: params, types: types} = changeset, key, typename, opts) do
case types[key] do
nil ->
raise ArgumentError, "invalid field: #{key}"
poly ->
{key, param_key} = cast_key(key)
do_cast(changeset, key, params[param_key], poly, typename, opts)
end
end
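# Hedged usage sketch, assuming the hypothetical setup from the `__using__/1`
# moduledoc and that the changeset has already been cast:
#
#   changeset
#   |> Ecto.Changeset.cast(params, [])
#   |> PolyAmory.cast(:channel, :sms)
#
# where :channel is a field whose Ecto type was defined with
# `use PolyAmory, types: [sms: TestSmsChannel, ...]`.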
## Private.
defp caster({_, value_type}) do
quote do
def cast(value = %unquote(value_type){}), do: {:ok, value}
end
end
defp loader({name, value_type}) do
value_type
|> is_schema?
|> loader(name, value_type)
end
defp loader(true, name, value_type) do
quote do
defp load(unquote(name), fields) do
result =
unquote(value_type)
|> Ecto.Schema.Loader.unsafe_load(fields |> Map.new(), &PolyAmory.load_value/2)
{:ok, result}
end
end
end
defp loader(false, name, value_type) do
quote do
defp load(unquote(name), fields) do
result = unquote(value_type) |> struct!(fields)
{:ok, result}
end
end
end
defp dumper({name, value_type}) do
value_type
|> is_schema?
|> dumper(name, value_type)
end
defp dumper(true, name, value_type) do
embedded_structure =
if __MODULE__.Helpers.dependency_vsn_match?(:ecto, "~> 3.5") do
quote(do: {:parameterized, Ecto.Embedded, var!(struct)})
else
quote(do: {:embed, var!(struct)})
end
quote do
def dump(value = %unquote(value_type){}) do
var!(struct) = %Ecto.Embedded{
cardinality: :one,
field: :data,
related: unquote(value_type),
}
embedded_type = unquote(embedded_structure)
with {:ok, result} <- Ecto.Type.dump(embedded_type, value, &PolyAmory.dump_value/2),
result = result |> Map.put(@type_field, Atom.to_string(unquote(name))) do
{:ok, result}
end
end
end
end
defp dumper(false, name, value_type) do
quote do
def dump(value = %unquote(value_type){}) do
result =
value
|> Map.from_struct()
|> Map.put(@type_field, Atom.to_string(unquote(name)))
{:ok, result}
end
end
end
defp build_union_type(types) do
types
|> Enum.reduce(nil, fn x, acc ->
case acc do
nil ->
x
value ->
{:|, [], [value, x]}
end
end)
end
defp is_schema?(type) do
try do
type.__schema__(:query)
true
rescue
_ in UndefinedFunctionError -> false
end
end
@doc false
def dump_value(type, value) do
with {:ok, value} <- Ecto.Type.dump(type, value, &dump_value/2),
{:ok, value} <- transform_dump(type, value) do
{:ok, value}
else
{:error, error} ->
{:error, error}
:error ->
:error
end
end
@doc false
def load_value(type, value) do
with {:ok, value} <- transform_load(type, value),
{:ok, value} <- Ecto.Type.load(type, value, &load_value/2) do
{:ok, value}
else
{:error, error} ->
{:error, error}
:error ->
:error
end
end
defp transform_dump(type, value), do: do_transform_dump(Ecto.Type.type(type), value)
defp do_transform_dump(_, nil), do: {:ok, nil}
defp do_transform_dump(:decimal, value), do: {:ok, Decimal.to_string(value)}
defp do_transform_dump(:time, %Time{} = t), do: {:ok, t}
defp do_transform_dump(:time_usec, %Time{} = t), do: {:ok, t}
defp do_transform_dump(:naive_datetime, %NaiveDateTime{} = dt), do: {:ok, dt}
defp do_transform_dump(:naive_datetime_usec, %NaiveDateTime{} = dt), do: {:ok, dt}
defp do_transform_dump(:utc_datetime, %DateTime{} = dt), do: {:ok, dt}
defp do_transform_dump(:utc_datetime_usec, %DateTime{} = dt), do: {:ok, dt}
defp do_transform_dump(:date, %Date{} = d), do: {:ok, d}
defp do_transform_dump(_, value), do: {:ok, value}
def transform_load(type, value), do: do_transform_load(Ecto.Type.type(type), value)
defp do_transform_load(_, nil), do: {:ok, nil}
defp do_transform_load(:decimal, value), do: {:ok, Decimal.new(value)}
defp do_transform_load(:time, value), do: value |> Time.from_iso8601()
defp do_transform_load(:time_usec, value), do: value |> Time.from_iso8601()
defp do_transform_load(:naive_datetime, value), do: value |> NaiveDateTime.from_iso8601()
defp do_transform_load(:naive_datetime_usec, value), do: value |> NaiveDateTime.from_iso8601()
defp do_transform_load(:utc_datetime, value) do
with {:ok, dt, _} <- value |> DateTime.from_iso8601() do
{:ok, dt}
end
end
defp do_transform_load(:utc_datetime_usec, value) do
with {:ok, dt, _} <- value |> DateTime.from_iso8601() do
{:ok, dt}
end
end
defp do_transform_load(:date, value), do: value |> Date.from_iso8601()
defp do_transform_load(_, value), do: {:ok, value}
def cast_key(key) when is_atom(key) do
{key, Atom.to_string(key)}
end
defp do_cast(changeset, _, nil, _, _, _), do: changeset
defp do_cast(changeset, key, param, poly, typename, opts) do
case poly.cast(param) do
{:ok, value} ->
Changeset.put_change(changeset, key, value)
:error ->
type =
poly.__types__
|> Enum.find(fn
{^typename, _type} -> true
{_, ^typename} -> true
_ -> false
end)
|> case do
nil -> nil
{_, module} -> module
end
do_changeset(changeset, key, param, type, typename, opts)
end
end
defp do_changeset(_changeset, _key, _param, nil, typename, _opts) do
raise ArgumentError, "invalid type: #{typename}"
end
defp do_changeset(changeset, key, param, module, _typename, opts) do
%Changeset{changes: changes, data: data} = changeset
on_cast = on_cast_fun(module, opts)
original = Map.get(data, key)
struct =
case original do
nil -> struct(module)
_ -> original
end
{change, valid?} =
case on_cast.(struct, param) do
%Changeset{valid?: false} = change ->
{change, false}
change ->
{Changeset.apply_changes(change), changeset.valid?}
end
%{changeset | changes: Map.put(changes, key, change), valid?: valid?}
end
defp on_cast_fun(module, opts) do
opts
|> Keyword.get(:with)
|> case do
nil ->
on_cast_default(module)
fun ->
fun
end
end
defp on_cast_default(module) do
fn struct, param ->
try do
module.changeset(struct, param)
rescue
e in UndefinedFunctionError ->
case __STACKTRACE__ do
[{^module, :changeset, args_or_arity, _} | _]
when args_or_arity == 2
when length(args_or_arity) == 2 ->
raise ArgumentError, """
the module #{inspect(module)} does not define a changeset/2
function, which is used by PolyAmory.cast/3. You need to
implement the #{module}.changeset/2 function.
"""
stacktrace ->
reraise e, stacktrace
end
end
end
end
end
|
lib/poly_amory.ex
| 0.829457
| 0.410993
|
poly_amory.ex
|
starcoder
|
defmodule Okta.OIDC do
@moduledoc """
The `Okta.OIDC` module provides access methods to the [Okta OpenID Connect & OAuth 2.0 API](https://developer.okta.com/docs/reference/api/oidc/).
All methods require a Tesla Client struct created with `Okta.OIDC.client`.
This client uses [client authentication](https://developer.okta.com/docs/reference/api/oidc/#client-authentication-methods) rather than Okta API token authentication.
Currently the supported client authentication method is [Client Secret](https://developer.okta.com/docs/reference/api/oidc/#client-secret)
### `client_secret_basic`
client = Okta.OIDC.oidc_client("https://dev-000000.okta.com", client_secret_basic: %{client_id: "thisistheclientid", client_secret: "thisistheclientsecret"})
### `client_secret_post`
client = Okta.OIDC.oidc_client("https://dev-000000.okta.com", client_secret_post: %{client_id: "thisistheclientid", client_secret: "thisistheclientsecret"})
## Examples
client = Okta.OIDC.oidc_client("https://dev-000000.okta.com", client_secret_basic: %{client_id: "thisistheclientid", client_secret: "thisistheclientsecret"})
{:ok, result, _env} = Okta.OIDC.token_for_code(client,"thisisthecode", "https://localhost:8080/implicit/callback")
"""
@doc """
Returns access tokens, ID tokens, and refresh tokens, depending on the [request parameters](https://developer.okta.com/docs/reference/api/oidc/#request-parameters-2).
Also takes `:auth_server_id` as an option to change the auth server from the standard "default"
https://developer.okta.com/docs/reference/api/oidc/#token
"""
@spec token(Okta.client(), map(), keyword()) :: Okta.result()
def token(client, params, opts \\ []) do
opts = Keyword.merge([auth_server_id: "default"], opts)
Tesla.post(client, oidc_url(opts[:auth_server_id]) <> "/token", params)
|> Okta.result()
|> case do
{ret, body, env} -> {ret, Jason.decode!(body), env}
other -> other
end
end
@doc """
Returns access tokens, ID tokens, and refresh tokens given an `authorization_code` and `redirect_uri` used when creating the code on the `authorize` endpoint (usually in a browser)
Also takes `:auth_server_id` as an option to change the auth server from the standard "default"
https://developer.okta.com/docs/reference/api/oidc/#token
"""
@spec token_for_code(Okta.client(), String.t(), String.t(), keyword()) :: Okta.result()
def token_for_code(client, code, redirect_uri, opts \\ []) do
params = %{grant_type: "authorization_code", redirect_uri: redirect_uri, code: code}
token(client, params, opts)
end
@doc """
Returns access tokens, ID tokens, and refresh tokens given a `refresh_token` and the `redirect_uri` used when the refresh token was retrieved from the `token` endpoint
Also takes `:auth_server_id` as an option to change the auth server from the standard "default" and an optional `:scope` argument, otherwise will default the scope to "offline_access openid"
https://developer.okta.com/docs/reference/api/oidc/#token
"""
@spec token_for_refresh_token(Okta.client(), String.t(), String.t(), keyword()) :: Okta.result()
def token_for_refresh_token(client, refresh_token, redirect_uri, opts \\ []) do
opts = Keyword.merge([scope: "offline_access openid"], opts)
params = %{
grant_type: "refresh_token",
scope: opts[:scope],
redirect_uri: redirect_uri,
refresh_token: refresh_token
}
token(client, params, opts)
end
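# Hedged example (all values are placeholders):
#
#   client = Okta.OIDC.oidc_client("https://dev-000000.okta.com",
#     client_secret_basic: %{client_id: "thisistheclientid", client_secret: "thisistheclientsecret"})
#   {:ok, result, _env} =
#     Okta.OIDC.token_for_refresh_token(client, "thisistherefreshtoken",
#       "https://localhost:8080/implicit/callback")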
@doc """
This endpoint takes an access, ID, or refresh token, and returns a boolean that indicates whether it is active or not. If the token is active, additional data about the token is also returned. If the token is invalid, expired, or revoked, it is considered inactive.
Also takes `:auth_server_id` as an option to change the auth server from the standard "default" and an optional `:token_type_hint` argument to indicate what type of token it is
https://developer.okta.com/docs/reference/api/oidc/#introspect
"""
@spec introspect(Okta.client(), String.t(), keyword()) :: Okta.result()
def introspect(client, token, opts \\ []) do
opts = Keyword.merge([auth_server_id: "default", token_type_hint: nil], opts)
Tesla.post(client, oidc_url(opts[:auth_server_id]) <> "/introspect", %{
token: token,
token_type_hint: opts[:token_type_hint]
})
|> Okta.result()
|> case do
{ret, body, env} -> {ret, Jason.decode!(body), env}
other -> other
end
end
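# Hedged example (token value is a placeholder); per the OAuth 2.0 token
# introspection response, `result["active"]` carries the boolean described above:
#
#   {:ok, result, _env} =
#     Okta.OIDC.introspect(client, "thisistheaccesstoken", token_type_hint: "access_token")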
@doc """
The API takes an access or refresh token and revokes it. Revoked tokens are considered inactive at the introspection endpoint. A client may only revoke its own tokens
Also takes `:auth_server_id` as an option to change the auth server from the standard "default" and an optional `:token_type_hint` argument to indicate what type of token it is
https://developer.okta.com/docs/reference/api/oidc/#revoke
"""
@spec revoke(Okta.client(), String.t(), keyword()) :: Okta.result()
def revoke(client, token, opts \\ []) do
opts = Keyword.merge([auth_server_id: "default", token_type_hint: nil], opts)
Tesla.post(client, oidc_url(opts[:auth_server_id]) <> "/revoke", %{
token: token,
token_type_hint: opts[:token_type_hint]
})
|> Okta.result()
|> case do
{ret, body, env} -> {ret, Jason.decode!(body), env}
other -> other
end
end
@doc """
Creates a Tesla Client struct specifically for OIDC API calls with [client authentication](https://developer.okta.com/docs/reference/api/oidc/#client-authentication-methods) rather than Okta API token authentication.
Currently the supported client authentication method is [Client Secret](https://developer.okta.com/docs/reference/api/oidc/#client-secret)
### `client_secret_basic`
client = Okta.OIDC.oidc_client("https://dev-000000.okta.com", client_secret_basic: %{client_id: "thisistheclientid", client_secret: "thisistheclientsecret"})
### `client_secret_post`
client = Okta.OIDC.oidc_client("https://dev-000000.okta.com", client_secret_post: %{client_id: "thisistheclientid", client_secret: "thisistheclientsecret"})
"""
@spec oidc_client(String.t(), keyword()) :: Okta.client()
def oidc_client(base_url,
client_secret_basic: %{client_id: client_id, client_secret: client_secret}
) do
middleware = [
{Tesla.Middleware.BaseUrl, base_url},
Tesla.Middleware.FormUrlencoded,
{Tesla.Middleware.Headers,
[
{"authorization", "Basic: " <> Base.encode64(client_id <> ":" <> client_secret)},
{"accept", "application/json"},
{"cache-control", "no-cache"}
]}
]
Tesla.client(middleware, Okta.adapter())
end
def oidc_client(base_url,
client_secret_post: %{client_id: client_id, client_secret: client_secret}
) do
middleware = [
{Tesla.Middleware.BaseUrl, base_url},
{Tesla.Middleware.ClientSecretPost, [client_id: client_id, client_secret: client_secret]},
Tesla.Middleware.FormUrlencoded,
{Tesla.Middleware.Headers,
[
{"accept", "application/json"},
{"cache-control", "no-cache"}
]}
]
Tesla.client(middleware, Okta.adapter())
end
defp oidc_url(auth_server_id), do: "/oauth2/#{auth_server_id}/v1"
end
defmodule Tesla.Middleware.ClientSecretPost do
@moduledoc false
@behaviour Tesla.Middleware
def call(%{body: body} = env, next, client_id: client_id, client_secret: client_secret) do
env
|> Tesla.put_body(Map.merge(body, %{client_id: client_id, client_secret: client_secret}))
|> Tesla.run(next)
end
end
|
lib/okta/oidc.ex
| 0.89546
| 0.476823
|
oidc.ex
|
starcoder
|
defmodule Seven.EventStore.State do
@moduledoc false
@type string_to_pids :: map()
@type pid_to_strings :: map()
@type pid_to_references :: map()
@opaque t :: %__MODULE__{
event_to_pids: string_to_pids,
pid_to_events: pid_to_strings,
pid_to_monitor: pid_to_references,
next_counter: non_neg_integer()
}
defstruct event_to_pids: %{},
pid_to_events: %{},
pid_to_monitor: %{},
next_counter: 0
# API
def counter_step, do: 10
@spec init(non_neg_integer()) :: t()
def init(next_counter), do: %__MODULE__{next_counter: next_counter}
@spec increment_counter(t()) :: t()
def increment_counter(state), do: %{state | next_counter: state.next_counter + counter_step()}
@spec next_counter(t()) :: non_neg_integer()
def next_counter(state), do: state.next_counter
@spec pids_by_event(t(), String.t()) :: [pid]
def pids_by_event(state, event_type),
do: get_in(state.event_to_pids, [event_type]) |> get_collection()
@spec pid_is_down(t(), pid) :: t()
def pid_is_down(state, pid) do
event_to_pids =
state.event_to_pids
|> Enum.reduce(%{}, fn {e, pids}, acc -> put_in(acc, [e], pids -- [pid]) end)
demonitor(state.pid_to_monitor[pid])
pid_to_monitor = state.pid_to_monitor |> Map.delete(pid)
pid_to_events = state.pid_to_events |> Map.delete(pid)
%{
state
| event_to_pids: event_to_pids,
pid_to_events: pid_to_events,
pid_to_monitor: pid_to_monitor
}
end
@spec subscribe_pid_to_event(t(), pid, String.t()) :: t()
def subscribe_pid_to_event(state, pid, event_type) do
current_collection = get_in(state.event_to_pids, [event_type]) |> get_collection()
event_to_pids = put_in(state.event_to_pids, [event_type], current_collection ++ [pid])
current_pid_to_events = get_in(state.pid_to_events, [pid]) |> get_collection()
pid_to_events = put_in(state.pid_to_events, [pid], current_pid_to_events ++ [event_type])
monitor_ref = monitor(pid)
pid_to_monitor = state.pid_to_monitor |> Map.put(pid, monitor_ref)
%{
state
| event_to_pids: event_to_pids,
pid_to_events: pid_to_events,
pid_to_monitor: pid_to_monitor
}
end
@spec unsubscribe_pid_to_event(t(), pid, String.t()) :: t()
def unsubscribe_pid_to_event(state, pid, event_type) do
current_event_to_pids = get_in(state.event_to_pids, [event_type]) |> get_collection()
event_to_pids = put_in(state.event_to_pids, [event_type], current_event_to_pids -- [pid])
current_pid_to_events = get_in(state.pid_to_events, [pid]) |> get_collection()
pid_to_events = put_in(state.pid_to_events, [pid], current_pid_to_events -- [event_type])
# demonitor this subscriptor
{pid_to_events, pid_to_monitor} =
case length(pid_to_events[pid]) do
0 ->
demonitor(state.pid_to_monitor[pid])
{Map.delete(pid_to_events, pid), Map.delete(state.pid_to_monitor, pid)}
_ ->
{pid_to_events, state.pid_to_monitor}
end
%{
state
| event_to_pids: event_to_pids,
pid_to_events: pid_to_events,
pid_to_monitor: pid_to_monitor
}
end
@spec monitor(pid) :: any
defp monitor(pid), do: Process.monitor(pid)
@spec demonitor(any) :: any
defp demonitor(nil), do: nil
defp demonitor(ref), do: Process.demonitor(ref)
# Privates
defp get_collection(nil), do: []
defp get_collection(c), do: c
end
|
lib/seven/event_store/state.ex
| 0.677581
| 0.567547
|
state.ex
|
starcoder
|
defmodule Tensorflow.TensorDebugMode do
@moduledoc false
use Protobuf, enum: true, syntax: :proto3
@type t ::
integer
| :UNSPECIFIED
| :NO_TENSOR
| :CURT_HEALTH
| :CONCISE_HEALTH
| :FULL_HEALTH
| :SHAPE
| :FULL_NUMERICS
| :FULL_TENSOR
| :REDUCE_INF_NAN_THREE_SLOTS
field(:UNSPECIFIED, 0)
field(:NO_TENSOR, 1)
field(:CURT_HEALTH, 2)
field(:CONCISE_HEALTH, 3)
field(:FULL_HEALTH, 4)
field(:SHAPE, 5)
field(:FULL_NUMERICS, 6)
field(:FULL_TENSOR, 7)
field(:REDUCE_INF_NAN_THREE_SLOTS, 8)
end
defmodule Tensorflow.DebugEvent do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
what: {atom, any},
wall_time: float | :infinity | :negative_infinity | :nan,
step: integer
}
defstruct [:what, :wall_time, :step]
oneof(:what, 0)
field(:wall_time, 1, type: :double)
field(:step, 2, type: :int64)
field(:debug_metadata, 3, type: Tensorflow.DebugMetadata, oneof: 0)
field(:source_file, 4, type: Tensorflow.SourceFile, oneof: 0)
field(:stack_frame_with_id, 6, type: Tensorflow.StackFrameWithId, oneof: 0)
field(:graph_op_creation, 7, type: Tensorflow.GraphOpCreation, oneof: 0)
field(:debugged_graph, 8, type: Tensorflow.DebuggedGraph, oneof: 0)
field(:execution, 9, type: Tensorflow.Execution, oneof: 0)
field(:graph_execution_trace, 10,
type: Tensorflow.GraphExecutionTrace,
oneof: 0
)
field(:graph_id, 11, type: :string, oneof: 0)
field(:debugged_device, 12, type: Tensorflow.DebuggedDevice, oneof: 0)
end
defmodule Tensorflow.DebugMetadata do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
tensorflow_version: String.t(),
file_version: String.t(),
tfdbg_run_id: String.t()
}
defstruct [:tensorflow_version, :file_version, :tfdbg_run_id]
field(:tensorflow_version, 1, type: :string)
field(:file_version, 2, type: :string)
field(:tfdbg_run_id, 3, type: :string)
end
defmodule Tensorflow.SourceFile do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
file_path: String.t(),
host_name: String.t(),
lines: [String.t()]
}
defstruct [:file_path, :host_name, :lines]
field(:file_path, 1, type: :string)
field(:host_name, 2, type: :string)
field(:lines, 3, repeated: true, type: :string)
end
defmodule Tensorflow.StackFrameWithId do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
id: String.t(),
file_line_col: Tensorflow.GraphDebugInfo.FileLineCol.t() | nil
}
defstruct [:id, :file_line_col]
field(:id, 1, type: :string)
field(:file_line_col, 2, type: Tensorflow.GraphDebugInfo.FileLineCol)
end
defmodule Tensorflow.CodeLocation do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
host_name: String.t(),
stack_frame_ids: [String.t()]
}
defstruct [:host_name, :stack_frame_ids]
field(:host_name, 1, type: :string)
field(:stack_frame_ids, 2, repeated: true, type: :string)
end
defmodule Tensorflow.GraphOpCreation do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
op_type: String.t(),
op_name: String.t(),
graph_name: String.t(),
graph_id: String.t(),
device_name: String.t(),
input_names: [String.t()],
num_outputs: integer,
code_location: Tensorflow.CodeLocation.t() | nil,
output_tensor_ids: [integer]
}
defstruct [
:op_type,
:op_name,
:graph_name,
:graph_id,
:device_name,
:input_names,
:num_outputs,
:code_location,
:output_tensor_ids
]
field(:op_type, 1, type: :string)
field(:op_name, 2, type: :string)
field(:graph_name, 3, type: :string)
field(:graph_id, 4, type: :string)
field(:device_name, 5, type: :string)
field(:input_names, 6, repeated: true, type: :string)
field(:num_outputs, 7, type: :int32)
field(:code_location, 8, type: Tensorflow.CodeLocation)
field(:output_tensor_ids, 9, repeated: true, type: :int32)
end
defmodule Tensorflow.DebuggedGraph do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
graph_id: String.t(),
graph_name: String.t(),
instrumented_ops: [String.t()],
original_graph_def: binary,
instrumented_graph_def: binary,
outer_context_id: String.t()
}
defstruct [
:graph_id,
:graph_name,
:instrumented_ops,
:original_graph_def,
:instrumented_graph_def,
:outer_context_id
]
field(:graph_id, 1, type: :string)
field(:graph_name, 2, type: :string)
field(:instrumented_ops, 3, repeated: true, type: :string)
field(:original_graph_def, 4, type: :bytes)
field(:instrumented_graph_def, 5, type: :bytes)
field(:outer_context_id, 6, type: :string)
end
defmodule Tensorflow.DebuggedDevice do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
device_name: String.t(),
device_id: integer
}
defstruct [:device_name, :device_id]
field(:device_name, 1, type: :string)
field(:device_id, 2, type: :int32)
end
defmodule Tensorflow.Execution do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
op_type: String.t(),
num_outputs: integer,
graph_id: String.t(),
input_tensor_ids: [integer],
output_tensor_ids: [integer],
tensor_debug_mode: Tensorflow.TensorDebugMode.t(),
tensor_protos: [Tensorflow.TensorProto.t()],
code_location: Tensorflow.CodeLocation.t() | nil,
output_tensor_device_ids: [integer]
}
defstruct [
:op_type,
:num_outputs,
:graph_id,
:input_tensor_ids,
:output_tensor_ids,
:tensor_debug_mode,
:tensor_protos,
:code_location,
:output_tensor_device_ids
]
field(:op_type, 1, type: :string)
field(:num_outputs, 2, type: :int32)
field(:graph_id, 3, type: :string)
field(:input_tensor_ids, 4, repeated: true, type: :int64)
field(:output_tensor_ids, 5, repeated: true, type: :int64)
field(:tensor_debug_mode, 6, type: Tensorflow.TensorDebugMode, enum: true)
field(:tensor_protos, 7, repeated: true, type: Tensorflow.TensorProto)
field(:code_location, 8, type: Tensorflow.CodeLocation)
field(:output_tensor_device_ids, 9, repeated: true, type: :int32)
end
defmodule Tensorflow.GraphExecutionTrace do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
tfdbg_context_id: String.t(),
op_name: String.t(),
output_slot: integer,
tensor_debug_mode: Tensorflow.TensorDebugMode.t(),
tensor_proto: Tensorflow.TensorProto.t() | nil,
device_name: String.t()
}
defstruct [
:tfdbg_context_id,
:op_name,
:output_slot,
:tensor_debug_mode,
:tensor_proto,
:device_name
]
field(:tfdbg_context_id, 1, type: :string)
field(:op_name, 2, type: :string)
field(:output_slot, 3, type: :int32)
field(:tensor_debug_mode, 4, type: Tensorflow.TensorDebugMode, enum: true)
field(:tensor_proto, 5, type: Tensorflow.TensorProto)
field(:device_name, 6, type: :string)
end
|
lib/tensorflow/core/protobuf/debug_event.pb.ex
| 0.721939
| 0.44342
|
debug_event.pb.ex
|
starcoder
|
defmodule Render.Core.DataHandler do
@moduledoc """
Handles the data of the sections of a model's steps.
`Render.Core.Parser.exec/1` is applied to both the name and the content.
"""
alias Fatex.LatexConfigs
@doc """
Returns a list with the strings of the sections, in the order they appear in the DB
"""
@spec concat_all(integer) :: [bitstring]
def concat_all(model_id) do
get_all(model_id)
|> Enum.flat_map(fn x ->
Enum.map(x, &make_latex_of_section/1)
end)
# The result concatenates two levels of lists: one for the steps and one for
# the sections of each step. The steps are flattened with flat_map, which
# leaves a single list with all sections ordered as in the DB.
end
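# Hedged sketch of the result shape (contents depend entirely on the DB):
#
#   concat_all(model_id)
#   #=> ["<latex for step 1, section 1>", "<latex for step 1, section 2>",
#   #    "<latex for step 2, section 1>", ...]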
# Get all in the order of the DB
defp get_all(model_id) do
LatexConfigs.get_model(model_id)
|> LatexConfigs.list_steps_from_model()
|> Enum.map(&LatexConfigs.list_sections_from_step/1)
end
defp string_any(str), do: String.length(str) > 0
# In root tables the name is ignored unless latex_name_start or latex_name_end
# contains something; in child tables, when those fields are absent, the name
# is used as begin{NAME}.
defp get_section_name(section) do
case section.type do
"root" ->
if string_any(section.latex_name_start) ||
string_any(section.latex_name_end) do
"""
#{section.latex_name_start}
#{section.name}
#{section.latex_name_end}
"""
else
""
end
_ ->
if string_any(section.latex_name_start) ||
string_any(section.latex_name_end) do
"""
#{section.latex_name_start}
#{section.name}
#{section.latex_name_end}
"""
else
name =
section.name
|> Render.Core.Parser.exec()
"begin{#{name}}"
end
end
end
# get content, and apply the parser to section.content
defp get_section_content(section) do
content =
section.content
|> Render.Core.Parser.exec()
"""
#{section.latex_start}
#{content}
#{section.latex_end}
"""
end
defp load_child(child_id) do
LatexConfigs.get_section(child_id)
end
defp make_latex_of_section(section) do
"""
#{get_section_name(section)}
#{get_section_content(section)}
#{
Enum.map(section.children, fn c ->
load_child(c)
|> make_latex_of_section()
end)
}
"""
end
end
|
lib/render/core/data_handler.ex
| 0.713332
| 0.409457
|
data_handler.ex
|
starcoder
|
defmodule GuardianRedis.Repo do
@moduledoc """
`GuardianRedis.Repo` is a repo module that operates on Guardian.DB.Token in Redis.
The repo module serves only the Guardian.DB purpose; do not use it as a Redis repo for your project.
It depends on :jason and :redix.
Stores and deletes JWT tokens to/from Redis using a key combined from the JWT `jti` and `aud` claims.
Tokens are stored using Redis' automatic key expiry, so no token sweeper needs to run.
`delete_all` is still implemented to allow manual sweeping if needed.
"""
@behaviour Guardian.DB.Adapter
alias Guardian.DB.Token
alias GuardianRedis.Redix, as: Redis
@spec one(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) ::
Ecto.Schema.t() | nil
@doc """
Fetches a single result from the query.
Returns nil if no result was found.
"""
def one(query, _opts \\ []) do
key = key(query)
case Redis.command(["GET", key]) do
{:ok, nil} -> nil
{:ok, jwt_json} -> Jason.decode!(jwt_json)
_ -> nil
end
end
@doc """
Insert Token into Redis
Token is auto expired in `expired_in` seconds based on JWT `exp` value
"""
@spec insert(
struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(),
opts :: Keyword.t()
) :: {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()}
def insert(struct, _opts \\ []) do
key = key(struct)
expires_in = struct.changes.exp - System.system_time(:second)
utc_now = NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second)
token =
struct.changes
|> Map.put(:inserted_at, utc_now)
|> Map.put(:updated_at, utc_now)
case Redis.command(["SETEX", key, Integer.to_string(expires_in), Jason.encode!(token)]) do
{:ok, "OK"} ->
# Adding key to the set `sub` (user_id in JWT) so we can delete all user tokens in one go
sub = sub_elem(struct)
Redis.command(["SADD", set_name(sub), key])
{:ok, struct(Token, token)}
error ->
{:error, error}
end
end
@doc """
Remove all user tokens from Redis, useful for manual sweeping
"""
@spec delete_all(
queryable :: Ecto.Queryable.t(),
opts :: Keyword.t()
) :: {integer(), nil | [term()]}
def delete_all(query, _opts \\ []) do
set_name = query |> sub_elem() |> set_name()
{:ok, keys} = Redis.command(["SMEMBERS", set_name])
{:ok, amount_deleted} = Redis.command(["DEL", set_name] ++ keys)
# the DEL reply counts the set key itself, so subtract it from the total
{amount_deleted - 1, :ok}
end
@doc """
Remove a user token, log out functionality, invalidation of a JWT token
"""
@spec delete(
struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(),
opts :: Keyword.t()
) :: {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()}
def delete(model, _opts \\ []) do
key = key(model)
case Redis.command(["DEL", key]) do
{:ok, _num} -> {:ok, struct(Token, model)}
_ -> {:error, model}
end
end
@doc """
Generate Redis key from a changeset, %Token{} struct or Ecto.Query
"""
defp key(%{changes: %{jti: jti, aud: aud}}), do: combine_key(jti, aud)
defp key(%{"jti" => jti, "aud" => aud}), do: combine_key(jti, aud)
defp key(query), do: combine_key(jti_elem(query), aud_elem(query))
defp sub_elem(%{changes: %{sub: sub}}), do: sub
defp sub_elem(%{"sub" => sub}), do: sub
defp sub_elem(query), do: query_param(query, :sub)
defp jti_elem(query), do: query_param(query, :jti)
defp aud_elem(query), do: query_param(query, :aud)
defp combine_key(jti, aud), do: "#{jti}:#{aud}"
defp set_name(sub), do: "set:#{sub}"
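# Key layout produced by the helpers above (values are placeholders):
#
#   combine_key("2e024736-b4a6-4422-8b0a-4e89c7a7ebf9", "my_app")
#   #=> "2e024736-b4a6-4422-8b0a-4e89c7a7ebf9:my_app"
#   set_name("user-42") #=> "set:user-42"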
@doc """
Retrieves params from `query.wheres` by atom name (`:jti` and `:aud` in our case), example:
```
[
%Ecto.Query.BooleanExpr{
...
params: [
{"2e024736-b4a6-4422-8b0a-4e89c7a7ebf9", {0, :jti}},
{"my_app", {0, :aud}}
],
...
}
]
```
"""
defp query_param(query, param) do
(query.wheres |> List.first()).params
|> Enum.find(fn i -> i |> elem(1) |> elem(1) == param end)
|> elem(0)
end
end
|
lib/repo.ex
| 0.776919
| 0.615926
|
repo.ex
|
starcoder
|
defmodule StringIO do
@moduledoc """
This module provides an IO device that wraps a string.
## Examples
iex> { :ok, pid } = StringIO.open("foo")
iex> IO.read(pid, 2)
"fo"
"""
use GenServer.Behaviour
defrecordp :state, input: "", output: "", capture_prompt: false
@doc """
Creates an IO device.
If the `:capture_prompt` option is set to `true`,
prompts (specified as arguments to `IO.get*` functions)
are captured.
## Examples
iex> { :ok, pid } = StringIO.open("foo")
iex> IO.gets(pid, ">")
"foo"
iex> StringIO.contents(pid)
{ "", "" }
iex> { :ok, pid } = StringIO.open("foo", capture_prompt: true)
iex> IO.gets(pid, ">")
"foo"
iex> StringIO.contents(pid)
{ "", ">" }
"""
@spec open(binary, Keyword.t) :: { :ok, pid }
def open(string, options \\ []) when is_binary(string) do
:gen_server.start_link(__MODULE__, { string, options }, [])
end
@doc """
Returns current buffers.
## Examples
iex> { :ok, pid } = StringIO.open("in")
iex> IO.write(pid, "out")
iex> StringIO.contents(pid)
{ "in", "out" }
"""
@spec contents(pid) :: { binary, binary }
def contents(pid) when is_pid(pid) do
:gen_server.call(pid, :contents)
end
@doc """
Stops the IO device and returns remaining buffers.
## Examples
iex> { :ok, pid } = StringIO.open("in")
iex> IO.write(pid, "out")
iex> StringIO.close(pid)
{ :ok, { "in", "out" } }
"""
@spec close(pid) :: { :ok, { binary, binary } }
def close(pid) when is_pid(pid) do
:gen_server.call(pid, :close)
end
## callbacks
def init({ string, options }) do
capture_prompt = options[:capture_prompt] || false
{ :ok, state(input: string, capture_prompt: capture_prompt) }
end
def handle_info({ :io_request, from, reply_as, req }, s) do
s = io_request(from, reply_as, req, s)
{ :noreply, s }
end
def handle_info(msg, s) do
super(msg, s)
end
def handle_call(:contents, _from, state(input: input, output: output) = s) do
{ :reply, { input, output }, s }
end
def handle_call(:close, _from, state(input: input, output: output) = s) do
{ :stop, :normal, { :ok, { input, output } }, s }
end
def handle_call(request, from, s) do
super(request, from, s)
end
defp io_request(from, reply_as, req, s) do
{ reply, s } = io_request(req, s)
io_reply(from, reply_as, to_reply(reply))
s
end
defp io_request({ :put_chars, chars }, state(output: output) = s) do
{ :ok, state(s, output: << output :: binary, to_binary(chars) :: binary >>) }
end
defp io_request({ :put_chars, m, f, as }, state(output: output) = s) do
chars = apply(m, f, as)
{ :ok, state(s, output: << output :: binary, to_binary(chars) :: binary >>) }
end
defp io_request({ :put_chars, _encoding, chars }, s) do
io_request({ :put_chars, chars }, s)
end
defp io_request({ :put_chars, _encoding, mod, func, args }, s) do
io_request({ :put_chars, mod, func, args }, s)
end
defp io_request({ :get_chars, prompt, n }, s) when n >= 0 do
io_request({ :get_chars, :latin1, prompt, n }, s)
end
defp io_request({ :get_chars, encoding, prompt, n }, s) when n >= 0 do
get_chars(encoding, prompt, n, s)
end
defp io_request({ :get_line, prompt }, s) do
io_request({ :get_line, :latin1, prompt }, s)
end
defp io_request({ :get_line, encoding, prompt }, s) do
get_line(encoding, prompt, s)
end
defp io_request({ :get_until, prompt, mod, fun, args }, s) do
io_request({ :get_until, :latin1, prompt, mod, fun, args }, s)
end
defp io_request({ :get_until, encoding, prompt, mod, fun, args }, s) do
get_until(encoding, prompt, mod, fun, args, s)
end
defp io_request({ :get_password, encoding }, s) do
get_line(encoding, "", s)
end
defp io_request({ :setopts, _opts }, s) do
{ { :error, :enotsup }, s }
end
defp io_request(:getopts, s) do
{ { :ok, [binary: true, encoding: :unicode] }, s }
end
defp io_request({ :get_geometry, :columns }, s) do
{ { :error, :enotsup }, s }
end
defp io_request({ :get_geometry, :rows }, s) do
{ { :error, :enotsup }, s }
end
defp io_request({ :requests, reqs }, s) do
io_requests(reqs, { :ok, s })
end
defp io_request(_, s) do
{ { :error, :request }, s }
end
## get_chars
defp get_chars(encoding, prompt, n,
state(input: input, output: output, capture_prompt: capture_prompt) = s) do
case do_get_chars(input, encoding, n) do
{ :error, _ } = error ->
{ error, s }
{ result, input } ->
if capture_prompt do
output = << output :: binary, to_binary(prompt) :: binary >>
end
{ result, state(s, input: input, output: output) }
end
end
defp do_get_chars("", _encoding, _n) do
{ :eof, "" }
end
defp do_get_chars(input, :latin1, n) when byte_size(input) < n do
{ input, "" }
end
defp do_get_chars(input, :latin1, n) do
<< chars :: [ binary, size(n) ], rest :: binary >> = input
{ chars, rest }
end
defp do_get_chars(input, encoding, n) do
try do
case :file_io_server.count_and_find(input, n, encoding) do
{ buf_count, split_pos } when buf_count < n or split_pos == :none ->
{ input, "" }
{ _buf_count, split_pos } ->
<< chars :: [ binary, size(split_pos) ], rest :: binary >> = input
{ chars, rest }
end
catch
:exit, :invalid_unicode ->
{ :error, :invalid_unicode }
end
end
## get_line
defp get_line(encoding, prompt,
state(input: input, output: output, capture_prompt: capture_prompt) = s) do
case :unicode.characters_to_list(input, encoding) do
{ :error, _, _ } ->
{ { :error, :collect_line }, s }
{ :incomplete, _, _ } ->
{ { :error, :collect_line }, s }
chars ->
{ result, input } = do_get_line(chars, encoding)
if capture_prompt do
output = << output :: binary, to_binary(prompt) :: binary >>
end
{ result, state(s, input: input, output: output) }
end
end
defp do_get_line('', _encoding) do
{ :eof, "" }
end
defp do_get_line(chars, encoding) do
{ line, rest } = collect_line(chars)
{ :unicode.characters_to_binary(line, encoding),
:unicode.characters_to_binary(rest, encoding) }
end
## get_until
defp get_until(encoding, prompt, mod, fun, args,
state(input: input, output: output, capture_prompt: capture_prompt) = s) do
case :unicode.characters_to_list(input, encoding) do
{ :error, _, _ } ->
{ :error, s }
{ :incomplete, _, _ } ->
{ :error, s }
chars ->
{ result, input, count } = do_get_until(chars, encoding, mod, fun, args)
if capture_prompt do
output = << output :: binary, :binary.copy(to_binary(prompt), count) :: binary >>
end
input =
case input do
:eof -> ""
_ -> :unicode.characters_to_binary(input, encoding)
end
{ result, state(s, input: input, output: output) }
end
end
defp do_get_until(chars, encoding, mod, fun, args, continuation \\ [], count \\ 0)
defp do_get_until('', encoding, mod, fun, args, continuation, count) do
case apply(mod, fun, [continuation, :eof | args]) do
{ :done, result, rest } ->
{ result, rest, count + 1 }
{ :more, next_continuation } ->
do_get_until('', encoding, mod, fun, args, next_continuation, count + 1)
end
end
defp do_get_until(chars, encoding, mod, fun, args, continuation, count) do
{ line, rest } = collect_line(chars)
case apply(mod, fun, [continuation, line | args]) do
{ :done, result, rest1 } ->
unless rest1 == :eof do
rest = rest1 ++ rest
end
{ result, rest, count + 1 }
{ :more, next_continuation } ->
do_get_until(rest, encoding, mod, fun, args, next_continuation, count + 1)
end
end
## io_requests
defp io_requests([r|rs], { :ok, s }) do
io_requests(rs, io_request(r, s))
end
defp io_requests(_, result) do
result
end
## helpers
defp collect_line(chars) do
collect_line(chars, [])
end
defp collect_line([], stack) do
{ :lists.reverse(stack), [] }
end
defp collect_line([?\r, ?\n | rest], stack) do
{ :lists.reverse([?\n|stack]), rest }
end
defp collect_line([?\n | rest], stack) do
{ :lists.reverse([?\n|stack]), rest }
end
defp collect_line([h|t], stack) do
collect_line(t, [h|stack])
end
defp io_reply(from, reply_as, reply) do
send from, { :io_reply, reply_as, reply }
end
defp to_binary(list) when is_list(list), do: String.from_char_list!(list)
defp to_binary(binary) when is_binary(binary), do: binary
defp to_reply(list) when is_list(list), do: String.from_char_list!(list)
defp to_reply(other), do: other
end
|
lib/elixir/lib/string_io.ex
| 0.82755
| 0.401453
|
string_io.ex
|
starcoder
|
defmodule Sanbase.Alert.Validation.Target do
@doc ~s"""
Check if a given `target` is a valid target argument for an alert.
A target is valid if it matches one of several criteria. For more information,
check the function heads.
"""
def valid_target?("default"), do: :ok
def valid_target?(%{user_list: int}) when is_integer(int), do: :ok
def valid_target?(%{watchlist_id: int}) when is_integer(int), do: :ok
def valid_target?(%{text: text}) when is_binary(text), do: :ok
def valid_target?(%{word: word}) when is_binary(word), do: :ok
def valid_target?(%{market_segments: [market_segment | _]}) when is_binary(market_segment),
do: :ok
def valid_target?(%{word: words}) when is_list(words) do
Enum.find(words, fn word -> not is_binary(word) end)
|> case do
nil -> :ok
_ -> {:error, "The target list contains elements that are not string"}
end
end
def valid_target?(%{slug: slug}) when is_binary(slug), do: :ok
def valid_target?(%{slug: slugs}) when is_list(slugs) do
Enum.find(slugs, fn slug -> not is_binary(slug) end)
|> case do
nil -> :ok
_ -> {:error, "The target list contains elements that are not string"}
end
end
def valid_target?(target),
do: {:error, "#{inspect(target)} is not a valid target"}
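# A few illustrative calls matching the clauses above (slugs are hypothetical):
#
#   valid_target?("default")           #=> :ok
#   valid_target?(%{slug: "bitcoin"})  #=> :ok
#   valid_target?(%{slug: ["a", "b"]}) #=> :ok
#   valid_target?(%{slug: [1, 2]})
#   #=> {:error, "The target list contains elements that are not strings"}
#   valid_target?(42)                  #=> {:error, "42 is not a valid target"}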
@doc ~s"""
Check if a target is a valid eth_wallet alert target.
It is a valid target if:
- It is %{eth_address: address_or_addresses}
- It is not a watchlist
- It returns true for valid_target
"""
@spec valid_eth_wallet_target?(any) :: :ok | {:error, <<_::64, _::_*8>>}
def valid_eth_wallet_target?(%{eth_address: address_or_addresses}) do
valid_crypto_address?(address_or_addresses)
end
def valid_eth_wallet_target?(%{word: _}) do
{:error, "Word is not valid wallet target"}
end
def valid_eth_wallet_target?(%{text: _}) do
{:error, "Text is not valid wallet target"}
end
def valid_eth_wallet_target?(%{user_list: _}) do
{:error, "Watchlists are not valid wallet target"}
end
def valid_eth_wallet_target?(%{watchlist_id: _}) do
{:error, "Watchlists are not valid wallet target"}
end
def valid_eth_wallet_target?(target), do: valid_target?(target)
@doc ~s"""
Check if the target is a valid crypto address.
A valid crypto address is:
- An address or a list of addresses
- A `slug`. In this case the blockchain addresses associated with this
slug will be used.
"""
def valid_crypto_address?(%{slug: slug}), do: valid_target?(%{slug: slug})
def valid_crypto_address?(%{address: address_or_addresses}) do
valid_crypto_address?(address_or_addresses)
end
def valid_crypto_address?(address_or_addresses)
when is_binary(address_or_addresses) or is_list(address_or_addresses) do
address_or_addresses
|> List.wrap()
|> Enum.find(fn elem -> not is_binary(elem) end)
|> case do
nil ->
:ok
_ ->
{:error,
"#{inspect(address_or_addresses)} is not a valid crypto address. The list contains elements that are not string"}
end
end
def valid_crypto_address?(data), do: {:error, "#{inspect(data)} is not a valid crypto address"}
def valid_historical_balance_selector?(selector) when is_map(selector) do
case Sanbase.Clickhouse.HistoricalBalance.selector_to_args(selector) do
%{module: _, blockchain: _, asset: _, decimals: _, slug: _} -> :ok
{:error, _error} -> "#{inspect(selector)} is not a valid historical balance selector."
end
end
def valid_historical_balance_selector?(selector) do
{:error,
"#{inspect(selector)} is not a valid historical balance selector - it has to be a map"}
end
end
|
lib/sanbase/alerts/trigger/validation/target_validation.ex
| 0.805938
| 0.500854
|
target_validation.ex
|
starcoder
|
defmodule Money do
@moduledoc ~S"""
`Money` represents some monetary value (stored in cents) in a given currency.
See `Money.Ecto` for a custom type implementation that can be used in schemas.
In order to use the `~M` sigil, import the module:
import Money
## Examples
iex> Money.new("10.00 USD")
~M"10.00 USD"
iex> ~M"10.00 USD".currency
"USD"
iex> ~M"10.01 USD".cents
1001
iex> Money.add(~M"10 USD", ~M"20 USD")
~M"30.00 USD"
iex> Kernel.to_string(~M"-10.50 USD")
"-10.50 USD"
iex> inspect(~M"10 USD")
"~M\"10.00 USD\""
iex> Money.parse("10 USD")
{:ok, ~M"10 USD"}
iex> Money.parse("10.1 USD")
:error
iex> ~M"10.001 USD"
** (ArgumentError) invalid string: "10.001 USD"
"""
defstruct cents: 0, currency: nil
def new(str) when is_binary(str) do
case parse(str) do
{:ok, money} -> money
:error -> raise ArgumentError, "invalid string: #{inspect str}"
end
end
def parse(str) when is_binary(str) do
case Regex.run(~r/\A(-?)(\d+)(\.(\d{2}))?\ ([A-Z]{3})\z/, str) do
[_, sign, dollars, _, cents, currency] ->
do_parse(sign, dollars, cents, currency)
_ -> :error
end
end
defp do_parse(sign, dollars, "", currency),
do: do_parse(sign, dollars, "00", currency)
defp do_parse(sign, dollars, cents, currency) do
sign = if sign == "-", do: -1, else: 1
cents = sign * (String.to_integer(dollars) * 100 + String.to_integer(cents))
{:ok, %Money{cents: cents, currency: currency}}
end
def sigil_M(str, _opts),
do: new(str)
def add(%Money{cents: left_cents, currency: currency},
%Money{cents: right_cents, currency: currency}) do
%Money{cents: left_cents + right_cents, currency: currency}
end
def to_string(%Money{cents: cents, currency: currency}) when cents >= 0 do
{dollars, cents} = {div(cents, 100), rem(cents, 100)}
cents = :io_lib.format("~2..0B", [cents]) |> IO.iodata_to_binary
"#{dollars}.#{cents} #{currency}"
end
def to_string(%Money{cents: cents, currency: currency}) do
"-" <> Money.to_string(%Money{cents: -cents, currency: currency})
end
end
defimpl Inspect, for: Money do
def inspect(money, _opts),
do: "~M\"#{money}\""
end
defimpl String.Chars, for: Money do
defdelegate to_string(data), to: Money
end
|
apps/money/lib/money.ex
| 0.837155
| 0.470493
|
money.ex
|
starcoder
|
import Kernel, except: [apply: 2]
defmodule Ecto.Query.Builder.OrderBy do
@moduledoc false
alias Ecto.Query.Builder
@doc """
Escapes an order by query.
The query is escaped to a list of `{direction, expression}`
pairs at runtime. Escaping also validates direction is one of
`:asc` or `:desc`.
## Examples
iex> escape(:order_by, quote do [x.x, desc: 13] end, [x: 0], __ENV__)
{[asc: {:{}, [], [{:{}, [], [:., [], [{:{}, [], [:&, [], [0]]}, :x]]}, [], []]},
desc: 13],
%{}}
"""
@spec escape(:order_by | :distinct, Macro.t, Keyword.t, Macro.Env.t) :: Macro.t
def escape(kind, {:^, _, [expr]}, _vars, _env) do
{quote(do: Ecto.Query.Builder.OrderBy.order_by!(unquote(kind), unquote(expr))), %{}}
end
def escape(kind, expr, vars, env) do
expr
|> List.wrap
|> Enum.map_reduce(%{}, &do_escape(&1, &2, kind, vars, env))
end
defp do_escape({dir, {:^, _, [expr]}}, params, kind, _vars, _env) do
{{quoted_dir!(kind, dir), quote(do: Ecto.Query.Builder.OrderBy.field!(unquote(kind), unquote(expr)))}, params}
end
defp do_escape({:^, _, [expr]}, params, kind, _vars, _env) do
{{:asc, quote(do: Ecto.Query.Builder.OrderBy.field!(unquote(kind), unquote(expr)))}, params}
end
defp do_escape({dir, field}, params, kind, _vars, _env) when is_atom(field) do
{{quoted_dir!(kind, dir), Macro.escape(to_field(field))}, params}
end
defp do_escape(field, params, _kind, _vars, _env) when is_atom(field) do
{{:asc, Macro.escape(to_field(field))}, params}
end
defp do_escape({dir, expr}, params, kind, vars, env) do
{ast, params} = Builder.escape(expr, :any, params, vars, env)
{{quoted_dir!(kind, dir), ast}, params}
end
defp do_escape(expr, params, _kind, vars, env) do
{ast, params} = Builder.escape(expr, :any, params, vars, env)
{{:asc, ast}, params}
end
@doc """
Checks the variable is a quoted direction at compilation time or
delegate the check to runtime for interpolation.
"""
def quoted_dir!(kind, {:^, _, [expr]}),
do: quote(do: Ecto.Query.Builder.OrderBy.dir!(unquote(kind), unquote(expr)))
def quoted_dir!(_kind, dir) when dir in [:asc, :desc],
do: dir
def quoted_dir!(kind, other),
do: Builder.error!("expected :asc, :desc or interpolated value in `#{kind}`, got: `#{inspect other}`")
@doc """
Called at runtime to verify the direction.
"""
def dir!(_kind, dir) when dir in [:asc, :desc],
do: dir
def dir!(kind, other),
do: Builder.error!("expected :asc or :desc in `#{kind}`, got: `#{inspect other}`")
@doc """
Called at runtime to verify a field.
"""
def field!(_kind, field) when is_atom(field),
do: to_field(field)
def field!(kind, other) do
raise ArgumentError,
"expected a field as an atom in `#{kind}`, got: `#{inspect other}`"
end
@doc """
Called at runtime to verify order_by.
"""
def order_by!(kind, exprs) do
Enum.map List.wrap(exprs), fn
{dir, field} when dir in [:asc, :desc] and is_atom(field) ->
{dir, to_field(field)}
field when is_atom(field) ->
{:asc, to_field(field)}
_ ->
raise ArgumentError,
"expected a list or keyword list of fields in `#{kind}`, got: `#{inspect exprs}`"
end
end
defp to_field(field), do: {{:., [], [{:&, [], [0]}, field]}, [], []}
@doc """
Builds a quoted expression.
The quoted expression should evaluate to a query at runtime.
If possible, it does all calculations at compile time to avoid
runtime work.
"""
@spec build(Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t
def build(query, binding, expr, env) do
{query, binding} = Builder.escape_binding(query, binding)
{expr, params} = escape(:order_by, expr, binding, env)
params = Builder.escape_params(params)
order_by = quote do: %Ecto.Query.QueryExpr{
expr: unquote(expr),
params: unquote(params),
file: unquote(env.file),
line: unquote(env.line)}
Builder.apply_query(query, __MODULE__, [order_by], env)
end
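# For reference, this builder backs the public `Ecto.Query.order_by/3` macro,
# e.g. (schema and fields are illustrative):
#
#   from(p in Post, order_by: [asc: p.title, desc: p.inserted_at])
#
# which is translated into a quoted query expression through `build/4` at
# compile time.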
@doc """
The callback applied by `build/4` to build the query.
"""
@spec apply(Ecto.Queryable.t, term) :: Ecto.Query.t
def apply(%Ecto.Query{order_bys: order_bys} = query, expr) do
%{query | order_bys: order_bys ++ [expr]}
end
def apply(query, expr) do
apply(Ecto.Queryable.to_query(query), expr)
end
end
|
data/web/deps/ecto/lib/ecto/query/builder/order_by.ex
| 0.853593
| 0.508422
|
order_by.ex
|
starcoder
|
defmodule ChainDefinition do
@enforce_keys [
:block_reward_position,
:chain_id,
:check_window,
:min_diversity,
:min_transaction_fee,
:get_block_hash_limit,
:allow_contract_override
]
defstruct @enforce_keys
@type t :: %ChainDefinition{
block_reward_position: :last | :first,
chain_id: non_neg_integer(),
check_window: boolean(),
get_block_hash_limit: non_neg_integer(),
min_diversity: non_neg_integer(),
min_transaction_fee: boolean(),
allow_contract_override: boolean()
}
@spec get_block_hash_limit(non_neg_integer) :: non_neg_integer()
def get_block_hash_limit(blockheight) do
chain_definition(blockheight).get_block_hash_limit
end
@spec min_diversity(non_neg_integer) :: non_neg_integer()
def min_diversity(blockheight) do
chain_definition(blockheight).min_diversity
end
@spec block_reward_position(non_neg_integer) :: :last | :first
def block_reward_position(blockheight) do
chain_definition(blockheight).block_reward_position
end
@spec min_transaction_fee(non_neg_integer) :: boolean()
def min_transaction_fee(blockheight) do
chain_definition(blockheight).min_transaction_fee
end
@spec allow_contract_override(non_neg_integer) :: boolean()
def allow_contract_override(blockheight) do
chain_definition(blockheight).allow_contract_override
end
@spec chain_id(non_neg_integer) :: non_neg_integer()
def chain_id(blockheight) do
chain_definition(blockheight).chain_id
end
@spec check_window(non_neg_integer) :: boolean()
def check_window(blockheight) do
chain_definition(blockheight).check_window
end
@spec genesis_accounts() :: [{binary(), Account.t()}]
def genesis_accounts() do
chain_definition().genesis_accounts()
end
@spec genesis_transactions(Wallet.t()) :: [Chain.Transaction.t()]
def genesis_transactions(miner) do
chain_definition().genesis_transactions(miner)
end
@spec genesis_timestamp :: non_neg_integer()
def genesis_timestamp() do
chain_definition().genesis_timestamp()
end
@spec genesis_miner :: Wallet.t()
def genesis_miner() do
chain_definition().genesis_miner()
end
defp chain_definition() do
:persistent_term.get(:chain_definition, nil)
end
@spec chain_definition(non_neg_integer()) :: t()
defp chain_definition(blockheight) do
chain_definition().network(blockheight)
end
@spec genesis_pre_hash :: <<_::256>>
def genesis_pre_hash() do
chain_definition().genesis_pre_hash()
end
@spec hardforks(Chain.Block.t()) :: Chain.Block.t()
def hardforks(sim_block) do
chain_definition().hardforks(sim_block)
end
end
|
lib/chaindefinition.ex
| 0.811228
| 0.430626
|
chaindefinition.ex
|
starcoder
|
defmodule Apoc do
@moduledoc """
Comprehensive docs coming soon!
"""
@typedoc """
Hex (lowercase) encoded string
See `Apoc.hex/1`
"""
@type hexstring :: binary()
@typedoc """
An encoded string that represents a string encoded in Apoc's encoding scheme
(URL safe Base 64).
See `Apoc.encode/1`
"""
@type encoded_string :: binary()
@doc """
Hash a message with the default hashing scheme
and encode it with `Apoc.encode/1`.
See `Apoc.Hash` for other hashing schemes and encoding options
"""
@spec hash(message :: binary) :: {:ok, encoded_string} | :error
def hash(message) do
Apoc.Hash.hash_encode(message)
end
# TODO: Spec (returns a tuple)
@doc "Decodes a URL safe base 64 string to binary"
def decode(encoded) do
Base.url_decode64(encoded, padding: false)
end
def decode!(encoded) do
Base.url_decode64!(encoded, padding: false)
end
@doc """
Encodes a binary as a URL safe base 64 string
## Example
```
iex> Apoc.encode(<<16, 32, 64>>)
"ECBA"
```
## Encoding Scheme
Base 64 is similar to hex encoding but now instead of using 4-bit nibbles
it uses groups of 6 bits (64 possible values) and then assigns each to
a character as defined here https://hexdocs.pm/elixir/Base.html#module-base-64-url-and-filename-safe-alphabet.
The algorithm is a little more complex now as we have to worry about padding
to the nearest mulitple of 6 bytes. However, a simple example can be demonstrated
with 3 bytes which is 24 bits and already a mulitple of 6.
Take the binary `<<10, 10, 10>>`, we can break it into 6-bit components:
```
iex> <<a::6, b::6, c::6, d::6>> = <<10, 10, 10>>
...> [a, b, c, d]
[2, 32, 40, 10]
```
Now mapping each value to the safe alphabet we get:
```
iex> Apoc.encode(<<10, 10, 10>>)
"CgoK"
```
"""
@spec encode(payload :: binary) :: encoded_string()
def encode(payload) when is_binary(payload) do
Base.url_encode64(payload, padding: false)
end
@doc """
Encodes a binary in hex format.
Hex strings represent a binary by splitting each
byte into two parts of 4-bits (called a "nibble").
Each nibble has 16 possible values, 0 through to 15.
Values 0 to 9 stay as they are while values 10 to 15
are mapped to the letters a through f.
## Example
```
iex> Apoc.hex(<<27, 90, 33, 46>>)
"1b5a212e"
```
## Encoding Scheme
The binary `<<184>>` splits into the nibbles x and y:
```
iex> <<x::4, y::4>> = <<184>>
...> [x, y]
[11, 8]
```
Now 11 maps to the character "b" while 8 stays the same
so the hex encoding of the byte `<<184>>` is "b8".
```
iex> Apoc.hex(<<184>>)
"b8"
```
Note that hex strings are exactly twice as long (in bytes)
as the original binary.
See also `Base.encode16/2`
"""
@spec hex(payload :: binary) :: hexstring()
def hex(payload) do
Base.encode16(payload, case: :lower)
end
def decode_hex(payload) do
Base.decode16(payload, case: :lower)
end
@doc """
Simple wrapper to `:crypto.strong_rand_bytes/1`.
Returns a secure binary of `num` random bytes
"""
def rand_bytes(num) do
:crypto.strong_rand_bytes(num)
end
defdelegate encrypt(message, key), to: Apoc.AES
defdelegate decrypt(encrypted, key), to: Apoc.AES
defdelegate sign(message, key, opts \\ []), to: Apoc.MAC.HMAC
defdelegate verify(tag, message, key, opts \\ []), to: Apoc.MAC.HMAC
@doc """
Compares two binaries for equality in constant time
to avoid timing attacks.
See https://codahale.com/a-lesson-in-timing-attacks/
and `Plug.Crypto`.
"""
def secure_compare(left, right) do
if byte_size(left) == byte_size(right) do
secure_compare(left, right, 0) == 0
else
false
end
end
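# Illustrative usage; the running time depends only on the byte length:
#
#   Apoc.secure_compare("s3cret", "s3cret") #=> true
#   Apoc.secure_compare("s3cret", "s3cre7") #=> false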
defp secure_compare(<<x, left::binary>>, <<y, right::binary>>, acc) do
use Bitwise
xorred = x ^^^ y
secure_compare(left, right, acc ||| xorred)
end
defp secure_compare(<<>>, <<>>, acc) do
acc
end
end
|
lib/apoc.ex
| 0.865764
| 0.888614
|
apoc.ex
|
starcoder
|
defmodule Searchex.Keyword.Server do
@moduledoc false
use GenServer
@doc """
Keyword Server
We start one keyword server for each keyword in the index. The state looks like:
%{"DIOCID1" => [list of positions], "DIOCID2" => [list of positions]}
"""
def start_link(server_name \\ :server) do
GenServer.start_link(__MODULE__, %{}, name: server_name)
end
def add_keyword_position(pt_name, keyword, docid, position) do
server = get_keyword_server(pt_name, keyword)
GenServer.cast(server, {:add, docid, position})
end
@doc """
Perform a query
terms is a list of strings = ["term1", "term2"]
doc_matches looks like:
[{"term1", %{"docid1" => [pos1, pos2], "docid2" => [pos3, pos4]}}, {"term2", %{}}]
matches_per_term_and_doc looks like:
%{{"term1", "docid1"} => 23, {"term1", "docid2"} => 4, ...}
"""
def do_query({pt_name, terms}) when is_list(terms) do
stem_terms = Enum.map(terms, &(StemEx.stem(&1)))
doc_matches = gen_doc_matches(pt_name, stem_terms)
matches_per_term_and_doc = gen_matches_per_term_and_doc(doc_matches)
Searchex.Command.Search.Bm25.doc_scores(terms, doc_matches, matches_per_term_and_doc)
|> Enum.map(fn(el) -> {elem(el, 0), Float.round(elem(el, 1) * 1.0, 3)} end)
end
def do_query({pt_name, terms}) when is_binary(terms), do: do_query({pt_name, String.split(terms)})
def do_query(pt_name, terms) when is_list(terms), do: do_query({pt_name, terms})
def do_query(pt_name, terms) when is_binary(terms), do: do_query({pt_name, String.split(terms)})
def gen_doc_matches(pt_name, terms) do
Enum.map(terms, fn(term) -> get_ids(pt_name, term) end)
end
def gen_matches_per_term_and_doc(doc_matches) do
Enum.reduce doc_matches, %{}, fn({term, docids}, acc1) ->
tmp = Enum.reduce docids, %{}, fn({docid, pos_list}, acc2) ->
Map.merge(acc2, %{{term, docid} => Enum.count(pos_list)})
end
Map.merge(acc1, tmp)
end
end
@doc """
Gets the list of IDs for a term.
The returned data structure looks like:
{term, %{"DOCID1" => [list of positions], "DOCID2" => [list of positions]}}
"""
def get_ids(pt_name, term) do
case find_keyword_server(pt_name, term) do
{:ok, server} -> {term, GenServer.call(server, :get_ids)}
{:error, _ } -> {term, %{}}
end
end
@doc """
Sets the state for a KeywordServer. This is useful when building an in-memory
index from a persisted index saved to disk.
The state looks like:
%{"DOCID1" => [list of positions], "DOCID2" => [list of positions]}
"""
def set_state(server, new_state) do
GenServer.cast(server, {:set_state, new_state})
end
# --- CALLBACKS
def init(_opts) do
{:ok, %{}}
end
def handle_call(:get_ids, _from, state) do
result = state
{:reply, result, state}
end
def handle_cast({:add, docid, position}, state) do
old_list = state[docid] || []
new_list = old_list ++ [position]
new_state = Map.merge(state, %{docid => new_list})
{:noreply, new_state}
end
def handle_cast({:set_state, new_state}, _state) do
{:noreply, new_state}
end
def get_keyword_server(pt_name, keyword) do
name = if is_atom(keyword), do: keyword, else: keyword_server_name(pt_name, keyword)
Process.whereis(name) || Searchex.Keyword.Supervisor.add_child_and_return_pid(pt_name, name)
end
def find_keyword_server(pt_name, keyword) do
name = keyword_server_name(pt_name, keyword)
case Process.whereis(name) do
nil -> {:error, "Not found"}
pid -> {:ok, pid}
end
end
def keyword_server_name(pt_name, keyword) do
String.to_atom("kw_" <> to_string(pt_name) <> "_" <> keyword)
end
end
|
lib/searchex/keyword/server.ex
| 0.581778
| 0.445047
|
server.ex
|
starcoder
|
defmodule Xgit.Repository.WorkingTree do
@moduledoc ~S"""
A working tree is an on-disk manifestation of a commit or pending commit in
a git repository.
An `Xgit.Repository.Storage` may have a default working tree associated with it or
it may not. (A repository without a working tree is often referred to as a
"bare" repository.)
More than one working tree may be associated with a repository, though this
is not (currently) well-tested in Xgit.
A working tree is itself strictly tied to a file system, but it need not be
tied to an on-disk repository instance.
_IMPORTANT NOTE:_ This is intended as a reference implementation largely
for testing purposes and may not necessarily handle all of the edge cases that
the traditional `git` command-line interface will handle.
"""
use GenServer
import Xgit.Util.ForceCoverage
alias Xgit.DirCache
alias Xgit.DirCache.Entry, as: DirCacheEntry
alias Xgit.FilePath
alias Xgit.Object
alias Xgit.ObjectId
alias Xgit.Repository.Storage
alias Xgit.Tree
alias Xgit.Util.TrailingHashDevice
require Logger
@typedoc ~S"""
The process ID for a `WorkingTree` process.
"""
@type t :: pid
@doc """
Starts a `WorkingTree` process linked to the current process.
## Parameters
`repository` is the associated `Xgit.Repository.Storage` process.
`work_dir` is the root path for the working tree.
`options` are passed to `GenServer.start_link/3`.
## Return Value
See `GenServer.start_link/3`.
If the process is unable to create the working directory root, the response
will be `{:error, {:mkdir, :eexist}}` (or perhaps a different posix error code).
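## Example
A minimal sketch (assumes `repo` is a running `Xgit.Repository.Storage` process):
{:ok, working_tree} = Xgit.Repository.WorkingTree.start_link(repo, "/path/to/work_dir")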
"""
@spec start_link(repository :: Storage.t(), work_dir :: Path.t(), GenServer.options()) ::
GenServer.on_start()
def start_link(repository, work_dir, options \\ [])
when is_pid(repository) and is_binary(work_dir) and is_list(options) do
Storage.assert_valid(repository)
GenServer.start_link(__MODULE__, {repository, work_dir}, options)
end
@impl true
def init({repository, work_dir}) do
case File.mkdir_p(work_dir) do
:ok ->
index_path = Path.join([work_dir, ".git", "index"])
Process.monitor(repository)
# Read index file here or maybe in a :continue handler?
cover {:ok, %{repository: repository, work_dir: work_dir, index_path: index_path}}
{:error, reason} ->
cover {:stop, {:mkdir, reason}}
end
end
@doc ~S"""
Returns `true` if the argument is a PID representing a valid `WorkingTree` process.
"""
@spec valid?(working_tree :: term) :: boolean
def valid?(working_tree) when is_pid(working_tree) do
Process.alive?(working_tree) &&
GenServer.call(working_tree, :valid_working_tree?) == :valid_working_tree
end
def valid?(_), do: cover(false)
@doc ~S"""
Returns a current snapshot of the working tree state.
## Return Value
`{:ok, dir_cache}` if an index file exists and could be parsed as a dir cache file.
`{:ok, dir_cache}` if no index file exists. (`dir_cache` will have zero entries.)
`{:error, reason}` if the file exists but could not be parsed. (See
`Xgit.DirCache.from_iodevice/1` for possible reason codes.)
## TO DO
Find index file in appropriate location (i.e. as potentially modified
by `.git/config` file). [Issue #86](https://github.com/elixir-git/xgit/issues/86)
Cache state of index file so we don't have to parse it for every
call. [Issue #87](https://github.com/elixir-git/xgit/issues/87)
Consider scalability of passing a potentially large `Xgit.DirCache` structure
across process boundaries. [Issue #88](https://github.com/elixir-git/xgit/issues/88)
"""
@spec dir_cache(working_tree :: t) ::
{:ok, DirCache.t()} | {:error, reason :: DirCache.from_iodevice_reason()}
def dir_cache(working_tree) when is_pid(working_tree),
do: GenServer.call(working_tree, :dir_cache)
defp handle_dir_cache(%{index_path: index_path} = state) do
case parse_index_file_if_exists(index_path) do
{:ok, dir_cache} -> {:reply, {:ok, dir_cache}, state}
{:error, reason} -> {:reply, {:error, reason}, state}
end
end
@typedoc ~S"""
Error code reasons returned by `reset_dir_cache/1`.
"""
@type reset_dir_cache_reason :: DirCache.to_iodevice_reason()
@doc ~S"""
Reset the dir cache to empty and rewrite the index file accordingly.
## Return Values
`:ok` if successful.
`{:error, reason}` if unable. The relevant reason codes may come from:
* `Xgit.DirCache.to_iodevice/2`.
"""
@spec reset_dir_cache(working_tree :: t) ::
:ok | {:error, reset_dir_cache_reason}
def reset_dir_cache(working_tree) when is_pid(working_tree),
do: GenServer.call(working_tree, :reset_dir_cache)
defp handle_reset_dir_cache(%{index_path: index_path} = state) do
case write_index_file(DirCache.empty(), index_path) do
:ok -> cover {:reply, :ok, state}
{:error, reason} -> cover {:reply, {:error, reason}, state}
end
end
@typedoc ~S"""
Error code reasons returned by `update_dir_cache/3`.
"""
@type update_dir_cache_reason ::
DirCache.add_entries_reason()
| DirCache.from_iodevice_reason()
| DirCache.remove_entries_reason()
| DirCache.to_iodevice_reason()
@doc ~S"""
Apply updates to the dir cache and rewrite the index tree accordingly.
## Parameters
`add`: a list of `Xgit.DirCache.Entry` structs to add to the dir cache.
In the event of collisions with existing entries, the existing entries will
be replaced with the corresponding new entries.
`remove`: a list of `{path, stage}` tuples to remove from the dir cache.
`stage` must be `0..3` to remove a specific stage entry or `:all` to match
any entry for the `path`.
## Return Values
`{:ok, dir_cache}` where `dir_cache` is the original `dir_cache` with the new
entries added (and properly sorted) and targeted entries removed.
`{:error, reason}` if unable. The relevant reason codes may come from:
* `Xgit.DirCache.add_entries/2`
* `Xgit.DirCache.from_iodevice/1`
* `Xgit.DirCache.remove_entries/2`
* `Xgit.DirCache.to_iodevice/2`.
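## Example
A minimal sketch (assumes `working_tree` is a valid process and `entry` is an `Xgit.DirCache.Entry` struct):
{:ok, _dir_cache} = update_dir_cache(working_tree, [entry], [])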
## TO DO
Find index file in appropriate location (i.e. as potentially modified
by `.git/config` file). [Issue #86](https://github.com/elixir-git/xgit/issues/86)
Cache state of index file so we don't have to parse it for every
call. [Issue #87](https://github.com/elixir-git/xgit/issues/87)
"""
@spec update_dir_cache(
working_tree :: t,
add :: [DirCacheEntry.t()],
remove :: [{path :: FilePath.t(), stage :: DirCacheEntry.stage_match()}]
) ::
{:ok, DirCache.t()} | {:error, update_dir_cache_reason}
def update_dir_cache(working_tree, add, remove)
when is_pid(working_tree) and is_list(add) and is_list(remove),
do: GenServer.call(working_tree, {:update_dir_cache, add, remove})
defp handle_update_dir_cache(add, remove, %{index_path: index_path} = state) do
with {:ok, dir_cache} <- parse_index_file_if_exists(index_path),
{:ok, dir_cache} <- DirCache.add_entries(dir_cache, add),
{:ok, dir_cache} <- DirCache.remove_entries(dir_cache, remove),
:ok <- write_index_file(dir_cache, index_path) do
{:reply, {:ok, dir_cache}, state}
else
{:error, reason} -> {:reply, {:error, reason}, state}
end
end
@typedoc ~S"""
Reason codes that can be returned by `read_tree/3`.
"""
@type read_tree_reason ::
:objects_missing
| DirCache.to_iodevice_reason()
| Storage.get_object_reason()
| Tree.from_object_reason()
@doc ~S"""
Read a `tree` object and any trees it may refer to and populate the dir cache accordingly.
Does not update files in the working tree itself.
Analogous to [`git read-tree`](https://git-scm.com/docs/git-read-tree).
## Parameters
`object_id` is the object ID of the root working tree.
## Options
`:missing_ok?`: `true` to ignore any objects that are referenced by the tree
structures that are not present in the object database. Normally this would be an error.
## Return Value
`:ok` if successful.
`{:error, :objects_missing}` if any of the objects referenced by the index
are not present in the object store. (Exception: If `missing_ok?` is `true`,
then this condition will be ignored.)
Reason codes may also come from the following functions:
* `Xgit.DirCache.to_iodevice/2`
* `Xgit.Repository.Storage.get_object/2`
* `Xgit.Tree.from_object/1`
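## Example
A minimal sketch (assumes `working_tree` is valid and `tree_id` is the ID of a tree object in the repository):
:ok = read_tree(working_tree, tree_id, missing_ok?: false)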
## TO DO
Implement `--prefix` option. [Issue #175](https://github.com/elixir-git/xgit/issues/175)
"""
@spec read_tree(working_tree :: t, object_id :: ObjectId.t(), missing_ok?: boolean) ::
:ok | {:error, reason :: read_tree_reason}
def read_tree(working_tree, object_id, opts \\ [])
when is_pid(working_tree) and is_binary(object_id) and is_list(opts) do
missing_ok? = validate_read_tree_options(opts)
GenServer.call(working_tree, {:read_tree, object_id, missing_ok?})
end
defp validate_read_tree_options(opts) do
missing_ok? = Keyword.get(opts, :missing_ok?, false)
unless is_boolean(missing_ok?) do
raise ArgumentError,
"Xgit.Repository.WorkingTree.read_tree/3: missing_ok? #{inspect(missing_ok?)} is invalid"
end
missing_ok?
end
defp handle_read_tree(
object_id,
missing_ok?,
%{repository: repository, index_path: index_path} = state
) do
with {:ok, %DirCache{entries: entries} = dir_cache} <-
tree_to_dir_cache(repository, object_id),
{:has_all_objects?, true} <-
{:has_all_objects?, has_all_objects?(repository, entries, missing_ok?)},
:ok <- write_index_file(dir_cache, index_path) do
cover {:reply, :ok, state}
else
{:error, reason} -> cover {:reply, {:error, reason}, state}
{:has_all_objects?, false} -> cover {:reply, {:error, :objects_missing}, state}
end
end
defp tree_to_dir_cache(repository, object_id) do
case tree_to_dir_cache_entries(repository, object_id, '', []) do
{:ok, reversed_entries} ->
{:ok,
%DirCache{
version: 2,
entry_count: Enum.count(reversed_entries),
entries: Enum.reverse(reversed_entries)
}}
{:error, reason} ->
{:error, reason}
end
end
defp tree_to_dir_cache_entries(repository, object_id, prefix, acc) do
with {:ok, object} <- Storage.get_object(repository, object_id),
{:ok, %Tree{entries: tree_entries} = _tree} <- Tree.from_object(object) do
tree_entries_to_dir_cache_entries(repository, tree_entries, prefix, acc)
# TO DO: A malformed tree could cause an infinite loop here.
# https://github.com/elixir-git/xgit/issues/178
else
{:error, reason} -> {:error, reason}
end
end
defp tree_entries_to_dir_cache_entries(repository, tree_entries, prefix, acc)
defp tree_entries_to_dir_cache_entries(_repository, [], _prefix, acc), do: {:ok, acc}
defp tree_entries_to_dir_cache_entries(
repository,
[%{mode: 0o040000, object_id: object_id, name: name} = _tree_entry | tail],
prefix,
acc
) do
case tree_to_dir_cache_entries(repository, object_id, append_to_prefix(prefix, name), acc) do
{:ok, acc} ->
tree_entries_to_dir_cache_entries(repository, tail, prefix, acc)
{:error, reason} ->
{:error, reason}
end
end
defp tree_entries_to_dir_cache_entries(
repository,
[%{mode: mode, object_id: object_id, name: name} = _tree_entry | tail],
prefix,
acc
) do
dir_cache_entry = %DirCacheEntry{
name: append_to_prefix(prefix, name),
stage: 0,
object_id: object_id,
mode: mode,
size: 0,
ctime: 0,
mtime: 0
}
tree_entries_to_dir_cache_entries(repository, tail, prefix, [dir_cache_entry | acc])
end
defp append_to_prefix('', name), do: name
defp append_to_prefix(prefix, name), do: '#{prefix}/#{name}'
@typedoc ~S"""
Reason codes that can be returned by `write_tree/2`.
"""
@type write_tree_reason ::
:incomplete_merge
| :objects_missing
| :prefix_not_found
| DirCache.from_iodevice_reason()
| DirCache.to_tree_objects_reason()
| Storage.put_loose_object_reason()
@doc ~S"""
Translates the current dir cache, as reflected in its index file, to one or more
tree objects.
The working tree must be in a fully-merged state.
## Options
`:missing_ok?`: `true` to ignore any objects that are referenced by the index
file that are not present in the object database. Normally this would be an error.
`:prefix`: (`Xgit.FilePath`) if present, returns the `object_id` for the tree at
the given subdirectory. If not present, writes a tree corresponding to the root.
(The entire tree is written in either case.)
## Return Value
`{:ok, object_id}` with the object ID for the tree that was generated. (If the exact tree
specified by the index already existed, it will return that existing tree's ID.)
`{:error, :incomplete_merge}` if any entry in the index file is not fully merged.
`{:error, :objects_missing}` if any of the objects referenced by the index
are not present in the object store. (Exception: If `missing_ok?` is `true`,
then this condition will be ignored.)
`{:error, :prefix_not_found}` if `prefix` was specified, but that prefix is not referenced
in the index file.
Reason codes may also come from the following functions:
* `Xgit.DirCache.from_iodevice/1`
* `Xgit.DirCache.to_tree_objects/2`
* `Xgit.Repository.Storage.put_loose_object/2`
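## Example
A minimal sketch (assumes `working_tree` is a valid process with a fully-merged index):
{:ok, tree_id} = write_tree(working_tree, missing_ok?: true)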
"""
@spec write_tree(working_tree :: t, missing_ok?: boolean, prefix: FilePath.t()) ::
{:ok, object_id :: ObjectId.t()} | {:error, reason :: write_tree_reason}
def write_tree(working_tree, opts \\ []) when is_pid(working_tree) do
{missing_ok?, prefix} = validate_write_tree_options(opts)
GenServer.call(working_tree, {:write_tree, missing_ok?, prefix})
end
defp validate_write_tree_options(opts) do
missing_ok? = Keyword.get(opts, :missing_ok?, false)
unless is_boolean(missing_ok?) do
raise ArgumentError,
"Xgit.Repository.WorkingTree.write_tree/2: missing_ok? #{inspect(missing_ok?)} is invalid"
end
prefix = Keyword.get(opts, :prefix, [])
unless prefix == [] or FilePath.valid?(prefix) do
raise ArgumentError,
"Xgit.Repository.WorkingTree.write_tree/2: prefix #{inspect(prefix)} is invalid (should be a charlist, not a String)"
end
{missing_ok?, prefix}
end
defp handle_write_tree(
missing_ok?,
prefix,
%{repository: repository, index_path: index_path} = state
) do
with {:ok, %DirCache{entries: entries} = dir_cache} <- parse_index_file_if_exists(index_path),
{:merged?, true} <- {:merged?, DirCache.fully_merged?(dir_cache)},
{:has_all_objects?, true} <-
{:has_all_objects?, has_all_objects?(repository, entries, missing_ok?)},
{:ok, objects, %Object{id: object_id}} <- DirCache.to_tree_objects(dir_cache, prefix),
:ok <- write_all_objects(repository, objects) do
cover {:reply, {:ok, object_id}, state}
else
{:error, reason} -> cover {:reply, {:error, reason}, state}
{:merged?, false} -> cover {:reply, {:error, :incomplete_merge}, state}
{:has_all_objects?, false} -> cover {:reply, {:error, :objects_missing}, state}
end
end
defp has_all_objects?(repository, entries, missing_ok?)
defp has_all_objects?(_repository, _entries, true), do: cover(true)
defp has_all_objects?(repository, entries, false) do
entries
|> Enum.chunk_every(100)
|> Enum.all?(fn entries_chunk ->
Storage.has_all_object_ids?(
repository,
Enum.map(entries_chunk, fn %{object_id: id} -> id end)
)
end)
end
defp write_all_objects(repository, objects)
defp write_all_objects(_repository, []), do: cover(:ok)
defp write_all_objects(repository, [object | tail]) do
case Storage.put_loose_object(repository, object) do
:ok -> write_all_objects(repository, tail)
{:error, :object_exists} -> write_all_objects(repository, tail)
{:error, reason} -> cover {:error, reason}
end
end
defp parse_index_file_if_exists(index_path) do
with true <- File.exists?(index_path),
{:ok, iodevice} when is_pid(iodevice) <- TrailingHashDevice.open_file(index_path) do
res = DirCache.from_iodevice(iodevice)
:ok = File.close(iodevice)
res
else
false -> cover {:ok, DirCache.empty()}
{:error, reason} -> cover {:error, reason}
end
end
defp write_index_file(dir_cache, index_path) do
with {:ok, iodevice}
when is_pid(iodevice) <- TrailingHashDevice.open_file_for_write(index_path),
:ok <- DirCache.to_iodevice(dir_cache, iodevice),
:ok <- File.close(iodevice) do
:ok
else
{:error, reason} -> {:error, reason}
end
end
@impl true
def handle_call(:valid_working_tree?, _from, state), do: {:reply, :valid_working_tree, state}
def handle_call(:dir_cache, _from, state), do: handle_dir_cache(state)
def handle_call(:reset_dir_cache, _from, state), do: handle_reset_dir_cache(state)
def handle_call({:update_dir_cache, add, remove}, _from, state),
do: handle_update_dir_cache(add, remove, state)
def handle_call({:read_tree, object_id, missing_ok?}, _from, state),
do: handle_read_tree(object_id, missing_ok?, state)
def handle_call({:write_tree, missing_ok?, prefix}, _from, state),
do: handle_write_tree(missing_ok?, prefix, state)
def handle_call(message, _from, state) do
Logger.warn("WorkingTree received unrecognized call #{inspect(message)}")
{:reply, {:error, :unknown_message}, state}
end
@impl true
def handle_info({:DOWN, _ref, :process, _object, reason}, state), do: {:stop, reason, state}
end
|
lib/xgit/repository/working_tree.ex
| 0.859723
| 0.412589
|
working_tree.ex
|
starcoder
|
defmodule KinesisClient.Stream do
@moduledoc """
This is the entry point for processing the shards of a Kinesis Data Stream.
"""
use Supervisor
require Logger
import KinesisClient.Util
alias KinesisClient.Stream.Coordinator
@doc """
Starts a `KinesisClient.Stream` process.
## Options
* `:stream_name` - Required. The Kinesis Data Stream to process.
* `:app_name` - Required. This should be a unique name across all your applications and the DynamoDB
tablespace in your AWS region
* `:name` - The process name. Defaults to `KinesisClient.Stream`.
* `:max_demand` - The maximum number of records to retrieve from Kinesis. Defaults to 100.
* `:aws_region` - AWS region. Will rely on ExAws defaults if not set.
* `:shard_supervisor` - The child_spec for the Supervisor that monitors the ProcessingPipelines.
Must implement the DynamicSupervisor behaviour.
* `:lease_renew_interval`(optional) - How long (in milliseconds) a lease will be held before a renewal is attempted.
* `:lease_expiry`(optional) - The length of time in milliseconds that a lease lasts for. If a
lease is not renewed within this time frame, then that lease is considered expired and can be
taken by another process.
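## Example
A minimal sketch (names are illustrative; the consumer module must implement the Broadway behaviour):
KinesisClient.Stream.start_link(
stream_name: "my-stream",
app_name: "my_app",
shard_consumer: MyApp.ShardConsumer
)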
"""
def start_link(opts) do
Supervisor.start_link(__MODULE__, opts, name: Keyword.get(opts, :name, __MODULE__))
end
def init(opts) do
stream_name = get_stream_name(opts)
app_name = get_app_name(opts)
worker_ref = "worker-#{:rand.uniform(10_000)}"
{shard_supervisor_spec, shard_supervisor_name} = get_shard_supervisor(opts)
coordinator_name = get_coordinator_name(opts)
shard_consumer = get_shard_consumer(opts)
shard_args = [
app_name: opts[:app_name],
coordinator_name: coordinator_name,
stream_name: stream_name,
lease_owner: worker_ref,
shard_consumer: shard_consumer,
processors: opts[:processors],
batchers: opts[:batchers]
]
shard_args =
shard_args
|> optional_kw(:app_state_opts, Keyword.get(opts, :app_state_opts))
|> optional_kw(:lease_renew_interval, Keyword.get(opts, :lease_renew_interval))
|> optional_kw(:lease_expiry, Keyword.get(opts, :lease_expiry))
coordinator_args = [
name: coordinator_name,
stream_name: stream_name,
app_name: app_name,
app_state_opts: Keyword.get(opts, :app_state_opts, []),
shard_supervisor_name: shard_supervisor_name,
worker_ref: worker_ref,
shard_args: shard_args
]
children = [
shard_supervisor_spec,
{Coordinator, coordinator_args}
]
Logger.debug(
"Starting KinesisClient.Stream: [app_name: #{app_name}, stream_name: {stream_name}]"
)
Supervisor.init(children, strategy: :one_for_all)
end
defp get_coordinator_name(opts) do
case Keyword.get(opts, :shard_supervisor) do
nil -> Module.concat(KinesisClient.Stream.Coordinator, opts[:stream_name])
# Shard processes may be running on nodes different from the Coordinator if the
# :shard_supervisor passed in is distributed, so use :global to allow inter-node communication.
_ -> {:global, Module.concat(KinesisClient.Stream.Coordinator, opts[:stream_name])}
end
end
defp get_stream_name(opts) do
case Keyword.get(opts, :stream_name) do
nil -> raise ArgumentError, message: "Missing required option :stream_name"
x when is_binary(x) -> x
_ -> raise ArgumentError, message: ":stream_name must be a binary"
end
end
defp get_app_name(opts) do
case Keyword.get(opts, :app_name) do
nil -> raise ArgumentError, message: "Missing required option :app_name"
x when is_binary(x) -> x
_ -> raise ArgumentError, message: ":app_name must be a binary"
end
end
@spec get_shard_supervisor(keyword) ::
{{module, keyword}, name :: any} | no_return
defp get_shard_supervisor(opts) do
name = Module.concat(KinesisClient.Stream.ShardSupervisor, opts[:stream_name])
{{DynamicSupervisor, [strategy: :one_for_one, name: name]}, name}
end
defp get_shard_consumer(opts) do
case Keyword.get(opts, :shard_consumer) do
nil ->
raise ArgumentError,
message:
"Missing required option :shard_consumer. Must be a module implementing the Broadway behaviour"
x when is_atom(x) ->
x
_ ->
raise ArgumentError, message: ":shard_processor option must be a module name"
end
end
end
|
lib/kinesis_client/stream.ex
| 0.836354
| 0.456107
|
stream.ex
|
starcoder
|
defmodule ElixirFilms.Movies do
@moduledoc """
The Movies context.
"""
import Ecto.Query, warn: false
alias ElixirFilms.{
Repo,
Movies.Movie,
Movies.ImportMovieJob
}
@doc """
Returns the list of movies.
## Examples
iex> list_movies(%{"page" => 1})
[%Movie{}, ...]
"""
def list_movies(params) do
Movie
|> order_by(:id)
|> Repo.paginate(page: params["page"])
end
@doc """
Gets a single movie.
Raises `Ecto.NoResultsError` if the Movie does not exist.
## Examples
iex> get_movie!(123)
%Movie{}
iex> get_movie!(456)
** (Ecto.NoResultsError)
"""
def get_movie!(id) when is_integer(id), do: Repo.get!(Movie, id)
def get_movie!(parsed_title: parsed_title) do
query = from movie in Movie, where: movie.parsed_title == ^parsed_title
Repo.one!(query)
end
@doc """
Creates a movie.
## Examples
iex> create_movie(%{field: value})
{:ok, %Movie{}}
iex> create_movie(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_movie(attrs \\ %{}) do
%Movie{}
|> Movie.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a movie.
## Examples
iex> update_movie(movie, %{field: new_value})
{:ok, %Movie{}}
iex> update_movie(movie, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_movie(%Movie{} = movie, attrs) do
movie
|> Movie.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a movie.
## Examples
iex> delete_movie(movie)
{:ok, %Movie{}}
iex> delete_movie(movie)
{:error, %Ecto.Changeset{}}
"""
def delete_movie(%Movie{} = movie) do
Repo.delete(movie)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking movie changes.
## Examples
iex> change_movie(movie)
%Ecto.Changeset{data: %Movie{}}
"""
def change_movie(%Movie{} = movie, attrs \\ %{}) do
Movie.changeset(movie, attrs)
end
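@doc """
Enqueues a background `ImportMovieJob` for each movie in the list.
Each element is expected to be a map with a "title" key, e.g.
`import([%{"title" => "Casablanca"}])`.
"""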
def import(movies_list) do
Enum.each(movies_list, &Que.add(ImportMovieJob, Map.get(&1, "title")))
end
end
|
lib/elixir_films/movies.ex
| 0.837221
| 0.431764
|
movies.ex
|
starcoder
|
defmodule LatticeObserver.Observed.Lattice do
@annotation_app_spec "wasmcloud.dev/appspec"
@moduledoc """
The root structure of an observed lattice. An observed lattice is essentially an
event sourced aggregate whose state is determined by application of a stream of
lattice events
"""
alias __MODULE__
alias LatticeObserver.Observed.{
Provider,
Host,
Actor,
Instance,
LinkDefinition,
Decay,
EventProcessor,
Invocation
}
require Logger
# We need the keys to be there, even if they hold empty lists
@enforce_keys [:actors, :providers, :hosts, :linkdefs]
defstruct [
:actors,
:providers,
:hosts,
:linkdefs,
:refmap,
:instance_tracking,
:parameters,
:invocation_log
]
@typedoc """
A provider key is the provider's public key accompanied by the link name
"""
@type provider_key :: {String.t(), String.t()}
@type actormap :: %{required(String.t()) => [Actor.t()]}
@type providermap :: %{required(provider_key()) => Provider.t()}
@type hostmap :: %{required(String.t()) => Host.t()}
# map between OCI image URL/imageref and public key
@type refmap :: %{required(String.t()) => String.t()}
@type entitystatus :: :healthy | :warn | :fail | :unavailable | :remove
@typedoc """
Keys are the instance ID, values are ISO 8601 timestamps in UTC
"""
@type instance_trackmap :: %{required(String.t()) => DateTime.t()}
defmodule Parameters do
@type t :: %Parameters{
host_status_decay_rate_seconds: integer()
}
defstruct [:host_status_decay_rate_seconds]
end
@typedoc """
The root structure of an observed lattice. An observed lattice keeps track
of the actors, providers, link definitions, and hosts within it.
"""
@type t :: %Lattice{
actors: actormap(),
providers: providermap(),
hosts: hostmap(),
linkdefs: [LinkDefinition.t()],
instance_tracking: instance_trackmap(),
refmap: refmap(),
invocation_log: Invocation.invocationlog_map(),
parameters: Parameters.t()
}
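@doc """
Creates a new, empty observed lattice. For example:
`Lattice.new(host_status_decay_rate_seconds: 60)`
"""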
@spec new(Keyword.t()) :: LatticeObserver.Observed.Lattice.t()
def new(parameters \\ []) do
%Lattice{
actors: %{},
providers: %{},
hosts: %{},
linkdefs: [],
instance_tracking: %{},
refmap: %{},
invocation_log: %{},
parameters: %Parameters{
host_status_decay_rate_seconds:
Keyword.get(parameters, :host_status_decay_rate_seconds, 35)
}
}
end
# Note to those unfamiliar with Elixir matching syntax. The following function
# patterns are not "complete" or "exact" matches. They only match the _minimum_ required shape of
# the event in question. Events can have more fields with which the pattern isn't
# concerned and the pattern match will still be successful. Where possible, we've tried to use
# ignored variables to indicate required fields that aren't used for internal processing.
@spec apply_event(
t(),
Cloudevents.Format.V_1_0.Event.t()
) :: t()
def apply_event(
l = %Lattice{},
%Cloudevents.Format.V_1_0.Event{
data: data,
datacontenttype: "application/json",
source: source_host,
time: stamp,
type: "com.wasmcloud.lattice.host_heartbeat"
}
) do
EventProcessor.record_heartbeat(l, source_host, stamp, data)
end
def apply_event(
l = %Lattice{},
%Cloudevents.Format.V_1_0.Event{
data: data,
datacontenttype: "application/json",
source: source_host,
time: stamp,
type: "com.wasmcloud.lattice.host_started"
}
) do
labels = Map.get(data, "labels", %{})
friendly_name = Map.get(data, "friendly_name", "")
EventProcessor.record_host(l, source_host, labels, stamp, friendly_name)
end
def apply_event(
l = %Lattice{},
%Cloudevents.Format.V_1_0.Event{
datacontenttype: "application/json",
source: _source_host,
type: "com.wasmcloud.lattice.invocation_succeeded",
data: %{
"source" =>
%{
"public_key" => _pk,
"contract_id" => _cid,
"link_name" => _ln
} = source,
"dest" =>
%{
"public_key" => _pk2,
"contract_id" => _cid2,
"link_name" => _ln2
} = dest,
"operation" => operation,
"bytes" => bytes
}
}
) do
Invocation.record_invocation_success(l, source, dest, operation, bytes)
end
def apply_event(
l = %Lattice{},
%Cloudevents.Format.V_1_0.Event{
datacontenttype: "application/json",
source: _source_host,
type: "com.wasmcloud.lattice.invocation_failed",
data: %{
"source" =>
%{
"public_key" => _pk,
"contract_id" => _cid,
"link_name" => _ln
} = source,
"dest" =>
%{
"public_key" => _pk2,
"contract_id" => _cid2,
"link_name" => _ln2
} = dest,
"operation" => operation,
"bytes" => bytes
}
}
) do
Invocation.record_invocation_failed(l, source, dest, operation, bytes)
end
def apply_event(
l = %Lattice{},
%Cloudevents.Format.V_1_0.Event{
datacontenttype: "application/json",
source: source_host,
type: "com.wasmcloud.lattice.host_stopped"
}
) do
EventProcessor.remove_host(l, source_host)
end
def apply_event(
l = %Lattice{},
%Cloudevents.Format.V_1_0.Event{
data: %{
"public_key" => _public_key
},
datacontenttype: "application/json",
source: _source_host,
type: "com.wasmcloud.lattice.health_check_passed"
}
) do
# TODO update the status of entity that passed check
l
end
def apply_event(
l = %Lattice{},
%Cloudevents.Format.V_1_0.Event{
data: %{
"public_key" => _public_key
},
datacontenttype: "application/json",
source: _source_host,
type: "com.wasmcloud.lattice.health_check_failed"
}
) do
# TODO update status of entity that failed check
l
end
def apply_event(
l = %Lattice{},
%Cloudevents.Format.V_1_0.Event{
data:
%{
"public_key" => pk,
"link_name" => link_name,
"contract_id" => contract_id,
"instance_id" => instance_id
} = d,
time: stamp,
source: source_host,
datacontenttype: "application/json",
type: "com.wasmcloud.lattice.provider_started"
}
) do
annotations = Map.get(d, "annotations", %{})
spec = Map.get(annotations, @annotation_app_spec, "")
claims = Map.get(d, "claims", %{})
EventProcessor.put_provider_instance(
l,
source_host,
pk,
link_name,
contract_id,
instance_id,
spec,
stamp,
claims
)
end
def apply_event(
l = %Lattice{},
%Cloudevents.Format.V_1_0.Event{
source: _source_host,
datacontenttype: "application/json",
time: _stamp,
type: "com.wasmcloud.lattice.provider_start_failed"
}
) do
# This does not currently affect state, but shouldn't generate a warning either
l
end
def apply_event(
l = %Lattice{},
%Cloudevents.Format.V_1_0.Event{
data:
%{
"link_name" => link_name,
"public_key" => pk,
"instance_id" => instance_id
} = d,
datacontenttype: "application/json",
source: source_host,
time: _stamp,
type: "com.wasmcloud.lattice.provider_stopped"
}
) do
annotations = Map.get(d, "annotations", %{})
spec = Map.get(annotations, @annotation_app_spec, "")
EventProcessor.remove_provider_instance(l, source_host, pk, link_name, instance_id, spec)
end
def apply_event(
l = %Lattice{},
%Cloudevents.Format.V_1_0.Event{
data:
%{
"public_key" => pk,
"instance_id" => instance_id
} = d,
datacontenttype: "application/json",
source: source_host,
type: "com.wasmcloud.lattice.actor_stopped"
}
) do
spec = Map.get(d, "annotations", %{}) |> Map.get(@annotation_app_spec, "")
EventProcessor.remove_actor_instance(l, source_host, pk, instance_id, spec)
end
def apply_event(
l = %Lattice{},
%Cloudevents.Format.V_1_0.Event{
data:
%{
"public_key" => pk,
"instance_id" => instance_id
} = d,
source: source_host,
datacontenttype: "application/json",
time: stamp,
type: "com.wasmcloud.lattice.actor_started"
}
) do
spec = Map.get(d, "annotations", %{}) |> Map.get(@annotation_app_spec, "")
claims = Map.get(d, "claims", %{})
EventProcessor.put_actor_instance(l, source_host, pk, instance_id, spec, stamp, claims)
end
def apply_event(
l = %Lattice{},
%Cloudevents.Format.V_1_0.Event{
source: _source_host,
datacontenttype: "application/json",
time: _stamp,
type: "com.wasmcloud.lattice.actor_start_failed"
}
) do
# This does not currently affect state, but shouldn't generate a warning either
l
end
def apply_event(
l = %Lattice{},
%Cloudevents.Format.V_1_0.Event{
data: %{
"actor_id" => actor_id,
"link_name" => link_name,
"contract_id" => contract_id,
"provider_id" => provider_id,
"values" => values
},
source: _source_host,
datacontenttype: "application/json",
type: "com.wasmcloud.lattice.linkdef_set"
}
) do
EventProcessor.put_linkdef(l, actor_id, link_name, provider_id, contract_id, values)
end
def apply_event(
l = %Lattice{},
%Cloudevents.Format.V_1_0.Event{
data: %{
"actor_id" => actor_id,
"link_name" => link_name,
"provider_id" => provider_id,
"contract_id" => _contract_id
},
source: _source_host,
datacontenttype: "application/json",
type: "com.wasmcloud.lattice.linkdef_deleted"
}
) do
EventProcessor.del_linkdef(l, actor_id, link_name, provider_id)
end
def apply_event(l = %Lattice{}, %Cloudevents.Format.V_1_0.Event{
data: %{
"oci_url" => image_ref,
"public_key" => public_key
},
source: _source_host,
datacontenttype: "application/json",
type: "com.wasmcloud.lattice.refmap_set"
}) do
%Lattice{l | refmap: Map.put(l.refmap, image_ref, public_key)}
end
def apply_event(l = %Lattice{}, %Cloudevents.Format.V_1_0.Event{
data: %{
"oci_url" => image_ref
},
source: _source_host,
datacontenttype: "application/json",
type: "com.wasmcloud.lattice.refmap_del"
}) do
%Lattice{l | refmap: Map.delete(l.refmap, image_ref)}
end
def apply_event(l = %Lattice{}, %Cloudevents.Format.V_1_0.Event{
source: _source_host,
datacontenttype: "application/json",
type: "com.wasmcloud.synthetic.decay_ticked",
time: stamp
}) do
event_time = EventProcessor.timestamp_from_iso8601(stamp)
Decay.age_hosts(l, event_time)
end
def apply_event(l = %Lattice{}, evt) do
Logger.warn("Unexpected event: #{inspect(evt)}")
l
end
@spec running_instances(
LatticeObserver.Observed.Lattice.t(),
nil | String.t(),
String.t()
) :: [%{id: String.t(), instance_id: String.t(), host_id: String.t()}]
def running_instances(%Lattice{} = l, pk, spec_id) when is_binary(pk) do
if String.starts_with?(pk, "M") do
actors_in_appspec(l, spec_id)
|> Enum.map(fn %{actor_id: pk, instance_id: iid, host_id: hid} ->
%{id: pk, instance_id: iid, host_id: hid}
end)
else
providers_in_appspec(l, spec_id)
|> Enum.map(fn %{provider_id: pk, instance_id: iid, host_id: hid} ->
%{id: pk, instance_id: iid, host_id: hid}
end)
end
end
def running_instances(%Lattice{}, nil, _spec_id) do
[]
end
@spec actors_in_appspec(LatticeObserver.Observed.Lattice.t(), binary) :: [
%{actor_id: String.t(), instance_id: String.t(), host_id: String.t()}
]
def actors_in_appspec(%Lattice{actors: actors}, appspec) when is_binary(appspec) do
for {pk, %Actor{instances: instances}} <- actors,
instance <- instances,
in_spec?(instance, appspec) do
%{
actor_id: pk,
instance_id: instance.id,
host_id: instance.host_id
}
end
end
@spec providers_in_appspec(LatticeObserver.Observed.Lattice.t(), binary) :: [
%{
provider_id: String.t(),
link_name: String.t(),
host_id: String.t(),
instance_id: String.t()
}
]
def providers_in_appspec(%Lattice{providers: providers}, appspec) when is_binary(appspec) do
for {{pk, link_name}, %Provider{instances: instances, contract_id: contract_id}} <- providers,
instance <- instances,
in_spec?(instance, appspec) do
%{
provider_id: pk,
link_name: link_name,
contract_id: contract_id,
host_id: instance.host_id,
instance_id: instance.id
}
end
end
def lookup_linkdef(%Lattice{linkdefs: linkdefs}, actor_id, provider_id, link_name) do
case Enum.filter(linkdefs, fn ld ->
ld.actor_id == actor_id && ld.provider_id == provider_id && ld.link_name == link_name
end) do
[h | _] -> {:ok, h}
[] -> :error
end
end
def lookup_ociref(%Lattice{refmap: refmap}, target) when is_binary(target) do
case Map.get(refmap, target) do
nil -> :error
pk -> {:ok, pk}
end
end
@spec lookup_invocation_log(
Lattice.t(),
Invocation.Entity.t(),
Invocation.Entity.t(),
String.t()
) ::
:error | {:ok, Invocation.Log.t()}
def lookup_invocation_log(%Lattice{invocation_log: log}, from, to, operation) do
case Map.get(log, {from, to, operation}) do
nil -> :error
il -> {:ok, il}
end
end
@spec get_all_invocation_logs(Lattice.t()) :: [Invocation.InvocationLog.t()]
def get_all_invocation_logs(%Lattice{} = l) do
Map.values(l.invocation_log)
end
defp in_spec?(%Instance{spec_id: spec_id}, appspec) do
spec_id == appspec
end
end
|
lib/lattice_observer/observed/lattice.ex
| 0.806091
| 0.404272
|
lattice.ex
|
starcoder
|
defmodule Extatus.Metric.Counter do
@moduledoc """
This module defines a wrapper over `Prometheus.Metric.Counter` functions to
be compatible with `Extatus` way of handling metrics.
"""
alias Extatus.Settings
@metric Settings.extatus_counter_mod()
@doc """
Creates a counter using the `name` of a metric.
"""
defmacro new(name) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Counter
name = unquote(name)
case unquote(caller).get_spec(name) do
{unquote(module), spec} ->
unquote(metric).new(spec)
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Creates a counter using the `name` of a `metric`. If the counter exists,
returns false.
"""
defmacro declare(name) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Counter
name = unquote(name)
case unquote(caller).get_spec(name) do
{unquote(module), spec} ->
unquote(metric).declare(spec)
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Increments the counter identified by `name` and `values` (keyword list with
the correspondence between labels and values) by `value`.
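For example, inside a module that defines `gen_spec/2` (metric name and labels are illustrative):
Counter.inc(:http_requests_total, method: "GET")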
"""
defmacro inc(name, values, value \\ 1) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Counter
name = unquote(name)
case unquote(caller).gen_spec(name, unquote(values)) do
{unquote(module), spec} ->
unquote(metric).inc(spec, unquote(value))
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Increments the counter identified by `name` and `values` (keyword list with
the correspondence between labels and values) by `value`. If `value` has ever been
a float (even once!), you should not use `inc/3` after `dinc/3`.
"""
defmacro dinc(name, values, value \\ 1) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Counter
name = unquote(name)
case unquote(caller).gen_spec(name, unquote(values)) do
{unquote(module), spec} ->
unquote(metric).dinc(spec, unquote(value))
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Removes counter series identified by `name` and `values` (keyword list with
the correspondence between labels and values).
"""
defmacro remove(name, values) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Counter
name = unquote(name)
case unquote(caller).gen_spec(name, unquote(values)) do
{unquote(module), spec} ->
unquote(metric).remove(spec)
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Resets the value of the counter identified by `name` and `values`
(keyword list with the correspondence between labels and values).
"""
defmacro reset(name, values) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Counter
name = unquote(name)
case unquote(caller).gen_spec(name, unquote(values)) do
{unquote(module), spec} ->
unquote(metric).reset(spec)
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
@doc """
Returns the value of the counter identified by `name` and `values`
(keyword list with the correspondence between labels and values).
"""
defmacro value(name, values) do
module = __MODULE__
caller = __CALLER__.module()
metric = @metric
quote do
require Prometheus.Metric.Counter
name = unquote(name)
case unquote(caller).gen_spec(name, unquote(values)) do
{unquote(module), spec} ->
unquote(metric).value(spec)
_ ->
raise %Prometheus.UnknownMetricError{registry: nil, name: name}
end
end
end
end
|
lib/extatus/metric/counter.ex
| 0.846657
| 0.550245
|
counter.ex
|
starcoder
|
defmodule FalconPlusApi.Api.Alarm do
alias Maxwell.Conn
alias FalconPlusApi.{Util, Sig, Api}
@doc """
* [Session](#/authentication) Required
### Request
Content-type: application/x-www-form-urlencoded
```event_id=s_165_cef145900bf4e2a4a0db8b85762b9cdb ```
### Response
```Status: 200```
```
[
{
"closed_at": null,
"closed_note": "",
"cond": "0 != 66",
"current_step": 3,
"endpoint": "agent2",
"expression_id": 0,
"func": "all(#1)",
"id": "s_165_cef145900bf4e2a4a0db8b85762b9cdb",
"metric": "cpu.idle",
"note": "\u5a13\ue103\u2502\u935b\u5a45\ue11f\u9477\ue044\u5aca\u93c7\u5b58\u67ca",
"priority": 0,
"process_note": 56603,
"process_status": "ignored",
"status": "PROBLEM",
"step": 300,
"strategy_id": 165,
"template_id": 45,
"timestamp": "2017-03-23T15:51:11+08:00",
"tpl_creator": "root",
"update_at": "2016-06-23T05:00:00+08:00",
"user_modified": 0
}
]
```
For error responses, see the [response status codes documentation](#/response-status-codes).
"""
def eventcases_get_by_id(sig, addr, opts \\ []) do
sig = Sig.get_sig(sig)
~s</api/v1/alarm/eventcases>
|> Util.url(addr)
|> Conn.new()
|> Api.set_opts(opts)
|> Conn.put_req_header("Apitoken", sig)
|> Api.get
|> Api.get_result
end
@doc """
* [Session](#/authentication) Required
### Request
```
{
"endTime": 1480521600,
"limit": 10,
"process_status": "ignored,unresolved",
"startTime": 1466956800,
"status": "PROBLEM",
"endpoints": ["agent4"],
"strategy_id": 46,
"template_id": 126
}
```
### Response
```Status: 200```
```
[
{
"closed_at": null,
"closed_note": "",
"cond": "48.33759590792839 > 40",
"current_step": 1,
"endpoint": "agent4",
"expression_id": 0,
"func": "all(#3)",
"id": "s_46_1ac45122afb893adc02fbd30154ac303",
"metric": "cpu.iowait",
"note": "CPU I/O wait\u74d2\u5470\u7e4340",
"priority": 1,
"process_note": 16907,
"process_status": "ignored",
"status": "PROBLEM",
"step": 1,
"strategy_id": 46,
"template_id": 126,
"timestamp": "2016-08-01T06:25:00+08:00",
"tpl_creator": "root",
"update_at": "2016-08-01T06:25:00+08:00",
"user_modified": 0
}
]
```
For error responses, see the [response status codes documentation](#/response-status-codes).
"""
def eventcases_list(sig, addr, opts \\ []) do
sig = Sig.get_sig(sig)
~s</api/v1/alarm/eventcases>
|> Util.url(addr)
|> Conn.new()
|> Api.set_opts(opts)
|> Conn.put_req_header("Apitoken", sig)
|> Api.post
|> Api.get_result
end
@doc """
* [Session](#/authentication) Required
### Request
```
{
"event_id": "s_165_cef145900bf4e2a4a0db8b85762b9cdb",
"note": "test note",
"status": "comment"
}
```
### Response
```Status: 200```
```
{
"id": "s_165_cef145900bf4e2a4a0db8b85762b9cdb",
"message": "add note to s_165_cef145900bf4e2a4a0db8b85762b9cdb successfuled"
}
```
For error responses, see the [response status codes documentation](#/response-status-codes).
"""
def eventnote_create(sig, addr, opts \\ []) do
sig = Sig.get_sig(sig)
~s</api/v1/alarm/event_note>
|> Util.url(addr)
|> Conn.new()
|> Api.set_opts(opts)
|> Conn.put_req_header("Apitoken", sig)
|> Api.post
|> Api.get_result
end
@doc """
* [Session](#/authentication) Required
### Request
Content-type: application/x-www-form-urlencoded
```endTime=1466697600&startTime=1466611200```
or
```event_id=s_165_cef145900bf4e2a4a0db8b85762b9cdb```
### Response
```Status: 200```
```
[
{
"case_id": "",
"event_caseId": "s_165_cef145900bf4e2a4a0db8b85762b9cdb",
"note": "test",
"status": "ignored",
"timestamp": "2016-06-23T05:39:09+08:00",
"user": "root"
},
{
"case_id": "",
"event_caseId": "s_165_9d223f126e7ecb3477cd6806f1ee9656",
"note": "Ignored by user",
"status": "ignored",
"timestamp": "2016-06-23T05:38:56+08:00",
"user": "root"
}
]
```
For error responses, see the [response status codes documentation](#/response-status-codes).
"""
def eventnote_get(sig, addr, opts \\ []) do
sig = Sig.get_sig(sig)
~s</api/v1/alarm/event_note>
|> Util.url(addr)
|> Conn.new()
|> Api.set_opts(opts)
|> Conn.put_req_header("Apitoken", sig)
|> Api.get
|> Api.get_result
end
@doc """
* [Session](#/authentication) Required
### Request
Content-type: application/x-www-form-urlencoded
Key|Value
"""
def events_create(sig, addr, opts \\ []) do
sig = Sig.get_sig(sig)
~s</api/v1/alarm/events>
|> Util.url(addr)
|> Conn.new()
|> Api.set_opts(opts)
|> Conn.put_req_header("Apitoken", sig)
|> Api.post
|> Api.get_result
end
end
|
lib/falcon_plus_api/api/alarm.ex
| 0.705278
| 0.648181
|
alarm.ex
|
starcoder
|
defmodule Terrasol do
@moduledoc """
Various utility functions to assist with some of the
unique requirements for Earthstar documents.
"""
defimpl Jason.Encoder, for: [Terrasol.Author, Terrasol.Workspace, Terrasol.Path] do
def encode(struct, opts) do
Jason.Encode.string(struct.string, opts)
end
end
@doc """
Encode the Base32 standard for Earthstar
## Examples
iex> Terrasol.bencode("🤡💩")
"b6cp2jipqt6jks"
iex> Terrasol.bencode("abcdef")
"bmfrggzdfmy"
"""
@base_opts [case: :lower, padding: false]
def bencode(bits) do
"b" <> Base.encode32(bits, @base_opts)
end
@doc """
Decode the Base32 standard for Earthstar
## Examples
iex> Terrasol.bdecode("b6cp2jipqt6jks")
"🤡💩"
iex> Terrasol.bdecode("bmfrggzdfmy")
"abcdef"
iex> Terrasol.bdecode("mfrggzdfmy")
:error
"""
def bdecode(encoded_string)
def bdecode(<<"b", string::binary>>) do
case Base.decode32(string, @base_opts) do
:error -> :error
{:ok, val} -> val
end
end
def bdecode(_), do: :error
@doc """
Convert a duration into a number of microseconds.
Integer durations are taken as a number of seconds.
Keyword lists are interpreted for the implemented durations.
Unimplemented items are treated as 0
:weeks, :days, :hours, :minutes
:seconds, :milliseconds, :microseconds
## Examples
iex> Terrasol.duration_us(600)
600000000
iex> Terrasol.duration_us(minutes: 10, microseconds: 321)
600000321
iex> Terrasol.duration_us("600s")
0
"""
def duration_us(duration)
def duration_us(duration) when is_integer(duration), do: duration_us(seconds: duration)
def duration_us(duration) do
try do
sum_dur(duration, duration |> Keyword.keys() |> Enum.uniq(), 0)
rescue
_ -> 0
end
end
defp sum_dur(_, [], acc), do: acc
@multipliers %{
:microseconds => 1,
:milliseconds => 1000,
:seconds => 1_000_000,
:minutes => 60_000_000,
:hours => 3_600_000_000,
:days => 86_400_000_000,
:weeks => 604_800_000_000
}
defp sum_dur(dur, [label | rest], acc) do
{vals, nd} = dur |> Keyword.pop_values(label)
# Skip unknowns
mul =
case Map.fetch(@multipliers, label) do
{:ok, val} -> val
:error -> 0
end
sum_dur(nd, rest, acc + Enum.sum(vals) * mul)
end
end
|
lib/terrasol.ex
| 0.730001
| 0.536495
|
terrasol.ex
|
starcoder
|
defmodule Timex.Comparable.Diff do
@moduledoc false
alias Timex.Types
alias Timex.Duration
alias Timex.Comparable
@units [:years, :months, :weeks, :calendar_weeks, :days,
:hours, :minutes, :seconds, :milliseconds, :microseconds,
:duration]
@spec diff(Types.microseconds, Types.microseconds, Comparable.granularity) :: integer
def diff(a, a, granularity) when is_integer(a), do: zero(granularity)
def diff(a, b, granularity) when is_integer(a) and is_integer(b) and is_atom(granularity) do
do_diff(a, b, granularity)
end
defp do_diff(a, a, type), do: zero(type)
defp do_diff(a, b, :duration), do: Duration.from_seconds(do_diff(a,b,:seconds))
defp do_diff(a, b, :microseconds), do: a - b
defp do_diff(a, b, :milliseconds), do: div(a - b, 1_000)
defp do_diff(a, b, :seconds), do: div(a - b, 1_000*1_000)
defp do_diff(a, b, :minutes), do: div(a - b, 1_000*1_000*60)
defp do_diff(a, b, :hours), do: div(a - b, 1_000*1_000*60*60)
defp do_diff(a, b, :days), do: div(a - b, 1_000*1_000*60*60*24)
defp do_diff(a, b, :weeks), do: div(a - b, 1_000*1_000*60*60*24*7)
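# Calendar weeks count week boundaries crossed rather than elapsed 7-day
# periods: expand the interval to whole weeks, diff in days, then round
# any partial week away from zero.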
defp do_diff(a, b, :calendar_weeks) do
adate = :calendar.gregorian_seconds_to_datetime(div(a, 1_000*1_000))
bdate = :calendar.gregorian_seconds_to_datetime(div(b, 1_000*1_000))
days = cond do
a > b ->
ending = Timex.end_of_week(adate)
start = Timex.beginning_of_week(bdate)
endu = Timex.to_gregorian_microseconds(ending)
startu = Timex.to_gregorian_microseconds(start)
do_diff(endu, startu, :days)
:else ->
ending = Timex.end_of_week(bdate)
start = Timex.beginning_of_week(adate)
endu = Timex.to_gregorian_microseconds(ending)
startu = Timex.to_gregorian_microseconds(start)
do_diff(startu, endu, :days)
end
cond do
days >= 0 && rem(days, 7) != 0 -> div(days, 7) + 1
days <= 0 && rem(days, 7) != 0 -> div(days, 7) - 1
:else -> div(days, 7)
end
end
defp do_diff(a, b, :months) do
div(do_diff(a, b, :days), 30)
end
defp do_diff(a, b, :years) do
div(do_diff(a, b, :months), 12)
end
defp do_diff(_, _, granularity) when granularity not in @units,
do: {:error, {:invalid_granularity, granularity}}
defp zero(:duration), do: Duration.zero
defp zero(_type), do: 0
end
|
deps/timex/lib/comparable/diff.ex
| 0.783906
| 0.709384
|
diff.ex
|
starcoder
|
defmodule Raft.Log do
@moduledoc """
The `Log` module provides an api for all log operations. Since the log process
is the only process that can access the underlying log store we cache several
values in the log process state.
"""
use GenServer
require Logger
alias Raft.{
Config,
Log.Entry,
Log.Metadata,
LogStore,
}
@type index :: non_neg_integer()
@type log_term :: non_neg_integer()
@type candidate :: atom() | :none
@type metadata :: %{
term: non_neg_integer(),
voted_for: atom() | nil,
}
def start_link([name, config]) do
GenServer.start_link(__MODULE__, {name, config}, name: log_name(name))
end
@doc """
Appends new entries to the log and returns the latest index.
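A minimal sketch (assumes `:node1` is a started log and `entry` is a `Raft.Log.Entry` struct):
{:ok, last_index} = Raft.Log.append(:node1, [entry])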
"""
@spec append(atom(), [Entry.t]) :: {:ok, index()} | {:error, term()}
def append(name, entries) when is_list(entries) do
call(name, {:append, entries})
end
@doc """
Gets the entry at the given index.
"""
@spec get_entry(atom(), index()) :: {:ok, Entry.t} | {:error, :not_found}
def get_entry(name, index) do
call(name, {:get_entry, index})
end
@doc """
Gets the current term.
"""
@spec get_term(atom()) :: log_term()
def get_term(name) do
%{term: term} = get_metadata(name)
term
end
@doc """
Gets the current metadata for the server.
"""
@spec get_metadata(atom()) :: metadata()
def get_metadata(name) do
call(name, :get_metadata)
end
@doc """
Sets metadata.
"""
@spec set_metadata(atom(), candidate(), log_term()) :: :ok
def set_metadata(name, candidate, term) do
call(name, {:set_metadata, candidate, term})
end
@doc """
Gets the current configuration.
"""
def get_configuration(name), do: call(name, :get_configuration)
@doc """
Deletes all log entries in the range, inclusive.
"""
def delete_range(name, a, b) when a <= b do
call(name, {:delete_range, a, b})
end
def delete_range(_name, _, _) do
:ok
end
@doc """
Returns the last entry in the log. If there are no entries then it returns an
`:error`.
"""
@spec last_entry(atom()) :: {:ok, Entry.t} | :empty
def last_entry(name) do
call(name, :last_entry)
end
@doc """
Returns the index of the last entry in the log.
"""
@spec last_index(atom()) :: non_neg_integer()
def last_index(name) do
case last_entry(name) do
{:ok, %{index: index}} ->
index
:empty ->
0
end
end
@doc """
Returns the term of the last entry in the log. If the log is empty returns
0.
"""
@spec last_term(atom()) :: non_neg_integer()
def last_term(name) do
case last_entry(name) do
{:ok, %{term: term}} ->
term
:empty ->
0
end
end
def init({name, opts}) do
Logger.info("#{log_name(name)}: Restoring old state", metadata: name)
{:ok, log_store} = case name do
{me, _} ->
LogStore.open(Config.db_path(me, opts))
^name ->
LogStore.open(Config.db_path(name, opts))
end
metadata = LogStore.get_metadata(log_store)
last_index = LogStore.last_index(log_store)
state = %{
name: name,
log_store: log_store,
metadata: metadata,
last_index: last_index,
configuration: nil,
}
state = init_log(state)
{:ok, state}
end
def handle_call({:append, entries}, _from, state) do
state = append_entries(state, entries)
{:reply, {:ok, state.last_index}, state}
end
def handle_call({:get_entry, index}, _from, state) do
result = LogStore.get_entry(state.log_store, index)
{:reply, result, state}
end
def handle_call(:get_metadata, _from, %{metadata: meta}=state) do
{:reply, meta, state}
end
def handle_call({:set_metadata, cand, term}, _from, state) do
metadata = %Metadata{voted_for: cand, term: term}
:ok = LogStore.store_metadata(state.log_store, metadata)
{:reply, :ok, %{state | metadata: metadata}}
end
def handle_call(:get_configuration, _from, %{configuration: config}=state) do
{:reply, config, state}
end
def handle_call({:delete_range, a, b}, _from, state) do
:ok = LogStore.delete_range(state.log_store, a..b)
last_index = LogStore.last_index(state.log_store)
state = %{state | last_index: last_index}
{:reply, :ok, state}
end
def handle_call(:last_entry, _from, state) do
case LogStore.last_entry(state.log_store) do
{:ok, :empty} ->
{:reply, :empty, state}
{:ok, entry} ->
{:reply, {:ok, entry}, state}
end
end
defp call(name, msg), do: GenServer.call(log_name(name), msg)
defp log_name({name, _}), do: log_name(name)
defp log_name(name), do: :"#{name}_log"
defp apply_entry(state, %{type: :config, data: data}) do
%{state | configuration: data}
end
defp apply_entry(state, _) do
state
end
defp init_log(state) do
case LogStore.has_data?(state.log_store) do
true ->
Logger.debug("#{log_name(state.name)} has data")
restore_configuration(state)
false ->
configuration = %Raft.Configuration{}
entry = Entry.configuration(0, configuration)
entry = %{entry | index: 0}
{:ok, last_index} = LogStore.store_entries(state.log_store, [entry])
%{state | configuration: configuration, last_index: last_index}
end
end
defp append_entries(state, entries) do
Enum.reduce entries, state, fn entry, state ->
entry = add_index(entry, state.last_index)
{:ok, last_index} = LogStore.store_entries(state.log_store, [entry])
Logger.debug("#{log_name(state.name)}: Stored up to #{last_index}")
state = apply_entry(state, entry)
%{state | last_index: entry.index}
end
end
defp add_index(%{index: :none}=entry, index) do
%{entry | index: index+1}
end
defp add_index(entry, _), do: entry
defp restore_configuration(%{log_store: log_store, last_index: index}=state) do
log_store
|> LogStore.slice(0..index)
|> Enum.filter(&Entry.configuration?/1)
|> Enum.reduce(state, fn entry, old_state -> apply_entry(old_state, entry) end)
end
end
|
lib/raft/log.ex
| 0.841272
| 0.587647
|
log.ex
|
starcoder
|
defmodule Dune.AtomMapping do
@moduledoc false
alias Dune.{Success, Failure}
@type substitute_atom :: atom
@type substitute_string :: String.t()
@type original_string :: String.t()
@type sub_mapping :: %{optional(substitute_atom) => original_string}
@type extra_info :: %{optional(substitute_atom) => :wrapped}
@typedoc """
Should be considered opaque
"""
@type t :: %__MODULE__{atoms: sub_mapping, modules: sub_mapping, extra_info: extra_info}
@enforce_keys [:atoms, :modules, :extra_info]
defstruct @enforce_keys
@spec new :: t()
def new do
%__MODULE__{atoms: %{}, modules: %{}, extra_info: %{}}
end
@spec from_atoms([{substitute_atom, original_string}], [{substitute_atom, :wrapped}]) :: t
def from_atoms(list, extra_info) when is_list(list) do
atoms = build_mapping(list)
extra_info = Map.new(extra_info)
%__MODULE__{atoms: atoms, modules: %{}, extra_info: extra_info}
end
@spec add_modules(t, [{substitute_atom, original_string}]) :: t
def add_modules(mapping = %__MODULE__{}, list) do
%{mapping | modules: build_mapping(list)}
end
defp build_mapping(list) do
Map.new(list, fn {substitute_atom, original_string}
when is_atom(substitute_atom) and is_binary(original_string) ->
{substitute_atom, original_string}
end)
end
@spec to_string(t, atom) :: String.t()
def to_string(mapping = %__MODULE__{}, atom) when is_atom(atom) do
case lookup_original_string(mapping, atom) do
{:atom, string} -> string
{:wrapped_atom, string} -> string
{:module, string} -> "Elixir.#{string}"
:error -> Atom.to_string(atom)
end
end
@spec inspect(t, atom) :: String.t()
def inspect(mapping = %__MODULE__{}, atom) when is_atom(atom) do
case lookup_original_string(mapping, atom) do
{:atom, string} -> ":#{string}"
{:wrapped_atom, string} -> ~s(:"#{string}")
{:module, string} -> string
:error -> inspect(atom)
end
end
@spec lookup_original_string(t, atom) :: {:atom | :wrapped_atom | :module, String.t()} | :error
def lookup_original_string(mapping = %__MODULE__{}, atom) when is_atom(atom) do
case mapping.modules do
%{^atom => string} ->
{:module, string}
_ ->
case mapping.atoms do
%{^atom => string} ->
case mapping.extra_info do
%{^atom => :wrapped} -> {:wrapped_atom, string}
_ -> {:atom, string}
end
_ ->
:error
end
end
end
@spec replace_in_string(t, String.t()) :: String.t()
def replace_in_string(mapping, string) when is_binary(string) do
if string =~ "Dune" do
do_replace_in_string(mapping, string)
else
string
end
end
@dune_atom_regex ~r/(Dune_Atom_\d+__|a?__Dune_atom_\d+__|Dune_Module_\d+__)/
defp do_replace_in_string(mapping = %__MODULE__{}, string) do
string_replace_map =
%{}
|> build_replace_map(mapping.modules, nil, &inspect/1)
|> build_replace_map(mapping.atoms, mapping.extra_info, &Atom.to_string/1)
String.replace(string, @dune_atom_regex, &Map.get(string_replace_map, &1, &1))
end
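# Hedged example (substitute names invented for illustration): with
# mapping.atoms == %{Dune_Atom_1__: "my_secret"}, the replace map is
# %{"Dune_Atom_1__" => "my_secret"}, so ":Dune_Atom_1__" in inspected
# output is rendered back as ":my_secret"; module substitutes are keyed
# via inspect/1 and restored the same way.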
defp build_replace_map(map, sub_mapping, extra_info, to_string_fun) do
for {substitute_atom, original_string} <- sub_mapping, into: map do
replace_by =
case extra_info do
%{^substitute_atom => :wrapped} -> ~s("#{original_string}")
_ -> original_string
end
{to_string_fun.(substitute_atom), replace_by}
end
end
@spec replace_in_result(t, Success.t() | Failure.t()) ::
Success.t() | Failure.t()
def replace_in_result(mapping, result)
def replace_in_result(mapping, %Success{} = success) do
%{success | inspected: replace_in_string(mapping, success.inspected)}
end
def replace_in_result(mapping, %Failure{} = error) do
%{error | message: replace_in_string(mapping, error.message)}
end
@spec to_existing_atom(t, String.t()) :: atom
def to_existing_atom(mapping = %__MODULE__{}, string) when is_binary(string) do
case fetch_existing_atom(mapping, string) do
nil -> String.to_existing_atom(string)
atom -> atom
end
end
defp fetch_existing_atom(mapping, "Elixir." <> module_name) do
Enum.find_value(mapping.modules, fn {substitute_atom, original_string} ->
if original_string == module_name do
substitute_atom
end
end)
end
defp fetch_existing_atom(mapping, atom_name) do
Enum.find_value(mapping.atoms, fn {substitute_atom, original_string} ->
if original_string == atom_name do
substitute_atom
end
end)
end
end
|
lib/dune/atom_mapping.ex
| 0.813831
| 0.488222
|
atom_mapping.ex
|
starcoder
|
defmodule Jumubase.Showtime.Results do
alias Jumubase.JumuParams
alias Jumubase.Foundation.AgeGroups
alias Jumubase.Showtime.{Appearance, Performance}
@doc """
Returns a mapping from point ranges to ratings, depending on round.
"""
def ratings_for_round(0) do
%{
(23..25) => "mit hervorragendem Erfolg teilgenommen",
(21..22) => "mit sehr gutem Erfolg teilgenommen",
(17..20) => "mit gutem Erfolg teilgenommen",
(9..16) => "mit Erfolg teilgenommen",
(0..8) => "teilgenommen"
}
end
def ratings_for_round(1) do
%{
(9..12) => "mit gutem Erfolg teilgenommen",
(5..8) => "mit Erfolg teilgenommen",
(0..4) => "teilgenommen"
}
end
def ratings_for_round(2) do
%{
(14..16) => "mit gutem Erfolg teilgenommen",
(11..13) => "mit Erfolg teilgenommen",
(0..10) => "teilgenommen"
}
end
@doc """
Returns a mapping from point ranges to prizes, depending on round.
"""
def prizes_for_round(0), do: %{}
def prizes_for_round(1) do
%{
(21..25) => "1. Preis",
(17..20) => "2. Preis",
(13..16) => "3. Preis"
}
end
def prizes_for_round(2) do
%{
(23..25) => "1. Preis",
(20..22) => "2. Preis",
(17..19) => "3. Preis"
}
end
@doc """
Returns the prize resulting from the points in the given round, or nil if no prize is awarded.
"""
def get_prize(points, round) do
prizes_for_round(round) |> lookup(points)
end
@doc """
Returns the rating resulting from the points in the given round.
"""
def get_rating(points, round) do
ratings_for_round(round) |> lookup(points)
end
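# Hedged doctest-style examples, derived directly from the tables above:
#
#     iex> Jumubase.Showtime.Results.get_prize(21, 1)
#     "1. Preis"
#
#     iex> Jumubase.Showtime.Results.get_rating(3, 1)
#     "teilgenommen"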
def advances?(%Performance{contest_category: cc} = p) do
%{
min_advancing_age_group: min_ag,
max_advancing_age_group: max_ag
} = cc
# Check age group range, then decide based on first non-accompanist
Enum.all?([min_ag, max_ag]) and
AgeGroups.in_range?(p.age_group, min_ag, max_ag) and
Performance.non_accompanists(p) |> hd |> may_advance?
end
@doc """
Returns whether an appearance advances, using data from its parent performance.
"""
def advances?(%Appearance{performance_id: id} = a, %Performance{id: id} = p) do
may_advance?(a) and advances?(p)
end
def gets_wespe_nomination?(
%Appearance{performance_id: id} = a,
%Performance{id: id, contest_category: cc}
) do
may_get_wespe_nomination?(a) and cc.allows_wespe_nominations
end
@doc """
Returns whether an appearance might be ineligible for the next round
(for example: pop accompanist groups) and should be checked by a human.
"""
def needs_eligibility_check?(
%Appearance{performance_id: id, role: "accompanist", points: points},
%Performance{id: id} = p,
round
) do
advances?(p) and
has_acc_group?(p) and
(round > 1 or points not in JumuParams.advancing_point_range())
end
def needs_eligibility_check?(_appearance, _performance, _round), do: false
# Private helpers
defp may_advance?(%Appearance{role: "accompanist"}), do: false
defp may_advance?(%Appearance{points: points}) do
points in JumuParams.advancing_point_range()
end
defp may_get_wespe_nomination?(%Appearance{points: points}) do
points in JumuParams.wespe_nomination_point_range()
end
defp lookup(point_mapping, points) do
Enum.find_value(point_mapping, fn {point_range, result} ->
if points in point_range, do: result, else: false
end)
end
defp has_acc_group?(p) do
p |> Performance.accompanists() |> length > 1
end
end
|
lib/jumubase/showtime/results.ex
| 0.688468
| 0.437643
|
results.ex
|
starcoder
|
defmodule Sneex.Ops.Increment do
@moduledoc """
This represents the op codes for incrementing a value (INC, INX, and INY).
"""
defstruct [:disasm_override, :bit_size, :cycle_mods, :address_mode]
alias Sneex.Address.{Absolute, CycleCalculator, DirectPage, Indexed, Mode, Register}
alias Sneex.{Cpu, CpuHelper}
use Bitwise
@type t :: %__MODULE__{
disasm_override: nil | String.t(),
bit_size: :bit8 | :bit16,
cycle_mods: list(CycleCalculator.t()),
address_mode: any()
}
@spec new(Cpu.t()) :: nil | __MODULE__.t()
def new(cpu) do
cpu |> Cpu.read_opcode() |> new(cpu)
end
@spec new(byte(), Cpu.t()) :: nil | __MODULE__.t()
def new(0x1A, cpu) do
addr_mode = :acc |> Register.new()
bit_size = cpu |> Cpu.acc_size()
mods = [CycleCalculator.constant(2)]
%__MODULE__{bit_size: bit_size, cycle_mods: mods, address_mode: addr_mode}
end
def new(0xEE, cpu) do
addr_mode = true |> Absolute.new()
bit_size = cpu |> Cpu.acc_size()
mods = [CycleCalculator.constant(6), CycleCalculator.acc_is_16_bit(2)]
%__MODULE__{bit_size: bit_size, cycle_mods: mods, address_mode: addr_mode}
end
def new(0xE6, cpu) do
addr_mode = DirectPage.new()
bit_size = cpu |> Cpu.acc_size()
mods = [
CycleCalculator.constant(5),
CycleCalculator.acc_is_16_bit(2),
CycleCalculator.low_direct_page_is_not_zero(1)
]
%__MODULE__{bit_size: bit_size, cycle_mods: mods, address_mode: addr_mode}
end
def new(0xFE, cpu) do
addr_mode = true |> Absolute.new() |> Indexed.new(:x)
bit_size = cpu |> Cpu.acc_size()
mods = [CycleCalculator.constant(7), CycleCalculator.acc_is_16_bit(2)]
%__MODULE__{bit_size: bit_size, cycle_mods: mods, address_mode: addr_mode}
end
def new(0xF6, cpu) do
addr_mode = DirectPage.new() |> Indexed.new(:x)
bit_size = cpu |> Cpu.acc_size()
mods = [
CycleCalculator.constant(6),
CycleCalculator.acc_is_16_bit(2),
CycleCalculator.low_direct_page_is_not_zero(1)
]
%__MODULE__{bit_size: bit_size, cycle_mods: mods, address_mode: addr_mode}
end
def new(0xE8, cpu) do
addr_mode = :x |> Register.new()
bit_size = cpu |> Cpu.index_size()
mods = [CycleCalculator.constant(2)]
%__MODULE__{
disasm_override: "INX",
bit_size: bit_size,
cycle_mods: mods,
address_mode: addr_mode
}
end
def new(0xC8, cpu) do
addr_mode = :y |> Register.new()
bit_size = cpu |> Cpu.index_size()
mods = [CycleCalculator.constant(2)]
%__MODULE__{
disasm_override: "INY",
bit_size: bit_size,
cycle_mods: mods,
address_mode: addr_mode
}
end
def new(_opcode, _cpu), do: nil
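# Hedged usage sketch (assumes a `cpu` value built elsewhere via Sneex.Cpu):
#
#     op = Sneex.Ops.Increment.new(0x1A, cpu)     # INC A (accumulator form)
#     Sneex.Ops.Opcode.total_cycles(op, cpu)      # constant 2-cycle modifier
#     Sneex.Ops.Opcode.disasm(op, cpu)            # "INC ..." via Mode.disasm/2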
defimpl Sneex.Ops.Opcode do
def byte_size(%{address_mode: mode}, cpu), do: 1 + Mode.byte_size(mode, cpu)
def total_cycles(%{cycle_mods: mods}, cpu) do
cpu |> CycleCalculator.calc_cycles(mods)
end
def execute(%{address_mode: mode, bit_size: bit_size}, cpu) do
{data, cpu} = mode |> Mode.fetch(cpu) |> increment(bit_size, cpu)
mode |> Mode.store(cpu, data)
end
defp increment(value, bit_size, cpu = %Cpu{}) do
new_value = bit_size |> determine_mask() |> band(value + 1)
%{negative: nf, zero: zf} = CpuHelper.check_flags_for_value(new_value, bit_size)
new_cpu = cpu |> Cpu.zero_flag(zf) |> Cpu.negative_flag(nf)
{new_value, new_cpu}
end
defp determine_mask(:bit8), do: 0xFF
defp determine_mask(:bit16), do: 0xFFFF
def disasm(%{disasm_override: nil, address_mode: mode}, cpu),
do: "INC #{Mode.disasm(mode, cpu)}"
def disasm(%{disasm_override: override}, _cpu), do: override
end
end
|
lib/sneex/ops/increment.ex
| 0.822403
| 0.408483
|
increment.ex
|
starcoder
|
defmodule SPARQL.Query.Result.Format do
@moduledoc """
A behaviour for SPARQL query result formats.
A `SPARQL.Query.Result.Format` for a format can be implemented like this
defmodule SomeFormat do
use SPARQL.Query.Result.Format
import RDF.Sigils
@id ~I<http://example.com/some_format>
@name :some_format
@extension "ext"
@media_type "application/some-format"
end
When `@id`, `@name`, `@extension` and `@media_type` module attributes are
defined, the respective behaviour functions are generated automatically and return
these values.
Then you'll have to do the main work by implementing a
`RDF.Serialization.Encoder` and a `RDF.Serialization.Decoder` for the format.
By default, it is assumed that these are defined in `Encoder` and `Decoder`
modules under the module of the format, i.e. in the example above in
`SomeFormat.Encoder` and `SomeFormat.Decoder`.
If you want them in another module, you'll have to override the `encoder/0`
and/or `decoder/0` functions in your `SPARQL.Query.Result.Format` module.
"""
@doc """
An IRI of the SPARQL query result format.
"""
@callback id :: RDF.IRI.t
@doc """
A name atom of the SPARQL query result format under which it can be referenced.
"""
@callback name :: atom
@doc """
The usual file extension for the SPARQL query result format.
"""
@callback extension :: binary
@doc """
The MIME type of the SPARQL query result format.
"""
@callback media_type :: binary
@doc """
A list of the supported query forms of the SPARQL query result format.
"""
@callback supported_query_forms :: [SPARQL.Query.forms]
@doc """
The `SPARQL.Query.Result.Format.Decoder` module for the result format.
"""
@callback decoder :: module
@doc """
The `SPARQL.Query.Result.Format.Encoder` module for the result format.
"""
@callback encoder :: module
defmacro __using__(_) do
quote bind_quoted: [], unquote: true do
@behaviour unquote(__MODULE__)
@decoder __MODULE__.Decoder
@encoder __MODULE__.Encoder
def decoder, do: @decoder
def encoder, do: @encoder
defdelegate decode(results, opts \\ []), to: @decoder
defdelegate decode!(results, opts \\ []), to: @decoder
# defdelegate encode(content, opts \\ []), to: @encoder
# defdelegate encode!(content, opts \\ []), to: @encoder
# This is just to be API-compatible with RDF.Serialization.format.
# TODO: Should we introduce a similar Reader-/Writer-functionality here or introduce RDF.Serialization.decode?
defdelegate read_string(results, opts \\ []), to: __MODULE__, as: :decode
defoverridable [decoder: 0, encoder: 0]
@before_compile unquote(__MODULE__)
end
end
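# Hedged sketch of what `use SPARQL.Query.Result.Format` generates for the
# `SomeFormat` example in the moduledoc: `SomeFormat.decoder/0` returns
# `SomeFormat.Decoder`, `SomeFormat.decode/2` delegates to it, and
# `__before_compile__/1` below turns `@id`, `@name` etc. into functions.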
defmacro __before_compile__(_env) do
quote do
if !Module.defines?(__MODULE__, {:id, 0}) &&
Module.get_attribute(__MODULE__, :id) do
def id, do: @id
end
if !Module.defines?(__MODULE__, {:name, 0}) &&
Module.get_attribute(__MODULE__, :name) do
def name, do: @name
end
if !Module.defines?(__MODULE__, {:extension, 0}) &&
Module.get_attribute(__MODULE__, :extension) do
def extension, do: @extension
end
if !Module.defines?(__MODULE__, {:media_type, 0}) &&
Module.get_attribute(__MODULE__, :media_type) do
def media_type, do: @media_type
end
if !Module.defines?(__MODULE__, {:supported_query_forms, 0}) &&
Module.get_attribute(__MODULE__, :supported_query_forms) do
def supported_query_forms, do: @supported_query_forms
end
end
end
end
|
lib/sparql/query/result/format.ex
| 0.736874
| 0.491822
|
format.ex
|
starcoder
|
defmodule AWS.CloudHSMV2 do
@moduledoc """
For more information about AWS CloudHSM, see [AWS
CloudHSM](http://aws.amazon.com/cloudhsm/) and the [AWS CloudHSM User
Guide](https://docs.aws.amazon.com/cloudhsm/latest/userguide/).
"""
@doc """
Copy an AWS CloudHSM cluster backup to a different region.
"""
def copy_backup_to_region(client, input, options \\ []) do
request(client, "CopyBackupToRegion", input, options)
end
@doc """
Creates a new AWS CloudHSM cluster.
"""
def create_cluster(client, input, options \\ []) do
request(client, "CreateCluster", input, options)
end
@doc """
Creates a new hardware security module (HSM) in the specified AWS CloudHSM
cluster.
"""
def create_hsm(client, input, options \\ []) do
request(client, "CreateHsm", input, options)
end
@doc """
Deletes a specified AWS CloudHSM backup. A backup can be restored up to 7
days after the DeleteBackup request is made. For more information on
restoring a backup, see `RestoreBackup`.
"""
def delete_backup(client, input, options \\ []) do
request(client, "DeleteBackup", input, options)
end
@doc """
Deletes the specified AWS CloudHSM cluster. Before you can delete a
cluster, you must delete all HSMs in the cluster. To see if the cluster
contains any HSMs, use `DescribeClusters`. To delete an HSM, use
`DeleteHsm`.
"""
def delete_cluster(client, input, options \\ []) do
request(client, "DeleteCluster", input, options)
end
@doc """
Deletes the specified HSM. To specify an HSM, you can use its identifier
(ID), the IP address of the HSM's elastic network interface (ENI), or the
ID of the HSM's ENI. You need to specify only one of these values. To find
these values, use `DescribeClusters`.
"""
def delete_hsm(client, input, options \\ []) do
request(client, "DeleteHsm", input, options)
end
@doc """
Gets information about backups of AWS CloudHSM clusters.
This is a paginated operation, which means that each response might contain
only a subset of all the backups. When the response contains only a subset
of backups, it includes a `NextToken` value. Use this value in a subsequent
`DescribeBackups` request to get more backups. When you receive a response
with no `NextToken` (or an empty or null value), that means there are no
more backups to get.
"""
def describe_backups(client, input, options \\ []) do
request(client, "DescribeBackups", input, options)
end
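# Hedged pagination sketch (client construction and result keys are
# assumptions based on the AWS API shape described above):
#
#     {:ok, %{"Backups" => backups, "NextToken" => token}, _resp} =
#       AWS.CloudHSMV2.describe_backups(client, %{"MaxResults" => 25})
#     # A non-nil token means another page is available:
#     AWS.CloudHSMV2.describe_backups(client, %{"NextToken" => token})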
@doc """
Gets information about AWS CloudHSM clusters.
This is a paginated operation, which means that each response might contain
only a subset of all the clusters. When the response contains only a subset
of clusters, it includes a `NextToken` value. Use this value in a
subsequent `DescribeClusters` request to get more clusters. When you
receive a response with no `NextToken` (or an empty or null value), that
means there are no more clusters to get.
"""
def describe_clusters(client, input, options \\ []) do
request(client, "DescribeClusters", input, options)
end
@doc """
Claims an AWS CloudHSM cluster by submitting the cluster certificate issued
by your issuing certificate authority (CA) and the CA's root certificate.
Before you can claim a cluster, you must sign the cluster's certificate
signing request (CSR) with your issuing CA. To get the cluster's CSR, use
`DescribeClusters`.
"""
def initialize_cluster(client, input, options \\ []) do
request(client, "InitializeCluster", input, options)
end
@doc """
Gets a list of tags for the specified AWS CloudHSM cluster.
This is a paginated operation, which means that each response might contain
only a subset of all the tags. When the response contains only a subset of
tags, it includes a `NextToken` value. Use this value in a subsequent
`ListTags` request to get more tags. When you receive a response with no
`NextToken` (or an empty or null value), that means there are no more tags
to get.
"""
def list_tags(client, input, options \\ []) do
request(client, "ListTags", input, options)
end
@doc """
Restores a specified AWS CloudHSM backup that is in the `PENDING_DELETION`
state. For more information on deleting a backup, see `DeleteBackup`.
"""
def restore_backup(client, input, options \\ []) do
request(client, "RestoreBackup", input, options)
end
@doc """
Adds or overwrites one or more tags for the specified AWS CloudHSM cluster.
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Removes the specified tag or tags from the specified AWS CloudHSM cluster.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, Poison.Parser.t() | nil, Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "cloudhsm"}
host = build_host("cloudhsmv2", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "BaldrApiService.#{action}"}
]
payload = Poison.Encoder.encode(input, %{})
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, nil, response}
{:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/cloud_hsm_v2.ex
| 0.774754
| 0.569553
|
cloud_hsm_v2.ex
|
starcoder
|
defmodule Appsignal.Instrumentation.Helpers do
@moduledoc """
Helper functions and macros to instrument function calls.
"""
alias Appsignal.{Transaction, TransactionRegistry}
@type instrument_arg :: Transaction.t | Plug.Conn.t | pid() | nil
@doc """
Execute the given function in start / finish event calls in the current
transaction. See `instrument/6`.
"""
@spec instrument(String.t, String.t, function) :: any
def instrument(name, title, function) do
instrument(self(), name, title, "", function)
end
@doc """
Execute the given function in start / finish event calls. See `instrument/6`.
"""
@spec instrument(instrument_arg, String.t, String.t, function) :: any
def instrument(arg, name, title, function) do
instrument(arg, name, title, "", function)
end
@doc """
Execute the given function in start / finish event calls. See `instrument/6`.
"""
@spec instrument(instrument_arg, String.t, String.t, String.t, function) :: any
def instrument(arg, name, title, body, function) do
instrument(arg, name, title, body, 0, function)
end
@doc """
Execute the given function in start / finish event calls.
The result of the function's execution is returned. For example, to
instrument a backend HTTP call in a Phoenix controller, do the
following:
```
import Appsignal.Instrumentation.Helpers, only: [instrument: 4]
def index(conn, _params) do
result = instrument "net.http", "Some slow backend call", fn() ->
Backend.get_result()
end
json conn, result
end
```
"""
@spec instrument(instrument_arg, String.t, String.t, String.t, integer, function) :: any
def instrument(pid, name, title, body, body_format, function) when is_pid(pid) do
t = TransactionRegistry.lookup(pid)
instrument(t, name, title, body, body_format, function)
end
def instrument(%Transaction{} = transaction, name, title, body, body_format, function) do
Transaction.start_event(transaction)
result = function.()
Transaction.finish_event(transaction, name, title, body, body_format)
result
end
def instrument(nil, _name, _title, _body, _body_format, function) do
function.()
end
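# Hedged sketch: instrument calls may be nested, each wrapping its own
# start/finish event pair on the same transaction (`Cache.get/1` is a
# stand-in for your own code):
#
#     instrument "query.users", "Load users", fn ->
#       instrument "cache.read", "Check cache", fn -> Cache.get(:users) end
#     end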
end
|
lib/appsignal/instrumentation/helpers.ex
| 0.865778
| 0.81549
|
helpers.ex
|
starcoder
|
defmodule Terminus.Bitbus do
@moduledoc """
Module for interfacing with the [Bitbus](https://bitbus.network) API.
Bitbus is a powerful API that allows you to crawl a filtered subset of the
Bitcoin blockchain. Bitbus indexes blocks, thus can be used to crawl
**confirmed** transactions.
> We run a highly efficient bitcoin crawler which indexes the bitcoin
> blockchain. Then we expose the index as an API so anyone can easily crawl
> ONLY the subset of bitcoin by crawling bitbus.
## Schema
Bitbus transaction documents have the following schema:
%{
"_id" => "...", # Bitbus document ID
"blk" => %{
"h" => "...", # Block hash
"i" => int, # Block height
"t" => int, # Block timestamp
},
"in" => [...], # List of inputs
"lock" => int, # Lock time
"out" => [...], # List of outputs
"tx" => %{
"h" => "..." # Transaction hash
}
}
Terminus supports both of the Bitbus public endpoints. The endpoint can be
selected by passing the `:host` option to any API method.
The available endpoints are:
* `:txo` - Query and return transactions in the [Transaction Object](https://bitquery.planaria.network/#/?id=txo) schema. Default.
* `:bob` - Query and return transactions in the [Bitcoin OP_RETURN Bytecode](https://bitquery.planaria.network/#/?id=bob) schema.
```
iex> Terminus.Bitbus.fetch(query, token: token, host: :bob)
```
## Usage
For smaller queries of limited results, `fetch/2` can be used to return a list
of transactions:
iex> Terminus.Bitbus.fetch(%{
...> find: %{ "out.s2" => "1LtyME6b5AnMopQrBPLk4FGN8UBuhxKqrn" },
...> sort: %{ "blk.i": -1 },
...> limit: 5
...> }, token: token)
{:ok, [
%{
"tx" => %{"h" => "fca7bdd7658613418c54872212811cf4c5b4f8ee16864eaf70cb1393fb0df6ca"},
...
},
...
]}
For larger crawl operations, use `crawl/3` to stream results into your own
data processing pipeline.
iex> Terminus.Bitbus.crawl!(query, token: token)
...> |> Stream.map(&Terminus.BitFS.scan_tx/1)
...> |> Stream.each(&save_to_db/1)
...> |> Stream.run
:ok
"""
use Terminus.HTTPStream,
hosts: [
txo: "https://txo.bitbus.network",
bob: "https://bob.bitbus.network"
],
headers: [
{"cache-control", "no-cache"},
{"content-type", "application/json"}
]
@doc """
Crawls Bitbus for transactions using the given query and returns a stream.
Returns the result in an `:ok` / `:error` tuple pair.
All requests must provide a valid [Planaria token](https://token.planaria.network).
By default a streaming `t:Enumerable.t/0` is returned. Optionally a linked
GenStage [`pid`](`t:pid/0`) can be returned for use in combination with your
own GenStage consumer.
If a [`callback`](`t:Terminus.callback/0`) is provided, the stream is
automatically run and the callback is called on each transaction.
## Options
The accepted options are:
* `token` - Planaria authentication token. **Required**.
* `host` - The Bitbus host. Defaults to "https://txo.bitbus.network".
* `stage` - Return a linked GenStage [`pid`](`t:pid/0`). Defaults to `false`.
## Examples
Queries should be in the form of any valid [Bitquery](https://bitquery.planaria.network/).
query = %{
find: %{ "out.s2" => "1LtyME6b5AnMopQrBPLk4FGN8UBuhxKqrn" }
}
By default `Terminus.Bitbus.crawl/3` returns a streaming `t:Enumerable.t/0`.
iex> Terminus.Bitbus.crawl(query, token: token)
{:ok, %Stream{}}
Optionally the [`pid`](`t:pid/0`) of the GenStage producer can be returned.
iex> Terminus.Bitbus.crawl(query, token: token, stage: true)
{:ok, #PID<>}
If a [`callback`](`t:Terminus.callback/0`) is provided, the function
returns `:ok` and the callback is called on each transaction.
iex> Terminus.Bitbus.crawl(query, [token: token], fn tx ->
...> IO.inspect tx
...> end)
:ok
"""
@spec crawl(Terminus.bitquery, keyword, Terminus.callback) ::
{:ok, Enumerable.t | pid} | :ok |
{:error, Exception.t}
def crawl(query, options \\ [], ondata \\ nil)
def crawl(query, options, nil) when is_function(options),
do: crawl(query, [], options)
def crawl(%{} = query, options, ondata),
do: query |> normalize_query |> Jason.encode! |> crawl(options, ondata)
def crawl(query, options, ondata) when is_binary(query) do
options = options
|> Keyword.take([:token, :host, :stage])
|> Keyword.put(:body, query)
|> Keyword.put(:decoder, :ndjson)
case request(:stream, "POST", "/block", options) do
{:ok, pid} when is_pid(pid) ->
{:ok, pid}
{:ok, stream} ->
handle_callback(stream, ondata)
{:error, error} ->
{:error, error}
end
end
@doc """
As `crawl/3` but returns the result or raises an exception if it fails.
"""
@spec crawl!(Terminus.bitquery, keyword, Terminus.callback) ::
Enumerable.t | pid | :ok
def crawl!(query, options \\ [], ondata \\ nil) do
case crawl(query, options, ondata) do
:ok -> :ok
{:ok, stream} ->
stream
{:error, error} ->
raise error
end
end
@doc """
Crawls Bitbus for transactions using the given query and returns a list of
transactions.
Returns the result in an `:ok` / `:error` tuple pair.
This function is suitable for smaller limited queries as the entire result set
is loaded into memory and returned. For large crawls, `crawl/3` is preferred
as the results can be streamed and processed more efficiently.
## Options
The accepted options are:
* `token` - Planaria authentication token. **Required**.
* `host` - The Bitbus host. Defaults to "https://txo.bitbus.network".
## Examples
iex> Terminus.Bitbus.fetch(query, token: token)
{:ok, [
%{
"tx" => %{"h" => "bbae7aa467cb34010c52033691f6688e00d9781b2d24620cab51827cd517afb8"},
...
},
...
]}
"""
@spec fetch(Terminus.bitquery, keyword) ::
{:ok, list} |
{:error, Exception.t}
def fetch(query, options \\ [])
def fetch(%{} = query, options),
do: query |> normalize_query |> Jason.encode! |> fetch(options)
def fetch(query, options) when is_binary(query) do
options = Keyword.take(options, [:token, :host])
|> Keyword.put(:body, query)
|> Keyword.put(:decoder, :ndjson)
request(:fetch, "POST", "/block", options)
end
@doc """
As `fetch/2` but returns the result or raises an exception if it fails.
"""
@spec fetch!(Terminus.bitquery, keyword) :: list
def fetch!(query, options \\ []) do
case fetch(query, options) do
{:ok, data} ->
data
{:error, error} ->
raise error
end
end
@doc """
Fetches and returns the current status of Bitbus.
Returns the result in an `:ok` / `:error` tuple pair. The returned [`map`](`t:map/0`)
of data includes the current block header as well as the total number of
transactions in the block (`count`).
## Examples
iex> Terminus.Bitbus.status
{:ok, %{
"hash" => "00000000000000000249f9bd0e127ebc62125a72686a7470fe4a41e4add2deb1",
"prevHash" => "000000000000000001b3ea9c4c073226c6233583ab510ea10ce01faff17c89f3",
"merkleRoot" => "fb19b1ff5cd838cfca399f9f2e46fa3a0e9f0da0459f91bfd6d02a75756f3a4d",
"time" => 1586457301,
"bits" => 402821143,
"nonce" => 1471454449,
"height" => 629967,
"count" => 2193
}}
"""
@spec status(keyword) :: {:ok, map} | {:error, Exception.t}
def status(options \\ []) do
options = Keyword.take(options, [:host])
case request(:fetch, "GET", "/status", options) do
{:ok, body} ->
Jason.decode(body)
{:error, error} ->
{:error, error}
end
end
@doc """
As `status/1` but returns the result or raises an exception if it fails.
"""
@spec status!(keyword) :: map
def status!(options \\ []) do
case status(options) do
{:ok, data} ->
data
{:error, error} ->
raise error
end
end
end
|
lib/terminus/bitbus.ex
| 0.890014
| 0.871639
|
bitbus.ex
|
starcoder
|
defmodule Upvest.Tenancy.Historical.HDBlock do
@moduledoc """
HDBlock represents block object from historical data API
"""
defstruct [
:number,
:hash,
:parent_hash,
:nonce,
:sha3uncles,
:transactions_root,
:receipts_root,
:miner,
:difficulty,
:total_difficulty,
:extra_data,
:size,
:gas_limit,
:gas_used,
:transactions,
:timestamp,
:uncles
]
end
defmodule Upvest.Tenancy.Historical.HDTransaction do
@moduledoc """
HDTransaction represents a transaction object from the historical data API
"""
defstruct [
:block_hash,
:block_number,
:from,
:gas,
:hash,
:nonce,
:transaction_index,
:to,
:value,
:gas_price,
:input,
:confirmations
]
end
defmodule Upvest.Tenancy.Historical.HDBalance do
@moduledoc """
HDBalance represents the balance of an asset or contract;
if a native asset balance, `contract` is set to the address of the contract
"""
defstruct [
:id,
:address,
:contract,
:balance,
:transaction_hash,
:transaction_index,
:block_hash,
:block_number,
:timestamp,
:is_main_chain
]
end
defmodule Upvest.Tenancy.Historical.HDTransactionList do
@moduledoc """
HDTransactionList is a list of HDTransaction objects
"""
# result -> [HDTransaction]
defstruct [:result, :next_cursor]
end
defmodule Upvest.Tenancy.Historical.HDStatus do
@moduledoc """
HDStatus represents the historical data API status object
"""
defstruct [:lowest, :highest, :latest]
end
defmodule Upvest.Tenancy.Historical.TxFilters do
@moduledoc """
TxFilters is for filtering historical data API queries
"""
defstruct [:before, :after, :confirmations, :cursor, :limit]
end
defmodule Upvest.Tenancy.Historical do
@moduledoc """
Handles operations related to historical blockchain data.
You can:
- Retrieve block details by blockNumber
- List transactions that have been sent to and received by an address
- Retrieve transaction (single) by txhash
- Retrieve native asset balance by address
- Retrieve contract asset balance by address
- GET API status
"""
@type t :: HDTransaction | HDBalance | HDBlock | HDStatus
use Upvest.API, [:dry]
alias Upvest.Tenancy.Historical.{HDTransaction, HDBalance, HDBlock, HDStatus}
alias Upvest.Tenancy.Historical.TxFilters
def endpoint do
"/data"
end
@doc "Retrieve block details by block number"
def get_block(client, protocol, network, block_number) do
url = "#{endpoint()}/#{protocol}/#{network}/block/#{block_number}"
with {:ok, resp} <- request(:get, url, %{}, client) do
{:ok, to_struct(resp["result"], HDBlock, true)}
end
end
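# Hedged usage sketch (the protocol/network values and the authenticated
# `client` are assumptions, not part of this module):
#
#     {:ok, %HDBlock{} = block} =
#       Upvest.Tenancy.Historical.get_block(client, "ethereum", "ropsten", 6_570_890)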
@doc """
List transactions that have been sent to and received by an address.
Takes an optional parameter of transaction filters
"""
def all_transactions(client, protocol, network, address, opts \\ %TxFilters{}) do
url = "#{endpoint()}/#{protocol}/#{network}/transactions/#{address}"
opts = opts |> Map.from_struct() |> Enum.reject(fn {_k, v} -> is_nil(v) end) |> Enum.into(%{})
with {:ok, resp} <- request(:get, url, opts, client) do
{:ok, to_struct(resp["result"], HDTransaction, true)}
end
end
@doc "Retrieve transaction (single) by txhash"
def get_transaction(client, protocol, network, txhash) do
url = "#{endpoint()}/#{protocol}/#{network}/transaction/#{txhash}"
with {:ok, resp} <- request(:get, url, %{}, client) do
{:ok, to_struct(resp["result"], HDTransaction, true)}
end
end
@doc "Retrieve native asset balance by address"
def get_balance(client, protocol, network, address) do
url = "#{endpoint()}/#{protocol}/#{network}/balance/#{address}"
with {:ok, resp} <- request(:get, url, %{}, client) do
{:ok, to_struct(resp["result"], HDBalance, true)}
end
end
@doc "Retrieve contract asset balance by address"
def get_balance(client, protocol, network, address, contract_address) do
url = "#{endpoint()}/#{protocol}/#{network}/balance/#{address}/#{contract_address}"
with {:ok, resp} <- request(:get, url, %{}, client) do
{:ok, to_struct(resp["result"], HDBalance, true)}
end
end
def api_status(client, protocol, network) do
url = "#{endpoint()}/#{protocol}/#{network}/status"
with {:ok, resp} <- request(:get, url, %{}, client) do
{:ok, to_struct(resp["result"], HDStatus)}
end
end
end
|
lib/upvest/tenancy/historical.ex
| 0.785144
| 0.418608
|
historical.ex
|
starcoder
|
defmodule Genex.Tools.Benchmarks.Binary do
@moduledoc """
Benchmark objective functions for binary genetic algorithms.
"""
@doc """
Binary deceptive function.
Returns ```math```.
# Parameters
- `chromosome`: `%Chromosome{}`
"""
def chuang_f1(chromosome) do
last = Enum.at(chromosome.genes, -1)
n = Enum.count(chromosome.genes)
if last == 0 do
chromosome.genes
|> Enum.take(n - 1)
|> Enum.chunk_every(4)
|> Enum.reduce(0, fn x, acc -> acc + inv_trap(x) end)
else
chromosome.genes
|> Enum.take(n - 1)
|> Enum.chunk_every(4)
|> Enum.reduce(0, fn x, acc -> acc + trap(x) end)
end
end
@doc """
Binary deceptive function.
Returns ```math```.
# Parameters
- `chromosome`: `%Chromosome{}`
"""
def chuang_f2(chromosome) do
last = Enum.at(chromosome.genes, -1)
second_to_last = Enum.at(chromosome.genes, -2)
cond do
second_to_last == 0 and last == 0 ->
chromosome.genes
|> Enum.chunk_every(8)
|> Enum.reduce(0,
fn x, acc ->
{y, z} = Enum.split(x, 4)
acc + inv_trap(y) + inv_trap(z)
end
)
second_to_last == 0 and last == 1 ->
chromosome.genes
|> Enum.chunk_every(8)
|> Enum.reduce(0,
fn x, acc ->
{y, z} = Enum.split(x, 4)
acc + inv_trap(y) + trap(z)
end
)
second_to_last == 1 and last == 0 ->
chromosome.genes
|> Enum.chunk_every(8)
|> Enum.reduce(0,
fn x, acc ->
{y, z} = Enum.split(x, 4)
acc + trap(y) + inv_trap(z)
end
)
second_to_last == 1 and last == 1 ->
chromosome.genes
|> Enum.chunk_every(8)
|> Enum.reduce(0,
fn x, acc ->
{y, z} = Enum.split(x, 4)
acc + trap(y) + trap(z)
end
)
end
end
@doc """
Binary deceptive function.
Returns ```math```.
# Parameters
- `chromosome`: `%Chromosome{}`
"""
def chuang_f3(chromosome) do
last = Enum.at(chromosome.genes, -1)
n = Enum.count(chromosome.genes)
if last == 0 do
chromosome.genes
|> Enum.take(n - 1)
|> Enum.chunk_every(4)
|> Enum.reduce(0, fn x, acc -> acc + inv_trap(x) end)
else
{head, middle} = Enum.split(chromosome.genes, 3)
{middle, tail} = Enum.split(middle, n - 3)
y =
middle
|> Enum.chunk_every(4)
|> Enum.reduce(0, fn x, acc -> acc + inv_trap(x) end)
z = trap(Enum.concat(head, tail))
y + z
end
end
defp trap(genes) do
u = Enum.sum(genes)
k = Enum.count(genes)
if u == k, do: k, else: k - 1 - u
end
defp inv_trap(genes) do
u = Enum.sum(genes)
k = Enum.count(genes)
if u == 0, do: k, else: u - 1
end
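# Worked examples for one 4-bit block: trap([1, 1, 1, 1]) == 4 (u == k),
# trap([1, 0, 0, 0]) == 4 - 1 - 1 == 2, inv_trap([0, 0, 0, 0]) == 4, and
# inv_trap([1, 1, 1, 0]) == 3 - 1 == 2.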
@doc """
Royal Road objective function.
Returns ```math```.
# Parameters
- `chromosome`: `%Chromosome{}`.
- `order`: `Integer`.
"""
def royal_road1(chromosome, order) do
chromosome.genes
|> Enum.chunk_every(order)
|> Enum.map(&Enum.join(&1, ""))
|> Enum.map(&String.to_integer(&1, 2))
|> Enum.sum()
end
@doc """
Royal Road 2 objective function.
Returns ```math```.
# Parameters
- `chromosome`: `%Chromosome{}`.
- `order`: `Integer`.
"""
def royal_road2(chromosome, order),
do: do_royal_road2(chromosome, order, floor(:math.pow(2, order)), 0)
defp do_royal_road2(chromosome, norder, order, acc) do
if norder >= order do
acc
else
val = royal_road1(chromosome, norder)
# double norder while keeping the 2^order limit in the third argument
do_royal_road2(chromosome, norder * 2, order, acc + val)
end
end
end
|
lib/genex/tools/benchmarks/binary.ex
| 0.83901
| 0.892187
|
binary.ex
|
starcoder
|
defmodule Grapevine.Presence.Client do
@moduledoc """
Implementation of the Presence client
"""
alias Grapevine.Applications
alias Grapevine.Client
alias Grapevine.Games
import Grapevine.Presence, only: [ets_key: 0]
@timeout_seconds 60
@doc """
Number of seconds that a game can be offline before the game is considered offline
"""
def timeout_seconds(), do: @timeout_seconds
@doc """
Get a list of games that are connected and online
"""
def online_games() do
keys()
|> Enum.map(&fetch_from_ets/1)
|> Enum.filter(&filter_online/1)
|> Enum.map(&fetch_from_db/1)
|> Enum.reject(&is_nil/1)
|> append_grapevine()
end
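# Hedged note on the expected ETS layout (inferred from the lookups below):
# each row is {"game:" <> id, presence} or {"application:" <> id, presence},
# where presence is a map with at least a :timestamp key.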
defp append_grapevine(games) do
[Client.presence() | games]
end
@doc """
Fetch a game by id from ETS
"""
def fetch_from_ets(id) do
case :ets.lookup(ets_key(), id) do
[{^id, presence}] ->
{id, presence}
_ ->
nil
end
end
@doc """
Determine if a game is online based on presence from ETS
"""
def game_online?(presence) do
oldest_online = Timex.now() |> Timex.shift(seconds: -1 * @timeout_seconds)
Timex.after?(presence.timestamp, oldest_online)
end
defp filter_online(nil), do: false
defp filter_online({_game_id, presence}) do
game_online?(presence)
end
defp fetch_from_db({"game:" <> game_id, presence}) do
case Games.get(game_id) do
{:ok, game} ->
presence
|> Map.put(:type, :game)
|> Map.put(:game, game)
{:error, :not_found} ->
nil
end
end
defp fetch_from_db({"application:" <> application_id, presence}) do
case Applications.get(application_id) do
{:ok, application} ->
presence
|> Map.put(:type, :application)
|> Map.put(:game, application)
{:error, :not_found} ->
nil
end
end
def keys() do
key = :ets.first(ets_key())
keys(key, [key])
end
def keys(:"$end_of_table", [:"$end_of_table" | accumulator]), do: accumulator
def keys(current_key, accumulator) do
next_key = :ets.next(ets_key(), current_key)
keys(next_key, [next_key | accumulator])
end
end
|
lib/grapevine/presence/client.ex
| 0.721351
| 0.41742
|
client.ex
|
starcoder
|
defmodule Plymio.Name.Utils do
@moduledoc ~S"""
Utility functions for `names`
An element of a `name` is anything `Kernel.to_string/1` can transform to a string.
"""
@typedoc "A name element"
@type namel :: nil | atom | number | String.t
@typedoc "A name"
@type name :: namel | [namel]
@typedoc "Option"
@type option ::
{:transform, fun | [fun]} |
{:sep, name}
@typedoc "Options"
@type options :: [option]
use Plymio.Name.Attributes
defp separator_to_string(sep) when is_binary(sep), do: sep
defp separator_to_string(sep), do: name_to_string(sep)
# header
@doc ~S"""
Transforms a `name` to a `String`.
Use the `:sep` option to separate each element of the `name`.
Apply the `:transform` function(s) (if given) to the created string.
## Examples
iex> name_to_string(:abc)
"abc"
iex> name_to_string([:a, "b", :c], sep: "_")
"a_b_c"
iex> name_to_string([:a, "b", :c], sep: "/", transform: &String.capitalize/1)
"A/b/c"
iex> name_to_string([:a, 1, ["b", nil, 2], [nil, :c, 3]],
...> transform: [&String.capitalize/1, fn str -> str <> "FortyTwo" end])
"A1b2c3FortyTwo"
"""
@spec name_to_string(name, options) :: String.t
def name_to_string(name, opts \\ nil)
def name_to_string(name, nil) when is_binary(name), do: name
def name_to_string(name, nil) when is_nil(name), do: ""
def name_to_string(name, nil) when is_atom(name), do: name |> to_string
def name_to_string(name, nil) when is_number(name), do: name |> to_string
def name_to_string(name, nil) when is_list(name) do
name |> Enum.map(&name_to_string/1) |> Enum.join
end
def name_to_string(name, opts) when is_tuple(name) do
name |> Tuple.to_list |> name_to_string(opts)
end
def name_to_string(name, opts) when is_list(opts) do
name =
case is_list(name) do
true ->
# don't propagate the transform(s)
opts_namel = opts |> Keyword.drop([@plymio_name_opts_key_transform])
name = name
|> Enum.map(fn v -> name_to_string(v, opts_namel) end)
|> Enum.reject(fn
nil -> true
"" -> true
_ -> false
end)
case Keyword.get(opts, @plymio_name_opts_key_separator) do
x when x != nil -> Enum.join(name, separator_to_string(x))
_ -> Enum.join(name)
end
_ -> name_to_string(name)
end
# any transform function?
case Keyword.get(opts, @plymio_name_opts_key_transform) do
fun when fun != nil ->
case fun do
x when is_function(x) -> x.(name)
x when is_list(x) -> x |> Enum.reduce(name, fn f, s -> f.(s) end)
end
_ -> name
end
end
@doc ~S"""
Transforms a list of names to a list of strings.
## Examples
iex> names_to_strings([:aBc, 123, "xYz"])
["aBc", "123", "xYz"]
iex> names_to_strings([:aBc, 123, "xYz"], transform: &String.capitalize/1)
["Abc", "123", "Xyz"]
"""
@spec names_to_strings([name], options) :: [String.t]
def names_to_strings(names, opts \\ []) when is_list(names) do
names
|> Stream.reject(&is_nil/1)
|> Stream.map(fn name -> name_to_string(name, opts) end)
|> Enum.reject(&is_nil/1)
end
@doc ~S"""
Transforms a `name` to a `Atom`.
Create an intermediate string first using `name_to_string/2` and then transforms the result to an atom.
## Examples
iex> name_to_atom(:abc)
:abc
iex> name_to_atom([:a, "b", :c], sep: "_")
:"a_b_c"
iex> name_to_atom([:a, "b", :c], sep: "/", transform: &String.capitalize/1)
:"A/b/c"
iex> name_to_atom([:a, 1, ["b", nil, 2], [nil, :c, 3]], transform: &String.capitalize/1)
:A1b2c3
"""
@spec name_to_atom(name, options) :: atom
def name_to_atom(name, opts \\ [])
def name_to_atom(name, opts) do
case name_to_string(name, opts) do
x when is_atom(x) -> x
x when is_binary(x) -> x |> String.to_atom
end
end
@doc ~S"""
Transforms a list of names to a list of atoms.
## Examples
iex> names_to_atoms([:abc, 123, "xyz"])
[:abc, :"123", :xyz]
"""
@spec names_to_atoms([name], options) :: [atom]
def names_to_atoms(name, opts \\ []) when is_list(name) do
name
|> Stream.reject(&is_nil/1)
|> Stream.map(fn name -> name_to_atom(name, opts) end)
|> Enum.reject(&is_nil/1)
end
@doc ~S"""
Transforms a `name` to a Macro.var.
Create an intermediate atom first using `name_to_atom/2` and then
creates the ast for the var using `Macro.var/2`.
The `:context` option (if any; default `nil`) is passed to `Macro.var/2`.
## Examples
iex> name_to_var(:abc)
{:abc, [], nil}
iex> name_to_var([:a, "b", :c], sep: "_")
{:"a_b_c", [], nil}
iex> name_to_var([:a, "b", :c], sep: "_", context: ModuleA)
{:"a_b_c", [], ModuleA}
"""
@spec name_to_var(name, options) :: {var, [], context} when var: atom, context: atom
def name_to_var(name, opts \\ []) do
name
|> name_to_atom(opts)
|> Macro.var(opts |> Keyword.get(@plymio_name_opts_key_context))
end
@doc ~S"""
Transforms a list of names to a list of vars.
## Examples
iex> names_to_vars([:abc, 123, "xyz"])
[{:abc, [], nil}, {:"123", [], nil}, {:xyz, [], nil}]
iex> names_to_vars([:AbC, 123, "XyZ"], transform: &String.downcase/1)
[{:abc, [], nil}, {:"123", [], nil}, {:xyz, [], nil}]
"""
@spec names_to_vars([name], options) :: [{var, [], context}] when var: atom, context: atom
def names_to_vars(names, opts \\ []) when is_list(names) do
names
|> Stream.map(fn name -> name_to_var(name, opts) end)
|> Enum.reject(&is_nil/1)
end
@doc ~S"""
Transforms the name to a string and capitalize the first character
of a name and leave rest alone (i.e. does *not* lowercase rest).
## Examples
iex> name_capitalize_first(:abc)
"Abc"
iex> name_capitalize_first("abcDeF")
"AbcDeF"
"""
@spec name_capitalize_first(name) :: String.t
def name_capitalize_first(name) when is_binary(name) do
# split on first char
splits = String.split_at(name, 1)
# capitalize the first char and concat with rest
String.capitalize(elem(splits, 0)) <> elem(splits, 1)
end
def name_capitalize_first(name) do
name
|> name_to_string
|> name_capitalize_first
end
@doc ~S"""
Splits a string on underscore and concats the splits
after capitalizing *only* the first character of each split.
## Examples
iex> name_camel_case("hello_world")
"HelloWorld"
iex> name_camel_case("HellO_worLd")
"HellOWorLd"
iex> name_camel_case("heLLoworld")
"HeLLoworld"
"""
@spec name_camel_case(name) :: String.t
def name_camel_case(name) when is_binary(name) do
name
|> String.split("_")
|> Stream.map(&name_capitalize_first/1)
|> Enum.join
end
def name_camel_case(name) do
name
|> name_to_string
|> name_camel_case
end
@doc ~S"""
Creates a unique string from a reference.
Uses `Kernel.make_ref/0` to generate the unique value (e.g.
*#Reference<0.0.2.293>*), transforms to a string, extracts
everything between the "<" and ">", and replaces dots with underscores.
## Examples
name_uniqueness() # e.g. "0_0_2_293"
"""
@spec name_uniqueness() :: String.t
def name_uniqueness() do
make_ref()
|> inspect
|> String.slice(11..-2)
|> String.replace(".", "_")
end
@doc ~S"""
Creates a unique string using `name_uniqueness/0` and appends to the
name (default "uniq").
## Examples
iex> << x :: bytes-size(4), _rest :: binary >> = name_to_string_unique()
...> match?("uniq", x)
true
iex> << x :: bytes-size(3), _rest :: binary >> = name_to_string_unique(:abc)
...> match?("abc", x)
true
iex> << x :: bytes-size(6), _rest :: binary >> = name_to_string_unique(:aBcDeF, transform: &String.capitalize/1)
...> match?("Abcdef", x)
true
"""
@spec name_to_string_unique(name, options) :: String.t
def name_to_string_unique(name \\ "uniq", opts \\ [])
def name_to_string_unique(name, opts) do
name_to_string([name, name_uniqueness()], opts)
end
@doc ~S"""
Creates a unique string using `name_uniqueness/0` and appends to the
name (default "uniq"), and transforms the string to an atom.
## Examples
iex> atom = name_to_atom_unique(:aBcDeF, transform: &String.capitalize/1) # e.g. :"Abcdef0_0_2_293"
...> << x :: bytes-size(6), _rest :: binary >> = atom |> to_string
...> match?("Abcdef", x)
true
"""
@spec name_to_atom_unique(name) :: atom
def name_to_atom_unique(name, opts \\ []) do
name_to_atom([name, name_uniqueness()], opts)
end
@doc ~S"""
Creates a string from the `Kernel.self/0` pid using inspect, extracts
everything between the "<" and ">", and replaces the dots with
underscores.
## Examples
iex> << x :: bytes-size(2), _rest :: binary >> = name_self() # e.g. "0_789_0"
...> match?("0_", x)
true
iex> << x :: bytes-size(12), _rest :: binary >> = name_self(:"my_pid_is_") # e.g. "my_pid_is_0_80_0"
...> match?("my_pid_is_0_", x)
true
"""
@spec name_self(name) :: String.t
def name_self(name \\ nil)
def name_self(nil) do
self()
|> inspect
|> String.slice(5..-2)
|> String.replace(".", "_")
end
def name_self(name) do
name_to_string(name) <> name_self()
end
end
|
lib/name/utils.ex
| 0.912495
| 0.603581
|
utils.ex
|
starcoder
|
defmodule ExPIX do
@moduledoc """
Module with basic operations.
All functions in this library use the parameters in the `config.exs` file. By default, the configuration for building static QR-Codes is:
```elixir
config :ex_pix, :static_code,
payload_indicator: "01",
start_method: "11",
gui: "br.gov.bcb.pix",
currency_code: "986",
country_code: "BR",
default_value: "0.00",
category_code: "0000"
```
If you need to change any of these parameters, just define it in your own configuration file.
"""
alias ExPIX.Data.{
Additional,
Static,
StaticAccount
}
alias ExPIX.Code.Builder
@typedoc """
Basic information for building static code. It must be a map with the following parameters:
- `:merchant_name` - Required field that represents the name of the user to be charged.
- `:merchant_city` - Required field that represents the city of the user to be charged.
- `:postal_code` - Non-Required field that represents the user's postal code.
- `:amount` - The amount of the operation. If not provided, the code will use the default value defined in the configuration. It must be a string in the format 0.00.
- `:payload_indicator` - The payload indicator of the operation. If not provided, the code will use the default value defined in the configuration.
- `:initiation_method` - The initiation method of the operation. If not provided, the code will use the default value defined in the configuration.
- `:category_code` - The category code of the operation. If not provided, the code will use the default value defined in the configuration.
- `:currency_code` - The currency code of the operation. If not provided, the code will use the default value defined in the configuration.
- `:country_code` - The country code of the operation. If not provided, the code will use the default value defined in the configuration.
"""
@type static_params :: %{
required(:merchant_name) => String.t,
required(:merchant_city) => String.t,
optional(:amount) => String.t,
optional(:payload_indicator) => String.t,
optional(:initiation_method) => String.t,
optional(:category_code) => String.t,
optional(:currency_code) => String.t,
optional(:country_code) => String.t,
optional(:postal_code) => String.t,
}
@typedoc """
Optional parameters to build the notes of the operation. The available parameters are:
- `:info` - A string with the operation notes.
**Important:**
- The combined character count of `info`, `gui`, and `key` must not exceed 99 characters. If this limit is exceeded, an error will be returned.
"""
@type account_params :: %{optional(:info) => String.t}
@typedoc """
Additional parameters to build the code. The available parameters are:
- `:txid` - An ID to represent the operation. If not provided, the `txid` will be `***`, which represents a random string to be generated by Banco Central.
**Important:**
- Banco Central does not check if the provided ID is unique, so the recommendation here is to use a UUID (ideally v4).
- The count of characters must not exceed the limit of 99 characters. If this limit is exceeded, an error will be returned.
"""
@type additional_params :: %{optional(:txid) => String.t}
@doc """
Generates a static raw code.
This function uses the default parameters defined in the `static_code` configuration, but if you need a special parameter, this function can receive such parameters as well.
* `key` - The key to be charged. It can be a CNPJ, CPF, phone number or a random binary string. It is not necessary to send the key type, but this function expects the key to already be in the correct format for its type (see the PIX docs for some examples).
* `static_params` - Basic information to build a static code.
* `account_params` - Optional parameters to build the notes of the operation.
* `additional_params` - Additional parameters to build the code.
### Example:
```elixir
iex> ExPIX.generate_static_code("email@<EMAIL>", %{merchant_name: "<NAME>", merchant_city: "Florianopolis"})
{:ok, "00020101021126370014<EMAIL>52040000530398654040.005802BR5914Leonardo Leite6013Florianopolis62070503***6304891C"}
```
"""
@spec generate_static_code(String.t(), static_params, account_params, additional_params) :: {:ok, String.t()} | {:error, any}
def generate_static_code(key, static_params, account_params \\ %{}, additional_params \\ %{}) do
account_info = Map.merge(%StaticAccount{}, Map.merge(%{key: key}, account_params))
additional_info = Map.merge(%Additional{}, additional_params)
%Static{}
|> Map.merge(
static_params
|> Map.put(:account_information, account_info)
|> Map.put(:additional, additional_info)
)
|> Builder.build()
end
end
|
lib/ex_pix.ex
| 0.89998
| 0.941708
|
ex_pix.ex
|
starcoder
|