code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
%% rufus_tokenize inserts expression terminators into a stream of tokens
%% produced by rufus_raw_tokenize, based on the set of rules defined in the
%% _Semicolons_ section of the language spec:
%%
%% 1. When the input is broken into tokens, a semicolon is automatically
%% inserted into the token stream immediately after a line's final token if
%% that token is:
%% - An identifier.
%% - An `atom`, `bool`, `float`, `int` or `string` literal.
%% - One of the punctuation `)` or `}`.
%% 2. To allow complex expressions to occupy a single line, a semicolon may be
%% omitted before a closing `)` or `}`.
-module(rufus_tokenize).
-include_lib("rufus_type.hrl").
%% API exports
-export([string/1]).
%% API
%% string tokenizes RufusText and inserts semicolon expression terminators based
%% on a set of rules. Return values:
%% - `{ok, Tokens}` with `Tokens` as a list of tokens, if tokenization is
%% successful.
%% - `{error, Reason}` if an error occurs.
-spec string(rufus_text()) -> ok_tuple() | error_tuple().
%% string tokenizes RufusText with rufus_raw_tokenize, strips comment tokens
%% and then inserts semicolon expression terminators. Returns {ok, Tokens} on
%% success, or {error, Reason} if raw tokenization fails.
string(RufusText) ->
    case rufus_raw_tokenize:string(RufusText) of
        {error, Reason, _LineNumber} ->
            {error, Reason};
        {ok, RawTokens, _LineCount} ->
            WithoutComments = discard_comments(RawTokens),
            insert_semicolons(WithoutComments)
    end.
%% Private API
%% make_semicolon_token builds a `;' token located at TokenLine.
make_semicolon_token(TokenLine) ->
    {';', TokenLine}.

%% terminate pushes a semicolon token onto Acc, unless the most recently
%% accumulated token is already a semicolon (expressions are never
%% double-terminated).
terminate([{';', _ExistingLine} | _Rest] = Acc, _TokenLine) ->
    Acc;
terminate(Acc, TokenLine) ->
    [make_semicolon_token(TokenLine) | Acc].
%% discard_comments discards comment tokens, preserving the relative order of
%% all remaining tokens.
-spec discard_comments(list(tuple())) -> list(tuple()).
discard_comments(Tokens) ->
    [Token || Token <- Tokens, not is_comment(Token)].

%% is_comment returns true for tokens produced from source comments.
is_comment({comment, _TokenLine, _TokenChars}) ->
    true;
is_comment(_Token) ->
    false.
%% insert_semicolons inserts `;` tokens after some `eol` tokens to terminate
%% expressions. All `eol` tokens are discarded in the resulting list of tokens.
-spec insert_semicolons(list(tuple())) -> {ok, list(tuple())}.
insert_semicolons(Tokens) ->
    {ok, insert_semicolons([], undefined, Tokens)}.

%% insert_semicolons/3 walks the token stream while remembering the previously
%% processed token. A semicolon is inserted whenever the previous token may
%% terminate an expression (see terminator_line/1) and we hit an `eol' token
%% or the end of the input. `eol' tokens themselves are always discarded.
insert_semicolons(Acc, LastToken, [{eol, _TokenLine} = Token | T]) ->
    case terminator_line(LastToken) of
        {true, TokenLine} ->
            insert_semicolons(terminate(Acc, TokenLine), Token, T);
        false ->
            insert_semicolons(Acc, Token, T)
    end;
insert_semicolons(Acc, LastToken, []) ->
    case terminator_line(LastToken) of
        {true, TokenLine} ->
            lists:reverse(terminate(Acc, TokenLine));
        false ->
            lists:reverse(Acc)
    end;
%% All other tokens are kept as-is.
insert_semicolons(Acc, _LastToken, [Token | T]) ->
    insert_semicolons([Token | Acc], Token, T).

%% terminator_line returns {true, TokenLine} when Token may end an expression
%% per rule 1 of the _Semicolons_ section of the language spec: an identifier,
%% an `atom'/`bool'/`float'/`int'/`string' literal, or the punctuation `)' and
%% `}'. Returns false for every other token (including `undefined', used when
%% no token has been seen yet).
terminator_line({identifier, TokenLine, _TokenChars}) -> {true, TokenLine};
terminator_line({atom_lit, TokenLine, _TokenChars}) -> {true, TokenLine};
terminator_line({bool_lit, TokenLine, _TokenChars}) -> {true, TokenLine};
terminator_line({float_lit, TokenLine, _TokenChars}) -> {true, TokenLine};
terminator_line({int_lit, TokenLine, _TokenChars}) -> {true, TokenLine};
terminator_line({string_lit, TokenLine, _TokenChars}) -> {true, TokenLine};
terminator_line({')', TokenLine}) -> {true, TokenLine};
terminator_line({'}', TokenLine}) -> {true, TokenLine};
terminator_line(_Token) -> false.
-module(morpheus_instrument).
%% API exports
-export([instrument_and_load/6, whitelist_func/3]).
-include("morpheus_priv.hrl").
%%====================================================================
%% API functions
%%====================================================================
%% Instrument a module with object code `ObjectCode` and module name `OriginalModule`.
%% During the function, the instrumented module will be loaded, where:
%% - The module is renamed to `NewModule` (NewModule is supposed to be globally unique)
%% - call M:F(A...) is redirected to CtlMod:handle(OriginalModule, NewModule, call, M, F, A, _Ann)
%% - apply(M, F, A) is redirected to CtlMod:handle(OriginalModule, NewModule, apply, M, F, A, _Ann)
%% - T ! M is transformed to CtlMod:handle(OriginalModule, NewModule, call, erlang, send, [T, M], _Ann)
%% - receive ... after ... end is transformed to case CtlMod:handle(OriginalModule, NewModule, 'receive', [PatFun, Timeout], _Ann) of ... end
%% - Nif stubs in the new module are forwarding calls to the original stubs
%% - _Ann is code annotation that is unused for now
%% If success, {ok, Warnings} will be returned.
%% Otherwise, failed will be returned
%% instrument_and_load instruments the module `OriginalModule' (given as beam
%% `ObjectCode') and loads the instrumented copy under the name `NewModule'.
%% It may also reload a modified copy of the original module (with extra
%% exports) when instrument_core/5 decided one is needed.
%% Returns {ok, CtlState, Nifs, Warnings}; a failed load crashes the caller.
instrument_and_load(CtlMod, CtlState, OriginalModule, NewModule, Filename, ObjectCode) ->
    ?DEBUG("instrument(~w, ~w, ~w, ~p, ...)", [CtlMod, OriginalModule, NewModule, Filename]),
    %% Recover the Core Erlang representation from the object code.
    Core = get_core(ObjectCode),
    ?VERBOSE_DEBUG("~w: gonna instrument ~p", [?FUNCTION_NAME, Core]),
    _InstrResult = {{Original, Instrumented}, CtlState0, Nifs, Warnings} = instrument_core(CtlMod, CtlState, Core, OriginalModule, NewModule),
    case Filename of
        [] ->
            %% empty filename indicate dynamic module. So far there is no good way of handling that.
            ok;
        _ ->
            %% `Original' is a modified copy of the original module when it
            %% must be reloaded, or `undefined' when simply ensuring the
            %% unmodified original is loaded is enough.
            case Original of
                undefined ->
                    {module, OriginalModule} = code:ensure_loaded(OriginalModule);
                _ ->
                    ?DEBUG("Reload original module ~p", [OriginalModule]),
                    load_module_from_core(OriginalModule, Filename, Original)
            end
    end,
    load_module_from_core(NewModule, Filename, Instrumented),
    {ok, CtlState0, Nifs, Warnings}.
%%====================================================================
%% Internal functions
%%====================================================================
%% load_module_from_core compiles the Core Erlang form `Core' and loads the
%% resulting binary as module `Name' (with `Filename' recorded as its source).
%% Crashes with `load_module_failed' if compilation does not succeed.
load_module_from_core(Name, Filename, Core) ->
    ?DEBUG("load_module_from_core: ~p", [Name]),
    %% Match on the compile result directly instead of binding through
    %% underscore-prefixed variables (reading `_Binary'/`_Other' as values
    %% defeats the unused-variable convention).
    case compile:forms(Core, [from_core, report_errors, binary]) of
        {ok, _Module, Binary} ->
            {module, Name} = code:load_binary(Name, Filename, Binary),
            ok;
        Other ->
            ?WARNING("compile failed on module ~p:~n"
                     " info: ~p~n"
                     ,[Name, Other]),
            error(load_module_failed)
    end.
%% Some functions are inspired by Concuerror
%% get_core returns the Core Erlang representation of a module, either from
%% the `abstract_code' chunk of the given beam code/filename, or - when the
%% beam was compiled without debug_info - by recompiling the module from its
%% recorded source file with a filtered subset of its original compile
%% options.
get_core(CodeOrFilename) ->
    {ok, {Module, [{abstract_code, ChunkInfo}]}} =
        beam_lib:chunks(CodeOrFilename, [abstract_code]),
    case ChunkInfo of
        {_, Chunk} ->
            %% Abstract code is available: lower it straight to Core Erlang.
            {ok, Module, Core} = compile:forms(Chunk, [binary, to_core0]),
            Core;
        no_abstract_code ->
            %% No debug_info; recompile from source, keeping only the compile
            %% options that affect parsing (macro definitions, include paths
            %% and parse transforms).
            {ok, {Module, [{compile_info, CompileInfo}]}} =
                beam_lib:chunks(CodeOrFilename, [compile_info]),
            {source, File} = proplists:lookup(source, CompileInfo),
            {options, CompileOptions} = proplists:lookup(options, CompileInfo),
            %% Compile options may be bare atoms (e.g. `debug_info'); calling
            %% element/2 on those would raise badarg, so only inspect tuples.
            Filter =
                fun(Option) when is_tuple(Option) ->
                        lists:member(element(1, Option), [d, i, parse_transform]);
                   (_Option) ->
                        false
                end,
            CleanOptions = lists:filter(Filter, CompileOptions),
            Options = [debug_info, report_errors, binary, to_core0|CleanOptions],
            {ok, Module, Core} = compile:file(File, Options),
            Core
    end.
%% Per-traversal state for instrument_core_tree/2:
%% - nif_error_found: set right after a call to erlang:nif_error is seen, so
%%   the enclosing fun can be annotated as a NIF stub;
%% - nifs: {Name, Arity} pairs of detected NIF stubs;
%% - debug_counter: spare counter, currently unused by the visible code.
-record(ins_state, { nif_error_found :: boolean(), nifs :: [{atom(), integer()}], debug_counter :: integer() }).

%% instrument_core rewrites `Core' via instrument_core_tree/2 and returns
%% {{Original, Instrumented}, CtlState, Nifs, Warnings}, where `Original' is
%% either a copy of the original module with extra exports (needed for NIF
%% forwarding and the erl_eval special case) or `undefined' when no reload of
%% the original module is required.
instrument_core(CtlMod, CtlState, Core, OriginalModule, NewModule) ->
    Opt = {CtlMod, OriginalModule, NewModule},
    {R, {Opt, CtlState0, #ins_state{nifs = Nifs}, Warn}} =
        cerl_trees:mapfold(fun instrument_core_tree/2,
                           {Opt, CtlState,
                            #ins_state{nif_error_found = false, nifs = [], debug_counter = 0},
                            []}, Core),
    %% erl_eval:match_clause/5 is additionally exposed — presumably needed by
    %% the controller; TODO confirm against callers.
    ToExpose = case OriginalModule of
                   erl_eval ->
                       [{match_clause, 5} | Nifs];
                   _ ->
                       Nifs
               end,
    case ToExpose of
        [] ->
            {{undefined, R}, CtlState0, [], Warn};
        _ ->
            %% XXX some module cannot be loaded twice due to on_load handler limitation
            ToReload =
                case code:is_sticky(OriginalModule) of
                    false -> true;
                    true ->
                        code:unstick_mod(OriginalModule)
                end,
            case ToReload of
                true ->
                    ?DEBUG("Reload original module of ~w with export list: ~p", [OriginalModule, ToExpose]),
                    {ModifiedOriginal, _} = cerl_trees:mapfold(fun export_fun/2, ToExpose, Core),
                    {{ModifiedOriginal, R},
                     CtlState0,
                     %% Since NIFs is not handled specially anywhere except in the process entry (and it's rare!), simply pass []
                     %% IS#ins_state.nifs,
                     [],
                     Warn};
                false ->
                    ?INFO("Skip reloading module ~p", [OriginalModule]),
                    {{undefined, R}, CtlState0, [], Warn}
            end
    end.
%% export_fun adds every {Name, Arity} in FunList to the export list of a
%% module node; all other tree nodes pass through unchanged. Intended for use
%% with cerl_trees:mapfold/3 (FunList is the unchanged accumulator).
export_fun(Tree, FunList) ->
    case cerl:type(Tree) of
        module ->
            ExtraExports = [cerl:c_var(FunName) || FunName <- FunList],
            Updated = cerl:update_c_module(
                        Tree,
                        cerl:module_name(Tree),
                        ExtraExports ++ cerl:module_exports(Tree),
                        cerl:module_attrs(Tree),
                        cerl:module_defs(Tree)),
            {Updated, FunList};
        _Other ->
            {Tree, FunList}
    end.
%% Currently, we identify nif overrides by assuming that the nif stubs will call nif_error to generate error
%% instrument_core_tree rewrites a single Core Erlang node; intended for use
%% with cerl_trees:mapfold/3 (see instrument_core/5). The accumulator carries
%% the instrumentation options, the controller state, the #ins_state{}
%% traversal state and the warnings collected so far.
instrument_core_tree(Tree, {{CtlMod, OriginalModule, NewModule} = Opt, CtlState, IS, Warnings}) ->
    Type = cerl:type(Tree),
    {NewTreeAndMaybeWarn, CtlState0, NewIS} =
        case Type of
            module ->
                %% The module node: rename, recompute exports, rewrite NIF
                %% stubs to forward to the original module, and apply
                %% controller-requested overrides.
                _OriginalModule = cerl:atom_val(cerl:module_name(Tree)),
                %% Ignore on_load property assuming it's only loading NIF binaries,
                %% and the original module already handled it.
                NewAttrs = lists:foldr(
                             fun ({NameTree, ValueTree}, Acc) ->
                                     Name = cerl:concrete(NameTree),
                                     case Name of
                                         on_load ->
                                             Acc;
                                         _ ->
                                             [{NameTree, ValueTree} | Acc]
                                     end
                             end, [], cerl:module_attrs(Tree)),
                {Nifs, ToExpose, DefsWithForwarding} =
                    lists:foldr(
                      fun ({NT, DT}, {NifsL, ToExposeL0, DefsL}) ->
                              N = cerl:var_name(NT),
                              {F, A} = N,
                              Vars = cerl:fun_vars(DT),
                              Body = cerl:fun_body(DT),
                              %% The nif_stub annotation is placed on funs by
                              %% the 'fun' branch below, earlier in the fold.
                              IsNifStub = lists:member(nif_stub, cerl:get_ann(DT)),
                              ToExposeL =
                                  case erlang:function_exported(CtlMod, to_expose, 4)
                                      andalso CtlMod:to_expose(CtlState, OriginalModule, F, A) of
                                      true ->
                                          [{F, A} | ToExposeL0];
                                      false ->
                                          ToExposeL0
                                  end,
                              case N =/= {on_load, 0}
                                  andalso erlang:function_exported(CtlMod, to_override, 4)
                                  andalso CtlMod:to_override(CtlState, OriginalModule, F, A) of
                                  {true, Action}
                                    when Action =:= trace; Action =:= callback ->
                                      %% Controller-requested override: wrap
                                      %% the body in a CtlMod:handle(override,
                                      %% ...) call; for non-NIFs the original
                                      %% body is kept under "<F>$orig".
                                      OrigName =
                                          case IsNifStub of
                                              true ->
                                                  [];
                                              false ->
                                                  list_to_atom(atom_to_list(F) ++ "$orig")
                                          end,
                                      InspectedBody = inspect( override
                                                             , [ cerl:c_atom(Action)
                                                               , cerl:c_atom(F)
                                                               , cerl:c_atom(OrigName)
                                                               , cerl:make_list(Vars)
                                                               ]
                                                             , Body, Opt),
                                      OverridedDef = cerl:update_c_fun(DT, Vars, InspectedBody),
                                      case IsNifStub of
                                          true ->
                                              {[N | NifsL], ToExposeL, [{NT, OverridedDef} | DefsL]};
                                          false ->
                                              OrigNT = {OrigName, A},
                                              {NifsL, [OrigNT | ToExposeL],
                                               [{NT, OverridedDef}, {cerl:c_var(OrigNT), DT} | DefsL]}
                                      end;
                                  false when N =:= {on_load, 0} ->
                                      ?INFO("ignored on_load/0", []),
                                      {NifsL, ToExposeL, DefsL};
                                  false when IsNifStub ->
                                      %% No override: NIF stubs either go
                                      %% through the undet_nif_stub hook or
                                      %% forward to the original module.
                                      UpdatedDef =
                                          case CtlMod:is_undet_nif(CtlState, OriginalModule, F, A) of
                                              true ->
                                                  InspectedBody = inspect(undet_nif_stub, [cerl:c_atom(element(1, N)), cerl:make_list(Vars)], Body, Opt),
                                                  cerl:update_c_fun(DT, Vars, InspectedBody);
                                              false ->
                                                  cerl:update_c_fun(
                                                    DT, Vars,
                                                    cerl:update_tree(DT, call,
                                                                     [[cerl:c_atom(OriginalModule)],
                                                                      [cerl:c_atom(F)],
                                                                      Vars]))
                                          end,
                                      {[N | NifsL], ToExposeL, [{NT, UpdatedDef} | DefsL]};
                                  false ->
                                      {NifsL, ToExposeL, [{NT, DT} | DefsL]}
                              end
                      end, {[], [], []}, cerl:module_defs(Tree)),
                NewExports = lists:usort(cerl:module_exports(Tree) ++
                                             lists:foldr(fun (VN, Acc) -> [cerl:c_var(VN) | Acc] end, [], ToExpose))
                    -- [cerl:c_var({on_load, 0})]
                    ,
                {cerl:update_c_module(
                   Tree,
                   cerl:c_atom(NewModule),
                   NewExports,
                   NewAttrs,
                   DefsWithForwarding),
                 CtlState, IS#ins_state{nifs = Nifs}};
            'fun' ->
                %% A fun whose body called nif_error is marked as a NIF stub
                %% for the module branch above; the flag is consumed here.
                case IS#ins_state.nif_error_found of
                    true ->
                        {cerl:add_ann([nif_stub], Tree),
                         CtlState, IS#ins_state{nif_error_found = false}};
                    false ->
                        {Tree, CtlState, IS}
                end;
            apply ->
                %% Local fname applies are left alone; dynamic applies are
                %% redirected through CtlMod:handle(..., apply, ...).
                Op = cerl:apply_op(Tree),
                case cerl:is_c_fname(Op) of
                    true ->
                        case cerl:fname_id(Op) of
                            %% Some modules (crypto I'm looking at you) wrap nif_error in their own function ...
                            nif_stub_error ->
                                {Tree, CtlState, IS#ins_state{nif_error_found = true}};
                            _ ->
                                ?VERBOSE_DEBUG("~w: ignore c_fname apply ~p", [?FUNCTION_NAME, Op]),
                                {Tree, CtlState, IS}
                        end;
                    false ->
                        OldArgs = cerl:make_list(cerl:apply_args(Tree)),
                        {inspect(apply, [Op, OldArgs], Tree, Opt), CtlState, IS}
                end;
            call ->
                %% Remote calls are redirected unless whitelisted (see
                %% whitelist_func/3) or special-cased below.
                Module = cerl:call_module(Tree),
                Name = cerl:call_name(Tree),
                Args = cerl:call_args(Tree),
                Arity = length(Args),
                ?DEBUG("~w: call ~p", [?FUNCTION_NAME, {Module, Name, Arity}]),
                {Tree1, ToInspect, _CtlState, IS0} =
                    case cerl:is_literal(Module) andalso cerl:is_literal(Name) andalso
                        {cerl:concrete(Module), cerl:concrete(Name), Arity} of
                        false ->
                            %% Non-literal M:F — always instrument.
                            {Tree, true, CtlState, IS};
                        {erlang, nif_error, _} ->
                            {Tree, false, CtlState, IS#ins_state{nif_error_found = true}};
                        {erlang, node, 0} ->
                            %% HACK: prevent compiling erlang:node() into node vm instruction
                            {cerl:update_tree(Tree, call,
                                              [[cerl:c_atom(erlang)], [cerl:c_atom(node)],
                                               [cerl:update_tree(Tree, call, [[cerl:c_atom(erlang)], [cerl:c_atom(self)], []])]]),
                             false, CtlState, IS};
                        {M, F, A} ->
                            case whitelist_func(M, F, A) of
                                true ->
                                    {Tree, false, CtlState, IS};
                                false ->
                                    %% Let the controller decide whether this
                                    %% call should be instrumented.
                                    {_A, _B} = CtlMod:to_handle(
                                                 CtlState, OriginalModule, NewModule,
                                                 {call, erlang, F, A}),
                                    {Tree, _A, _B, IS}
                            end
                    end,
                ?VERBOSE_DEBUG("~w: to instrument? ~p", [?FUNCTION_NAME, ToInspect]),
                case ToInspect of
                    true ->
                        {inspect(call, [Module, Name, cerl:make_list(Args)], Tree1, Opt), _CtlState, IS0};
                    false ->
                        {Tree1, _CtlState, IS0}
                end;
            'receive' ->
                %% receive ... after ... end becomes a case over the result of
                %% CtlMod:handle(..., 'receive', [PatFun, Timeout], ...); see
                %% receive_matching_fun/1 and transform_receive_clauses/2.
                PatFun = receive_matching_fun(Tree),
                Clauses = cerl:receive_clauses(Tree),
                Timeout = cerl:receive_timeout(Tree),
                TimeoutAction = cerl:receive_action(Tree),
                InspectedMsgTree = inspect('receive', [PatFun, Timeout], Tree, Opt),
                TransformedClauses = transform_receive_clauses(Clauses, TimeoutAction),
                Tree1 = cerl:update_tree(Tree, 'case', [[InspectedMsgTree], TransformedClauses]),
                {Tree1, CtlState, IS};
            _ ->
                {Tree, CtlState, IS}
        end,
    %% Branches may return {warn, Tree, Warning} to record a warning.
    {NewTree, NewWarnings} =
        case NewTreeAndMaybeWarn of
            {warn, NT, W} -> {NT, [W|Warnings]};
            _ -> {NewTreeAndMaybeWarn, Warnings}
        end,
    {NewTree, {Opt, CtlState0, NewIS, NewWarnings}}.
%% inspect wraps Tree in a call to
%% CtlMod:handle(OriginalModule, NewModule, Tag, Args, Ann), where Ann is the
%% annotation list of the original tree. This is the hook through which
%% instrumented code reaches the controller module.
inspect(Tag, Args, Tree, {CtlMod, OriginalModule, NewModule}) ->
    Ann = cerl:abstract(cerl:get_ann(Tree)),
    HandleArgs = [cerl:c_atom(OriginalModule),
                  cerl:c_atom(NewModule),
                  cerl:c_atom(Tag),
                  cerl:make_list(Args),
                  Ann],
    cerl:update_tree(Tree, call,
                     [[cerl:c_atom(CtlMod)], [cerl:c_atom(handle)], HandleArgs]).
%% receive_matching_fun builds a Core Erlang fun that takes a message and
%% returns true when any clause of the given `receive' expression matches it,
%% false otherwise (via the catch-all added by extract_patterns/1). When the
%% receive has no clauses, the literal atom `undefined' is produced instead.
receive_matching_fun(Tree) ->
    Msg = cerl:c_var(message),
    case cerl:receive_clauses(Tree) of
        [] ->
            %% this could happen in receive after X -> Y end
            cerl:ann_c_atom(cerl:get_ann(Tree), undefined);
        Clauses ->
            PatClauses = extract_patterns(Clauses),
            Body = cerl:update_tree(Tree, 'case', [[Msg], PatClauses]),
            cerl:update_tree(Tree, 'fun', [[Msg], [Body]])
    end.
%% extract_patterns turns receive clauses into match-test clauses: each
%% original clause keeps its pattern and guard but gets the body `true', and a
%% final catch-all clause returns `false'. Original clause bodies are
%% discarded.
extract_patterns(Clauses) ->
    Matched = [cerl:update_c_clause(Clause,
                                    cerl:clause_pats(Clause),
                                    cerl:clause_guard(Clause),
                                    cerl:c_atom(true))
               || Clause <- Clauses],
    CatchAll = cerl:c_clause([cerl:c_var(message)],
                             cerl:c_atom(true),
                             cerl:c_atom(false)),
    Matched ++ [CatchAll].
%% transform_receive_clauses rewrites receive clauses for the case expression
%% that replaces the original receive: each single pattern P becomes the cons
%% [message | P] (matching the tagged value the 'receive' handler returns),
%% and a final catch-all clause runs the original timeout action.
transform_receive_clauses(Clauses, TimeoutAction) ->
    transform_receive_clauses(Clauses, [], TimeoutAction).

transform_receive_clauses([], Acc, TimeoutAction) ->
    lists:reverse([cerl:update_c_clause(TimeoutAction,
                                        %%%% this wil trigger failure in OTP-22-dev
                                        %% [cerl:c_atom(timeout)],
                                        [cerl:c_var(anything)],
                                        cerl:c_atom(true), TimeoutAction) | Acc]);
transform_receive_clauses([Tree | Rest], Acc, TimeoutAction) ->
    %% Receive clauses always have exactly one pattern.
    [Pat] = cerl:clause_pats(Tree),
    Guard = cerl:clause_guard(Tree),
    Body = cerl:clause_body(Tree),
    transform_receive_clauses(
      Rest,
      [cerl:update_c_clause(
         Tree, [cerl:c_cons(cerl:c_atom(message), Pat)], Guard, Body)
       | Acc],
      TimeoutAction).
%% whitelist_func returns true for functions that are safe to call without
%% instrumentation: a fixed set of (mostly pure) stdlib modules, plus
%% erlang:error and the erlang guard/arith/bool/comparison BIFs.
whitelist_func(erlang, error, _Arity) ->
    true;
whitelist_func(erlang, F, A) ->
    erl_internal:guard_bif(F, A)
        orelse erl_internal:arith_op(F, A)
        orelse erl_internal:bool_op(F, A)
        orelse erl_internal:comp_op(F, A);
whitelist_func(Module, _F, _A) ->
    %% XXX could not instrument zlib properly - seems nif stub related,
    %% hence its presence in this whitelist.
    lists:member(Module,
                 [lists, queue, proplists, dict, orddict, math, maps,
                  gb_sets, gb_trees, sets, re, binary, string, unicode,
                  crypto, io_lib, zlib, filename, erl_internal, erl_parse,
                  erl_lint, erl_prim_loader, erl_anno, erl_scan,
                  erts_internal]).
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_task_status).
-behaviour(gen_server).
% This module is used to track the status of long running tasks.
% Long running tasks register themselves, via a call to add_task/1, and then
% update their status properties via update/1. The status of a task is a
% list of properties. Each property is a tuple, with the first element being
% either an atom or a binary and the second element must be an EJSON value. When
% a task updates its status, it can override some or all of its properties.
% The properties {started_on, UnitTimestamp}, {updated_on, UnixTimestamp} and
% {pid, ErlangPid} are automatically added by this module.
% When a tracked task dies, its status will be automatically removed from
% memory. To get the tasks list, call the all/0 function.
-export([start_link/0, stop/0]).
-export([all/0, add_task/1, update/1, get/1, set_update_frequency/1]).
-export([is_task_added/0]).
-export([init/1, terminate/2, code_change/3]).
-export([handle_call/3, handle_cast/2, handle_info/2]).
-include("couch_db.hrl").
-define(set(L, K, V), lists:keystore(K, 1, L, {K, V})).
%% start_link starts the task status tracker, registered locally as ?MODULE.
start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% stop asynchronously shuts the tracker down.
stop() ->
    gen_server:cast(?MODULE, stop).

%% all returns the status property list of every tracked task, each with a
%% `pid' property (binary) added.
all() ->
    gen_server:call(?MODULE, all, infinity).
%% add_task registers the calling process as a long running task. Props are
%% augmented with `started_on'/`updated_on' Unix timestamps and cached in the
%% process dictionary for later update/1 calls. Returns ok, or
%% {add_task_error, already_registered} if this process already has a task.
add_task(Props) ->
    %% Reset the throttle so the first update/1 always persists.
    put(task_status_update, {{0, 0, 0}, 0}),
    Ts = timestamp(),
    TaskProps = lists:ukeysort(
        1, [{started_on, Ts}, {updated_on, Ts} | Props]),
    put(task_status_props, TaskProps),
    gen_server:call(?MODULE, {add_task, TaskProps}, infinity).

%% is_task_added returns true if the calling process has registered a task.
is_task_added() ->
    undefined /= erlang:get(task_status_props).

%% set_update_frequency throttles status persistence for the calling task:
%% subsequent update/1 calls reach the tracker at most once every Msecs
%% milliseconds (stored as microseconds for timer:now_diff/2 comparisons).
set_update_frequency(Msecs) ->
    put(task_status_update, {{0, 0, 0}, Msecs * 1000}).
%% update merges Props into the calling task's cached status (new values
%% override existing ones) and persists the result if the configured update
%% frequency allows it.
update(Props) ->
    MergeProps = lists:ukeysort(1, Props),
    TaskProps = lists:ukeymerge(1, MergeProps, erlang:get(task_status_props)),
    put(task_status_props, TaskProps),
    maybe_persist(TaskProps).

%% get returns the value of the given status property of the calling task, or
%% a list of values when a list of properties is given.
get(Props) when is_list(Props) ->
    TaskProps = erlang:get(task_status_props),
    [couch_util:get_value(P, TaskProps) || P <- Props];
get(Prop) ->
    TaskProps = erlang:get(task_status_props),
    couch_util:get_value(Prop, TaskProps).
%% maybe_persist pushes the calling task's updated status to the tracker, but
%% no more often than the frequency configured with set_update_frequency/1
%% (throttle state lives in the process dictionary).
maybe_persist(TaskProps0) ->
    {LastUpdateTime, Frequency} = erlang:get(task_status_update),
    %% os:timestamp/0 replaces the deprecated erlang:now/0; timer:now_diff/2
    %% accepts either.
    Now = os:timestamp(),
    case timer:now_diff(Now, LastUpdateTime) >= Frequency of
        true ->
            put(task_status_update, {Now, Frequency}),
            TaskProps = ?set(TaskProps0, updated_on, timestamp(Now)),
            gen_server:cast(?MODULE, {update_status, self(), TaskProps});
        false ->
            ok
    end.
init([]) ->
    % The ets table maps task Pid -> task property list; it is owned by this
    % server and readable by other processes (protected).
    ets:new(?MODULE, [ordered_set, protected, named_table]),
    {ok, nil}.

terminate(_Reason,_State) ->
    ok.
%% add_task: monitor the caller so its entry can be removed when it dies, and
%% reject a second registration from the same process.
handle_call({add_task, TaskProps}, {From, _}, Server) ->
    case ets:lookup(?MODULE, From) of
        [] ->
            true = ets:insert(?MODULE, EtsTuple = {From, TaskProps}),
            erlang:monitor(process, From),
            gen_event:notify(couch_task_events, {created, EtsTuple}),
            {reply, ok, Server};
        [_] ->
            {reply, {add_task_error, already_registered}, Server}
    end;
%% all: dump every tracked task, tagging each property list with its owner
%% pid rendered as a binary.
handle_call(all, _, Server) ->
    All = [
        [{pid, ?l2b(pid_to_list(Pid))} | TaskProps]
        ||
        {Pid, TaskProps} <- ets:tab2list(?MODULE)
    ],
    {reply, All, Server}.
%% update_status: overwrite the task's properties, unless the task has already
%% terminated and its entry was removed by a 'DOWN' message.
handle_cast({update_status, Pid, NewProps}, Server) ->
    case ets:lookup(?MODULE, Pid) of
        [{Pid, _CurProps}] ->
            ?LOG_DEBUG("New task status for ~p: ~p", [Pid, NewProps]),
            true = ets:insert(?MODULE, {Pid, NewProps});
        _ ->
            % Task finished/died in the meanwhile and we must have received
            % a monitor message before this call - ignore.
            ok
    end,
    {noreply, Server};
handle_cast(stop, State) ->
    {stop, normal, State}.

%% A monitored task process died: drop its status and notify listeners.
handle_info({'DOWN', _MonitorRef, _Type, Pid, _Info}, Server) ->
    gen_event:notify(couch_task_events, {deleted, Pid}),
    ets:delete(?MODULE, Pid),
    {noreply, Server}.
%% code_change: no state migration is needed across upgrades.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%% timestamp returns the current wall-clock time as Unix seconds.
timestamp() ->
    %% os:timestamp/0 replaces the deprecated erlang:now/0.
    timestamp(os:timestamp()).

%% timestamp converts an Erlang timestamp triplet to Unix seconds, discarding
%% the microseconds component.
timestamp({Mega, Secs, _}) ->
    Mega * 1000000 + Secs.
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2021-2022 VMware, Inc. or its affiliates. All rights reserved.
%%
%% @doc Khepri API for transactional queries and updates.
%%
%% Transactions are anonymous functions which take no arguments, much like
%% what Mnesia supports. However, unlike with Mnesia, transaction functions in
%% Khepri are restricted:
%%
%% <ul>
%% <li>Calls to BIFs and other functions is limited to a set of whitelisted
%% APIs. See {@link is_remote_call_valid/3} for the complete list.</li>
%% <li>Sending or receiving messages is denied.</li>
%% </ul>
%%
%% The reason is that the transaction function must always have the exact same
%% outcome given its inputs. Indeed, the transaction function is executed on
%% every Ra cluster members participating in the consensus. The function must
%% therefore modify the Khepri state (the database) identically on all Ra
%% members. This is also true for Ra members joining the cluster later or
%% catching up after a network partition.
%%
%% To achieve that:
%% <ol>
%% <li>The code of the transaction function is extracted from the its initial
%% Erlang module. This way, the transaction function does not depend on the
%% initial module availability and is not affected by a module reload. See
%% {@link khepri_fun})</li>
%% <li>The code is verified to make sure it does not perform any denied
%% operations.</li>
%% <li>The extracted transaction function is stored as a Khepri state machine
%% command in the Ra journal to be replicated on all Ra members.</li>
%% </ol>
-module(khepri_tx).
-include_lib("stdlib/include/assert.hrl").
-include("include/khepri.hrl").
-include("src/internal.hrl").
-include("src/khepri_machine.hrl").
%% IMPORTANT: When adding a new khepri_tx function to be used inside a
%% transaction function:
%% 1. The function must be added to the whitelist in
%% `is_remote_call_valid()' in this file.
%% 2. If the function modifies the tree, it must be handled in
%% `is_standalone_fun_still_needed()' is this file too.
-export([put/2, put/3,
get/1, get/2,
exists/1,
has_data/1,
list/1,
find/2,
delete/1,
abort/1,
is_transaction/0]).
%% For internal user only.
-export([to_standalone_fun/2,
run/3]).
-compile({no_auto_import, [get/1, put/2, erase/1]}).
-type tx_fun_result() :: any() | no_return().
-type tx_fun() :: fun(() -> tx_fun_result()).
-type tx_fun_bindings() :: #{Name :: atom() => Value :: any()}.
-type tx_abort() :: {aborted, any()}.
-type tx_props() :: #{allow_updates := boolean()}.
-export_type([tx_fun/0,
tx_fun_bindings/0,
tx_fun_result/0,
tx_abort/0]).
-spec put(PathPattern, Payload) -> Result when
      PathPattern :: khepri_path:pattern(),
      Payload :: khepri_machine:payload(),
      Result :: khepri_machine:result().
%% @doc Creates or modifies a specific tree node in the tree structure.

put(PathPattern, Payload) ->
    put(PathPattern, Payload, #{}).

-spec put(PathPattern, Payload, Extra) -> Result when
      PathPattern :: khepri_path:pattern(),
      Payload :: khepri_machine:payload(),
      Extra :: #{keep_while => khepri_condition:keep_while()},
      Result :: khepri_machine:result().
%% @doc Creates or modifies a specific tree node in the tree structure.
%%
%% Aborts the transaction if the path pattern is invalid, if the transaction
%% is read-only, or (last clause) if the payload is not a valid Khepri
%% payload.

put(PathPattern, Payload, Extra) when ?IS_KHEPRI_PAYLOAD(Payload) ->
    ensure_path_pattern_is_valid(PathPattern),
    ensure_updates_are_allowed(),
    {State, SideEffects} = get_tx_state(),
    Ret = khepri_machine:insert_or_update_node(
            State, PathPattern, Payload, Extra),
    %% The update may or may not produce new side effects; either way the
    %% updated machine state is written back to the transaction context.
    case Ret of
        {NewState, Result, NewSideEffects} ->
            set_tx_state(NewState, SideEffects ++ NewSideEffects);
        {NewState, Result} ->
            set_tx_state(NewState, SideEffects)
    end,
    Result;
put(PathPattern, Payload, _Extra) ->
    abort({invalid_payload, PathPattern, Payload}).
%% get returns the tree nodes matching PathPattern without modifying the
%% tree; usable in both read-only and read-write transactions.
get(PathPattern) ->
    get(PathPattern, #{}).

get(PathPattern, Options) ->
    ensure_path_pattern_is_valid(PathPattern),
    %% Query against the current (possibly already updated) in-transaction
    %% machine state.
    {#khepri_machine{root = Root}, _SideEffects} = get_tx_state(),
    khepri_machine:find_matching_nodes(Root, PathPattern, Options).
-spec exists(Path) -> Exists when
      Path :: khepri_path:pattern(),
      Exists :: boolean().
%% @doc Returns true if `Path' matches an existing specific tree node.

exists(Path) ->
    case get(Path, #{expect_specific_node => true}) of
        {ok, _} -> true;
        _ -> false
    end.

-spec has_data(Path) -> HasData when
      Path :: khepri_path:pattern(),
      HasData :: boolean().
%% @doc Returns true if the tree node matching `Path' exists and carries a
%% data payload.

has_data(Path) ->
    case get(Path, #{expect_specific_node => true}) of
        {ok, Result} ->
            %% `expect_specific_node' means the result map holds exactly one
            %% entry on success.
            [NodeProps] = maps:values(Result),
            maps:is_key(data, NodeProps);
        _ ->
            false
    end.
%% list returns the direct child nodes under Path (Path with a trailing
%% wildcard component).
list(Path) ->
    get(Path ++ [?STAR]).

%% find returns the nodes at any depth under Path that also satisfy
%% Condition.
find(Path, Condition) ->
    Recursive = #if_all{conditions = [?STAR_STAR, Condition]},
    get(Path ++ [Recursive]).
%% delete removes all tree nodes matching PathPattern, updating the cached
%% transaction state. Aborts if the pattern is invalid or the transaction is
%% read-only.
delete(PathPattern) ->
    ensure_path_pattern_is_valid(PathPattern),
    ensure_updates_are_allowed(),
    {State, SideEffects} = get_tx_state(),
    Ret = khepri_machine:delete_matching_nodes(State, PathPattern),
    %% Deletion may or may not produce new side effects; either way the
    %% updated machine state is written back to the transaction context.
    case Ret of
        {NewState, Result, NewSideEffects} ->
            set_tx_state(NewState, SideEffects ++ NewSideEffects);
        {NewState, Result} ->
            set_tx_state(NewState, SideEffects)
    end,
    Result.
-spec abort(Reason) -> no_return() when
      Reason :: any().
%% @doc Aborts the transaction; the transaction's caller gets
%% `{aborted, Reason}' back.

abort(Reason) ->
    throw({aborted, Reason}).

-spec is_transaction() -> boolean().
%% @doc Returns true if the calling process is currently executing a
%% transaction function, detected via the transaction state stored in the
%% process dictionary.

is_transaction() ->
    StateAndSideEffects = erlang:get(?TX_STATE_KEY),
    case StateAndSideEffects of
        {#khepri_machine{}, _SideEffects} -> true;
        _ -> false
    end.
-spec to_standalone_fun(Fun, ReadWrite) -> StandaloneFun | no_return() when
      Fun :: fun(),
      ReadWrite :: ro | rw | auto,
      StandaloneFun :: khepri_fun:standalone_fun().
%% @doc Extracts `Fun' into a standalone function suitable for replication in
%% the Ra journal, verifying along the way that it only performs permitted
%% operations (see the callbacks below). Read-only transactions return the
%% fun unmodified since they are never replicated. Throws
%% `{invalid_tx_fun, Error}' if extraction rejects the fun.

to_standalone_fun(Fun, ReadWrite)
  when is_function(Fun, 0) andalso
       (ReadWrite =:= auto orelse ReadWrite =:= rw) ->
    Options =
        #{ensure_instruction_is_permitted =>
              fun ensure_instruction_is_permitted/1,
          should_process_function =>
              fun should_process_function/4,
          is_standalone_fun_still_needed =>
              fun(Params) -> is_standalone_fun_still_needed(Params, ReadWrite) end},
    try
        khepri_fun:to_standalone_fun(Fun, Options)
    catch
        throw:Error ->
            throw({invalid_tx_fun, Error})
    end;
to_standalone_fun(Fun, ro) ->
    Fun.
%% ensure_instruction_is_permitted checks a single BEAM assembly instruction
%% of the extracted transaction fun. Plain data manipulation and control flow
%% are allowed; dynamic apply, message sending and message receiving are
%% denied because they would make the transaction non-deterministic across Ra
%% members. BIF calls are further checked against the call whitelist. Throws
%% a descriptive reason for any denied or unknown instruction.
ensure_instruction_is_permitted({allocate, _, _}) ->
    ok;
ensure_instruction_is_permitted({allocate_zero, _, _}) ->
    ok;
ensure_instruction_is_permitted({allocate_heap, _, _, _}) ->
    ok;
%% Dynamic apply would escape static call-site verification.
ensure_instruction_is_permitted({apply, _}) ->
    throw(dynamic_apply_denied);
ensure_instruction_is_permitted({apply_last, _, _}) ->
    throw(dynamic_apply_denied);
ensure_instruction_is_permitted({badmatch, _}) ->
    ok;
ensure_instruction_is_permitted({bif, Bif, _, Args, _}) ->
    Arity = length(Args),
    ensure_bif_is_valid(Bif, Arity);
%% Binary construction and matching instructions are all permitted.
ensure_instruction_is_permitted({bs_add, _, _, _}) ->
    ok;
ensure_instruction_is_permitted({bs_append, _, _, _, _, _, _, _, _}) ->
    ok;
ensure_instruction_is_permitted({bs_init2, _, _, _, _, _, _}) ->
    ok;
ensure_instruction_is_permitted({BsPutSomething, _, _, _, _, _})
  when BsPutSomething =:= bs_put_binary orelse
       BsPutSomething =:= bs_put_integer ->
    ok;
ensure_instruction_is_permitted({bs_put_string, _, _}) ->
    ok;
ensure_instruction_is_permitted({bs_get_position, _, _, _}) ->
    ok;
ensure_instruction_is_permitted({bs_set_position, _, _}) ->
    ok;
ensure_instruction_is_permitted({bs_get_tail, _, _, _}) ->
    ok;
ensure_instruction_is_permitted({bs_start_match4, _, _, _, _}) ->
    ok;
%% Static calls are fine; the called functions themselves are vetted via
%% should_process_function/4.
ensure_instruction_is_permitted({Call, _, _})
  when Call =:= call orelse Call =:= call_only orelse
       Call =:= call_ext orelse Call =:= call_ext_only ->
    ok;
ensure_instruction_is_permitted({Call, _, _, _})
  when Call =:= call_last orelse Call =:= call_ext_last ->
    ok;
ensure_instruction_is_permitted({call_fun, _}) ->
    ok;
ensure_instruction_is_permitted({case_end, _}) ->
    ok;
ensure_instruction_is_permitted({'catch', _, _}) ->
    ok;
ensure_instruction_is_permitted({catch_end, _}) ->
    ok;
ensure_instruction_is_permitted({deallocate, _}) ->
    ok;
ensure_instruction_is_permitted({func_info, _, _, _}) ->
    ok;
ensure_instruction_is_permitted({gc_bif, Bif, _, Arity, _, _}) ->
    ensure_bif_is_valid(Bif, Arity);
ensure_instruction_is_permitted({get_hd, _, _}) ->
    ok;
ensure_instruction_is_permitted({get_tl, _, _}) ->
    ok;
ensure_instruction_is_permitted({get_tuple_element, _, _, _}) ->
    ok;
ensure_instruction_is_permitted({get_map_elements, _, _, _}) ->
    ok;
ensure_instruction_is_permitted({get_list, _, _, _}) ->
    ok;
ensure_instruction_is_permitted(if_end) ->
    ok;
ensure_instruction_is_permitted({init, _}) ->
    ok;
ensure_instruction_is_permitted({init_yregs, _}) ->
    ok;
ensure_instruction_is_permitted({jump, _}) ->
    ok;
ensure_instruction_is_permitted({move, _, _}) ->
    ok;
%% Receiving messages would block and depend on the local mailbox.
ensure_instruction_is_permitted({loop_rec, _, _}) ->
    throw(receiving_message_denied);
ensure_instruction_is_permitted({loop_rec_env, _}) ->
    throw(receiving_message_denied);
ensure_instruction_is_permitted({make_fun2, _, _, _, _}) ->
    ok;
ensure_instruction_is_permitted({make_fun3, _, _, _, _, _}) ->
    ok;
ensure_instruction_is_permitted({put_list, _, _, _}) ->
    ok;
ensure_instruction_is_permitted({put_map_assoc, _, _, _, _, _}) ->
    ok;
ensure_instruction_is_permitted({put_tuple2, _, _}) ->
    ok;
ensure_instruction_is_permitted(raw_raise) ->
    ok;
ensure_instruction_is_permitted(remove_message) ->
    throw(receiving_message_denied);
ensure_instruction_is_permitted(return) ->
    ok;
%% Sending messages is a side effect that would be replayed on every member.
ensure_instruction_is_permitted(send) ->
    throw(sending_message_denied);
ensure_instruction_is_permitted({select_tuple_arity, _, _, {list, _}}) ->
    ok;
ensure_instruction_is_permitted({select_val, _, _, {list, _}}) ->
    ok;
ensure_instruction_is_permitted({set_tuple_element, _, _, _}) ->
    ok;
ensure_instruction_is_permitted({swap, _, _}) ->
    ok;
ensure_instruction_is_permitted({test, _, _, _}) ->
    ok;
ensure_instruction_is_permitted({test, _, _, _, _}) ->
    ok;
ensure_instruction_is_permitted({test, _, _, _, _, _}) ->
    ok;
ensure_instruction_is_permitted({test_heap, _, _}) ->
    ok;
ensure_instruction_is_permitted({trim, _, _}) ->
    ok;
ensure_instruction_is_permitted({'try', _, _}) ->
    ok;
ensure_instruction_is_permitted({try_end, _}) ->
    ok;
ensure_instruction_is_permitted({try_case, _}) ->
    ok;
%% Deny by default: anything not explicitly whitelisted above is rejected.
ensure_instruction_is_permitted(Unknown) ->
    throw({unknown_instruction, Unknown}).
%% Decides whether the code of a called function must itself be extracted
%% and processed. Returns `true' when the target module's code is collected
%% (and the call crosses module boundaries to an exported function), `false'
%% when the module is left alone but the call is on the allowlist. Throws
%% `{call_to_unexported_function, MFA}' or `{call_denied, MFA}' otherwise.
should_process_function(Module, Name, Arity, FromModule) ->
    case khepri_utils:should_collect_code_for_module(Module) of
        false ->
            %% Code is not collected for this module: the call must still be
            %% one of the permitted remote calls.
            ensure_call_is_valid(Module, Name, Arity),
            false;
        true when Module =:= FromModule ->
            %% Local (same-module) call: nothing more to verify here.
            true;
        true ->
            %% Cross-module call into collected code: the target must exist
            %% and be exported. Load the module first so that
            %% erlang:function_exported/3 gives a meaningful answer.
            _ = code:ensure_loaded(Module),
            case erlang:function_exported(Module, Name, Arity) of
                true ->
                    true;
                false ->
                    throw({call_to_unexported_function,
                           {Module, Name, Arity}})
            end
    end.
%% Throws `{call_denied, {Module, Name, Arity}}' unless the remote call is
%% on the allowlist checked by is_remote_call_valid/3.
ensure_call_is_valid(Module, Name, Arity) ->
    IsValid = is_remote_call_valid(Module, Name, Arity),
    if
        IsValid -> ok;
        true -> throw({call_denied, {Module, Name, Arity}})
    end.
%% Same check as ensure_call_is_valid/3 for a BIF reached through a
%% `gc_bif' instruction, but a denied call is reported as `{Bif, Arity}'
%% (without the implicit `erlang' module). Inlines the allowlist lookup the
%% original performed via ensure_call_is_valid/3 + rethrow.
ensure_bif_is_valid(Bif, Arity) ->
    case is_remote_call_valid(erlang, Bif, Arity) of
        true -> ok;
        false -> throw({call_denied, {Bif, Arity}})
    end.
%% Allowlist of remote calls permitted inside a transaction function.
%% Anything not explicitly listed here is denied. The auto-generated
%% module_info/0,1 is denied for every module and therefore checked first.
is_remote_call_valid(_, module_info, _) -> false;
is_remote_call_valid(khepri, no_payload, 0) -> true;
is_remote_call_valid(khepri, data_payload, 1) -> true;
is_remote_call_valid(khepri_tx, Name, _) ->
    %% The khepri_tx API itself, any arity.
    lists:member(Name,
                 [put, get, exists, has_data, list, find, delete, abort,
                  is_transaction]);
is_remote_call_valid(erlang, Name, _) ->
    %% Deterministic, side-effect-free (or purely local) BIFs only; no
    %% process/message/clock functions.
    %% FIXME: What about changes to the marshalling code between versions of
    %% Erlang? (term_to_binary/term_to_iovec)
    lists:member(Name,
                 [abs, adler32, adler32_combine, append_element,
                  atom_to_binary, atom_to_list, binary_part, binary_to_atom,
                  binary_to_float, binary_to_integer, binary_to_list,
                  binary_to_term, bit_size, bitstring_to_list, byte_size,
                  ceil, crc32, crc32_combine, delete_element, element,
                  error, exit, external_size, float, float_to_binary,
                  float_to_list, hd, insert_element, integer_to_binary,
                  integer_to_list, iolist_size, iolist_to_binary,
                  iolist_to_iovec, is_atom, is_binary, is_bitstring,
                  is_boolean, is_float, is_integer, is_list, is_map,
                  is_map_key, is_number, is_pid, is_record, is_reference,
                  is_tuple, length, list_to_atom, list_to_binary,
                  list_to_bitstring, list_to_float, list_to_integer,
                  list_to_pid, list_to_tuple, make_tuple, map_get, map_size,
                  max, md5, md5_final, md5_init, md5_update, min, 'not',
                  phash2, pid_to_list, raise, round, setelement, size,
                  split_binary, term_to_binary, term_to_iovec, throw, tl,
                  tuple_size, tuple_to_list, '++', '--', '+', '>=', '=<',
                  '>', '<', '==', '/=', '=:=', '=/=']);
is_remote_call_valid(io_lib, format, _) -> true;
is_remote_call_valid(logger, Name, _) ->
    %% Only the log-emitting functions, not logger configuration.
    lists:member(Name,
                 [alert, critical, debug, emergency, error, info, notice,
                  warning]);
is_remote_call_valid(re, Name, _) ->
    lists:member(Name, [compile, inspect, replace, run, split]);
is_remote_call_valid(Module, _, _) when Module =:= dict;
                                        Module =:= lists;
                                        Module =:= maps;
                                        Module =:= orddict;
                                        Module =:= ordsets;
                                        Module =:= proplists;
                                        Module =:= sets;
                                        Module =:= string;
                                        Module =:= unicode ->
    %% Pure data-structure/string modules: every function is allowed
    %% (module_info excepted, handled by the first clause).
    true;
is_remote_call_valid(_, _, _) -> false.
%% Decides whether the extracted standalone function is still needed.
%% For an explicit `rw' transaction it always is; in `auto' mode it is
%% needed only when the collected calls include one of the khepri_tx write
%% operations (put/2, put/3 or delete/1).
is_standalone_fun_still_needed(_Info, rw) ->
    true;
is_standalone_fun_still_needed(#{calls := Calls}, auto) ->
    WriteMFAs = [{khepri_tx, put, 2},
                 {khepri_tx, put, 3},
                 {khepri_tx, delete, 1}],
    lists:any(fun(MFA) ->
                  case Calls of
                      #{MFA := _} -> true;
                      _ -> false
                  end
              end,
              WriteMFAs).
-spec run(State, Fun, AllowUpdates) -> Ret when
      State :: khepri_machine:state(),
      Fun :: tx_fun(),
      AllowUpdates :: boolean(),
      Ret :: {State, tx_fun_result() | Exception, SideEffects},
      Exception :: {exception, Class, Reason, Stacktrace},
      Class :: error | exit | throw,
      Reason :: any(),
      Stacktrace :: list(),
      SideEffects :: ra_machine:effects().
%% @private
%% Executes the transaction function `Fun', stashing the machine state and
%% the transaction properties in the process dictionary so the helpers below
%% (get_tx_state/0, set_tx_state/2, get_tx_props/0) can reach them.
run(State, Fun, AllowUpdates) ->
    SideEffects = [],
    TxProps = #{allow_updates => AllowUpdates},
    %% erlang:put/2 returns the previous value for the key; asserting
    %% `undefined' guards against nested or leaked transaction context.
    NoState = erlang:put(?TX_STATE_KEY, {State, SideEffects}),
    NoProps = erlang:put(?TX_PROPS, TxProps),
    ?assertEqual(undefined, NoState),
    ?assertEqual(undefined, NoProps),
    try
        Ret = Fun(),
        %% The transaction may have replaced the stashed state/side effects
        %% via set_tx_state/2; read the final values back out and sanity-check
        %% their shape before returning them.
        {NewState, NewSideEffects} = erlang:erase(?TX_STATE_KEY),
        NewTxProps = erlang:erase(?TX_PROPS),
        ?assert(is_record(NewState, khepri_machine)),
        ?assertEqual(TxProps, NewTxProps),
        {NewState, Ret, NewSideEffects}
    catch
        Class:Reason:Stacktrace ->
            %% Always clean the process dictionary; on failure, report the
            %% exception alongside the ORIGINAL state and no side effects.
            _ = erlang:erase(?TX_STATE_KEY),
            _ = erlang:erase(?TX_PROPS),
            Exception = {exception, Class, Reason, Stacktrace},
            {State, Exception, []}
    end.
-spec get_tx_state() -> {State, SideEffects} when
      State :: khepri_machine:state(),
      SideEffects :: ra_machine:effects().
%% @private
%% Reads the stashed machine state and accumulated side effects back from
%% the process dictionary. The inline #khepri_machine{} match asserts both
%% that a transaction is running (otherwise erlang:get/1 returns `undefined'
%% and this badmatches) and that the stored term has the expected shape.
get_tx_state() ->
    StateAndSideEffects =
        {#khepri_machine{}, _SideEffects} = erlang:get(?TX_STATE_KEY),
    StateAndSideEffects.
-spec set_tx_state(State, SideEffects) -> ok when
      State :: khepri_machine:state(),
      SideEffects :: ra_machine:effects().
%% @private
%% Replaces the stashed machine state and side effects in the process
%% dictionary, for run/3 to read back once the transaction function returns.
%% The #khepri_machine{} head pattern rejects malformed states up front.
set_tx_state(#khepri_machine{} = NewState, SideEffects) ->
    _ = erlang:put(?TX_STATE_KEY, {NewState, SideEffects}),
    ok.
-spec get_tx_props() -> TxProps when
      TxProps :: tx_props().
%% @private
%% Reads the transaction properties stashed by run/3. Note: returns
%% `undefined' when no transaction is running (plain erlang:get/1 semantics).
get_tx_props() ->
    erlang:get(?TX_PROPS).
-spec ensure_path_pattern_is_valid(PathPattern) -> ok | no_return() when
      PathPattern :: khepri_path:pattern().
%% Aborts the transaction when the given path pattern is malformed;
%% khepri_path:is_valid/1 returns `{false, Bad}' carrying the invalid part.
ensure_path_pattern_is_valid(PathPattern) ->
    ValidationResult = khepri_path:is_valid(PathPattern),
    case ValidationResult of
        true -> ok;
        {false, BadPath} -> abort({invalid_path, BadPath})
    end.
-spec ensure_updates_are_allowed() -> ok | no_return().
%% @private
%% Aborts the transaction with `store_update_denied' when it was started
%% without update permission (the `allow_updates' property set by run/3).
%% Fix: removed stray non-Erlang residue appended after the final `end.'.
ensure_updates_are_allowed() ->
    case get_tx_props() of
        #{allow_updates := true} -> ok;
        #{allow_updates := false} -> abort(store_update_denied)
    end.
-module(coello_basic_spec).
-include_lib("espec/include/espec.hrl").
-include_lib("hamcrest/include/hamcrest.hrl").
-include_lib("amqp_client/include/amqp_client.hrl").
%% Behaviour specification for coello_basic. amqp_channel is mocked with
%% meck for the whole run, so no real AMQP broker is needed.
%% Fixes: removed stray dataset residue appended after the final `end).';
%% restored the ReplyTo binary in "publish/5" that had been mangled by a
%% credential scrubber (it matches the sibling test's <<"respondeaqui">>).
spec() ->
    before_all(fun() ->
        meck:new([amqp_channel]),
        %% Both cast/2 and cast/3 simply succeed; assertions below check
        %% the exact arguments they were called with.
        meck:expect(amqp_channel, cast, 2, ok),
        meck:expect(amqp_channel, cast, 3, ok)
    end),
    after_all(fun() ->
        meck:unload([amqp_channel])
    end),
    describe("publish/4", fun() ->
        it("should publish a binary message", fun() ->
            %% NOTE(review): crypto:rand_bytes/1 is removed in modern OTP;
            %% crypto:strong_rand_bytes/1 is its replacement — confirm the
            %% targeted OTP version before changing it here.
            Data = crypto:rand_bytes(30),
            Msg = #amqp_msg{payload = Data},
            Method = #'basic.publish'{exchange = exchange, routing_key = routing_key},
            ok = coello_basic:publish(channel, Data, exchange, routing_key),
            assert_that(meck:called(amqp_channel, cast, [channel, Method, Msg]), is(true))
        end),
        it("should publish a text message", fun() ->
            %% A string payload must be converted to a binary by publish/4.
            Msg = #amqp_msg{payload = <<"abc">>},
            Method = #'basic.publish'{exchange = exchange, routing_key = routing_key},
            ok = coello_basic:publish(channel, "abc", exchange, routing_key),
            assert_that(meck:called(amqp_channel, cast, [channel, Method, Msg]), is(true))
        end)
    end),
    describe("publish/5", fun() ->
        it("should publish a binary message", fun() ->
            %% publish/5 additionally carries a reply-to queue in the props.
            ReplyTo = <<"respondeaqui">>,
            Data = crypto:rand_bytes(30),
            Msg = #amqp_msg{payload = Data, props = #'P_basic'{reply_to = ReplyTo}},
            Method = #'basic.publish'{exchange = exchange, routing_key = routing_key},
            ok = coello_basic:publish(channel, Data, exchange, routing_key, ReplyTo),
            assert_that(meck:called(amqp_channel, cast, [channel, Method, Msg]), is(true))
        end),
        it("should publish a text message", fun() ->
            Data = "abc",
            ReplyTo = <<"respondeaqui">>,
            Msg = #amqp_msg{payload = <<"abc">>, props = #'P_basic'{reply_to = ReplyTo}},
            Method = #'basic.publish'{exchange = exchange, routing_key = routing_key},
            ok = coello_basic:publish(channel, Data, exchange, routing_key, ReplyTo),
            assert_that(meck:called(amqp_channel, cast, [channel, Method, Msg]), is(true))
        end)
    end),
    describe("consume/3", fun() ->
        it("should consume messages and invoke the passed in callback", fun() ->
            meck:expect(amqp_channel, subscribe, 3, #'basic.consume_ok'{consumer_tag = 1234}),
            QueueName = <<"queue">>,
            Method = #'basic.consume'{queue = QueueName},
            Pid = self(),
            %% The callback forwards a marker message back to this process.
            {ConsumerPid, _} = coello_basic:consume(channel, QueueName, fun(_, _) -> Pid ! on_message end),
            %% Simulate a broker delivery to the consumer process.
            ConsumerPid ! {#'basic.deliver'{}, #amqp_msg{}},
            assert_that(
                receive
                    on_message ->
                        true
                after 500 ->
                    false
                end, is(true)),
            assert_that(meck:called(amqp_channel, subscribe, [channel, Method, ConsumerPid]), is(true))
        end)
    end),
    describe("consume/4", fun() ->
        describe("when option 'no_ack' is present", fun() ->
            it("should set its value in the amqp method", fun() ->
                meck:new(coello_consumer),
                meck:expect(coello_consumer, start, 1, consumer),
                meck:expect(amqp_channel, subscribe, 3, #'basic.consume_ok'{consumer_tag = 1234}),
                Options = [{no_ack, true}],
                QueueName = <<"queue">>,
                Method = #'basic.consume'{queue = QueueName, no_ack = true},
                coello_basic:consume(channel, QueueName, fun(_, _) -> ok end, Options),
                assert_that(meck:called(amqp_channel, subscribe, [channel, Method, '_']), is(true)),
                meck:unload(coello_consumer)
            end)
        end)
    end),
    describe("cancel", fun() ->
        it("should cancel a consumer", fun() ->
            meck:new(coello_consumer),
            meck:expect(amqp_channel, call, 2, ok),
            meck:expect(coello_consumer, stop, 1, ok),
            Method = #'basic.cancel'{consumer_tag = consumer_tag},
            ok = coello_basic:cancel(channel, {consumer, consumer_tag}),
            assert_that(meck:called(amqp_channel, call, [channel, Method]), is(true)),
            assert_that(meck:called(coello_consumer, stop, [consumer]), is(true)),
            meck:unload(coello_consumer)
        end)
    end),
    describe("ack", fun() ->
        it("should ack a message", fun() ->
            DeliveryTag = tag,
            Multiple = 0,
            Method = #'basic.ack'{delivery_tag = DeliveryTag, multiple = Multiple},
            ok = coello_basic:ack(channel, DeliveryTag, Multiple),
            assert_that(meck:called(amqp_channel, cast, [channel, Method]), is(true))
        end)
    end),
    describe("reject", fun() ->
        it("should reject a message", fun() ->
            DeliveryTag = tag,
            Requeue = true,
            Method = #'basic.reject'{delivery_tag = DeliveryTag, requeue = Requeue},
            ok = coello_basic:reject(channel, DeliveryTag, Requeue),
            assert_that(meck:called(amqp_channel, cast, [channel, Method]), is(true))
        end)
    end).
%% roman -- Roman Numerals in Erlang
%%
%% @doc Allows you to add and subtract Roman numerals in pure Erlang, with
%% Roman numeral "literals":
%%
%% ```
%% roman:add(ii, ii) =:= iv.
%% roman:subtract(v, iii) =:= ii.
%% '''
%%
%% Finally! A way to do calculations on movie copyright years!
%%
%% ```
%% roman:subtract(mmi, mcmlxviii) =:= xxxiii.
%% '''
%%
%% At no point does any of this code attempt to convert anything into an
%% integer. In fact, apart from Erlang's function_notation/0, there are no
%% digits in the source code whatsoever. All arithmetic is done
%% "symbolically", and without using your computer's integer arithmetic
%% hardware. Give those adders a break!
%%
%% Other fun facts:
%% - This code has no concept of zero.
%% - Since this code only uses "standardized" Roman numerals, the maximum
%% value is ⅯⅯⅯⅭⅯⅩⅭⅠⅩ (three thousand, nine hundred ninety nine).
%%
%% This gives a total range of Ⅰ to ⅯⅯⅯⅭⅯⅩⅭⅠⅩ, inclusive.
%%
%% Also, I used Google Translate to name as many variables as I could in
%% Latin, because... quidnī?
%% (I don't remember enough from tenth grade Latin to know whether any of the
%% translated names are GOOD, so they are most likely BAD).
-module(roman).
-export([add/2, subtract/2]).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
%% ====================================================================
%% Public API
%% ====================================================================
%% @doc Adds two Roman numerals together, returning the sum as an atom.
add(A, B) ->
    scribe(add_internus(parse(A), parse(B))).
%% @doc Subtracts the second Roman numeral from the first, returning the
%% difference as an atom.
subtract(A, B) ->
    scribe(sub_internus(parse(A), parse(B))).
%% ====================================================================
%% Internal Functions
%% ====================================================================
%% The internal representation of roman numerals is a list of decimal places.
%% Decimal places are ordered in INCREASING magnitude. That is, the lowest
%% magnitude item is first, and the highest magnitude item is last.
%%
%% This is **technically** not a place value system, since if a decimal place
%% does not participate in a number, it is absent from the list, rather than
%% having a value of zero. It is also **technically** annoying.
%%
%% Examples of valid internal representations:
%%
%% i ≡ [{unus, i}]
%% x ≡ [{decem, x}]
%% c ≡ [{centum, c}]
%% m ≡ [{mille, m}]
%%
%% Values are in increasing magnitude, and intervening decimal places are
%% absent:
%%
%% mdcvii ≡ [{unus, vii}, {decem, dc}, {mille, m}]
%%
%% Notice the lack of a {decem, _} term!
-type numerum() ::[valorem(), ...].
%% A decimal place can be ones, tens, hundreds, thousands:
-type valorem() :: unus() | decem() | centum() | mille().
%% And these enumerate all possible value within that decimal place:
-type unus() :: {unus, i | ii | iii | iv | v | vi | vii | viii | ix}.
-type decem() :: {decem, x | xx | xxx | xl | l | lx | lxx | lxxx | xc}.
-type centum() :: {centum, c | cc | ccc | cd | d | dc | dcc | dccc | cm}.
%% note bene: in "standardized" Roman numerals, there's no way to count from
%% four thousand and beyond:
-type mille() :: {mille, m | mm | mmm }.
%% @doc Writes a Roman numeral (internal representation) back out as an atom.
-spec scribe(numerum()) -> atom().
scribe(Numerum) ->
    %% The internal representation stores decimal places in INCREASING
    %% magnitude, so reverse before concatenating the per-place numerals.
    Litterae = lists:append([atom_to_list(Valorem)
                             || {_Magnitudo, Valorem} <- lists:reverse(Numerum)]),
    list_to_atom(Litterae).
%% ====================================================================
%% The way adding works is by decomposing the addition into
%% a bunch of adding ones.
%%
%% Say you want the sum x + v (ten plus five). We know:
%% - how to increment a number by one
%% - how to decrement a number by one
%%
%% Why, that's the same as the sum of one more than first and one less than
%% the second:
%%
%% x + v =:= (x + i) + (v - i)
%% x + v =:= xi + iv
%%
%% Continue this process; at each step, the first number gets one bigger, and
%% the second number gets one smaller:
%%
%% x + v =:= (x + i) + (v - i)
%% x + v =:= xi + iv
%% xi + iv =:= (xi + i) + (iv - i)
%% xi + iv =:= xii + iii
%% xii + iii =:= (xii + i) + (iii - i)
%% xii + iii =:= xiii + ii
%%
%% Eventually, the second number becomes one. Since we know how to add one to
%% a number, we return the end result
%%
%% x + v =:= xiv + i
%% x + v =:= xv
%%
%% @see increment/1
%% @see decrement/1
-spec add_internus(numerum(), numerum()) -> numerum().
add_internus(A, [{unus, i}]) ->
    %% Base case: adding one is a single increment.
    increment(A);
add_internus(A, B) ->
    %% Move one unit from B to A each step; the sum stays invariant.
    add_internus(increment(A), decrement(B)).
%% Subtracting works similarly to adding.
%% In this case, we are taking away from both numbers by the same amount.
%%
%% We know how to take away one from a number. So we repeat this process:
%%
%% x - v =:= (x - i) - (v - i)
%% x - v =:= ix - iv
%%
%% ix - iv =:= (ix - i) - (iv - i)
%% ix - iv =:= viii - iii
%%
%% viii - iii =:= (viii - i) - (iii - i)
%% viii - iii =:= vii - ii
%%
%% Eventually, the second number becomes one. Since we know how to subtract
%% one from a number, we return the result:
%%
%% x - v =:= vi - i
%% x - v =:= v
%%
%% @see add_internus/2
%% @see decrement/1
-spec sub_internus(numerum(), numerum()) -> numerum().
sub_internus(A, [{unus, i}]) ->
    %% Base case: subtracting one is a single decrement.
    decrement(A);
sub_internus(A, B) ->
    %% Take one away from both operands; the difference stays invariant.
    sub_internus(decrement(A), decrement(B)).
%% ====================================================================
%% @doc Adds one to a number in the internal representation.
-spec increment(numerum()) -> numerum().
increment([{unus, _} | _] = Numerum) ->
    %% There is already a ones place: bump it (carrying if it wraps).
    incrementum_generalis(Numerum);
increment(Numerum) ->
    %% Lowest place is tens or larger: adding one just prepends {unus, i}.
    [primus(unus) | Numerum].
%% Increments the lowest decimal place present, propagating a carry into
%% the higher places when this place's numeral wraps around (e.g. ix -> x).
incrementum_generalis([{Magnitudo, Valorem} | Etc]) ->
    case succssorem(Valorem) of
        carry ->
            %% This place vanishes; the next place up absorbs the carry.
            propagate_carry(Magnitudo, Etc);
        Proximus ->
            [{Magnitudo, Proximus} | Etc]
    end.
%% Handles a carry out of decimal place `ExMagnitudo' into whatever higher
%% places remain in the representation.
propagate_carry(ExMagnitudo, []) ->
    %% No higher place exists yet: spontaneously create the NEXT one.
    [primus(diende(ExMagnitudo))];
propagate_carry(ExMagnitudo, [{Magnitudo, _} | _] = Numerum) ->
    MagnitudoProximo = diende(ExMagnitudo),
    case MagnitudoProximo =:= Magnitudo of
        true ->
            %% The immediately-next place is present: increment it (this may
            %% carry again).
            %% e.g. xix + i == [{unus, ix}, {decem, x}] + i == [{decem, xx}]
            incrementum_generalis(Numerum);
        false ->
            %% There is a gap: vivify the place in between.
            %% e.g. cix + i == [{unus, ix}, {centum, c}]
            %%              == [{decem, x}, {centum, c}]
            [primus(MagnitudoProximo) | Numerum]
    end.
%% Maps a decimal place to the next larger one. There is nothing above
%% thousands in "standardized" Roman numerals, hence the error.
diende(mille) -> error(nimis_magna);
diende(centum) -> mille;
diende(decem) -> centum;
diende(unus) -> decem.
%% The "first" (smallest) numeral of each decimal place.
primus(mille) -> {mille, m};
primus(centum) -> {centum, c};
primus(decem) -> {decem, x};
primus(unus) -> {unus, i}.
%% ====================================================================
%% @doc Takes one away from a number in the internal representation.
-spec decrement(numerum()) -> numerum().
decrement([{Magnitudo, Valorem} | Etc]) ->
    Cauda = case praedecessor(Valorem) of
                %% Borrowing: this decimal place disappears entirely...
                borrow -> Etc;
                Prior -> [{Magnitudo, Prior} | Etc]
            end,
    %% ...and everything just below it is filled in (e.g. x - i = ix).
    ante(Magnitudo) ++ Cauda.
%% The representation that sits immediately before the "first" of a decimal
%% place (i.e. primus(M) minus one), unrolled per place.
ante(unus)   -> [];
ante(decem)  -> [{unus, ix}];
ante(centum) -> [{unus, ix}, {decem, xc}];
ante(mille)  -> [{unus, ix}, {decem, xc}, {centum, cm}].
%% @doc What numeral comes next within THIS decimal place?
%% Thousands are special-cased: they never carry, they simply run out at mmm.
succssorem(m) -> mm;
succssorem(mm) -> mmm;
succssorem(mmm) -> error(nimis_magna);
succssorem(Numerum) ->
    %% General case: split the numeral into letters, look up the symbols for
    %% its decimal place, then advance through the generic table.
    Partes = split_atom(Numerum),
    case succssorem(simbola(magnitudo(Partes)), Partes) of
        carry -> carry;
        PartesProximae -> fuse_atom(PartesProximae)
    end.
%% The successor table, generalized over a decimal place's symbols with your
%% choice of {I, V, X} := {i, v, x} | {x, l, c} | {c, d, m}. Returns `carry'
%% when the place wraps and the next place up must absorb one.
%%
%% @see simbola/1
succssorem({I, V, X}, Partes) ->
    case Partes of
        [I]          -> [I, I];
        [I, I]       -> [I, I, I];
        [I, I, I]    -> [I, V];
        [I, V]       -> [V];
        [V]          -> [V, I];
        [V, I]       -> [V, I, I];
        [V, I, I]    -> [V, I, I, I];
        [V, I, I, I] -> [I, X];
        [I, X]       -> carry
    end.
%% @doc What numeral comes immediately before within THIS decimal place?
%% Thousands are special-cased; `borrow' means the place is exhausted and
%% one must be borrowed from the place above.
praedecessor(m) -> borrow;
praedecessor(mm) -> m;
praedecessor(mmm) -> mm;
praedecessor(Numerum) ->
    %% General case, mirroring succssorem/1.
    Partes = split_atom(Numerum),
    case praedecessor(simbola(magnitudo(Partes)), Partes) of
        borrow -> borrow;
        PartesPriores -> fuse_atom(PartesPriores)
    end.
%% The predecessor table, generalized over a decimal place's symbols with
%% your choice of {I, V, X} := {i, v, x} | {x, l, c} | {c, d, m}. Returns
%% `borrow' when one must be taken from the next place up.
%%
%% @see simbola/1
praedecessor({I, V, X}, Partes) ->
    case Partes of
        [I]          -> borrow;
        [I, I]       -> [I];
        [I, I, I]    -> [I, I];
        [I, V]       -> [I, I, I];
        [V]          -> [I, V];
        [V, I]       -> [V];
        [V, I, I]    -> [V, I];
        [V, I, I, I] -> [V, I, I];
        [I, X]       -> [V, I, I, I]
    end.
%% @doc The decimal place of a term, decided by its leading letter.
magnitudo([m | _]) -> mille;
magnitudo([d | _]) -> centum;
magnitudo([c | _]) -> centum;
magnitudo([l | _]) -> decem;
magnitudo([x | _]) -> decem;
magnitudo([v | _]) -> unus;
magnitudo([i | _]) -> unus.
%% @doc The {ones, fives, tens} letters used within each decimal place.
%% The mille entry is never used in practice: thousands are special-cased
%% in succssorem/1 and praedecessor/1 before the general path runs.
simbola(mille)  -> {m};
simbola(centum) -> {c, d, m};
simbola(decem)  -> {x, l, c};
simbola(unus)   -> {i, v, x}.
%% ====================================================================
%% @doc Parses a "Roman numeral literal" (really, an atom) into the internal
%% representation, consuming decimal places from largest to smallest.
-spec parse(atom()) -> numerum().
parse(Term) ->
    {Mille, ResiduumI} = parse_mille(split_atom(Term)),
    {Centum, ResiduumII} = parse_generalis(centum, {c, d, m}, ResiduumI),
    {Decem, ResiduumIII} = parse_generalis(decem, {x, l, c}, ResiduumII),
    %% Any characters left over after the ones place means the literal was
    %% malformed: crash with a badmatch.
    {Unus, []} = parse_generalis(unus, {i, v, x}, ResiduumIII),
    lists:append([Unus, Decem, Centum, Mille]).
%% @doc Runs the generic place FSM and returns the parsed `{Nomen, Valorem}'
%% term (or none), plus the list of leftover characters.
parse_generalis(Nomen, Simbola, AtomList) ->
    case parse_generalis_initium(Simbola, AtomList, []) of
        {[], Etc} ->
            %% Nothing accepted; everything is leftover.
            {[], Etc};
        {Reversum, Etc} ->
            %% The FSM accumulates accepted characters in reverse order.
            {[{Nomen, fuse_atom(lists:reverse(Reversum))}], Etc}
    end.
%% @doc Implements a "generic" finite state machine (FSM) that accepts Roman
%% numerals at a given order of magnitude. The FSM is parameterized by the
%% symbols used for its ones (I), its fives (V), and its tens (X).
%% Each state function is named after the numeral accepted so far; a clause
%% with no matching input character accepts, returning {AccSoFar, Leftover}.
%%
%% Note: accepted atoms are returned in REVERSED order!
%%
%% See the README for a diagram of this state machine.
%% Start state: nothing consumed yet; only I or V can begin a numeral.
parse_generalis_initium({I, V, X}, [I|Etc], Acc) -> i({I, V, X}, Etc, [I|Acc]);
parse_generalis_initium({I, V, X}, [V|Etc], Acc) -> v({I, V, X}, Etc, [V|Acc]);
parse_generalis_initium({_, _, _}, Etc, []) -> {[], Etc}.
%% State i: one "one" seen; may extend to ii, iv (subtractive) or ix.
i({I, V, X}, [I|Etc], Acc) -> ii({I, V, X}, Etc, [I|Acc]);
i({I, V, X}, [V|Etc], Acc) -> iv({I, V, X}, Etc, [V|Acc]);
i({I, V, X}, [X|Etc], Acc) -> ix({I, V, X}, Etc, [X|Acc]);
i({_, _, _}, Etc, Acc) -> {Acc, Etc}.
%% State ii: two "ones"; only a third one may follow.
ii({I, V, X}, [I|Etc], Acc) -> iii({I, V, X}, Etc, [I|Acc]);
ii({_, _, _}, Etc, Acc) -> {Acc, Etc}.
%% States iii, iv: terminal — nothing may follow within this place.
iii({_, _, _}, Etc, Acc) -> {Acc, Etc}.
iv({_, _, _}, Etc, Acc) -> {Acc, Etc}.
%% State v: a "five"; ones may follow (vi, vii, viii via state ii/iii).
v({I, V, X}, [I|Etc], Acc) -> vi({I, V, X}, Etc, [I|Acc]);
v({_, _, _}, Etc, Acc) -> {Acc, Etc}.
%% State vi: reuses state ii so vii/viii share the "count the ones" logic.
vi({I, V, X}, [I|Etc], Acc) -> ii({I, V, X}, Etc, [I|Acc]);
vi({_, _, _}, Etc, Acc) -> {Acc, Etc}.
%% State ix: terminal subtractive form.
ix({_, _, _}, Etc, Acc) -> {Acc, Etc}.
%% Parses the thousands place. Special-cased because "standardized" Roman
%% numerals cannot count beyond MMMCMXCIX; longest match tried first.
parse_mille([m, m, m | Reliqua]) -> {[{mille, mmm}], Reliqua};
parse_mille([m, m | Reliqua])    -> {[{mille, mm}], Reliqua};
parse_mille([m | Reliqua])       -> {[{mille, m}], Reliqua};
parse_mille(Reliqua)             -> {[], Reliqua}.
%% ====================================================================
%% @doc Returns a list of single-character atoms taken from the given atom.
-spec split_atom(atom()) -> [atom()].
split_atom(Atom) ->
    lists:map(fun(Char) -> list_to_atom([Char]) end, atom_to_list(Atom)).
%% @doc Concatenates a list of single-letter atoms into a single atom.
-spec fuse_atom(['i' | 'v' | 'x' | 'l' | 'c' | 'd' | 'm', ...]) -> atom().
fuse_atom(AtomList) ->
    %% The [Char] generator pattern keeps only atoms that render as exactly
    %% one character, preserving the original's silent filtering behavior.
    list_to_atom([Char || A <- AtomList, [Char] <- [atom_to_list(A)]]).
%% ====================================================================
%% Unit Tests
%% ====================================================================
-ifdef(EUNIT).
%% Range boundaries shared by the arithmetic tests below.
% largest value:
maximum() -> mmmcmxcix.
% before the largest value:
ante_maximum() -> mmmcmxcviii.
-endif.
-ifdef(EUNIT).
%% add/2: spot checks plus the upper end of the representable range.
add_test() ->
    [?assertEqual(ii, add(i, i)),
     ?assertEqual(iv, add(ii, ii)),
     ?assertEqual(mmxx, add(i, mmxix)),
     ?assertEqual(cxi, add(lxix, xlii)),
     ?assertEqual(cdlxxxix, add(lxix, cdxx)),
     ?assertEqual(lxix, add(lxviii, i)), % commendatus
     ?assertEqual(maximum(), add(ante_maximum(), i))].
-endif.
-ifdef(EUNIT).
%% subtract/2: spot checks plus both ends of the representable range.
sub_test() ->
    [?assertEqual(i, subtract(ii, i)),
     ?assertEqual(ii, subtract(iv, ii)),
     ?assertEqual(xxvii, subtract(lxix, xlii)),
     ?assertEqual(xxx, subtract(mmxxii, mcmxcii)),
     ?assertEqual(ante_maximum(), subtract(maximum(), i)),
     ?assertEqual(i, subtract(maximum(), ante_maximum()))].
-endif.
-ifdef(EUNIT).
%% decrement/1 exercised on the internal representation, including a borrow
%% that cascades across decimal places (mmcc - i = mmcxcix).
decrement_test() ->
    [?assertEqual(parse(mmccxix), decrement(parse(mmccxx))),
     ?assertEqual(parse(mmcxcix), decrement(parse(mmcc)))].
-endif.
-ifdef(EUNIT).
%% parse/1 on every ones-place value.
parse_unum_test() ->
    [?assertEqual([{unus, i}], parse(i)),
     ?assertEqual([{unus, ii}], parse(ii)),
     ?assertEqual([{unus, iii}], parse(iii)),
     ?assertEqual([{unus, iv}], parse(iv)),
     ?assertEqual([{unus, v}], parse(v)),
     ?assertEqual([{unus, vi}], parse(vi)),
     ?assertEqual([{unus, vii}], parse(vii)),
     ?assertEqual([{unus, viii}], parse(viii)),
     ?assertEqual([{unus, ix}], parse(ix))].
-endif.
-ifdef(EUNIT).
%% parse/1 on multi-place numerals; note the increasing-magnitude order of
%% the internal representation.
parse_mixed_test() ->
    [?assertEqual([{unus, ix}, {decem, lx}], parse(lxix)),
     ?assertEqual([{decem, xx}, {centum, cd}], parse(cdxx))].
-endif.
-ifdef(EUNIT).
%% Malformed literals must crash.
parse_invalid_test() ->
    [?assertError(_, parse(viv)),
     ?assertError(_, parse(vix))].
-endif.
-ifdef(EUNIT).
%% split_atom/1 explodes an atom into single-letter atoms.
split_test() ->
    [?assertEqual([c, x, i], split_atom(cxi))].
-endif.
-ifdef(EUNIT).
%% fuse_atom/1 reassembles single-letter atoms into one atom.
%% Fix: removed stray dataset residue appended after the final `-endif.'.
join_test() ->
    [?assertEqual(cxi, fuse_atom([c, x, i]))].
-endif.
%% ==========================================================================================================
%% Ram - A distributed KV store for Erlang and Elixir.
%%
%% The MIT License (MIT)
%%
%% Copyright (c) 2021-2022 <NAME> <<EMAIL>>.
%%
%% Permission is hereby granted, free of charge, to any person obtaining a copy
%% of this software and associated documentation files (the "Software"), to deal
%% in the Software without restriction, including without limitation the rights
%% to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
%% copies of the Software, and to permit persons to whom the Software is
%% furnished to do so, subject to the following conditions:
%%
%% The above copyright notice and this permission notice shall be included in
%% all copies or substantial portions of the Software.
%%
%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
%% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
%% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
%% THE SOFTWARE.
%% ==========================================================================================================
%% ===================================================================
%% @doc Exposes all of the Key Value store APIs.
%% @end
%% ===================================================================
-module(ram).
%% API
-export([start/0, stop/0]).
-export([start_cluster/1, stop_cluster/1]).
-export([add_node/1, remove_node/1, nodes/0]).
-export([restart_server/0]).
-export([get/1, get/2, fetch/1]).
-export([put/2]).
-export([update/3]).
-export([delete/1]).
%% ===================================================================
%% API
%% ===================================================================
%% @doc Starts Ram manually.
%%
%% In most cases Ram will be started as one of your application's dependencies,
%% however you may use this helper method to start it manually.
%% @end
-spec start() -> ok.
start() ->
%% Crashes with a badmatch if any dependent application fails to start.
{ok, _} = application:ensure_all_started(ram),
ok.
%% @doc Stops Ram manually.
%% Returns whatever `application:stop/1' returns.
-spec stop() -> ok | {error, Reason :: term()}.
stop() ->
application:stop(ram).
%% @doc Starts the Ram cluster on the given nodes.
-spec start_cluster([node()]) -> ok | {error, Reason :: term()}.
start_cluster(Nodes) ->
%% Delegates cluster bootstrap to the backbone layer.
ram_backbone:start_cluster(Nodes).
%% @doc Stops the Ram cluster on the given nodes.
-spec stop_cluster([node()]) -> ok | {error, Reason :: term()}.
stop_cluster(Nodes) ->
%% Delegates cluster teardown to the backbone layer.
ram_backbone:stop_cluster(Nodes).
%% @doc Restarts a previously stopped node of the cluster, so that it joins the cluster again.
-spec restart_server() -> ok | {error, Reason :: term()}.
restart_server() ->
%% Delegates to the backbone layer.
ram_backbone:restart_server().
%% @doc Adds Node to an existing Ram cluster.
%%
%% This method is to be called when:
%% <ul>
%% <li>Adding a new node to the cluster.</li>
%% <li>Restarting a previously stopped node of the cluster, so that it joins the cluster again.</li>
%% </ul>
%%
%% Note that when restarting a server it might be preferable to use {@link restart_server/0} instead.
-spec add_node(Node :: node()) -> ok | {error, Reason :: term()}.
add_node(Node) ->
%% Delegates to the backbone, which performs the actual cluster join.
ram_backbone:add_node(Node).
%% @doc Removes Node from the Ram cluster.
-spec remove_node(Node :: node()) -> ok | {error, Reason :: term()}.
remove_node(Node) ->
%% Delegates to the backbone, which performs the actual cluster leave.
ram_backbone:remove_node(Node).
%% @doc Returns the nodes in the Ram cluster.
-spec nodes() -> [node()].
nodes() ->
%% Delegation only; the backbone holds cluster membership.
ram_backbone:nodes().
%% @equiv get(Key, undefined)
%% @end
-spec get(Key :: term()) -> Value :: term().
get(Key) ->
%% Convenience wrapper: a missing key yields the atom 'undefined'.
get(Key, undefined).
%% @doc Returns the Key's Value or Default if the Key is not found.
%%
%% <h2>Examples</h2>
%% <h3>Elixir</h3>
%% ```
%% iex(1)> :ram.get("key")
%% :undefined
%% iex(2)> :ram.get("key", "default")
%% "default"
%% iex(3)> :ram.put("key", "value")
%% :ok
%% iex(4)> :ram.get("key")
%% "value"
%% '''
%% <h3>Erlang</h3>
%% ```
%% 1> ram:get("key").
%% undefined
%% 2> ram:get("key", "default").
%% "default"
%% 3> ram:put("key", "value").
%% ok
%% 4> ram:get("key").
%% "value"
%% '''
-spec get(Key :: term(), Default :: term()) -> Value :: term().
get(Key, Default) ->
%% Delegates to the KV layer; Default is returned when Key is not found.
ram_kv:get(Key, Default).
%% @doc Looks up a Key.
%%
%% Returns `{ok, Value}' on a hit and `error' if the Key is not found.
-spec fetch(Key :: term()) -> {ok, Value :: term()} | error.
fetch(Key) ->
ram_kv:fetch(Key).
%% @doc Puts a Value for a Key.
-spec put(Key :: term(), Value :: term()) -> ok.
put(Key, Value) ->
%% Delegation to the KV layer; always returns ok.
ram_kv:put(Key, Value).
%% @doc Atomically updates a Key with the given function.
%%
%% If Key is found then the existing Value is passed to the fun and its result is used as the updated Value of Key.
%% If Key is not found, Default is put as the Value of Key. The Default value will not be passed through the update function.
%%
%% <div style="font-size:30px;float:left;padding-right:10px;color:#ffcc00;">⚠</div>
%% <i>Passing functions as arguments might not be compatible across different Erlang versions,
%% so if your cluster is composed of nodes running different Erlang versions do not use this method.</i>
%%
%% <h2>Examples</h2>
%% <h3>Elixir</h3>
%% ```
%% iex(1)> update_fun = fn existing_value -> existing_value * 2 end
%% #Function<44.65746770/1 in :erl_eval.expr/5>
%% iex(2)> :ram.update("key", 10, update_fun)
%% ok
%% iex(3)> :ram.get("key")
%% 10
%% iex(4)> :ram.update("key", 10, update_fun)
%% ok
%% iex(5)> :ram.get("key")
%% 20
%% '''
%% <h3>Erlang</h3>
%% ```
%% 1> UpdateFun = fun(ExistingValue) -> ExistingValue * 2 end.
%% #Fun<erl_eval.44.65746770>
%% 2> ram:update("key", 10, UpdateFun).
%% ok
%% 3> ram:get("key").
%% 10
%% 4> ram:update("key", 10, UpdateFun).
%% ok
%% 5> ram:get("key").
%% 20
%% '''
-spec update(Key :: term(), Default :: term(), function()) -> ok.
update(Key, Default, Fun) ->
%% Atomic update delegated to the KV layer; Fun receives the existing
%% value, Default is stored as-is when the Key is absent.
ram_kv:update(Key, Default, Fun).
%% @doc Deletes a Key.
-spec delete(Key :: term()) -> ok.
%% Delegation to the KV layer; always returns ok.
delete(Key) ->
ram_kv:delete(Key). | src/ram.erl | 0.532911 | 0.42931 | ram.erl | starcoder |
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2014 SyncFree Consortium. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(clocksi_materializer).
-include("antidote.hrl").
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-export([new/1,
materialize/4,
materialize_eager/3]).
%% @doc Creates an empty CRDT for a given type.
-spec new(type()) -> snapshot().
new(Type) ->
%% Delegates to the materializer, which knows how to build a fresh snapshot.
materializer:create_snapshot(Type).
%% The materializer is given a tuple (or a list) of ordered update
%% operations, where each operation carries an id one larger than the
%% previous. This returns the id of the first operation in the container
%% (i.e. the one with the largest id), or 0 when it is empty.
-spec get_first_id([{non_neg_integer(), #clocksi_payload{}}] | tuple()) ->
non_neg_integer().
get_first_id([]) ->
    0;
get_first_id([{Id, _Op} | _Rest]) ->
    Id;
get_first_id(OpsTuple) when is_tuple(OpsTuple) ->
    %% Element 2 of the tuple container is {Length, _ListLen}.
    {Length, _ListLen} = element(2, OpsTuple),
    case Length of
        0 ->
            0;
        _ ->
            %% The newest entry sits at slot ?FIRST_OP + Length - 1.
            {Id, _Op} = element(?FIRST_OP + (Length - 1), OpsTuple),
            Id
    end.
%% @doc Applies the operation of a list to a previously created CRDT snapshot. Only the
%% operations that are not already in the previous snapshot and
%% with smaller timestamp than the specified
%% are considered. Newer operations are discarded.
%% Input:
%% Type: The type of CRDT to create
%% Snapshot: Current state of the CRDT
%% SnapshotCommitTime: The time used to describe the state of the CRDT given in Snapshot
%% MinSnapshotTime: The threshold time given by the reading transaction
%% Ops: The list of operations to apply in causal order
%% TxId: The Id of the transaction requesting the snapshot
%% Output: A tuple. The first element is ok, the second is the CRDT after applying the operations,
%% the third element is 1 minus the number of the operation with the smallest id not included in the snapshot,
%% the fourth element is the smallest vectorclock that describes this snapshot,
%% the fifth element is a boolean; if it is true it means that the returned snapshot contains
%% more operations than the one given as input, false otherwise,
%% the sixth element is an integer that counts the number of operations applied to make the snapshot
-spec materialize(type(),
txid() | ignore,
snapshot_time() | ignore,
#snapshot_get_response{}
) ->
{ok, snapshot(), integer(), snapshot_time() | ignore,
boolean(), non_neg_integer()} | {error, reason()}.
materialize(Type, TxId, MinSnapshotTime,
#snapshot_get_response{snapshot_time = SnapshotCommitTime, ops_list = Ops,
materialized_snapshot = #materialized_snapshot{last_op_id = LastOp, value = Snapshot}}) ->
%% Id of the newest op in the container; seeds the "first hole" tracker.
FirstId = get_first_id(Ops),
%% Walk the ops selecting those that belong in the snapshot bounded by
%% MinSnapshotTime and not already present in the base snapshot.
{ok, OpList, NewLastOp, LastOpCt, IsNewSS} =
materialize_intern(Type, [], LastOp, FirstId, SnapshotCommitTime, MinSnapshotTime,
Ops, TxId, SnapshotCommitTime, false, 0),
%% Fold the selected ops into the base snapshot; Count is how many applied.
case apply_operations(Type, Snapshot, 0, OpList) of
{ok, NewSS, Count} ->
{ok, NewSS, NewLastOp, LastOpCt, IsNewSS, Count};
{error, Reason} ->
{error, Reason}
end.
%% @doc Applies a list of operations to a snapshot
%% Input:
%% Type: The type of CRDT of the snapshot
%% Snapshot: The initial snapshot to apply the operations to
%% Count: Should be input as 0, this will count the number of ops applied
%% OpList: The list of operations to apply
%% Output: Either the snapshot with the operations applied to
%% it, or an error.
-spec apply_operations(type(), snapshot(), non_neg_integer(), [clocksi_payload()]) ->
{ok, snapshot(), non_neg_integer()} | {error, reason()}.
%% Fold a list of clocksi payloads into a snapshot, counting how many were
%% applied. Stops at — and returns — the first update error.
apply_operations(_Type, Snapshot, Count, []) ->
    {ok, Snapshot, Count};
apply_operations(Type, Snapshot, Count, [Op | Remaining]) ->
    Param = Op#clocksi_payload.op_param,
    case materializer:update_snapshot(Type, Snapshot, Param) of
        {ok, Updated} ->
            apply_operations(Type, Updated, Count + 1, Remaining);
        {error, Reason} ->
            {error, Reason}
    end.
%% @doc Internal function that goes through a list of operations and a snapshot
%% time and returns which operations from the list should be applied for
%% the given snapshot time.
%% Input:
%% Type: The type of the CRDT
%% OpList: Should be given initially as an empty list, this will accumulate
%% the operations to apply.
%% LastOp: 1 minus the number of the operation with the smallest id not included in the initial snapshot
%% FirstHole: The variable keeps track of 1 minus the number of the operation with the smallest id
%% not included in the new snapshot that is currently being generated, it should be initialised to the
%% id of the first op in OpList
%% SnapshotCommitTime: The time used to describe the initial state of the CRDT given in Snapshot
%% MinSnapshotTime: The threshold time given by the reading transaction
%% Ops: The list of operations to apply in causal order, the most recent op is on the left
%% TxId: The Id of the transaction requesting the snapshot
%% LastOpCommitTime: The snapshot time of the last operation in the list of operations to apply
%% NewSS: Boolean that is true if any operations should be applied, false otherwise. Should start as false.
%% Output: A tuple with 4 elements or an error. The first element of the tuple is the atom ok.
%% The second element is the list of operations that should be applied to the snapshot.
%% The third element 1 minus the number of the operation with the smallest id not included in the snapshot.
%% The fourth element is the snapshot time of the last operation in the list.
%% The fifth element is a boolean, true if a new snapshot should be generated, false otherwise.
-spec materialize_intern(type(),
[clocksi_payload()],
integer(),
integer(),
snapshot_time() | ignore,
snapshot_time(),
[{integer(), clocksi_payload()}] | tuple(),
txid() | ignore,
snapshot_time() | ignore,
boolean(),
non_neg_integer()) ->
{ok, [clocksi_payload()], integer(), snapshot_time()|ignore, boolean()}.
%% No ops left to examine: return what has been accumulated.
materialize_intern(_Type, OpList, _LastOp, FirstHole, _SnapshotCommitTime, _MinSnapshotTime, [], _TxId, LastOpCt, NewSS, _Location) ->
{ok, OpList, FirstHole, LastOpCt, NewSS};
%% List-backed op container: examine the head, then continue on the tail.
materialize_intern(Type, OpList, LastOp, FirstHole, SnapshotCommitTime, MinSnapshotTime, [{OpId, Op}|Rest], TxId, LastOpCt, NewSS, Location) ->
materialize_intern_perform(Type, OpList, LastOp, FirstHole, SnapshotCommitTime, MinSnapshotTime, {OpId, Op}, Rest, TxId, LastOpCt, NewSS, Location + 1);
%% Tuple-backed op container: element 2 holds {Length, _ListLen}; ops are
%% read by direct indexing from the end (presumably newest first, mirroring
%% the list clause — TODO confirm against the container's writer).
%% Location counts how many ops have been consumed so far.
materialize_intern(Type, OpList, LastOp, FirstHole, SnapshotCommitTime, MinSnapshotTime, TupleOps, TxId, LastOpCt, NewSS, Location) ->
{Length, _ListLen} = element(2, TupleOps),
case Length == Location of
true ->
%% All Length ops have been visited.
{ok, OpList, FirstHole, LastOpCt, NewSS};
false ->
materialize_intern_perform(Type, OpList, LastOp, FirstHole, SnapshotCommitTime, MinSnapshotTime,
element((?FIRST_OP+Length-1) - Location, TupleOps), TupleOps, TxId, LastOpCt, NewSS, Location + 1)
end.
%% Examine a single op and thread the result back into materialize_intern/11.
%% The intermediate Result tuple is
%%   {ok, OpList, LastOpCt, AlreadyInSS, NewSS, FirstHole}
%% where AlreadyInSS = true signals the op was part of the base snapshot, so
%% the remaining (older) ops can potentially be skipped.
materialize_intern_perform(Type, OpList, LastOp, FirstHole, SnapshotCommitTime, MinSnapshotTime, {OpId, Op}, Rest, TxId, LastOpCt, NewSS, Location) ->
Result = case Type == Op#clocksi_payload.type of
true ->
OpCom=Op#clocksi_payload.commit_time,
OpSS=Op#clocksi_payload.snapshot_time,
%% Check if the op is not in the previous snapshot and should be included in the new one
case (is_op_in_snapshot(TxId, Op, OpCom, OpSS, MinSnapshotTime, SnapshotCommitTime, LastOpCt)) of
{true, _, NewOpCt} ->
%% Include the new op because it has a timestamp bigger than the snapshot being generated
{ok, [Op | OpList], NewOpCt, false, true, FirstHole};
{false, false, _} ->
%% Don't include the op; record the hole just below it.
{ok, OpList, LastOpCt, false, NewSS, OpId-1}; % no update
{false, true, _} ->
%% Don't include the op, because it was already in the SS
{ok, OpList, LastOpCt, true, NewSS, FirstHole}
end;
false -> %% Op is not for this {Key, Type}
%% @todo THIS CASE PROBABLY SHOULD NOT HAPPEN?!
{ok, OpList, LastOpCt, false, NewSS, FirstHole} %% no update
end,
case Result of
{ok, NewOpList1, NewLastOpCt, false, NewSS1, NewHole} ->
materialize_intern(Type, NewOpList1, LastOp, NewHole, SnapshotCommitTime,
MinSnapshotTime, Rest, TxId, NewLastOpCt, NewSS1, Location);
{ok, NewOpList1, NewLastOpCt, true, NewSS1, NewHole} ->
%% Op was already in the base snapshot; if every older op (id =< LastOp)
%% is also included, the rest of the walk can be skipped.
case OpId - 1 =< LastOp of
true ->
%% can skip the rest of the ops because they are already included in the SS
materialize_intern(Type, NewOpList1, LastOp, NewHole, SnapshotCommitTime,
MinSnapshotTime, [], TxId, NewLastOpCt, NewSS1, Location);
false ->
materialize_intern(Type, NewOpList1, LastOp, NewHole, SnapshotCommitTime,
MinSnapshotTime, Rest, TxId, NewLastOpCt, NewSS1, Location)
end
end.
%% @doc Check whether an update is included in a snapshot and also
%% if that update is newer than a snapshot's commit time
%% Input:
%% TxId: Descriptor of the transaction requesting the snapshot
%% Op: The operation to check
%% {OpDc, OpCommitTime}: The DC and commit time of the operation
%% OperationSnapshotTime: The snapshot time of the operation
%% SnapshotTime: The snapshot time to check if the operation is included in
%% LastSnapshot: The previous snapshot that is being used to generate the new snapshot
%% PrevTime: The snapshot time of the previous operation that was checked
%% Output: A tuple of 3 elements. The first element is a boolean that is true
%% if the operation should be included in the snapshot, false otherwise; the second element
%% is a boolean that is true if the operation was already included in the previous snapshot,
%% false otherwise. The third element is the snapshot time of the last operation to
%% be applied to the snapshot
-spec is_op_in_snapshot(txid(), clocksi_payload(), dc_and_commit_time(), snapshot_time(), snapshot_time(),
snapshot_time() | ignore, snapshot_time()) -> {boolean(), boolean(), snapshot_time()}.
is_op_in_snapshot(TxId, Op, {OpDc, OpCommitTime}, OperationSnapshotTime, SnapshotTime, LastSnapshot, PrevTime) ->
%% First check whether the op was already included in the previous snapshot
%% (or was produced by the reading transaction itself).
%% TODO(review): is the `or TxId == ...' part necessary and correct?
case materializer_vnode:belongs_to_snapshot_op(
LastSnapshot, {OpDc, OpCommitTime}, OperationSnapshotTime) or (TxId == Op#clocksi_payload.txid) of
true ->
%% Not already in the previous snapshot: decide whether it belongs in the new one.
%% Replace the snapshot time of the dc where the transaction committed with the commit time
OpSSCommit = dict:store(OpDc, OpCommitTime, OperationSnapshotTime),
%% PrevTime2 is the time of the previous snapshot; if there was none, it uses the snapshot time
%% of the new operation
PrevTime2 = case PrevTime of
ignore ->
OpSSCommit;
_ ->
PrevTime
end,
%% Result is true if the op should be included in the snapshot
%% NewTime is the vectorclock of the snapshot with the time of Op included
{Result, NewTime} =
dict:fold(fun(DcIdOp, TimeOp, {Acc, PrevTime3}) ->
%% Exclude the op as soon as any DC entry of its commit vector
%% is newer than the requested snapshot time.
Res1 = case dict:find(DcIdOp, SnapshotTime) of
{ok, TimeSS} ->
case TimeSS < TimeOp of
true ->
false;
false ->
Acc
end;
error ->
lager:error("Could not find DC in SS ~p", [SnapshotTime]),
false
end,
%% Merge the op's per-DC time into the running snapshot clock (max).
Res2 = dict:update(DcIdOp, fun(Val) ->
case TimeOp > Val of
true ->
TimeOp;
false ->
Val
end
end, TimeOp, PrevTime3),
{Res1, Res2}
end, {true, PrevTime2}, OpSSCommit),
case Result of
true ->
{true, false, NewTime};
false ->
{false, false, PrevTime}
end;
false->
%% was already in the prev ss, done searching ops
{false, true, PrevTime}
end.
%% @doc Apply updates in given order without any checks.
%% Careful: In contrast to materialize/6, it takes just operations, not clocksi_payloads!
-spec materialize_eager(type(), snapshot(), [op()]) -> snapshot().
materialize_eager(Type, Snapshot, Ops) ->
%% Thin delegation; no snapshot-time filtering is performed here.
materializer:materialize_eager(Type, Snapshot, Ops).
-ifdef(TEST).
%% Basic materialization over a pn-counter: four increments committed at
%% times 1..4 on DC 1; reading at increasing snapshot times folds in
%% progressively more operations.
materializer_clocksi_test()->
Type = antidote_crdt_counter_pn,
PNCounter = new(Type),
?assertEqual(0, Type:value(PNCounter)),
%% need to add the snapshot time for these for the test to pass
Op1 = #clocksi_payload{key = abc, type = Type,
op_param = 2,
commit_time = {1, 1}, txid = 1, snapshot_time=vectorclock:from_list([{1, 1}])},
Op2 = #clocksi_payload{key = abc, type = Type,
op_param = 1,
commit_time = {1, 2}, txid = 2, snapshot_time=vectorclock:from_list([{1, 2}])},
Op3 = #clocksi_payload{key = abc, type =Type,
op_param = 1,
commit_time = {1, 3}, txid = 3, snapshot_time=vectorclock:from_list([{1, 3}])},
Op4 = #clocksi_payload{key = abc, type = Type,
op_param = 2,
commit_time = {1, 4}, txid = 4, snapshot_time=vectorclock:from_list([{1, 4}])},
Ops = [{4, Op4}, {3, Op3}, {2, Op2}, {1, Op1}],
SS = #snapshot_get_response{snapshot_time = ignore, ops_list = Ops,
materialized_snapshot = #materialized_snapshot{last_op_id = 0, value = PNCounter}},
{ok, PNCounter2, 3, CommitTime2, _SsSave, _} = materialize(Type,
ignore, vectorclock:from_list([{1, 3}]),
SS),
?assertEqual({4, vectorclock:from_list([{1, 3}])}, {Type:value(PNCounter2), CommitTime2}),
{ok, PNcounter3, 4, CommitTime3, _SsSave1, _} = materialize(Type,
ignore, vectorclock:from_list([{1, 4}]),
SS),
?assertEqual({6, vectorclock:from_list([{1, 4}])}, {Type:value(PNcounter3), CommitTime3}),
%% Reading past the last commit time still stops at the newest op (time 4).
{ok, PNcounter4, 4, CommitTime4, _SsSave2, _} = materialize(Type,
ignore, vectorclock:from_list([{1, 7}]),
SS),
?assertEqual({6, vectorclock:from_list([{1, 4}])}, {Type:value(PNcounter4), CommitTime4}).
%% This test covers the case where a generated snapshot does not include all of the updates in the
%% list of operations, precisely in the case where an operation is not taken, but the operations to
%% the left and right of it in the list are taken. When this snapshot is then used for a future
%% read with a different timestamp, this missing value must be checked.
%% Op3 (committed on DC 2) is skipped by the first read; the second read,
%% built on top of the first snapshot, must still pick it up.
materializer_missing_op_test() ->
Type = antidote_crdt_counter_pn,
PNCounter = new(Type),
?assertEqual(0, Type:value(PNCounter)),
Op1 = #clocksi_payload{key = abc, type = Type,
op_param = 1,
commit_time = {1, 1}, txid = 1, snapshot_time=vectorclock:from_list([{1, 1}, {2, 1}])},
Op2 = #clocksi_payload{key = abc, type = Type,
op_param = 1,
commit_time = {1, 2}, txid = 2, snapshot_time=vectorclock:from_list([{1, 2}, {2, 1}])},
Op3 = #clocksi_payload{key = abc, type = Type,
op_param = 1,
commit_time = {2, 2}, txid = 3, snapshot_time=vectorclock:from_list([{1, 1}, {2, 1}])},
Op4 = #clocksi_payload{key = abc, type = Type,
op_param = 1,
commit_time = {1, 3}, txid = 2, snapshot_time=vectorclock:from_list([{1, 2}, {2, 1}])},
Ops = [{4, Op4}, {3, Op3}, {2, Op2}, {1, Op1}],
SS = #snapshot_get_response{snapshot_time = ignore, ops_list = Ops,
materialized_snapshot = #materialized_snapshot{last_op_id = 0, value = PNCounter}},
{ok, PNCounter2, LastOp, CommitTime2, _SsSave, _} = materialize(Type,
ignore, vectorclock:from_list([{1, 3}, {2, 1}]),
SS),
?assertEqual({3, vectorclock:from_list([{1, 3}, {2, 1}])}, {Type:value(PNCounter2), CommitTime2}),
%% Re-read with DC 2 advanced; the previously skipped Op3 must be applied.
SS2 = #snapshot_get_response{snapshot_time = CommitTime2, ops_list = Ops,
materialized_snapshot = #materialized_snapshot{last_op_id = LastOp, value = PNCounter2}},
{ok, PNCounter3, 4, CommitTime3, _SsSave, _} = materialize(Type,
ignore, vectorclock:from_list([{1, 3}, {2, 2}]),
SS2),
?assertEqual({4, vectorclock:from_list([{1, 3}, {2, 2}])}, {Type:value(PNCounter3), CommitTime3}).
%% This test covers updates whose snapshots only contain entries from one of the DCs.
%% This can happen, for example, if an update is committed before the DCs have been connected.
%% It ensures that when we read using a snapshot with and without all the DCs we still include the correct updates.
%% Ops carry single-DC snapshot vectors; reads with and without the second
%% DC in the requested snapshot must both resolve correctly.
materializer_missing_dc_test() ->
Type = antidote_crdt_counter_pn,
PNCounter = new(Type),
?assertEqual(0, Type:value(PNCounter)),
Op1 = #clocksi_payload{key = abc, type = Type,
op_param = 1,
commit_time = {1, 1}, txid = 1, snapshot_time=vectorclock:from_list([{1, 1}])},
Op2 = #clocksi_payload{key = abc, type = Type,
op_param = 1,
commit_time = {1, 2}, txid = 2, snapshot_time=vectorclock:from_list([{1, 2}])},
Op3 = #clocksi_payload{key = abc, type = Type,
op_param = 1,
commit_time = {2, 2}, txid = 3, snapshot_time=vectorclock:from_list([{2, 1}])},
Op4 = #clocksi_payload{key = abc, type = Type,
op_param = 1,
commit_time = {1, 3}, txid = 2, snapshot_time=vectorclock:from_list([{1, 2}])},
Ops = [{4, Op4}, {3, Op3}, {2, Op2}, {1, Op1}],
SS = #snapshot_get_response{snapshot_time = ignore, ops_list = Ops,
materialized_snapshot = #materialized_snapshot{last_op_id = 0, value = PNCounter}},
%% First read knows nothing about DC 2.
{ok, PNCounterA, LastOpA, CommitTimeA, _SsSave, _} = materialize(Type,
ignore, vectorclock:from_list([{1, 3}]),
SS),
?assertEqual({3, vectorclock:from_list([{1, 3}])}, {Type:value(PNCounterA), CommitTimeA}),
SS2 = #snapshot_get_response{snapshot_time = CommitTimeA, ops_list = Ops,
materialized_snapshot = #materialized_snapshot{last_op_id = LastOpA, value = PNCounterA}},
{ok, PNCounterB, 4, CommitTimeB, _SsSave, _} = materialize(Type,
ignore, vectorclock:from_list([{1, 3}, {2, 2}]),
SS2),
?assertEqual({4, vectorclock:from_list([{1, 3}, {2, 2}])}, {Type:value(PNCounterB), CommitTimeB}),
%% Same sequence, but the first read already mentions DC 2 (at time 1).
{ok, PNCounter2, LastOp, CommitTime2, _SsSave, _} = materialize(Type,
ignore, vectorclock:from_list([{1, 3}, {2, 1}]),
SS),
?assertEqual({3, vectorclock:from_list([{1, 3}])}, {Type:value(PNCounter2), CommitTime2}),
SS3 = #snapshot_get_response{snapshot_time = CommitTime2, ops_list = Ops,
materialized_snapshot = #materialized_snapshot{last_op_id = LastOp, value = PNCounter2}},
{ok, PNCounter3, 4, CommitTime3, _SsSave, _} = materialize(Type,
ignore, vectorclock:from_list([{1, 3}, {2, 2}]),
SS3),
?assertEqual({4, vectorclock:from_list([{1, 3}, {2, 2}])}, {Type:value(PNCounter3), CommitTime3}).
%% Concurrent updates from two DCs; also drives materialize_intern/11
%% directly (note: its second return element is the selected op list, not a
%% counter, despite the PNCounter2 variable name).
materializer_clocksi_concurrent_test() ->
Type = antidote_crdt_counter_pn,
PNCounter = new(Type),
?assertEqual(0, Type:value(PNCounter)),
Op1 = #clocksi_payload{key = abc, type = Type,
op_param = 2,
commit_time = {1, 1}, txid = 1, snapshot_time=vectorclock:from_list([{1, 1}, {2, 1}])},
Op2 = #clocksi_payload{key = abc, type = Type,
op_param = 1,
commit_time = {1, 2}, txid = 2, snapshot_time=vectorclock:from_list([{1, 2}, {2, 1}])},
Op3 = #clocksi_payload{key = abc, type = Type,
op_param = 1,
commit_time = {2, 2}, txid = 3, snapshot_time=vectorclock:from_list([{1, 1}, {2, 1}])},
Ops = [{3, Op2}, {2, Op3}, {1, Op1}],
{ok, PNCounter2, 3, CommitTime2, _Keep} = materialize_intern(Type,
[], 0, 3, ignore,
vectorclock:from_list([{2, 2}, {1, 2}]),
Ops, ignore, ignore, false, 0),
{ok, PNCounter3, _} = apply_operations(Type, PNCounter, 0, PNCounter2),
?assertEqual({4, vectorclock:from_list([{1, 2}, {2, 2}])}, {Type:value(PNCounter3), CommitTime2}),
Snapshot=new(Type),
SS = #snapshot_get_response{snapshot_time = ignore, ops_list = Ops,
materialized_snapshot = #materialized_snapshot{last_op_id = 0, value = Snapshot}},
{ok, PNcounter3, 1, CommitTime3, _SsSave1, _} = materialize(Type, ignore,
vectorclock:from_list([{1, 2}, {2, 1}]), SS),
?assertEqual({3, vectorclock:from_list([{1, 2}, {2, 1}])}, {Type:value(PNcounter3), CommitTime3}),
{ok, PNcounter4, 2, CommitTime4, _SsSave2, _} = materialize(Type, ignore,
vectorclock:from_list([{1, 1}, {2, 2}]), SS),
?assertEqual({3, vectorclock:from_list([{1, 1}, {2, 2}])}, {Type:value(PNcounter4), CommitTime4}),
{ok, PNcounter5, 1, CommitTime5, _SsSave3, _} = materialize(Type, ignore,
vectorclock:from_list([{1, 1}, {2, 1}]), SS),
?assertEqual({2, vectorclock:from_list([{1, 1}, {2, 1}])}, {Type:value(PNcounter5), CommitTime5}).
%% Testing a pn-counter with an empty update log: nothing is selected and
%% the snapshot value stays at 0.
materializer_clocksi_noop_test() ->
Type = antidote_crdt_counter_pn,
PNCounter = new(Type),
?assertEqual(0, Type:value(PNCounter)),
Ops = [],
{ok, PNCounter2, 0, ignore, _SsSave} = materialize_intern(Type, [], 0, 0, ignore,
vectorclock:from_list([{1, 1}]),
Ops, ignore, ignore, false, 0),
{ok, PNCounter3, _} = apply_operations(Type, PNCounter, 0, PNCounter2),
?assertEqual(0, Type:value(PNCounter3)).
%% materialize_eager/3 applies raw operations unconditionally, in order.
materializer_eager_clocksi_test()->
Type = antidote_crdt_counter_pn,
PNCounter = new(Type),
?assertEqual(0, Type:value(PNCounter)),
% test - no ops
PNCounter2 = materialize_eager(Type, PNCounter, []),
?assertEqual(0, Type:value(PNCounter2)),
% test - several ops
Op1 = 1,
Op2 = 2,
Op3 = 3,
Op4 = 4,
Ops = [Op1, Op2, Op3, Op4],
PNCounter3 = materialize_eager(Type, PNCounter, Ops),
?assertEqual(10, Type:value(PNCounter3)).
%% An op committed at {dc1, 1} is inside a snapshot reading at {dc1, 2}
%% but outside one reading at {dc1, 0}.
is_op_in_snapshot_test() ->
Type = antidote_crdt_counter_pn,
Op1 = #clocksi_payload{key = abc, type = Type,
op_param = {increment, 2},
commit_time = {dc1, 1}, txid = 1, snapshot_time=vectorclock:from_list([{dc1, 1}])},
OpCT1 = {dc1, 1},
OpCT1SS = vectorclock:from_list([OpCT1]),
ST1 = vectorclock:from_list([{dc1, 2}]),
ST2 = vectorclock:from_list([{dc1, 0}]),
?assertEqual({true, false, OpCT1SS}, is_op_in_snapshot(2, Op1, OpCT1, OpCT1SS, ST1, ignore, ignore)),
?assertEqual({false, false, ignore}, is_op_in_snapshot(2, Op1, OpCT1, OpCT1SS, ST2, ignore, ignore)).
-endif. | src/clocksi_materializer.erl | 0.651133 | 0.403185 | clocksi_materializer.erl | starcoder |
%% @doc This is the main API for the library.
%%
%% You can think of this module as an abstraction layer
%% on the raw API which attempts to add some convenience
%% to plumbing the entire thing by hand-coding maps with
%% the appropriate AWS magic.
%%
%% The low level API calls are in `darcy_ddb_api.erl', but generally
%% users shouldn't call those directly.
-module(darcy).
-include("darcy.hrl").
-define(TIMEOUT, 5000). % 5 seconds
-define(BIG_TIMEOUT, 5*60*1000). % 5 minutes
-define(BATCH_SIZE, 100). % how many items to split into a batch
-define(BATCH_MAX, 10). % maximum number of batches to process in a single go
-define(INITIAL_ERROR_DELAY, 500). % initial throughput retry delay in milliseconds
-define(MAX_ERROR_DELAY, 32000). % maximum millseconds of delay before terminating operation
-define(INITIAL_ERROR_STATE, #error_delay{}).
%% This is a record because, later, I might try to extend this to reduce the sleep
%% time based on the number of successful operations. So for now it only has one
%% field, but it may have two or more in the future.
-record(error_delay, {
delay = 0 :: non_neg_integer()
}).
-export([
start/0,
to_map/1,
clean_map/1,
to_ddb/1,
default_decode/1,
default_encode/1,
make_attribute_defs/1,
make_key_schema/1,
make_provisioned_throughput/2,
table_name/1,
make_table_spec/3,
make_table_spec/5,
make_global_index_spec/3,
make_global_index_spec/5,
add_global_index/2,
make_table_if_not_exists/2,
make_global_table_if_not_exists/3,
describe_table/2,
describe_global_table/2,
delete_table/2,
get_item/3,
batch_get_items/3,
put_item/3,
put_item/4,
batch_write_items/3,
query/3,
query/4,
scan/3,
scan_all/3,
scan_all/4,
scan_parallel/5,
scan_parallel/6,
scan_parallel/7
]).
-type lookup_value() :: integer() | float() | binary() | {blob, binary()}.
%-type set_value() :: {number_set, [ integer() | float() ] } | {string_set, [ binary() ]}.
%-type list_value() :: {list, [ map() | set_value() | lookup_value() ]}.
%% @doc Convenience function to start `darcy' and all
%% of its dependent applications.
%%
%% Returns `{ok, StartedApps}' or `{error, Reason}' straight from
%% `application:ensure_all_started/1'.
start() ->
application:ensure_all_started(darcy).
%% @doc Build an `AttributeDefinitions' map from `{Name, Type}' pairs,
%% suitable for merging into a table or index specification.
-spec make_attribute_defs(
        [ { AttributeName :: binary(),
            AttributeType :: binary() } ]
    ) -> AttributeDefinitions :: map().
make_attribute_defs(Attributes) when is_list(Attributes) ->
    Definitions = lists:map(
        fun({Name, Type}) ->
            #{ <<"AttributeName">> => Name,
               <<"AttributeType">> => Type }
        end,
        Attributes),
    #{ <<"AttributeDefinitions">> => Definitions }.
%% @doc Build a `KeySchema' map for a table or index specification.
%%
%% A single attribute becomes the `HASH' (partition) key; with two
%% attributes, the first is the `HASH' key and the second the `RANGE'
%% (sort) key.
%%
%% You can <a href="http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.Partitions.html">read more about hash and range keys</a>
%% in the official Dynamo documentation.
-spec make_key_schema(
        [ Keys :: binary() ] ) -> KeySchema :: map().
make_key_schema([HashKey]) ->
    make_schema_impl([{HashKey, <<"HASH">>}]);
make_key_schema([HashKey, RangeKey]) ->
    make_schema_impl([{HashKey, <<"HASH">>}, {RangeKey, <<"RANGE">>}]).

%% Turn `{AttributeName, KeyType}' pairs into the wire-format schema map.
make_schema_impl(Pairs) ->
    Schema = [ #{ <<"AttributeName">> => Name,
                  <<"KeyType">> => KeyType } || {Name, KeyType} <- Pairs ],
    #{ <<"KeySchema">> => Schema }.
%% @doc Build a `ProvisionedThroughput' map declaring the read and write
%% capacity units Dynamo should reserve for a table or index.
-spec make_provisioned_throughput(
        ReadUnits :: pos_integer(),
        WriteUnits :: pos_integer() ) -> ProvisionedThroughput :: map().
make_provisioned_throughput(ReadUnits, WriteUnits) ->
    Capacity = #{ <<"ReadCapacityUnits">> => ReadUnits,
                  <<"WriteCapacityUnits">> => WriteUnits },
    #{ <<"ProvisionedThroughput">> => Capacity }.
%% @doc Wrap a binary table name in a `TableName' map for use in a table
%% or index specification.
-spec table_name( Name :: binary() ) -> TableName :: map().
table_name(Name) when is_binary(Name) ->
    #{ <<"TableName">> => Name }.
%% @doc Convenience function which returns a complete
%% table specification. This function uses the default
%% number of read and write units (currently 5 each).
-spec make_table_spec(
TableName :: binary(),
Attr :: [{ AttrName :: binary(),
AttrType :: binary() }],
Keys :: [ Keys :: binary() ] ) -> TableSpec :: map().
make_table_spec(TableName, Attributes, Keys) ->
%% Defaults come from ?DEFAULT_READ_UNITS / ?DEFAULT_WRITE_UNITS in darcy.hrl.
make_table_spec(TableName, Attributes, Keys,
?DEFAULT_READ_UNITS, ?DEFAULT_WRITE_UNITS).
%% @doc Convenience function which returns a complete table
%% specification: attribute definitions, key schema, provisioned
%% throughput and table name merged into one request map.
-spec make_table_spec(
        TableName :: binary(),
        Attr :: [{ AttrName :: binary(),
                   AttrType :: binary() }],
        Keys :: [ binary() ],
        ReadUnits :: pos_integer(),
        WriteUnits :: pos_integer() ) -> TableSpec :: map().
make_table_spec(TableName, Attributes, Keys, Read, Write) ->
    Parts = [make_attribute_defs(Attributes),
             make_key_schema(Keys),
             make_provisioned_throughput(Read, Write),
             table_name(TableName)],
    %% The parts carry disjoint top-level keys, so merge order is immaterial.
    lists:foldl(fun maps:merge/2, #{}, Parts).
%% @doc Convenience function which returns a global index
%% specification. This function uses the default read and
%% write units (currently 5 each).
-spec make_global_index_spec(
IndexName :: binary(),
Keys :: [ binary() ],
ProjectionSpec :: {} |
{ [ binary() ], binary() }
) -> GlobalIndexSpec :: map().
make_global_index_spec(IndexName, Keys, ProjectionSpec) ->
%% Defaults come from ?DEFAULT_READ_UNITS / ?DEFAULT_WRITE_UNITS in darcy.hrl.
make_global_index_spec(IndexName, Keys, ProjectionSpec,
?DEFAULT_READ_UNITS, ?DEFAULT_WRITE_UNITS).
%% @doc Convenience function which returns a complete global index
%% specification: key schema, provisioned throughput, projection and
%% index name merged into one map.
-spec make_global_index_spec(
        IndexName :: binary(),
        Keys :: [ binary() ],
        ProjectionSpec :: {} |
                          { [ NonKeyAttribute :: binary() ], binary() },
        ReadUnits :: pos_integer(),
        WriteUnits :: pos_integer()
    ) -> GlobalIndexSpec :: map().
make_global_index_spec(IndexName, Keys, ProjectionSpec, Read, Write) ->
    Parts = [make_key_schema(Keys),
             make_provisioned_throughput(Read, Write),
             make_projection(ProjectionSpec),
             index_name(IndexName)],
    %% The parts carry disjoint top-level keys, so merge order is immaterial.
    lists:foldl(fun maps:merge/2, #{}, Parts).
%% @doc Add a global index specification to a table specification.
%%
%% If the table specification already carries global indexes, the new one
%% is prepended to the existing list. Dynamo limits how many global
%% secondary indexes a table may have (check the current AWS quota); this
%% function does not enforce any limit itself.
-spec add_global_index(
        TableSpec :: map(),
        GlobalIndexSpec :: map() ) -> NewTableSpec :: map().
add_global_index(TableSpec, GSISpec) ->
    Existing = maps:get(<<"GlobalSecondaryIndexes">>, TableSpec, []),
    TableSpec#{ <<"GlobalSecondaryIndexes">> => [ GSISpec | Existing ] }.
%% @doc Wrap a binary index name in an `IndexName' map for use in an
%% index specification.
-spec index_name( Name :: binary() ) -> IndexName :: map().
index_name(Name) when is_binary(Name) ->
    #{ <<"IndexName">> => Name }.
%% @doc Build a `Projection' map suitable for use in an index
%% specification.
%%
%% Pass an empty tuple `{}' to get an empty projection map. For the
%% `INCLUDE' projection type, supply the list of non-key attributes to
%% project; for `ALL' or `KEYS_ONLY', the attribute list must be empty.
-spec make_projection(
        ProjectionSpec :: {} |
                          { [ NonKeyAttribute :: binary() ], binary() }) -> Projection :: map().
make_projection({}) ->
    #{ <<"Projection">> => #{} };
make_projection({Attrs, <<"INCLUDE">>}) ->
    Inner = #{ <<"NonKeyAttributes">> => Attrs,
               <<"ProjectionType">> => <<"INCLUDE">> },
    #{ <<"Projection">> => Inner };
make_projection({[], Type}) ->
    #{ <<"Projection">> => #{ <<"ProjectionType">> => Type } }.
%% @doc Make a table if it doesn't already exist.
%%
%% A 400 status from DescribeTable is treated as "table not found"
%% and triggers creation; any other error status is surfaced as
%% `{error, {table_creation_error, ...}}'.
-spec make_table_if_not_exists( Client :: darcy_client:aws_client(),
                                TableSpec :: map() ) -> ok | {error, Error :: term()}.
make_table_if_not_exists(Client, #{ <<"TableName">> := TableName} = Spec) ->
    case darcy_ddb_api:describe_table(Client, #{ <<"TableName">> => TableName }) of
        {ok, _Result, _Details} -> ok;
        {error, _Error, {400, _Headers, _Client}} -> attempt_make_table(Client, Spec);
        {error, Error, {Status, _Headers, _NClient}} -> {error, {table_creation_error, {Status, Error}}}
    end.

%% Issue the CreateTable call; only a 200 reply counts as success.
attempt_make_table(Client, Spec) ->
    case darcy_ddb_api:create_table(Client, Spec) of
        {ok, _Result, { 200, _Headers, _Client}} -> ok;
        {error, Error, {Status, _Headers, _NewClient}} -> {error, {table_creation_failed, {Status, Error}}}
    end.
%% @doc Make a global table if it doesn't already exist.
-spec make_global_table_if_not_exists(Client :: darcy_client:aws_client(),
                                      TableSpec :: map(),
                                      Regions :: [ binary() ]) -> ok | {error, Error :: term()}.
%% The client's own region must be one of the replication regions;
%% otherwise the request is rejected before any AWS call is made.
make_global_table_if_not_exists(#{ region := Region } = Client,
                                #{ <<"TableName">> := TableName } = Spec, Regions) ->
    case lists:member(Region, Regions) of
        false -> {error, {bad_region_spec, [Region, Regions]}};
        true ->
            case describe_global_table(Client, TableName) of
                {ok, _Result} -> ok;
                {error, _} ->
                    %% create the per-region tables first, then link
                    %% them together into a global table
                    ok = global_table_setup(Client, Spec, Regions),
                    attempt_make_global_table(Client, TableName, Regions)
            end
    end.

%% Predicate used to verify that every per-region creation returned ok.
all_ok(ok) -> true;
all_ok(_) -> false.

%% Create the table (with streams enabled) in every region in parallel;
%% crashes with badmatch if any region fails.
global_table_setup(Client, Spec, Regions) ->
    true = lists:all(fun all_ok/1,
                     pmap(fun(R) -> do_table_creation(Client, Spec, R) end, Regions)
                    ),
    ok.

%% Create the table in one specific region using a region-switched client.
do_table_creation(Client, Spec, Region) ->
    NewClient = darcy_client:switch_region(Client, Region),
    NewSpec = enable_global_streams(Spec),
    make_table_if_not_exists(NewClient, NewSpec).
%% Enable Dynamo streams (new and old images) on a table specification,
%% as required for global table replication. Any existing
%% `StreamSpecification' entry in the spec is overwritten.
enable_global_streams(Spec) ->
    Spec#{ <<"StreamSpecification">> =>
               #{ <<"StreamEnabled">> => true,
                  <<"StreamViewType">> => <<"NEW_AND_OLD_IMAGES">> } }.
%% Link the already-created per-region tables into one global table.
attempt_make_global_table(Client, TableName, Regions) ->
    Req = #{ <<"GlobalTableName">> => TableName,
             <<"ReplicationGroup">> => [ #{ <<"RegionName">> => R } || R <- Regions ] },
    %% this is a long-running operation, make the response receive timeout 60 seconds
    case darcy_ddb_api:create_global_table(Client, Req, [{recv_timeout, 60*1000}]) of
        {ok, _Result, {200, _Headers, _Client}} -> ok;
        {error, Error, {Status, _Headers, _Client}} -> {error, {global_table_creation_failed, {Status, Error}}}
    end.
%% @doc Delete a Dynamo table with the given name.
%%
%% Success means the table has entered the `DELETING' state; actual
%% removal happens asynchronously on the AWS side.
-spec delete_table( Client :: darcy_client:aws_client(),
                    TableName :: binary() ) -> ok | {error, Error :: term()}.
delete_table(Client, TableName) ->
    case darcy_ddb_api:delete_table(Client, table_name(TableName)) of
        {ok, #{ <<"TableDescription">> := Desc }, Details} -> ensure_deleting_state(Desc, Details);
        {error, Error, {Status, _Headers, _C}} -> {error, {table_deletion_error, {Status, Error}}}
    end.
%% Confirm a DeleteTable reply shows the table entering the `DELETING'
%% state; any other status is reported as a deletion error.
ensure_deleting_state(#{ <<"TableStatus">> := <<"DELETING">> }, _Details) ->
    ok;
ensure_deleting_state(Other, {Status, _Headers, _C}) ->
    {error, {table_deletion_error, {Status, Other}}}.
%% @doc This returns a map representing the current state of the
%% given Dynamo table.
-spec describe_table( Client :: darcy_client:aws_client(),
                      TableName :: binary() ) -> {ok, TableDesc :: map()} |
                                                 {error, Error :: term()}.
describe_table(Client, TableName) ->
    case darcy_ddb_api:describe_table(Client, table_name(TableName)) of
        {ok, Result, _Details } -> {ok, Result};
        %% Use a fresh `_Client' here: the previous code matched the
        %% already-bound `Client' parameter, so an error reply carrying
        %% an updated client record failed the match and crashed with
        %% case_clause instead of returning the error tuple.
        {error, Error, {Status, _Headers, _Client}} -> {error, {table_description_error, {Status, Error}}}
    end.
%% @doc This returns a map representing the current state of the
%% given Dynamo global table.
-spec describe_global_table( Client :: darcy_client:aws_client(),
                             TableName :: binary() ) -> {ok, TableDesc :: map()} |
                                                        {error, Error :: term()}.
%% Global tables are addressed by `GlobalTableName' rather than
%% `TableName' in the Dynamo API.
describe_global_table(Client, TableName) ->
    case darcy_ddb_api:describe_global_table(Client, #{ <<"GlobalTableName">> => TableName }) of
        {ok, Result, _Details } -> {ok, Result};
        {error, Error, {Status, _Headers, _Client}} -> {error, {table_description_error, {Status, Error}}}
    end.
%% GET ITEM
%% @doc Retrieve a single item from the given Dynamo table using
%% the hash and if needed, range keys.
-spec get_item( Client :: darcy_client:aws_client(),
                TableName :: binary(),
                Key :: #{ KeyName :: binary() => LookupValue :: lookup_value() }
              ) -> {ok, Item :: map()} |
                   {error, not_found} |
                   {error, Error :: term()}.
get_item(Client, TableName, Key) ->
    %% the key map is converted to Dynamo typed format before the call
    Request = #{ <<"TableName">> => TableName,
                 <<"Key">> => to_ddb(Key) },
    case darcy_ddb_api:get_item(Client, Request) of
        {ok, Raw, _Details} -> return_value(Raw);
        {error, Error, {Code, Headers, _Client}} -> {error, {Error, [Code, Headers]}}
    end.
%% @doc Retrieve a set of records given a list of keys.
%%
%% The underlying API supports a maximum of 100 keys per
%% request, so this call batches keys into sets of up
%% to 100 and folds across these sets into an
%% accumulator for all keys.
%%
%% Items which are not found in the table will not be part
%% of the result set but will consume provisioned read
%% capacity.
%%
%% <B>N.B.</B>: Dynamo and this client will not return your items
%% in any particular order!
-spec batch_get_items( Client :: darcy_client:aws_client(),
                       TableName :: binary(),
                       Items :: [ map() ] ) -> {ok, [ Result :: term() ]} |
                                               {error, Error :: term() }.
batch_get_items(Client, TableName, Keys) ->
    make_batch_get(Client, TableName, Keys, []).

%% Fold over the key list in chunks of up to 100, accumulating the
%% results of each round.
%% NOTE(review): an `{error, ...}' tuple from execute_batch_get/5 is
%% accumulated and flattened into the item list like a normal result —
%% confirm whether errors should instead abort the fold.
make_batch_get(_Client, _TableName, [], Acc) -> {ok, lists:flatten(Acc)};
make_batch_get(Client, TableName, Keys, Acc) ->
    {Request, Rest} = format_batch_get(TableName, Keys),
    Result = execute_batch_get(Client, TableName, Request, ?RETRIES, []),
    make_batch_get(Client, TableName, Rest, [ Result | Acc ]).

%% Split off the next chunk of keys and build its request body.
format_batch_get(TableName, Keys) ->
    {Current, Rest} = maybe_split_keys(Keys),
    {format_batch_get_request(TableName, Current), Rest}.
%% Take at most 100 keys for the next batch request; anything beyond
%% the API limit is returned for a later round.
maybe_split_keys(Keys) when length(Keys) > 100 ->
    lists:split(100, Keys);
maybe_split_keys(Keys) ->
    {Keys, []}.
%% Build a BatchGetItem request body for up to 100 keys against a
%% single table, converting each key map to Dynamo typed format.
format_batch_get_request(TableName, Keys) ->
    DdbKeys = [ to_ddb(K) || K <- Keys ],
    #{ <<"RequestItems">> => #{ TableName => #{ <<"Keys">> => DdbKeys } } }.
%% Run one BatchGetItem round, retrying any unprocessed keys (with
%% backoff) up to the given number of times.
execute_batch_get(_Client, _TableName, Request, 0, Acc) -> {error, {retries_exceeded, Request, Acc}};
execute_batch_get(Client, TableName, Request, Retries, Acc) ->
    case darcy_ddb_api:batch_get_item(Client, Request) of
        {ok, Raw, _Details} -> return_batch_get_results(Raw, Client, TableName, Retries, Acc);
        {error, Error, {Code, Headers, _Client}} -> {error, {Error, [Code, Headers]}}
    end.
%% happy path: no unprocessed keys, no accumulator
return_batch_get_results(#{ <<"Responses">> := R,
                            <<"UnprocessedKeys">> := U }, _Client, TableName,
                         _Retries, [])
  when map_size(U) == 0 ->
    batch_get_results(TableName, R);
%% not as happy path: no unprocessed keys, accumulator has partial results
return_batch_get_results(#{ <<"Responses">> := R,
                            <<"UnprocessedKeys">> := U }, _Client, TableName,
                         _Retries, Acc)
  when map_size(U) == 0 ->
    lists:flatten([ batch_get_results(TableName, R) | Acc ]);
%% not very happy path: unprocessed keys — sleep, then retry them
return_batch_get_results(#{ <<"Responses">> := R,
                            <<"UnprocessedKeys">> := U }, Client, TableName,
                         Retries, Acc) ->
    retry_sleep(Retries),
    %% Per the AWS API, `UnprocessedKeys' is the value to use as
    %% `RequestItems' in the follow-up call. The previous code passed
    %% `U' as the *entire* request body, which matches neither the
    %% initial request shape nor reprocess_batch_write/3's handling of
    %% unprocessed items.
    execute_batch_get(Client, TableName, #{ <<"RequestItems">> => U },
                      Retries - 1, [ batch_get_results(TableName, R) | Acc ]).
%% Decode and strip Dynamo typing from each item returned for the table.
batch_get_results(TableName, Responses) ->
    [ clean_map(to_map(I)) || I <- maps:get(TableName, Responses) ].
%% PUT ITEM
%% @doc Put a single item into the given dynamo table, with conditions!
%%
%% The condition map may carry `condition_expression',
%% `expression_attribute_names' and `expression_attribute_values';
%% either all three are present or all three are absent (enforced by
%% the clauses of make_put_request/5).
-spec put_item( Client :: darcy_client:aws_client(),
                TableName :: binary(),
                Item :: map(),
                ConditionMap :: map() ) -> ok | {error, Error :: term()}.
put_item(Client, TableName, Item, ConditionMap) ->
    ConditionExpression = maps:get(condition_expression, ConditionMap, undefined),
    ExpressionAttributeNames = maps:get(expression_attribute_names, ConditionMap, undefined),
    ExpressionAttributeValues = maps:get(expression_attribute_values, ConditionMap, undefined),
    Request = make_put_request(TableName,
                               Item,
                               ConditionExpression,
                               ExpressionAttributeNames,
                               ExpressionAttributeValues),
    case darcy_ddb_api:put_item(Client, Request) of
        {ok, #{}, {200, _Headers, _Client}} -> ok;
        {error, Error, {Code, Headers, _Client}} -> {error, {Error, [Code, Headers]}}
    end.

%% Build the PutItem request body; the unconditional form when no
%% condition fields were supplied, the conditional form otherwise.
make_put_request(TableName, Item, undefined, undefined, undefined) ->
    #{ <<"TableName">> => TableName,
       <<"Item">> => to_ddb(Item) };
make_put_request(TableName, Item,
                 ConditionExpression,
                 ExpressionAttributeNames,
                 ExpressionAttributeValues)
  when is_binary(ConditionExpression) and
       is_map(ExpressionAttributeNames) and
       is_map(ExpressionAttributeValues) ->
    %% attribute values are data and must be Dynamo-typed; attribute
    %% names are plain aliases and are passed through untouched
    #{ <<"TableName">> => TableName,
       <<"ConditionExpression">> => ConditionExpression,
       <<"ExpressionAttributeNames">> => ExpressionAttributeNames,
       <<"ExpressionAttributeValues">> => to_ddb(ExpressionAttributeValues),
       <<"Item">> => to_ddb(Item) }.
%% @doc Put a single item into the given dynamo table.
%%
%% Equivalent to put_item/4 with an empty (unconditional) condition map.
-spec put_item( Client :: darcy_client:aws_client(),
                TableName :: binary(),
                Item :: map() ) -> ok | {error, Error :: term()}.
put_item(Client, TableName, Item) ->
    put_item(Client, TableName, Item, #{}).
%% @doc Put a list of items into the given Dynamo table.
%%
%% This function currently does not support deleting
%% items (although the underlying API supports this.)
%%
%% Items are automatically batched into groups of 25 or
%% less as required by AWS. Unprocessed keys are automatically
%% retried up to 5 times.
-spec batch_write_items( Client :: darcy_client:aws_client(),
                         TableName :: binary(),
                         Items :: [ map() ] ) -> ok | {error, Error :: term()}.
batch_write_items(Client, TableName, Items) when length(Items) =< 25 ->
    Request = make_batch_put(TableName, Items),
    Result = darcy_ddb_api:batch_write_item(Client, Request),
    handle_batch_write_result(Client, ?RETRIES, Result);
batch_write_items(Client, TableName, Items) ->
    %% recurse over 25-item chunks; a failed chunk crashes via badmatch
    {Part, Tail} = lists:split(25, Items),
    ok = batch_write_items(Client, TableName, Part),
    batch_write_items(Client, TableName, Tail).

%% Build a BatchWriteItem request body of PutRequest entries for a
%% single chunk of at most 25 items.
make_batch_put(TableName, Items) when length(Items) =< 25 ->
    #{ <<"RequestItems">> =>
           #{ TableName => [
                            #{ <<"PutRequest">> =>
                                   #{ <<"Item">> => to_ddb(I) }
                             } || I <- Items ]
            }
     }.

%% Inspect a BatchWriteItem reply: done when no unprocessed items,
%% retry the unprocessed subset otherwise, pass errors through.
handle_batch_write_result(_Client, _N,
                          {ok, #{ <<"UnprocessedItems">> := U }, _Details})
  when map_size(U) == 0 -> ok;
handle_batch_write_result(Client, N,
                          {ok, #{ <<"UnprocessedItems">> := U }, _Details}) ->
    reprocess_batch_write(Client, N, U);
handle_batch_write_result(_Client, _N,
                          {error, Error, {Status, Headers, _Ref}}) ->
    {error, {Error, [Status, Headers]}}.

%% Retry unprocessed items with backoff until the retry budget is spent.
reprocess_batch_write(_Client, 0, RetryItems) -> {error, {retries_exceeded, RetryItems}};
reprocess_batch_write(Client, N, RetryItems) ->
    retry_sleep(N),
    Results = darcy_ddb_api:batch_write_item(Client, #{ <<"RequestItems">> => RetryItems }),
    handle_batch_write_result(Client, N-1, Results).
%% Convert a raw GetItem reply into `{ok, Map}' or `{error, not_found}'
%% (Dynamo returns an empty body when the key does not exist).
return_value(#{ <<"Item">> := Item }) -> {ok, clean_map(to_map(Item))};
return_value(#{} = M) when map_size(M) == 0 -> {error, not_found}.
%% QUERY
%% @doc Lookup records using the partition and range keys from
%% a table or an index.
%%
%% Unfortunately this call requires quite a bit of understanding
%% of both the Dynamo data model and the table and/or index
%% structures.
%%
%% The query expression should take the form of a map which follows
%% the <a href="http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Query.html">query guidelines</a> laid out in the official AWS documentation.
%%
%% The return value also punts on the issue of result pagination.
-spec query( Client :: darcy_client:aws_client(),
             TableName :: binary(),
             QueryExpression :: map() ) -> {ok, Results :: map() } |
                                           {error, Error :: term()}.
query(Client, TableName, Expr) ->
    query_impl(Client, [table_name(TableName), Expr]).

%% @doc A query that operates on an index instead of a table.
-spec query( Client :: darcy_client:aws_client(),
             TableName :: binary(),
             IndexName :: binary(),
             QueryExpression :: map() ) -> {ok, Results :: map() } |
                                           {error, Error :: term()}.
query(Client, TableName, IndexName, Expr) ->
    query_impl(Client, [table_name(TableName), index_name(IndexName), Expr]).

%% Merge the request fragments (earlier entries win on key collisions)
%% and execute the query.
query_impl(Client, Ops) ->
    Request = lists:foldl(fun(M, Acc) -> maps:merge(M, Acc) end, #{}, Ops),
    case darcy_ddb_api:query(Client, Request) of
        {ok, Result, _Details } -> process_result_set(Result);
        {error, Error, {Status, Headers, _Client}} -> {error, {Error, {Status, Headers}}}
    end.

%% A zero count means nothing matched; otherwise decode each item and
%% strip its Dynamo type annotations.
process_result_set(#{ <<"Count">> := C }) when C == 0 -> {error, not_found};
process_result_set(#{ <<"Items">> := Items, <<"Count">> := C }) ->
    {ok, #{ <<"Count">> => C, <<"Items">> => [ clean_map(to_map(I)) || I <- Items ] } };
process_result_set(Other) ->
    {error, {query_error, Other}}.
%% SCAN
%% @doc This function executes a sequential table scan (no parallelism here)
%% and returns results synchronously to the caller. If you want to scan a table
%% with parallel workers, look at `scan_parallel'.
%%
%%
%% Scans return automatically when 1 MB of data has accumulated. If more data
%% is available, the atom `partial' will be returned instead of `ok'.
%%
%% If you want to continue your scanning activities, you must add the
%% `LastEvaluatedKey' as the `ExclusiveStartKey' in the next call to
%% this function's expression map. (See the official API docs for
%% further details about continuing scans:
%%
%% https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Scan.html#API_Scan_ResponseSyntax.)
%%
%% If a table has no data, the atom `empty_table' will be returned.
%%
%% If a table has data, but a filter expression has filtered all results,
%% the atom `no_results' will be returned.
-spec scan( Client :: darcy_request:aws_client(),
            TableName :: binary(),
            Expr :: map() ) -> {ok, Result :: map()} |
                               {ok, empty_table} |
                               {ok, no_results} |
                               {partial, Result :: map()} |
                               {error, Reason :: term()}.
%% The caller's expression is listed first so its entries win over the
%% generated table-name fragment on key collisions.
scan(Client, TableName, Expr) ->
    execute_scan(Client, make_scan_request([Expr, table_name(TableName)])).
%% @doc This function executes a sequential scan over an entire table. In other
%% words, it will continue to make new calls until no `LastEvaluatedKey' field
%% is returned from Dynamo.
%%
%% The items will be accumulated into a list and returned. This call is
%% syntactic sugar for `scan_all/4' with a function of `same(X) -> X' passed
%% in.
scan_all(Client, TableName, Expr) ->
    scan_all(Client, TableName, Expr, fun same/1).

%% Identity function used as the default per-item transform.
same(X) -> X.
%% @doc This function executes a sequential scan over an entire table. In other
%% words, it will continue to make new calls until no `LastEvaluatedKey' field
%% is returned from Dynamo.
%%
%% For each item returned, the function `Fun' will be executed and the results
%% accumulated and returned when all valid rows from the scan query expression
%% have been processed. (You may or may not care about these results if you're
%% doing something in your function for the side effect.)
-spec scan_all( Client :: darcy_request:aws_client(),
                TableName :: binary(),
                Expr :: map(),
                Fun :: function() ) -> {ok, [ term() ]} |
                                       {error, { Reason :: term(), Acc :: [ map() ]}}.
scan_all(Client, TableName, Expr, Fun) ->
    Request = make_scan_request([Expr, table_name(TableName)]),
    do_scan_all(Client, Request, execute_scan(Client, Request), Fun, [], ?INITIAL_ERROR_STATE).

%% Drive the scan loop. The last argument is the throttling state
%% (#error_delay{}): it starts at ?INITIAL_ERROR_STATE and is doubled on
%% throttling errors by maybe_retry_scan_op/2. Batches are accumulated
%% in reverse and flattened on completion.
do_scan_all(_Client, _Req, {ok, empty_table}, _Fun, _Acc, _ErrDelay) -> {ok, []};
do_scan_all(_Client, _Req, {ok, no_results}, _Fun, Acc, _ErrDelay) -> {ok, flatten(Acc)};
do_scan_all(_Client, _Req, {ok, #{ <<"Items">> := I }}, Fun, Acc, _ErrDelay) ->
    {ok, flatten([ batch_pmap(Fun, I) | Acc ])};
do_scan_all(Client, Req, {error, {EType, _ETxt}=Error}, Fun, Acc, EDelay) ->
    %% retriable errors (currently only throughput-exceeded) re-run the
    %% same request after the backoff; anything else aborts with the
    %% partial accumulator attached
    case maybe_retry_scan_op(get_error_type(EType), EDelay) of
        {true, NewErrDelay} ->
            error_logger:warning_msg("Got error ~p. Will retry request.", [Error]),
            maybe_sleep(NewErrDelay),
            do_scan_all(Client, Req, execute_scan(Client, Req), Fun, Acc, NewErrDelay);
        _ ->
            error_logger:error_msg("Error executing scan_all: ~p request: ~p", [Error, Req]),
            {error, {Error, Acc}}
    end;
%% TODO? Maybe throttle back up if we get "a lot" of successes.
do_scan_all(Client, Req, {partial, #{ <<"LastEvaluatedKey">> := LEK, <<"Items">> := I }}, Fun, Acc, ErrDelay) ->
    %% continue from the last evaluated key, keeping any current delay
    NewRequest = make_scan_request([Req, #{ <<"ExclusiveStartKey">> => LEK }]),
    maybe_sleep(ErrDelay),
    do_scan_all(Client, NewRequest, execute_scan(Client, NewRequest), Fun, [ batch_pmap(Fun, I) | Acc], ErrDelay).
%% Sleep for the current backoff delay; a zero delay means no throttling
%% is in effect.
maybe_sleep(#error_delay{ delay = 0 }) ->
    ok;
maybe_sleep(#error_delay{ delay = D }) ->
    error_logger:warning_msg("THROTTLE: Pid ~p sleeping for ~p ms before next request...", [self(), D]),
    timer:sleep(D).

%% Decide whether a scan error is retriable. Only throughput-exceeded
%% errors are retried, with exponential backoff: start at
%% ?INITIAL_ERROR_DELAY, double each time, give up past ?MAX_ERROR_DELAY.
maybe_retry_scan_op(<<"ProvisionedThroughputExceededException">>, #error_delay{ delay = 0 } = ErrState) ->
    {true, ErrState#error_delay{ delay = ?INITIAL_ERROR_DELAY }};
maybe_retry_scan_op(<<"ProvisionedThroughputExceededException">>, #error_delay{ delay = D } = ErrState) when D > ?MAX_ERROR_DELAY ->
    {false, ErrState};
maybe_retry_scan_op(<<"ProvisionedThroughputExceededException">>, #error_delay{ delay = D } = ErrState) ->
    {true, ErrState#error_delay{ delay = D*2 }};
maybe_retry_scan_op(_Type, ErrorState) ->
    {false, ErrorState}.
%% Extract the bare exception name from a fully-qualified Dynamo error
%% type such as
%% `<<"com.amazonaws.dynamodb.v20120810#ProvisionedThroughputExceededException">>'.
%% NOTE: the previous code called binary:split(<<"#">>, Type) with the
%% subject and pattern swapped; binary:split/2 takes the subject binary
%% first and the pattern second, so every call crashed (badarg or
%% badmatch) and retriable errors were never recognized.
get_error_type(Type) ->
    [_, Exception] = binary:split(Type, <<"#">>),
    Exception.
%% @doc Scan a table in parallel using the given expression. This
%% function is equivalent to `scan_parallel/7' with a timeout value of 60000
%% milliseconds.
%%
%% Instead of returning the coordinator pid to the caller, this
%% function blocks and waits for the return values from the
%% coordinator.
scan_parallel(Client, TableName, Expr, Fun, SegmentCount) ->
    scan_parallel(Client, TableName, Expr, Fun, ?BIG_TIMEOUT, SegmentCount).

%% Blocking wrapper around scan_parallel/7: waits for the coordinator's
%% tagged reply or times out.
scan_parallel(Client, TableName, Expr, Fun, Timeout, SegmentCount) ->
    {Ref, _Pid} = scan_parallel(Client, TableName, Expr, Fun, Timeout, SegmentCount, self()),
    receive
        %% an empty error list means every segment succeeded
        {Ref, {Results, []}} -> {ok, Results};
        {Ref, {Partial, Errors}} -> {error, {Errors, Partial}};
        Other -> Other
    after Timeout ->
            {error, scan_timeout}
    end.
%% @doc Scan a table in parallel using the given expression.
%%
%% DynamoDB supports parallel scans using a partitioning technique described in the Developer Guide.
%%
%% https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html
%%
%% This function spawns a coordinator process which in turn spawns a set of
%% workers, one per segment count which executes the `scan_all/4' function.
%%
%% <B>N.B.</B>: This operation can consume a lot of read capacity. It is a good
%% idea to limit the number of segments used in a scan operation.
%%
%% The coordinator that is spawned here is <B>linked</B> to the caller. If you
%% want more robust error handling, you should trap exit messages by the
%% caller's process.
%%
%% Since this function returns the "raw" results, you will have to
%% handle them appropriately within your own receive block.
%%
%% They will be in the form of `{Ref, {Results, Errors}}' where
%% `Ref' matches the reference returned by this call. The coordinator
%% pid is the process which is coordinating the parallel workers.
%% (You may or may not care about those.)
-spec scan_parallel(
        Client :: darcy_request:aws_client(),
        TableName :: binary(),
        Expr :: map(),
        Fun :: function(),
        Timeout :: pos_integer(),
        SegmentCount :: pos_integer(),
        ReplyPid :: pid()) -> {Ref :: reference(),
                               Coordinator :: pid()}.
%% Build one scan request per segment (segments are numbered 0..N-1)
%% and hand them to a linked coordinator process.
scan_parallel(Client, TableName, Expr, Fun, Timeout, SegmentCount, ReplyPid) ->
    Reqs = [ make_scan_request([Expr, table_name(TableName), make_segments(N, SegmentCount)]) ||
             N <- lists:seq(0, SegmentCount - 1) ],
    Ref = make_ref(),
    Pid = spawn_link(fun() -> start_coordinator(Client, Ref, Timeout, Fun, Reqs, ReplyPid) end),
    {Ref, Pid}.

%% Predicate used to partition worker results into successes and errors.
results_ok({ok, _}) -> true;
results_ok(_) -> false.

%% Run all segment scans in parallel and send the combined, tagged
%% result back to the caller.
start_coordinator(Client, Ref, Timeout, Fun, Reqs, Reply) ->
    L = pmap(fun(Req) -> worker_scan_all(Client, Req, Fun) end, Reqs,
             Timeout, {Ref, {[], scan_timeout}}),
    ReplyMsg = make_reply_msg(Ref, L),
    Reply ! {Ref, ReplyMsg}.

%% Translate pmap's timeout sentinel into an explicit error; otherwise
%% split worker results into flattened successes and an error list.
make_reply_msg(Ref, {Ref, {[], scan_timeout}}) -> {[], {error, scan_timeout}};
make_reply_msg(_Ref, Results) ->
    {Res, Err} = lists:partition(fun results_ok/1, Results),
    {lists:flatten([ R || {ok, R} <- Res ]), Err}.

%% One parallel worker: scan a single segment to completion.
worker_scan_all(Client, Request, Fun) ->
    do_scan_all(Client, Request, execute_scan(Client, Request), Fun, [], ?INITIAL_ERROR_STATE).
%% Build the segment descriptor for one worker in a parallel scan:
%% this worker handles segment `Segment' out of `Total' segments.
make_segments(Segment, Total) ->
    #{ <<"Segment">> => Segment,
       <<"TotalSegments">> => Total }.
%% Merge a list of request fragments into a single scan request map.
%% Earlier fragments take precedence over later ones on key collisions.
make_scan_request(Ops) ->
    lists:foldl(fun maps:merge/2, #{}, Ops).
%% Execute one Scan API call and normalize its reply.
execute_scan(Client, Request) ->
    case darcy_ddb_api:scan(Client, Request) of
        {ok, Result, _Details } -> process_scan_result(Result);
        {error, Error, {Status, Headers, _Client}} -> {error, {Error, {Status, Headers}}}
    end.

%% Classify a scan reply: empty table, all rows filtered out, a partial
%% page (LastEvaluatedKey present), or a complete result. Items are
%% decoded and stripped of their Dynamo type annotations.
process_scan_result(#{ <<"Count">> := 0, <<"ScannedCount">> := 0 }) -> {ok, empty_table};
process_scan_result(#{ <<"Count">> := 0, <<"ScannedCount">> := _SC }) -> {ok, no_results};
process_scan_result(#{ <<"Items">> := Items, <<"LastEvaluatedKey">> := _LEK } = M) ->
    NewItems = [ clean_map(to_map(I)) || I <- Items ],
    {partial, maps:put(<<"Items">>, NewItems, M)};
process_scan_result(#{ <<"Items">> := Items } = M) ->
    NewItems = [ clean_map(to_map(I)) || I <- Items ],
    {ok, maps:put(<<"Items">>, NewItems, M)};
process_scan_result(Other) ->
    {error, {scan_error, Other}}.
%% Accumulators in this module are built in reverse; restore order and
%% flatten the nested batch lists into a single list of items.
flatten(Acc) when is_list(Acc) ->
    lists:foldl(fun(Batch, Flat) -> lists:flatten(Batch) ++ Flat end, [], Acc).
%% @doc This function returns a map without any Dynamo specific type tuples,
%% which is useful for passing around internally in an application that doesn't
%% care or understand Dynamo data types. Any tagged 2-tuple value (e.g.
%% `{list, L}', `{blob, B}') is replaced by its payload; all other
%% values pass through unchanged.
clean_map(M) when is_map(M) ->
    Strip = fun(_Key, {_Tag, Value}) -> Value;
               (_Key, Value) -> Value
            end,
    maps:map(Strip, M).
%% @doc Default decoder for binary blob data: base64-decodes the blob
%% and then zlib-decompresses it (the inverse of default_encode/1).
default_decode(Blob) ->
    Compressed = base64:decode(Blob),
    zlib:uncompress(Compressed).

%% @doc Default encoder for binary blob data: zlib-compresses the data
%% and then base64-encodes the compressed bytes.
default_encode(Data) ->
    Compressed = zlib:compress(Data),
    base64:encode(Compressed).
%% @doc Translate from a "raw" JSON map representation of a Dynamo
%% data item to an Erlang data item. Uses the following tuples
%% to remove ambiguities in Erlang JSON encoding:
%% <ul>
%% <li>`NULL' values are returned as `undefined'</li>
%% <li>`{blob, Binary}'</li>
%% <li>`{list, List}'</li>
%% <li>`{string_set, Set}' (internally stored as an ordset)</li>
%% <li>`{number_set, Set}' (internally stored as an ordset)</li>
%% </ul>
%% Only map-shaped values are passed through unddt/1; any non-map value
%% is returned unchanged.
to_map(M) when is_map(M) ->
    maps:map(fun(_K, V) when is_map(V) -> unddt(V);
                (_K, V) -> V end,
             M).
%% @private
%% Decode a single Dynamo-typed value (`{"S": ...}', `{"N": ...}', etc.)
%% into its Erlang representation; see to_map/1 for the tagged tuples used.
unddt(#{ <<"B">> := V }) ->
    %% blob decoding is pluggable via the darcy application environment
    {M, F, A} = application:get_env(darcy, blob_decode_fun,
                                    {darcy, default_decode, []}),
    {blob, erlang:apply(M, F, [V | A])};
unddt(#{ <<"N">> := V }) -> decode_number(V);
unddt(#{ <<"S">> := V }) -> V;
unddt(#{ <<"BOOL">> := <<"true">> }) -> true;
unddt(#{ <<"BOOL">> := true }) -> true;
unddt(#{ <<"BOOL">> := <<"false">> }) -> false;
unddt(#{ <<"BOOL">> := false }) -> false;
unddt(#{ <<"L">> := V }) -> {list, [ unddt(E) || E <- V ]};
unddt(#{ <<"M">> := V }) -> maps:map(fun(_K, Val) -> unddt(Val) end, V);
unddt(#{ <<"SS">> := V }) -> {string_set, ?SET:from_list([ E || E <- V ])};
%% use decode_number/1 here as well so number sets containing float
%% members no longer crash (previously only binary_to_integer/1 was
%% attempted for NS, inconsistent with the scalar N clause above)
unddt(#{ <<"NS">> := V }) -> {number_set, ?SET:from_list([ decode_number(E) || E <- V ])};
unddt(#{ <<"NULL">> := _V }) -> undefined;
unddt(Other) -> erlang:error({error, badarg}, [Other]).

%% @private
%% Dynamo numbers arrive as binaries and could be an integer or a
%% float. Try integer conversion first, falling back to float.
decode_number(V) ->
    try
        binary_to_integer(V)
    catch
        error:badarg -> binary_to_float(V)
    end.
%% @doc This function takes an Erlang map and attempts to encode it using Dynamo
%% data type annotations. Because there are ambiguities in how Erlang internally
%% represents things like strings, lists and sets, tagged tuples are used to
%% remove ambiguity. They are the same tagged tuples as above:
%% <ul>
%% <li>`undefined' is stored as a `NULL' data type</li>
%% <li>`{blob, Binary}'</li>
%% <li>`{list, List}'</li>
%% <li>`{string_set, Set}' (internally stored as an ordset)</li>
%% <li>`{number_set, Set}' (internally stored as an ordset)</li>
%% </ul>
%%
%% Generally, you should try to modify your internal data representation values
%% to remove these ambiguities <i>before</i> you pass them into this function.
%%
%% Raises `{error, badarg}' for any non-map argument.
to_ddb(M) when is_map(M) ->
    maps:map(fun(_K, V) -> ddt(V) end, M);
to_ddb(Other) -> erlang:error({error, badarg}, [Other]).
%% @private
%% Encode one Erlang value into its Dynamo typed-map form.
%% see http://boto3.readthedocs.io/en/latest/_modules/boto3/dynamodb/types.html
ddt(undefined) -> #{ <<"NULL">> => true };
ddt(null) -> #{ <<"NULL">> => true };
ddt({blob, Data}) ->
    %% blob encoding is pluggable via the darcy application environment
    {M, F, A} = application:get_env(darcy, blob_encode_fun,
                                    {darcy, default_encode, []}),
    #{ <<"B">> => erlang:apply(M, F, [ Data | A ]) };
ddt({list, L}) -> #{ <<"L">> => [ ddt(E) || E <- L ] };
ddt({string_set, S}) -> #{ <<"SS">> => [ ddt(E) || E <- ?SET:to_list(S) ] };
ddt({number_set, S}) -> #{ <<"NS">> => [ ddt(E) || E <- ?SET:to_list(S) ] };
ddt(V) when is_integer(V) -> #{ <<"N">> => number_to_binary(V) };
ddt(V) when is_float(V) -> #{ <<"N">> => number_to_binary(V) };
ddt(V) when is_binary(V) -> #{ <<"S">> => V };
ddt(V) when is_boolean(V) -> #{ <<"BOOL">> => V };
ddt(V) when is_map(V) -> #{ <<"M">> => maps:map(fun(_K, Val) -> ddt(Val) end, V) };
ddt(V) when is_list(V) ->
    %% ambiguous: a plain list is first treated as a charlist/iolist
    %% (stored as a string); if that conversion fails it is encoded as
    %% a Dynamo list instead. Use the {list, L} tag to force list form.
    try
        #{ <<"S">> => list_to_binary(V) }
    catch
        _:_ ->
            #{ <<"L">> => [ ddt(E) || E <- V ] }
    end;
ddt(Other) -> erlang:error({error, badarg}, [Other]).
%% Render an integer or float as a Dynamo number binary. Floats use up
%% to 20 decimal places with trailing zeros trimmed (compact option).
number_to_binary(N) when is_float(N) ->
    float_to_binary(N, [{decimals, 20}, compact]);
number_to_binary(N) when is_integer(N) ->
    integer_to_binary(N).
%% sleep for RETRIES - N * 1000 milliseconds before retrying an operation.
%% Current RETRIES value is 5. Always sleeps for <i>at least</i> 1000
%% milliseconds.
%% N = 5 => 1000 ms sleep
%% N = 4 => 1000 ms sleep
%% N = 3 => 2000 ms sleep
%% N = 2 => 3000 ms sleep
%% N = 1 => 4000 ms sleep
%% N is the number of retries *remaining*, so the sleep grows as the
%% retry budget is consumed.
retry_sleep(N) ->
    S = max(1000, (?RETRIES-N) * 1000),
    timer:sleep(S).
%% split the big list into smaller batches and execute them in parallel.
%% Small lists are handed straight to pmap/2 (one worker per element);
%% larger lists are chunked so the number of concurrent workers stays
%% bounded, with each worker mapping F sequentially over its batch.
batch_pmap(F, List) when length(List) =< ?BATCH_SIZE -> pmap(F, List);
batch_pmap(F, BigList) ->
    Len = length(BigList),
    I = items_per_batch(Len),
    %% 1-based start positions of each batch within BigList
    PC = lists:seq(1, Len, I),
    Batches = make_batches(BigList, I, PC, []),
    pmap(fun(E) -> lists:map(F, E) end, Batches).

%% Choose a batch size: the default ?BATCH_SIZE unless that would
%% create more than ?BATCH_MAX batches, in which case grow the batches
%% to cap the batch count.
items_per_batch(Len) ->
    case Len div ?BATCH_SIZE of
        I when I =< ?BATCH_MAX -> ?BATCH_SIZE;
        _ -> Len div ?BATCH_MAX
    end.
%% Slice List into batches of up to BatchLen elements, one batch per
%% 1-based start position in Positions; any previously accumulated
%% batches (Acc, stored in reverse) are emitted first.
make_batches(List, BatchLen, Positions, Acc) ->
    lists:reverse(Acc) ++ [ lists:sublist(List, Start, BatchLen) || Start <- Positions ].
%% parallel map
%% http://erlang.org/pipermail/erlang-questions/2009-January/041214.html
%%
%% TODO: Maybe we do not care about the order messages are received
pmap(F, Arglist) ->
    pmap(F, Arglist, ?TIMEOUT, pmap_timeout).

%% Spawn one linked worker per element, then collect results in spawn
%% order. TimeoutError is returned in place of the result list if any
%% single worker fails to report within Timeout.
pmap(F, Arglist, Timeout, TimeoutError) ->
    S = self(),
    TaskID = make_ref(),
    Workers = lists:map( fun(X) ->
                                 spawn_link(fun() -> do_F(S, TaskID, F, X) end)
                         end, Arglist),
    gather(Workers, TaskID, Timeout, TimeoutError).

%% Run F(X) in the worker and post the result back, tagged with the
%% worker pid and task reference.
%% NOTE(review): old-style `catch' is used deliberately here so a
%% crashing F yields an {'EXIT', ...} tuple as the "result" instead of
%% killing the linked caller; a try/catch rewrite would change the
%% result shape seen by gather/4.
do_F(Caller, TaskID, F, X) ->
    Caller ! {self(), TaskID, catch(F(X))}.

%% Collect one result per worker, in worker order; selective receive on
%% {Pid, TaskID, _} keeps unrelated messages out. On timeout the whole
%% partial result list is discarded and TimeoutError is returned.
gather([], _, _, _) -> [];
gather([W|R], TaskID, Timeout, TimeoutError) ->
    receive
        {W, TaskID, Val} ->
            [Val | gather(R, TaskID, Timeout, TimeoutError)]
    after Timeout ->
            TimeoutError
    end.
%% Tests
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% Verify that make_put_request/5 builds a conditional PutItem request
%% with the expression attribute values (but not names) Dynamo-typed.
make_put_request_test() ->
    TableName = <<"foo">>,
    Item = #{ <<"Student">> => <<"Foo">>,
              <<"Version">> => <<"totally_a_uuid">>
            },
    ConditionExpression = <<"#version = :old_version OR attribute_not_exists(#version)">>,
    ExpressionAttributeNames = #{<<"#version">> => <<"Version">>},
    ExpressionAttributeValues = #{<<":old_version">> => <<"totally_a_uuid">>},
    Request = make_put_request(TableName, Item,
                               ConditionExpression,
                               ExpressionAttributeNames,
                               ExpressionAttributeValues),
    Expected = #{<<"ConditionExpression">> =>
                     <<"#version = :old_version OR attribute_not_exists(#version)">>,
                 <<"ExpressionAttributeNames">> =>
                     #{<<"#version">> => <<"Version">>},
                 <<"ExpressionAttributeValues">> =>
                     #{<<":old_version">> =>
                           #{<<"S">> => <<"totally_a_uuid">>}},
                 <<"Item">> =>
                     #{<<"Student">> => #{<<"S">> => <<"Foo">>},
                       <<"Version">> => #{<<"S">> => <<"totally_a_uuid">>}},
                 <<"TableName">> => <<"foo">>},
    ?assertEqual(Expected, Request).
%% Verify Erlang-to-Dynamo encoding of numbers, strings and tagged lists.
to_ddb_test() ->
    Raw = #{ <<"Grades">> => {list, [17,39,76,27]},
             <<"Average">> => 39.75,
             <<"Student">> => <<"Quentin">>,
             <<"Subject">> => <<"Science">> },
    Expected = #{<<"Grades">> => #{<<"L">> => [#{<<"N">> => <<"17">>}, #{<<"N">> => <<"39">>}, #{<<"N">> => <<"76">>}, #{<<"N">> => <<"27">>}]},
                 <<"Average">> => #{<<"N">> => <<"39.75">>},
                 <<"Student">> => #{<<"S">> => <<"Quentin">>},
                 <<"Subject">> => #{<<"S">> => <<"Science">>}},
    ?assertEqual(Expected, to_ddb(Raw)).

%% Verify Dynamo-to-Erlang decoding retains the {list, ...} tag.
to_map_test() ->
    Raw = #{<<"Grades">> => #{<<"L">> => [#{<<"N">> => <<"17">>}, #{<<"N">> => <<"39">>}, #{<<"N">> => <<"76">>}, #{<<"N">> => <<"27">>}]},
            <<"Student">> => #{<<"S">> => <<"Quentin">>},
            <<"Subject">> => #{<<"S">> => <<"Science">>}},
    Expected = #{ <<"Grades">> => {list, [17, 39, 76, 27]},
                  <<"Student">> => <<"Quentin">>,
                  <<"Subject">> => <<"Science">> },
    ?assertEqual(Expected, to_map(Raw)).

%% Verify clean_map/1 strips the tagged tuples left by to_map/1.
clean_map_test() ->
    Raw = #{<<"Grades">> => #{<<"L">> => [#{<<"N">> => <<"17">>}, #{<<"N">> => <<"39">>}, #{<<"N">> => <<"76">>}, #{<<"N">> => <<"27">>}]},
            <<"Student">> => #{<<"S">> => <<"Quentin">>},
            <<"Subject">> => #{<<"S">> => <<"Science">>}},
    Expected = #{ <<"Grades">> => [17, 39, 76, 27],
                  <<"Student">> => <<"Quentin">>,
                  <<"Subject">> => <<"Science">> },
    ?assertEqual(Expected, clean_map(to_map(Raw))).
-endif.
-module(interval_table).
-export([new/0, gap_size/1, span/2, assign/2, shrink/2, lookup/2]).
-export_type([table/0]).
-type key() :: any().
-record(table, {gaps = [] :: list(interval:interval()),
intervals = [] :: list({interval:interval(), key()})}).
-opaque table() :: #table{}.
%% @doc Create an empty table: no assignments, and a single gap
%% covering the full range from rational 0/1 to 1/1.
-spec new() -> table().
new() ->
    #table{gaps = [interval:new(rational:new(0, 1), rational:new(1, 1))]}.
%% @doc Total span covered by all gaps in the table, i.e. the sum of
%% the lengths of every unassigned interval.
-spec gap_size(table()) -> rational:rational().
gap_size(#table{gaps = Gaps}) ->
    AddLength =
        fun(Gap, Total) ->
            rational:add(Total, interval:length(Gap))
        end,
    lists:foldl(AddLength, rational:new(0, 1), Gaps).
%% @doc Assign all keys in `NewKeys' to gaps in `Table'. Uses the
%% greedy strategy (largest span/largest gap first strategy).
%% Recurses, consuming one (key, gap) pairing per step, until all
%% spans have been placed. Throws `{badarg, ...}' if non-zero spans
%% remain once the gaps are exhausted.
-spec assign(NewKeys::#{key() => rational:rational()}, Table::table()) -> table().
%% Nothing left to assign.
assign(NewKeys, Table) when map_size(NewKeys) =:= 0 ->
    Table;
%% No gaps left: acceptable only if every remaining span is zero.
assign(NewKeys, Table=#table{gaps=[]}) ->
    NewNewKeys = maps:filter(fun(_, Span) -> Span =/= rational:new(0) end, NewKeys),
    case maps:size(NewNewKeys) of
        0 -> Table;
        _ -> throw({badarg, "Span too large for available gaps."})
    end;
assign(NewKeys, Table=#table{gaps=Gaps, intervals=Intervals}) ->
    %% Pick the key with the largest remaining span (ties resolved by
    %% whichever key the fold visits last among equals).
    {LargestKey, LargestSpan} =
        maps:fold(
          fun(Key, Span, Acc={_, LargestSpan}) ->
                  case rational:compare(Span, LargestSpan) of
                      eq -> {Key, Span};
                      lt -> Acc;
                      gt -> {Key, Span}
                  end
          end, {'$no_key$', rational:new(0)}, NewKeys),
    %% Pick the largest gap to carve the span out of.
    LargestGap =
        lists:foldl(
          fun(Gap, LargestGap) ->
                  case rational:compare(interval:length(Gap),
                                        interval:length(LargestGap)) of
                      eq -> Gap;
                      lt -> LargestGap;
                      gt -> Gap
                  end
          end, interval:new(rational:new(0), rational:new(0)), Gaps),
    RemainingGaps = lists:delete(LargestGap, Gaps),
    case interval:split(LargestSpan, LargestGap, left) of
        {empty, _} ->
            % if the key with the largest span has an empty span the
            % assignment is done
            Table;
        {Interval, empty} ->
            %% The gap was fully consumed but the span was not: reduce
            %% the key's outstanding span and continue with other gaps.
            assign(
              maps:update_with(
                LargestKey,
                fun(Span) ->
                        rational:subtract(Span, interval:length(Interval))
                end,
                NewKeys),
              Table#table{
                gaps = RemainingGaps,
                intervals = insert_inorder({Interval, LargestKey}, Intervals)});
        {Interval, Gap} ->
            %% The span fitted inside the gap: the key is fully placed
            %% and the gap's remainder is returned to the gap list.
            assign(
              maps:remove(LargestKey, NewKeys),
              Table#table{
                gaps = [Gap | RemainingGaps],
                intervals = insert_inorder({Interval, LargestKey}, Intervals)})
    end.
%% Insert an {Interval, Key} entry into an interval list that is kept
%% sorted by interval order. There is deliberately no case for equal
%% intervals: an interval equal to one already assigned must never be
%% inserted.
insert_inorder(Entry, []) ->
    [Entry];
insert_inorder({Interval, _Key} = Entry, [{Existing, _} = Head | Rest]) ->
    case interval:preceeds(Interval, Existing) of
        true ->
            [Entry, Head | Rest];
        false ->
            [Head | insert_inorder(Entry, Rest)]
    end.
%% @doc Get the total span assigned to `Key': the sum of the lengths
%% of every interval whose assignee is exactly `Key'.
-spec span(key(), table()) -> rational:rational().
span(Key, #table{intervals = Intervals}) ->
    lists:foldl(
        fun({Interval, Owner}, Total) when Owner =:= Key ->
                rational:add(interval:length(Interval), Total);
           ({_Interval, _Other}, Total) ->
                Total
        end, rational:new(0), Intervals).
%% @doc Create gaps by shrinking the span assigned to each key.
%% @param Changes a map ``#{Key => ShrinkAmount}'' giving the amount
%% by which to shrink the span assigned to `Key'.
%% @returns the table with gaps.
-spec shrink(#{key() => rational:rational()}, table()) -> table().
shrink(Changes, Table=#table{gaps=Gaps, intervals=Intervals}) ->
    %% Cut the requested amounts out of the assigned intervals, then
    %% fold the freed pieces into the existing gap list, merging any
    %% adjacent gaps into larger ones.
    {NewGaps, NewIntervals} = cut_shift(Changes, Intervals),
    Table#table{gaps = merge_gaps(NewGaps, Gaps),
                intervals = NewIntervals}.
%% merge two lists of gaps combining adjacent gaps into single larger
%% gaps. Both inputs are sorted (then merged) by interval order, after
%% which a right fold coalesces any adjacent pairs.
merge_gaps(Gaps1, Gaps2) ->
    SortedGaps =
        lists:merge(fun interval:preceeds/2,
                    lists:sort(fun interval:preceeds/2, Gaps1),
                    lists:sort(fun interval:preceeds/2, Gaps2)),
    lists:foldr(fun(Gap, []) ->
                        [Gap];
                   (Gap, [PreceedingGap|Gaps]) ->
                        %% NOTE(review): folding from the right means the
                        %% accumulator head is the interval that *follows*
                        %% `Gap' in sort order, despite the variable name.
                        case interval:adjacent(Gap, PreceedingGap) of
                            true ->
                                [interval:merge(Gap, PreceedingGap) | Gaps];
                            false ->
                                [Gap, PreceedingGap | Gaps]
                        end
                end,
                [], SortedGaps).
%% Walk the assigned intervals in order, cutting each key's requested
%% shrink amount (from `AssignmentChanges') off the intervals assigned
%% to it. The final element of the fold state alternates between
%% `left' and `right' and selects which end of the next interval the
%% cut is taken from; the flipping rules follow the split results in
%% each clause below. Returns {Gaps, Remaining} - the cut-off pieces
%% and the surviving (possibly trimmed) intervals.
%% NOTE(review): `Remaining' is accumulated by prepending during a
%% foldl, so it comes back in reverse order relative to the input;
%% confirm callers do not rely on the sorted order maintained by
%% insert_inorder/2.
cut_shift(AssignmentChanges, Intervals) ->
    %% Helper: decrement a key's outstanding shrink amount by the
    %% length of the piece just cut.
    SubtractLength =
        fun(I) ->
            fun(X) ->
                rational:subtract(X, interval:length(I))
            end
        end,
    {_, Gaps, Remaining, _} =
        lists:foldl(
          fun({I, Key}, {Changes, Gaps, Remaining, left}) ->
                  Span = maps:get(Key, Changes, rational:new(0)),
                  case interval:split(Span, I, left) of
                      {empty, _} ->
                          %% Nothing to cut here; keep the interval.
                          {Changes, Gaps, [{I, Key}|Remaining], right};
                      {Gap, empty} ->
                          %% Whole interval consumed by the cut.
                          {maps:update_with(
                             Key, SubtractLength(Gap), rational:new(0), Changes),
                           [Gap|Gaps],
                           Remaining,
                           left};
                      {Gap, Rem} ->
                          %% Partial cut; the remainder survives.
                          {maps:update_with(
                             Key, SubtractLength(Gap), rational:new(0), Changes),
                           [Gap|Gaps],
                           [{Rem, Key}|Remaining],
                           right}
                  end;
             ({I, Key}, {Changes, Gaps, Remaining, right}) ->
                  Span = maps:get(Key, Changes, rational:new(0)),
                  case interval:split(Span, I, right) of
                      {_, empty} ->
                          {Changes, Gaps, [{I, Key}|Remaining], right};
                      {empty, Gap} ->
                          {maps:update_with(
                             Key, SubtractLength(Gap), rational:new(0), Changes),
                           [Gap|Gaps],
                           Remaining,
                           left};
                      {Rem, Gap} ->
                          {maps:update_with(
                             Key, SubtractLength(Gap), rational:new(0), Changes),
                           [Gap|Gaps],
                           [{Rem, Key}|Remaining],
                           left}
                  end
          end,
          {AssignmentChanges, [], [], right}, Intervals),
    {Gaps, Remaining}.
%% @doc Look up the key assigned `X'.
%%
%% @returns `{ok, Key}' if `X' falls in the interval assigned to `Key'
%% or `unassigned' if `X' falls in a gap.
-spec lookup(rational:rational(), table()) -> {ok, key()} | unassigned.
lookup(X, #table{intervals=Intervals}) ->
    %% Assigned intervals are kept non-overlapping (see assign/2 and
    %% insert_inorder/2), so at most one interval should contain X.
    %% lists:search/2 stops at the first hit rather than filtering
    %% (and copying) the whole list as the previous implementation did.
    case lists:search(fun({I, _}) -> interval:contains(X, I) end, Intervals) of
        {value, {_, Key}} ->
            {ok, Key};
        false ->
            unassigned
    end.
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(vtree_util).
-include("vtree.hrl").
-export([calc_mbb/2, nodes_mbb/2, min/2, max/2, calc_perimeter/1,
calc_volume/1, intersect_mbb/3, find_min_value/2, within_mbb/3]).
%% @doc Return the smaller of two values (tuple form) or the smallest
%% element of a non-empty list, according to the ordering fun `Less'.
%% When neither operand compares less, the current best (the second
%% operand of the comparison) is kept.
-spec min(Tuple::{any(), any()} | [any()], Less::fun()) -> Min::any().
min({A, B}, Less) ->
    choose_min(A, B, Less);
min([First | Rest], Less) ->
    min(Rest, Less, First).

-spec min(List::[any()], Less::fun(), Min::any()) -> Min::any().
min(Elems, Less, Current) ->
    lists:foldl(
        fun(Elem, Best) -> choose_min(Elem, Best, Less) end,
        Current, Elems).

%% Pick `Candidate' only when it is strictly less than `Best'.
choose_min(Candidate, Best, Less) ->
    case Less(Candidate, Best) of
        true -> Candidate;
        false -> Best
    end.
%% @doc Return the larger of two values (tuple form) or the largest
%% element of a non-empty list, according to the ordering fun `Less'.
%% When the candidate does not compare less than the current best, the
%% candidate wins (so ties favour the later element).
-spec max(Tuple::{any(), any()} | [any()], Less::fun()) -> Max::any().
max({A, B}, Less) ->
    choose_max(A, B, Less);
max([First | Rest], Less) ->
    max(Rest, Less, First).

-spec max(List::[any()], Less::fun(), Max::any()) -> Max::any().
max(Elems, Less, Current) ->
    lists:foldl(
        fun(Elem, Best) -> choose_max(Elem, Best, Less) end,
        Current, Elems).

%% Keep `Best' only when `Candidate' is strictly less than it.
choose_max(Candidate, Best, Less) ->
    case Less(Candidate, Best) of
        true -> Best;
        false -> Candidate
    end.
% Calculate the perimeter of a list of {Min, Max} 2-tuples: the sum of
% the extents (Max - Min) across all dimensions.
-spec calc_perimeter(mbb()) -> number().
calc_perimeter(Values) ->
    lists:sum([Max - Min || {Min, Max} <- Values]).
% Calculate the volume of a list of {Min, Max} 2-tuples: the product
% of the extents (Max - Min) across all dimensions (1 for no values).
-spec calc_volume(mbb()) -> number().
calc_volume(Values) ->
    Extents = [Max - Min || {Min, Max} <- Values],
    lists:foldl(fun(Extent, Product) -> Product * Extent end, 1, Extents).
% Calculate the enclosing bounding box from a non-empty list of
% bounding boxes by widening the first box with each of the rest,
% dimension by dimension.
-spec calc_mbb(List::[[{any(), any()}]], Less::fun()) -> [{any(), any()}].
calc_mbb([First | Rest], Less) ->
    calc_mbb(Rest, Less, First).

-spec calc_mbb(List::[[{any(), any()}]], Less::fun(), Mbb::[{any(), any()}])
              -> Mbb::[{any(), any()}].
calc_mbb([], _Less, Mbb) ->
    Mbb;
calc_mbb([Box | Rest], Less, Mbb) ->
    Widen =
        fun({Min, Max}, {MbbMin, MbbMax}) ->
            {?MODULE:min({Min, MbbMin}, Less),
             ?MODULE:max({Max, MbbMax}, Less)}
        end,
    calc_mbb(Rest, Less, lists:zipwith(Widen, Box, Mbb)).
% Calculate the enclosing MBB from a list of nodes. The node kind is
% determined from the first element; kv/kp nodes contribute their key
% records, split nodes are {Mbb, _} pairs.
-spec nodes_mbb(Nodes :: [#kv_node{} | #kp_node{} | split_node()],
                Less :: lessfun()) -> mbb().
nodes_mbb([#kv_node{} | _] = Nodes, Less) ->
    vtree_util:calc_mbb([Node#kv_node.key || Node <- Nodes], Less);
nodes_mbb([#kp_node{} | _] = Nodes, Less) ->
    vtree_util:calc_mbb([Node#kp_node.key || Node <- Nodes], Less);
nodes_mbb(SplitNodes, Less) ->
    %% Split nodes are 2-tuples; only the first (MBB) element is used.
    {Mbbs, _Values} = lists:unzip(SplitNodes),
    vtree_util:calc_mbb(Mbbs, Less).
% Returns the intersection of two MBBs. Touching also counts as intersection.
% A `nil' bound acts as an open end / wildcard (see the clauses below);
% `overlapfree' is returned as soon as any dimension fails to overlap.
-spec intersect_mbb(A :: mbb(), B :: mbb(), Less :: lessfun()) ->
                           mbb() | overlapfree.
intersect_mbb(A, B, Less) ->
    intersect_mbb0(lists:zip(A, B), Less, []).

%% Per-dimension worker: resolves nil wildcards first, then clamps the
%% ranges against each other.
-spec intersect_mbb0([{{keyval(), keyval()}, {keyval(), keyval()}}],
                     Less :: lessfun(), Acc :: mbb()) -> mbb() | overlapfree.
intersect_mbb0([], _Less, Acc) ->
    lists:reverse(Acc);
% If both values are `nil`, it is like a wildcard, it covers the other
% range completely
intersect_mbb0([{{nil, nil}, MinMax}|T], Less, Acc) ->
    intersect_mbb0(T, Less, [MinMax|Acc]);
intersect_mbb0([{MinMax, {nil, nil}}|T], Less, Acc) ->
    intersect_mbb0(T, Less, [MinMax|Acc]);
% If one end of the range is `nil` it's an open range and the intersection
% will be the one of the other given range. The guards are needed to prevent
% endless loops.
intersect_mbb0([{{nil, MaxA}, {MinB, MaxB}}|T], Less, Acc) when MinB =/= nil ->
    intersect_mbb0([{{MinB, MaxA}, {MinB, MaxB}}|T], Less, Acc);
intersect_mbb0([{{MinA, nil}, {MinB, MaxB}}|T], Less, Acc) when MaxB =/= nil ->
    intersect_mbb0([{{MinA, MaxB}, {MinB, MaxB}}|T], Less, Acc);
intersect_mbb0([{{MinA, MaxA}, {nil, MaxB}}|T], Less, Acc) when MinA =/= nil ->
    intersect_mbb0([{{MinA, MaxA}, {MinA, MaxB}}|T], Less, Acc);
intersect_mbb0([{{MinA, MaxA}, {MinB, nil}}|T], Less, Acc) when MaxA =/= nil ->
    intersect_mbb0([{{MinA, MaxA}, {MinB, MaxA}}|T], Less, Acc);
% All `nil` cases are resolved, do the actual work
intersect_mbb0([{{MinA, MaxA}, {MinB, MaxB}}|T], Less, Acc) ->
    %% The intersection in this dimension is [max(mins), min(maxes)];
    %% if that is inverted the MBBs do not overlap at all.
    Min = vtree_util:max({MinA, MinB}, Less),
    Max = vtree_util:min({MaxA, MaxB}, Less),
    case Less(Max, Min) of
        true -> overlapfree;
        false -> intersect_mbb0(T, Less, [{Min, Max}|Acc])
    end.
% Find the minimum value from a non-empty list of items. `MinFun' maps
% an item to a number; the smallest such number is returned together
% with the item that produced it. On ties the earliest item wins.
-spec find_min_value(MinFun :: fun(), Data :: [any()]) -> {number(), any()}.
find_min_value(MinFun, [_|_] = Data) ->
    lists:foldl(
        fun(Item, {BestVal, _BestItem} = Acc) ->
            Val = MinFun(Item),
            if
                BestVal =:= nil orelse Val < BestVal ->
                    {Val, Item};
                true ->
                    Acc
            end
        end,
        {nil, []}, Data).
% Returns true if MBB `A` is completely within `B`, i.e. merging `A`
% with `B` would not expand `B` and would just return `B`.
-spec within_mbb(A :: mbb(), B :: mbb(), Less :: lessfun()) -> true | false.
within_mbb(A, B, Less) ->
    %% A dimension of A lies inside the corresponding dimension of B
    %% unless A starts before B's minimum or ends after B's maximum.
    Within =
        fun({{MinA, MaxA}, {MinB, MaxB}}) ->
            not (Less(MinA, MinB) orelse Less(MaxB, MaxA))
        end,
    lists:all(Within, lists:zip(A, B)).
%% -------- Overview ---------
%%
%% There are two primary types of exchange supported:
%% - a full exchange aimed at implementations with cached trees, where the
%% cached trees represent all the data in the location, and the comparison is
%% between two complete data sets
%% - a partial exchange where it is expected that trees will be dynamically
%% created covering a subset of data within the location
%%
%% The full exchange assumes access to cached trees, with a low cost of
%% repeated access, and a relatively high proportion fo the overall cost in
%% network bandwitdh. These exchanges go through the following process:
%%
%% - Root Compare (x n)
%% - Branch Compare (x n)
%% - Clock Compare
%% - Repair
%%
%% The partial, dynamic tree exchange is based on dynamically produced trees,
%% where a relatively high proportion of the cost is in the production of the
%% trees. In a tree exchange, whole trees are compared (potentially reduced by
%% use of a segment filter), until the delta stops decreasing at a significant
%% rate and a Clock Compare is run. So these exchanges for through the
%% following process:
%%
%% - Tree Compare (x n)
%% - Clock Compare
%% - Repair
%%
%% Each exchange has a 'blue' list and a 'pink' list. Each list (blue and
%% pink) is a set of partitions pertinent to this exchange, with the state
%% to be compared being the merging of all the trees referenced by the list.
%%
%% The lists can be a single item each (for a pairwise exchange), or a
%% ring-size number of partitions for a coverage query exchange.
%%
%% -------- Root Compare ---------
%%
%% This allows the comparison between the roots of trees. Each root (with a
%% tree size of large and 4-byte hashes), will be 4KB in size. The outcome of
%% the comparison should be a set of BranchIDs where the (merged) roots are
%% showing differences.
%%
%% The Exchange can terminate if the set of differences is empty. A timeout
%% should trigger the commencement of the next stage (to provide a pause
%% between vnode requests).
%%
%% -------- Root Confirm ---------
%%
%% In the next stage the roots are again requested, received and compared.
%% Again a set of branchIDs which differ is created - and the set of
%% confirmed deltas is the intersection of the sets generated from both root
%% exchanges.
%%
%% The purpose of the confirm stage is to rule out false negative results
%% related to timing differences in the result of PUTs.
%%
%% The Exchange can terminate if the set of differences is empty. A timeout
%% should trigger the commencement of the next stage (to provide a pause
%% between vnode requests).
%%
%% -------- Branch Compare / Confirm ---------
%%
%% The set of branch ID differences should now be fetched (Compare), and then
%% re-fetched following a timeout (Confirm) to produce a set of SegmentIDs (or
%% tree leaves) that represent differences between blue and pink, eliminating
%% false negatives related to timing as with the Root Compare and Confirm.
%%
%% Each Branch is 1KB in size. So if there are more than 16 branches which
%% have differences, only 16 should be chosen for the Compare and Confirm to
%% control the volume of network traffic prompted by the exchange.
%%
%% The Exchange can terminate if the set of differences is empty. A timeout
%% should trigger the commencement of the next stage (to provide a pause
%% between vnode requests).
%%
%% -------- Clock Compare ---------
%%
%% The final stage is clock compare. The clock compare can be done on up to
%% 128 segments across a maximum of 8 BranchIDs. This is to control the
%% potential overhead of the comparison and subsequent repairs. This may mean
%% for empty vnodes o(1000) exchanges may be required to fully recover the
%% store. However, in these cases it is likely that handoff and read repair
%% is already recovering the data so overly-aggressive read repair is
%% unnecessary.
%%
-module(aae_exchange).
-behaviour(gen_fsm).
-ifdef(fsm_deprecated).
-compile({nowarn_deprecated_function,
[{gen_fsm, start, 3},
{gen_fsm, send_event, 2}]}).
-endif.
-include("include/aae.hrl").
-define(TRANSITION_PAUSE_MS, 500).
% A pause between phases - allow queue lengths to change, and avoid
% generating an excess workload for AAE
-define(CACHE_TIMEOUT_MS, 60000).
% 60 seconds (used in fetch root/branches)
-define(SCAN_TIMEOUT_MS, 600000).
% 10 minutes (used in fetch clocks)
-define(UNFILTERED_SCAN_TIMEOUT_MS, 14400000).
% 4 hours (used in fetch trees with no filters)
-define(MAX_RESULTS, 128).
% Maximum number of results to request in one round of
-define(WORTHWHILE_REDUCTION, 0.3).
% If the last comparison of trees has reduced the size of the dirty leaves
% by 30%, probably worth comparing again before a clock fetch is run.
% Number a suck-teeth estimate, not even a fag-packet calculation involved.
-define(WORTHWHILE_REDUCTION_CACHED, 0).
% When checking a cached tree - then even a small reduction is worth
% another check, as the cost per check is so small. This changed after
% seeing cost of false negative results in large stores.
-define(WORTHWHILE_FILTER, 256).
% If the number of segment IDs to pass into a filter is too large, the
% filter is probably not worthwhile - more effort checking the filter, than
% time saved in the accumulator. Another suck-teeth estimate here as to
% what this value is, at this level with a small tree it will save opening
% all but one block in most slots (with the sst file). I suspect the
% optimal number is more likely to be higher than lower.
-export([init/1,
handle_sync_event/4,
handle_event/3,
handle_info/3,
terminate/3,
code_change/4]).
-export([waiting_all_results/2,
prepare_full_exchange/2,
prepare_partial_exchange/2,
root_compare/2,
branch_compare/2,
clock_compare/2,
tree_compare/2,
merge_root/2,
merge_branches/2]).
-export([compare_roots/2,
compare_branches/2,
compare_clocks/2,
compare_trees/2]).
-export([insync_responses/0]).
-export([start/4,
start/7,
reply/3]).
-include_lib("eunit/include/eunit.hrl").
-record(state, {root_compare_deltas = [] :: list(),
branch_compare_deltas = [] :: list(),
tree_compare_deltas = [] :: list(),
key_deltas = [] :: list(),
repair_fun :: repair_fun()|undefined,
reply_fun :: reply_fun()|undefined,
blue_list = [] :: input_list(),
pink_list = [] :: input_list(),
exchange_id = "not_set" :: list(),
blue_returns = {0, 0} :: {integer(), integer()},
pink_returns = {0, 0} :: {integer(), integer()},
pink_acc,
blue_acc,
merge_fun,
start_time = os:timestamp() :: erlang:timestamp(),
pending_state :: atom(),
reply_timeout = 0 :: integer(),
exchange_type :: exchange_type(),
exchange_filters = none :: filters(),
last_tree_compare = none :: list(non_neg_integer())|none,
last_root_compare = none :: list(non_neg_integer())|none,
last_branch_compare = none :: list(non_neg_integer())|none,
tree_compares = 0 :: integer(),
root_compares = 0 :: integer(),
branch_compares = 0 :: integer(),
prethrottle_branches = 0 :: non_neg_integer(),
prethrottle_leaves = 0 :: non_neg_integer(),
transition_pause_ms = ?TRANSITION_PAUSE_MS :: pos_integer(),
log_levels :: aae_util:log_levels()|undefined,
scan_timeout = ?SCAN_TIMEOUT_MS :: non_neg_integer(),
max_results = ?MAX_RESULTS :: pos_integer(),
purpose :: atom()|undefined
}).
-type branch_results() :: list({integer(), binary()}).
% Results to branch queries are a list mapping Branch ID to the binary for
% that branch
-type exchange_state() :: #state{}.
-type exchange_type() :: full|partial.
-type compare_state() ::
root_compare|tree_compare|branch_compare|clock_compare.
-type closing_state() ::
compare_state()|timeout|error|not_supported.
-type bucket_range() ::
{binary(), binary()}|binary()|all.
-type key_range() ::
{binary(), binary()}|all.
-type modified_range() ::
{non_neg_integer(), non_neg_integer()}|all.
-type segment_filter() ::
{segments, list(non_neg_integer()), leveled_tictac:tree_size()}|all.
-type hash_method() ::
pre_hash|{rehash, non_neg_integer()}.
-type filters() ::
{filter,
bucket_range(), key_range(),
leveled_tictac:tree_size(),
segment_filter(), modified_range(),
hash_method()}|none.
% filter to be used in partial exchanges
-type option_item() ::
{transition_pause_ms, pos_integer()}|
{scan_timeout, non_neg_integer()}|
{log_levels, aae_util:log_levels()}|
{max_results, non_neg_integer()}|
{purpose, atom()}.
-type options() :: list(option_item()).
-type send_message() ::
fetch_root |
{fetch_branches, list(non_neg_integer())} |
{fetch_clocks, list(non_neg_integer())} |
{merge_tree_range, filters()} |
{fetch_clocks_range, filters()}.
-type send_fun() :: fun((send_message(), list(tuple())|all, blue|pink) -> ok).
-type input_list() :: [{send_fun(), list(tuple())|all}].
% The Blue List and the Pink List are made up of:
% - a SendFun, which should be a 3-arity function, taking a preflist,
% a message and a colour to be used to flag the reply;
% - a list of preflists, to be used in the SendFun to be filtered by the
% target. The Preflist might be {Index, Node} for remote requests or
% {Index, Pid} for local requests
% For partial exchanges only, the preflist can and must be set to 'all'
-type repair_input() :: {{any(), any()}, {any(), any()}}.
% {{Bucket, Key}, {BlueClock, PinkClock}}.
-type repair_fun() :: fun((list(repair_input())) -> ok).
% Input will be Bucket, Key, Clock
-type reply_fun() :: fun(({closing_state(), non_neg_integer()}) -> ok).
-define(FILTERIDX_SEG, 5).
-define(FILTERIDX_TRS, 4).
-export_type([send_fun/0, repair_fun/0, reply_fun/0, filters/0]).
%%%============================================================================
%%% API
%%%============================================================================
%% @doc Start a full exchange with no filters and default options.
start(BlueList, PinkList, RepairFun, ReplyFun) ->
    % API for backwards compatibility
    start(full, BlueList, PinkList, RepairFun, ReplyFun, none, []).
-spec start(exchange_type(),
            input_list(), input_list(),
            repair_fun(),
            reply_fun(),
            filters(),
            options()) -> {ok, pid(), list()}.
%% @doc
%% Start an FSM to manage an exchange and compare the preflists in the
%% BlueList with those in the PinkList, using the RepairFun to repair any
%% keys discovered to have inconsistent clocks.  ReplyFun used to reply back
%% to calling client the StateName at termination.
%%
%% The ReplyFun should be a 1 arity function that expects a tuple with the
%% closing state and the count of deltas.
%%
%% Returns the FSM pid together with a UUID identifying this exchange
%% (the same ID is used in the exchange's log entries).
start(Type, BlueList, PinkList, RepairFun, ReplyFun, Filters, Opts) ->
    ExchangeID = leveled_util:generate_uuid(),
    {ok, ExPID} = gen_fsm:start(?MODULE,
                                [{Type, Filters},
                                 BlueList, PinkList, RepairFun, ReplyFun,
                                 ExchangeID,
                                 Opts],
                                []),
    {ok, ExPID, ExchangeID}.
-spec reply(pid(), any(), pink|blue) -> ok.
%% @doc
%% Support events to be sent back to the FSM.  Error results are
%% forwarded without a colour tag, as the exchange aborts on any error.
reply(Exchange, {error, Error}, _Colour) ->
    gen_fsm:send_event(Exchange, {error, Error});
reply(Exchange, Result, Colour) ->
    gen_fsm:send_event(Exchange, {reply, Result, Colour}).
%%%============================================================================
%%% gen_fsm callbacks
%%%============================================================================
%% @doc gen_fsm init: seed the process random state (presumably for the
%% jittered transition pauses - see jitter_pause usage), record how many
%% replies are expected from each colour, apply options, and enter the
%% preparation state matching the exchange type (timeout 0 so that the
%% preparation state runs immediately).
init([{Type, Filters},
      BlueList, PinkList, RepairFun, ReplyFun, ExChID, Opts]) ->
    leveled_rand:seed(),
    PinkTarget = length(PinkList),
    BlueTarget = length(BlueList),
    State = #state{blue_list = BlueList,
                   pink_list = PinkList,
                   repair_fun = RepairFun,
                   reply_fun = ReplyFun,
                   exchange_id = ExChID,
                   pink_returns = {PinkTarget, PinkTarget},
                   blue_returns = {BlueTarget, BlueTarget},
                   exchange_type = Type,
                   exchange_filters = Filters},
    State0 = process_options(Opts, State),
    aae_util:log("EX001",
                 [ExChID, PinkTarget + BlueTarget, State0#state.purpose],
                 logs(),
                 State0#state.log_levels),
    InitState =
        case Type of
            full -> prepare_full_exchange;
            partial -> prepare_partial_exchange
        end,
    {ok, InitState, State0, 0}.
%% @doc Kick off a full exchange by requesting the (cached) tree roots
%% from both sides; the merged roots are compared in root_compare.
%% NOTE(review): the "EX006" log here is labelled prepare_tree_exchange
%% rather than prepare_full_exchange - confirm whether intentional.
prepare_full_exchange(timeout, State) ->
    aae_util:log("EX006",
                 [prepare_tree_exchange, State#state.exchange_id],
                 logs(),
                 State#state.log_levels),
    trigger_next(fetch_root,
                 root_compare,
                 fun merge_root/2,
                 <<>>,
                 false,
                 ?CACHE_TIMEOUT_MS,
                 State).
%% @doc Kick off a partial exchange by requesting dynamically-built
%% trees (merge_tree_range with the configured filters) from both
%% sides; the merged trees are compared in tree_compare.
prepare_partial_exchange(timeout, State) ->
    aae_util:log("EX006",
                 [prepare_partial_exchange, State#state.exchange_id],
                 logs(),
                 State#state.log_levels),
    Filters = State#state.exchange_filters,
    ScanTimeout = filtered_timeout(Filters, State#state.scan_timeout),
    TreeSize = element(?FILTERIDX_TRS, Filters),
    trigger_next({merge_tree_range, Filters},
                 tree_compare,
                 fun merge_tree/2,
                 leveled_tictac:new_tree(empty_tree, TreeSize),
                 false,
                 ScanTimeout,
                 State).
%% @doc Compare the merged trees from the blue and pink sides of a
%% partial exchange.  Tree compares repeat while each pass keeps
%% reducing the set of dirty leaves; once the reduction tails off, the
%% surviving segments are fetched for a clock compare.
%% NOTE(review): the "EX006" log below is labelled root_compare rather
%% than tree_compare - confirm whether that label is intentional.
tree_compare(timeout, State) ->
    aae_util:log("EX006",
                 [root_compare, State#state.exchange_id],
                 logs(),
                 State#state.log_levels),
    DirtyLeaves = compare_trees(State#state.blue_acc, State#state.pink_acc),
    TreeCompares = State#state.tree_compares + 1,
    {StillDirtyLeaves, Reduction} =
        case State#state.last_tree_compare of
            none ->
                %% First pass: a nominal reduction of 1.0 guarantees at
                %% least one more compare when there are dirty leaves.
                {DirtyLeaves, 1.0};
            PreviouslyDirtyLeaves ->
                %% Only leaves dirty in both passes count.  The division
                %% is safe: last_tree_compare is only ever set to a
                %% non-empty list (see the true branch below).
                SDL = intersect_ids(PreviouslyDirtyLeaves, DirtyLeaves),
                {SDL, 1.0 - length(SDL) / length(PreviouslyDirtyLeaves)}
        end,
    % We want to keep comparing trees until the number of deltas stops reducing
    % significantly. Then there should be a clock comparison.
    % It is expected there will be natural deltas with tree compare because of
    % timing differences. Ideally the natural deltas will be small enough so
    % that there should be no more than 2 tree compares before a segment filter
    % can be applied to accelerate the process.
    Filters = State#state.exchange_filters,
    TreeSize = element(?FILTERIDX_TRS, Filters),
    case ((length(StillDirtyLeaves) > 0)
            and (Reduction > ?WORTHWHILE_REDUCTION)) of
        true ->
            % Keep comparing trees, this is reducing the segments we will
            % eventually need to compare
            Filters0 =
                case length(StillDirtyLeaves) < ?WORTHWHILE_FILTER of
                    true ->
                        %% Few enough dirty leaves to make a segment
                        %% filter worthwhile on the next scan.
                        Segments =
                            {segments, StillDirtyLeaves, TreeSize},
                        setelement(?FILTERIDX_SEG, Filters, Segments);
                    false ->
                        Filters
                end,
            ScanTimeout = filtered_timeout(Filters0, State#state.scan_timeout),
            trigger_next({merge_tree_range, Filters0},
                         tree_compare,
                         fun merge_tree/2,
                         leveled_tictac:new_tree(empty_tree, TreeSize),
                         false,
                         ScanTimeout,
                         State#state{last_tree_compare = StillDirtyLeaves,
                                     tree_compares = TreeCompares});
        false ->
            % Compare clocks. Note if there are no Mismatched segment IDs the
            % stop condition in trigger_next will be met
            SegmentIDs = select_ids(StillDirtyLeaves,
                                    State#state.max_results,
                                    tree_compare,
                                    State#state.exchange_id,
                                    State#state.log_levels),
            % TODO - select_ids doesn't account for TreeSize
            Filters0 =
                setelement(?FILTERIDX_SEG,
                           Filters,
                           {segments, SegmentIDs, TreeSize}),
            trigger_next({fetch_clocks_range, Filters0},
                         clock_compare,
                         fun merge_clocks/2,
                         [],
                         length(SegmentIDs) == 0,
                         State#state.scan_timeout,
                         State#state{tree_compare_deltas = StillDirtyLeaves,
                                     tree_compares = TreeCompares,
                                     prethrottle_leaves =
                                         length(StillDirtyLeaves)})
    end.
%% @doc Compare the merged roots of the cached trees, looping for a
%% confirm pass to rule out false negatives from timing differences.
%% NOTE(review): on the first pass `Reduction' is bound to the list of
%% dirty branches, and in Erlang term order a list always compares
%% greater than the integer ?WORTHWHILE_REDUCTION_CACHED - so the
%% first pass with any deltas always loops.  On later passes
%% Reduction is length(BDL) - length(Previous), which cannot be
%% positive after intersecting, so at most two root compares run.
%% Confirm whether `length(Previous) - length(BDL)' was intended,
%% given the comment on ?WORTHWHILE_REDUCTION_CACHED.
root_compare(timeout, State) ->
    aae_util:log("EX006",
                 [root_compare, State#state.exchange_id],
                 logs(),
                 State#state.log_levels),
    DirtyBranches = compare_roots(State#state.blue_acc, State#state.pink_acc),
    RootCompares = State#state.root_compares + 1,
    {BranchIDs, Reduction} =
        case State#state.last_root_compare of
            none ->
                {DirtyBranches, DirtyBranches};
            PreviouslyDirtyBranches ->
                BDL = intersect_ids(PreviouslyDirtyBranches, DirtyBranches),
                {BDL, length(BDL) - length(PreviouslyDirtyBranches)}
        end,
    % Should we loop again on root_compare? As long as root_compare is
    % reducing the result set sufficiently, keep doing it until we switch to
    % branch_compare
    case ((length(BranchIDs) > 0)
            and (Reduction > ?WORTHWHILE_REDUCTION_CACHED)) of
        true ->
            trigger_next(fetch_root,
                         root_compare,
                         fun merge_root/2,
                         <<>>,
                         false,
                         ?CACHE_TIMEOUT_MS,
                         State#state{last_root_compare = BranchIDs,
                                     root_compares = RootCompares});
        false ->
            %% Cap the number of branches fetched, then move on to
            %% branch_compare (or stop if no deltas remain).
            BranchesToFetch = select_ids(BranchIDs,
                                         State#state.max_results,
                                         root_confirm,
                                         State#state.exchange_id,
                                         State#state.log_levels),
            trigger_next({fetch_branches, BranchesToFetch},
                         branch_compare,
                         fun merge_branches/2,
                         [],
                         length(BranchIDs) == 0,
                         ?CACHE_TIMEOUT_MS,
                         State#state{root_compare_deltas = BranchesToFetch,
                                     root_compares = RootCompares,
                                     prethrottle_branches =
                                         length(BranchIDs)})
    end.
%% @doc Compare the fetched branches, looping for a confirm pass as
%% with root_compare, before fetching clocks for the surviving dirty
%% segments.
%% NOTE(review): the Reduction computation has the same shape as in
%% root_compare (first pass binds a list, later passes a non-positive
%% integer), so at most two branch compares run - confirm whether the
%% sign was intended.
branch_compare(timeout, State) ->
    aae_util:log("EX006",
                 [branch_compare, State#state.exchange_id],
                 logs(),
                 State#state.log_levels),
    DirtySegments = compare_branches(State#state.blue_acc, State#state.pink_acc),
    BranchCompares = State#state.branch_compares + 1,
    {SegmentIDs, Reduction} =
        case State#state.last_branch_compare of
            none ->
                {DirtySegments, DirtySegments};
            PreviouslyDirtySegments ->
                SDL = intersect_ids(PreviouslyDirtySegments, DirtySegments),
                {SDL, length(SDL) - length(PreviouslyDirtySegments)}
        end,
    % Should we loop again on branch_compare? As long as branch_compare is
    % reducing the result set sufficiently, keep doing it until we switch to
    % clock_compare
    case ((length(SegmentIDs) > 0)
            and (Reduction > ?WORTHWHILE_REDUCTION_CACHED)) of
        true ->
            trigger_next({fetch_branches, State#state.root_compare_deltas},
                         branch_compare,
                         fun merge_branches/2,
                         [],
                         false,
                         ?CACHE_TIMEOUT_MS,
                         State#state{last_branch_compare = SegmentIDs,
                                     branch_compares = BranchCompares});
        false ->
            %% Cap the number of segments fetched, then move on to
            %% clock_compare (or stop if no deltas remain).
            SegstoFetch = select_ids(SegmentIDs,
                                     State#state.max_results,
                                     branch_confirm,
                                     State#state.exchange_id,
                                     State#state.log_levels),
            trigger_next({fetch_clocks,
                          SegstoFetch,
                          State#state.exchange_filters},
                         clock_compare,
                         fun merge_clocks/2,
                         [],
                         length(SegmentIDs) == 0,
                         State#state.scan_timeout,
                         State#state{branch_compare_deltas = SegstoFetch,
                                     branch_compares = BranchCompares,
                                     prethrottle_leaves =
                                         length(SegmentIDs)})
    end.
%% @doc Final phase: compare the fetched clock accumulators from both
%% sides; every key whose clocks differ ({{Bucket, Key}, {BlueClock,
%% PinkClock}} - see repair_input()) is handed to the RepairFun
%% supplied at start, and the FSM stops normally.
clock_compare(timeout, State) ->
    aae_util:log("EX006",
                 [clock_compare, State#state.exchange_id],
                 logs(),
                 State#state.log_levels),
    aae_util:log("EX008",
                 [State#state.blue_acc, State#state.pink_acc],
                 logs(),
                 State#state.log_levels),
    RepairKeys = compare_clocks(State#state.blue_acc, State#state.pink_acc),
    RepairFun = State#state.repair_fun,
    aae_util:log("EX004",
                 [State#state.exchange_id, State#state.purpose, length(RepairKeys)],
                 logs(),
                 State#state.log_levels),
    RepairFun(RepairKeys),
    {stop,
     normal,
     State#state{key_deltas = RepairKeys}}.
%% @doc Collect replies from the blue and pink participants, merging
%% each result into the accumulator for its colour.  Once both sides
%% have returned all expected replies, transition to the pending
%% comparison state after a jittered pause; otherwise continue waiting
%% with a timeout derived from the exchange start time.  A
%% not_supported reply, an error, or a timeout aborts the exchange.
waiting_all_results({reply, not_supported, Colour}, State) ->
    aae_util:log("EX010",
                 [State#state.exchange_id, Colour, State#state.purpose],
                 logs(),
                 State#state.log_levels),
    {stop, normal, State#state{pending_state = not_supported}};
waiting_all_results({reply, {error, Reason}, _Colour}, State) ->
    waiting_all_results({error, Reason}, State);
waiting_all_results({reply, Result, Colour}, State) ->
    aae_util:log("EX007",
                 [Colour, State#state.exchange_id],
                 logs(),
                 State#state.log_levels),
    {PC, PT} = State#state.pink_returns,
    {BC, BT} = State#state.blue_returns,
    MergeFun = State#state.merge_fun,
    %% Count the reply against its colour, and test whether both
    %% colours have now hit their targets.
    {State0, AllPink, AllBlue} =
        case Colour of
            pink ->
                PinkAcc = MergeFun(Result, State#state.pink_acc),
                {State#state{pink_returns = {PC + 1, PT}, pink_acc = PinkAcc},
                 PC + 1 == PT, BC == BT};
            blue ->
                BlueAcc = MergeFun(Result, State#state.blue_acc),
                {State#state{blue_returns = {BC + 1, BT}, blue_acc = BlueAcc},
                 PC == PT, BC + 1 == BT}
        end,
    case AllBlue and AllPink of
        true ->
            {next_state,
             State0#state.pending_state,
             State0,
             jitter_pause(State#state.transition_pause_ms)};
        false ->
            {next_state,
             waiting_all_results,
             State0,
             set_timeout(State0#state.start_time,
                         State0#state.reply_timeout)}
    end;
waiting_all_results(UnexpectedResponse, State) ->
    % timeout expected here, but also may get errors from vnode - such as
    % {error, mailbox_overload} when vnode has entered overload state. Not
    % possible to complete exchange so stop
    {PC, PT} = State#state.pink_returns,
    {BC, BT} = State#state.blue_returns,
    MissingCount = PT + BT - (PC + BC),
    aae_util:log("EX002",
                 [UnexpectedResponse,
                  State#state.pending_state,
                  MissingCount,
                  State#state.exchange_id,
                  State#state.purpose],
                 logs(),
                 State#state.log_levels),
    ReplyState =
        case UnexpectedResponse of
            timeout ->
                timeout;
            _ ->
                error
        end,
    {stop, normal, State#state{pending_state = ReplyState}}.
%% Unused gen_fsm callback - any synchronous all-state event is
%% acknowledged with ok and ignored.
handle_sync_event(_msg, _From, StateName, State) ->
    {reply, ok, StateName, State}.

%% Unused gen_fsm callback - all-state events are ignored.
handle_event(_Msg, StateName, State) ->
    {next_state, StateName, State}.

%% Unused gen_fsm callback - stray info messages are ignored.
handle_info(_Msg, StateName, State) ->
    {next_state, StateName, State}.
%% @doc On normal termination, log the outcome (EX003 for full
%% exchanges, EX009 for partial) - a full exchange closing in
%% root_compare or branch_compare, or a partial exchange closing in
%% tree_compare, counts as a clean (in sync or repaired) close;
%% anything else is logged with an estimate of outstanding damage.
%% Finally the ReplyFun is called with the pending state and the
%% number of key deltas found.
%% NOTE(review): only `normal' terminations are matched - confirm that
%% abnormal shutdowns are acceptable without the ReplyFun being called.
terminate(normal, StateName, State) ->
    case State#state.exchange_type of
        full ->
            case StateName of
                StateName when
                        StateName == root_compare;
                        StateName == branch_compare ->
                    aae_util:log("EX003",
                                 [State#state.purpose,
                                  true,
                                  StateName,
                                  State#state.exchange_id,
                                  0,
                                  State#state.root_compares,
                                  State#state.branch_compares,
                                  length(State#state.key_deltas)],
                                 logs(),
                                 State#state.log_levels);
                BrokenState ->
                    %% Closed early (timeout/error/not_supported or in
                    %% clock_compare) - estimate the remaining damage.
                    EstDamage =
                        estimated_damage(State#state.prethrottle_branches,
                                         State#state.prethrottle_leaves,
                                         State#state.max_results),
                    aae_util:log("EX003",
                                 [State#state.purpose,
                                  false,
                                  BrokenState,
                                  State#state.exchange_id,
                                  EstDamage,
                                  State#state.root_compares,
                                  State#state.branch_compares,
                                  length(State#state.key_deltas)],
                                 logs(),
                                 State#state.log_levels)
            end;
        partial ->
            case StateName of
                tree_compare ->
                    aae_util:log("EX009",
                                 [State#state.purpose,
                                  true,
                                  tree_compare,
                                  State#state.exchange_id,
                                  0,
                                  State#state.tree_compares,
                                  length(State#state.key_deltas)],
                                 logs(),
                                 State#state.log_levels);
                BrokenState ->
                    aae_util:log("EX009",
                                 [State#state.purpose,
                                  false,
                                  BrokenState,
                                  State#state.exchange_id,
                                  State#state.prethrottle_leaves,
                                  State#state.tree_compares,
                                  length(State#state.key_deltas)],
                                 logs(),
                                 State#state.log_levels)
            end
    end,
    ReplyFun = State#state.reply_fun,
    ReplyFun({State#state.pending_state, length(State#state.key_deltas)}).
%% gen_fsm callback: no state transformation is required on a hot code
%% upgrade - carry the current state name and loop data forward unchanged.
code_change(_OldVsn, CurrentState, LoopData, _Extra) ->
    {ok, CurrentState, LoopData}.
%%%============================================================================
%%% External Functions
%%%============================================================================
-spec insync_responses() -> list(compare_state()).
%% @doc
%% To help external applications understand the states returned in replies,
%% a list of those responses that imply that the systems are in sync. Note
%% that with branch_compare this is possibly not true if not all branch deltas
%% were checked (e.g. branch IDs > max_results).
insync_responses() ->
    [root_compare, branch_compare].
-spec merge_binary(binary(), binary()) -> binary().
%% @doc
%% Combine two tictac binaries.  An empty binary acts as the identity
%% element - nothing has yet been seen for that preflist, or the
%% accumulator is still the initial one.
merge_binary(<<>>, Acc) ->
    Acc;
merge_binary(New, <<>>) ->
    New;
merge_binary(New, Acc) ->
    leveled_tictac:merge_binaries(New, Acc).
-spec merge_branches(branch_results(), branch_results()) -> branch_results().
%% @doc
%% Fold a list of {BranchID, BranchBin} pairs into the accumulator,
%% XOR-merging the binary in place when the branch is already present.
merge_branches([], MergedAcc) ->
    MergedAcc;
merge_branches([{BranchID, BranchBin} | Rest], MergedAcc) ->
    UpdatedAcc =
        case lists:keyfind(BranchID, 1, MergedAcc) of
            false ->
                % First response for this branch - the accumulator is empty
                [{BranchID, BranchBin} | MergedAcc];
            {BranchID, ExistingBin} ->
                % Merge into the existing entry, keeping its position
                lists:keyreplace(BranchID,
                                 1,
                                 MergedAcc,
                                 {BranchID,
                                  merge_binary(BranchBin, ExistingBin)})
        end,
    merge_branches(Rest, UpdatedAcc).
-spec merge_root(binary(), binary()) -> binary().
%% @doc
%% Merge an individual result for a set of preflists into the accumulated
%% binary for the tree root.  Delegates to merge_binary/2, so an empty
%% side is treated as the identity.
merge_root(Root, RootAcc) ->
    merge_binary(Root, RootAcc).
-spec merge_tree(leveled_tictac:tictactree(), leveled_tictac:tictactree())
                                            -> leveled_tictac:tictactree().
%% @doc
%% Merge two trees into an XOR'd tree representing the total result set.
%% Thin delegate to leveled_tictac:merge_trees/2.
merge_tree(Tree0, Tree1) ->
    leveled_tictac:merge_trees(Tree0, Tree1).
%%%============================================================================
%%% Internal Functions
%%%============================================================================
-spec estimated_damage(pos_integer(), pos_integer(), pos_integer()) ->
                                                        non_neg_integer().
%% @doc
%% Estimate the scope of the damage: scale the broken-leaf count up by the
%% factor by which the broken-branch count exceeds the max_results cap,
%% never scaling below 1.0.
estimated_damage(BrokenBranches, BrokenLeaves, MaxResults) ->
    ScaleUp = max(1.0, BrokenBranches / MaxResults),
    round(BrokenLeaves * ScaleUp).
-spec process_options(options(), exchange_state()) -> exchange_state().
%% @doc
%% Alter state reflecting any passed in options.  Each option is validated
%% by pattern and guard; an unknown option name or a badly-typed value
%% causes a function_clause crash (assertive - no silent fallback).
process_options([], State) ->
    State;
process_options([{transition_pause_ms, PauseMS}|Tail], State)
                                            when is_integer(PauseMS) ->
    process_options(Tail, State#state{transition_pause_ms = PauseMS});
process_options([{log_levels, LogLevels}|Tail], State)
                                            when is_list(LogLevels) ->
    process_options(Tail, State#state{log_levels = LogLevels});
process_options([{scan_timeout, Timeout}|Tail], State)
                                            when is_integer(Timeout) ->
    process_options(Tail, State#state{scan_timeout = Timeout});
process_options([{max_results, MaxResults}|Tail], State)
                                            when is_integer(MaxResults) ->
    process_options(Tail, State#state{max_results = MaxResults});
process_options([{purpose, Purpose}|Tail], State)
                                            when is_atom(Purpose) ->
    process_options(Tail, State#state{purpose = Purpose}).
-spec trigger_next(any(), atom(), fun(), any(), boolean(),
                    integer(), exchange_state()) -> any().
%% @doc
%% Trigger the next request.  If StopTest is true the exchange stops
%% normally; otherwise the request is sent to both the blue and pink
%% lists (starting with blue) and the FSM moves to waiting_all_results
%% with fresh accumulators, reset return counters, and a restarted clock.
trigger_next(NextRequest, PendingStateName, MergeFun, InitAcc, StopTest,
                                                    Timeout, LoopState) ->
    case StopTest of
        true ->
            {stop, normal, LoopState};
        false ->
            ok = send_requests(NextRequest,
                               LoopState#state.blue_list,
                               LoopState#state.pink_list,
                               always_blue),
            % start_time is taken now so the reply timeout in
            % waiting_all_results can be measured from this point
            {next_state,
                waiting_all_results,
                LoopState#state{start_time = os:timestamp(),
                                pending_state = PendingStateName,
                                pink_acc = InitAcc,
                                blue_acc = InitAcc,
                                merge_fun = MergeFun,
                                pink_returns =
                                    reset(LoopState#state.pink_returns),
                                blue_returns =
                                    reset(LoopState#state.blue_returns),
                                reply_timeout = Timeout},
                Timeout}
    end.
-spec set_timeout(erlang:timestamp(), pos_integer()) -> integer().
%% @doc
%% Compute the remaining timeout for a state, given the time at which the
%% state was commenced; never goes below zero.
set_timeout(StartTime, Timeout) ->
    ElapsedMS = timer:now_diff(os:timestamp(), StartTime) div 1000,
    max(0, Timeout - ElapsedMS).
-spec send_requests(any(), list(tuple()), list(tuple()),
                    always_blue|always_pink) -> ok.
%% @doc
%% Alternate between sending requests to items on the blue and pink list.
%% The first group of clauses rewrites filtered requests into the plain
%% tuple form; the final clauses walk the two lists, switching colour
%% after each send as long as the other list still has entries.
send_requests({merge_tree_range, {filter, B, KR, TS, SF, MR, HM}},
                BlueList, PinkList, Always) ->
    % unpack the filter into a single tuple msg or merge_tree_range
    send_requests({merge_tree_range, B, KR, TS, SF, MR, HM},
                    BlueList, PinkList, Always);
send_requests({fetch_clocks_range, {filter, B, KR, _TS, SF, MR, _HM}},
                BlueList, PinkList, Always) ->
    % unpack the filter into a single tuple msg or merge_tree_range
    send_requests({fetch_clocks_range, B, KR, SF, MR},
                    BlueList, PinkList, Always);
send_requests({fetch_clocks, SegIDs, none}, BlueList, PinkList, Always) ->
    send_requests({fetch_clocks, SegIDs}, BlueList, PinkList, Always);
send_requests({fetch_clocks,
                    SegIDs,
                    {filter, all, all, large, all, MR, pre_hash}},
                BlueList, PinkList, Always) ->
    send_requests({fetch_clocks, SegIDs, MR}, BlueList, PinkList, Always);
send_requests({fetch_clocks,
                    SegIDs,
                    {filter, B, KR, large, all, MR, pre_hash}},
                BlueList, PinkList, Always) ->
    F0 = {filter, B, KR, large, {segments, SegIDs, large}, MR, pre_hash},
    send_requests({fetch_clocks_range, F0}, BlueList, PinkList, Always);
send_requests(_Msg, [], [], _Always) ->
    ok;
send_requests(Msg, [{SendFun, Preflists}|Rest], PinkList, always_blue) ->
    SendFun(Msg, Preflists, blue),
    % Switch to the pink side only if it still has entries to serve
    % (pattern match rather than length/1, which is O(n))
    case PinkList of
        [] ->
            send_requests(Msg, Rest, PinkList, always_blue);
        _ ->
            send_requests(Msg, Rest, PinkList, always_pink)
    end;
send_requests(Msg, BlueList, [{SendFun, Preflists}|Rest], always_pink) ->
    SendFun(Msg, Preflists, pink),
    case BlueList of
        [] ->
            send_requests(Msg, BlueList, Rest, always_pink);
        _ ->
            send_requests(Msg, BlueList, Rest, always_blue)
    end.
-spec merge_clocks(list(tuple()), list(tuple())) -> list(tuple()).
%% @doc
%% Accumulate keys and clocks returned in the segment query: sort and
%% de-duplicate the new results, then merge them into the (already
%% sorted) accumulator, producing a sorted list of keys and clocks.
merge_clocks(NewKeyClocks, SortedAcc) ->
    lists:merge(lists:usort(NewKeyClocks), SortedAcc).
-spec compare_roots(binary(), binary()) -> list(integer()).
%% @doc
%% Compare the roots of two trees (i.e. the Pink and Blue root), and return a
%% list of branch IDs which are mismatched.  Thin delegate to
%% leveled_tictac:find_dirtysegments/2.
compare_roots(BlueRoot, PinkRoot) ->
    leveled_tictac:find_dirtysegments(BlueRoot, PinkRoot).
-spec compare_branches(branch_results(), branch_results()) -> list(integer()).
%% @doc
%% Compare two sets of branches, and return a list of segment IDs which are
%% mismatched.  Each blue branch is matched against the pink branch with the
%% same BranchID (which must exist - the match is assertive), and any dirty
%% segments found are expanded to full segment IDs.
%% Folds directly over BlueBranches rather than indexing with lists:nth/2
%% over lists:seq/2, which was O(n^2).
compare_branches(BlueBranches, PinkBranches) ->
    FoldFun =
        fun({BranchID, BlueBranch}, Acc) ->
            {BranchID, PinkBranch} = lists:keyfind(BranchID, 1, PinkBranches),
            DirtySegs =
                leveled_tictac:find_dirtysegments(BlueBranch, PinkBranch),
            lists:map(fun(S) ->
                            leveled_tictac:join_segment(BranchID, S)
                        end,
                        DirtySegs) ++ Acc
        end,
    lists:foldl(FoldFun, [], BlueBranches).
-spec compare_clocks(list(tuple()), list(tuple())) -> list(repair_input()).
%% @doc
%% Find the differences between the lists - and return a list of
%% {{B, K}, {blue-side VC | none, pink-side VC | none}}
%% If the blue-side or pink-side does not contain the key, then none is used
%% in place of the clock.
%% Note the clocks in the output are the refined (sorted, if list-form)
%% clocks, not necessarily the input clocks byte-for-byte.
compare_clocks(BlueList, PinkList) ->
    % Two lists of {B, K, VC} want to remove everything where {B, K, VC} is
    % the same in both lists.  Clocks are refined first so list-form clocks
    % differing only in sort order compare as equal.
    SortClockFun = fun({B, K, VC}) -> {B, K, refine_clock(VC)} end,
    BlueSet = ordsets:from_list(lists:map(SortClockFun, BlueList)),
    PinkSet = ordsets:from_list(lists:map(SortClockFun, PinkList)),
    BlueDelta = ordsets:subtract(BlueSet, PinkSet),
    PinkDelta = ordsets:subtract(PinkSet, BlueSet),
    % Want to subtract out from the Pink and Blue Sets any example where
    % both pink and blue are the same
    %
    % This should speed up the folding and key finding to provide the
    % joined list
    BlueDeltaList =
        lists:reverse(
            ordsets:fold(fun({B, K, VCB}, Acc) ->
                                % Assume for now that element may be only
                                % blue
                                [{{B, K}, {VCB, none}}|Acc]
                            end,
                            [],
                            BlueDelta)),
    % BlueDeltaList is the output of compare clocks, assuming the item
    % is only on the Blue side (so it compares the blue vector clock with
    % none)
    PinkEnrichFun =
        fun({B, K, VCP}, Acc) ->
            case lists:keyfind({B, K}, 1, Acc) of
                {{B, K}, {VCB, none}} ->
                    % Key seen on both sides - fill in the pink clock
                    ElementWithClockDiff =
                        {{B, K}, {VCB, VCP}},
                    lists:keyreplace({B, K}, 1, Acc, ElementWithClockDiff);
                false ->
                    % Key only on the pink side; re-sort to keep the
                    % accumulator ordered for subsequent keyfind calls
                    ElementOnlyPink =
                        {{B, K}, {none, VCP}},
                    lists:keysort(1, [ElementOnlyPink|Acc])
            end
        end,
    % The Foldfun to be used on the PinkDelta, will now fill in the Pink
    % vector clock if the element also exists in Pink
    AllDeltaList =
        ordsets:fold(PinkEnrichFun, BlueDeltaList, PinkDelta),
    % The accumulator starts with the Blue side only perspective, and
    % either adds to it or enriches it by folding over the Pink side
    % view
    AllDeltaList.
-spec compare_trees(leveled_tictac:tictactree(),
                    leveled_tictac:tictactree()) -> list(non_neg_integer()).
%% @doc
%% Compare the trees - get list of dirty leaves (Segment IDs).  Thin
%% delegate to leveled_tictac:find_dirtyleaves/2.
compare_trees(Tree0, Tree1) ->
    leveled_tictac:find_dirtyleaves(Tree0, Tree1).
-spec intersect_ids(list(integer()), list(integer())) -> list(integer()).
%% @doc
%% Provide the intersection of two lists of integer IDs: keep only the
%% IDs from the first list that are also members of the second, in the
%% first list's order.
intersect_ids(IDs0, IDs1) ->
    [ID || ID <- IDs0, lists:member(ID, IDs1)].
-spec select_ids(list(integer()), pos_integer(), atom(), list(),
                    aae_util:log_levels()|undefined) -> list(integer()).
%% @doc
%% Select a cluster of IDs if the list of IDs is smaller than the maximum
%% output size.  The lookup based on these IDs will be segment based, so it
%% is expected that the tightest clustering will yield the most efficient
%% results.  However, if we always get the same list, then concurrent
%% exchanges will wastefully correct the same data - so randomly choose one
%% of the better lists.
select_ids(IDList, MaxOutput, StateName, ExchangeID, LogLevels)
                                        when length(IDList) > MaxOutput ->
    IDList0 = lists:sort(IDList),
    % Log that this exchange is being throttled to MaxOutput IDs
    aae_util:log("EX005",
                 [ExchangeID, length(IDList0), StateName],
                 logs(),
                 LogLevels),
    % Pair each possible window start (IDList1) with its window end
    % (IDList2, offset by MaxOutput - 1), so each zip pair spans a
    % candidate window of MaxOutput consecutive sorted IDs
    IDList1 =
        lists:sublist(IDList0, 1 + length(IDList0) - MaxOutput),
    IDList2 =
        lists:sublist(IDList0, MaxOutput, 1 + length(IDList0) - MaxOutput),
    FoldFun =
        fun({Start, End}, {Idx, Acc}) ->
            % Record the span (End - Start) of the window starting at Idx
            {Idx + 1, [{End - Start, Idx}|Acc]}
        end,
    {_EndIdx, SpaceIdxL} =
        lists:foldl(FoldFun, {1, []}, lists:zip(IDList1, IDList2)),
    % Keep the MaxOutput tightest windows, then pick one at random to
    % avoid concurrent exchanges all repairing the same cluster
    Selections =
        lists:sublist(lists:sort(SpaceIdxL), MaxOutput),
    {_ChosenSpace, ChosenIdx} =
        lists:nth(leveled_rand:uniform(length(Selections)), Selections),
    lists:sublist(IDList0, ChosenIdx, MaxOutput);
select_ids(IDList, _MaxOutput, _StateName, _ExchangeID, _LogLevels) ->
    % Small enough already - just return the IDs sorted
    lists:sort(IDList).
-spec jitter_pause(pos_integer()) -> pos_integer().
%% @doc
%% Jitter a pause, so if multiple FSMs started at once, they don't all use
%% the network at the same time.  Result is uniformly distributed in
%% (Timeout div 2, Timeout + Timeout div 2].
jitter_pause(Timeout) ->
    leveled_rand:uniform(Timeout) + Timeout div 2.
-spec reset({pos_integer(), pos_integer()})
                                        -> {non_neg_integer(), pos_integer()}.
%% @doc
%% Reset the count back to 0.  Deliberately matches only when the count
%% has reached the target - resetting before all replies have been
%% received is a programming error and crashes with function_clause.
reset({Target, Target}) -> {0, Target}.
-spec filtered_timeout(filters(), pos_integer()) -> pos_integer().
%% @doc
%% Pick the scan timeout: an unfiltered whole-bucket scan (no key range,
%% no segment filter and no modified range) gets the larger fixed
%% timeout; any filtered scan keeps the configured one.
filtered_timeout({filter, _B, KeyRange, _TS, SegFilter, ModRange, _HM},
                    ScanTimeout) ->
    Unfiltered =
        (KeyRange == all)
            andalso (SegFilter == all)
            andalso (ModRange == all),
    case Unfiltered of
        true ->
            ?UNFILTERED_SCAN_TIMEOUT_MS;
        false ->
            ScanTimeout
    end.
-spec refine_clock(list()|binary()) -> list()|binary().
%% @doc
%% When the clock is a list, sort it so that two clocks differing only in
%% element order compare as equal; binaries (and any other term) pass
%% through unchanged.
refine_clock(Clock) ->
    case is_list(Clock) of
        true -> lists:sort(Clock);
        false -> Clock
    end.
%%%============================================================================
%%% log definitions
%%%============================================================================
-spec logs() -> list(tuple()).
%% @doc
%% Define log lines for this module.
%% Note: EX009 is only emitted from the 'partial' exchange branch of
%% terminate/3, so its text says "partial exchange" (previously it was
%% mislabelled "full exchange", a copy-paste of EX003).
logs() ->
    [{"EX001",
        {info, "Exchange id=~s with target_count=~w expected purpose=~w"}},
    {"EX002",
        {error, "~w with pending_state=~w and missing_count=~w"
                    ++ " for exchange id=~s purpose=~w"}},
    {"EX003",
        {info, "Normal exit for full exchange purpose=~w in_sync=~w "
                    ++ " pending_state=~w for exchange id=~s"
                    ++ " scope of mismatched_segments=~w"
                    ++ " root_compare_loops=~w "
                    ++ " branch_compare_loops=~w "
                    ++ " keys_passed_for_repair=~w"}},
    {"EX004",
        {info, "Exchange id=~s purpose=~w led to prompting"
                    ++ " of repair_count=~w"}},
    {"EX005",
        {info, "Exchange id=~s throttled count=~w at state=~w"}},
    {"EX006",
        {debug, "State change to ~w for exchange id=~s"}},
    {"EX007",
        {debug, "Reply received for colour=~w in exchange id=~s"}},
    {"EX008",
        {debug, "Comparison between BlueList ~w and PinkList ~w"}},
    {"EX009",
        {info, "Normal exit for partial exchange purpose=~w in_sync=~w"
                    ++ " pending_state=~w for exchange id=~s"
                    ++ " scope of mismatched_segments=~w"
                    ++ " tree_compare_loops=~w "
                    ++ " keys_passed_for_repair=~w"}},
    {"EX010",
        {warn, "Exchange not_supported in exchange id=~s"
                    ++ " for colour=~w purpose=~w"}}
    ].
%%%============================================================================
%%% Test
%%%============================================================================
-ifdef(TEST).
%% select_ids/5 should return sorted input unchanged when it fits within
%% MaxOutput, and truncate to MaxOutput elements when it does not.
select_id_test() ->
    L0 = [1, 2, 3],
    ?assertMatch(L0, select_ids(L0, 3, root_confirm, "t0", undefined)),
    L1 = [3, 2, 1],
    ?assertMatch(L0, select_ids(L1, 3, root_confirm, "t0", undefined)),
    ?assertMatch(2, length(select_ids(L1, 2, root_confirm, "t0", undefined))).
%% With repeated calls, the random window choice in select_ids/5 should
%% eventually produce each of the three tightest 3-ID clusters; 1000
%% iterations is assumed to be enough for all three to appear.
select_best_id_rand_test() ->
    L2 = [1, 2, 3, 5, 16, 17, 18],
    F =
        fun(_N, {S1, S2, S3}) ->
            case {S1, S2, S3} of
                {true, true, true} ->
                    % All three clusters seen - nothing left to check
                    {true, true, true};
                _ ->
                    case select_ids(L2, 3, root_confirm,
                                    "r3", undefined) of
                        [1, 2, 3] ->
                            {true, S2, S3};
                        [2, 3, 5] ->
                            {S1, true, S3};
                        [16, 17, 18] ->
                            {S1, S2, true}
                    end
            end
        end,
    ?assertMatch({true, true, true},
                 lists:foldl(F, {false, false, false}, lists:seq(1, 1000))).
%% compare_clocks/2 should return [] for identical lists, pair differing
%% clocks as {BlueVC, PinkVC}, and use 'none' when a key is absent from
%% one side.
compare_clocks_test() ->
    KV1 = {<<"B1">>, <<"K1">>, [{a, 1}]},
    KV2 = {<<"B1">>, <<"K2">>, [{b, 1}]},
    KV3 = {<<"B1">>, <<"K3">>, [{a, 2}]},
    KV4 = {<<"B1">>, <<"K1">>, [{a, 1}, {b, 2}]},
    KV5 = {<<"B1">>, <<"K2">>, [{b, 1}, {c, 1}]},
    BL1 = [KV1, KV2, KV3],
    PL1 = [KV1, KV2, KV3],
    ?assertMatch([], compare_clocks(BL1, PL1)),
    BL2 = [KV2, KV3, KV4],
    ?assertMatch([{{<<"B1">>, <<"K1">>}, {[{a, 1}, {b, 2}], [{a, 1}]}}],
                 compare_clocks(BL2, PL1)),
    ?assertMatch([{{<<"B1">>, <<"K1">>}, {[{a, 1}], [{a, 1}, {b, 2}]}}],
                 compare_clocks(PL1, BL2)),
    PL2 = [KV4, KV5],
    ?assertMatch([{{<<"B1">>, <<"K1">>},
                        {[{a, 1}], [{a, 1}, {b, 2}]}},
                    {{<<"B1">>, <<"K2">>},
                        {[{b, 1}], [{b, 1}, {c, 1}]}},
                    {{<<"B1">>, <<"K3">>},
                        {[{a, 2}], none}}],
                 compare_clocks(BL1, PL2)).
%% List-form clocks differing only in sort order compare as equal (via
%% refine_clock/1), but binary-wrapped clocks are opaque and so still
%% register as a difference.
compare_unsorted_clocks_test()->
    KV1 = {<<"B1">>, <<"K1">>, [{a, 1}, {b, 2}]},
    KV2 = {<<"B1">>, <<"K1">>, [{b, 2}, {a, 1}]},
    KV1b = {<<"B1">>, <<"K1">>, term_to_binary([{a, 1}, {b, 2}])},
    KV2b = {<<"B1">>, <<"K1">>, term_to_binary([{b, 2}, {a, 1}])},
    ?assertMatch([], compare_clocks([KV1], [KV2])),
    KL = compare_clocks([KV1b], [KV2b]),
    ?assertMatch(1, length(KL)).
%% A timeout while replies are outstanding should stop the FSM normally
%% with pending_state recorded as 'timeout'.
clean_exit_ontimeout_test() ->
    State0 = #state{pink_returns={4, 5}, blue_returns={8, 8},
                    exchange_type = full},
    State1 = State0#state{pending_state = timeout},
    {stop, normal, State1} = waiting_all_results(timeout, State0).
%% If every send immediately replies with an error, the exchange should
%% still deliver a reply (here {error, 0}, consumed by ReceiveReply) and
%% the exchange process should terminate.
connect_error_test() ->
    SendFun =
        fun(_Msg, _PLs, Colour) ->
            Exchange = self(),
            reply(Exchange, {error, disconnected}, Colour)
        end,
    BlueList = [{SendFun, [{0, 1}]}],
    PinkList = [{SendFun, [{0, 1}]}],
    RepairFun = fun(_RL) -> ok end,
    % Helper process that exits once the expected reply arrives
    ReceiveReply =
        spawn(fun() ->
                receive
                    {error, 0} ->
                        ok
                end
            end),
    ReplyFun = fun(R) -> ReceiveReply ! R end,
    {ok, Test, _ExID} = start(BlueList, PinkList, RepairFun, ReplyFun),
    % Poll (up to 3s) for the helper to have received the reply and died
    ?assertMatch(true,
                 lists:foldl(fun(X, Acc) ->
                                case Acc of
                                    true ->
                                        true;
                                    false ->
                                        timer:sleep(X),
                                        not is_process_alive(ReceiveReply)
                                end
                            end,
                            false,
                            [1000, 1000, 1000])),
    ?assertMatch(false, is_process_alive(Test)).
%% A vnode error reply (e.g. query_backlog) while waiting for results
%% should stop the exchange normally rather than crash.
waiting_for_error_test() ->
    {stop, normal, _S0} =
        waiting_all_results({reply, {error, query_backlog}, blue},
                            #state{exchange_type = full,
                                    merge_fun = fun merge_clocks/2}).
%% Exercise the trivial pass-through gen_fsm callbacks (and
%% insync_responses/0) purely for coverage.
coverage_cheat_test() ->
    {next_state, prepare, _State0} =
        handle_event(null, prepare, #state{exchange_type = full}),
    {reply, ok, prepare, _State1} =
        handle_sync_event(null, nobody, prepare, #state{exchange_type = full}),
    {next_state, prepare, _State2} =
        handle_info(null, prepare, #state{exchange_type = full}),
    {ok, prepare, _State3} =
        code_change(null, prepare, #state{exchange_type = full}, null),
    [root_compare, branch_compare] = insync_responses().
-endif.
%%--------------------------------------------------------------------
%% Copyright (c) 2013-2018 EMQ Enterprise, Inc. (http://emqtt.io)
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqttd_hooks).
-behaviour(gen_server).
-author("<NAME> <<EMAIL>>").
%% Start
-export([start_link/0]).
%% Hooks API
-export([add/3, add/4, delete/2, run/2, run/3, lookup/1]).
%% gen_server Function Exports
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3]).
-record(state, {}).
-type(hooktag() :: atom() | string() | binary()).
-export_type([hooktag/0]).
-record(callback, {tag :: hooktag(),
function :: function(),
init_args = [] :: list(any()),
priority = 0 :: integer()}).
-record(hook, {name :: atom(), callbacks = [] :: list(#callback{})}).
-define(HOOK_TAB, mqtt_hook).
%% @doc Start the hooks server, registered locally under the module name.
start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
%%--------------------------------------------------------------------
%% Hooks API
%%--------------------------------------------------------------------
-spec(add(atom(), function() | {hooktag(), function()}, list(any())) -> ok).
%% @doc Register a hook callback with the default priority 0.
%% Simply delegates to add/4, which performs the bare-fun to
%% {undefined, Fun} normalisation (previously duplicated here).
add(HookPoint, Function, InitArgs) ->
    add(HookPoint, Function, InitArgs, 0).

-spec(add(atom(), function() | {hooktag(), function()}, list(any()), integer()) -> ok).
%% @doc Register a hook callback with an explicit priority.  A bare fun
%% is normalised to the {undefined, Fun} tag form; the server performs
%% the actual (duplicate-checked, priority-ordered) insertion.
add(HookPoint, Function, InitArgs, Priority) when is_function(Function) ->
    add(HookPoint, {undefined, Function}, InitArgs, Priority);
add(HookPoint, {Tag, Function}, InitArgs, Priority) when is_function(Function) ->
    gen_server:call(?MODULE, {add, HookPoint, {Tag, Function}, InitArgs, Priority}).
-spec(delete(atom(), function() | {hooktag(), function()}) -> ok).
%% @doc Unregister a hook callback.  A bare fun is normalised to the
%% {undefined, Fun} tag form before asking the server to remove it.
delete(HookPoint, Function) when is_function(Function) ->
    delete(HookPoint, {undefined, Function});
delete(HookPoint, {Tag, Function}) when is_function(Function) ->
    gen_server:call(?MODULE, {delete, HookPoint, {Tag, Function}}).
%% @doc Run hooks without Acc.  Callbacks run in registration (priority)
%% order; a callback returning 'stop' halts the chain.
-spec(run(atom(), list(Arg :: any())) -> ok | stop).
run(HookPoint, Args) ->
    run_(lookup(HookPoint), Args).
-spec(run(atom(), list(Arg :: any()), any()) -> any()).
%% @doc Run hooks threading an accumulator through the callbacks; returns
%% {ok, Acc} or {stop, Acc}.
run(HookPoint, Args, Acc) ->
    run_(lookup(HookPoint), Args, Acc).
%% @private
%% Apply each callback to Args ++ InitArgs in list order.  'stop' halts
%% the chain; 'ok' or any other return value continues.
run_([#callback{function = Fun, init_args = InitArgs} | Callbacks], Args) ->
    case apply(Fun, lists:append([Args, InitArgs])) of
        ok   -> run_(Callbacks, Args);
        stop -> stop;
        _Any -> run_(Callbacks, Args)
    end;
run_([], _Args) ->
    ok.
%% @private
%% As run_/2, but threading an accumulator: callbacks are applied to
%% Args ++ [Acc] ++ InitArgs.  {ok, NewAcc} updates the accumulator;
%% 'stop' / {stop, NewAcc} halt the chain; anything else continues with
%% the accumulator unchanged.
run_([#callback{function = Fun, init_args = InitArgs} | Callbacks], Args, Acc) ->
    case apply(Fun, lists:append([Args, [Acc], InitArgs])) of
        ok             -> run_(Callbacks, Args, Acc);
        {ok, NewAcc}   -> run_(Callbacks, Args, NewAcc);
        stop           -> {stop, Acc};
        {stop, NewAcc} -> {stop, NewAcc};
        _Any           -> run_(Callbacks, Args, Acc)
    end;
run_([], _Args, Acc) ->
    {ok, Acc}.
-spec(lookup(atom()) -> [#callback{}]).
%% @doc Look up the callbacks registered for a hook point; [] when the
%% hook point is unknown.  Reads the protected ETS table directly, so
%% clients do not need to go through the server.
lookup(HookPoint) ->
    case ets:lookup(?HOOK_TAB, HookPoint) of
        [#hook{callbacks = Callbacks}] -> Callbacks;
        [] -> []
    end.
%%--------------------------------------------------------------------
%% gen_server Callbacks
%%--------------------------------------------------------------------
%% Create the hook table keyed on #hook.name.  'protected' lets any
%% process read (see lookup/1) while only this server may write.
init([]) ->
    ets:new(?HOOK_TAB, [set, protected, named_table, {keypos, #hook.name}]),
    {ok, #state{}}.
%% Add a callback for a hook point.  Duplicate {Tag, Function} pairs are
%% rejected with {error, already_hooked}; otherwise the callback is
%% inserted in priority order.
handle_call({add, HookPoint, {Tag, Function}, InitArgs, Priority}, _From, State) ->
    Callback = #callback{tag = Tag, function = Function,
                         init_args = InitArgs, priority = Priority},
    {reply,
     case ets:lookup(?HOOK_TAB, HookPoint) of
        [#hook{callbacks = Callbacks}] ->
            case contain_(Tag, Function, Callbacks) of
                false ->
                    insert_hook_(HookPoint, add_callback_(Callback, Callbacks));
                true ->
                    {error, already_hooked}
            end;
        [] ->
            % First callback for this hook point
            insert_hook_(HookPoint, [Callback])
     end, State};
%% Delete a previously added callback; {error, not_found} if it was never
%% registered for this hook point.
handle_call({delete, HookPoint, {Tag, Function}}, _From, State) ->
    {reply,
     case ets:lookup(?HOOK_TAB, HookPoint) of
        [#hook{callbacks = Callbacks}] ->
            case contain_(Tag, Function, Callbacks) of
                true ->
                    insert_hook_(HookPoint, del_callback_(Tag, Function, Callbacks));
                false ->
                    {error, not_found}
            end;
        [] ->
            {error, not_found}
     end, State};
%% Any other request is rejected rather than crashing the server.
handle_call(Req, _From, State) ->
    {reply, {error, {unexpected_request, Req}}, State}.
%% Unexpected casts are silently ignored.
handle_cast(_Request, LoopState) ->
    {noreply, LoopState}.
%% Out-of-band messages are silently ignored.
handle_info(_Message, LoopState) ->
    {noreply, LoopState}.
%% Nothing to clean up on shutdown: the named ETS table is deleted
%% automatically when its owning process dies.
terminate(_Reason, _LoopState) ->
    ok.
%% No state transformation is needed on a hot code upgrade.
code_change(_OldVsn, LoopState, _Extra) ->
    {ok, LoopState}.
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
%% Write the full callback list for a hook point back into the table,
%% replacing any existing entry; always returns ok.
insert_hook_(HookPoint, Callbacks) ->
    ets:insert(?HOOK_TAB, #hook{name = HookPoint, callbacks = Callbacks}), ok.
%% Insert a callback keeping the list ordered by #callback.priority.
%% keymerge is stable, so presumably an equal-priority callback lands
%% after the existing ones; assumes Callbacks is already sorted.
add_callback_(Callback, Callbacks) ->
    lists:keymerge(#callback.priority, Callbacks, [Callback]).
%% Drop every callback whose tag AND function both match; keep the rest.
del_callback_(Tag, Function, Callbacks) ->
    Keep =
        fun(#callback{tag = T, function = F}) ->
            T =/= Tag orelse F =/= Function
        end,
    lists:filter(Keep, Callbacks).
%% Is a callback with this exact tag and function already registered?
contain_(Tag, Function, Callbacks) ->
    Matches =
        fun(#callback{tag = T, function = F}) ->
                T =:= Tag andalso F =:= Function;
           (_Other) ->
                false
        end,
    lists:any(Matches, Callbacks).
% @doc
% <a href="https://reference.digilentinc.com/reference/pmod/pmodgps/reference-manual">
% PmodGPS</a>
% module.
%
% The PmodGPS sends the GPS data over UART.
%
% Start the driver with
% ```
% 1> grisp:add_device(uart, pmod_gps).
% '''
% @end
-module(pmod_gps).
-behaviour(gen_server).
%--- Exports -------------------------------------------------------------------
% API
-export([start_link/2]).
-export([get/1]).
% Callbacks
-export([init/1]).
-export([handle_call/3]).
-export([handle_cast/2]).
-export([handle_info/2]).
-export([code_change/3]).
-export([terminate/2]).
%--- Includes ------------------------------------------------------------------
-include("grisp.hrl").
%--- Macros --------------------------------------------------------------------
-define(MAX_CONSECUTIVE_ERRORS, 10).
%--- Records -------------------------------------------------------------------
-record(state, {port, last_sentences, error_count = 0}).
%--- API -----------------------------------------------------------------------
% @private
% Start the driver gen_server, registered locally as pmod_gps.
start_link(Slot, _Opts) ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, Slot, []).
% @doc Get the GPS data.
%
% The input parameter specifies which type of sentence to get.
% For a description of the sentences see the
% <a href="https://reference.digilentinc.com/_media/reference/pmod/pmodgps/pmodgps_rm.pdf">
% PmodGPS Reference Manual</a>.
% The sentence CRC is checked for all the sentence types, but for now only the
% GGA values are parsed. If other sentences are needed, grisp_nmea needs to be
% extended to support more types.
%
% Returns the most recently cached sentence of the requested type, or
% 'undefined' if none has been received yet.
%
% === Example ===
% ```
% 2> pmod_gps:get(gga).
% {gps,gga,#{alt => 61.9,fixed => true,lat => 52.122661666666666,long => 11.594928333333334,time => 53912000}}}
% 3> pmod_gps:get(gsa).
% {gps,gsa,<<"A,3,17,06,19,02,24,,,,,,,,2.69,2.51,0.97">>}
% 4> pmod_gps:get(gsv).
% {gps,gsv,<<"3,3,12,14,22,317,17,17,10,040,35,29,09,203,,22,02,351">>}
% 5> pmod_gps:get(rmc).
% {gps,rmc,<<"150007.000,A,5207.3592,N,01135.6895,E,0.46,255.74,120220,,,A">>}
% 6> pmod_gps:get(vtg).
% {gps,vtg,<<"297.56,T,,M,0.65,N,1.21,K,A">>}
% '''
-spec get(grisp_nmea:message_id()) ->
    {grisp_nmea:talker_id(), grisp_nmea:message_id(), map() | binary()} | undefined.
get(MessageId) ->
    call({get, MessageId}).
%--- Callbacks -----------------------------------------------------------------
% @private
% Open the termios UART port driver, register this process as the driver
% for the uart slot, and seed the sentence cache with 'undefined' for
% every NMEA sentence type that may be cached.
init(Slot = uart) ->
    Port = open_port({spawn_driver, "grisp_termios_drv"}, [binary]),
    grisp_devices:register(Slot, ?MODULE),
    Sentences = maps:from_list([{T, undefined} || T <- [
        dtm, gbq, gbs, gga, gll, glq, gnq, gns, gpq,
        grs, gsa, gst, gsv, rmc, txt, vlw, vtg, zda
    ]]),
    {ok, #state{port = Port, last_sentences = Sentences}}.
% @private
% Dispatch to execute_call/2, converting a thrown reason into an
% {error, Reason} reply instead of crashing the server.
handle_call(Call, _From, State) ->
    try execute_call(Call, State)
    catch throw:Reason -> {reply, {error, Reason}, State}
    end.
% @private
% Casts are not part of this driver's protocol - crash loudly.
handle_cast(Request, _State) ->
    erlang:error({unknown_cast, Request}).
% @private
% Parse raw UART data from the port as an NMEA sentence and cache the
% latest sentence per message type.
% We need to support at least one message failing parsing, because
% when starting to read randomly in the stream of sentences, the first
% one may be truncated. For now, we fail after a maximum number of
% consecutive errors.
handle_info({Port, {data, Data}},
            #state{port = Port, last_sentences = LastSentences,
                   error_count = ErrorCount} = State)
  when is_binary(Data) ->
    case {ErrorCount, grisp_nmea:parse(Data)} of
        {Count, {error, Reason}} when Count > ?MAX_CONSECUTIVE_ERRORS ->
            % Too many consecutive bad sentences - crash the driver
            erlang:error({gps_nmea_parsing_error, Reason});
        {Count, {error, _Reason}} ->
            {noreply, State#state{error_count = Count + 1}};
        {_, {ok, {_TalkerId, MessageType, _Values} = Sentence}} ->
            % Good sentence: cache it and reset the error counter
            LastSentences2 = LastSentences#{MessageType => Sentence},
            {noreply, State#state{last_sentences = LastSentences2,
                                  error_count = 0}}
    end;
handle_info(_Any, State) ->
    {noreply, State}.
% @private
code_change(_OldVsn, State, _Extra) -> {ok, State}.
% @private
terminate(_Reason, _State) -> ok.
%--- Internal -----------------------------------------------------------------
% Synchronous call to the default registered device process for this module.
% Unwraps {ok, Result}; re-raises {error, Reason} in the caller as an error.
call(Call) ->
    Dev = grisp_devices:default(?MODULE),
    case gen_server:call(Dev#device.pid, Call) of
        {error, Reason} -> error(Reason);
        {ok, Result} -> Result
    end.
% Serve a {get, MessageId} request from the cache of last-seen sentences.
% An id outside the cache yields {error, {unknown_sentence, MessageId}};
% any other request shape is a programmer error and crashes the server.
% (Trailing non-Erlang junk removed from the final line.)
execute_call({get, MessageId}, #state{last_sentences = Sentences} = State) ->
    case maps:find(MessageId, Sentences) of
        error ->
            {reply, {error, {unknown_sentence, MessageId}}, State};
        {ok, Sentence} ->
            {reply, {ok, Sentence}, State}
    end;
execute_call(Request, _State) ->
    error({unknown_call, Request}).
%% @author <NAME> <<EMAIL>>
%% @copyright 2012 <NAME>
%% @doc Calculate a quadtile code from a lat/long location. (http://wiki.openstreetmap.org/wiki/QuadTiles)
%% Copyright 2012 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(geomap_quadtile).
-author('<NAME> <<EMAIL>>').
%% Default 31 bits, as 62 bits fit in a PostgreSQL 'bigint'
-define(BITS, 31).
%% Rounding precision when decoding (ca. 10cm)
-define(PREC, 1000000).
-export([
encode/2,
encode/3,
decode/1,
decode/2,
test/0
]).
-include_lib("zotonic_core/include/zotonic.hrl").
%% @doc Calculate a quadtile number in 2*?BITS = 62 bits (?BITS = 31 bits
%% per coordinate, chosen so the result fits a PostgreSQL bigint).
-spec encode(Latitude :: float(), Longitude :: float()) -> integer().
encode(Latitude, Longitude) ->
    encode(Latitude, Longitude, ?BITS).
%% @doc Calculate a quadtile number in a 2*Bits integer (Bits precision per coordinate)
-spec encode(Latitude :: float(), Longitude :: float(), Bits :: integer()) -> integer().
encode(Latitude, Longitude, Bits) ->
    %% Longitude bits come first at every interleaved position.
    interleave(to_int(Longitude, 180.0, Bits),
               to_int(Latitude, 90.0, Bits),
               Bits - 1,
               0).
%% @doc Decode the long/lat from a quadtile
-spec decode(integer()) -> {Latitude :: float(), Longitude :: float()}.
decode(Quadtile) ->
    decode(Quadtile, ?BITS).
-spec decode(Quadtile :: integer(), Bits :: integer()) -> {Latitude :: float(), Longitude :: float()}.
decode(Quadtile, Bits) ->
    %% The top mask bit is 1 bsl (2*Bits - 1), i.e. the first longitude bit.
    {LongInt, LatInt} = deinterleave(0, 0, 1 bsl (Bits+Bits-1), Bits, Quadtile),
    {to_float(LatInt, 90.0, Bits),
     to_float(LongInt, 180.0, Bits)}.
%% @doc Interleave the bits of Long and Lat MSB-first, starting at bit N,
%% producing a Morton-style code in which the longitude bit precedes the
%% latitude bit at every position.
interleave(_Long, _Lat, -1, Acc) ->
    Acc;
interleave(Long, Lat, N, Acc) ->
    LongBit = (Long bsr N) band 1,
    LatBit = (Lat bsr N) band 1,
    interleave(Long, Lat, N - 1, (Acc bsl 2) bor (LongBit bsl 1) bor LatBit).
%% @doc Recover the longitude and latitude integers by de-interleaving the
%% bits of Code. Mask selects the current longitude bit (the latitude bit
%% is one position lower); recursion ends when the mask is exhausted.
deinterleave(Long, Lat, 0, _Shift, _Code) ->
    {Long, Lat};
deinterleave(Long, Lat, Mask, Shift, Code) ->
    LongBits = (Code band Mask) bsr Shift,
    LatBits = (Code band (Mask bsr 1)) bsr (Shift - 1),
    deinterleave(Long bor LongBits, Lat bor LatBits, Mask bsr 2, Shift - 1, Code).
%% @doc Map a longitude/latitude to an integer in the range 0..(2**Bits-1)
%% The angle is first wrapped into [-MaxAngle, MaxAngle) and then scaled
%% linearly onto the integer range.
to_int(Angle, MaxAngle, Bits) ->
    round((fmod(Angle, MaxAngle)+MaxAngle)/(2.0*MaxAngle) * ((1 bsl Bits)-1)).
%% @doc Map a N bits integer to a float longitude/latitude
%% Rounded to ?PREC (ca. 10cm) so decoded values are stable.
to_float(N, MaxAngle, Bits) ->
    round(((N / ((1 bsl Bits) -1))*2.0*MaxAngle - MaxAngle) * ?PREC) / ?PREC.
%% @doc Normalize Angle into the half-open interval [-Max, Max) by
%% shifting it one full period (2*Max) at a time toward the interval.
fmod(Angle, Max) ->
    if
        Angle < -Max -> fmod(Angle + 2.0 * Max, Max);
        Angle >= Max -> fmod(Angle - 2.0 * Max, Max);
        true -> Angle
    end.
%% @doc Smoke test for encode/decode round-trips and the extreme values.
%% (Trailing non-Erlang junk removed from the final line.)
test() ->
    0 = geomap_quadtile:encode(-90, -180),
    0 = geomap_quadtile:encode(90, 180),
    {-90.0,-180.0} = geomap_quadtile:decode(0),
    {1.0, 2.0} = geomap_quadtile:decode(geomap_quadtile:encode(1,2)),
    % Below is close to (1 bsl (?BITS*2))-1 = 4611686018427387903
    4611686018427387900 = geomap_quadtile:encode(89.9999999, 179.9999999),
    {90.0, 180.0} = geomap_quadtile:decode(4611686018427387900),
    ok.
%%%-------------------------------------------------------------------
%%% @author yangcancai
%%% Copyright (c) 2021 by yangcancai(<EMAIL>), All Rights Reserved.
%%%
%%% Licensed under the Apache License, Version 2.0 (the "License");
%%% you may not use this file except in compliance with the License.
%%% You may obtain a copy of the License at
%%%
%%% https://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing, software
%%% distributed under the License is distributed on an "AS IS" BASIS,
%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%%% See the License for the specific language governing permissions and
%%% limitations under the License.
%%%
%%% @doc
%%%
%%% @end
%%% Created : 2021-05-19T07:19:22+00:00
%%%-------------------------------------------------------------------
-module(bd_bench).
-author("yangcancai").
-include("big_data.hrl").
-export([run/0, run/2, overview/0]).
-define(TotalCmdIx, 1).
-define(TotalTime, 2).
-define(FIELDS, [total_command, sum_time]).
-define(SIZE, erlang:length(?FIELDS)).
%% Run the benchmark with 100 buckets spread over 100 worker processes.
run() ->
    run(100, 100).
%% Build the default config for N buckets and Degree workers.
%% NOTE(review): the key is spelled `mix_row_bytes' but presumably means
%% "min" row bytes — confirm against loop/3 before renaming.
run(N, Degree) when is_integer(N) ->
    run(#{bucket_counter => N,
          degree => Degree,
          min_row => 1,
          max_row => 300,
          mix_row_bytes => 100,
          max_row_bytes => 500}).
%% Spawn a coordinator that starts Degree workers (each handling
%% BucketCounter div Degree buckets), waits for them, and prints the
%% overview. Counters and start/end timestamps live in persistent_term.
run(#{bucket_counter := BucketCounter, degree := Degree} = Config) ->
    spawn_link(fun() ->
                  Each = BucketCounter div Degree,
                  Pid = self(),
                  Size = ?SIZE,
                  Ref = counters:new(Size, []),
                  persistent_term:put(bd_bench_s, erlang:system_time(1000)),
                  persistent_term:put(counters, Ref),
                  Pids =
                      [spawn_link(fun() -> loop(Pid, Each, Config) end)
                       || _ <- lists:seq(1, Degree)],
                  wait_loop(Pids),
                  Rs = overview(),
                  io:format("Overview = ~p~n", [Rs])
               end).
%% Summarize the benchmark counters: per-command average latency (ms),
%% commands per second, and total wall time (ms). Requires run/1 to have
%% populated the persistent_term entries first.
overview() ->
    Ref = persistent_term:get(counters),
    L = [counters:get(Ref, I) || I <- lists:seq(1, ?SIZE)],
    R = #{total_command := Cmd, sum_time := Time} =
        maps:from_list(
            lists:zip(?FIELDS, L)),
    %% Tps here is actually average microseconds per command (sum_time is
    %% accumulated from timer:tc), not transactions per second.
    Tps = Time div Cmd,
    TotalTime = persistent_term:get(bd_bench_e) - persistent_term:get(bd_bench_s),
    S = TotalTime div 1000,
    %% Guard against a sub-second run so the tps division never sees 0.
    Sec = case S of
              0 ->
                  1;
              S ->
                  S
          end,
    R#{aver => Tps div 1000,
       tps => Cmd div Sec,
       total_time => TotalTime}.
%% Wait until every worker pid has reported done (or exited), then stamp
%% the benchmark end time.
%% NOTE(review): the {'EXIT', ...} clause only fires if this process traps
%% exits, which run/1 does not appear to enable — confirm.
wait_loop([]) ->
    persistent_term:put(bd_bench_e, erlang:system_time(1000)),
    ok;
wait_loop(Pids) ->
    receive
        {Pid, done} ->
            ?DEBUG("Bech done pid = ~p", [Pid]),
            wait_loop(lists:delete(Pid, Pids));
        {'EXIT', Pid, _} ->
            ?DEBUG("Bech exit pid = ~p", [Pid]),
            wait_loop(lists:delete(Pid, Pids))
    end.
%% Worker body: for Each buckets, insert a random number of rows with
%% random payload sizes through big_data:command/1, timing every insert
%% and accumulating count/latency into the shared counters.
loop(Parent,
     Each,
     #{min_row := MixRow,
       max_row := MaxRow,
       mix_row_bytes := MixRowBytes,
       max_row_bytes := MaxRowBytes}) ->
    Row = gen_row(MixRow, MaxRow),
    Ref = persistent_term:get(counters),
    %% insert
    [begin
         Bucket = gen_bucket(),
         [begin
              RowBytes = gen_data(MixRowBytes, MaxRowBytes),
              counters:add(Ref, ?TotalCmdIx, 1),
              %% timer:tc returns microseconds; summed into ?TotalTime.
              {T, _Rs} =
                  timer:tc(fun() ->
                              big_data:command(#bd_wal{action = insert,
                                                       args =
                                                           [Bucket,
                                                            erlang:integer_to_binary(RowID),
                                                            erlang:system_time(1000),
                                                            RowBytes]})
                           end),
              counters:add(Ref, ?TotalTime, T)
          end
         || RowID <- lists:seq(1, Row)]
     end
     || _ <- lists:seq(1, Each)],
    Parent ! {self(), done}.
%% Pick a per-bucket row count in (MixRow, MaxRow].
gen_row(MixRow, MaxRow) ->
    rand(MixRow, MaxRow).
%% A fresh random 32-byte bucket id.
gen_bucket() ->
    rand_bytes(32).
%% A random payload whose size lies in (MixRowBytes, MaxRowBytes].
gen_data(MixRowBytes, MaxRowBytes) ->
    RowBytes = rand(MixRowBytes, MaxRowBytes),
    rand_bytes(RowBytes).
%% Uniform integer in (Mix, Max]; note Mix itself is never produced, and
%% rand:uniform/1 raises badarg when Max =< Mix.
rand(Mix, Max) ->
    rand:uniform(Max - Mix) + Mix.
%% Produce N random bytes via the ?BD_RAND_BYTES macro from big_data.hrl.
%% (Trailing non-Erlang junk removed from the final line.)
rand_bytes(N) ->
    ?BD_RAND_BYTES(N).
%%%------------------------------------------------------------------------
%% Copyright 2018, OpenCensus Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%% Measure represents a type of metric to be tracked and recorded.
%% For example, latency, request Mb/s, and response Mb/s are measures
%% to collect from a server.
%%
%% Measure is a generic interface for recording values in aggregations
%% via subscribed views.
%% When recording a value, we have to obtain the list of all subscribed views
%% and call respective aggregations. We use code generation to optimize this.
%% When a view subscribed or unsubscribed we regenerate unrolled loop in a
%% special module (one for each measure). Module names generated from measurement
%% names (1-to-1). If we know a measure name at the compile time, we can eliminate
%% the module name lookup and inject remote call directly, replacing `oc_stat:record'
%% with `<GENERATED_MEASURE_MODULE>:record'.
%% For that {parse_transform, oc_stat_measure} option must be used.
%% @end
%%%-----------------------------------------------------------------------
-module(oc_stat_measure).
%% user api
-export([new/3,
exists/1,
unit/1]).
%% codegen
-export([measure_module/1,
module_name/1,
maybe_module_name/1,
regen_record/2,
delete_measure/1]).
%% unsafe api, needs snychronization
-export([register_/1,
add_subscription_/2,
remove_subscription_/2,
terminate_/0]).
-export(['__init_backend__'/0]).
-export_type([name/0,
description/0,
unit/0,
measure/0]).
-record(measure, {name :: name(),
module :: module(),
description :: description(),
unit :: unit()}).
-type name() :: atom() | binary() | string().
-type description() :: binary() | string().
-type unit() :: atom().
-type measure() :: #measure{}.
-define(MEASURES_TABLE, ?MODULE).
-dialyzer({nowarn_function, regen_module/3}).
-dialyzer({nowarn_function, insert_measure_/1}).
-dialyzer({nowarn_function, regen_record/2}).
-dialyzer({nowarn_function, delete_measure/1}).
%% @doc
%% Creates and registers a measure. If a measure with the same name
%% already exists, the old measure is returned. Registration is routed
%% through the oc_stat gen_server so table writes are serialized.
%% @end
-spec new(name(), description(), unit()) -> oc_stat_view:measure().
new(Name, Description, Unit) ->
    gen_server:call(oc_stat, {measure_register,
                              #measure{name=Name,
                                       module=oc_stat_measure:module_name(Name),
                                       description=Description,
                                       unit=Unit}}).
%% @doc
%% Returns the measure registered under `Name' (or the record's name),
%% or `false' when no such measure exists.
%% @end
-spec exists(name() | measure()) -> measure() | false.
exists(#measure{name=Name}) ->
    exists(Name);
exists(Name) ->
    case ets:lookup(?MEASURES_TABLE, Name) of
        [Measure] ->
            Measure;
        _ -> false
    end.
%% Accessor for the measure's unit field.
-spec unit(measure()) -> unit().
unit(#measure{unit=Unit}) ->
    Unit.
%% =============================================================================
%% internal
%% =============================================================================
%% @private
%% Register Measure unless one with the same name already exists, in
%% which case the existing record wins (first registration is sticky).
register_(#measure{name=Name}=Measure) ->
    case exists(Name) of
        false ->
            insert_measure_(Measure);
        OldMeasure ->
            OldMeasure
    end.
%% @private
%% Store the record and generate the measure's codegen module with an
%% empty subscription list.
insert_measure_(#measure{module=Module}=Measure) ->
    ets:insert(?MEASURES_TABLE, Measure),
    regen_record(Module, []),
    Measure.
%% @private
%% Add a view subscription by regenerating the measure module with the
%% new subscription prepended to its current ones.
add_subscription_(Measure, VS) ->
    case exists(Measure) of
        false ->
            {error, {unknown_measure, Measure}};
        #measure{module=Module} ->
            Subs = Module:subs(),
            regen_record(Module, [VS | Subs]),
            ok
    end.
%% @private
%% Remove a subscription; removing from an unknown measure is a no-op.
remove_subscription_(Name, VS) ->
    case exists(Name) of
        false ->
            ok;
        #measure{module=Module} ->
            Subs = Module:subs(),
            regen_record(Module, lists:delete(VS, Subs)),
            ok
    end.
%% @private
%% Replace every measure's generated module with an error stub.
terminate_() ->
    [delete_measure(M) || M <- ets:tab2list(?MEASURES_TABLE)].
%% @private
%% Create the measures table; keypos 2 indexes on #measure.name.
'__init_backend__'() ->
    ?MEASURES_TABLE = ets:new(?MEASURES_TABLE, [set, named_table, public, {keypos, 2}]),
    ok.
%% =============================================================================
%% codegen
%% =============================================================================
%% @private
%% Look up the generated module for a registered measure; raises
%% {unknown_measure, Name} for unregistered names.
measure_module(Name) ->
    case ets:lookup(?MEASURES_TABLE, Name) of
        [#measure{module=Module}] ->
            Module;
        _ -> erlang:error({unknown_measure, Name})
    end.
%% @private
%% Derive the per-measure codegen module name (1-to-1 with measure names).
-spec module_name(name()) -> module().
module_name(Name) ->
    list_to_atom(module_name_str(Name)).

%% Render any accepted measure-name shape as a flat string.
module_name_str(Name) when is_atom(Name) ->
    name_template(atom_to_list(Name));
module_name_str(Name) when is_binary(Name) ->
    name_template(binary_to_list(Name));
module_name_str(Name) when is_list(Name) ->
    name_template(binary_to_list(iolist_to_binary(Name))).

%% Prefix chosen so generated modules cannot clash with user modules.
name_template(Suffix) ->
    lists:flatten(["$_MEASURE_", Suffix]).
%% @private
%% Like module_name/1 but never creates a new atom; raises badarg if the
%% measure module was never generated.
maybe_module_name(Name) ->
    list_to_existing_atom(module_name_str(Name)).
%% @private
%% Regenerate the measure module so record/2 unrolls one add-sample call
%% per subscribed view and subs/0 returns the literal subscription list.
regen_record(ModuleName, VSs) ->
    regen_module(ModuleName, gen_add_sample_calls(VSs), erl_parse:abstract(VSs)).
%% @private
%% Replace the measure's generated module with a stub whose record/2 and
%% subs/0 both raise {unknown_measure, Name}.
delete_measure(#measure{name=Name, module=Module}) ->
    ErrorA = erl_parse:abstract({unknown_measure, Name}),
    regen_module(Module,
                 gen_add_sample_calls([])
                 ++ [{call, 1,
                      {remote, 1, {atom, 1, erlang}, {atom, 1, error}},
                      [ErrorA]}],
                 {call, 1,
                  {remote, 1, {atom, 1, erlang}, {atom, 1, error}},
                  [ErrorA]}).
%% @private
%% Compile and hot-load the measure module from abstract forms. The module
%% exports record(ContextTags, Value) with the given unrolled body, and
%% subs/0 returning the Subs expression.
regen_module(ModuleName, RecordBody, Subs) ->
    ModuleNameStr = atom_to_list(ModuleName),
    {ok, Module, Binary} =
        compile:forms(
          [{attribute, 1, file,
            {ModuleNameStr,
             1}},
           {attribute, 1, module, ModuleName},
           {attribute, 1, export,
            [{record, 2}]},
           {attribute, 1, export,
            [{subs, 0}]},
           {function, 1, record, 2,
            [{clause, 1, [{var, 1, 'ContextTags'}, {var, 1, 'Value'}], [],
              RecordBody ++ [{atom, 1, ok}]
             }]},
           {function, 1, subs, 0,
            [{clause, 1, [], [],
              [Subs]
             }]},
           {eof, 2}]),
    {module, Module} = code:load_binary(Module, ModuleNameStr, Binary).
%% Build the abstract body of the generated record/2 function. With no
%% subscriptions, match both arguments into `_' so the compiler does not
%% warn about unused variables; otherwise emit one add-sample call per
%% subscribed view. (Trailing non-Erlang junk removed from the final line.)
gen_add_sample_calls([]) ->
    [{match, 1, {var, 1, '_'}, {var, 1, 'ContextTags'}},
     {match, 1, {var, 1, '_'}, {var, 1, 'Value'}}];
gen_add_sample_calls(VSs) ->
    lists:map(fun oc_stat_view:gen_add_sample_/1, VSs).
%% =====================================================================
%% @doc Hugin is a framework to simplify the process of defining your
%% own web crawlers. It tries to do so while maintaining maximum
%% flexibility and efficiency.
%%
%% The name Hugin comes from one the two ravens owned by Odin in
%% Norse mythology, Hugin and Munin. Odin sent out the two ravens every
%% morning to scout the world and bring the entire world's accumulated
%% information back to him.
%%
%% Similar to the case for Odin, the Hugin framework helps you to crawl
%% the internet in the pursuit of gathering the World Wide Web's
%% collective information and bringing it back to you.
%% @copyright 2015 <NAME>
%% @author <NAME> <<EMAIL>>
%% @version {@version}
%% @end
%% =====================================================================
-module(hugin).
-export([start/0, start/1]).
-export([url/1, url/2]).
-export([set_option/2, set_options/2, max_freq/3, max_freq/4, max_par/2]).
-type url() :: binary().
-type server_ref() :: atom() | pid().
%%%=========================================================================
%%% API
%%%=========================================================================
-spec start() -> ok | {error, any()}.
%% @doc Start the application with no active crawlers.
%% hackney is started first since crawlers depend on it for HTTP.
start() ->
    hackney:start(),
    application:ensure_started(hugin).
-spec start(Callback :: module() | fun()) -> {ok, pid()}.
%% @doc Start the application together with one crawler.
%% The callback can be either an atom with the module or a function.
%% See {@link raven} for more information about the callback.
start(M) when is_atom(M) ->
    start1(M);
%% Start application with one crawler and callback function F.
start(F) when is_function(F) ->
    start1(F).
%% Ensure the app is running, then attach a transient per-crawler
%% supervisor (hugin_server_sup) under the top-level hugin_sup.
start1(Cback) ->
    start(),
    Id = supervisor_id(Cback),
    supervisor:start_child(
      hugin_sup,
      {Id,
       {supervisor, start_link, [{local, Id}, hugin_server_sup, [Cback, Id]]},
       transient, 5000, supervisor, [ hugin_server_sup ]}).
%% @doc Return the pool of URLs waiting to be fetched.
-spec url(ServerRef :: server_ref()) -> [ url() ].
url(ServerRef) ->
    hugin_server:url(ServerRef).
-spec url(ServerRef :: server_ref(), [url()]) -> ok | {error, any()}.
%% @doc Add more URLs to be fetched.
url(ServerRef, Urls) ->
    hugin_server:url(ServerRef, Urls).
%%%========================================================================
%%% Functions to edit the behavior of the server
%%%========================================================================
-spec max_freq(Ref :: server_ref(), Amount :: integer(),
               U :: hugin_opts:time_unit())
              -> ok | {error, Reason :: any()}.
%% @doc An option to limit the frequency of calls that Hugin makes.
%% The default option is to have no frequency limits.
%% However, Hugin still limits the amount of simultaneous outgoing connections
%% to parallel sessions by default.
%% See {@link max_par/2} for more information.
%% @equiv max_freq(ServerRef, Amount, 1, U)
%% @see hugin_opts:max_freq/3
max_freq(ServerRef, Amount, Unit) ->
    max_freq(ServerRef, Amount, 1, Unit).
%% @equiv set_option(ServerRef, hugin_opts:max_freq(ServerRef, Amount, 1, U))
%% @see hugin_opts:max_freq/4
max_freq(ServerRef, Amount, N, Unit) ->
    set_option(ServerRef, hugin_opts:max_freq(Amount, N, Unit)).
%% @doc An option to limit the amount of simultaneous parallel outgoing
%% connections that Hugin will keep. The default is 5.
max_par(ServerRef, N) ->
    set_option(ServerRef, hugin_opts:max_par(N)).
-spec set_option(ServerRef :: server_ref(), Option :: hugin_opts:opt())
                -> ok | {error, Reason :: any()}.
%% @doc Immediately update the behavior of the Hugin server.
%% @equiv set_options(ServerRef, [Option])
set_option(ServerRef, O) ->
    set_options(ServerRef, [O]).
-spec set_options(ServerRef :: server_ref(), Options :: [ hugin_opts:opt() ])
                 -> ok | {error, Reason :: any()}.
%% @doc Immediately update the behavior of the Hugin server.
set_options(ServerRef, Options) ->
    %% Verify that ServerRef is of the right type. The options will be checked
    %% in hugin_server and return {error, badarg} if any of them are not.
    case is_atom(ServerRef) orelse is_pid(ServerRef) of
        false -> exit(badarg);
        true -> hugin_server:set_options(ServerRef, Options)
    end.
%%%=========================================================================
%%% Private Functions
%%%=========================================================================
%% Build the registered name for a crawler supervisor: fun callbacks get a
%% unique ref-based name, module callbacks get `<module>_sup'.
%% NOTE(review): list_to_atom/1 creates a fresh atom per anonymous-fun
%% crawler; atoms are never GC'd, so starting very many such crawlers
%% slowly grows the atom table.
%% (Trailing non-Erlang junk removed from the final line.)
supervisor_id(F) when is_function(F) ->
    supervisor_id(erlang:ref_to_list(make_ref()));
supervisor_id(Module) ->
    list_to_atom(lists:concat([Module, "_sup"])).
%% @author <NAME> <<EMAIL>>
%% @doc This is AOC2020 <em>day11</em> solution
%% @reference <a href="https://adventofcode.com/2020/day/11">AOC 2020 day 11</a> for
%% @since 2020-12-18
%% @version 0.5.0
-module(day11).
-export([solve_part1/1, solve_part2/1]).
% could be shielded with -ifdef(TEST) macro
-export([parse/1,
adjacent/2, at/2, visible/2,
next/2, next_adj/2, next_vis/2,
count_occupied/1]).
%%% solution
%% @doc Entry point for part 1: parse the raw grid, then iterate to
%% equilibrium with the adjacency rules.
solve_part1(Input) ->
    Grid = parse(Input),
    part1(Grid).
%% @doc Entry point for part 2: parse the raw grid, then iterate to
%% equilibrium with the visibility rules.
solve_part2(Input) ->
    Grid = parse(Input),
    part2(Grid).
-type tile() :: floor | empty | occupied.
-type grid() :: [[tile(), ...], ...].
-spec parse(GridStr) -> Grid when
    GridStr :: string(),
    Grid :: grid().
%% @doc Convert the puzzle input string into a grid.
%% @param GridStr grid text in the task's input format, one row per line.
%% @returns a list of rows, each row a list of the atoms `floor', `empty'
%% and `occupied'.
parse(GridStr) ->
    [parse_line(Line) || Line <- string:lexemes(GridStr, "\n")].

%% Map one text row to a row of tile atoms.
parse_line(Line) ->
    lists:map(fun parse_symbol/1, Line).

%% Map one input character to its tile atom.
parse_symbol($.) -> floor;
parse_symbol($L) -> empty;
parse_symbol($#) -> occupied.
%% @doc List the in-bounds coordinates adjacent to {X, Y}, sorted.
%% The grid spans 1..MaxX by 1..MaxY; the cell itself is excluded.
adjacent({MaxX, MaxY}, {X, Y}) ->
    Candidates = [{X + DX, Y + DY} || DX <- [-1, 0, 1], DY <- [-1, 0, 1],
                                      {DX, DY} =/= {0, 0}],
    lists:sort([{CX, CY} || {CX, CY} <- Candidates,
                            CX >= 1, CY >= 1, CX =< MaxX, CY =< MaxY]).
-spec at(Grid, {X, Y}) -> Tile when
    Grid :: grid(),
    X :: integer(),
    Y :: integer(),
    Tile :: tile().
%% @doc Fetch the tile at 1-based coordinates {X, Y} (X = row, Y = column).
at(Grid, {X, Y}) ->
    Row = lists:nth(X, Grid),
    lists:nth(Y, Row).
-spec visible(Grid, {X, Y}) -> Tiles when
    Grid :: grid(),
    X :: integer(),
    Y :: integer(),
    Tiles :: [tile()].
%% @doc List the first non-floor tile seen in each of the eight
%% directions from Origin; directions that hit the edge contribute
%% nothing (the `wall' sentinel is filtered out).
visible(Grid, Origin) ->
    Directions = [{1, 0}, {1, 1}, {0, 1}, {-1, 1},
                  {-1, 0}, {-1, -1}, {0, -1}, {1, -1}],
    [Tile || Vector <- Directions,
             Tile <- [arrow(Grid, Origin, Vector)],
             Tile =/= wall].
%% @doc Shoots an arrow from {X, Y} in the direction of {Dx, Dy}
%% returns either `empty`, `occupied`, or `wall`.
%% Floor tiles are skipped by recursing one step further; the edge of the
%% grid (coordinate 0 or Max+1) is reported as `wall'.
arrow(Grid, _Origin={X, Y}, Vector={Dx, Dy}) ->
    {MaxX, MaxY} = dimensions(Grid),
    NewOrigin = {NewX, NewY} = {X + Dx, Y + Dy},
    case lists:member(NewX, [0, MaxX + 1])
        or lists:member(NewY, [0, MaxY + 1]) of
        true -> wall;
        false ->
            case at(Grid, {NewX, NewY}) of
                empty -> empty;
                occupied -> occupied;
                floor -> arrow(Grid, NewOrigin, Vector)
            end
    end.
%% @doc Return {rows, columns} of the grid, assuming all rows are equal length.
dimensions(Grid) ->
    NumRows = length(Grid),
    NumCols = length(lists:nth(1, Grid)),
    {NumRows, NumCols}.
%% @doc Advance the whole grid one generation; the Strategy fun computes
%% the next state of each tile from the current grid.
next(Grid, Strategy) ->
    {Rows, Cols} = dimensions(Grid),
    [[Strategy(Grid, {Row, Col}) || Col <- lists:seq(1, Cols)]
     || Row <- lists:seq(1, Rows)].
%%% Counting Strategies
%% @doc Produces the next generation tile at {X, Y}.
%% Gathers the tiles adjacent to the cell and applies the adjacency rules.
next_adj(Grid, {X, Y}) ->
    {MaxX, MaxY} = dimensions(Grid),
    Tile = at(Grid, {X, Y}),
    Adjacent = [at(Grid, {AdjX, AdjY})
                || {AdjX, AdjY} <- adjacent({MaxX, MaxY},
                                            {X, Y})],
    next_adj2(Tile, Adjacent).
%% @doc Produces the next generation tile given current and
%% adjacent tiles.
%% Rules: an empty seat with no occupied neighbour becomes occupied; an
%% occupied seat with four or more occupied neighbours empties; floor
%% never changes.
next_adj2(floor, _Adjacent) -> floor;
next_adj2(empty, Adjacent) ->
    case lists:member(occupied, Adjacent) of
        false -> occupied;
        true -> empty
    end;
next_adj2(occupied, Adjacent) ->
    case count(occupied, Adjacent) >= 4 of
        true -> empty;
        false -> occupied
    end.
%% @doc Produces the next generation tile using visibility rules
%% An empty seat with no visible occupied seat becomes occupied; an
%% occupied seat seeing five or more occupied seats empties; floor never
%% changes.
next_vis(Grid, Origin) ->
    Tile = at(Grid, Origin),
    Visible = visible(Grid, Origin),
    case Tile of
        floor -> floor;
        occupied ->
            case count(occupied, Visible) >= 5 of
                true -> empty;
                false -> occupied
            end;
        empty ->
            case count(occupied, Visible) =:= 0 of
                true -> occupied;
                false -> empty
            end
    end.
%% @doc Count the occupied seats in the whole grid.
count_occupied(Grid) ->
    length([Tile || Row <- Grid, Tile <- Row, Tile =:= occupied]).
%% @doc Count how many times X occurs in the list.
count(X, List) ->
    lists:foldl(fun(E, Acc) when E =:= X -> Acc + 1;
                   (_, Acc) -> Acc
                end, 0, List).
%% @doc Iterate next_adj generations until the grid stops changing, then
%% count the occupied seats.
part1(Grid) ->
    case next(Grid, fun next_adj/2) of
        Grid -> count_occupied(Grid);
        Changed -> part1(Changed)
    end.
%% @doc Iterate next_vis generations until the grid stops changing, then
%% count the occupied seats.
%% (Trailing non-Erlang junk removed from the final line.)
part2(Grid) ->
    NewGrid = next(Grid, fun next_vis/2),
    case NewGrid =:= Grid of
        true -> count_occupied(Grid);
        false -> part2(NewGrid)
    end.
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_stats).
-export([
start/0,
stop/0,
fetch/0,
reload/0,
sample/1,
new/2,
delete/1,
list/0,
increment_counter/1,
increment_counter/2,
decrement_counter/1,
decrement_counter/2,
update_histogram/2,
update_gauge/2
]).
-include("couch_stats.hrl").
-type response() :: ok | {error, unknown_metric}.
-type stat() :: {any(), [{atom(), any()}]}.
%% Start/stop the couch_stats application.
start() ->
    application:start(couch_stats).
stop() ->
    application:stop(couch_stats).
%% Fetch the current aggregated stats snapshot.
fetch() ->
    couch_stats_aggregator:fetch().
%% Reload the stat descriptions from configuration.
reload() ->
    couch_stats_aggregator:reload().
-spec sample(any()) -> stat().
%% Sample one metric; crashes with badmatch if the metric is unknown.
sample(Name) ->
    [{Name, Info}] = folsom_metrics:get_metric_info(Name),
    sample_type(Name, proplists:get_value(type, Info)).
-spec new(atom(), any()) -> ok | {error, metric_exists | unsupported_type}.
%% Create a folsom metric of the given type. Histograms use a sliding
%% uniform window sized by the configured stats interval.
new(counter, Name) ->
    case folsom_metrics:new_counter(Name) of
        ok -> ok;
        {error, Name, metric_already_exists} -> {error, metric_exists}
    end;
new(histogram, Name) ->
    Time = config:get_integer("stats", "interval", ?DEFAULT_INTERVAL),
    case folsom_metrics:new_histogram(Name, slide_uniform, {Time, 1024}) of
        ok -> ok;
        {error, Name, metric_already_exists} -> {error, metric_exists}
    end;
new(gauge, Name) ->
    case folsom_metrics:new_gauge(Name) of
        ok -> ok;
        {error, Name, metric_already_exists} -> {error, metric_exists}
    end;
new(_, _) ->
    {error, unsupported_type}.
%% Remove a metric entirely.
delete(Name) ->
    folsom_metrics:delete_metric(Name).
%% List all registered metrics with their metadata.
list() ->
    folsom_metrics:get_metrics_info().
%% Counter helpers; each returns ok or {error, unknown_metric}.
-spec increment_counter(any()) -> response().
increment_counter(Name) ->
    notify_existing_metric(Name, {inc, 1}, counter).
-spec increment_counter(any(), pos_integer()) -> response().
increment_counter(Name, Value) ->
    notify_existing_metric(Name, {inc, Value}, counter).
-spec decrement_counter(any()) -> response().
decrement_counter(Name) ->
    notify_existing_metric(Name, {dec, 1}, counter).
-spec decrement_counter(any(), pos_integer()) -> response().
decrement_counter(Name, Value) ->
    notify_existing_metric(Name, {dec, Value}, counter).
-spec update_histogram(any(), number()) -> response();
                      (any(), function()) -> any().
%% Time a 0-arity fun and record its duration in milliseconds, or record
%% a plain numeric sample directly. Timing against an unknown metric
%% throws {unknown_metric, Name}.
update_histogram(Name, Fun) when is_function(Fun, 0) ->
    %% Use the monotonic clock for durations: os:timestamp() is wall-clock
    %% time, so an NTP step between the two reads could produce a negative
    %% or wildly wrong duration sample.
    Begin = erlang:monotonic_time(),
    Result = Fun(),
    Duration = erlang:convert_time_unit(
        erlang:monotonic_time() - Begin, native, millisecond),
    case notify_existing_metric(Name, Duration, histogram) of
        ok ->
            Result;
        {error, unknown_metric} ->
            throw({unknown_metric, Name})
    end;
update_histogram(Name, Value) when is_number(Value) ->
    notify_existing_metric(Name, Value, histogram).
-spec update_gauge(any(), number()) -> response().
%% Set a gauge to an absolute value.
update_gauge(Name, Value) ->
    notify_existing_metric(Name, Value, gauge).
-spec notify_existing_metric(any(), any(), any()) -> response().
%% Forward a sample to folsom. The broad catch is deliberate: any notify
%% failure (typically an unknown metric) is logged once and converted to
%% {error, unknown_metric} instead of crashing the caller.
notify_existing_metric(Name, Op, Type) ->
    try
        ok = folsom_metrics:notify_existing_metric(Name, Op, Type)
    catch _:_ ->
        couch_log:notice("unknown metric: ~p", [Name]),
        {error, unknown_metric}
    end.
-spec sample_type(any(), atom()) -> stat().
%% Histograms need derived statistics; every other metric type exposes
%% its value directly. (Trailing non-Erlang junk removed from the final line.)
sample_type(Name, histogram) ->
    folsom_metrics:get_histogram_statistics(Name);
sample_type(Name, _) ->
    folsom_metrics:get_metric_value(Name).
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2022. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%% Purpose: Calculate tight bounds for integer operations.
%%
%% Reference:
%%
%% <NAME>, <NAME> (2 ed). <NAME> -
%% Pearson Education, Inc. Chapter 4. Arithmetic Bounds.
%%
%%
-module(beam_bounds).
-export([bounds/2, bounds/3, relop/3, infer_relop_types/3]).
-export_type([range/0]).
-type range() :: {integer(), integer()} |
{'-inf', integer()} |
{integer(), '+inf'} |
'any'.
-type range_result() :: range() | 'any'.
-type relop() :: '<' | '=<' | '>' | '>='.
-type bool_result() :: 'true' | 'false' | 'maybe'.
-type op() :: atom().
%% Maximum size of integers in bits to keep ranges for.
-define(NUM_BITS, 128).
-spec bounds(op(), range()) -> range_result().

%% Bounds for `abs/1'. For a finite range the upper bound is exact; the
%% lower bound is pinned at 0 (sound, though not always tight when the
%% input range excludes 0). Any range involving an infinity collapses to
%% {0,'+inf'}.
bounds(abs, {A, B}) when is_integer(A), is_integer(B) ->
    {0, max(abs(A), abs(B))};
bounds(abs, _R) ->
    {0, '+inf'}.
-spec bounds(op(), range(), range()) -> range_result().
bounds('+', R1, R2) ->
case {R1,R2} of
{{A,B}, {C,D}} when abs(A) bsr ?NUM_BITS =:= 0,
abs(B) bsr ?NUM_BITS =:= 0,
abs(C) bsr ?NUM_BITS =:= 0,
abs(D) bsr ?NUM_BITS =:= 0 ->
normalize({A+C,B+D});
{{'-inf',B}, {_C,D}} when abs(B) bsr ?NUM_BITS =:= 0,
abs(D) bsr ?NUM_BITS =:= 0 ->
normalize({'-inf',B+D});
{{_A,B}, {'-inf',D}} when abs(B) bsr ?NUM_BITS =:= 0,
abs(D) bsr ?NUM_BITS =:= 0 ->
normalize({'-inf',B+D});
{{A,'+inf'}, {C,_D}} when abs(A) bsr ?NUM_BITS =:= 0,
abs(C) bsr ?NUM_BITS =:= 0 ->
normalize({A+C,'+inf'});
{{A,_B}, {C,'+inf'}} when abs(A) bsr ?NUM_BITS =:= 0,
abs(C) bsr ?NUM_BITS =:= 0 ->
normalize({A+C,'+inf'});
{_, _} ->
any
end;
bounds('-', R1, R2) ->
case {R1,R2} of
{{A,B}, {C,D}} when abs(A) bsr ?NUM_BITS =:= 0,
abs(B) bsr ?NUM_BITS =:= 0,
abs(C) bsr ?NUM_BITS =:= 0,
abs(D) bsr ?NUM_BITS =:= 0 ->
normalize({A-D,B-C});
{{A,'+inf'}, {_C,D}} when abs(A) bsr ?NUM_BITS =:= 0,
abs(D) bsr ?NUM_BITS =:= 0 ->
normalize({A-D,'+inf'});
{{_A,B}, {C,'+inf'}} when abs(B) bsr ?NUM_BITS =:= 0,
abs(C) bsr ?NUM_BITS =:= 0 ->
normalize({'-inf',B-C});
{{'-inf',B}, {C,_D}} when abs(B) bsr ?NUM_BITS =:= 0,
abs(C) bsr ?NUM_BITS =:= 0 ->
normalize({'-inf',B-C});
{{A,_B}, {'-inf',D}} when abs(A) bsr ?NUM_BITS =:= 0,
abs(D) bsr ?NUM_BITS =:= 0 ->
normalize({A-D,'+inf'});
{_, _} ->
any
end;
bounds('*', R1, R2) ->
case {R1,R2} of
{{A,B}, {C,D}} when abs(A) bsr ?NUM_BITS =:= 0,
abs(B) bsr ?NUM_BITS =:= 0,
abs(C) bsr ?NUM_BITS =:= 0,
abs(D) bsr ?NUM_BITS =:= 0 ->
All = [X * Y || X <- [A,B], Y <- [C,D]],
Min = lists:min(All),
Max = lists:max(All),
normalize({Min,Max});
{{A,'+inf'}, {C,D}} when abs(A) bsr ?NUM_BITS =:= 0,
abs(C) bsr ?NUM_BITS =:= 0,
abs(D) bsr ?NUM_BITS =:= 0,
C >= 0 ->
{min(A*C, A*D),'+inf'};
{{'-inf',B}, {C,D}} when abs(B) bsr ?NUM_BITS =:= 0,
abs(C) bsr ?NUM_BITS =:= 0,
abs(D) bsr ?NUM_BITS =:= 0,
C >= 0 ->
{'-inf',max(B*C, B*D)};
{{A,B}, {'-inf',_}} when is_integer(A), is_integer(B) ->
bounds('*', R2, R1);
{{A,B}, {_,'+inf'}} when is_integer(A), is_integer(B) ->
bounds('*', R2, R1);
{_, _} ->
any
end;
bounds('div', R1, R2) ->
div_bounds(R1, R2);
bounds('rem', R1, R2) ->
rem_bounds(R1, R2);
bounds('band', R1, R2) ->
case {R1,R2} of
{{A,B}, {C,D}} when A bsr ?NUM_BITS =:= 0, A >= 0,
C bsr ?NUM_BITS =:= 0, C >= 0,
is_integer(B), is_integer(D) ->
Min = min_band(A, B, C, D),
Max = max_band(A, B, C, D),
{Min,Max};
{_, {C,D}} when is_integer(C), C >= 0 ->
{0,D};
{{A,B}, _} when is_integer(A), A >= 0 ->
{0,B};
{_, _} ->
any
end;
bounds('bor', R1, R2) ->
case {R1,R2} of
{{A,B}, {C,D}} when A bsr ?NUM_BITS =:= 0, A >= 0,
C bsr ?NUM_BITS =:= 0, C >= 0,
is_integer(B), is_integer(D) ->
Min = min_bor(A, B, C, D),
Max = max_bor(A, B, C, D),
{Min,Max};
{_, _} ->
any
end;
bounds('bxor', R1, R2) ->
case {R1,R2} of
{{A,B}, {C,D}} when A bsr ?NUM_BITS =:= 0, A >= 0,
C bsr ?NUM_BITS =:= 0, C >= 0,
is_integer(B), is_integer(D) ->
Max = max_bxor(A, B, C, D),
{0,Max};
{_, _} ->
any
end;
bounds('bsr', R1, R2) ->
case {R1,R2} of
{{A,B}, {C,D}} when is_integer(C), C >= 0 ->
Min = inf_min(inf_bsr(A, C), inf_bsr(A, D)),
Max = inf_max(inf_bsr(B, C), inf_bsr(B, D)),
normalize({Min,Max});
{_, _} ->
any
end;
bounds('bsl', R1, R2) ->
case {R1,R2} of
{{A,B}, {C,D}} when abs(A) bsr ?NUM_BITS =:= 0,
abs(B) bsr ?NUM_BITS =:= 0 ->
Min = inf_min(inf_bsl(A, C), inf_bsl(A, D)),
Max = inf_max(inf_bsl(B, C), inf_bsl(B, D)),
normalize({Min,Max});
{_, _} ->
any
end;
bounds(max, R1, R2) ->
case {R1,R2} of
{{A,B},{C,D}} ->
normalize({inf_max(A, C),inf_max(B, D)});
{_,_} ->
any
end;
bounds(min, R1, R2) ->
case {R1,R2} of
{{A,B},{C,D}} ->
normalize({inf_min(A, C),inf_min(B, D)});
{_,_} ->
any
end.
-spec relop(relop(), range(), range()) -> bool_result().
%% Evaluate a relational operator over two ranges {A,B} and {C,D}.
%% Comparing the high end of the left range against the low end of the
%% right range, and the low end of the left range against the high end
%% of the right range, yields a definite boolean only when both
%% comparisons agree; otherwise the relation cannot be decided and
%% 'maybe' is returned.
relop('<', {A,B}, {C,D}) ->
    definite_or_maybe(inf_lt(B, C), inf_lt(A, D));
relop('=<', {A,B}, {C,D}) ->
    definite_or_maybe(inf_le(B, C), inf_le(A, D));
relop('>=', {A,B}, {C,D}) ->
    definite_or_maybe(inf_ge(B, C), inf_ge(A, D));
relop('>', {A,B}, {C,D}) ->
    definite_or_maybe(inf_gt(B, C), inf_gt(A, D));
relop(_, _, _) ->
    'maybe'.
%% Both endpoint comparisons must agree to give a definite answer.
definite_or_maybe(Bool, Bool) -> Bool;
definite_or_maybe(_, _) -> 'maybe'.
-spec infer_relop_types(relop(), range(), range()) -> any().
%% Infer narrowed operand ranges under the assumption that the given
%% relational operator holds.
%% When both ranges are known, narrowing is only attempted if the
%% relation is undecidable ('maybe'); a definite true/false result
%% yields `any' (no additional information is inferred). When exactly
%% one side is unbounded (`any'), it is narrowed using the known
%% side's relevant endpoint.
infer_relop_types(Op, {_,_}=Range1, {_,_}=Range2) ->
    case relop(Op, Range1, Range2) of
        'maybe' ->
            infer_relop_types_1(Op, Range1, Range2);
        _ ->
            any
    end;
%% One side unbounded: e.g. for X < Y with X in {A,_}, Y must be at
%% least A + 1.
infer_relop_types('<', {A,_}=R1, any) ->
    {R1, normalize({inf_add(A, 1), '+inf'})};
infer_relop_types('<', any, {_,D}=R2) ->
    {normalize({'-inf', inf_add(D, -1)}), R2};
infer_relop_types('=<', {A,_}=R1, any) ->
    {R1, normalize({A, '+inf'})};
infer_relop_types('=<', any, {_,D}=R2) ->
    {normalize({'-inf', D}), R2};
infer_relop_types('>=', {_,B}=R1, any) ->
    {R1, normalize({'-inf', B})};
infer_relop_types('>=', any, {C,_}=R2) ->
    {normalize({C, '+inf'}), R2};
infer_relop_types('>', {_,B}=R1, any) ->
    {R1, normalize({'-inf', inf_add(B, -1)})};
infer_relop_types('>', any, {C,_}=R2) ->
    {normalize({inf_add(C, 1), '+inf'}), R2};
infer_relop_types(_Op, _R1, _R2) ->
    any.
%%%
%%% Internal functions.
%%%
%% Bounds of X div Y for X in [A,B] and Y in [C,D]. With all endpoints
%% finite, the quotient is evaluated for every combination of the
%% numerator endpoints and a set of candidate denominators: both
%% denominator endpoints, plus -1 and/or 1 when the denominator range
%% crosses or touches zero (zero itself is excluded from the division).
div_bounds({A,B}, {C,D}) when is_integer(A), is_integer(B),
                              is_integer(C), is_integer(D) ->
    Denominators = [min(C, D),max(C, D)|
                    %% Handle zero crossing for the denominator.
                    if
                        C < 0, 0 < D -> [-1, 1];
                        C =:= 0 -> [1];
                        D =:= 0 -> [-1];
                        true -> []
                    end],
    All = [X div Y || X <- [A,B],
                      Y <- Denominators,
                      Y =/= 0],
    Min = lists:min(All),
    Max = lists:max(All),
    normalize({Min,Max});
%% Unbounded-above numerator with a strictly positive divisor range:
%% only the lower bound is finite.
div_bounds({A,'+inf'}, {C,D}) when is_integer(C), C > 0, is_integer(D) ->
    Min = min(A div C, A div D),
    Max = '+inf',
    normalize({Min,Max});
%% Unbounded-below numerator with a strictly positive divisor range:
%% only the upper bound is finite.
div_bounds({'-inf',B}, {C,D}) when is_integer(C), C > 0, is_integer(D) ->
    Min = '-inf',
    Max = max(B div C, B div D),
    normalize({Min,Max});
div_bounds(_, _) ->
    any.
%% Bounds of X rem Y for X in [A,_] and Y in [C,D]. With a strictly
%% positive divisor range, |X rem Y| is at most D - 1; the result is
%% non-negative when the numerator cannot be negative.
rem_bounds({A,_}, {C,D}) when is_integer(C), is_integer(D), C > 0 ->
    Max = inf_add(D, -1),
    Min = if
              %% '-inf' must be tested before the `A >= 0' clause:
              %% being an atom, '-inf' would otherwise compare greater
              %% than 0 in the Erlang term order.
              A =:= '-inf' -> -Max;
              A >= 0 -> 0;
              true -> -Max
          end,
    normalize({Min,Max});
%% Divisor range of either sign (but not fixed at zero): the result
%% magnitude is bounded by the largest absolute divisor minus one.
rem_bounds(_, {C,D}) when is_integer(C), is_integer(D),
                          C =/= 0 orelse D =/= 0 ->
    Max = max(abs(C), abs(D)) - 1,
    Min = -Max,
    normalize({Min,Max});
rem_bounds(_, _) ->
    any.
%% Lower bound of X band Y for X in [A,B] and Y in [C,D] (callers
%% guarantee A >= 0 and C >= 0). The scan appears to follow the
%% classic "minAND" bounds-of-bitwise-AND technique (as in Hacker's
%% Delight): walk a single-bit mask M downwards from above the highest
%% bit of A bor C.
min_band(A, B, C, D) ->
    M = 1 bsl (upper_bit(A bor C) + 1),
    min_band(A, B, C, D, M).
%% Mask exhausted: the minimum is reached at the lower endpoints.
min_band(A, _B, C, _D, 0) ->
    A band C;
min_band(A, B, C, D, M) ->
    if
        %% Both A and C have a 0 at bit M: try rounding one of them up
        %% to the next multiple of M (set bit M, clear all lower bits).
        %% If the rounded value still fits within its range, the scan
        %% can stop (mask forced to 0); otherwise continue with the
        %% next lower bit.
        (bnot A) band (bnot C) band M =/= 0 ->
            case (A bor M) band -M of
                NewA when NewA =< B ->
                    min_band(NewA, B, C, D, 0);
                _ ->
                    case (C bor M) band -M of
                        NewC when NewC =< D ->
                            min_band(A, B, NewC, D, 0);
                        _ ->
                            min_band(A, B, C, D, M bsr 1)
                    end
            end;
        true ->
            min_band(A, B, C, D, M bsr 1)
    end.
%% Upper bound of X band Y for X in [A,B] and Y in [C,D]. The scan
%% starts at the highest bit where the two upper endpoints differ
%% (B bxor D); it appears to follow the classic "maxAND" technique.
max_band(A, B, C, D) ->
    M = 1 bsl upper_bit(B bxor D),
    max_band(A, B, C, D, M).
%% Mask exhausted: the maximum is reached at the upper endpoints.
max_band(_A, B, _C, D, 0) ->
    B band D;
max_band(A, B, C, D, M) ->
    if
        %% B has a 1 and D a 0 at bit M: clearing that bit of B and
        %% setting all lower bits can only increase B band D. Accept
        %% the adjusted endpoint if it still lies within [A,B].
        B band (bnot D) band M =/= 0 ->
            case (B band (bnot M)) bor (M - 1) of
                NewB when NewB >= A ->
                    max_band(A, NewB, C, D, 0);
                _ ->
                    max_band(A, B, C, D, M bsr 1)
            end;
        %% Symmetric case: D has a 1 and B a 0 at bit M.
        (bnot B) band D band M =/= 0 ->
            case (D band (bnot M)) bor (M - 1) of
                NewD when NewD >= C ->
                    max_band(A, B, C, NewD, 0);
                _ ->
                    max_band(A, B, C, D, M bsr 1)
            end;
        true ->
            max_band(A, B, C, D, M bsr 1)
    end.
%% Lower bound of X bor Y for X in [A,B] and Y in [C,D]. The scan
%% starts at the highest bit where the two lower endpoints differ
%% (A bxor C); it appears to follow the classic "minOR" technique.
min_bor(A, B, C, D) ->
    M = 1 bsl upper_bit(A bxor C),
    min_bor(A, B, C, D, M).
%% Mask exhausted: the minimum is reached at the lower endpoints.
min_bor(A, _B, C, _D, 0) ->
    A bor C;
min_bor(A, B, C, D, M) ->
    if
        %% A has a 0 where C has a 1 at bit M: rounding A up to the
        %% next multiple of M (set bit M, clear lower bits) cannot
        %% increase A bor C; accept it if it still fits within [A,B].
        (bnot A) band C band M =/= 0 ->
            case (A bor M) band -M of
                NewA when NewA =< B ->
                    min_bor(NewA, B, C, D, 0);
                _ ->
                    min_bor(A, B, C, D, M bsr 1)
            end;
        %% Symmetric case: C has a 0 where A has a 1 at bit M.
        A band (bnot C) band M =/= 0 ->
            case (C bor M) band -M of
                NewC when NewC =< D ->
                    min_bor(A, B, NewC, D, 0);
                _ ->
                    min_bor(A, B, C, D, M bsr 1)
            end;
        true ->
            min_bor(A, B, C, D, M bsr 1)
    end.
%% Upper bound of X bor Y for X in [A,B] and Y in [C,D]. The scan
%% walks bits that are set in both upper endpoints (B band D); it
%% appears to follow the classic "maxOR" technique.
max_bor(A, B, C, D) ->
    Intersection = B band D,
    M = 1 bsl upper_bit(Intersection),
    max_bor(Intersection, A, B, C, D, M).
%% Mask exhausted: the maximum is reached at the upper endpoints.
max_bor(_Intersection, _A, B, _C, D, 0) ->
    B bor D;
max_bor(Intersection, A, B, C, D, M) ->
    if
        %% Both B and D have bit M set: lowering one endpoint to
        %% (Endpoint - M) bor (M - 1) fills in the low bits, which can
        %% only increase B bor D. Accept the first adjusted endpoint
        %% that still lies within its range.
        Intersection band M =/= 0 ->
            case (B - M) bor (M - 1) of
                NewB when NewB >= A ->
                    max_bor(Intersection, A, NewB, C, D, 0);
                _ ->
                    case (D - M) bor (M - 1) of
                        NewD when NewD >= C ->
                            max_bor(Intersection, A, B, C, NewD, 0);
                        _ ->
                            max_bor(Intersection, A, B, C, D, M bsr 1)
                    end
            end;
        true ->
            max_bor(Intersection, A, B, C, D, M bsr 1)
    end.
%% Upper bound of X bxor Y for X in [A,B] and Y in [C,D]. The scan
%% walks bits set in both upper endpoints (which would cancel out in
%% the XOR); it appears to follow the classic "maxXOR" technique.
max_bxor(A, B, C, D) ->
    M = 1 bsl upper_bit(B band D),
    max_bxor(A, B, C, D, M).
%% Mask exhausted: the maximum is reached at the upper endpoints.
max_bxor(_A, B, _C, D, 0) ->
    B bxor D;
max_bxor(A, B, C, D, M) ->
    if
        %% Both B and D have bit M set, cancelling in the XOR. Lower
        %% one of them to (Endpoint - M) bor (M - 1) so the bit no
        %% longer cancels. Unlike min_band/max_band, a successful
        %% adjustment continues the scan with M bsr 1 rather than
        %% terminating, since more cancelling bits may remain.
        B band D band M =/= 0 ->
            case (B - M) bor (M - 1) of
                NewB when NewB >= A ->
                    max_bxor(A, NewB, C, D, M bsr 1);
                _ ->
                    case (D - M) bor (M - 1) of
                        NewD when NewD >= C ->
                            max_bxor(A, B, C, NewD, M bsr 1);
                        _ ->
                            max_bxor(A, B, C, D, M bsr 1)
                    end
            end;
        true ->
            max_bxor(A, B, C, D, M bsr 1)
    end.
%% Return the position of the most significant set bit of Val; both 0
%% and 1 map to position 0.
upper_bit(Val) ->
    upper_bit_1(Val, 0).
%% Tail-recursive helper: shift right until at most one bit remains,
%% counting the shifts performed.
upper_bit_1(Val, Pos) when Val bsr 1 =:= 0 ->
    Pos;
upper_bit_1(Val, Pos) ->
    upper_bit_1(Val bsr 1, Pos + 1).
%% Narrow both operand ranges under the assumption that the relation
%% holds. For example, for X < Y with X in {A,B} and Y in {C,D}: X can
%% be at most D - 1 and Y at least A + 1, each clamped back into the
%% operand's original range before normalizing.
infer_relop_types_1('<', {A,B}, {C,D}) ->
    Left = normalize({A, clamp(inf_add(D, -1), A, B)}),
    Right = normalize({clamp(inf_add(A, 1), C, D), D}),
    {Left,Right};
infer_relop_types_1('=<', {A,B}, {C,D}) ->
    Left = normalize({A, clamp(D, A, B)}),
    Right = normalize({clamp(A, C, D), D}),
    {Left,Right};
infer_relop_types_1('>=', {A,B}, {C,D}) ->
    Left = normalize({clamp(C, A, B), B}),
    Right = normalize({C, clamp(B, C, D)}),
    {Left,Right};
infer_relop_types_1('>', {A,B}, {C,D}) ->
    Left = normalize({clamp(inf_add(C, 1), A, B), B}),
    Right = normalize({C,clamp(inf_add(B, -1), C, D)}),
    {Left,Right}.
%%%
%%% Handling of ranges.
%%%
%%% A range can begin with '-inf' OR end with '+inf'.
%%%
%%% Atoms are greater than all integers. Therefore, we don't
%%% need any special handling of '+inf'.
%%%
%% Canonicalize a range: the doubly-infinite corner cases are mapped
%% to fixed forms ({'-inf','-inf'} to {'-inf',-1}, {'+inf','+inf'} to
%% {0,'+inf'}, and the fully unbounded range to 'any'); any other
%% range is asserted to be well-formed (Max >= Min) and returned
%% unchanged.
normalize(Range) ->
    case Range of
        {'-inf','-inf'} -> {'-inf',-1};
        {'-inf','+inf'} -> any;
        {'+inf','+inf'} -> {0,'+inf'};
        {Min,Max} ->
            true = inf_ge(Max, Min),
            Range
    end.
%% Restrict V to the (possibly infinite) interval [Lo, Hi].
clamp(V, Lo, Hi) ->
    Raised = inf_max(V, Lo),
    inf_min(Raised, Hi).
%% Minimum of two bounds where the atom '-inf' is below every other
%% value ('+inf', being an atom, already compares greater than any
%% integer in the standard term order).
inf_min('-inf', _) -> '-inf';
inf_min(_, '-inf') -> '-inf';
inf_min(A, B) when A =< B -> A;
inf_min(A, B) when A > B -> B.
%% Maximum of two bounds where the atom '-inf' is below every other
%% value ('+inf', being an atom, already compares greater than any
%% integer in the standard term order).
inf_max(A, B) ->
    case {A, B} of
        {'-inf', _} -> B;
        {_, '-inf'} -> A;
        _ when A >= B -> A;
        _ -> B
    end.
%% Negate a bound: the two infinities swap sign and integers negate
%% arithmetically.
inf_neg('+inf') -> '-inf';
inf_neg('-inf') -> '+inf';
inf_neg(Int) -> -Int.
%% Add an integer offset N to a bound; an infinite bound absorbs the
%% addition unchanged.
inf_add(Bound, N) when is_integer(Bound) -> Bound + N;
inf_add(Bound, _N) -> Bound.
%% Arithmetic shift right of a possibly infinite value N by a possibly
%% infinite (and possibly negative) shift count. Shifting an infinity
%% leaves it unchanged.
inf_bsr('-inf', _S) ->
    '-inf';
inf_bsr('+inf', _S) ->
    '+inf';
inf_bsr(N, S0) when S0 =:= '-inf'; S0 < 0 ->
    %% A negative (or '-inf') shift count is really a left shift.
    %% Shifting left by ?NUM_BITS or more saturates to the infinity
    %% matching the sign of N. (When S0 is '-inf', S becomes '+inf',
    %% which, being an atom, satisfies `S >= ?NUM_BITS'.)
    S = inf_neg(S0),
    if
        S >= ?NUM_BITS, N < 0 -> '-inf';
        S >= ?NUM_BITS, N >= 0 -> '+inf';
        true -> N bsl S
    end;
inf_bsr(N, '+inf') ->
    %% Shifting right by an unbounded amount leaves only the sign:
    %% -1 for negative values, 0 otherwise.
    if
        N < 0 -> -1;
        N >= 0 -> 0
    end;
inf_bsr(N, S) when S >= 0 ->
    N bsr S.
%% Arithmetic shift left with infinity support: delegate to inf_bsr/2
%% with a negated shift count.
inf_bsl(Value, Shift) ->
    inf_bsr(Value, inf_neg(Shift)).
%% A < B under the bound ordering: '-inf' is below everything except
%% itself (the first alternative makes inf_lt('-inf', '-inf') false),
%% and '+inf', being an atom, compares greater than every integer in
%% the standard term order.
inf_lt(A, B) ->
    case {A, B} of
        {_, '-inf'} -> false;
        {'-inf', _} -> true;
        {_, _} -> A < B
    end.
%% A >= B under the bound ordering: everything (including '-inf'
%% itself) is >= '-inf', and '-inf' is >= nothing else.
inf_ge(A, B) ->
    case {A, B} of
        {_, '-inf'} -> true;
        {'-inf', _} -> false;
        {_, _} -> A >= B
    end.
%% A =< B under the bound ordering, expressed as B >= A.
inf_le(A, B) -> inf_ge(B, A).
inf_gt(A, B) -> inf_lt(B, A). | lib/compiler/src/beam_bounds.erl | 0.675444 | 0.433442 | beam_bounds.erl | starcoder |
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2021-2022 VMware, Inc. or its affiliates. All rights reserved.
%%
%% @doc Condition support.
%%
%% Conditions can be used in path patterns and `keep_while' conditions. They
%% allow to point to a specific node only if conditions are met, or to match
%% several tree nodes with a single path pattern.
%%
%% A condition is an Erlang record defining a specific property. Some of them
%% have arguments to further define the condition.
%%
%% All supported conditions are described in the <a href="#types">Data Types
%% section</a>.
-module(khepri_condition).
-include("include/khepri.hrl").
-include("src/internal.hrl").
-type comparison_op(Type) :: {eq, Type} |
{ne, Type} |
{lt, Type} |
{le, Type} |
{gt, Type} |
{ge, Type}.
%% Comparison operator in some {@link condition()}.
-type if_name_matches() :: #if_name_matches{}.
%% Condition. Evaluates to true if the name of the tested node matches the
%% condition pattern.
%%
%% Record fields:
%% <ul>
%% <li>`regex': defines the condition pattern. It can be either:
%% <ul>
%% <li>a regular expression</li>
%% <li>the atom `any' to match any node names; the equivalent of the `".*"'
%% regular expression but more efficient</li>
%% </ul></li>
%% </ul>
%%
%% Example:
%% ```
%% #if_name_matches{regex = "^user_"}.
%% #if_name_matches{regex = any}.
%% '''
-type if_path_matches() :: #if_path_matches{}.
%% Condition. Evaluates to true if the name of the tested node matches the
%% condition pattern. If it does not match, child node names are tested
%% recursively.
%%
%% Record fields:
%% <ul>
%% <li>`regex': defines the condition pattern. It can be either:
%% <ul>
%% <li>a regular expression</li>
%% <li>the atom `any' to match any node names; the equivalent of the `".*"'
%% regular expression but more efficient</li>
%% </ul></li>
%% </ul>
%%
%% Example:
%% ```
%% #if_path_matches{regex = "^user_"}.
%% #if_path_matches{regex = any}.
%% '''
-type if_has_data() :: #if_has_data{}.
%% Condition. Evaluates to true if the tested node's data payload presence
%% corresponds to the expected state.
%%
%% Record fields:
%% <ul>
%% <li>`has_data': boolean set to the expected presence of a data
%% payload.</li>
%% </ul>
%%
%% Data absence is either no payload or a non-data type of payload.
%%
%% Example:
%% ```
%% #if_has_data{has_data = false}.
%% '''
-type if_data_matches() :: #if_data_matches{}.
%% Condition. Evaluates to true if the tested node has a data payload and the
%% data payload term matches the given pattern.
%%
%% Record fields:
%% <ul>
%% <li>`pattern': an ETS-like match pattern.</li>
%% </ul>
%%
%% Example:
%% ```
%% #if_data_matches{pattern = {user, '_'}}.
%% '''
-type if_node_exists() :: #if_node_exists{}.
%% Condition. Evaluates to true if the tested node existence corresponds to
%% the expected state.
%%
%% Record fields:
%% <ul>
%% <li>`exists': boolean set to the expected presence of the node.</li>
%% </ul>
%%
%% Example:
%% ```
%% #if_node_exists{exists = false}.
%% '''
-type if_payload_version() :: #if_payload_version{}.
%% Condition. Evaluates to true if the tested node's payload version
%% corresponds to the expected value.
%%
%% Record fields:
%% <ul>
%% <li>`version': integer or {@link comparison_op()} to compare to the actual
%% payload version.</li>
%% </ul>
%%
%% Example:
%% ```
%% #if_payload_version{version = 1}.
%% #if_payload_version{version = {gt, 10}}.
%% '''
-type if_child_list_version() :: #if_child_list_version{}.
%% Condition. Evaluates to true if the tested node's child list version
%% corresponds to the expected value.
%%
%% Record fields:
%% <ul>
%% <li>`version': integer or {@link comparison_op()} to compare to the actual
%% child list version.</li>
%% </ul>
%%
%% Example:
%% ```
%% #if_child_list_version{version = 1}.
%% #if_child_list_version{version = {gt, 10}}.
%% '''
-type if_child_list_length() :: #if_child_list_length{}.
%% Condition. Evaluates to true if the tested node's child list size
%% corresponds to the expected value.
%%
%% Record fields:
%% <ul>
%% <li>`count': integer or {@link comparison_op()} to compare to the actual
%% child list length.</li>
%% </ul>
%%
%% Example:
%% ```
%% #if_child_list_length{count = 1}.
%% #if_child_list_length{count = {gt, 10}}.
%% '''
-type if_not() :: #if_not{}.
%% Condition. Evaluates to true if the inner condition evaluates to false.
%%
%% Record fields:
%% <ul>
%% <li>`condition': the inner condition to evaluate.</li>
%% </ul>
%%
%% Example:
%% ```
%% #if_not{condition = #if_name_matches{regex = "^a"}}.
%% '''
-type if_all() :: #if_all{}.
%% Condition. Evaluates to true if all inner conditions evaluate to true.
%%
%% Record fields:
%% <ul>
%% <li>`conditions': a list of inner conditions to evaluate.</li>
%% </ul>
%%
%% Example:
%% ```
%% #if_all{conditions = [#if_name_matches{regex = "^a"},
%% #if_has_data{has_data = true}]}.
%% '''
-type if_any() :: #if_any{}.
%% Condition. Evaluates to true if any of the inner conditions evaluate to
%% true.
%%
%% Record fields:
%% <ul>
%% <li>`conditions': a list of inner conditions to evaluate.</li>
%% </ul>
%%
%% Example:
%% ```
%% #if_any{conditions = [#if_name_matches{regex = "^a"},
%% #if_has_data{has_data = true}]}.
%% '''
-type condition() :: if_name_matches() |
if_path_matches() |
if_has_data() |
if_data_matches() |
if_node_exists() |
if_payload_version() |
if_child_list_version() |
if_child_list_length() |
if_not() |
if_all() |
if_any().
%% All supported conditions.
-type condition_using_regex() :: if_name_matches() |
if_path_matches().
-type condition_using_comparison_op() :: if_payload_version() |
if_child_list_version() |
if_child_list_length().
-type keep_while() :: #{khepri_path:path() => condition()}.
%% An association between a path and a condition. As long as the condition
%% evaluates to true, the tree node is kept. Once the condition evaluates to
%% false, the tree node is deleted.
-export([compile/1,
applies_to_grandchildren/1,
is_met/3,
is_valid/1]).
-ifdef(TEST).
-export([eval_regex/4,
compare_numerical_values/2]).
-endif.
-export_type([condition/0,
comparison_op/1,
keep_while/0]).
-spec compile(Condition) -> Condition when
Condition :: khepri_path:pattern_component().
%% @doc Preprocess properties inside some conditions to make them more
%% efficient.
%%
%% An example is the regular expression inside an {@link if_name_matches()}
%% condition.
%%
%% Conditions are also optimized if possible. An example is the replacement of
%% an {@link if_all()} condition and all its sub-conditions if one of them is
%% a specific node name. In this case, it is replaced by the node name
%% directly.
%%
%% @param Condition the condition to compile.
%%
%% @returns the same condition with all its properties preprocessed.
%%
%% @private
%% `any' needs no precompilation; eval_regex/4 short-circuits on it.
compile(#if_name_matches{regex = any} = Cond) ->
    Cond;
compile(#if_name_matches{regex = Re, compiled = undefined} = Cond) ->
    %% Cache the result of re:compile/1 (an {ok, MP} | {error, _}
    %% tuple) so eval_regex/4 does not recompile on every evaluation.
    Compiled = re:compile(Re),
    Cond#if_name_matches{compiled = Compiled};
compile(#if_data_matches{pattern = Pattern, compiled = undefined} = Cond) ->
    %% Precompile the ETS match spec consumed by term_matches/2.
    Compiled = ets:match_spec_compile([{Pattern, [], [match]}]),
    Cond#if_data_matches{compiled = Compiled};
compile(#if_not{condition = InnerCond} = Cond) ->
    InnerCond1 = compile(InnerCond),
    Cond#if_not{condition = InnerCond1};
compile(#if_all{conditions = InnerConds} = Cond) ->
    InnerConds1 = lists:map(fun compile/1, InnerConds),
    %% A single remaining condition replaces the #if_all{} wrapper.
    case optimize_if_all_conditions(InnerConds1) of
        [InnerCond] -> InnerCond;
        InnerConds2 -> Cond#if_all{conditions = InnerConds2}
    end;
compile(#if_any{conditions = InnerConds} = Cond) ->
    InnerConds1 = lists:map(fun compile/1, InnerConds),
    case optimize_if_any_conditions(InnerConds1) of
        [InnerCond] -> InnerCond;
        InnerConds2 -> Cond#if_any{conditions = InnerConds2}
    end;
%% NOTE(review): #if_path_matches{} regexes are not precompiled here;
%% they fall through to this catch-all and are compiled lazily by
%% eval_regex/4 on first use. Verify whether that is intentional.
compile(Cond) ->
    Cond.
-spec optimize_if_all_conditions([condition()]) -> [condition()].
%% @private
%% @hidden
%% Reorder an #if_all{} condition list so that plain path components
%% (cheap exact-name matches) end up at the front of the list.
optimize_if_all_conditions(Conds) ->
    optimize_if_all_conditions(Conds, []).
%% @private
%% @hidden
optimize_if_all_conditions([ChildName | Rest], Result)
  when ?IS_PATH_COMPONENT(ChildName) ->
    %% The path component exact match condition will become the first one
    %% tested.
    %% (Appending here — while other conditions are prepended —
    %% combined with the final lists:reverse/1 moves path components
    %% to the front of the returned list.)
    Result1 = Result ++ [ChildName],
    optimize_if_all_conditions(Rest, Result1);
optimize_if_all_conditions([Cond | Rest], Result) ->
    Result1 = [Cond | Result],
    optimize_if_all_conditions(Rest, Result1);
optimize_if_all_conditions([], Result) ->
    lists:reverse(Result).
-spec optimize_if_any_conditions([condition()]) -> [condition()].
%% @private
%% @hidden
%% Placeholder: no optimization is currently performed for #if_any{}
%% condition lists; they are returned unchanged.
optimize_if_any_conditions(Conds) ->
    Conds.
-spec applies_to_grandchildren(condition()) -> boolean().
%% @doc Returns true if a condition should be evaluated against child nodes
%% in addition to the current node.
%%
%% An example is the {@link if_path_matches()} condition. Combinators
%% ({@link if_not()}, {@link if_all()}, {@link if_any()}) apply to
%% grandchildren whenever any of their inner conditions does.
%%
%% @private
applies_to_grandchildren(Cond) ->
    case Cond of
        #if_path_matches{} ->
            true;
        #if_not{condition = InnerCond} ->
            applies_to_grandchildren(InnerCond);
        #if_all{conditions = InnerConds} ->
            lists:any(fun applies_to_grandchildren/1, InnerConds);
        #if_any{conditions = InnerConds} ->
            lists:any(fun applies_to_grandchildren/1, InnerConds);
        _ ->
            false
    end.
-spec is_met(Condition, PathOrChildName, Child) -> IsMet when
      Condition :: khepri_path:pattern_component(),
      PathOrChildName :: khepri_path:path() | khepri_path:component(),
      Child :: khepri_machine:tree_node() | khepri_machine:node_props(),
      IsMet :: true | IsNotMet1 | IsNotMet2,
      IsNotMet1 :: {false, khepri_path:pattern_component()},
      IsNotMet2 :: {false, {condition(), any()}}.
%% @doc Returns true if the given condition is met when evaluated against the
%% given tree node name and properties.
%%
%% @param Condition the condition to evaluate.
%% @param PathOrChildName the path or child name to consider.
%% @param Child the properties or the tree node.
%%
%% @returns true if the condition is met, or `{false, Cause}' (where
%% `Cause' identifies the failing condition) otherwise.
%%
%% @private
%% A full path is reduced to its last component before evaluation; the
%% empty path maps to the '' component.
is_met(Condition, Path, Child) when ?IS_PATH(Path) ->
    ChildName = case Path of
                    [] -> '';
                    _ -> lists:last(Path)
                end,
    is_met(Condition, ChildName, Child);
%% Plain path components: exact match, the ?THIS_NODE wildcard, or a
%% mismatch reported as {false, Component}.
is_met(ChildName, ChildName, _Child)
  when ?IS_PATH_COMPONENT(ChildName) ->
    true;
is_met(?THIS_NODE, _ChildNameB, _Child) ->
    true;
is_met(ChildNameA, _ChildNameB, _Child)
  when ?IS_PATH_COMPONENT(ChildNameA) ->
    {false, ChildNameA};
is_met(#if_node_exists{exists = true}, _ChildName, _Child) ->
    true;
is_met(#if_node_exists{exists = false} = Cond, _ChildName, _Child) ->
    {false, Cond};
is_met(
  #if_name_matches{regex = SourceRegex, compiled = CompiledRegex} = Cond,
  ChildName,
  _Child) ->
    eval_regex(Cond, SourceRegex, CompiledRegex, ChildName);
is_met(
  #if_path_matches{regex = SourceRegex, compiled = CompiledRegex} = Cond,
  ChildName,
  _Child) ->
    eval_regex(Cond, SourceRegex, CompiledRegex, ChildName);
%% #if_has_data{}: a data payload is only present on a tree node with
%% a #kpayload_data{} payload; any other shape counts as "no data".
is_met(#if_has_data{has_data = true},
       _ChildName, #node{payload = #kpayload_data{data = _}}) ->
    true;
is_met(#if_has_data{has_data = false} = Cond,
       _ChildName, #node{payload = #kpayload_data{data = _}}) ->
    {false, Cond};
is_met(#if_has_data{has_data = true} = Cond, _ChildName, _Child) ->
    {false, Cond};
is_met(#if_has_data{has_data = false}, _ChildName, _Child) ->
    true;
%% #if_data_matches{}: the data may come from a tree node record or
%% from a node-props map.
is_met(#if_data_matches{compiled = CompMatchSpec} = Cond,
       _ChildName, #node{payload = #kpayload_data{data = Data}}) ->
    case term_matches(Data, CompMatchSpec) of
        true -> true;
        false -> {false, Cond}
    end;
is_met(#if_data_matches{compiled = CompMatchSpec} = Cond,
       _ChildName, #{data := Data}) ->
    case term_matches(Data, CompMatchSpec) of
        true -> true;
        false -> {false, Cond}
    end;
%% Version/length conditions: each supports both the tree node record
%% and the node-props map representation.
is_met(#if_payload_version{version = DVersionB} = Cond, _ChildName,
       #node{stat = #{payload_version := DVersionA}}) ->
    compare_numerical_values(Cond, DVersionA, DVersionB);
is_met(#if_payload_version{version = DVersionB} = Cond, _ChildName,
       #{payload_version := DVersionA}) ->
    compare_numerical_values(Cond, DVersionA, DVersionB);
is_met(#if_child_list_version{version = CVersionB} = Cond, _ChildName,
       #node{stat = #{child_list_version := CVersionA}}) ->
    compare_numerical_values(Cond, CVersionA, CVersionB);
is_met(#if_child_list_version{version = CVersionB} = Cond, _ChildName,
       #{child_list_version := CVersionA}) ->
    compare_numerical_values(Cond, CVersionA, CVersionB);
is_met(#if_child_list_length{count = ExpectedCount} = Cond, _ChildName,
       #node{child_nodes = Children}) ->
    Count = maps:size(Children),
    compare_numerical_values(Cond, Count, ExpectedCount);
is_met(#if_child_list_length{count = ExpectedCount} = Cond, _ChildName,
       #{child_list_length := Count}) ->
    compare_numerical_values(Cond, Count, ExpectedCount);
%% Combinators. Note that a failing inner condition is reported as the
%% enclosing combinator for #if_not{} and #if_any{}.
is_met(#if_not{condition = InnerCond} = Cond, ChildName, Child) ->
    case is_met(InnerCond, ChildName, Child) of
        true -> {false, Cond};
        {false, _} -> true
    end;
is_met(#if_all{conditions = []}, _ChildName, _Child) ->
    true;
%% Once a {false, _} accumulates, it is carried through the remaining
%% folds (the list is still fully traversed, but no condition after
%% the first failure is evaluated).
is_met(#if_all{conditions = Conds}, ChildName, Child) ->
    lists:foldl(
      fun
          (_, {false, _} = False) -> False;
          (Cond, _) -> is_met(Cond, ChildName, Child)
      end, true, Conds);
is_met(#if_any{conditions = []} = Cond, _ChildName, _Child) ->
    {false, Cond};
is_met(#if_any{conditions = Conds} = IfAnyCond, ChildName, Child) ->
    Ret = lists:foldl(
            fun
                (_, true) -> true;
                (Cond, _) -> is_met(Cond, ChildName, Child)
            end, {false, undefined}, Conds),
    case Ret of
        true -> true;
        {false, _} -> {false, IfAnyCond}
    end;
%% Anything else is not a recognized/met condition.
is_met(Cond, _, _) ->
    {false, Cond}.
-spec term_matches(Term, MatchSpec) -> Matches when
      Term :: khepri_machine:data(),
      MatchSpec :: ets:comp_match_spec(),
      Matches :: boolean().
%% @doc Returns true if the given compiled match spec matches the
%% given term.
%%
%% The compiled spec is built by compile/1 from a match pattern whose
%% body is the single literal `match'.
%%
%% @private
%% @hidden
term_matches(Term, MatchSpec) ->
    ets:match_spec_run([Term], MatchSpec) =:= [match].
-spec eval_regex(Condition, SourceRegex, CompiledRegex, Value) -> Ret when
      Condition :: condition_using_regex(),
      SourceRegex :: any | iodata() | unicode:charlist(),
      CompiledRegex :: {ok, re_mp()} |
                       {error, {string(), non_neg_integer()}} |
                       undefined,
      Value :: atom() | iodata() | unicode:charlist(),
      Ret :: true |
             {false, condition_using_regex()} |
             {false, {condition_using_regex(),
                      {error,
                       match_limit |
                       match_limit_recursion |
                       {string(), non_neg_integer()}}}}.
%% @doc Returns true if the given regular expression matches the given string.
%%
%% Clause order matters: atom values are converted to strings before
%% the compiled-regex clauses can apply (their `not is_atom' guards
%% skip atoms), and a still-uncompiled regex (`undefined') is compiled
%% lazily on first evaluation.
%%
%% @private
%% @hidden
%% `any' matches every name without running a regex.
eval_regex(_Cond, any, _CompiledRegex, _Value) ->
    true;
eval_regex(Cond, _SourceRegex, {ok, Regex}, Value)
  when not is_atom(Value) ->
    case re:run(Value, Regex, [{capture, none}]) of
        match -> true;
        nomatch -> {false, Cond};
        Error -> {false, {Cond, Error}}
    end;
%% A regex that failed to compile makes the condition fail, carrying
%% the compilation error.
eval_regex(Cond, _SourceRegex, {error, _} = Error, Value)
  when not is_atom(Value) ->
    {false, {Cond, Error}};
eval_regex(Cond, SourceRegex, CompiledRegex, Value) when is_atom(Value) ->
    eval_regex(Cond, SourceRegex, CompiledRegex, atom_to_list(Value));
eval_regex(Cond, SourceRegex, undefined, Value) ->
    Compiled = re:compile(SourceRegex),
    eval_regex(Cond, SourceRegex, Compiled, Value).
-spec compare_numerical_values(Cond, ValueA, ValueB) -> Equal when
      Cond :: condition_using_comparison_op(),
      ValueA :: non_neg_integer(),
      ValueB :: non_neg_integer() | comparison_op(non_neg_integer()),
      Equal :: true | {false, condition_using_comparison_op()}.
%% @private
%% @hidden
%% Wrap the plain boolean comparison into the condition-evaluation
%% protocol: a failed comparison carries the unsatisfied condition.
compare_numerical_values(Cond, ValueA, ValueB) ->
    case compare_numerical_values(ValueA, ValueB) of
        true -> true;
        false -> {false, Cond}
    end.
-spec compare_numerical_values(ValueA, ValueB) -> Equal when
      ValueA :: non_neg_integer(),
      ValueB :: non_neg_integer() | comparison_op(non_neg_integer()),
      Equal :: boolean().
%% @private
%% @hidden
%% ValueB is either a plain value (tested for equality) or an
%% {Op, Value} comparison-operator tuple; anything else is false.
compare_numerical_values(Value, Value) ->
    true;
compare_numerical_values(Value, {eq, Value}) ->
    true;
compare_numerical_values(ValueA, {Op, ValueB})
  when Op =:= ne; Op =:= lt; Op =:= le; Op =:= gt; Op =:= ge ->
    compare_with_op(Op, ValueA, ValueB);
compare_numerical_values(_, _) ->
    false.
%% Apply one comparison operator.
compare_with_op(ne, A, B) -> A =/= B;
compare_with_op(lt, A, B) -> A < B;
compare_with_op(le, A, B) -> A =< B;
compare_with_op(gt, A, B) -> A > B;
compare_with_op(ge, A, B) -> A >= B.
-spec is_valid(Condition) -> IsValid when
      Condition :: khepri_path:pattern_component(),
      IsValid :: true | {false, khepri_path:pattern_component()}.
%% @doc Returns true if the condition's properties are valid.
%%
%% For instance, the function verifies that {@link if_node_exists()} takes a
%% boolean().
%%
%% @param Condition the condition to verify.
%%
%% @returns true if the condition's properties are valid, a falsy
%% value otherwise.
%%
%% @private
is_valid(Component) when ?IS_PATH_COMPONENT(Component) ->
    true;
is_valid(?THIS_NODE) ->
    true;
%% NOTE(review): the boolean-field clauses below return a bare
%% `false' on failure, while the spec says {false, Condition};
%% verify how callers treat the result.
is_valid(#if_node_exists{exists = Exists}) ->
    is_boolean(Exists);
is_valid(#if_name_matches{}) ->
    true;
is_valid(#if_path_matches{}) ->
    true;
is_valid(#if_has_data{has_data = HasData}) ->
    is_boolean(HasData);
is_valid(#if_data_matches{}) ->
    true;
is_valid(#if_payload_version{}) ->
    true;
is_valid(#if_child_list_version{}) ->
    true;
is_valid(#if_child_list_length{}) ->
    true;
is_valid(#if_not{condition = InnerCond}) ->
    is_valid(InnerCond);
%% Validate inner conditions recursively; the first invalid one is
%% carried through the remaining folds unchanged.
is_valid(#if_all{conditions = Conds}) when is_list(Conds) ->
    lists:foldl(
      fun
          (_, {false, _} = False) -> False;
          (Cond, _) -> is_valid(Cond)
      end, true, Conds);
is_valid(#if_any{conditions = Conds}) when is_list(Conds) ->
    lists:foldl(
      fun
          (_, {false, _} = False) -> False;
          (Cond, _) -> is_valid(Cond)
      end, true, Conds);
is_valid(Cond) ->
{false, Cond}. | src/khepri_condition.erl | 0.770853 | 0.458288 | khepri_condition.erl | starcoder |
%% @doc Observing a metric involves periodically recording a snapshot of
%% a scalar metric in the form of a histogram. These snapshots are recorded
%% over a pre-configured interval. Each histogram will be registered
%% automatically with the `metrics_reader'.
%% Currently, only the folsom backend is supported but it is possible to extend
%% the idea to any metrics backend using the erlang-metrics library
%% interface here:
%% https://github.com/benoitc/erlang-metrics
%% Warning: observing a metric will clear its current value whenever a
%% snapshot is recorded.
%% @end
-module(metrics_observer).
-behaviour(gen_server).
-include("metrics_reader.hrl").
%% API
-export([start_link/0,
observe/2,
unobserve/1,
observed/0]).
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3]).
-define(SERVER, ?MODULE).
-record(state, {running = false :: boolean(),
observations = sets:new(),
slide_interval :: pos_integer(),
acc_interval :: pos_integer(),
timer_ref :: reference()}).
-type metric_name() :: any().
-type histogram_name() :: any().
-type state() :: #state{}.
%%%===================================================================
%%% API
%%%===================================================================
-spec start_link() -> gen_server_startlink_ret().
%% @doc Start the observer as a locally registered singleton gen_server.
start_link() ->
    gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
-spec observe(metric_name(), histogram_name()) -> ok.
%% @doc Start periodically snapshotting the metric `Name' into the
%% histogram `HistogramName'. Synchronous call to the server.
observe(Name, HistogramName) ->
    gen_server:call(?SERVER, {observe, Name, HistogramName}).
-spec unobserve(metric_name()) -> ok.
%% @doc Stop observing `Name' and tear down its histogram.
%% NOTE(review): the server-side match crashes if `Name' is not
%% currently observed — verify callers only unobserve known metrics.
unobserve(Name) ->
    gen_server:call(?SERVER, {unobserve, Name}).
-spec observed() -> list().
%% @doc Return the names of all currently observed metrics.
observed() ->
    gen_server:call(?SERVER, observed).
%%%===================================================================
%%% gen_server callbacks
%%%===================================================================
-spec init([]) -> {ok, state()}.
%% @private Read the configurable intervals: the histogram's sliding
%% window (seconds) and the counter-snapshot interval (configured in
%% seconds, stored in milliseconds for erlang:send_after/3).
init([]) ->
    %% We want a high priority to ensure reporting accuracy
    process_flag(priority, high),
    SInterval = metrics_reader_helper:opt(histogram_slide_interval_sec, 60),
    AInterval = metrics_reader_helper:opt(histogram_acc_interval_sec, 1),
    {ok, #state{slide_interval = SInterval,
                acc_interval = AInterval * 1000}}.
-spec handle_call(any(), any(), state()) -> {reply, term(), state()}.
%% @private
%% First observation: arm the accumulation timer before recording it.
handle_call({observe, Name, HistogramName}, _From,
            State = #state{running = false, acc_interval = AccInterval}) ->
    State1 = do_observe({Name, HistogramName}, State),
    TRef = erlang:send_after(AccInterval, self(), tick),
    {reply, ok, State1#state{running = true, timer_ref = TRef}};
%% Subsequent observations reuse the already-running timer.
handle_call({observe, Name, HistogramName}, _From,
            State = #state{running = true}) ->
    State1 = do_observe({Name, HistogramName}, State),
    {reply, ok, State1};
%% Remove an observation; when the last one goes away, stop the timer.
%% NOTE: the match below crashes (badmatch) if Name is not currently
%% observed, or if it was observed under several histogram names.
handle_call({unobserve, Name}, _From,
            State = #state{running = true,
                           timer_ref = TRef,
                           observations = OSet}) ->
    [{Name, Histogram}] =
        [{N, H} || {N, H} <- sets:to_list(OSet), N =:= Name],
    State1 = do_unobserve({Name, Histogram}, State),
    #state{observations = OSet1} = State1,
    %% `sets' is an opaque type with an undefined representation, so
    %% an emptied set must not be compared against a fresh sets:new()
    %% with pattern matching; test emptiness via sets:size/1 instead.
    case sets:size(OSet1) of
        0 ->
            erlang:cancel_timer(TRef),
            {reply, ok, State1#state{running = false,
                                     timer_ref = undefined}};
        _ ->
            {reply, ok, State1}
    end;
handle_call(observed, _From, State = #state{observations = OSet}) ->
    Reply = [Name || {Name, _HistogramName} <- sets:to_list(OSet)],
    {reply, Reply, State};
%% Catch-all: unknown requests (including unobserve while not
%% running) are acknowledged with ok and otherwise ignored.
handle_call(_Request, _From, State) ->
    {reply, ok, State}.
-spec handle_cast(any(), state()) -> {noreply, state()}.
%% @private All casts are ignored; this server's API is call-based.
handle_cast(_Msg, State) ->
    {noreply, State}.
-spec handle_info(any(), state()) -> {noreply, state()} |
                                     {stop, any(), state()}.
%% @private On each tick, snapshot every observed counter into its
%% histogram, reset the counter, and re-arm the timer.
%% NOTE(review): get_value and clear are two separate folsom calls, so
%% counter increments arriving in between are dropped — verify this
%% loss is acceptable for the intended sampling accuracy.
handle_info(tick, State = #state{running = true,
                                 observations = OSet,
                                 acc_interval = AccInterval}) ->
    lists:foreach(fun ({Name, HistogramName}) ->
                          Value = folsom_metrics_counter:get_value(Name),
                          folsom_metrics:notify({HistogramName, Value}),
                          folsom_metrics_counter:clear(Name)
                  end, sets:to_list(OSet)),
    TRef = erlang:send_after(AccInterval, self(), tick),
    {noreply, State#state{timer_ref = TRef}};
handle_info(_Info, State) ->
    {noreply, State}.
-spec terminate(any(), any()) -> ok.
%% @private Tear down all remaining observations, then stop the tick
%% timer if one was ever armed.
terminate(_Reason, State = #state{timer_ref = TRef,
                                  observations = OSet}) ->
    State1 = lists:foldl(fun do_unobserve/2, State, sets:to_list(OSet)),
    %% Sanity check through sets:size/1: `sets' is opaque with an
    %% undefined representation, so an emptied set must not be
    %% pattern-matched against a freshly built sets:new().
    #state{observations = OSet1} = State1,
    0 = sets:size(OSet1),
    %% timer_ref is `undefined' unless at least one metric was being
    %% observed; erlang:cancel_timer/1 raises badarg on non-refs.
    case TRef of
        undefined -> ok;
        _ -> _ = erlang:cancel_timer(TRef), ok
    end.
-spec code_change(any(), state(), any()) -> {ok, state()}.
%% code_change/3 — hot-upgrade hook; the state layout is unchanged across
%% versions, so the state is passed through untouched.
code_change(_OldVsn, CurrentState, _Extra) ->
    {ok, CurrentState}.
%%%===================================================================
%%% Internal functions
%%%===================================================================
%% Set up one observation: make sure a sliding-window histogram exists,
%% register it with the metrics reader, and record the (counter name,
%% histogram name) pair in the observation set. Idempotent with respect
%% to the observation set (sets:add_element/2).
do_observe({Name, HistogramName},
           State = #state{slide_interval = SlideInterval,
                          observations = OSet}) ->
    %% NOTE(review): this queries the metric info of Name (the counter)
    %% but decides whether to create HistogramName — it looks like it
    %% should query HistogramName instead; confirm against folsom usage.
    case folsom_metrics:get_metric_info(Name) of
        [{_, [{type, histogram}]}] ->
            ok;
        _ ->
            folsom_metrics:new_histogram(HistogramName, slide, SlideInterval)
    end,
    metrics_reader:register(HistogramName),
    OSet1 = sets:add_element({Name, HistogramName}, OSet),
    State#state{observations = OSet1}.
%% Tear down one observation: deregister and delete the histogram, then
%% drop the (counter name, histogram name) pair from the observation set.
%% Also usable as a foldl/3 fun (see terminate/2).
do_unobserve({Name, HistogramName},
             State = #state{observations = OSet}) ->
    metrics_reader:deregister(HistogramName),
    folsom_metrics:delete_metric(HistogramName),
    OSet1 = sets:del_element({Name, HistogramName}, OSet),
    State#state{observations = OSet1}. | src/metrics_observer.erl | 0.708011 | 0.558748 | metrics_observer.erl | starcoder
-module(pvc_ring).
-include("pvc.hrl").
%% API
-export([grb_replica_info/3,
random_indexnode/1,
get_key_indexnode/3,
size/1]).
%% @doc Raw ring structure returned from antidote
%%
%% Nodes are in erlang format, i.e. node_name@ip_address
-type raw_ring() :: list({partition_id(), node()}).
%% @doc Fixed ring structured used to route protocol requests
%%
%% Uses a tuple-based structure to enable index accesses
%% in constant time.
%%
-type fixed_ring() :: tuple().
-record(ring, {
size :: non_neg_integer(),
fixed_ring :: fixed_ring()
}).
-opaque ring() :: #ring{}.
-export_type([ring/0]).
%% @doc Given an address and port, get the replica info from that node
%%
%% Returns the layout of the ring where the given node lives,
%% as well as the replica identifier from the cluster.
%%
-spec grb_replica_info(Address :: node_ip(),
                       Port :: inet:port_number(),
                       LenBits :: non_neg_integer()) -> {ok, inet:ip_address(), term(), ring(), unique_nodes()}
                                                      | socket_error().
grb_replica_info(Address, Port, LenBits) ->
    case gen_tcp:connect(Address, Port, ?UTIL_CONN_OPTS) of
        {error, Reason} ->
            {error, Reason};
        {ok, Sock} ->
            %% Remember which local interface reached the node; returned
            %% to the caller alongside the ring.
            {ok, {LocalIP, _}} = inet:sockname(Sock),
            %% Wire framing: LenBits of zero padding followed by the
            %% protobuf-encoded connect request.
            ok = gen_tcp:send(Sock, <<0:LenBits, (ppb_grb_driver:connect())/binary>>),
            Reply = case gen_tcp:recv(Sock, 0) of
                {error, Reason} ->
                    {error, Reason};
                {ok, <<0:LenBits, RawReply/binary>>} ->
                    {ok, ReplicaID, RingSize, RawRing} = pvc_proto:decode_serv_reply(RawReply),
                    UniqueNodes = unique_ring_nodes(RawRing),
                    FixedRing = make_fixed_ring(RingSize, RawRing),
                    {ok, LocalIP, ReplicaID, #ring{size=RingSize, fixed_ring=FixedRing}, UniqueNodes}
            end,
            %% One-shot socket: closed regardless of success or failure.
            ok = gen_tcp:close(Sock),
            Reply
    end.
-spec random_indexnode(ring()) -> index_node().
%% @doc Pick a uniformly random index node from the ring.
%%
%% rand:uniform(Size) yields 1..Size, covering every slot of the fixed
%% ring tuple. The previous rand:uniform(Size - 1) could never select
%% the last slot and raised badarg on a single-entry ring (uniform(0)).
random_indexnode(#ring{size=Size, fixed_ring=Layout}) ->
    Pos = rand:uniform(Size),
    erlang:element(Pos, Layout).
-spec get_key_indexnode(ring(), term(), term()) -> index_node().
%% @doc Route Key (scoped by Bucket) to its owning index node: normalize
%% the key to an integer, wrap it onto [1..Size], and read that slot of
%% the fixed ring tuple.
get_key_indexnode(#ring{size=Size, fixed_ring=Layout}, Key, Bucket) ->
    Slot = (convert_key(Key, Bucket) rem Size) + 1,
    element(Slot, Layout).
-spec size(ring()) -> non_neg_integer().
%% Number of partitions in the ring (the arity of the fixed ring tuple).
size(#ring{size=Size}) ->
    Size.
%%====================================================================
%% Routing Internal functions
%%====================================================================
-spec convert_key(term(), term()) -> non_neg_integer().
%% Normalize an arbitrary key to a non-negative routing integer.
%% Integers use their magnitude, binaries try a decimal parse first,
%% tuples are routed by their first element, and everything else is
%% hashed. Guard clauses replace the original if-expression.
convert_key(Key, _Bucket) when is_integer(Key) ->
    convert_key_int(Key);
convert_key(Key, Bucket) when is_binary(Key) ->
    convert_key_binary(Key, Bucket);
convert_key(Key, Bucket) when is_tuple(Key) ->
    convert_key(element(1, Key), Bucket);
convert_key(Key, Bucket) ->
    convert_key_hash(Key, Bucket).
-spec convert_key_int(integer()) -> non_neg_integer().
%% Magnitude of an integer key: negatives are flipped, the rest pass
%% through unchanged (equivalent to abs/1).
convert_key_int(N) when N < 0 -> -N;
convert_key_int(N) -> N.
-spec convert_key_binary(binary(), term()) -> non_neg_integer().
%% Convert a binary key: a binary holding a decimal integer maps to that
%% integer's magnitude; anything else falls back to hashing.
%%
%% Uses try/catch around binary_to_integer/1 instead of the legacy
%% `catch list_to_integer(binary_to_list(Bin))' expression, which
%% conflated exception terms with ordinary values and forced an
%% intermediate list allocation.
convert_key_binary(Bin, Bucket) ->
    try
        convert_key_int(binary_to_integer(Bin))
    catch
        error:badarg ->
            convert_key_hash(Bin, Bucket)
    end.
-spec convert_key_hash(term(), term()) -> non_neg_integer().
%% Hash the {Bucket, Key} pair with SHA-1 (mirrors riak_core's scheme)
%% and take the magnitude of the resulting big integer.
convert_key_hash(Key, Bucket) ->
    Digest = crypto:hash(sha, term_to_binary({Bucket, Key})),
    abs(crypto:bytes_to_integer(Digest)).
%%====================================================================
%% Partition Internal functions
%%====================================================================
%% @doc Get an unique list of the ring owning IP addresses
%% Extracts the node of every ring entry, maps it to its IP atom, and
%% dedupes/sorts via ordsets.
-spec unique_ring_nodes(raw_ring()) -> unique_nodes().
unique_ring_nodes(Ring) ->
    ordsets:from_list([erlang_node_to_ip(Node) || {_Partition, Node} <- Ring]).
%% @doc Get IP address from an erlang node name
%% Splits 'name@ip' on the "@" and returns the IP part as an atom.
%% NOTE(review): binary_to_atom/2 creates atoms from input; fine for a
%% bounded cluster membership list, unsafe for untrusted data.
-spec erlang_node_to_ip(atom()) -> node_ip().
erlang_node_to_ip(Node) ->
    NodeBin = atom_to_binary(Node, latin1),
    [_Name, IpBin] = binary:split(NodeBin, <<"@">>),
    binary_to_atom(IpBin, latin1).
%% @doc Convert a raw riak ring into a fixed tuple structure
%% Slots not covered by RawRing are initialized to the atom 'ignore'.
-spec make_fixed_ring(non_neg_integer(), raw_ring()) -> fixed_ring().
make_fixed_ring(Size, RawRing) ->
    erlang:make_tuple(Size, ignore, index_ring(RawRing)).
%% @doc Converts a raw Antidote ring into an indexed structure
%%
%% Adds a 1-based index to each entry, plus converts Erlang
%% nodes to an IP address, for easier matching with connection
%% sockets
%%
-spec index_ring(
    RawRing :: raw_ring()
) -> [{non_neg_integer(), {partition_id(), node_ip()}}].
index_ring(RawRing) ->
    index_ring(RawRing, 1, []).

%% Tail-recursive worker: N is the next 1-based slot index; the
%% accumulator is built in reverse and flipped once at the end.
index_ring([], _, Acc) ->
    lists:reverse(Acc);
index_ring([{Partition, ErlangNode} | Rest], N, Acc) ->
    Converted = {N, {Partition, erlang_node_to_ip(ErlangNode)}},
    index_ring(Rest, N + 1, [Converted | Acc]). | src/pvc_ring.erl | 0.507568 | 0.418994 | pvc_ring.erl | starcoder
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(kai_hash_SUITE).
-compile(export_all).
-include("kai.hrl").
-include("kai_test.hrl").
all() -> [test_hash]. % test_hash_efficiency
% Hash ring of test1:
%
% 0 bucket-0 [3,2,1] -> [3,1,4]
% 98,830,170 item-4
%
% 536,870,912 bucket-1 [3,2,1] -> [3,1,4]
% 953,414,533 item-2
%
% 1,073,741,824 bucket-2 [3,2,1] -> [3,1,4]
% 1,258,937,939 NODE3(2)
%
% 1,610,612,736 bucket-3 [2,1,4] -> [1,4,3]
% 1,704,004,111 item-3
% 1,981,805,867 item-1
%
% 2,147,483,648 bucket-4 [2,1,4] -> [1,4,3]
% 2,203,089,259 NODE2(1)
% 2,311,136,591 NODE1(2)
% 2,365,722,681 NODE4(2)
%
% 2,684,354,560 bucket-5 [2,4,1] -> [4,1,3]
% 2,772,605,746 NODE2(2)
% 2,978,498,268 NODE4(1)
%
% 3,221,225,472 bucket-6 [1,3,2] -> [1,3,4]
% 3,495,790,055 NODE1(1)
%
% 3,758,096,384 bucket-7 [3,2,1] -> [3,1,4]
% 4,264,647,116 NODE3(1)
test_hash() -> [].
%% End-to-end test of the consistent-hashing API on a small ring
%% (8 buckets, 2 virtual nodes per node, n = 3): start with NODE1 only,
%% add NODE2..NODE4, then remove NODE2, checking node/virtual-node/bucket
%% layouts at every step. The expected hash values correspond to the ring
%% diagram in the comment block above.
test_hash(_Conf) ->
    kai_config:start_link([
        {hostname, "localhost"},
        {rpc_port, 11011},
        {n, 3},
        {number_of_buckets, 8},
        {number_of_virtual_nodes, 2}
    ]),
    kai_hash:start_link(),
    %% -- Phase 1: single node owns every bucket --
    {node_info, ?NODE1, Info1} = kai_hash:node_info(),
    ?assertEqual(?INFO, Info1),
    {node_info, ?NODE1, Info1} = kai_hash:node_info(?NODE1),
    ?assertEqual(?INFO, Info1),
    {node_list, NodeList1} = kai_hash:node_list(),
    ?assertEqual([?NODE1], NodeList1),
    {virtual_node_list, VirtualNodeList1} = kai_hash:virtual_node_list(),
    ?assertEqual(
       [{2311136591, ?NODE1},
        {3495790055, ?NODE1}],
       VirtualNodeList1
      ),
    {bucket_list, BucketList1} = kai_hash:bucket_list(),
    ?assertEqual(
       [{0, [?NODE1]},
        {1, [?NODE1]},
        {2, [?NODE1]},
        {3, [?NODE1]},
        {4, [?NODE1]},
        {5, [?NODE1]},
        {6, [?NODE1]},
        {7, [?NODE1]}],
       BucketList1
      ),
    {buckets, Buckets1} = kai_hash:buckets(),
    ?assertEqual([0,1,2,3,4,5,6,7], Buckets1),
    %% -- Phase 2: add NODE2..NODE4; buckets are redistributed --
    {replaced_buckets, ReplacedBuckets2} =
        kai_hash:update_nodes([{?NODE2, ?INFO}, {?NODE3, ?INFO}, {?NODE4, ?INFO}],
                              []),
    ?assertEqual(
       [{0,3,1}, {1,3,1}, {2,3,1}, {3,2,1}, {4,2,1}, {5,3,1}, {7,3,1}],
       ReplacedBuckets2
      ),
    {node_list, NodeList2} = kai_hash:node_list(),
    ?assertEqual(4, length(NodeList2)),
    ?assert(lists:member(?NODE2, NodeList2)),
    ?assert(lists:member(?NODE3, NodeList2)),
    ?assert(lists:member(?NODE4, NodeList2)),
    {virtual_node_list, VirtualNodeList2} = kai_hash:virtual_node_list(),
    ?assertEqual(
       [{1258937939, ?NODE3},
        {2203089259, ?NODE2},
        {2311136591, ?NODE1},
        {2365722681, ?NODE4},
        {2772605746, ?NODE2},
        {2978498268, ?NODE4},
        {3495790055, ?NODE1},
        {4264647116, ?NODE3}],
       VirtualNodeList2
      ),
    {bucket_list, BucketList2} = kai_hash:bucket_list(),
    ?assertEqual(
       [{0, [?NODE3, ?NODE2, ?NODE1]},
        {1, [?NODE3, ?NODE2, ?NODE1]},
        {2, [?NODE3, ?NODE2, ?NODE1]},
        {3, [?NODE2, ?NODE1, ?NODE4]},
        {4, [?NODE2, ?NODE1, ?NODE4]},
        {5, [?NODE2, ?NODE4, ?NODE1]},
        {6, [?NODE1, ?NODE3, ?NODE2]},
        {7, [?NODE3, ?NODE2, ?NODE1]}],
       BucketList2
      ),
    {buckets, Buckets2} = kai_hash:buckets(),
    ?assertEqual([0,1,2,3,4,5,6,7], Buckets2),
    %% Key lookup / replica resolution on the 4-node ring.
    {bucket, Bucket1} = kai_hash:find_bucket("item-1"),
    ?assertEqual(3, Bucket1),
    {replica, Replica1} = kai_hash:find_replica(Bucket1),
    ?assertEqual(2, Replica1),
    {nodes, Nodes1} = kai_hash:find_nodes(Bucket1),
    ?assertEqual([?NODE2, ?NODE1, ?NODE4], Nodes1),
    {nodes, Nodes2} = kai_hash:find_nodes("item-1"),
    ?assertEqual([?NODE2, ?NODE1, ?NODE4], Nodes2),
    {nodes, Nodes3} = kai_hash:find_nodes("item-2"),
    ?assertEqual([?NODE3, ?NODE2, ?NODE1], Nodes3),
    %% Random choice never returns the local node (?NODE1).
    {node, Node1} = kai_hash:choose_node_randomly(),
    ?assertNot(Node1 == ?NODE1),
    {bucket, Bucket2} = kai_hash:choose_bucket_randomly(),
    ?assert((Bucket2 >= 0) or (Bucket2 < 8)), % TODO: choose it from my buckets
    %% -- Phase 3: remove NODE2; its buckets are taken over --
    {replaced_buckets, ReplacedBuckets3} = kai_hash:update_nodes([], [?NODE2]),
    ?assertEqual(
       [{0,2,3}, {1,2,3}, {2,2,3}, {3,1,2}, {4,1,2}, {5,2,3}, {7,2,3}],
       ReplacedBuckets3
      ),
    {node_list, NodeList3} = kai_hash:node_list(),
    ?assertEqual(3, length(NodeList3)),
    ?assertNot(lists:member(?NODE2, NodeList3)),
    {virtual_node_list, VirtualNodeList3} = kai_hash:virtual_node_list(),
    ?assertEqual(
       [{1258937939, ?NODE3},
        {2311136591, ?NODE1},
        {2365722681, ?NODE4},
        {2978498268, ?NODE4},
        {3495790055, ?NODE1},
        {4264647116, ?NODE3}],
       VirtualNodeList3
      ),
    {bucket_list, BucketList3} = kai_hash:bucket_list(),
    ?assertEqual(
       [{0, [?NODE3, ?NODE1, ?NODE4]},
        {1, [?NODE3, ?NODE1, ?NODE4]},
        {2, [?NODE3, ?NODE1, ?NODE4]},
        {3, [?NODE1, ?NODE4, ?NODE3]},
        {4, [?NODE1, ?NODE4, ?NODE3]},
        {5, [?NODE4, ?NODE1, ?NODE3]},
        {6, [?NODE1, ?NODE3, ?NODE4]},
        {7, [?NODE3, ?NODE1, ?NODE4]}],
       BucketList3
      ),
    {buckets, Buckets3} = kai_hash:buckets(),
    ?assertEqual([0,1,2,3,4,5,6,7], Buckets3),
    {nodes, Nodes4} = kai_hash:find_nodes("item-1"),
    ?assertEqual([?NODE1, ?NODE4, ?NODE3], Nodes4),
    {nodes, Nodes5} = kai_hash:find_nodes("item-2"),
    ?assertEqual([?NODE3, ?NODE1, ?NODE4], Nodes5),
    kai_hash:stop(),
    kai_config:stop().
test_hash_efficiency() -> [].
%% Micro-benchmark of the hash-ring operations on a simulated 64-node
%% network (16,384 buckets = 128 virtual nodes * 64 nodes * 2). Each
%% operation must finish under a hard-coded wall-clock budget (usec).
%% Currently disabled in all/0.
test_hash_efficiency(_Conf) ->
    io:format("simulate network of 64 nodes"),
    kai_config:start_link([
        {hostname, "localhost"},
        {rpc_port, 1},
        {n, 3},
        {number_of_buckets, 16384}, % 16,384 = 128*64*2
        {number_of_virtual_nodes, 128}
    ]),
    kai_hash:start_link(),
    %% Nodes 2..63 join at once; node 64 is added separately to time it.
    Nodes =
        lists:map(
          fun(Port) -> {{{127,0,0,1}, Port}, ?INFO} end,
          lists:seq(2, 63)
         ),
    kai_hash:update_nodes(Nodes, []),
    Args = [[{{{127,0,0,1}, 64}, [{number_of_virtual_nodes, 128}]}], []],
    {Usec, _} = timer:tc(kai_hash, update, Args),
    io:format("time to add a node: ~p [usec]", [Usec]),
    ?assert(Usec < 300000),
    {Usec2, _} = timer:tc(kai_hash, find, ["item-1", 1]),
    io:format("time to find a node: ~p [usec]", [Usec2]),
    ?assert(Usec2 < 1000),
    {Usec3, _} = timer:tc(kai_hash, choose_node_randomly, []),
    io:format("time to choose a node randomly: ~p [usec]", [Usec3]),
    ?assert(Usec3 < 1000),
    {Usec4, _} = timer:tc(kai_hash, choose_bucket_randomly, []),
    io:format("time to choose a bucket randomly: ~p [usec]", [Usec4]),
    ?assert(Usec4 < 300000),
    {Usec5, _} = timer:tc(kai_hash, update, [[], [{{127,0,0,1}, 1}]]),
    io:format("time to remove a node: ~p [usec]", [Usec5]),
    ?assert(Usec5 < 300000),
    kai_hash:stop(),
    kai_config:stop(). | test/kai_hash_SUITE.erl | 0.62498 | 0.443118 | kai_hash_SUITE.erl | starcoder
%% Copyright 2019 Octavo Labs AG Zurich Switzerland (http://octavolabs.com)
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(vmq_storage_engine_dets).
-export([open/2, close/1, write/2, read/2, fold/3, fold/4]).
-record(state, {ref}).
%% Open (creating if necessary) the dets table backing this storage
%% engine, stored as a single "bucket.dets" file under DataRoot.
%% Returns {ok, State} or the dets error tuple.
open(DataRoot, _Opts) ->
    File = filename:join(DataRoot, "bucket.dets"),
    %% Fail fast if the directory cannot be created: the previous code
    %% discarded the result of filelib:ensure_dir/1, deferring the failure
    %% to a less specific dets:open_file/2 error.
    ok = filelib:ensure_dir(File),
    case dets:open_file(File, []) of
        {ok, Ref} ->
            {ok, #state{ref=Ref}};
        Error ->
            Error
    end.
%% Flush and close the underlying dets table.
close(#state{ref=Ref}) ->
    dets:close(Ref).
%% Apply a batch of write operations ({put, Key, Val} | {delete, Key})
%% in order against the dets table. Crashes on the first failing op.
write(#state{ref=Ref}, WriteOps) ->
    lists:foreach(fun(Op) -> apply_write(Ref, Op) end, WriteOps).

%% Apply one write operation to the dets table Ref.
apply_write(Ref, {put, Key, Val}) ->
    ok = dets:insert(Ref, {Key, Val});
apply_write(Ref, {delete, Key}) ->
    ok = dets:delete(Ref, Key).
%% Look up Key in the dets table: {ok, Value} on a hit, the atom
%% not_found on a miss.
read(#state{ref=Ref}, Key) ->
    case dets:lookup(Ref, Key) of
        [{_, Value}] ->
            {ok, Value};
        [] ->
            not_found
    end.
%% Fold Fun(Key, Value, AccIn) over every entry in ascending key order.
%% Fun may abort early by throwing; see fold_iterate/4.
fold(#state{ref=Ref}, Fun, Acc) ->
    % we use a ets table to snapshot (and order) the dets content
    Tab = ets:new(?MODULE, [ordered_set]),
    Tab = dets:to_ets(Ref, Tab),
    fold_iterate(ets:first(Tab), Tab, Fun, Acc).
%% Like fold/3, but starts the iteration after FirstKey.
%% NOTE(review): ets:next/2 yields the key *after* FirstKey, so FirstKey
%% itself is excluded from the fold — confirm that the exclusive
%% semantics are intended by callers.
fold(#state{ref=Ref}, Fun, Acc, FirstKey) ->
    % we use a ets table to snapshot (and order) the dets content
    Tab = ets:new(?MODULE, [ordered_set]),
    Tab = dets:to_ets(Ref, Tab),
    fold_iterate(ets:next(Tab, FirstKey), Tab, Fun, Acc).
%% Walk the snapshot table in key order, threading the accumulator.
%% A throw from Fun aborts the fold: the snapshot table is deleted and
%% the accumulator as it was *before* the throwing step is returned.
%% NOTE(review): an error/exit from Fun propagates without deleting the
%% snapshot table; it then lives until the owning process dies.
fold_iterate('$end_of_table', Tab, _Fun, Acc) ->
    ets:delete(Tab),
    Acc;
fold_iterate(Key, Tab, Fun, Acc0) ->
    [{Key, Value}] = ets:lookup(Tab, Key),
    try Fun(Key, Value, Acc0) of
        Acc1 ->
            fold_iterate(ets:next(Tab, Key), Tab, Fun, Acc1)
    catch
        throw:_Throw ->
            ets:delete(Tab),
            Acc0
    end. | apps/vmq_generic_msg_store/src/engines/vmq_storage_engine_dets.erl | 0.546738 | 0.429011 | vmq_storage_engine_dets.erl | starcoder
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2016 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(state_orset_ext).
-author("<NAME> <<EMAIL>>").
-define(TYPE, state_orset).
-export([intersect/2,
map/2,
union/2,
product/2,
filter/2]).
-export([
delta_operations/2
]).
%% Union of two orsets: delegates to the join-semilattice merge.
union(LValue, RValue) ->
    state_orset:merge(LValue, RValue).
%% Cartesian product of two orsets: every pair {X, Y} is kept, with its
%% causality being the pointwise product of both inputs' histories.
%% A single comprehension preserves the original element order while
%% avoiding the previous fold's `Acc ++ _' append, which was quadratic
%% in the size of the left operand.
product({?TYPE, LValue}, {?TYPE, RValue}) ->
    Pairs = [{{X, Y}, causal_product(XCausality, YCausality)}
             || {X, XCausality} <- LValue, {Y, YCausality} <- RValue],
    {?TYPE, Pairs}.
%% Intersection: keep the elements of the left orset that also appear in
%% the right one, unioning the causality histories of matching elements.
intersect({?TYPE, LValue}, RValue) ->
    lists:foldl(intersect_folder(RValue), new(), LValue).

%% @private
%% Returns the foldl fun used by intersect/2.
%% NOTE(review): `Acc ++ Values' inside the fold is O(n^2) in the size of
%% the left operand; fine for small sets, consider prepend+reverse.
intersect_folder({?TYPE, RValue}) ->
    fun({X, XCausality}, {?TYPE, Acc}) ->
            Values = case lists:keyfind(X, 1, RValue) of
                         {_Y, YCausality} ->
                             [{X, causal_union(XCausality, YCausality)}];
                         false ->
                             []
                     end,
            {?TYPE, Acc ++ Values}
    end.
%% Apply Function to every element value, keeping its causality history
%% untouched. The comprehension preserves element order and replaces the
%% previous fold's `Acc ++ [_]' append, which was O(n^2).
map(Function, {?TYPE, V}) ->
    {?TYPE, [{Function(X), Causality} || {X, Causality} <- V]}.
%% Keep only the elements whose value satisfies the predicate; the
%% causality history travels with each surviving element. Comprehension
%% preserves order and replaces the previous quadratic `Acc ++ [_]' fold.
filter(Function, {?TYPE, V}) ->
    {?TYPE, [{X, Causality} || {X, Causality} <- V, Function(X)]}.
%% @private
%% Fresh bottom orset, used as the fold accumulator.
new() ->
    state_orset:new().
%% @private
%% Pairwise product of two causality histories: each dot of Xs is paired
%% with each dot of Ys, the pair being active only when both inputs are.
%% The comprehension + final reverse reproduces exactly the order the
%% original nested folds produced (the cartesian product, reversed).
causal_product(Xs, Ys) ->
    Pairs = [{[X, Y], XActive andalso YActive}
             || {X, XActive} <- Xs, {Y, YActive} <- Ys],
    lists:reverse(Pairs).
%% @private
%% Union of two causality histories: plain concatenation, duplicates kept.
causal_union(Xs, Ys) ->
    lists:append(Xs, Ys).
%% @private
%% Derive the add/rmv operations that explain the state of one
%% (Value, Token) pair of the target orset relative to the source
%% Orddict. Flag is the token's activity flag in the target; an active
%% token missing from the source becomes an add, an inactive one an
%% add+rmv, and a source-active/target-inactive token a rmv. A token
%% that is inactive in the source but active in the target violates
%% strict inflation and raises an error.
get_operations(Orddict, Value, Token, Flag) ->
    ops_for_state(token_state(Orddict, Value, Token), Value, Flag).

%% Activity of Token under Value in the source; 'missing' when either
%% the value or the token is absent.
token_state(Orddict, Value, Token) ->
    case orddict:find(Value, Orddict) of
        error ->
            missing;
        {ok, Tokens} ->
            case orddict:find(Token, Tokens) of
                error -> missing;
                {ok, Active} -> Active
            end
    end.

%% Map (source token state, target activity flag) to operations.
ops_for_state(missing, Value, true)  -> [{add, Value}];
ops_for_state(true, _Value, true)    -> [];
ops_for_state(false, _Value, true)   -> erlang:error("Not a strict inflation");
ops_for_state(missing, Value, false) -> [{add, Value}, {rmv, Value}];
ops_for_state(false, _Value, false)  -> [];
ops_for_state(true, Value, false)    -> [{rmv, Value}].
%% Compute the list of add/rmv operations that explain how ORSet2 grew
%% out of ORSet1, provided ORSet2 is a strict inflation of ORSet1;
%% returns [] otherwise. For every (Value, Token, Flag) in the target B,
%% get_operations/4 derives the ops relative to the source A.
delta_operations({?TYPE, A}=ORSet1, {?TYPE, B}=ORSet2) ->
    case state_orset:is_strict_inflation(ORSet1, ORSet2) of
        true ->
            orddict:fold(fun(Value, Tokens, Acc1) ->
                Acc1 ++ orddict:fold(fun(Token, Flag, Acc2) ->
                    Acc2 ++ get_operations(A, Value, Token, Flag)
                end, [], Tokens)
            end, [], B);
        false -> []
    end. | src/state_orset_ext.erl | 0.541894 | 0.404272 | state_orset_ext.erl | starcoder
%% @author <NAME> <<EMAIL>>
%% @copyright 2017 <NAME>
%% @doc Vector operation functions.
%%
%% Vector is a 1-dimensional array, which elements stored sequentially into Erlang binary object of
%% some IEEE758 floating point data type: single float, double float, single complex or double complex.
%%
%% Vector contents uses regular Erlang binary storage and relies on it's garbage collection. In some
%% cases it is possible to refer the same binary contents.
%%
%% Note that vector indexes starts from 0, but not from 1 as for lists.
%% @end
-module(nvector).
-include_lib("eunit/include/eunit.hrl").
-include("erlynum.hrl").
-export([full/2, full/3, zeros/1, zeros/2, ones/1, ones/2]).
-export([range/1, range/2, range/3, range/4]).
-export([linspace/3, linspace/4, logspace/3, logspace/4, logspace/5, geomspace/3, geomspace/4]).
-export([from_list/1, from_list/2, to_list/1, to_list/2]).
-export([get/2, get/3]).
-export([
copy/1, copy/2,
sum/2, sum/3,
scale/2, scale/3,
axpy/3, axpy/4,
dot/3, dot/2,
asum/2, asum/1,
iamax/1, iamin/1,
nrm2/2, nrm2/1,
euclidean_norm/1, euclidean_norm/2
]).
-define(WE, erlynum_p:wrap_error).
-spec zeros(non_neg_integer()) -> erlynum:nvector().
%% @equiv zeros(Size, [{dtype, d}])
zeros(Size) -> zeros(Size, [{dtype, d}]).
-spec zeros(non_neg_integer(), [erlynum:create_option()]) -> erlynum:nvector().
%% @doc Returns a vector of the given size filled by zeroes.
zeros(Size, Options) -> full(Size, 0, Options).
-spec ones(non_neg_integer()) -> erlynum:nvector().
%% @equiv ones(Size, [{dtype, d}])
ones(Size) -> ones(Size, [{dtype, d}]).
-spec ones(non_neg_integer(), [erlynum:create_option()]) -> erlynum:nvector().
%% @doc Returns a vector of the given size filled by ones.
%% Delegates to full/3 so that every constant-fill constructor shares a
%% single code path (zeros/2 already delegates this way; the previous
%% implementation called the NIF directly).
ones(Size, Options) -> full(Size, 1, Options).
-spec full(non_neg_integer(), erlynum:nscalar()) -> erlynum:nvector().
%% @equiv full(Size, InitialValue, [{dtype, auto}])
%% Delegates to full/3, making the @equiv documentation literally true
%% and mirroring the other arity-reducing wrappers in this module.
full(Size, InitialValue) -> full(Size, InitialValue, [{dtype, auto}]).
-spec full(non_neg_integer(), erlynum:nscalar(), [erlynum:create_option()]) -> erlynum:nvector().
%% @doc Returns a vector of the given size filled by the specified scalar value `InitialValue'.
full(Size, InitialValue, Options) -> ?WE(erlynum_nif:nvector_full(Size, InitialValue, Options)).
-spec from_list([erlynum:nscalar()]) -> erlynum:nvector().
%% @equiv from_list(List, [{dtype, auto}])
from_list(List) -> from_list(List, [{dtype, auto}]).
-spec from_list([erlynum:nscalar()], [erlynum:create_option()]) -> erlynum:nvector().
%% @doc Returns a vector from the given list items.
from_list(List, Options) -> ?WE(erlynum_nif:nvector_from_list(List, Options)).
-spec to_list(NVector :: erlynum:nvector()) -> [ erlynum:nscalar() ].
%% @equiv to_list(NVector, noconvert)
to_list(NVector) -> to_list(NVector, noconvert).
-spec to_list(
NVector :: erlynum:nvector(),
Convert :: erlynum:convert_option()
) -> [ erlynum:nscalar() ].
%% @doc Returns an Erlang list from the given vector value.
%% @param Convert specifies target data type.
to_list(NVector, Convert) -> ?WE(erlynum_nif:nvector_to_list(NVector, Convert)).
-spec get(
NVector :: erlynum:nvector(),
Index :: non_neg_integer()
) -> erlynum:nscalar().
%% @equiv get(NVector, Index, noconvert)
get(NVector, Index) -> get(NVector, Index, noconvert).
-spec get(
NVector :: erlynum:nvector(),
Index :: non_neg_integer(),
Convert :: erlynum:convert_option()
) -> erlynum:nscalar().
%% @doc Returns a scalar element value from the `Vector'.
%% Note that `Index' starts from 0, but not 1.
%% It is possible to specify `Convert' option to cast scalar value into integer, real of complex value.
get(NVector, Index, Convert) -> ?WE(erlynum_nif:nvector_get(NVector, Index, Convert)).
-spec range(erlynum:nscalar()) -> erlynum:nvector().
%% @equiv range(0, Stop, 1, [])
range(Stop) -> range(0, Stop, 1, []).
-spec range(
Start :: erlynum:nscalar(),
Options_Stop :: [ erlynum:create_option() | erlynum:range_option() ]
| erlynum:nscalar()
) -> erlynum:nvector().
%% @doc Returns a vector filled by values withing from `Start' and step `1'.
%%
%% If last argument is a {@link erlynum:scalar()} value, it specifies range `Stop'.
%% If last argument is a list, it specifies create options.
%%
%% @see range/4
range(Stop, Options) when is_list(Options) ->
range(0, Stop, 1, Options);
range(Start, Stop) ->
range(Start, Stop, 1, []).
-spec range(
Start :: erlynum:nscalar(),
Stop :: erlynum:nscalar(),
Step_Options :: [ erlynum:create_option() | erlynum:range_option() ]
| erlynum:nscalar()
) -> erlynum:nvector().
%% @doc Returns a vector filled by values withing the given range `[Start, Stop)'.
%%
%% If last argument is a {@link erlynum:scalar()} value, it specifies increment on next value.
%% If last argument is a list, it specifies create options.
%%
%% @see range/4
range(Start, Stop, Step_Options) when is_list(Step_Options) ->
range(Start, Stop, 1, Step_Options);
range(Start, Stop, Step_Options) ->
range(Start, Stop, Step_Options, []).
-spec range(
Start :: erlynum:nscalar(),
Stop :: erlynum:nscalar(),
Step :: erlynum:nscalar(),
Options :: [ erlynum:create_option() | erlynum:range_option() ]
) -> erlynum:nvector().
%% @doc Returns a vector filled by values withing the given range `[Start, Stop)' and increment `Step'.
range(Start, Stop, Step, Options) -> ?WE(erlynum_nif:nvector_range(Start, Stop, Step, Options)).
-spec linspace(
Start :: erlynum:nscalar(),
Stop :: erlynum:nscalar(),
Count :: non_neg_integer()
) -> erlynum:nvector().
%% @equiv linspace(Start, Stop, Count, [])
linspace(Start, Stop, Count) -> linspace(Start, Stop, Count, []).
-spec linspace(
Start :: erlynum:nscalar(),
Stop :: erlynum:nscalar(),
Count :: non_neg_integer(),
Options :: [ erlynum:create_option() | erlynum:range_option() ]
) -> erlynum:nvector().
%% @doc Returns a vector filled by the specified `Count' of values withing the given range `[Start, Stop)'.
linspace(Start, Stop, Count, Options) -> ?WE(erlynum_nif:nvector_linspace(Start, Stop, Count, Options)).
-spec logspace(
Start :: erlynum:nscalar(),
Stop :: erlynum:nscalar(),
Count :: non_neg_integer()
) -> erlynum:nvector().
%% @equiv logspace(Stat, Stop, Count, 10, [])
logspace(Start, Stop, Count) ->
logspace(Start, Stop, Count, 10.0).
-spec logspace(
        Start :: erlynum:nscalar(),
        Stop :: erlynum:nscalar(),
        Count :: non_neg_integer(),
        Base_Options :: [ erlynum:create_option() | erlynum:range_option() ]
                      | erlynum:nscalar()
    ) -> erlynum:nvector().
%% @doc Returns a vector filled by the specified `Count' of powers within the given range `[Start, Stop)'.
%%
%% If last argument is a {@link erlynum:scalar()} value, it specifies `Base'.
%% If last argument is a list, it specifies create options.
%%
%% @see logspace/5
%% Guards use short-circuit `orelse' (idiomatic) instead of `or'; the
%% Count spec now matches logspace/3 and logspace/5 (non_neg_integer()).
logspace(Start, Stop, Count, Base_Options) when is_number(Base_Options) orelse is_tuple(Base_Options) ->
    logspace(Start, Stop, Count, Base_Options, []);
logspace(Start, Stop, Count, Base_Options) when is_list(Base_Options) ->
    logspace(Start, Stop, Count, 10.0, Base_Options).
-spec logspace(
Start :: erlynum:nscalar(),
Stop :: erlynum:nscalar(),
Count :: non_neg_integer(),
Base :: erlynum:nscalar(),
Options :: [ erlynum:create_option() | erlynum:range_option() ]
) -> erlynum:nvector().
%% @doc Returns a vector filled by the specified `Count' of powers of `Base' withing the given range `[Start, Stop)'.
logspace(Start, Stop, Count, Base, Options) -> ?WE(erlynum_nif:nvector_logspace(Start, Stop, Count, Base, Options)).
-spec geomspace(
Start :: erlynum:nscalar(),
Stop :: erlynum:nscalar(),
Count :: non_neg_integer()
) -> erlynum:nvector().
%% @equiv geomspace(Start, Stop, Count, [])
geomspace(Start, Stop, Count) -> geomspace(Start, Stop, Count, []).
-spec geomspace(
Start :: erlynum:nscalar(),
Stop :: erlynum:nscalar(),
Count :: non_neg_integer(),
Options :: [ erlynum:create_option() | erlynum:range_option() ]
) -> erlynum:nvector().
%% @doc Returns a vector filled by specified `Count' of geometric progression values.
geomspace(Start, Stop, Count, Options) -> ?WE(erlynum_nif:nvector_geomspace(Start, Stop, Count, Options)).
-spec copy(X :: erlynum:nvector()) -> erlynum:nvector().
%% @equiv copy(X, [])
copy(X) -> copy(X, []).
-spec copy(X :: erlynum:nvector(), Options :: [erlynum:create_option()]) -> erlynum:nvector().
%% @doc Returns a packed copy of vector view `Y ← αX'.
%% This allow Erlang garbage collector to release referenced binary.
%% The function is useless in case of vector view source (some big matrix, for example)
%% stored in the same process stack, due to referenced source binary still alive and will not be collected.
%%
%% Generic cases to use this function are:
%% * to pass return value to another process or node if source vector generated from another big array-like object;
%% * to convert internal data type while copying.
copy(X, Options) -> ?WE(erlynum_nif:nvector_copy(X, Options)).
-spec scale(
Y :: erlynum:nvector(),
Alpha :: erlynum:nscalar()
) -> erlynum:nvector().
%% @equiv scale(Y, Alpha, [])
scale(Y, Alpha) -> scale(Y, Alpha, []).
-spec scale(
Y :: erlynum:nvector(),
Alpha :: erlynum:nscalar(),
Options :: [ erlynum:create_option() ]
) -> erlynum:nvector().
%% @doc Returns a scaled copy `Y ← αY' of the vector of the same size and the specified data type.
scale(Y, Alpha, Options) -> ?WE(erlynum_nif:nvector_scale(Y, Alpha, Options)).
-spec sum(Y :: erlynum:nvector(), X :: erlynum:nvector()) -> erlynum:nvector().
%% @equiv sum(Y, X, [])
sum(NVectorY, NVectorX) -> axpy(NVectorX, NVectorY, 1).
-spec sum(
Y :: erlynum:nvector(),
X :: erlynum:nvector(),
Options :: [ erlynum:create_option() ]
) -> erlynum:nvector().
%% @doc Returns an element-wise sum of two vectors `Y ← X + Y'.
%%
%% If vector sizes differs, the result is a vector of greater size, leaving
%% rest elements unchanged.
%%
%% If data types differs, the `Y' vector (first argument) data type is used until options provided.
sum(Y, X, Options) -> axpy(X, Y, 1, Options).
-spec axpy(
Y :: erlynum:nvector(),
X :: erlynum:nvector(),
Alpha :: erlynum:nscalar()
) -> erlynum:nvector().
%% @equiv axpy(Y, X, Alpha, [])
axpy(Y, X, Alpha) -> axpy(Y, X, Alpha, []).
-spec axpy(
Y :: erlynum:nvector(),
X :: erlynum:nvector(),
Alpha :: erlynum:nscalar(),
Options :: [ erlynum:create_option() ]
) -> erlynum:nvector().
%% @doc Returns a vector, each element calculated in form `Y ← αX + Y'
axpy(Y, X, Alpha, Options) ->
?WE(erlynum_nif:nvector_axpy(Y, X, Alpha, Options)).
-spec dot(
Y :: erlynum:nvector(),
X :: erlynum:nvector()
) -> erlynum:nscalar().
%% @equiv dot(Y, X, [])
dot(Y, X) -> dot(Y, X, []).
-spec dot(
Y :: erlynum:nvector(),
X :: erlynum:nvector(),
Options :: [ erlynum:create_option() | {conjuated, boolean()}]
) -> erlynum:nscalar().
%% @doc Returns the inner product of two vectors.
dot(Y, X, Options) -> ?WE(erlynum_nif:nvector_dot(Y, X, Options)).
-spec asum(X :: erlynum:nvector()) -> erlynum:nscalar().
%% @equiv asum(X, [])
asum(X) -> asum(X, []).
-spec asum(
X :: erlynum:nvector(),
Options :: [ erlynum:create_option() ]
) -> erlynum:nscalar().
%% @doc Returns the sum of magnitudes of elements of a real vector, or the sum of magnitudes
%% of the real and imaginary parts of elements of a complex vector.
asum(X, Options) -> ?WE(erlynum_nif:nvector_asum(X, Options)).
-spec iamax(X :: erlynum:nvector()) -> non_neg_integer() | undefined.
%% @doc Returns the lowest index of vector element that has the largest absolute value.
iamax(X) -> ?WE(erlynum_nif:nvector_iamax_iamin(X, iamax)).
-spec iamin(X :: erlynum:nvector()) -> non_neg_integer() | undefined.
%% @doc Returns the lowest index of vector element that has the lowest absolute value.
iamin(X) -> ?WE(erlynum_nif:nvector_iamax_iamin(X, iamin)).
-spec nrm2(X :: erlynum:nvector()) -> erlynum:nscalar().
%% @equiv nrm2(X, [])
nrm2(X) -> nrm2(X, []).
-spec nrm2(
        X :: erlynum:nvector(),
        Options :: [ erlynum:create_option() ]
    ) -> erlynum:nscalar().
%% @doc Returns the Euclidean norm of vector.
%%
%% The function is named `nrm2' to match corresponding function name in BLAS.
%% It is better to use `euclidean_norm' function for better readability.
nrm2(X, Options) -> ?WE(erlynum_nif:nvector_nrm2(X, Options)).
-spec euclidean_norm(X :: erlynum:nvector()) -> erlynum:nscalar().
%% @equiv nrm2(X)
euclidean_norm(X) -> nrm2(X).
%% Readable alias for nrm2/2.
-spec euclidean_norm(
        X :: erlynum:nvector(),
        Options :: [ erlynum:create_option() ]
    ) -> erlynum:nscalar().
%% @equiv nrm2(X, Options)
euclidean_norm(X, Options) -> nrm2(X, Options). | src/nvector.erl | 0.733643 | 0.517876 | nvector.erl | starcoder
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% INSTRUCTION SCHEDULER
%%
%% This is a basic ILP cycle scheduler:
%% * set cycle = 0
%% * while ready[cycle] nonempty do
%% - take x with greatest priority from ready[cycle]
%% - try to schedule x;
%% * if scheduling x was possible,
%% - reserve resources
%% - add x to schedule and delete x from dag
%% - update earliest-time for all successor nodes
%% as max[earliest[y],cycle+latency[x]]
%% - if some node y now has no predecessors,
%% add y to ready[earliest[y]]
%% * if it was impossible, put x in ready[cycle+1]
%% (= try again)
%%
%% We use the following data structures:
%% 1. all nodes are numbered and indices used as array keys
%% 2. priority per node can be computed statically or dynamically
%% * statically: before scheduling, each node gets a priority value
%% * dynamically: at each cycle, compute priorities for all ready nodes
%% 3. earliest: earliest cycle of issue, starts at 0
%% and is updated as predecessors issue
%% 4. predecessors: number of predecessors (0 = ready to issue)
%% 5. successors: list of {Latency,NodeID}
%% 6. ready: an array indexed by cycle-time (integer), where
%% ready nodes are kept.
%% 7. resources: a resource representation (ADT) that answers
%% certain queries, e.g., "can x be scheduled this cycle"
%% and "reserve resources for x".
%% 8. schedule: list of scheduled instructions {Instr,Cycle}
%% in the order of issue
%% 9. instructions: maps IDs back to instructions
%%
%% Inputs:
%% - a list of {ID,Node} pairs (where ID is a unique key)
%% - a dependence list {ID0,Latency,ID1}, which is used to
%% build the DAG.
%%
%% Note that there is some leeway in how things are represented
%% from here.
%%
%% MODIFICATIONS:
%% - Some basic blocks are not worth scheduling (e.g., GC save/restore code)
%% yet are pretty voluminous. How do we skip them?
%% - Scheduling should be done at finalization time: when basic block is
%% linearized and is definitely at Sparc assembly level, THEN reorder
%% stuff.
-module(hipe_schedule).
-export([cfg/1, est_cfg/1, delete_node/5]).
-include("../sparc/hipe_sparc.hrl").
%%-define(debug1,true).
-define(debug2(Str,Args),ok).
%%-define(debug2(Str,Args),io:format(Str,Args)).
-define(debug3(Str,Args),ok).
%%-define(debug3(Str,Args),io:format(Str,Args)).
-define(debug4(Str,Args),ok).
%%-define(debug4(Str,Args),io:format(Str,Args)).
-define(debug5(Str,Args),ok).
%%-define(debug5(Str,Args),io:format(Str,Args)).
-define(debug(Str,Args),ok).
%%-define(debug(Str,Args),io:format(Str,Args)).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : cfg
%% Argument : CFG - the control flow graph
%% Returns : CFG - A new cfg with scheduled blocks
%% Description : Takes each basic block and schedules them one by one.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Schedule every basic block of the CFG independently and return the
%% CFG with the rescheduled blocks swapped in.
cfg(CFG) ->
    ?debug3("CFG: ~n~p", [CFG]),
    Scheduled =
        [{Label,
          hipe_bb:mk_bb(block(Label, hipe_bb:code(hipe_sparc_cfg:bb(CFG, Label))))}
         || Label <- hipe_sparc_cfg:labels(CFG)],
    update_all(Scheduled, CFG).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : update_all
%% Argument : Blocks - [{Label, Block}] , a list with labels and new code
%% used for updating the old CFG.
%%                    CFG    - The old control-flow graph
%% Returns     : An updated control-flow graph.
%% Description : Just swaps the basic blocks in the CFG for the scheduled ones.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Fold the {Label, NewBlock} pairs back into the CFG one at a time.
update_all(Blocks, CFG0) ->
    lists:foldl(fun({Label, NewBB}, CFG) ->
                        hipe_sparc_cfg:bb_add(CFG, Label, NewBB)
                end,
                CFG0, Blocks).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Replace every block of the CFG with its execution-time-estimation
%% version (see est_block/1).
est_cfg(CFG) ->
    Estimated =
        [{Label,
          hipe_bb:mk_bb(est_block(hipe_bb:code(hipe_sparc_cfg:bb(CFG, Label))))}
         || Label <- hipe_sparc_cfg:labels(CFG)],
    update_all(Estimated, CFG).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% Provides an estimation of how quickly a block will execute.
%% This is done by chaining all instructions in sequential order
%% by 0-cycle dependences (which means they will never be reordered),
%% then scheduling the mess.
%% Estimate how quickly a block executes: chain all instructions with
%% 0-latency arcs (so they cannot be reordered) and run the scheduler,
%% which annotates the code with cycle comments via separate_block/2.
%% Blocks with fewer than two instructions are returned unchanged.
est_block(Block) ->
    case Block of
        [] ->
            [];
        [_] = Single ->
            Single;
        _ ->
            {IxBlk, DAG} = est_deps(Block),
            Schedule = bb(IxBlk, DAG),
            separate_block(Schedule, IxBlk)
    end.
%% Build the dependence DAG for estimation: the normal dependences plus
%% a sequential 0-latency chain through all instructions.
est_deps(Block) ->
    Indexed = indexed_bb(Block),
    {Indexed, chain_instrs(Indexed, deps(Indexed))}.
%% Link each instruction to its textual successor with a 0-latency arc,
%% pinning the original order for execution-time estimation.
chain_instrs([{First, _} | Rest], DAG) ->
    chain_i(First, Rest, DAG).

chain_i(_Prev, [], DAG) ->
    DAG;
chain_i(Prev, [{Next, _} | Rest], DAG0) ->
    DAG1 = dep_arc(Prev, zero_latency(), Next, DAG0),
    chain_i(Next, Rest, DAG1).
%% Latency used for pure ordering arcs (no pipeline delay).
zero_latency() -> 0.

%% Map a node index back to its instruction in an indexed block.
%% Crashes with function_clause if the index is absent.
lookup_instr([{Ix, Instr} | _], Ix) -> Instr;
lookup_instr([_ | Rest], Ix) -> lookup_instr(Rest, Ix).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : block
%% Argument : Instrs - [Instr], list of all the instructions in a basic
%% block.
%% Returns : A new scheduled block
%% Description : Schedule a basic block
%%
%% Note: does not consider delay slots!
%% (another argument for using only annulled delay slots?)
%% * how do we add delay slots? somewhat tricky to
%% reconcile with the sort of scheduling we consider.
%% (as-early-as-possible)
%% => rewrite scheduler into as-late-as-possible?
%% (=> just reverse the dependence arcs??)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Don't fire up the scheduler if there's no work to do.
%% Schedule one basic block. Empty and single-instruction blocks are
%% handled directly (a NOP is placed before a lone branch so its delay
%% slot is occupied); larger blocks go through the DAG scheduler followed
%% by delay-slot filling and finalization.
block(_, []) ->
    [];
block(_L, [I]) ->
    %% A lone branch gets a NOP companion for its delay slot.
    case hipe_sparc:is_any_branch(I) of
        true -> [hipe_sparc:nop_create(), I];
        false -> [I]
    end;
block(_L, Blk) ->
    IxBlk = indexed_bb(Blk),
    case IxBlk of
        [{_N, I}] -> % comments and nops may have been removed.
            case hipe_sparc:is_any_branch(I) of
                true -> [hipe_sparc:nop_create(), I];
                false -> [I]
            end;
        _ ->
            %% deps/1 returns {DAG, Preds}; the inline match also binds
            %% DAG for the delay-slot pass below.
            Sch = bb(IxBlk, {DAG, _Preds} = deps(IxBlk)),
            {NewSch, NewIxBlk} = fill_delays(Sch, IxBlk, DAG),
            X = finalize_block(NewSch, NewIxBlk),
            debug1_stuff(Blk, DAG, IxBlk, Sch, X),
            X
    end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : fill_delays
%% Argument : Sch - List of {{cycle, C}, {node, N}} : C = current cycle
%% N = node index
%% IxBlk - Indexed block [{N, Instr}]
%% DAG - Dependence graph
%% Returns : {NewSch, NewIxBlk} - vector with new schedule and vector
%% with {N, Instr}
%% Description : Goes through the schedule from back to front looking for
%% branches/jumps. If one is found fill_del tries to find
%% an instr to fill the delayslot.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Convert the schedule and indexed block to vectors, then attempt to
%% move an earlier instruction into the delay slot of a trailing
%% branch/call (see fill_del/4). Returns {NewScheduleVector, BlockVector}.
fill_delays(Sch, IxBlk, DAG) ->
    IxVec = hipe_vectors:list_to_vector(IxBlk),
    SchVec = hipe_vectors:list_to_vector(Sch),
    NewSch = fill_del(length(Sch), SchVec, IxVec, DAG),
    {NewSch, IxVec}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : fill_del
%% Argument : N - current index in the schedule
%% Sch - schedule
%% IxBlk - indexed block
%% DAG - dependence graph
%% Returns : Sch - New schedule with possibly a delay instr in the last
%% position.
%% Description : If a call/jump is found fill_branch_delay/fill_call_delay
%% is called to find a delay-filler.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Inspect the schedule entry at position N; if it is a branch/jump/call,
%% try to pull an earlier independent instruction into its delay slot.
%% Only the last position is processed (the recursive descent was
%% disabled; see the commented-out call below this function).
fill_del(N, Sch, _IxBlk, _DAG) when N < 1 -> Sch;
fill_del(N, Sch, IxBlk, DAG) ->
    Index = get_index(Sch, N),
    ?debug2("Index for ~p: ~p~nInstr: ~p~n",
            [N, Index, get_instr(IxBlk, Index)]),
    NewSch =
        case get_instr(IxBlk, Index) of
            %% NOTE(review): #call_link{} is routed to fill_branch_delay
            %% even though fill_call_delay has an args-clause for
            %% call_link; appears deliberate in the original — confirm.
            #call_link{} ->
                fill_branch_delay(N - 1, N, Sch, IxBlk, DAG);
            #jmp_link{} ->
                fill_call_delay(N - 1, N, Sch, IxBlk, DAG);
            #jmp{} ->
                fill_call_delay(N - 1, N, Sch, IxBlk, DAG);
            #b{} ->
                fill_branch_delay(N - 1, N, Sch, IxBlk, DAG);
            #br{} ->
                fill_branch_delay(N - 1, N, Sch, IxBlk, DAG);
            #goto{} ->
                fill_branch_delay(N - 1, N, Sch, IxBlk, DAG);
            _Other ->
                Sch
        end,
    NewSch.
%% fill_del(N - 1, NewSch, IxBlk, DAG).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : fill_call_delay
%% Argument : Cand - index in schedule of delay-candidate
%% Call - index in schedule of call
%% Sch - schedule vector: < {{cycle,Ci},{node,Nj}}, ... >
%% IxBlk - block vector: < {N, Instr1}, {N+1, Instr2} ... >
%% DAG - dependence graph
%% Returns : Sch - new updated schedule.
%% Description : Searches backwards through the schedule trying to find an
%% instr without conflicts with the Call-instr.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Search backwards from position Cand for an instruction that can fill
%% the delay slot of the call/jump at position Call. A candidate must be
%% a move/ALU instruction, must have the call as its only dependant
%% (latency 0 or 1), and must not define a register the call reads —
%% registers used only as outgoing call arguments are exempt.
fill_call_delay(Cand, _Call, Sch, _IxBlk, _DAG) when Cand < 1 -> Sch;
fill_call_delay(Cand, Call, Sch, IxBlk, DAG) ->
    CandIndex = get_index(Sch, Cand),
    CallIndex = get_index(Sch, Call),
    CandI = get_instr(IxBlk, CandIndex),
    case move_or_alu(CandI) of
        true ->
            case single_depend(CandIndex, CallIndex, DAG) of
                false -> % Other instrs depend on Cand ...
                    fill_call_delay(Cand - 1, Call, Sch, IxBlk, DAG);
                true ->
                    CallI = get_instr(IxBlk, CallIndex),
                    CandDefs = ordsets:from_list(hipe_sparc:defines(CandI)),
                    %% CandUses = ordsets:from_list(hipe_sparc:uses(CandI)),
                    %% CallDefs = ordsets:from_list(hipe_sparc:defines(CallI)),
                    CallUses = ordsets:from_list(hipe_sparc:uses(CallI)),
                    %% The call's own argument registers are read "after"
                    %% the delay slot executes, so they do not conflict.
                    Args = case CallI of
                               #jmp_link{} ->
                                   ordsets:from_list(
                                     hipe_sparc:jmp_link_args(CallI));
                               #jmp{} ->
                                   ordsets:from_list(hipe_sparc:jmp_args(CallI));
                               #call_link{} ->
                                   ordsets:from_list(
                                     hipe_sparc:call_link_args(CallI))
                           end,
                    CallUses2 = ordsets:subtract(CallUses, Args),
                    Conflict = ordsets:intersection(CandDefs, CallUses2),
                    %% io:format("single_depend -> true:~n ~p~n, ~p~n,~p~n",[CandI,CallI,DAG]),
                    %% io:format("Cand = ~p~nCall = ~p~n",[CandI,CallI]),
                    %% io:format("CandDefs = ~p~nCallDefs = ~p~n",[CandDefs,CallDefs]),
                    %% io:format("CandUses = ~p~nCallUses = ~p~n",[CandUses,CallUses]),
                    %% io:format("Args = ~p~nCallUses2 = ~p~n",[Args,CallUses2]),
                    %% io:format("Conflict = ~p~n",[Conflict]),
                    case Conflict of
                        [] -> % No conflicts ==> Cand can fill delayslot after Call
                            update_schedule(Cand, Call, Sch);
                        _ -> % Conflict: try with preceding instrs
                            fill_call_delay(Cand - 1, Call, Sch, IxBlk, DAG)
                    end
            end;
        false ->
            fill_call_delay(Cand - 1, Call, Sch, IxBlk, DAG)
    end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : fill_branch_delay
%% Argument : Cand - index in schedule of delay-candidate
%% Branch - index in schedule of branch
%% Sch - schedule
%% IxBlk - indexed block
%% DAG - dependence graph
%% Returns : Sch - new updated schedule.
%% Description : Searches backwards through the schedule trying to find an
%% instr without conflicts with the Branch-instr.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Search backwards from position Cand for an instruction that can fill
%% the delay slot of the branch at position Br. Same criteria as
%% fill_call_delay/5, except the branch's full use set must be free of
%% conflicts (there is no argument-register exemption here).
fill_branch_delay(Cand, _Br, Sch, _IxBlk, _DAG) when Cand < 1 -> Sch;
fill_branch_delay(Cand, Br, Sch, IxBlk, DAG) ->
    CandIndex = get_index(Sch, Cand),
    BrIndex = get_index(Sch, Br),
    CandI = get_instr(IxBlk, CandIndex),
    case move_or_alu(CandI) of
        true ->
            case single_depend(CandIndex, BrIndex, DAG) of
                false -> % Other instrs depend on Cand ...
                    fill_branch_delay(Cand - 1, Br, Sch, IxBlk, DAG);
                true ->
                    BrI = get_instr(IxBlk, BrIndex),
                    CandDefs = ordsets:from_list(hipe_sparc:defines(CandI)),
                    %% CandUses = ordsets:from_list(hipe_sparc:uses(CandI)),
                    %% BrDefs = ordsets:from_list(hipe_sparc:defines(BrI)),
                    BrUses = ordsets:from_list(hipe_sparc:uses(BrI)),
                    Conflict = ordsets:intersection(CandDefs, BrUses),
                    %% io:format("single_depend -> true: ~p~n, ~p~n,~p~n", [CandI, BrI, DAG]),
                    %% io:format("Cand = ~p~nBr = ~p~n",[CandI,BrI]),
                    %% io:format("CandDefs = ~p~nBrDefs = ~p~n",[CandDefs,BrDefs]),
                    %% io:format("CandUses = ~p~nBrUses = ~p~n",[CandUses,BrUses]),
                    %% io:format("Conflict = ~p~n",[Conflict])
                    case Conflict of
                        [] -> % No conflicts ==>
                              % Cand can fill delayslot after Branch
                            update_schedule(Cand, Br, Sch);
                        _ -> % Conflict: try with preceding instrs
                            fill_branch_delay(Cand - 1, Br, Sch, IxBlk, DAG)
                    end
            end;
        false ->
            fill_branch_delay(Cand - 1, Br, Sch, IxBlk, DAG)
    end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : update_schedule
%% Argument : From - the position from where to switch indexes in Sch
%% To - the position to where to switch indexes in Sch
%% Sch - schedule
%% Returns : Sch - an updated schedule
%% Description : If From is the delay-filler and To is the Call/jump, the
%% schedule is updated so From gets index To, To gets index
%% To - 1, and the nodes between From and To gets old_index - 1.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Rotate the schedule so the delay-slot filler (at position From) ends
%% up right after the branch/call (at position To): each entry between
%% them shifts one slot towards the front, and the filler takes the final
%% slot with its cycle bumped by one. Positions are 1-based; the
%% underlying vector is 0-based (hence the -1 offsets).
update_schedule(To, To, Sch) ->
    %% Filler has reached the branch position: issue it one cycle later
    %% so it executes in the delay slot.
    {{cycle, C}, {node, _N} = Node} = hipe_vectors:get(Sch, To-1),
    hipe_vectors:set(Sch, To-1, {{cycle, C+1}, Node});
update_schedule(From, To, Sch) ->
    %% Swap the entries at positions From and From+1, then continue.
    Temp = hipe_vectors:get(Sch, From-1),
    Sch1 = hipe_vectors:set(Sch, From-1, hipe_vectors:get(Sch, From)),
    update_schedule(From + 1, To, hipe_vectors:set(Sch1, From, Temp)).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : single_depend
%% Argument : N - Index of the delayslot candidate
%% M - Index of the node that N possibly has a single
%% depend to.
%% DAG - The dependence graph
%% Returns       : true if no node other than M depends on N
%% Description : Checks that no other nodes than M depends on N and that the
%% latency between them is zero or 1.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% single_depend(N, M, DAG): true iff node N either has no dependants at
%% all, or its only outgoing arc targets M with latency 0 or 1 — i.e.
%% moving N into M's delay slot cannot break any other dependence.
single_depend(N, M, DAG) ->
    Successors = hipe_vectors:get(DAG, N-1),
    single_depend(M, Successors).

single_depend(_M, []) -> true;
single_depend(M, [{Lat, M}]) when Lat =:= 0; Lat =:= 1 -> true;
single_depend(_M, [{_Lat, _} | _]) -> false.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : get_index
%% Argument : Sch - schedule
%% N - index in schedule
%% Returns : Index - index of the node
%% Description : Returns the index of the node on position N in the schedule.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Node index stored at schedule position Pos (1-based position, 0-based
%% underlying vector).
get_index(Sch, Pos) ->
    {{cycle, _Cycle}, {node, NodeIx}} = hipe_vectors:get(Sch, Pos-1),
    NodeIx.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : get_instr
%% Argument : IxBlk - indexed block
%% N - index in block
%% Returns : Instr
%% Description : Returns the instr on position N in the indexed block.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Instruction stored at position N of the indexed-block vector.
get_instr(IxBlk, N) ->
    {_Ix, Instr} = hipe_vectors:get(IxBlk, N-1),
    Instr.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : get_instr
%% Argument : Sch - schedule
%% IxBlk - indexed block
%% N - index in schedule
%% Returns : Instr
%% Description : Returns the instr on position N in the schedule.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Instruction scheduled at position Pos: look up the node index in the
%% schedule, then the instruction in the indexed block.
get_instr(Sch, IxBlk, Pos) ->
    {{cycle, _Cycle}, {node, NodeIx}} = hipe_vectors:get(Sch, Pos-1),
    {_Ix, Instr} = hipe_vectors:get(IxBlk, NodeIx-1),
    Instr.
%% Turn a schedule into an instruction list, inserting a comment
%% pseudo-instruction each time the issue cycle advances.
separate_block(Sch, IxBlk) ->
    sep_comments([{C, lookup_instr(IxBlk, N)} || {{cycle, C}, {node, N}} <- Sch]).

sep_comments([]) ->
    [];
sep_comments([{Cycle, Instr} | Rest]) ->
    [hipe_sparc:comment_create({cycle, Cycle}), Instr | sep_comments(Rest, Cycle)].

%% Emit a new cycle comment only when the cycle number increases.
sep_comments([], _Prev) ->
    [];
sep_comments([{Cycle, Instr} | Rest], Prev) when Cycle > Prev ->
    [hipe_sparc:comment_create({cycle, Cycle}), Instr | sep_comments(Rest, Cycle)];
sep_comments([{_Cycle, Instr} | Rest], Prev) ->
    [Instr | sep_comments(Rest, Prev)].
%% Flatten the final schedule into a plain instruction list, treating the
%% last two positions specially for the branch delay slot.
finalize_block(Sch, IxBlk) ->
    ?debug5("Sch: ~p~nIxBlk: ~p~n",[Sch,IxBlk]),
    finalize_block(1, hipe_vectors:size(Sch), 1, Sch, IxBlk, []).

%% At the second-to-last position, decide how the block ends:
%% - last entry is a branch  => its delay slot was not filled; pad with NOP
%% - last entry not a branch => it is the delay-slot filler; emit the
%%   final two entries in swapped order.
finalize_block(N, End, _C, Sch, IxBlk, _Instrs) when N =:= End - 1 ->
    NextLast = get_instr(Sch, IxBlk, N),
    Last = get_instr(Sch, IxBlk, End),
    ?debug5("NextLast: ~p~nLast: ~p~n",[NextLast,Last]),
    case hipe_sparc:is_any_branch(Last) of
        true -> % Couldn't fill delayslot ==> add NOP
            [NextLast , hipe_sparc:nop_create(), Last];
        false -> % Last is a delayslot-filler ==> change order...
            [Last, NextLast]
    end;
%% Ordinary positions are copied through in schedule order.
finalize_block(N, End, C0, Sch, IxBlk, Instrs) ->
    {{cycle, _C1}, {node, _M}} = hipe_vectors:get(Sch, N-1),
    Instr = get_instr(Sch, IxBlk, N),
    ?debug5("Instr: ~p~n~n",[Instr]),
    [Instr | finalize_block(N + 1, End, C0, Sch, IxBlk, Instrs)].
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : bb
%% Argument : IxBlk - indexed block
%% DAG - {Dag, Preds} where Dag is dependence graph and
%% Preds is number of predecessors for each node.
%% Returns : Sch
%% Description : Initializes earliest-list, ready-list, priorities, resources
%% and so on, and calls the cycle_sched which does the scheduling
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Entry point of the list scheduler for one indexed block.
bb(IxBlk, DAG) ->
    bb(length(IxBlk), IxBlk, DAG).

%% Initialize all scheduler state — earliest issue times, ready queues,
%% per-instruction resource requirements, priorities and the machine
%% resource table — then run the cycle-by-cycle scheduling loop.
bb(N, IxBlk, {DAG, Preds}) ->
    Earliest = init_earliest(N),
    BigArray = N*10, % "nothing" is this big :-)
    Ready = hipe_schedule_prio:init_ready(BigArray, Preds),
    I_res = init_instr_resources(N, IxBlk),
    Prio = hipe_schedule_prio:init_instr_prio(N, DAG),
    Rsrc = init_resources(BigArray),
    ?debug4("I_res: ~n~p~nPrio: ~n~p~nRsrc: ~n~p~n", [I_res,Prio,Rsrc]),
    ?debug('cycle 1~n',[]),
    Sch = empty_schedule(),
    cycle_sched(1, Ready, DAG, Preds, Earliest, Rsrc, I_res, Prio, Sch, N, IxBlk).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : cycle_sched
%% Argument : - C is current cycle, 1 or more.
%% - Ready is an array (Cycle -> [Node])
%% yielding the collection of nodes ready to be
%% scheduled in a cycle.
%% - DAG is an array (Instr -> [{Latency,Instr}])
%% represents the dependence DAG.
%% - Preds is an array (Instr -> NumPreds)
%% counts the number of predecessors
%% (0 preds = ready to be scheduled).
%% - Earl is an array (Instr -> EarliestCycle)
%% holds the earliest cycle an instruction can be scheduled.
%% - Rsrc is a 'resource ADT' that handles scheduler resource
%% management checks whether instruction can be scheduled
%% this cycle without a stall.
%% - I_res is an array (Instr -> Required_resources)
%% holds the resources required to schedule an instruction.
%% - Sch is the representation of the schedule current schedule.
%% - N is the number of nodes remaining to be scheduled
%% tells us when to stop the scheduler.
%% - IxBlk is the indexed block with instrs
%% Returns : present schedule
%% Description : Scheduler main loop.
%% Pick next ready node in priority order for cycle C until
%% none remain.
%% * check each node if it can be scheduled w/o stalling
%% * if so, schedule it
%% * otherwise, bump the node to the next cycle
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Scheduler main loop: take ready nodes for cycle C in priority order.
%% If a node's resources are free it is scheduled and removed from the
%% DAG (possibly making successors ready); otherwise it is retried next
%% cycle. When no node is ready, advance to cycle C+1 until all N nodes
%% are scheduled.
cycle_sched(C, Ready, DAG, Preds, Earl, Rsrc, I_res, Prio, Sch, N, IxBlk) ->
    case hipe_schedule_prio:next_ready(C, Ready, Prio, IxBlk, DAG, Preds, Earl) of
        %% case hipe_schedule_prio:next_ready(C,Ready,Prio,IxBlk) of
        {next, I, Ready1} ->
            ?debug('try ~p~n==> ready = ~p~n', [I, Ready1]),
            case resources_available(C, I, Rsrc, I_res) of
                {yes, NewRsrc} ->
                    %% Issue I in cycle C, drop it from the DAG and make
                    %% any now-predecessor-free successors ready.
                    ?debug(' scheduled~n==> Rscrs = ~p~n', [NewRsrc]),
                    NewSch = add_to_schedule(I, C, Sch),
                    {ReadyNs, NewDAG, NewPreds, NewEarl} =
                        delete_node(C, I, DAG, Preds, Earl),
                    ?debug("NewPreds : ~p~n", [Preds]),
                    ?debug(' ReadyNs: ~p~n', [ReadyNs]),
                    NewReady = hipe_schedule_prio:add_ready_nodes(ReadyNs,
                                                                  Ready1),
                    ?debug(' New ready: ~p~n', [NewReady]),
                    cycle_sched(C, NewReady, NewDAG, NewPreds, NewEarl,
                                NewRsrc, I_res, Prio, NewSch, N-1, IxBlk);
                no ->
                    %% Resource conflict: bump I to the next cycle.
                    ?debug(' resource conflict~n', []),
                    NewReady = hipe_schedule_prio:insert_node(C+1, I, Ready1),
                    cycle_sched(C, NewReady, DAG, Preds, Earl, Rsrc,
                                I_res, Prio, Sch, N, IxBlk)
            end;
        none -> % schedule next cycle if some node remains
            if
                N > 0 ->
                    ?debug('cycle ~p~n', [C+1]),
                    cycle_sched(C+1, Ready, DAG, Preds, Earl,
                                advance_cycle(Rsrc),
                                I_res, Prio, Sch, N, IxBlk);
                true ->
                    present_schedule(Sch)
            end
    end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : init_earliest
%% Argument : N - number of instrs
%% Returns :
%% Description :
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Vector (NodeIx -> earliest issue cycle); every node starts at cycle 1.
init_earliest(N) ->
    hipe_vectors:new(N,1).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% Schedule is kept reversed until the end.
%% A schedule entry pairs an issue cycle with a node index. The schedule
%% is accumulated in reverse and only reversed when presented.
-define(present_node(I,Cycle),{{cycle,Cycle},{node,I}}).

%% A fresh (reversed) schedule with no entries.
empty_schedule() -> [].

%% Record that instruction I was issued in cycle Cycle.
add_to_schedule(I, Cycle, Sch) ->
    [?present_node(I, Cycle) | Sch].

%% Return the accumulated schedule in issue order.
present_schedule(Sch) -> lists:reverse(Sch).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% Interface to resource manager:
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : init_resources
%% Description : Yields a 'big enough' array mapping (Cycle -> Resources);
%% this array is called Rsrc below.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Delegate to the target machine: build a (Cycle -> Resources) table
%% of the given size.
init_resources(Size) ->
    hipe_target_machine:init_resources(Size).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : init_instr_resources
%% Argument : Nodes - a list of the instructions
%% N - is the number of nodes
%% Description : return a vector (NodeID -> Resource_requirements)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Delegate to the target machine: vector (NodeId -> resource
%% requirements) for the given instructions.
init_instr_resources(Count, Instrs) ->
    hipe_target_machine:init_instr_resources(Count, Instrs).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : resources_available
%% Argument : Cycle - the current cycle
%% I - the current instruction (index = NodeID)
%% Rsrc - a map (Cycle -> Resources)
%% I_res - maps (NodeID -> Resource_requirements)
%% Description : returns {yes,NewResTab} | no
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Delegate to the target machine: can instruction Instr issue in Cycle
%% without a stall? Returns {yes, NewRsrc} | no.
resources_available(Cycle, Instr, Rsrc, InstrRes) ->
    hipe_target_machine:resources_available(Cycle, Instr, Rsrc, InstrRes).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : advance_cycle
%% Argument : Rsrc - resources
%% Description : Returns an empty resources-state
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Delegate to the target machine: reset per-cycle resource state when
%% moving on to the next cycle.
advance_cycle(Resources) ->
    hipe_target_machine:advance_cycle(Resources).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : delete_node
%% Argument : Cycle - current cycle
%% I - index of instr
%% DAG - dependence dag
%% Preds - array with number of predecessors for nodes
%% Earl - array with earliest-times for nodes
%% Returns : {ReadyNs,NewDAG,NewPreds,NewEarl}
%% Description : Deletes node I and updates earliest times for the rest.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Remove scheduled node I from the DAG: its successor slot is
%% overwritten with the atom 'scheduled' (a debugging aid), and each
%% successor's earliest issue time and predecessor count are updated.
%% Successors whose predecessor count drops to zero come back in ReadyNs.
delete_node(Cycle, I, DAG, Preds, Earl) ->
    Succ = hipe_vectors:get(DAG, I-1),
    NewDAG = hipe_vectors:set(DAG, I-1, scheduled), % provides debug 'support'
    {ReadyNs, NewPreds, NewEarl} = update_earliest(Succ, Cycle, Preds, Earl, []),
    ?debug('earliest after ~p: ~p~n',[I,[{Ix+1,V} || {Ix,V} <- hipe_vectors:list(NewEarl)]]),
    {ReadyNs, NewDAG, NewPreds, NewEarl}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : update_earliest
%% Argument : Succ - successor list
%% Cycle - current cycle
%% Preds - predecessors
%% Earl - earliest times for nodes
%% Ready - array with readynodes for cycles
%% Returns : {Ready,Preds,Earl}
%% Description : Updates the earliest times for nodes and updates number of
%% predecessors for nodes
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% For each successor {Lat, N} of a node issued in Cycle: raise N's
%% earliest issue time to max(old, Cycle+Lat) and decrement its
%% predecessor count. A count of 1 means this was N's last unscheduled
%% predecessor, so N becomes ready at its new earliest cycle; a count of
%% 0 means the same node was deleted twice — an inconsistent DAG.
update_earliest([], _Cycle, Preds, Earl, Ready) ->
    {Ready, Preds, Earl};
update_earliest([{Lat, N} | Xs], Cycle, Preds, Earl, Ready) ->
    Old_earl = hipe_vectors:get(Earl, N-1),
    New_earl = erlang:max(Old_earl, Cycle+Lat),
    NewEarl = hipe_vectors:set(Earl, N-1, New_earl),
    Num_preds = hipe_vectors:get(Preds, N-1),
    NewPreds = hipe_vectors:set(Preds, N-1, Num_preds-1),
    if
        Num_preds =:= 0 ->
            ?debug('inconsistent DAG~n',[]),
            exit({update_earliest, N});
        Num_preds =:= 1 ->
            %% Last predecessor issued: N is now ready at New_earl.
            NewReady = [{New_earl, N} | Ready],
            NewPreds2 = hipe_vectors:set(NewPreds, N-1, 0),
            update_earliest(Xs, Cycle, NewPreds2, NewEarl, NewReady);
        is_integer(Num_preds), Num_preds > 1 ->
            update_earliest(Xs, Cycle, NewPreds, NewEarl, Ready)
    end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% Collect instruction dependences.
%%
%% Three forms:
%% - data/register
%% * insert RAW, WAR, WAW dependences
%% - memory
%% * stores serialize memory references
%% * alias analysis may allow loads to bypass stores
%% - control
%% * unsafe operations are 'trapped' between branches
%% * branches are ordered
%%
%% returns { [{Index,Instr}], DepDAG }
%% DepDAG is defined below.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : deps
%% Argument : BB - Basic block
%% Returns : {IxBB,DAG} - indexed block and dependence graph. DAG consists
%% of both Dag and Preds, where Preds is number
%% of predecessors for nodes.
%% Description : Collect instruction dependences.
%%
%% Three forms:
%% - data/register
%% * insert RAW, WAR, WAW dependences
%% - memory
%% * stores serialize memory references
%% * alias analysis may allow loads to bypass stores
%% - control
%% * unsafe operations are 'trapped' between branches
%% * branches are ordered
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Build the dependence DAG for an indexed block by adding register (dd),
%% memory (md) and control (cd) dependences in turn. Returns {DAG, Preds}
%% where Preds counts the predecessors of each node.
deps(IxBB) ->
    DAG0 = empty_dag(length(IxBB)),
    {_DepTab, DAG1} = dd(IxBB, DAG0),
    cd(IxBB, md(IxBB, DAG1)).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : empty_dag
%% Argument : N - number of nodes
%% Returns : empty DAG
%% Description : DAG consists of dependence graph and predecessors
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% DAG representation: {arc vector (NodeIx -> [{Lat, Succ}]),
%% predecessor-count vector (NodeIx -> integer)}.
empty_dag(NrNodes) ->
    {hipe_vectors:new(NrNodes, []), hipe_vectors:new(NrNodes, 0)}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : indexed_bb
%% Argument : BB - basic block
%% Returns : [{N, Instr}]
%% Description : Puts indexes to all instrs of a block, removes comments.
%% NOP's are also removed because if both sparc_schedule and
%% sparc_post_schedule options are used, the first pass will
%% add nop's before the branch if necessary, and these are
%% removed before scheduling the second pass.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Number the instructions of a block 1..N, dropping comments and NOPs
%% (NOPs may have been added by an earlier scheduling pass and are
%% recreated as needed when delay slots are filled again).
indexed_bb(BB) ->
    indexed_bb(BB, 1).

indexed_bb([], _Ix) ->
    [];
indexed_bb([#comment{} | Rest], Ix) ->
    indexed_bb(Rest, Ix);
indexed_bb([#nop{} | Rest], Ix) ->
    indexed_bb(Rest, Ix);
indexed_bb([Instr | Rest], Ix) ->
    [{Ix, Instr} | indexed_bb(Rest, Ix + 1)].
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : dep_arc
%% Argument : N - Current node
%% Lat - Latency from current node to M
%% M - The dependent node
%% DAG - The dependence graph. Consists of both DAG and
%%                      predecessors
%% Returns     : A new DAG with the arc added and the number of predecessors
%%               for M increased.
%% Description : Adds a new arc to the graph; if an older arc goes from N to M
%%               it will be replaced with a new arc {max(OldLat, NewLat), M}.
%%               The number of predecessors for node M is increased.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Add (or merge) the arc N -[Lat]-> M. M's predecessor count is
%% increased only when the arc did not already exist; add_arc/3 reports
%% 'added' vs 'non_added' (an existing arc keeps the larger latency).
dep_arc(N, Lat, M, {Dag, Preds}) ->
    OldDeps = hipe_vectors:get(Dag, N-1),
    %% io:format("{OldDeps} = {~p}~n",[OldDeps]),
    {NewDeps, Status} = add_arc(Lat, M, OldDeps),
    %% io:format("{NewDeps, Status} = {~p, ~p}~n",[NewDeps, Status]),
    NewDag = hipe_vectors:set(Dag, N-1, NewDeps),
    NewPreds = case Status of
                   added -> % just increase preds if new arc was added
                       OldPreds = hipe_vectors:get(Preds, M-1),
                       hipe_vectors:set(Preds, M-1, OldPreds + 1);
                   non_added ->
                       Preds
               end,
    {NewDag, NewPreds}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : add_arc
%% Argument : Lat - The latency from current node to To.
%% To - The instr-id of the node which the dependence goes to
%%               Arcs - The dependencies that are already in the dep-graph
%% Returns : A dependence graph sorted by To.
%% Description : A new arc that is added is sorted in the right place, and if
%% there is already an arc between nodes A and B, the one with
%% the greatest latency is chosen.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Insert an arc {Lat, To} into a successor list kept sorted by target.
%% An existing arc to the same target is merged by keeping the larger
%% latency and reported as non_added so predecessor counts stay correct.
add_arc(Lat, To, []) ->
    {[{Lat, To}], added};
add_arc(NewLat, To, [{OldLat, To} | Rest]) ->
    {[{erlang:max(NewLat, OldLat), To} | Rest], non_added};
add_arc(Lat, To, [{Lat2, To2} | Rest]) when To < To2 ->
    {[{Lat, To}, {Lat2, To2} | Rest], added};
add_arc(Lat, To, [Arc | Rest]) ->
    {Rest1, Status} = add_arc(Lat, To, Rest),
    {[Arc | Rest1], Status}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% The register/data dependence DAG of a block is represented
%% as a mapping (Variable -> {NextWriter,NextReaders})
%% where NextWriter is a pair {Ix,Type}
%% and NextReaders is a list of pairs {Ix,Type}.
%%
%% Type is used to determine latencies of operations; on the UltraSparc,
%% latencies of arcs (n -> m) are determined by both n and m. (E.g., if
%% n is an integer op and m is a store, then latency is 0; if m is an
%% integer op, it's 1.)
%% Register/data dependences: walk the block from back to front (foldr)
%% so each instruction sees the next-writer/next-readers table of
%% everything that follows it.
dd(Instrs, DAG) ->
    lists:foldr(fun({N, I}, {DepTab, DAGAcc}) ->
                        add_deps(N, I, DepTab, DAGAcc)
                end,
                {empty_deptab(), DAG}, Instrs).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : add_deps
%% Argument : N - current node
%% Instr - current instr
%% DepTab - hashtable with {next-writer, next-readers} for reg
%% DAG - dependence graph
%% Returns : {DepTab, BlockInfo, DAG} - with new values
%% Description : Adds dependencies for node N to the graph. The registers that
%% node N defines and uses are used for computing the
%% dependencies to the following nodes.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Add all register dependences induced by node N: first arcs for the
%% registers it defines (write deps), then for those it uses (read deps).
add_deps(N, Instr, DepTab0, DAG0) ->
    {Defs, Uses} = def_use(Instr),
    Kind = dd_type(Instr),
    {DepTab1, DAG1} = add_write_deps(Defs, N, Kind, DepTab0, DAG0),
    add_read_deps(Uses, N, Kind, DepTab1, DAG1).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Instructions are classified into symbolic categories,
%% which are subsequently used to determine operation latencies
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Classify an instruction into a symbolic latency category used by the
%% target machine model. A multimove's latency grows with the number of
%% moves it expands to.
dd_type(#b{}) -> branch;
%% dd_type(#br{}) -> branch;
dd_type(#call_link{}) -> branch;
dd_type(#jmp_link{}) -> branch;
dd_type(#jmp{}) -> branch;
dd_type(#goto{}) -> branch;
dd_type(#load{}) -> load;
dd_type(#store{}) -> store;
dd_type(#alu{}) -> alu;
dd_type(#move{}) -> alu;
dd_type(#multimove{} = Instr) ->
    NrMoves = length(hipe_sparc:multimove_src(Instr)),
    {mmove, round(NrMoves/2)};
dd_type(#sethi{}) -> alu;
dd_type(#alu_cc{}) -> alu_cc;
%% dd_type(#cmov_cc{}) -> cmov_cc;
%% dd_type(#cmov_r{}) -> alu;
dd_type(#load_atom{}) -> alu;
dd_type(#load_address{}) -> alu;
dd_type(#pseudo_enter{}) -> pseudo;
dd_type(#pseudo_pop{}) -> pseudo;
dd_type(#pseudo_return{}) -> pseudo;
dd_type(#pseudo_spill{}) -> pseudo;
dd_type(#pseudo_unspill{}) -> pseudo.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : add_write_deps
%% Argument : Defs - registers that node N defines.
%% N - current node
%% Ty - the type of current instr
%% DepTab - Dependence-table
%% DAG - The dependence graph.
%% Returns : {DepTab,DAG} - with new values
%% Description : Adds dependencies to the graph for nodes that depends on the
%% registers that N defines.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% add_write_deps/5: fold over the registers defined by node N,
%% threading the {DepTab, DAG} pair through add_write_dep/5.
add_write_deps(Defs, N, Ty, DepTab, DAG) ->
    lists:foldl(fun(D, {DepTabAcc, DAGAcc}) ->
                        add_write_dep(D, N, Ty, DepTabAcc, DAGAcc)
                end,
                {DepTab, DAG},
                Defs).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : add_write_dep
%% Description : Updates the dependence table with N as next writer, and
%% updates the DAG with the dependencies from N to subsequent
%% nodes.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
add_write_dep(X,N,Ty,DepTab,DAG) ->
    %% Who writes/reads register X next (later in program order)?
    {NxtWriter,NxtReaders} = lookup(X,DepTab),
    %% Record N as the next writer of X (this resets the reader list).
    NewDepTab = writer(X,N,Ty,DepTab),
    %% Add WAW/RAW arcs from N to the previously recorded writer/readers.
    NewDAG = write_deps(N,Ty,NxtWriter,NxtReaders,DAG),
    {NewDepTab, NewDAG}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : write_deps
%% Argument : Instr - Current instr
%% Ty - Type of current instr
%% NxtWriter - The node that is the next writer of the ragister
%% that Instr defines.
%% NxtReaders - The nodes that are subsequent readers of the
%% register that N defines.
%% DAG - The dependence graph
%% Returns : Calls raw_deps that finally returns a new DAG with the new
%% dependence arcs added.
%% Description : If a next writer exists a dependence arc for this node is
%% added, and after this raw_deps is called to compute the
%% arcs for read-after-write dependencies.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
write_deps(Instr,Ty,NxtWriter,NxtReaders,DAG) ->
    DAG1 = case NxtWriter of
               none ->
                   %% No subsequent writer: no WAW arc needed.
                   DAG;
               {Instr,_} ->
                   %% Instr is already bound, so this clause matches
                   %% only when the next writer is this very node:
                   %% never add a self-arc.
                   DAG;
               {Wr,WrTy} ->
                   %% Write-after-write: Instr must precede Wr.
                   dep_arc(Instr,
                           hipe_target_machine:waw_latency(Ty,WrTy),
                           Wr, DAG)
           end,
    %% Add read-after-write arcs to all subsequent readers.
    raw_deps(Instr,Ty,NxtReaders,DAG1).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : raw_deps
%% Argument : Instr - current instr
%% Type - type of instr
%% Readers - subsequent readers
%% DAG - dependence graph
%% Returns : DAG - A new DAG with read-after-write dependencies added
%% Description : Updates the DAG with the dependence-arcs from Instr to the
%% subsequent readers, with the appropriate latencies.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% raw_deps/4: add a read-after-write arc from Instr to every
%% subsequent reader, with the machine RAW latency for the two
%% instruction categories.
raw_deps(Instr, Ty, Readers, DAG) ->
    lists:foldl(fun({Rd, RdTy}, DAGAcc) ->
                        dep_arc(Instr,
                                hipe_target_machine:raw_latency(Ty, RdTy),
                                Rd, DAGAcc)
                end,
                DAG, Readers).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : add_read_deps
%% Argument : Uses - The registers that node N uses.
%% N - Index of the current node.
%% Ty - Type of current node.
%% DepTab - Dependence table
%% DAG - Dependence graph
%% Returns : {DepTab, DAG} - with updated values.
%% Description : Adds the read dependencies from node N to subsequent ones,
%% according to the registers that N uses.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% add_read_deps/5: fold over the registers used by node N, threading
%% the {DepTab, DAG} pair through add_read_dep/5.
add_read_deps(Uses, N, Ty, DepTab, DAG) ->
    lists:foldl(fun(U, {DepTabAcc, DAGAcc}) ->
                        add_read_dep(U, N, Ty, DepTabAcc, DAGAcc)
                end,
                {DepTab, DAG},
                Uses).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : add_read_dep
%% Argument : X - Used register
%% N - Index of checked instr
%% Ty - Type of checked instr
%% DepTab - Hashtable with {next-writer, next-readers}
%% DAG - Dependence graph
%% Returns : {DepTab, DAG} - with updated values
%% Description : Looks up what the next-writer/next-readers are, and adjusts
%% the table with current node as new reader. Finally
%% read-dependencies are added to the DAG.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
add_read_dep(X,N,Ty,DepTab,DAG) ->
    %% Only the next writer matters for a read (WAR); the other
    %% readers impose no ordering among themselves.
    {NxtWriter,_NxtReaders} = lookup(X,DepTab),
    %% Record N as one more pending reader of X.
    NewDepTab = reader(X,N,Ty,DepTab),
    NewDAG = read_deps(N,Ty,NxtWriter,DAG),
    {NewDepTab, NewDAG}.
% If NxtWriter is 'none', then this var is not written subsequently
% Add WAR from Instr to NxtWriter (if it exists)
% *** UNFINISHED ***
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : read_deps
%% Argument : N - Index of current node
%% Ty - Type of current node
%% Writer - tuple {NextWriter, WrType} where NextWriter is the
%% subsequent instr that writes this register next time,
%% and WrType is the type of that instr.
%% DAG - The dependence graph
%% Returns : DAG
%% Description : Returns a new DAG if a next-writer exists, otherwise the old
%% DAG is returned.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
read_deps(_Instr,_Ty,none,DAG) ->
    %% Register is not written later in the block: no WAR arc needed.
    DAG;
read_deps(_Instr,_Ty,{_Instr,_},DAG) ->
    %% NOTE: underscore-prefixed variables still bind in Erlang, so the
    %% two occurrences of _Instr must unify -- this clause fires only
    %% when the next writer is this very instruction: skip self-arcs.
    DAG;
read_deps(Instr,Ty,{NxtWr,NxtWrTy},DAG) ->
    %% Write-after-read: the read must complete before the next write.
    dep_arc(Instr,hipe_target_machine:war_latency(Ty,NxtWrTy),NxtWr,
            DAG).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : empty_deptab
%% Description : Creates an empty dependence table (hash-table)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
empty_deptab() ->
    %% The table maps register -> {NextWriter, NextReaders}, where
    %% NextWriter is none | {Ix,Type} and NextReaders is [{Ix,Type}].
    gb_trees:empty().
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : lookup
%% Argument : X - key (register)
%% DepTab - dependence table
%% Returns : {NextWriter, NextReaders}
%% Description : Returns next writer and a list of following readers on
%% register X.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% lookup/2: fetch the {NextWriter, NextReaders} pair recorded for
%% register X, defaulting to {none, []} when X has no entry yet.
%% (The original bound the pair's elements as W/Rs without using them,
%% causing "unused variable" compiler warnings.)
lookup(X, DepTab) ->
    case gb_trees:lookup(X, DepTab) of
        none ->
            {none, []};
        {value, Val} ->
            %% Val is the {NextWriter, NextReaders} pair stored by
            %% writer/4 or reader/4.
            Val
    end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : writer
%% Argument : X - key (register)
%% N - index of writer
%% Ty - type of writer
%% DepTab - dependence table to be updated
%% Returns : DepTab - new dependence table
%% Description : Sets N tobe next writer on X
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
writer(X, N, Ty, DepTab) ->
    %% N becomes the next writer of X; the reader list is reset,
    %% because readers seen so far now belong to this write, not to
    %% any earlier one.
    gb_trees:enter(X, {{N, Ty}, []}, DepTab).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : reader
%% Argument : X - key (register)
%% N - index of reader
%% Ty - type of reader
%% DepTab - dependence table to be updated
%% Returns : DepTab - new dependence table
%% Description : Adds N to the dependence table as a reader.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
reader(X,N,Ty,DepTab) ->
    %% Keep the current next-writer; prepend N to the reader list.
    {W,Rs} = lookup(X,DepTab),
    gb_trees:enter(X,{W,[{N,Ty}|Rs]},DepTab).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% The following version of md/2 separates heap- and stack operations,
%% which allows for greater reordering.
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : md
%% Argument : IxBB - indexed block
%% DAG - dependence graph
%% Returns : DAG - new dependence graph
%% Description : Adds arcs for load/store dependencies to the DAG.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
md(IxBB, DAG) ->
    md(IxBB,empty_md_state(),DAG).

md([],_,DAG) -> DAG;
md([{N,I}|Is],St,DAG) ->
    case md_type(I) of
        other ->
            %% Not a memory operation: no memory dependences added.
            md(Is,St,DAG);
        {st,T} ->
            %% A store may conflict with earlier overlapping stores
            %% (WAW) and earlier overlapping loads (WAR).
            { WAW_nodes, WAR_nodes, NewSt } = st_overlap(N,T,St),
            md(Is,NewSt,
               md_war_deps(WAR_nodes,N,md_waw_deps(WAW_nodes,N,DAG)));
        {ld,T} ->
            %% A load may conflict with earlier overlapping stores (RAW).
            { RAW_nodes, NewSt } = ld_overlap(N,T,St),
            md(Is,NewSt,
               md_raw_deps(RAW_nodes,N,DAG))
    end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : md_war_deps
%% Argument : WAR_nodes - write-after-read nodes depending on N
%% N - index of current instr
%% DAG - dependence graph
%% Returns : DAG - updated DAG
%% Description : Adds arcs for write-after-read dependencies for N
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% md_war_deps/3: add a write-after-read arc from each reader in Ms to
%% the store N, using the machine's memory WAR latency.
md_war_deps(Ms, N, DAG) ->
    lists:foldl(fun(M, DAGAcc) ->
                        dep_arc(M, hipe_target_machine:m_war_latency(), N, DAGAcc)
                end,
                DAG, Ms).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : md_waw_deps
%% Argument : WAW_nodes - write-after-write nodes depending on N
%% N - index of current instr
%% DAG - dependence graph
%% Returns : DAG - updated DAG
%% Description : Adds arcs for write-after-write dependencies for N
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% md_waw_deps/3: add a write-after-write arc from each store in Ms to
%% the store N, using the machine's memory WAW latency.
md_waw_deps(Ms, N, DAG) ->
    lists:foldl(fun(M, DAGAcc) ->
                        dep_arc(M, hipe_target_machine:m_waw_latency(), N, DAGAcc)
                end,
                DAG, Ms).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : md_raw_deps
%% Argument : RAW_nodes - read-after-write nodes depending on N
%% N - index of current instr
%% DAG - dependence graph
%% Returns : DAG - updated DAG
%% Description : Adds arcs for read-after-write dependencies for N
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% md_raw_deps/3: add a read-after-write arc from each store in Ms to
%% the load N, using the machine's memory RAW latency.
md_raw_deps(Ms, N, DAG) ->
    lists:foldl(fun(M, DAGAcc) ->
                        dep_arc(M, hipe_target_machine:m_raw_latency(), N, DAGAcc)
                end,
                DAG, Ms).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : empty_md_state
%% Description : Returns an empty memorydependence state, eg. 4 lists
%% representing {StackStores, HeapStores, StackLoads, HeapLoads}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% {StackStores, HeapStores, StackLoads, HeapLoads}; stack entries are
%% {InstrId, Offset}, heap entries are {InstrId, Reg, Offset}.
empty_md_state() -> {[], [], [], []}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : md_type
%% Argument : I - instr
%% Description : Maps the instr-type to a simplified type, telling if it's
%% store/load resp. heap or stack.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Classify a memory instruction as a stack or heap load/store based
%% on whether its base register is the stack pointer.
md_type(I) ->
    case I of
        #load{} ->
            Sp = hipe_sparc_registers:stack_pointer(),
            Src = hipe_sparc:load_src(I),
            N = hipe_sparc:reg_nr(Src),
            Off = hipe_sparc:load_off(I),
            if
                N =:= Sp -> % operation on stack
                    {ld,{sp,Off}};
                true ->
                    %% Any base register other than SP is treated as a
                    %% heap access.
                    {ld,{hp,Src,Off}}
            end;
        #store{} ->
            Sp = hipe_sparc_registers:stack_pointer(),
            Dst = hipe_sparc:store_dest(I),
            N = hipe_sparc:reg_nr(Dst),
            Off = hipe_sparc:store_off(I),
            if
                N =:= Sp ->
                    {st,{sp,Off}};
                true ->
                    {st,{hp,Dst,Off}}
            end;
        _ ->
            %% Non-memory instruction.
            other
    end.
%% Given a memory operation and a 'memory op state',
%% overlap(N,MemOp,State) returns { Preceding_Dependent_Ops, NewState }.
%% which are either a tuple { WAW_deps, WAR_deps } or a list RAW_deps.
%%
%% NOTES:
%% Note that Erlang's semantics ("heap stores never overwrite existing data")
%% means we can be quite free in reordering stores to the heap.
%% Ld/St to the stack are simply handled by their offsets; since we do not
%% rename the stack pointer, this is sufficient.
%% *** We assume all memory ops have uniform size = 4 ***
%%
%% NOTES:
%% The method mentioned above has now been changed because the assumption that
%% "heap stores never overwrite existing data" caused a bug when the
%% process-pointer was treated the same way as the heap. We were also told
%% that the semantics can possibly change in the future, so it would be more
%% safe to treat the heap store/loads as the stack.
%% A future improvement can be to do an alias analysis to give more freedom
%% in reordering stuff...
%%
%% Alias state:
%% { [StackOp], [HeapOp], [StackOp], [HeapOp] }
%% where StackOp = {InstrID, Offset}
%% HeapOp = {InstrID, Reg, Offset}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : st_overlap
%% Argument : N - Index of current node
%% Type - {sp,Off} or {hp,Dst,Off}, store on stack or heap
%% State - { [StackStrs], [HeapStrs], [StackLds], [HeapLds] }
%% where StackStrs/StackLds = {InstrID, Offset}
%% and HeapStrs/HeapLds = {InstrID, Reg, Offset}
%% Returns : { DepStrs, DepLds, State } -
%% where DepStrs/DepLds = [NodeId]
%% and State is the new state
%% Description : Adds dependencies for overlapping stores.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
st_overlap(N, {sp, Off}, {St_Sp, St_Hp, Ld_Sp, Ld_Hp}) ->
    %% Stack store: earlier stack stores/loads at the same offset are
    %% dependences; the overlapping entries are dropped from the state
    %% since this store now shadows them.
    {DepSt, IndepSt_Sp} = st_sp_dep(St_Sp, Off),
    {DepLd, IndepLd_Sp} = ld_sp_dep(Ld_Sp, Off),
    {DepSt, DepLd, {[{N, Off}|IndepSt_Sp], St_Hp, IndepLd_Sp, Ld_Hp}};
st_overlap(N, {hp, Dst, Off}, {St_Sp, St_Hp, Ld_Sp, Ld_Hp}) ->
    DstOff = {Dst, Off},
    %% Heap store: note the dependent heap stores are NOT removed from
    %% the state (St_Hp is kept in full, the independent partition is
    %% ignored) -- presumably a conservative choice given unknown heap
    %% aliasing; only dependent heap loads are filtered out.
    {DepSt,_IndepSt_Hp} = st_hp_dep(St_Hp, DstOff),
    {DepLd, IndepLd_Hp} = ld_hp_dep(Ld_Hp, DstOff),
    {DepSt, DepLd, {St_Sp, [{N, Dst, Off}|St_Hp], Ld_Sp, IndepLd_Hp}}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : ld_overlap
%% Argument : N - Index of current node
%% Type - {sp,Off} or {hp,Dst,Off}, store on stack or heap
%% State - { [StackStrs], [HeapStrs], [StackLds], [HeapLds] }
%% where StackStrs/StackLds = {InstrID, Offset}
%% and HeapStrs/HeapLds = {InstrID, Reg, Offset}
%% Returns : { DepStrs, State }
%% Description : Adds dependencies for overlapping laods
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
ld_overlap(N, {sp, Off}, {St_Sp, St_Hp, Ld_Sp, Ld_Hp}) ->
    %% Stack load: depends on earlier stack stores at the same offset.
    %% Loads never invalidate state entries; the load is just recorded.
    DepSt = sp_dep_only(St_Sp, Off),
    {DepSt, {St_Sp, St_Hp, [{N, Off}|Ld_Sp], Ld_Hp}};
ld_overlap(N, {hp, Src, Off}, {St_Sp, St_Hp, Ld_Sp, Ld_Hp}) ->
    %% Heap load: depends on earlier heap stores that may alias it.
    DepSt = hp_dep_only(St_Hp, Src, Off),
    {DepSt, {St_Sp, St_Hp, Ld_Sp, [{N, Src, Off}|Ld_Hp]}}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : st_sp_dep
%% Description : Adds dependencies that are depending on a stack store
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
st_sp_dep(Stores, Off) ->
    %% Partition earlier stack stores into those at Off (dependent)
    %% and the rest (independent).
    sp_dep(Stores, Off, [], []).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : ld_sp_dep
%% Description : Adds dependencies that are depending on a stack load
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
ld_sp_dep(Loads, Off) ->
    %% Partition earlier stack loads into those at Off (dependent)
    %% and the rest (independent).
    sp_dep(Loads, Off, [], []).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : st_hp_dep
%% Description : Adds dependencies that are depending on a heap store
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
st_hp_dep(Stores, {_Reg, _Off} = RegOff) ->
    %% Partition earlier heap stores against the {Reg, Off} location.
    hp_dep(Stores, RegOff, [], []).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : ld_hp_dep
%% Description : Adds dependencies that are depending on a heap load
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
ld_hp_dep(Loads, {_Reg, _Off} = RegOff) ->
    %% Partition earlier heap loads against the {Reg, Off} location.
    hp_dep(Loads, RegOff, [], []).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : sp_dep
%% Description : Returns {Dependent, Independent} which are lists of nodes
%% that depends or not on a stack load/store
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% sp_dep/4: split stack ops into those at exactly offset Off
%% (dependent; only the node id is kept) and the rest (independent;
%% the whole entry is kept). Both result lists come out in reverse
%% traversal order, matching the original accumulator behaviour.
sp_dep(Ops, Off, Dep0, Indep0) ->
    lists:foldl(fun({N, O}, {Dep, Indep}) when O =:= Off ->
                        {[N | Dep], Indep};
                   (Op, {Dep, Indep}) ->
                        {Dep, [Op | Indep]}
                end,
                {Dep0, Indep0},
                Ops).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : hp_dep
%% Description : Returns {Dependent, Independent} which are lists of nodes
%% that depends or not on a heap load/store
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% hp_dep/4: split heap ops relative to the location {Reg, Off}. An op
%% is independent only when it uses the *same* base register at a
%% *different* offset; anything else (same location, or a different --
%% possibly aliasing -- base register) counts as dependent. Result
%% lists are in reverse traversal order, as with the original
%% accumulators.
hp_dep(Ops, {Reg, Off}, Dep0, Indep0) ->
    lists:foldl(fun({N, R, O} = Op, {Dep, Indep}) ->
                        case R =:= Reg andalso O =/= Off of
                            true -> {Dep, [Op | Indep]};
                            false -> {[N | Dep], Indep}
                        end
                end,
                {Dep0, Indep0},
                Ops).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : sp_dep_only
%% Description : Returns a list of nodes that are depending on a stack store
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% sp_dep_only/2: node ids of the stack ops recorded at exactly offset
%% Off, preserving the order of the input list.
sp_dep_only(Stores, Off) ->
    lists:filtermap(fun({N, Off0}) when Off0 =:= Off -> {true, N};
                       (_) -> false
                    end,
                    Stores).
%% Dependences from heap stores to heap loads.
%% *** UNFINISHED ***
%% - but works
%% This is somewhat subtle:
%% - a heap load can only bypass a heap store if we KNOW it won't
%% load the stored value
%% - unfortunately, we do not know the relationships between registers
%% at this point, so we can't say that store(p+4) is independent of
%% load(q+0).
%% (OR CAN WE? A bit closer reasoning might show that it's possible?)
%% - We can ONLY say that st(p+c) and ld(p+c') are independent when c /= c'
%%
%% (As said before, it might be possible to lighten this restriction?)
%% hp_dep_only/3: node ids of heap stores that a load of {Reg, Off}
%% may depend on. A store is known independent only when it goes
%% through the same base register at a different offset; everything
%% else is a potential conflict (see the aliasing notes above).
%% Input order is preserved.
hp_dep_only(Stores, Reg, Off) ->
    [N || {N, R, O} <- Stores, R =/= Reg orelse O =:= Off].
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Control dependences:
%% - add dependences so that
%% * branches are performed in order
%% * unsafe operations are 'fenced in' by surrounding branches
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : cd
%% Argument : IxBB - indexed block
%% DAG - dependence graph
%% Returns : DAG - new dependence graph
%% Description : Adds conditional dependencies to the DAG
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
cd(IxBB,DAG) ->
    %% Start with no previous branch, no pending unsafe ops and no
    %% pending "other" instructions.
    cd(IxBB, DAG, none, [], []).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : cd
%% Argument : IxBB - indexed block
%% DAG - dependence graph
%% PrevBr - previous branch
%% PrevUnsafe - previous unsafe instr (mem-op)
%% PrevOthers - previous other instrs, used to "fix" preceeding
%% instrs so they don't bypass a branch.
%% Returns : DAG - new dependence graph
%% Description : Adds conditional dependencies to the graph.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
cd([], DAG, _PrevBr, _PrevUnsafe, _PrevOthers) ->
    DAG;
cd([{N,I}|Xs], DAG, PrevBr, PrevUnsafe, PrevOthers) ->
    case cd_type(I) of
        {branch,Ty} ->
            %% Fence in the preceding "other" instructions, then order
            %% this branch after the previous branch and pending
            %% unsafe ops. A new window starts: reset both lists.
            DAG1 = cd_branch_to_other_deps(N, PrevOthers, DAG),
            NewDAG = cd_branch_deps(PrevBr, PrevUnsafe, N, Ty, DAG1),
            cd(Xs,NewDAG,{N,Ty},[],[]);
        {unsafe,Ty} ->
            %% Unsafe ops must stay after the previous branch, and are
            %% remembered so the next branch is ordered after them.
            NewDAG = cd_unsafe_deps(PrevBr,N,Ty,DAG),
            cd(Xs, NewDAG, PrevBr, [{N,Ty}|PrevUnsafe], PrevOthers);
        {other,_Ty} ->
            %% Plain instruction: just remember it so a later branch
            %% can pin it in place.
            cd(Xs, DAG, PrevBr, PrevUnsafe, [N|PrevOthers])
    end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : cd_branch_to_other_deps
%% Argument : N - index of branch
%% Ms - list of indexes of "others" preceding instrs
%% DAG - dependence graph
%% Returns : DAG - new graph
%% Description : Makes preceding instrs fixed so they don't bypass a branch
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% cd_branch_to_other_deps/3: pin every preceding "other" instruction
%% before the branch N with a zero-latency arc, so none of them can be
%% scheduled past it.
cd_branch_to_other_deps(N, Ms, DAG) ->
    lists:foldl(fun(M, DAGAcc) ->
                        dep_arc(M, zero_latency(), N, DAGAcc)
                end,
                DAG, Ms).
%% Is the operation a branch, an unspeculable op or something else?
%% Returns
%% {branch,BranchType}
%% {unsafe,OpType}
%% {other,OpType}
%% *** UNFINISHED ***
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : cd_type
%% Argument : I - instr
%% Description : Maps instrs to a simpler type.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Classify an instruction for control-dependence purposes.
cd_type(I) ->
    case I of
        #goto{} ->
            {branch,uncond};
        #br{} ->
            {branch,'cond'};
        #b{} ->
            {branch,'cond'};
        #call_link{} ->
            {branch,call};
        #jmp_link{} ->
            {branch,call};
        #jmp{} ->
            {branch,call};
        #load{} ->
            {unsafe,load};
        #store{} ->
            %% NOTE(review): stores are tagged {unsafe,load} as well --
            %% possibly a typo for {unsafe,store}. Confirm against the
            %% latency functions in hipe_target_machine before
            %% changing (the section above is marked UNFINISHED).
            {unsafe,load};
        T ->
            %% Anything else carries its own term as the sub-type.
            {other,T}
    end.
%% add dependences to keep order of branches + unspeculable ops:
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : cd_branch_deps
%% Argument : PrevBr - preceeding branch
%% PrevUnsafe - preceeding unsafe ops, eg, mem-ops
%% N - current id.
%% Ty - type of current instr
%% DAG - dependence graph
%% Returns : DAG - new DAG
%% Description : Adds arcs between branches and calls deps_to_unsafe that adds
%% arcs between branches and unsafe ops.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
cd_branch_deps(PrevBr, PrevUnsafe, N, Ty, DAG) ->
    DAG1 = case PrevBr of
               none ->
                   %% First branch in the block: nothing to order after.
                   DAG;
               {Br,BrTy} ->
                   %% Branches must execute in program order.
                   dep_arc(Br,
                           hipe_target_machine:br_br_latency(BrTy,Ty),
                           N, DAG)
           end,
    %% Order this branch after all pending unsafe ops.
    deps_to_unsafe(PrevUnsafe, N, Ty, DAG1).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : deps_to_unsafe
%% Description : Adds dependencies between unsafe's and branches
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% deps_to_unsafe/4: add an arc from each pending unsafe op to the
%% branch N so that unsafe ops cannot move past the branch that
%% follows them.
deps_to_unsafe(Unsafe, N, Ty, DAG) ->
    lists:foldl(fun({M, UTy}, DAGAcc) ->
                        dep_arc(M,
                                hipe_target_machine:unsafe_to_br_latency(UTy, Ty),
                                N, DAGAcc)
                end,
                DAG, Unsafe).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : cd_unsafe_deps
%% Description : Adds dependencies between branches and unsafe's
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
cd_unsafe_deps(none, _, _, DAG) ->
    %% No preceding branch: the unsafe op is free to move upwards.
    DAG;
cd_unsafe_deps({Br,BrTy}, N, Ty, DAG) ->
    %% The unsafe op must stay after the branch that precedes it.
    dep_arc(Br, hipe_target_machine:br_to_unsafe_latency(BrTy, Ty), N, DAG).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : def_use
%% Argument : Instr
%% Description : Returns the registers that Instr defines resp. uses as 2 lists
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def_use(Instr) ->
    %% {Defined registers, Used registers} as reported by hipe_sparc.
    {hipe_sparc:defines(Instr), hipe_sparc:uses(Instr)}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : move_or_alu
%% Description : True if the instruction is a move or an alu; false otherwise
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% move_or_alu/1: true for move and alu instructions, false for
%% everything else.
move_or_alu(Instr) ->
    case Instr of
        #move{} -> true;
        #alu{} -> true;
        _ -> false
    end.
%% Debugging stuff below %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-ifdef(debug1).
%% Dump the block, DAG, indexed block, schedule and final instruction
%% list; compiled in only when the debug1 flag is set.
debug1_stuff(Blk, DAG, IxBlk, Sch, X) ->
    io:format("Blk: ~p~n",[Blk]),
    io:format("DAG: ~n~p~n~p",[DAG,IxBlk]),
    io:format("~n"),
    print_instrs(IxBlk),
    print_sch(Sch, IxBlk),
    print_instrs2(X).

%% Pretty-print an indexed instruction list, one "(Ix): instr" per line.
print_instrs([]) ->
    io:format("~n");
print_instrs([{N,Instr} | Instrs]) ->
    io:format("(~p): ",[N]),
    hipe_sparc_pp:pp_instr(Instr),
    io:format("~p~n",[element(1,Instr)]),
    print_instrs(Instrs).

%% Pretty-print a plain (un-indexed) instruction list.
print_instrs2([]) ->
    io:format("~n");
print_instrs2([Instr | Instrs]) ->
    hipe_sparc_pp:pp_instr(Instr),
    print_instrs2(Instrs).

%% Pretty-print a schedule of {{cycle,C},{node,I}} pairs, resolving
%% each node index against the indexed block.
print_sch([],_) -> io:format("~n");
print_sch([{{cycle,Cycle},{node,I}} | Rest], IxBlk) ->
    io:format("{C~p, N~p} ",[Cycle,I]),
    print_node(I, IxBlk),
    print_sch(Rest, IxBlk).

%% Print the instruction with index I, if present in the indexed block.
print_node(_, []) ->
    io:format("~n");
print_node(I, [{I, Instr} | _]) ->
    hipe_sparc_pp:pp_instr(Instr);
print_node(I, [_ | IxBlk]) ->
    print_node(I, IxBlk).

-else.
%% No-op stub when debug1 is disabled.
debug1_stuff(_Blk, _DAG, _IxBlk, _Sch, _X) ->
    ok.
-endif.
%% -------------------------------------------------------------------
%%
%% riak_core_coverage_plan: Create a plan to cover a minimal set of VNodes.
%%
%% Copyright (c) 2007-2015 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc A module to calculate plans to cover a set of VNodes.
%%
%% There are two types of plans available: a "traditional"
%% coverage plan which minimizes the set of VNodes to be
%% contacted. This is used internally for functionality such as
%% 2i and Riak Pipe.
%%
%% There is also a new "subpartition" coverage plan designed to
%% achieve the opposite: allow clients to make parallel requests
%% across as much of the cluster as is desired, designed for use
%% with Basho Data Platform.
%%
%% Both plan types are now available through the protocol buffers
%% API, delivered as discrete opaque binary chunks to be sent
%% with queries that support the functionality (primarily 2i).
%% Example traditional coverage plan for a two node, 8 vnode cluster
%% at nval=3
%% {
%% %% First component is a list of {vnode hash, node name} tuples
%% [
%% {0, 'dev1@127.0.0.1'},
%% {548063113999088594326381812268606132370974703616, 'dev2@127.0.0.1'},
%% {913438523331814323877303020447676887284957839360, 'dev2@127.0.0.1'}
%% ],
%% %% Second component is a list of {vnode hash, [partition list]}
%% %% tuples representing filters when not all partitions managed by a
%% %% vnode are required to complete the coverage plan
%% [
%% {913438523331814323877303020447676887284957839360,
%% [730750818665451459101842416358141509827966271488,
%% 913438523331814323877303020447676887284957839360]
%% }
%% ]
%% }
%% Snippet from a subpartition coverage plan for a two node, 8 vnode
%% cluster at nval=3, with each partition represented twice for up to
%% 16 parallel queries.
%% The nested tuple in the third position is a representation of the
%% mask against the cluster's full keyspace and bits necessary to
%% shift keys right to compare against the mask (or shift the mask
%% left to represent its actual value).
%% [
%% %% Second vnode, first half of first partition
%% {182687704666362864775460604089535377456991567872,
%% 'dev2@127.0.0.1', {0, 156}
%% },
%% %% Second vnode, second half of first partition
%% {182687704666362864775460604089535377456991567872,
%% 'dev2@127.0.0.1', {1, 156}},
%% %% Third vnode, first half of second partition
%% {365375409332725729550921208179070754913983135744,
%% 'dev1@127.0.0.1', {2, 156}},
%% %% Third vnode, second half of second partition
%% {365375409332725729550921208179070754913983135744,
%% 'dev1@127.0.0.1', {3, 156}},
%% ...
%% ]
-module(riak_core_coverage_plan).
-include("riak_core_vnode.hrl").
-ifdef(TEST).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-endif.
%% API
-export([create_plan/5, create_plan/6, create_subpartition_plan/6]).
-export([replace_subpartition_chunk/7, replace_traditional_chunk/7]).
%% For other riak_core applications' coverage plan modules
-export([identify_unavailable_vnodes/3, identify_unavailable_vnodes/4]).
-export([add_offset/3]).
-type ring_size() :: pos_integer().
-type index() :: chash:index_as_int().
%% This module is awash with lists of integers with the same range but
%% and similar but distinct meanings (vnodes and partitions), or
%% different ranges but same meanings (partition short id vs partition
%% hash), or different values depending on context (partitions are
%% incremented by a full RingIndexInc when used as traditional cover
%% filters).
%%
%% Thus, tagged tuples are used in many places to make it easier for
%% maintainers to keep track of what data is flowing where.
%% IDs (vnode, partition) are integers in the [0, RingSize) space
%% (and trivially map to indexes). Private functions deal with IDs
%% instead of indexes as much as possible.
%% ID, ring size, ring index increment
-type vnode_id() :: {'vnode_id', non_neg_integer(), ring_size()}.
-type partition_id() :: {'partition_id', non_neg_integer(), ring_size()}.
%% Not a tagged tuple. ID + bits necessary to shift the ID to the left
%% to create the subpartition index.
-type subpartition_id() :: {non_neg_integer(), pos_integer()}.
-type req_id() :: non_neg_integer().
-type coverage_vnodes() :: [{index(), node()}].
-type vnode_filters() :: [{index(), [index()]}].
-type coverage_plan() :: {coverage_vnodes(), vnode_filters()}.
%% Vnode index, node, { Subpartition id, BSL }
-type subp_plan() :: [{index(), node(), subpartition_id()}].
-export_type([coverage_plan/0, coverage_vnodes/0, vnode_filters/0]).
%% Function to determine nodes currently available. This can be
%% swapped out for testing to avoid using meck. The argument ignored
%% here is the chashbin, potentially useful for testing.
-define(AVAIL_NODE_FUN, fun(Svc, _) -> riak_core_node_watcher:nodes(Svc) end).
%% ===================================================================
%% Public API
%% ===================================================================
%% @doc Create Riak's traditional coverage plan to distribute work to
%% a minimal set of covering VNodes around the ring.
-spec create_plan(vnode_selector(),
                  pos_integer(),
                  pos_integer(),
                  req_id(), atom()) ->
                         {error, term()} | coverage_plan().
create_plan(VNodeTarget, NVal, PVC, ReqId, Service) ->
    %% Delegates to create_plan/6 with no request term.
    %% jdaily added a sixth parameter, _Request, which is ignored
    %% here, apparently used in coverage API work. See
    %% riak_kv_qry_coverage_plan:create_plan for more clues.
    create_plan(VNodeTarget, NVal, PVC, ReqId, Service, undefined).
-spec create_plan(vnode_selector(),
                  pos_integer(),
                  pos_integer(),
                  req_id(), atom(), term()) ->
                         {error, term()} | coverage_plan().
create_plan(VNodeTarget, NVal, PVC, ReqId, Service, _Request) ->
    %% Build the plan against the current consistent-hash ring, using
    %% the production node-availability lookup.
    {ok, CHBin} = riak_core_ring_manager:get_chash_bin(),
    create_traditional_plan(VNodeTarget, NVal, PVC, ReqId, Service,
                            CHBin, ?AVAIL_NODE_FUN).
%% @doc Create a "mini" traditional coverage plan to replace
%% components of a previously-generated plan that are not useful
%% to the client because the node is unavailable
-spec replace_traditional_chunk(VnodeIdx :: index(),
                                Node :: node(),
                                Filters :: list(index()),
                                NVal :: pos_integer(),
                                ReqId :: req_id(),
                                DownNodes :: list(node()),
                                Service :: atom()) ->
                                       {error, term()} | coverage_plan().
replace_traditional_chunk(VnodeIdx, Node, Filters, NVal,
                          ReqId, DownNodes, Service) ->
    %% Fetch the current ring and delegate to the 9-argument variant
    %% with the production node-availability function.
    {ok, CHBin} = riak_core_ring_manager:get_chash_bin(),
    replace_traditional_chunk(VnodeIdx, Node, Filters, NVal,
                              ReqId, DownNodes, Service, CHBin,
                              ?AVAIL_NODE_FUN).
%% @doc Create a coverage plan with at least one slice per partition,
%% originally designed for parallel extraction of data via 2i.
-spec create_subpartition_plan('all'|'allup',
                               pos_integer(),
                               pos_integer() | {pos_integer(), pos_integer()},
                               pos_integer(),
                               req_id(), atom()) ->
                                      {error, term()} | subp_plan().
create_subpartition_plan(VNodeTarget, NVal, {MinPar, RingSize}, PVC, ReqId, Service) ->
    %% Given a minimum desired parallelism and the ring size, pick the
    %% subpartition count: the ring size if it already satisfies the
    %% request, otherwise the next power of two >= the minimum.
    Count = case MinPar =< RingSize of
                true -> RingSize;
                false -> next_power_of_two(MinPar)
            end,
    create_subpartition_plan(VNodeTarget, NVal, Count, PVC, ReqId, Service);
create_subpartition_plan(VNodeTarget, NVal, Count, PVC, ReqId, Service) ->
    %% IMPORTANT: `Count' is assumed to be a power of 2. Anything else
    %% will behave badly.
    {ok, ChashBin} = riak_core_ring_manager:get_chash_bin(),
    create_subpartition_plan(VNodeTarget, NVal, Count, PVC, ReqId, Service,
                             ChashBin, ?AVAIL_NODE_FUN).
%% @doc Create a "mini" traditional coverage plan to replace
%% components of a previously-generated plan that are not useful
%% to the client because the node is unavailable
-spec replace_subpartition_chunk(VnodeIdx :: index(),
                                 Node :: node(),
                                 {Mask :: non_neg_integer(),
                                  Bits :: non_neg_integer()},
                                 NVal :: pos_integer(),
                                 ReqId :: req_id(),
                                 DownNodes :: list(node()),
                                 Service :: atom()) ->
                                        {error, term()} | subp_plan().
replace_subpartition_chunk(VnodeIdx, Node, {Mask, Bits}, NVal,
                           ReqId, DownNodes, Service) ->
    %% Fetch the current ring and delegate to the 9-argument variant
    %% with the production node-availability function.
    {ok, CHBin} = riak_core_ring_manager:get_chash_bin(),
    replace_subpartition_chunk(VnodeIdx, Node, {Mask, Bits}, NVal,
                               ReqId, DownNodes, Service, CHBin,
                               ?AVAIL_NODE_FUN).
%% ====================================================================
%% Internal functions that are useful for other riak_core applications
%% that need to generate custom coverage plans
%% ====================================================================
%% @doc Return the tagged vnode IDs owned by nodes considered offline
%% for `Service', using the module's default node-availability fun
%% (?AVAIL_NODE_FUN, defined elsewhere in this module).
-spec identify_unavailable_vnodes(chashbin:chashbin(), pos_integer(), atom()) ->
          list(vnode_id()).
identify_unavailable_vnodes(CHBin, RingSize, Service) ->
    identify_unavailable_vnodes(CHBin, RingSize, Service, ?AVAIL_NODE_FUN).
%% @doc As identify_unavailable_vnodes/3, but with an explicit
%% availability fun so callers (notably tests) can substitute their
%% own view of which nodes are up.
-spec identify_unavailable_vnodes(chashbin:chashbin(), pos_integer(), atom(),
                                  fun((atom(), binary()) -> list(node()))) ->
          list(vnode_id()).
identify_unavailable_vnodes(CHBin, RingSize, Service, AvailNodeFun) ->
    %% Get a list of the VNodes owned by any unavailable nodes
    [{vnode_id, index_to_id(Index, RingSize), RingSize} ||
        {Index, _Node}
            <- riak_core_apl:offline_owners(
                 AvailNodeFun(Service, CHBin), CHBin)].
%% Shift Position by Offset, wrapping the result into [0, Top). The
%% extra Top term keeps the left operand of `rem' non-negative, so a
%% negative Offset (down to -Top) still yields a value >= 0.
add_offset(Position, Offset, Top) ->
    Shifted = Position + Top + Offset,
    Shifted rem Top.
%% ====================================================================
%% Internal functions
%% ====================================================================
%% @private
%% @doc Build a traditional coverage plan: a minimal set of
%% {VNodeIndex, Node} pairs, plus keyspace filters for vnodes that
%% only partially contribute, covering every partition PVC times.
%% Returns {error, insufficient_vnodes_available} for target 'all'
%% when full coverage is impossible.
create_traditional_plan(VNodeTarget, NVal, PVC, ReqId, Service, CHBin, AvailNodeFun) ->
    RingSize = chashbin:num_partitions(CHBin),
    %% Calculate an offset based on the request id to offer the
    %% possibility of different sets of VNodes being used even when
    %% all nodes are available. Used in compare_vnode_keyspaces as a
    %% tiebreaker.
    Offset = ReqId rem NVal,
    AllVnodes = list_all_vnode_ids(RingSize),
    %% Older versions of this call chain used the same list of
    %% integers for both vnode IDs and partition IDs, which made for
    %% confusing reading. We can cheat a little less obnoxiously
    AllPartitions = lists:map(fun({vnode_id, Id, RS}) ->
                                      {partition_id, Id, RS} end,
                              AllVnodes),
    UnavailableVnodes = identify_unavailable_vnodes(CHBin, RingSize,
                                                    Service, AvailNodeFun),
    %% Create function to map coverage keyspaces to
    %% actual VNode indexes and determine which VNode
    %% indexes should be filtered.
    CoverageVNodeFun =
        fun({VNodeID, KeySpaces}, Acc) ->
                %% Calculate the VNode index using the
                %% ring position and the increment of
                %% ring index values.
                VNodeIndex = convert(VNodeID, vnode_index),
                Node = chashbin:index_owner(VNodeIndex, CHBin),
                CoverageVNode = {VNodeIndex, Node},
                %% A vnode covering fewer than NVal keyspaces needs an
                %% explicit filter list; one covering all NVal needs none.
                case length(KeySpaces) < NVal of
                    true ->
                        KeySpaceIndexes = [convert(PartitionID, keyspace_filter) ||
                                              PartitionID <- KeySpaces],
                        {CoverageVNode, [{VNodeIndex, KeySpaceIndexes} | Acc]};
                    false ->
                        {CoverageVNode, Acc}
                end
        end,
    %% The offset value serves as a tiebreaker in the
    %% compare_vnode_keyspaces function and is used to distribute work
    %% to different sets of VNodes.
    CoverageResult = find_minimal_coverage(AllPartitions,
                                           AllVnodes -- UnavailableVnodes,
                                           Offset,
                                           NVal,
                                           lists:min([PVC, NVal]),
                                           []),
    case CoverageResult of
        {ok, CoveragePlan} ->
            %% Assemble the data structures required for
            %% executing the coverage operation.
            lists:mapfoldl(CoverageVNodeFun, [], CoveragePlan);
        {insufficient_vnodes_available, _KeySpace, PartialCoverage} ->
            case VNodeTarget of
                allup ->
                    %% The allup indicator means generate a coverage plan
                    %% for any available VNodes.
                    lists:mapfoldl(CoverageVNodeFun, [], PartialCoverage);
                all ->
                    {error, insufficient_vnodes_available}
            end
    end.
%% @private
%% Internal workhorse for replace_traditional_chunk/7: takes the ring
%% (CHBin) and the node-availability fun explicitly so tests can
%% substitute their own.
replace_traditional_chunk(VnodeIdx, Node, Filters, NVal,
                          ReqId, DownNodes, Service, CHBin,
                          AvailNodeFun) ->
    RingSize = chashbin:num_partitions(CHBin),
    %% Same request-id-derived tiebreaker offset used when the
    %% original plan was generated.
    Offset = ReqId rem NVal,
    %% We have our own idea of what nodes are available. The client
    %% may have a different idea of offline nodes based on network
    %% partitions, so we take that into account.
    %%
    %% The client can't really tell us what nodes it thinks are up (it
    %% only knows hostnames at best) but the opaque coverage chunks it
    %% uses have node names embedded in them, and it can tell us which
    %% chunks do *not* work.
    UpNodes = AvailNodeFun(Service, CHBin) -- [Node|DownNodes],
    NeededPartitions = partitions_by_index_or_filter(
                         VnodeIdx, NVal, Filters, RingSize),
    %% For each partition, create a tuple with that partition (as a
    %% filter index) mapped to a nested tuple of preflist + (initially
    %% empty) filter list list.
    Preflists =
        lists:map(
          fun(Partition) ->
                  {convert(Partition, keyspace_filter),
                   safe_hd(
                     partition_to_preflist(Partition, NVal, Offset, UpNodes, CHBin))
                  }
          end,
          NeededPartitions),
    %% keyfind on the second element detects any partition whose
    %% surviving preflist came back empty; that makes replacement
    %% impossible.
    maybe_create_traditional_replacement(Preflists, lists:keyfind([], 2, Preflists)).
%% @private
%% Second argument is the result of searching Preflists for an entry
%% with an empty preflist: 'false' means every needed partition has a
%% reachable primary vnode, so a replacement can be built.
maybe_create_traditional_replacement(Preflists, false) ->
    %% We do not go to great lengths to minimize the number of
    %% coverage plan components we'll return, but we do at least sort
    %% the vnodes we find so that we can consolidate filters later.
    create_traditional_replacement(lists:sort(Preflists));
maybe_create_traditional_replacement(_Preflists, _) ->
    {error, primary_partition_unavailable}.
%% @private
%% Argument to this should be sorted.
%% Folds [{FilterIdx, VNode}] into [{VNode, [FilterIdx, ...]}],
%% consolidating consecutive entries that share the same vnode
%% (which is why the input must be sorted), then converts to the
%% final plan shape.
create_traditional_replacement(Preflists) ->
    dechunk_traditional_replacement(
      lists:foldl(
        %% Pattern match the current vnode against the head of the
        %% accumulator; if the vnodes match, we can consolidate
        %% partition filters
        fun({PartIdx, VNode}, [{VNode, Partitions}|Tail]) ->
                [{VNode,
                  lists:sort([PartIdx|Partitions])}|Tail];
           %% Instead if this vnode is new, place it into the
           %% accumulator as is
           ({PartIdx, VNode}, Accum) ->
                [{VNode, [PartIdx]}|Accum]
        end,
        [],
        Preflists)).
%% @private
%% Take our replacement traditional coverage chunks and consolidate
%% them into a traditional coverage plan (that another layer will
%% rechunk, but whatchagonnado). Input elements look like
%% {{VnodeIdx, Node}, Filters}; the result pairs the vnode list with
%% a filter proplist that omits entries with no filters.
dechunk_traditional_replacement(Coverage) ->
    Vnodes = [VNode || {VNode, _Filters} <- Coverage],
    FilterProps =
        lists:filtermap(
          fun({{_Idx, _Node}, []}) ->
                  false;
             ({{Idx, _Node}, Filters}) ->
                  {true, {Idx, Filters}}
          end,
          Coverage),
    {Vnodes, FilterProps}.
%% @private
%% Head of a list, or `[]' for an empty list instead of a badarg crash.
safe_hd([H | _]) ->
    H;
safe_hd([]) ->
    [].
%% @private
%% Internal workhorse for replace_subpartition_chunk/7: takes the
%% ring (CHBin) and the node-availability fun explicitly so tests can
%% substitute their own.
replace_subpartition_chunk(_VnodeIdx, Node, {_Mask, _Bits}=SubpID, NVal,
                           ReqId, DownNodes, Service, CHBin,
                           AvailNodeFun) ->
    Offset = ReqId rem NVal,
    %% We have our own idea of what nodes are available. The client
    %% may have a different idea of offline nodes based on network
    %% partitions, so we take that into account.
    %%
    %% The client can't really tell us what nodes it thinks are up (it
    %% only knows hostnames at best) but the opaque coverage chunks it
    %% uses have node names embedded in them, and it can tell us which
    %% chunks do *not* work.
    UpNodes = AvailNodeFun(Service, CHBin) -- [Node|DownNodes],
    %% We don't know what partition this subpartition is in, but we
    %% can request a preflist for it by converting the subpartition to
    %% a document index.
    %%
    %% Unlike traditional coverage filters to document key hash
    %% mappings which have off-by-one adjustments, subpartition masks
    %% map directly against the relevant key hashes.
    DocIdx = convert(SubpID, subpartition_index),
    PrefList =
        docidx_to_preflist(DocIdx, NVal, Offset, UpNodes, CHBin),
    %% Only the first surviving preflist entry is needed; an empty
    %% preflist becomes a primary_partition_unavailable error.
    singular_preflist_to_chunk(safe_hd(PrefList), SubpID).
%% @private
%% Wrap the first surviving preflist entry as a one-element coverage
%% chunk; an empty preflist means no primary vnode is reachable.
singular_preflist_to_chunk({VnodeIdx, Node}, SubpID) ->
    [{VnodeIdx, Node, SubpID}];
singular_preflist_to_chunk([], _SubpID) ->
    {error, primary_partition_unavailable}.
%% ====================================================================
%%% Conversion functions
%% @private
%% We spend a lot of code mapping between data types, mostly
%% integer-based. Consolidate that as much as possible here.
%% Each clause maps one tagged ID form to a 160-bit ring index (or a
%% keyspace-filter index); the second argument names the target form.
-spec convert(partition_id() | vnode_id() | subpartition_id(), atom()) ->
          non_neg_integer().
convert({partition_id, PartitionID, RingSize}, keyspace_filter) ->
    %% Because data is stored one partition higher than the keyspace
    %% into which it directly maps, we have to increment a partition
    %% ID by one to find the relevant keyspace index for traditional
    %% coverage plan filters
    id_to_index(add_offset(PartitionID, 1, RingSize), RingSize);
convert({partition_id, PartitionID, RingSize}, partition_index) ->
    id_to_index(PartitionID, RingSize);
convert({vnode_id, VNodeID, RingSize}, vnode_index) ->
    id_to_index(VNodeID, RingSize);
convert({SubpID, Bits}, subpartition_index) ->
    %% Subpartition masks shift directly into position; no off-by-one
    %% adjustment applies here.
    SubpID bsl Bits.
%% @private
%% Preflist for a tagged partition ID: convert to its ring index and
%% delegate to docidx_to_preflist/5.
partition_to_preflist(Partition, NVal, Offset, UpNodes, CHBin) ->
    DocIdx = convert(Partition, partition_index),
    docidx_to_preflist(DocIdx, NVal, Offset, UpNodes, CHBin).
%% @private
%% Primary-only preflist of {Idx, Node} pairs for a document index,
%% restricted to UpNodes and rotated by Offset so different request
%% IDs can favor different vnodes.
docidx_to_preflist(DocIdx, NVal, Offset, UpNodes, CHBin) ->
    %% `chashbin' would be fine with a straight integer, but
    %% `riak_core_apl' has a -spec that mandates binary(), so we'll
    %% play its game
    OrigPreflist =
        lists:map(fun({{Idx, Node}, primary}) -> {Idx, Node} end,
                  riak_core_apl:get_primary_apl_chbin(
                    <<DocIdx:160/integer>>, NVal, CHBin, UpNodes)),
    rotate_list(OrigPreflist, length(OrigPreflist), Offset).
%% @private
%% Rotate List left by Offset positions. An Offset of at least Len is
%% a no-op (which also covers the empty-list case), matching the
%% caller's use of the offset as a best-effort tiebreaker.
rotate_list(List, Len, Offset) when Offset < Len ->
    {Front, Back} = lists:split(Offset, List),
    Back ++ Front;
rotate_list(List, _Len, _Offset) ->
    List.
%% @private
%% Map a 160-bit ring index back onto its integer vnode/partition ID.
index_to_id(Index, RingSize) ->
    Index div chash:ring_increment(RingSize).
%% @private
%% Map an integer partition/vnode ID onto its 160-bit ring index.
%% Only defined for IDs inside the ring (Id < RingSize).
id_to_index(Id, RingSize) when Id < RingSize ->
    Id * chash:ring_increment(RingSize).
%% @private
%% Keyspaces (as tagged partition IDs) covered by the vnode at ring
%% index `Index' for replication factor N.
find_vnode_partitions(Index, N, RingSize) ->
    n_keyspaces({vnode_id, index_to_id(Index, RingSize), RingSize}, N).
%% @private
%% Partitions needing replacement: with no explicit filters, every
%% keyspace the vnode covers; otherwise only the filtered ones.
partitions_by_index_or_filter(Idx, NVal, [], RingSize) ->
    find_vnode_partitions(Idx, NVal, RingSize);
partitions_by_index_or_filter(_Idx, _NVal, Filters, RingSize) ->
    %% Filters for traditional coverage plans are offset by 1, so when
    %% converting to partition indexes or IDs, we have to subtract 1
    lists:map(fun(P) ->
                      {partition_id,
                       add_offset(index_to_id(P, RingSize),
                                  -1, RingSize),
                       RingSize} end,
              Filters).
%% @private
%% Must be able to comply with PVC if the target is 'all'; for
%% 'allup' any partial preflist is acceptable as-is.
check_pvc(List, _PVC, allup) ->
    List;
check_pvc(List, PVC, all) ->
    check_pvc2(List, length(List), PVC).

%% @private
%% Keep the preflist only when it is long enough to satisfy PVC;
%% otherwise signal "unusable" with an empty list.
check_pvc2(List, Len, PVC) ->
    case Len >= PVC of
        true -> List;
        false -> []
    end.
%% @private
%% Internal workhorse for create_subpartition_plan/6: builds one
%% coverage entry per subpartition (Count of them, Count a power of
%% two), taking the ring and availability fun explicitly for tests.
create_subpartition_plan(VNodeTarget, NVal, Count, PVC, ReqId, Service, CHBin, AvailNodeFun) ->
    MaskBSL = data_bits(Count),
    %% Calculate an offset based on the request id to offer the
    %% possibility of different sets of VNodes being used even when
    %% all nodes are available.
    Offset = ReqId rem NVal,
    RingSize = chashbin:num_partitions(CHBin),
    UpNodes = AvailNodeFun(Service, CHBin),
    SubpList =
        lists:map(fun(SubpCounter) ->
                          SubpID = {SubpCounter, MaskBSL},
                          SubpIndex = convert(SubpID, subpartition_index),
                          PartID =
                              {partition_id,
                               chashbin:responsible_position(SubpIndex, CHBin),
                               RingSize},
                          %% PVC is much like R; if the number of
                          %% available primary partitions won't reach
                          %% the specified PVC value, don't bother
                          %% including this partition at all. We can
                          %% decide later (based on all vs allup)
                          %% whether to return the successful
                          %% components of the coverage plan
                          {PartID, SubpID,
                           check_pvc(
                             docidx_to_preflist(SubpIndex, NVal, Offset, UpNodes, CHBin),
                             PVC, VNodeTarget)}
                  end,
                  lists:seq(0, Count - 1)),
    %% Now we have a list of tuples; each subpartition maps to a
    %% partition ID, subpartition ID, and a list of zero or more
    %% {vnode_index, node} tuples for that partition
    maybe_create_subpartition_plan(VNodeTarget, SubpList, PVC).
%% @private
%% For 'allup' any partial coverage is acceptable; for 'all' the plan
%% is only valid when every subpartition retained a non-empty
%% preflist after the PVC check.
maybe_create_subpartition_plan(allup, SubpList, PVC) ->
    map_subplist_to_plan(SubpList, PVC);
maybe_create_subpartition_plan(all, SubpList, PVC) ->
    EmptyEntry = lists:keyfind([], 3, SubpList),
    maybe_create_subpartition_plan(all, EmptyEntry, SubpList, PVC).

%% @private
%% Second argument is the result of searching for a subpartition with
%% an empty preflist: 'false' means all subpartitions are covered.
maybe_create_subpartition_plan(all, false, SubpList, PVC) ->
    map_subplist_to_plan(SubpList, PVC);
maybe_create_subpartition_plan(all, _EmptyEntry, _SubpList, _PVC) ->
    {error, insufficient_vnodes_available}.
%% @private
%% Drop subpartitions with no surviving preflist and expand the rest
%% into flat {VnodeIdx, Node, SubpID} coverage entries (up to PVC per
%% subpartition).
map_subplist_to_plan(SubpList, PVC) ->
    Chunks = [map_pvc_vnodes(Vnodes, SubpID, PVC)
              || {_PartID, SubpID, Vnodes} <- SubpList, Vnodes =/= []],
    lists:flatten(Chunks).
%% @private
%% Take the first PVC vnodes from a preflist and pair each with the
%% subpartition ID, producing coverage-chunk entries.
map_pvc_vnodes(Vnodes, SubpID, PVC) ->
    map_pvc_vnodes(Vnodes, SubpID, PVC, []).

%% @private
%% Recursive worker: stop when PVC entries have been taken, or when
%% the preflist is exhausted. The empty-list clause is required
%% because with target 'allup' check_pvc passes preflists shorter
%% than PVC through unchanged, which previously crashed here with a
%% function_clause error.
map_pvc_vnodes(_Vnodes, _SubpID, 0, Accum) ->
    lists:reverse(Accum);
map_pvc_vnodes([], _SubpID, _PVC, Accum) ->
    lists:reverse(Accum);
map_pvc_vnodes([{VnodeIdx, Node}|Tail], SubpID, PVC, Accum) ->
    map_pvc_vnodes(Tail, SubpID, PVC-1,
                   [{VnodeIdx, Node, SubpID} | Accum]).
%% @private
%% Rotate a tagged vnode ID around the ring by Offset positions.
increment_vnode({vnode_id, Position, RingSize}, Offset) ->
    {vnode_id, add_offset(Position, Offset, RingSize), RingSize}.
%% @private
%% Tagged vnode_id tuple for every position on a ring of the given
%% size, in ascending ID order.
list_all_vnode_ids(RingSize) ->
    [{vnode_id, Id, RingSize} || Id <- lists:seq(0, RingSize - 1)].
%% @private
%% Note that these Id values are tagged tuples, not integers.
%% Fold a single {VnodeId, PartitionIds} coverage result into the
%% accumulated proplist, unioning partition lists when the vnode is
%% already present.
merge_coverage_results({VnodeId, PartitionIds}, Acc) ->
    case proplists:get_value(VnodeId, Acc) of
        undefined ->
            [{VnodeId, PartitionIds} | Acc];
        ExistingIds ->
            Merged = lists:usort(PartitionIds ++ ExistingIds),
            [{VnodeId, Merged} | proplists:delete(VnodeId, Acc)]
    end.
%% @private
%% @doc Generates a minimal set of vnodes and partitions to find the requested data.
%% PVC counts down: each iteration computes one more full pass of
%% coverage, merging results into the accumulator, until PVC passes
%% have succeeded or coverage fails.
-spec find_minimal_coverage(list(partition_id()), list(vnode_id()),
                            non_neg_integer(),
                            non_neg_integer(),
                            non_neg_integer(),
                            list({vnode_id(), list(partition_id())})) ->
          {ok,
           list({vnode_id(), list(partition_id())})} |
          {error, term()}.
find_minimal_coverage(_AllPartitions, _AvailableVnodes, _Offset, _NVal,
                      0, Results) ->
    {ok, Results};
find_minimal_coverage(AllPartitions,
                      AvailableVnodes,
                      Offset,
                      NVal,
                      PVC,
                      ResultsAcc) ->
    %% Calculate the available keyspaces. The list of
    %% keyspaces for each vnode that have already been
    %% covered by the plan are subtracted from the complete
    %% list of keyspaces so that coverage plans that
    %% want to cover more one preflist vnode work out
    %% correctly.
    AvailableKeySpaces = [{increment_vnode(VNode, Offset),
                           VNode,
                           n_keyspaces(VNode, NVal) --
                               proplists:get_value(VNode, ResultsAcc, [])}
                          || VNode <- (AvailableVnodes)],
    case find_coverage_vnodes(ordsets:from_list(AllPartitions),
                              AvailableKeySpaces,
                              ResultsAcc) of
        {ok, CoverageResults} ->
            UpdatedResults =
                lists:foldl(fun merge_coverage_results/2, ResultsAcc, CoverageResults),
            find_minimal_coverage(AllPartitions,
                                  AvailableVnodes,
                                  Offset,
                                  NVal,
                                  PVC-1,
                                  UpdatedResults);
        Error ->
            %% Propagate {insufficient_vnodes_available, _, _} (or any
            %% other failure) unchanged to the caller.
            Error
    end.
%% @private
%% @doc Find the N key spaces for a VNode. These are *not* the same as
%% the filters for the traditional cover plans; the filters would be
%% incremented by 1. Result is an ordset of tagged partition IDs for
%% the N ring positions immediately preceding the vnode.
-spec n_keyspaces(vnode_id(), pos_integer()) -> list(partition_id()).
n_keyspaces({vnode_id, VNode, RingSize}, N) ->
    Positions = lists:seq(RingSize + VNode - N, RingSize + VNode - 1),
    ordsets:from_list(
      [{partition_id, Pos rem RingSize, RingSize} || Pos <- Positions]).
%% @private
%% @doc Find a minimal set of covering VNodes.
%%
%% Takes:
%%   A list of all partition IDs still needed for coverage
%%   A list of available vnode IDs with partitions they cover
%%   An accumulator for results
%% Returns a list of {vnode_id, [partition_id,...]} tuples.
-spec find_coverage_vnodes(list(partition_id()), list(vnode_id()), list({vnode_id(), list(partition_id())})) ->
          {ok, list({vnode_id(), list(partition_id())})}|
          {insufficient_vnodes_available, list(vnode_id()), list({vnode_id(), list(partition_id())})}.
find_coverage_vnodes([], _, Coverage) ->
    %% Nothing left to cover: success.
    {ok, lists:sort(Coverage)};
find_coverage_vnodes(Partitions, [], Coverage) ->
    %% Candidates exhausted with partitions still uncovered.
    {insufficient_vnodes_available, Partitions, lists:sort(Coverage)};
find_coverage_vnodes(Partitions, AvailableVnodes, Coverage) ->
    %% Greedy step: pick the vnode covering the most of the remaining
    %% keyspace, record its contribution, and recurse on the rest.
    case find_best_vnode_for_keyspace(Partitions, AvailableVnodes) of
        {error, no_coverage} ->
            %% Bail
            find_coverage_vnodes(Partitions, [], Coverage);
        VNode ->
            {value, {_, VNode, Covers}, UpdAvailable} = lists:keytake(VNode, 2, AvailableVnodes),
            UpdCoverage = [{VNode, ordsets:intersection(Partitions, Covers)} | Coverage],
            UpdPartitions = ordsets:subtract(Partitions, Covers),
            find_coverage_vnodes(UpdPartitions, UpdAvailable, UpdCoverage)
    end.
%% @private
%% Find the vnode that covers the most of the remaining keyspace. Use
%% VNode ID + offset (determined by request ID) as the tiebreaker
%% (more precisely, the tagged tuple that contains the offset vnode's
%% ID).
find_best_vnode_for_keyspace(PartitionIDs, Available) ->
    Scored = [{covers(PartitionIDs, CoversKeys), VNode, TieBreaker}
              || {TieBreaker, VNode, CoversKeys} <- Available],
    %% Best candidate sorts first; a zero score at the head means no
    %% candidate overlaps the needed partitions at all.
    [Best | _] = lists:sort(fun compare_vnode_keyspaces/2, Scored),
    interpret_best_vnode(Best).
%% @private
%% A best-coverage count of zero means no available vnode overlaps
%% the remaining keyspace at all.
interpret_best_vnode({Covered, VNode, _TieBreaker}) when Covered =/= 0 ->
    VNode;
interpret_best_vnode({0, _VNode, _TieBreaker}) ->
    {error, no_coverage}.
%% @private
%% Sort predicate: descending on coverage count, ascending on
%% tiebreaker when coverage counts are equal.
%% There is a potential optimization here once the partition claim
%% logic has been changed so that physical nodes claim partitions at
%% regular intervals around the ring. The optimization is for the
%% case when the partition count is not evenly divisible by the n_val
%% and when the coverage counts of the two arguments are equal and a
%% tiebreaker is required to determine the sort order. In this case,
%% choosing the lower node for the final vnode to complete coverage
%% will result in an extra physical node being involved in the
%% coverage plan, so the optimization is to choose the upper node to
%% minimize the number of physical nodes.
compare_vnode_keyspaces({CoverA, _VA, TieA}, {CoverB, _VB, TieB}) ->
    case {CoverA, CoverB} of
        {Same, Same} ->
            %% Equal coverage: choose the lower node.
            TieA < TieB;
        _ ->
            %% Descending sort on coverage.
            CoverA > CoverB
    end.
%% @private
%% @doc Count how many of CoversKeys appear in KeySpace (both are
%% ordsets).
covers(KeySpace, CoversKeys) ->
    Overlap = ordsets:intersection(KeySpace, CoversKeys),
    ordsets:size(Overlap).
%% @private
%% Determines the number of non-mask bits in the 2^160 keyspace.
%% Note that PartitionCount does not have to be ring size; we could
%% be creating a coverage plan for subpartitions.
data_bits(PartitionCount) ->
    MaskBits = round(math:log(PartitionCount) / math:log(2)),
    160 - MaskBits.
%% @private
%% Smallest power of two greater than or equal to X. The float
%% formula below is undefined for X =< 1 (log of zero or a negative
%% number raises badarith), so those inputs are handled explicitly.
next_power_of_two(X) when X =< 1 ->
    1;
next_power_of_two(X) ->
    %% round(log2(X-1) + 0.5) computes ceil(log2(X)) for integer X >= 2.
    round(math:pow(2, round(log2(X - 1) + 0.5))).

%% @private
%% Base-2 logarithm.
log2(X) -> math:log(X) / math:log(2.0).
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).
-define(SET(X), ordsets:from_list(X)).
%% Spot-check data_bits/1 against hand-computed values.
bits_test() ->
    %% 160 - log2(8)
    ?assertEqual(157, data_bits(8)),
    %% 160 - log2(65536)
    ?assertEqual(144, data_bits(65536)).
%% Strip the partition_id tags, leaving the bare integer IDs.
partid(Partitions) ->
    [P || {partition_id, P, _RingSize} <- Partitions].
%% Verify the keyspaces a vnode covers, including wraparound at the
%% ring boundary.
n_keyspaces_test() ->
    %% First vnode in a cluster with ring size 64 should (with nval 3)
    %% cover keyspaces 61-63
    ?assertEqual([61, 62, 63], partid(n_keyspaces({vnode_id, 0, 64}, 3))),
    %% 4th vnode in a cluster with ring size 8 should (with nval 5)
    %% cover the first 3 and last 2 keyspaces
    ?assertEqual([0, 1, 2, 6, 7], partid(n_keyspaces({vnode_id, 3, 8}, 5))),
    %% First vnode in a cluster with a single partition should (with
    %% any nval) cover the only keyspace
    ?assertEqual([0], partid(n_keyspaces({vnode_id, 0, 1}, 1))).
%% covers/2 should report the size of the ordset intersection.
covers_test() ->
    %% Count the overlap between the sets
    ?assertEqual(2, covers(?SET([1, 2]),
                           ?SET([0, 1, 2, 3]))),
    ?assertEqual(1, covers(?SET([1, 2]),
                           ?SET([0, 1]))),
    ?assertEqual(0, covers(?SET([1, 2, 3]),
                           ?SET([4, 5, 6, 7]))).
%% `find_best_vnode_for_keyspace' actually takes tagged tuples, but it
%% works fine with plain ol' integers, and the logic is much easier to
%% see this way
best_vnode_test() ->
    %% Given two vnodes 0 and 7, pick 0 because it has more of the
    %% desired keyspaces
    ?assertEqual(0, find_best_vnode_for_keyspace(
                      ?SET([0, 1, 2, 3, 4]),
                      [{2, 0, ?SET([6, 7, 0, 1, 2])},
                       {1, 7, ?SET([5, 6, 7, 0, 1])}])),
    %% Given two vnodes 0 and 7, pick 7 because they cover the same
    %% keyspaces and 7 has the lower tiebreaker
    ?assertEqual(7, find_best_vnode_for_keyspace(
                      ?SET([0, 1, 2, 3, 4]),
                      [{2, 0, ?SET([6, 7, 0, 1, 2])},
                       {1, 7, ?SET([6, 7, 0, 1, 2])}])),
    %% Given two vnodes 0 and 7, pick 0 because they cover the same
    %% keyspaces and 0 has the lower tiebreaker
    ?assertEqual(0, find_best_vnode_for_keyspace(
                      ?SET([0, 1, 2, 3, 4]),
                      [{2, 0, ?SET([6, 7, 0, 1, 2])},
                       {3, 7, ?SET([6, 7, 0, 1, 2])}])).
%% EUnit fixture: each plan/replacement test runs against a fresh
%% chashbin built by cpsetup/0.
create_plan_test_() ->
    {foreach,
     fun cpsetup/0,
     [fun test_create_traditional_plan/1,
      fun test_create_subpartition_plan/1,
      fun test_replace_traditional/1,
      fun test_replace_traditional2/1,
      fun test_replace_traditional3/1,
      fun test_replace_subpartition/1]
    }.
%% A fixed 8-partition consistent hash: {RingSize, [{Index, Node}]},
%% with ownership alternating node1/node2/node3 around the ring.
chash_init() ->
    {8,
     [{0,node1},
      {182687704666362864775460604089535377456991567872,node2},
      {365375409332725729550921208179070754913983135744,node3},
      {548063113999088594326381812268606132370974703616,node1},
      {730750818665451459101842416358141509827966271488,node2},
      {913438523331814323877303020447676887284957839360,node3},
      {1096126227998177188652763624537212264741949407232,node1},
      {1278813932664540053428224228626747642198940975104,node2}]}.
%% Fixture setup: build the chashbin used by each plan test.
cpsetup() ->
    CHash = chash_init(),
    chashbin:create(CHash).
test_replace_traditional3(CHBin) ->
    %% Unlike test_replace_traditional, this function will iterate
    %% through N request IDs. Seems like an obvious place to
    %% use QuickCheck for more coverage
    %% This variant supplies explicit filters rather than relying on
    %% the vnode's implicit keyspaces; node2 is the one being replaced.
    OldFilters = [1278813932664540053428224228626747642198940975104, 0],
    ?_assertMatch([true, true, true],
                  lists:map(fun(N) ->
                                    {NewVnodes, NewFilters} =
                                        replace_traditional_chunk(
                                          182687704666362864775460604089535377456991567872,
                                          node2, OldFilters, 3, N, [], riak_kv, CHBin,
                                          fun(_, _) -> [node1, node3] end),
                                    equivalent_coverage(
                                      OldFilters,
                                      NewFilters) andalso
                                        %% Make sure none of the new
                                        %% vnodes live on our "down"
                                        %% node, node2
                                        [] ==
                                        lists:filter(fun({_, node2}) -> true;
                                                        (_) -> false
                                                     end, NewVnodes)
                            end, lists:seq(0, 2))).
test_replace_traditional2(CHBin) ->
    %% Unlike test_replace_traditional, this function will iterate
    %% through N request IDs. Seems like an obvious place to
    %% use QuickCheck for more coverage
    ?_assertMatch([true, true, true],
                  lists:map(fun(N) ->
                                    {NewVnodes, NewFilters} =
                                        replace_traditional_chunk(
                                          913438523331814323877303020447676887284957839360,
                                          node3, [], 3, N, [], riak_kv, CHBin,
                                          fun(_, _) -> [node1, node2] end),
                                    equivalent_coverage(
                                      913438523331814323877303020447676887284957839360,
                                      NewFilters,
                                      3, CHBin) andalso
                                        %% Make sure none of the new
                                        %% vnodes live on our "down"
                                        %% node, node3
                                        [] ==
                                        lists:filter(fun({_, node3}) -> true;
                                                        (_) -> false
                                                     end, NewVnodes)
                            end, lists:seq(0, 2))).
%% Compare the filters implicitly covered by the vnode at ring index
%% OldVNode against the explicit filters of the replacement chunk.
equivalent_coverage(OldVNode, NewFilters, NVal, CHBin) ->
    RingSize = chashbin:num_partitions(CHBin),
    PartitionSize = chash:ring_increment(RingSize),
    equivalent_coverage(
      test_vnode_to_filters(OldVNode, NVal, RingSize, PartitionSize),
      NewFilters).

%% True when the replacement chunk's filter lists, taken together,
%% cover exactly the same set as OldFilters.
%% This logic relies on the fact that all of the new vnodes must
%% have an explicit filter list since we're replacing a full vnode
%% and there is no other vnode which has exactly the same
%% partitions.
equivalent_coverage(OldFilters, NewFilters) ->
    AllNew = lists:append([FilterList || {_VNode, FilterList} <- NewFilters]),
    lists:sort(OldFilters) == lists:sort(AllNew).
%% Compute the implicit filter indexes for the vnode at ring index
%% `Index': the NVal positions at and before the vnode's own slot
%% (wrapping around the ring), converted back to ring indexes.
test_vnode_to_filters(Index, NVal, RingSize, PartitionSize) ->
    VnodeId = Index div PartitionSize,
    [((VnodeId + RingSize - N) rem RingSize) * PartitionSize
     || N <- lists:seq(0, NVal - 1)].
test_replace_traditional(CHBin) ->
    %% We're asking for a replacement for the 4th vnode (id 3), with
    %% nval of 3. This means it is responsible for partitions 0, 1, 2,
    %% but given the off-by-one behavior for filter lists the implicit
    %% filters for this vnode are 1, 2, and 3.
    %% Because we need to replace all 3 partitions that this vnode is
    %% responsible for, we're going to need at least 2 vnodes in the
    %% new chunk.
    %% We're reporting that node2 is the only node online, so the
    %% vnodes we have available with relevant partitions are limited
    %% to these two: 182687704666362864775460604089535377456991567872 (id 1)
    %% and 730750818665451459101842416358141509827966271488 (id 4)
    %% vnode 1 has partition 0 (filter 1)
    %% vnode 4 has partitions 1 and 2 (filters 2 and 3)
    {ExpectedVnodes, ExpectedFilters} = {
      [
       {182687704666362864775460604089535377456991567872, node2},
       {730750818665451459101842416358141509827966271488, node2}
      ],
      [
       {182687704666362864775460604089535377456991567872,
        [182687704666362864775460604089535377456991567872]
       },
       {730750818665451459101842416358141509827966271488,
        [365375409332725729550921208179070754913983135744,
         548063113999088594326381812268606132370974703616]
       }
      ]
     },
    {NewVnodes, NewFilters} =
        replace_traditional_chunk(
          548063113999088594326381812268606132370974703616,
          node1, [], 3, 0, [], riak_kv, CHBin,
          fun(_, _) -> [node2] end),
    [?_assertEqual(ExpectedVnodes, lists:sort(NewVnodes)),
     ?_assertEqual(ExpectedFilters, lists:sort(NewFilters))].
test_replace_subpartition(CHBin) ->
    %% Our riak_core_node_watcher replacement function says only node1
    %% is up, so the code will have no choice but to give us back the
    %% only node1 vnode with the zeroth partition
    NewChunk = [{548063113999088594326381812268606132370974703616,
                 node1, {0, 156}}],
    [?_assertEqual(NewChunk,
                   replace_subpartition_chunk(182687704666362864775460604089535377456991567872,
                                              node2, {0, 156}, 3,
                                              0, [], riak_kv, CHBin,
                                              fun(_, _) -> [node1] end))].
test_create_subpartition_plan(CHBin) ->
    %% 16 subpartitions over an 8-partition ring gives two slices per
    %% partition; the {Mask, 156} pairs reflect data_bits(16) = 156.
    Plan =
        [
         {182687704666362864775460604089535377456991567872,
          node2, {0, 156}},
         {182687704666362864775460604089535377456991567872,
          node2, {1, 156}},
         {365375409332725729550921208179070754913983135744,
          node3, {2, 156}},
         {365375409332725729550921208179070754913983135744,
          node3, {3, 156}},
         {548063113999088594326381812268606132370974703616,
          node1, {4, 156}},
         {548063113999088594326381812268606132370974703616,
          node1, {5, 156}},
         {730750818665451459101842416358141509827966271488,
          node2, {6, 156}},
         {730750818665451459101842416358141509827966271488,
          node2, {7, 156}},
         {913438523331814323877303020447676887284957839360,
          node3, {8, 156}},
         {913438523331814323877303020447676887284957839360,
          node3, {9, 156}},
         {1096126227998177188652763624537212264741949407232,
          node1, {10, 156}},
         {1096126227998177188652763624537212264741949407232,
          node1, {11, 156}},
         {1278813932664540053428224228626747642198940975104,
          node2, {12, 156}},
         {1278813932664540053428224228626747642198940975104,
          node2, {13, 156}},
         {0,node1, {14, 156}},
         {0,node1, {15, 156}}
        ],
    [?_assertEqual(Plan,
                   create_subpartition_plan(all, 3, 16, 1, 3, riak_kv, CHBin, fun(_, _) -> [node1, node2, node3] end))].
test_create_traditional_plan(CHBin) ->
    %% With all nodes up, the plan covers the 8-partition ring with 3
    %% vnodes; only the vnode covering fewer than NVal keyspaces gets
    %% an explicit filter list.
    Plan =
        {[{1278813932664540053428224228626747642198940975104,
           node2},
          {730750818665451459101842416358141509827966271488,
           node2},
          {365375409332725729550921208179070754913983135744,
           node3}],
         [{730750818665451459101842416358141509827966271488,
           [548063113999088594326381812268606132370974703616,
            730750818665451459101842416358141509827966271488]}]},
    [?_assertEqual(Plan,
                   create_traditional_plan(all, 3, 1, 1234, riak_kv, CHBin, fun(_, _) -> [node1, node2, node3] end))].
-endif. | src/riak_core_coverage_plan.erl | 0.708112 | 0.478224 | riak_core_coverage_plan.erl | starcoder |
-module(blockchain_assert_loc_v2_SUITE).
-include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl").
-include("blockchain_vars.hrl").
-include("blockchain_utils.hrl").
-export([all/0, init_per_testcase/2, end_per_testcase/2]).
-export([
basic_test/1,
bad_owner_sig_test/1,
bad_payer_sig_test/1,
zero_staking_fee_test/1,
same_loc_diff_gain_test/1,
same_loc_diff_elevation_test/1,
invalid_gain_test/1,
insufficient_assert_version_test/1,
insufficient_assert_res_test/1,
bad_nonce_test/1,
min_antenna_gain_not_set_test/1,
max_antenna_gain_not_set_test/1
]).
%% Common Test callback: the full list of test cases in this suite.
all() ->
    [
        basic_test,
        bad_owner_sig_test,
        bad_payer_sig_test,
        zero_staking_fee_test,
        same_loc_diff_gain_test,
        same_loc_diff_elevation_test,
        invalid_gain_test,
        insufficient_assert_version_test,
        insufficient_assert_res_test,
        bad_nonce_test,
        min_antenna_gain_not_set_test,
        max_antenna_gain_not_set_test
    ].
%%--------------------------------------------------------------------
%% TEST CASE SETUP
%%--------------------------------------------------------------------
%% Common Test callback: start a fresh chain with per-testcase chain
%% variables (extra_vars/1, defined elsewhere in this module) and
%% seed each genesis member with the same balance.
init_per_testcase(TestCase, Config) ->
    Config0 = blockchain_ct_utils:init_base_dir_config(?MODULE, TestCase, Config),
    Balance = 5000,
    {ok, Sup, {PrivKey, PubKey}, Opts} = test_utils:init(?config(base_dir, Config0)),
    ExtraVars = extra_vars(TestCase),
    {ok, GenesisMembers, _GenesisBlock, ConsensusMembers, Keys} =
        test_utils:init_chain(Balance, {PrivKey, PubKey}, true, ExtraVars),
    Chain = blockchain_worker:blockchain(),
    Swarm = blockchain_swarm:swarm(),
    N = length(ConsensusMembers),
    % Check ledger to make sure everyone has the right balance
    Ledger = blockchain:ledger(Chain),
    Entries = blockchain_ledger_v1:entries(Ledger),
    %% The matches against the bound Balance and literal 0 act as
    %% assertions: any deviating entry crashes the setup.
    _ = lists:foreach(
          fun(Entry) ->
                  Balance = blockchain_ledger_entry_v1:balance(Entry),
                  0 = blockchain_ledger_entry_v1:nonce(Entry)
          end,
          maps:values(Entries)
         ),
    [
     {balance, Balance},
     {sup, Sup},
     {pubkey, PubKey},
     {privkey, PrivKey},
     {opts, Opts},
     {chain, Chain},
     {swarm, Swarm},
     {n, N},
     {consensus_members, ConsensusMembers},
     {genesis_members, GenesisMembers},
     {ledger, Ledger},
     {keys, Keys}
     | Config0
    ].
%%--------------------------------------------------------------------
%% TEST CASE TEARDOWN
%%--------------------------------------------------------------------
%% Common Test callback: shut down the supervisor started by
%% init_per_testcase/2 (if still alive) and wait for it to exit.
end_per_testcase(_, Config) ->
    Sup = ?config(sup, Config),
    % Make sure blockchain saved on file = in memory
    case erlang:is_process_alive(Sup) of
        true ->
            true = erlang:exit(Sup, normal),
            ok = test_utils:wait_until(fun() -> false =:= erlang:is_process_alive(Sup) end);
        false ->
            ok
    end,
    ok.
%%--------------------------------------------------------------------
%% TEST CASES
%%--------------------------------------------------------------------
%% Happy path: a correctly signed assert_location_v2 transaction is
%% valid and, once gossiped in a block, advances the chain to height 2.
basic_test(Config) ->
    Chain = ?config(chain, Config),
    ConsensusMembers = ?config(consensus_members, Config),
    [_, {Owner, {_OwnerPubkey, _OwnerPrivKey, OwnerSigFun}} | _] = ConsensusMembers,
    %% NOTE: gateway pubkey bin = owner = payer for the tests
    GatewayPubkeyBin = Owner,
    Payer = Owner,
    %% Construct a simple assert_location_v2 transaction
    NewLoc = 631252734740306943,
    {_Txn1, Txn2} = base_assert_loc_v2_txn(GatewayPubkeyBin, Owner, Payer, NewLoc, 1, Chain),
    STxn0 = blockchain_txn_assert_location_v2:sign(Txn2, OwnerSigFun),
    STxn1 = blockchain_txn_assert_location_v2:sign_payer(STxn0, OwnerSigFun),
    %% This transaction should be valid
    ok = blockchain_txn_assert_location_v2:is_valid(STxn1, Chain),
    %% Check that the transaction propogates properly
    {ok, Block} = test_utils:create_block(ConsensusMembers, [STxn1]),
    _ = blockchain_gossip_handler:add_block(Block, Chain, self(), blockchain_swarm:swarm()),
    ?assertEqual({ok, blockchain_block:hash_block(Block)}, blockchain:head_hash(Chain)),
    ?assertEqual({ok, Block}, blockchain:head_block(Chain)),
    ?assertEqual({ok, 2}, blockchain:height(Chain)),
    ?assertEqual({ok, Block}, blockchain:get_block(2, Chain)),
    ok.
%% Signing the owner slot with a different member's key must be
%% rejected with bad_owner_signature.
bad_owner_sig_test(Config) ->
    Chain = ?config(chain, Config),
    ConsensusMembers = ?config(consensus_members, Config),
    [
     {_NotOwner, {_NotOwnerPubkey, _NotOwnerPrivKey, NotOwnerSigFun}},
     {Owner, {_OwnerPubkey, _OwnerPrivKey, OwnerSigFun}}
     | _
    ] = ConsensusMembers,
    %% NOTE: gateway pubkey bin = owner = payer for the tests
    GatewayPubkeyBin = Owner,
    Payer = Owner,
    %% Construct a simple assert_location_v2 transaction
    NewLoc = 631252734740306943,
    {_Txn1, Txn2} = base_assert_loc_v2_txn(GatewayPubkeyBin, Owner, Payer, NewLoc, 1, Chain),
    %% Owner signature comes from the wrong key; payer signature is fine.
    STxn0 = blockchain_txn_assert_location_v2:sign(Txn2, NotOwnerSigFun),
    STxn1 = blockchain_txn_assert_location_v2:sign_payer(STxn0, OwnerSigFun),
    %% This transaction should be invalid
    {error, {bad_owner_signature, _}} = blockchain_txn_assert_location_v2:is_valid(STxn1, Chain),
    ok.
%% Signing the payer slot with a different member's key must be
%% rejected with bad_payer_signature.
bad_payer_sig_test(Config) ->
    Chain = ?config(chain, Config),
    ConsensusMembers = ?config(consensus_members, Config),
    [
     {_NotPayer, {_NotPayerPubkey, _NotPayerPrivKey, NotPayerSigFun}},
     {Owner, {_OwnerPubkey, _OwnerPrivKey, OwnerSigFun}}
     | _
    ] = ConsensusMembers,
    %% NOTE: gateway pubkey bin = owner = payer for the tests
    GatewayPubkeyBin = Owner,
    Payer = Owner,
    %% Construct a simple assert_location_v2 transaction
    NewLoc = 631252734740306943,
    {_Txn1, Txn2} = base_assert_loc_v2_txn(GatewayPubkeyBin, Owner, Payer, NewLoc, 1, Chain),
    %% Owner signature is fine; payer signature comes from the wrong key.
    STxn0 = blockchain_txn_assert_location_v2:sign(Txn2, OwnerSigFun),
    STxn1 = blockchain_txn_assert_location_v2:sign_payer(STxn0, NotPayerSigFun),
    %% This transaction should be invalid
    {error, {bad_payer_signature, _}} = blockchain_txn_assert_location_v2:is_valid(STxn1, Chain),
    ok.
%% zero_staking_fee_test verifies that zeroing the staking fee on a txn that
%% asserts a *new* location is rejected with wrong_staking_fee.
zero_staking_fee_test(Config) ->
    Chain = ?config(chain, Config),
    ConsensusMembers = ?config(consensus_members, Config),
    [_, {Owner, {_OwnerPubkey, _OwnerPrivKey, OwnerSigFun}} | _] = ConsensusMembers,
    %% NOTE: gateway pubkey bin = owner = payer for the tests
    GatewayPubkeyBin = Owner,
    Payer = Owner,
    %% Construct a simple assert_location_v2 transaction
    NewLoc = 631252734740306943,
    %% Txn1 has only the txn fee set (no staking fee applied yet).
    {Txn1, _Txn2} = base_assert_loc_v2_txn(GatewayPubkeyBin, Owner, Payer, NewLoc, 1, Chain),
    %% Zero-ing out the staking_fee
    ZeroStakingFee = 0,
    ZeroStakingFeeTxn = blockchain_txn_assert_location_v2:staking_fee(Txn1, ZeroStakingFee),
    ZeroStakingFeeSTxn0 = blockchain_txn_assert_location_v2:sign(ZeroStakingFeeTxn, OwnerSigFun),
    ZeroStakingFeeSTxn1 = blockchain_txn_assert_location_v2:sign_payer(
        ZeroStakingFeeSTxn0,
        OwnerSigFun
    ),
    %% This transaction should be invalid
    %% Reason: We have zero-ed the staking_fee but a new location was specified
    {error, {wrong_staking_fee, {assert_location_v2, _, _}}} = blockchain_txn_assert_location_v2:is_valid(
        ZeroStakingFeeSTxn1,
        Chain
    ),
    ok.
%% same_loc_diff_gain_test verifies that re-asserting the *same* location
%% with only a gain change is valid with a zero staking fee, and that the
%% new gain is reflected in the ledger after the block is absorbed.
same_loc_diff_gain_test(Config) ->
    Ledger = ?config(ledger, Config),
    Chain = ?config(chain, Config),
    ConsensusMembers = ?config(consensus_members, Config),
    [_, {Owner, {_OwnerPubkey, _OwnerPrivKey, OwnerSigFun}} | _] = ConsensusMembers,
    %% NOTE: gateway pubkey bin = owner = payer for the tests
    GatewayPubkeyBin = Owner,
    Payer = Owner,
    %% assert that the default gain is set correctly
    {ok, GW} = blockchain_ledger_v1:find_gateway_info(GatewayPubkeyBin, Ledger),
    ?assertEqual(?DEFAULT_GAIN, blockchain_ledger_gateway_v2:gain(GW)),
    ExistingLocation = blockchain_ledger_gateway_v2:location(GW),
    %% We will not change the location of the gateway but will update the gain
    %% and supply 0 staking_fee
    ZeroStakingFee = 0,
    NewGain = 23,
    SameLocDiffGainTxn0 = blockchain_txn_assert_location_v2:new(
        GatewayPubkeyBin,
        Owner,
        Payer,
        ExistingLocation,
        1
    ),
    SameLocDiffGainFee = blockchain_txn_assert_location_v2:calculate_fee(
        SameLocDiffGainTxn0,
        Chain
    ),
    SameLocDiffGainTxn1 = blockchain_txn_assert_location_v2:fee(
        SameLocDiffGainTxn0,
        SameLocDiffGainFee
    ),
    SameLocDiffGainTxn2 = blockchain_txn_assert_location_v2:gain(SameLocDiffGainTxn1, NewGain),
    SameLocDiffGainTxn3 = blockchain_txn_assert_location_v2:staking_fee(
        SameLocDiffGainTxn2,
        ZeroStakingFee
    ),
    SameLocDiffGainSTxn0 = blockchain_txn_assert_location_v2:sign(SameLocDiffGainTxn3, OwnerSigFun),
    SameLocDiffGainSTxn1 = blockchain_txn_assert_location_v2:sign_payer(
        SameLocDiffGainSTxn0,
        OwnerSigFun
    ),
    %% This transaction should be valid
    ok = blockchain_txn_assert_location_v2:is_valid(SameLocDiffGainSTxn1, Chain),
    %% Check that the transaction propagates properly
    {ok, Block} = test_utils:create_block(ConsensusMembers, [SameLocDiffGainSTxn1]),
    _ = blockchain_gossip_handler:add_block(Block, Chain, self(), blockchain_swarm:swarm()),
    ?assertEqual({ok, blockchain_block:hash_block(Block)}, blockchain:head_hash(Chain)),
    ?assertEqual({ok, Block}, blockchain:head_block(Chain)),
    ?assertEqual({ok, 2}, blockchain:height(Chain)),
    ?assertEqual({ok, Block}, blockchain:get_block(2, Chain)),
    %% Check that the new gain reflects properly
    Ledger1 = blockchain:ledger(Chain),
    {ok, GW1} = blockchain_ledger_v1:find_gateway_info(GatewayPubkeyBin, Ledger1),
    ?assertEqual(NewGain, blockchain_ledger_gateway_v2:gain(GW1)),
    ok.
%% same_loc_diff_elevation_test verifies that re-asserting the *same*
%% location with only an elevation change is valid with a zero staking fee,
%% and that the new elevation is reflected in the ledger.
same_loc_diff_elevation_test(Config) ->
    Ledger = ?config(ledger, Config),
    Chain = ?config(chain, Config),
    ConsensusMembers = ?config(consensus_members, Config),
    [_, {Owner, {_OwnerPubkey, _OwnerPrivKey, OwnerSigFun}} | _] = ConsensusMembers,
    %% NOTE: gateway pubkey bin = owner = payer for the tests
    GatewayPubkeyBin = Owner,
    Payer = Owner,
    %% assert that the default elevation is set correctly
    {ok, GW} = blockchain_ledger_v1:find_gateway_info(GatewayPubkeyBin, Ledger),
    ?assertEqual(?DEFAULT_ELEVATION, blockchain_ledger_gateway_v2:elevation(GW)),
    ExistingLocation = blockchain_ledger_gateway_v2:location(GW),
    %% We will not change the location of the gateway but will update the
    %% elevation and supply 0 staking_fee
    ZeroStakingFee = 0,
    NewElevation = 5,
    SameLocDiffElevationTxn0 = blockchain_txn_assert_location_v2:new(
        GatewayPubkeyBin,
        Owner,
        Payer,
        ExistingLocation,
        1
    ),
    SameLocDiffElevationFee = blockchain_txn_assert_location_v2:calculate_fee(
        SameLocDiffElevationTxn0,
        Chain
    ),
    SameLocDiffElevationTxn1 = blockchain_txn_assert_location_v2:fee(
        SameLocDiffElevationTxn0,
        SameLocDiffElevationFee
    ),
    SameLocDiffElevationTxn2 = blockchain_txn_assert_location_v2:elevation(
        SameLocDiffElevationTxn1,
        NewElevation
    ),
    SameLocDiffElevationTxn3 = blockchain_txn_assert_location_v2:staking_fee(
        SameLocDiffElevationTxn2,
        ZeroStakingFee
    ),
    SameLocDiffElevationSTxn0 = blockchain_txn_assert_location_v2:sign(
        SameLocDiffElevationTxn3,
        OwnerSigFun
    ),
    SameLocDiffElevationSTxn1 = blockchain_txn_assert_location_v2:sign_payer(
        SameLocDiffElevationSTxn0,
        OwnerSigFun
    ),
    %% This transaction should be valid
    ok = blockchain_txn_assert_location_v2:is_valid(SameLocDiffElevationSTxn1, Chain),
    %% Check that the transaction propagates properly
    {ok, Block} = test_utils:create_block(ConsensusMembers, [SameLocDiffElevationSTxn1]),
    _ = blockchain_gossip_handler:add_block(Block, Chain, self(), blockchain_swarm:swarm()),
    ?assertEqual({ok, blockchain_block:hash_block(Block)}, blockchain:head_hash(Chain)),
    ?assertEqual({ok, Block}, blockchain:head_block(Chain)),
    ?assertEqual({ok, 2}, blockchain:height(Chain)),
    ?assertEqual({ok, Block}, blockchain:get_block(2, Chain)),
    %% Check that the new elevation reflects properly
    Ledger1 = blockchain:ledger(Chain),
    {ok, GW1} = blockchain_ledger_v1:find_gateway_info(GatewayPubkeyBin, Ledger1),
    ?assertEqual(NewElevation, blockchain_ledger_gateway_v2:elevation(GW1)),
    ok.
%% invalid_gain_test verifies that a gain outside the chain-var range
%% [min_antenna_gain, max_antenna_gain] (10..150 per extra_vars/1) is
%% rejected with invalid_antenna_gain.
invalid_gain_test(Config) ->
    Ledger = ?config(ledger, Config),
    Chain = ?config(chain, Config),
    ConsensusMembers = ?config(consensus_members, Config),
    [_, {Owner, {_OwnerPubkey, _OwnerPrivKey, OwnerSigFun}} | _] = ConsensusMembers,
    %% NOTE: gateway pubkey bin = owner = payer for the tests
    GatewayPubkeyBin = Owner,
    Payer = Owner,
    %% assert that the default gain is set correctly
    {ok, GW} = blockchain_ledger_v1:find_gateway_info(GatewayPubkeyBin, Ledger),
    ?assertEqual(?DEFAULT_GAIN, blockchain_ledger_gateway_v2:gain(GW)),
    ExistingLocation = blockchain_ledger_gateway_v2:location(GW),
    %% We will not change the location of the gateway and supply 0 staking_fee
    ZeroStakingFee = 0,
    %% NOTE: Supply a gain value outside of the allowed range (16 dbi = 160)
    NewGain = 160,
    SameLocDiffGainTxn0 = blockchain_txn_assert_location_v2:new(
        GatewayPubkeyBin,
        Owner,
        Payer,
        ExistingLocation,
        1
    ),
    SameLocDiffGainFee = blockchain_txn_assert_location_v2:calculate_fee(
        SameLocDiffGainTxn0,
        Chain
    ),
    SameLocDiffGainTxn1 = blockchain_txn_assert_location_v2:fee(
        SameLocDiffGainTxn0,
        SameLocDiffGainFee
    ),
    SameLocDiffGainTxn2 = blockchain_txn_assert_location_v2:gain(SameLocDiffGainTxn1, NewGain),
    SameLocDiffGainTxn3 = blockchain_txn_assert_location_v2:staking_fee(
        SameLocDiffGainTxn2,
        ZeroStakingFee
    ),
    SameLocDiffGainSTxn0 = blockchain_txn_assert_location_v2:sign(SameLocDiffGainTxn3, OwnerSigFun),
    SameLocDiffGainSTxn1 = blockchain_txn_assert_location_v2:sign_payer(
        SameLocDiffGainSTxn0,
        OwnerSigFun
    ),
    %% This transaction should be invalid
    {error, {invalid_assert_loc_txn_v2, {invalid_antenna_gain, 160, 10, 150}}} = blockchain_txn_assert_location_v2:is_valid(
        SameLocDiffGainSTxn1,
        Chain
    ),
    ok.
%% insufficient_assert_version_test verifies that a v2 assert_location txn
%% is rejected when the assert_loc_txn_version chain var is below 2 (set to
%% 1 for this case in extra_vars/1).
insufficient_assert_version_test(Config) ->
    Chain = ?config(chain, Config),
    ConsensusMembers = ?config(consensus_members, Config),
    [_, {Owner, {_OwnerPubkey, _OwnerPrivKey, OwnerSigFun}} | _] = ConsensusMembers,
    %% NOTE: gateway pubkey bin = owner = payer for the tests
    GatewayPubkeyBin = Owner,
    Payer = Owner,
    %% Construct a simple assert_location_v2 transaction
    NewLoc = 631252734740306943,
    {_Txn1, Txn2} = base_assert_loc_v2_txn(GatewayPubkeyBin, Owner, Payer, NewLoc, 1, Chain),
    STxn0 = blockchain_txn_assert_location_v2:sign(Txn2, OwnerSigFun),
    STxn1 = blockchain_txn_assert_location_v2:sign_payer(STxn0, OwnerSigFun),
    %% This transaction should be invalid
    {error, {invalid_assert_loc_txn_v2, insufficient_assert_loc_txn_version}} = blockchain_txn_assert_location_v2:is_valid(
        STxn1,
        Chain
    ),
    ok.
%% insufficient_assert_res_test verifies that asserting a location at too
%% coarse an h3 resolution (res 9 parent of a res 12 index) is rejected
%% with insufficient_assert_res.
insufficient_assert_res_test(Config) ->
    Chain = ?config(chain, Config),
    ConsensusMembers = ?config(consensus_members, Config),
    [_, {Owner, {_OwnerPubkey, _OwnerPrivKey, OwnerSigFun}} | _] = ConsensusMembers,
    %% NOTE: gateway pubkey bin = owner = payer for the tests
    GatewayPubkeyBin = Owner,
    Payer = Owner,
    %% Construct a simple assert_location_v2 transaction
    NewLoc = h3:parent(631252734740306943, 9),
    {_Txn1, Txn2} = base_assert_loc_v2_txn(GatewayPubkeyBin, Owner, Payer, NewLoc, 1, Chain),
    STxn0 = blockchain_txn_assert_location_v2:sign(Txn2, OwnerSigFun),
    STxn1 = blockchain_txn_assert_location_v2:sign_payer(STxn0, OwnerSigFun),
    %% This transaction should be invalid
    {error, {insufficient_assert_res, _}} = blockchain_txn_assert_location_v2:is_valid(
        STxn1,
        Chain
    ),
    ok.
%% bad_nonce_test verifies that a txn with a nonce that does not follow the
%% gateway's current nonce (0) is rejected with bad_nonce.
bad_nonce_test(Config) ->
    Chain = ?config(chain, Config),
    ConsensusMembers = ?config(consensus_members, Config),
    [_, {Owner, {_OwnerPubkey, _OwnerPrivKey, OwnerSigFun}} | _] = ConsensusMembers,
    %% NOTE: gateway pubkey bin = owner = payer for the tests
    GatewayPubkeyBin = Owner,
    Payer = Owner,
    %% Some random bad nonce
    BadNonce = 10,
    %% Construct a simple assert_location_v2 transaction
    NewLoc = 631252734740306943,
    {_Txn1, Txn2} = base_assert_loc_v2_txn(GatewayPubkeyBin, Owner, Payer, NewLoc, BadNonce, Chain),
    STxn0 = blockchain_txn_assert_location_v2:sign(Txn2, OwnerSigFun),
    STxn1 = blockchain_txn_assert_location_v2:sign_payer(STxn0, OwnerSigFun),
    %% This transaction should be invalid
    {error, {bad_nonce, {assert_location_v2, BadNonce, 0}}} = blockchain_txn_assert_location_v2:is_valid(
        STxn1,
        Chain
    ),
    ok.
%% min_antenna_gain_not_set_test verifies that validation fails when the
%% min_antenna_gain chain var is absent (omitted for this case in
%% extra_vars/1).
min_antenna_gain_not_set_test(Config) ->
    Chain = ?config(chain, Config),
    ConsensusMembers = ?config(consensus_members, Config),
    [_, {Owner, {_OwnerPubkey, _OwnerPrivKey, OwnerSigFun}} | _] = ConsensusMembers,
    %% NOTE: gateway pubkey bin = owner = payer for the tests
    GatewayPubkeyBin = Owner,
    Payer = Owner,
    %% Construct a simple assert_location_v2 transaction
    NewLoc = 631252734740306943,
    {_Txn1, Txn2} = base_assert_loc_v2_txn(GatewayPubkeyBin, Owner, Payer, NewLoc, 1, Chain),
    STxn0 = blockchain_txn_assert_location_v2:sign(Txn2, OwnerSigFun),
    STxn1 = blockchain_txn_assert_location_v2:sign_payer(STxn0, OwnerSigFun),
    %% This transaction should be invalid
    {error, {invalid_assert_loc_txn_v2, min_antenna_gain_not_set}} = blockchain_txn_assert_location_v2:is_valid(
        STxn1,
        Chain
    ),
    ok.
%% max_antenna_gain_not_set_test verifies that validation fails when the
%% max_antenna_gain chain var is absent (omitted for this case in
%% extra_vars/1).
max_antenna_gain_not_set_test(Config) ->
    Chain = ?config(chain, Config),
    ConsensusMembers = ?config(consensus_members, Config),
    [_, {Owner, {_OwnerPubkey, _OwnerPrivKey, OwnerSigFun}} | _] = ConsensusMembers,
    %% NOTE: gateway pubkey bin = owner = payer for the tests
    GatewayPubkeyBin = Owner,
    Payer = Owner,
    %% Construct a simple assert_location_v2 transaction
    NewLoc = 631252734740306943,
    {_Txn1, Txn2} = base_assert_loc_v2_txn(GatewayPubkeyBin, Owner, Payer, NewLoc, 1, Chain),
    STxn0 = blockchain_txn_assert_location_v2:sign(Txn2, OwnerSigFun),
    STxn1 = blockchain_txn_assert_location_v2:sign_payer(STxn0, OwnerSigFun),
    %% This transaction should be invalid
    {error, {invalid_assert_loc_txn_v2, max_antenna_gain_not_set}} = blockchain_txn_assert_location_v2:is_valid(
        STxn1,
        Chain
    ),
    ok.
%%--------------------------------------------------------------------
%% HELPERS
%%--------------------------------------------------------------------
%% base_assert_loc_v2_txn builds an unsigned assert_location_v2 transaction
%% with its txn fee applied, and returns both the fee-only txn and the txn
%% that additionally carries the calculated staking fee:
%% {FeeOnlyTxn, FeeAndStakingFeeTxn}.
base_assert_loc_v2_txn(GatewayPubkeyBin, Owner, Payer, Loc, Nonce, Chain) ->
    BaseTxn = blockchain_txn_assert_location_v2:new(
        GatewayPubkeyBin, Owner, Payer, Loc, Nonce
    ),
    TxnFee = blockchain_txn_assert_location_v2:calculate_fee(BaseTxn, Chain),
    StakingFee = blockchain_txn_assert_location_v2:calculate_staking_fee(BaseTxn, Chain),
    FeeOnlyTxn = blockchain_txn_assert_location_v2:fee(BaseTxn, TxnFee),
    FullTxn = blockchain_txn_assert_location_v2:staking_fee(FeeOnlyTxn, StakingFee),
    {FeeOnlyTxn, FullTxn}.
%% extra_vars/1 returns the chain vars to install for a given test case.
%% Every case enables txn fees with a fixed assert_location staking fee and
%% assert_loc txn version 2; specific cases omit or downgrade a var in
%% order to trigger the exact validation failure under test.
extra_vars(TestCase) ->
    Base = #{
        txn_fees => true,
        staking_fee_txn_assert_location_v1 => 1000000,
        assert_loc_txn_version => 2,
        min_antenna_gain => 10,
        max_antenna_gain => 150
    },
    case TestCase of
        min_antenna_gain_not_set_test ->
            %% min_antenna_gain deliberately absent.
            maps:remove(min_antenna_gain, Base);
        max_antenna_gain_not_set_test ->
            %% max_antenna_gain deliberately absent.
            maps:remove(max_antenna_gain, Base);
        insufficient_assert_version_test ->
            %% Version 1 is below the version required for v2 asserts.
            Base#{assert_loc_txn_version := 1};
        _ ->
            Base
    end.
%%-------------------------------------------------------------------
%%
%% Copyright (c) 2015, <NAME> <<EMAIL>>
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%%-------------------------------------------------------------------
%% @doc `backoff_supervisor' is a simple supervisor for a single child. If the
%% child is not started the supervisor will make attempts to start it, with
%% increasing backoff between attempts. If the child returns an error from its
%% start function when an automatic start attempt is made the supervisor will
%% shutdown.
%%
%% If the restart type of the child is `permanent' the supervisor will shutdown
%% if the child exits with any reason.
%%
%% If the restart type is `transient' the supervisor will shutdown if the
%% child exits abnormally. If a `transient' child exits with a normal reason
%% (`normal', `shutdown', `{shutdown, any()}') the child is removed and a new
%% child is started after an initial delay.
%%
%% If the restart type of the child is `temporary' the child is removed on exit
%% and a new child is started after an initial delay.
%%
%% For the supervisor to continue making start attempts the child start function
%% must return `ignore'. However if a start attempt is made using
%% `start_child/2' an error will be ignored and the supervisor will not
%% shutdown. However the current delay between start attempts will be restarted.
%% If a child is alive `start_child/2' returns
%% `{error, {already_started, Pid}}', where `Pid' is the `pid()' of the child.
%%
%% `terminate_child/2', `delete_child/2' and `restart_child/2' behave the same
%% as the equivalent functions for a `simple_one_for_one' `supervisor'.
%% Therefore a `backoff_supervisor' is equivalent to a `simple_one_for_one'
%% `supervisor' with `0' maximum restarts - except only one child can be
%% running and the supervisor will try to start a child if no child is running
%% for a period of time. If one of these attempts fails the supervisor will
%% shutdown. If one of these attempts returns `ignore' the supervisor will
%% increase the time period (up to a maximum) and try again.
%%
%% The specification for a `backoff_supervisor' uses a similar behaviour to a
%% supervisor. A callback module must implement a single function, `init/1'.
%% `init/1' should return `{ok, {{BackoffType, Start, Max}, [ChildSpec]}}' or
%% `ignore'. `BackoffType' is the `backoff' type to use, either `normal' for
%% exponential backoff or `jitter' for backoff with random increments. `Start'
%% is the initial delay, in milliseconds, before making a first attempt to start
%% the child. `Max' is the maximum delay, in milliseconds, between start
%% attempts. `ChildSpec' is the child specification of the child, which is a
%% `supervisor:child_spec()'. There must be a single `ChildSpec'. In the case of
%% `ignore' the `backoff_supervisor' will not start and return `ignore' from
%% `start_link/2,3'.
%%
%% The specification of a `backoff_supervisor' can be changed in the same way as
%% a `supervisor' using a code change. The backoff strategy will be used to set
%% the next delay, and all subsequent delays, but will not effect an active
%% delay. The child specification is updated for an active child and all
%% subsequent children. In the case of `ignore' the specification is unchanged.
%%
%% An example of a `backoff_supervisor' callback module:
%% ```
%% -module(backoff_example).
%%
%% -export([start_link/0]).
%% -export([init/1]).
%%
%% start_link() ->
%% backoff_supervisor:start_link(?MODULE, []).
%%
%% init([]) ->
%% {ok, {{jitter, 100, 5000},
%% [{name, {mod, fun, []}, transient, 5000, worker, [mod]}]}}.
%% '''
%%
%% When declaring a child spec of a `backoff_supervisor' in its parent
%% supervisor the shutdown should be `infinity' and the child type `supervisor'.
%% A `backoff_supervisor' should not be used as the top level supervisor in an
%% application because that process is assumed to be a `supervisor'.
%%
%% @see supervisor
%% @see backoff
-module(backoff_supervisor).
-behaviour(gen_server).
-define(TIMER_MAX, 4294967295).
%% public api
-export([start_child/2]).
-export([restart_child/2]).
-export([terminate_child/2]).
-export([delete_child/2]).
-export([which_children/1]).
-export([count_children/1]).
-export([start_link/2]).
-export([start_link/3]).
%% gen_server api
-export([init/1]).
-export([handle_call/3]).
-export([handle_cast/2]).
-export([handle_info/2]).
-export([terminate/2]).
-export([code_change/3]).
%% types

%% A reference to a running backoff_supervisor process.
-type backoff_supervisor() :: pid() | atom() | {global, any()} |
                              {via, module(), any()}.
%% {BackoffType, StartDelay, MaxDelay}; delays are in milliseconds.
-type backoff_spec() :: {normal | jitter, pos_integer(),
                         pos_integer() | infinity}.

-export_type([backoff_supervisor/0]).
-export_type([backoff_spec/0]).

-callback init(Args :: any()) ->
    {ok, {backoff_spec(), [supervisor:child_spec(), ...]}} | ignore.

-record(state, {name :: {pid(), module()} | {local, atom()} | {global, any()} |
                        {via, module(), any()},
                %% Callback module and its init/1 argument (re-run on code
                %% change).
                module :: module(),
                args :: any(),
                %% Fields of the configured child specification.
                id :: any(),
                mfargs :: {module(), atom(), list()},
                restart :: temporary | transient | permanent,
                shutdown :: brutal_kill | timeout(),
                child_type :: worker | supervisor,
                modules :: dynamic | [module()],
                %% Pid of the running child (if any) and the MFA it was
                %% actually started with (may carry extra args from
                %% start_child/2).
                child :: undefined | pid(),
                child_mfargs :: undefined | {module(), atom(), list()},
                %% Backoff configuration/state; backoff_ref is the timer
                %% reference of a pending start attempt, if armed.
                start :: pos_integer(),
                max :: pos_integer() | infinity,
                backoff_type :: normal | jitter,
                backoff :: backoff:backoff(),
                backoff_ref :: undefined | reference()}).
%% public api
%% @doc Start a child under the supervisor, if one is not already started.
%%
%% `ExtraArgs' will be appended to the list of arguments in the child
%% specification for this child. `ExtraArgs' will only be appended to this
%% child, subsequent children started by the supervisor automatically will not
%% have `ExtraArgs'.
%%
%% The call blocks until the supervisor replies (no timeout).
%%
%% @see supervisor:start_child/2
-spec start_child(Supervisor, ExtraArgs) -> Result when
      Supervisor :: backoff_supervisor(),
      ExtraArgs :: [any()],
      Result :: supervisor:startchild_ret().
start_child(Supervisor, ExtraArgs) ->
    call(Supervisor, {start_child, ExtraArgs}).
%% @doc Restart a child under the supervisor, always returns
%% `{error, simple_one_for_one}'.
%%
%% Exists only for API parity with `supervisor'.
%%
%% @see supervisor:restart_child/2
-spec restart_child(Supervisor, Child) -> {error, simple_one_for_one} when
      Supervisor :: backoff_supervisor(),
      Child :: pid().
restart_child(Supervisor, Child) ->
    call(Supervisor, {restart_child, Child}).
%% @doc Terminate the child, `Child', under the supervisor, if it is the
%% supervisor's child.
%%
%% If `Child' is the supervisor's child it will be terminated. A new child will
%% automatically be started by the supervisor after an initial delay.
%%
%% @see supervisor:terminate_child/2
-spec terminate_child(Supervisor, Child) -> ok | {error, not_found} when
      Supervisor :: backoff_supervisor(),
      Child :: pid().
terminate_child(Supervisor, Child) ->
    call(Supervisor, {terminate_child, Child}).
%% @doc Delete a child under the supervisor, always returns
%% `{error, simple_one_for_one}'.
%%
%% Exists only for API parity with `supervisor'.
%%
%% @see supervisor:delete_child/2
-spec delete_child(Supervisor, Child) -> {error, simple_one_for_one} when
      Supervisor :: backoff_supervisor(),
      Child :: pid().
delete_child(Supervisor, Child) ->
    call(Supervisor, {delete_child, Child}).
%% @doc Return a list of child specifications and child processes.
%%
%% The list will be empty if there is no children under the supervisor, or
%% contain the information for the supervisors single child if it is alive. The
%% name of the child will always be `undefined'.
%%
%% @see supervisor:which_children/1
-spec which_children(Supervisor) -> [Child] when
      Supervisor :: backoff_supervisor(),
      Child :: {undefined, pid(), supervisor | worker, dynamic | [module()]}.
which_children(Supervisor) ->
    call(Supervisor, which_children).
%% @doc Return a property list containing counts relating to the supervisor's
%% child specifications and child processes.
%%
%% There is always exactly one spec and at most one active child.
%%
%% @see supervisor:count_children/1
-spec count_children(Supervisor) -> [Count, ...] when
      Supervisor :: backoff_supervisor(),
      Count :: {specs, 1} | {active, 0 | 1} | {supervisors, 0 | 1} |
               {workers, 0 | 1}.
count_children(Supervisor) ->
    call(Supervisor, count_children).
%% @doc Starts a supervisor with callback module `Module' and argument `Args'.
%%
%% The supervisor process is linked to the caller and is not registered
%% under a name.
%%
%% @see supervisor:start_link/2
-spec start_link(Module, Args) -> {ok, Pid} | ignore | {error, Reason} when
      Module :: module(),
      Args :: any(),
      Pid :: pid(),
      Reason :: term().
start_link(Module, Args) ->
    %% `self' is replaced with {self(), Module} in init/1 for error reports.
    gen_server:start_link(?MODULE, {self, Module, Args}, []).
%% @doc Starts a supervisor with name `Name', callback module `Module' and
%% argument `Args'.
%%
%% @see supervisor:start_link/3
-spec start_link(Name, Module, Args) ->
          {ok, Pid} | ignore | {error, Reason} when
      Name :: {local, atom()} | {global, any()} | {via, module(), any()},
      Module :: module(),
      Args :: any(),
      Pid :: pid(),
      Reason :: term().
start_link(Name, Module, Args) ->
    gen_server:start_link(Name, ?MODULE, {Name, Module, Args}, []).
%% gen_server api
%% @private
%% gen_server init. The `self' placeholder from start_link/2 is replaced
%% with {self(), Module} (used as the supervisor name in error reports).
init({self, Module, Args}) ->
    init({{self(), Module}, Module, Args});
init({Name, Module, Args}) ->
    %% Trap exits so a linked child's death arrives as an 'EXIT' message
    %% and terminate/2 runs when the supervisor itself is shut down.
    process_flag(trap_exit, true),
    case catch Module:init(Args) of
        {ok, {BackoffSpec, StartSpec}} ->
            init(Name, Module, Args, BackoffSpec, StartSpec);
        ignore ->
            ignore;
        {'EXIT', Reason} ->
            {stop, Reason};
        Other ->
            {stop, {bad_return, {Module, init, Other}}}
    end.
%% @private
%% Synchronous API dispatch. restart_child/delete_child mirror a
%% simple_one_for_one supervisor's fixed {error, simple_one_for_one}
%% replies. There is deliberately no catch-all clause: an unknown call
%% crashes the supervisor.
handle_call({start_child, ExtraArgs}, _, State) ->
    handle_start_child(ExtraArgs, State);
handle_call({restart_child, _}, _, State) ->
    {reply, {error, simple_one_for_one}, State};
handle_call({terminate_child, Terminate}, _, State) ->
    handle_terminate_child(Terminate, State);
handle_call({delete_child, _}, _, State) ->
    {reply, {error, simple_one_for_one}, State};
handle_call(which_children, _, State) ->
    handle_which_children(State);
handle_call(count_children, _, State) ->
    handle_count_children(State).
%% @private
%% No casts are part of the API; any cast is a programming error, so stop.
handle_cast(Cast, State) ->
    {stop, {bad_cast, Cast}, State}.
%% @private
%% ?MODULE-tagged timer messages are backoff timer fires (automatic start
%% attempts); 'EXIT' messages come from the linked child since exits are
%% trapped. Anything else is logged and dropped rather than crashing.
handle_info({timeout, TRef, ?MODULE}, State) ->
    handle_timeout(TRef, State);
handle_info({'EXIT', Pid, Reason}, State) ->
    handle_exit(Pid, Reason, State);
handle_info(Msg, State) ->
    error_logger:error_msg("Backoff Supervisor received unexpected message: ~p",
                           [Msg]),
    {noreply, State}.
%% @private
%% Re-runs the callback module's init/1 and applies the (possibly changed)
%% backoff and child specifications; `ignore' leaves the current
%% specification untouched.
code_change(_, #state{module=Module, args=Args} = State, _) ->
    case catch Module:init(Args) of
        {ok, {BackoffSpec, StartSpec}} ->
            handle_code_change(BackoffSpec, StartSpec, State);
        ignore ->
            {ok, State};
        {'EXIT', _} = Exit ->
            {error, Exit};
        Other ->
            {error, {bad_return, {Module, init, Other}}}
    end.
%% @private
%% Synchronously shut the child down (if any) before the supervisor exits.
terminate(_Reason, State) ->
    shutdown(State).
%% Internal
%% call/2 wraps gen_server:call with an infinite timeout so API calls block
%% until the supervisor replies (e.g. through a child shutdown).
call(Supervisor, Request) ->
    gen_server:call(Supervisor, Request, infinity).
%% init/5 validates the backoff specification before continuing; anything
%% that is not a 3-tuple stops the supervisor with bad_backoff_spec.
init(Name, Module, Args, {_, _, _} = BackoffSpec, StartSpec) ->
    case check_backoff_spec(BackoffSpec) of
        ok ->
            do_init(Name, Module, Args, BackoffSpec, StartSpec);
        {error, Reason} ->
            {stop, {backoff_spec, Reason}}
    end;
init(_, _, _, Other, _) ->
    {stop, {bad_backoff_spec, Other}}.
%% do_init/5 validates the single child spec via supervisor and builds the
%% initial state, immediately arming the backoff timer for the first
%% automatic start attempt. A start spec that is not a one-element list
%% stops with bad_start_spec.
do_init(Name, Module, Args, {BackoffType, Start, Max}, [ChildSpec]) ->
    case supervisor:check_childspecs([ChildSpec]) of
        ok ->
            {Id, MFA, Restart, Shutdown, ChildType, Modules} = ChildSpec,
            Backoff = backoff_init(BackoffType, Start, Max),
            BRef = backoff:fire(Backoff),
            State = #state{name=Name, module=Module, args=Args,
                           backoff_type=BackoffType, start=Start, max=Max,
                           backoff=Backoff, backoff_ref=BRef,
                           id=Id, mfargs=MFA, restart=Restart,
                           shutdown=Shutdown, child_type=ChildType,
                           modules=Modules},
            {ok, State};
        {error, Reason} ->
            {stop, {start_spec, Reason}}
    end;
do_init(_, _, _, _, StartSpec) ->
    {stop, {bad_start_spec, StartSpec}}.
%% check_backoff_spec/1 validates a {Type, Start, Max} backoff
%% specification, returning ok or an {error, Reason} that identifies the
%% first invalid element, checked in the order: type, start, max.
check_backoff_spec({BackoffType, Start, Max}) ->
    ValidType = BackoffType =:= normal orelse BackoffType =:= jitter,
    ValidStart = is_integer(Start) andalso Start > 0 andalso
        Start =< ?TIMER_MAX,
    ValidMax = Max >= Start andalso (is_integer(Max) orelse Max =:= infinity),
    if
        not ValidType ->
            {error, {invalid_type, BackoffType}};
        not ValidStart ->
            {error, {invalid_start, Start}};
        not ValidMax ->
            {error, {invalid_max, Max}};
        true ->
            ok
    end.
%% backoff_init/3 builds the backoff state. Max is capped at ?TIMER_MAX
%% because the delay is used as an erlang timer timeout; the timer will
%% deliver {timeout, Ref, ?MODULE} to this process when it fires.
backoff_init(BackoffType, Start, Max) ->
    Backoff = backoff:init(Start, min(Max, ?TIMER_MAX), self(), ?MODULE),
    backoff:type(Backoff, BackoffType).
%% handle_start_child/2: with no child running there is always a pending
%% backoff timer, which is cancelled before making the manual start
%% attempt. With a live child the timer is never armed and the call is
%% rejected with {already_started, Pid}. The clause heads double as an
%% invariant check (child/timer states are mutually exclusive).
handle_start_child(ExtraArgs, #state{child=undefined, backoff_ref=BRef} = State)
  when is_reference(BRef) ->
    _ = erlang:cancel_timer(BRef),
    do_start_child(ExtraArgs, State#state{backoff_ref=undefined});
handle_start_child(_, #state{child=Child, backoff_ref=undefined} = State)
  when is_pid(Child) ->
    {reply, {error, {already_started, Child}}, State}.
%% do_start_child/2 applies the child's start MFA with ExtraArgs appended.
%% On success the backoff is reset; on `ignore' or error the timer is
%% re-armed at the *current* delay (backoff:fail is not called here) and
%% the error is returned to the caller instead of stopping the supervisor.
%% Note: because of `catch', a crash in the start function surfaces as
%% {error, {'EXIT', Reason}} and a throw as {error, Thrown}.
do_start_child(ExtraArgs, #state{mfargs={Mod, Fun, Args}} = State) ->
    NArgs = Args ++ ExtraArgs,
    case catch apply(Mod, Fun, NArgs) of
        {ok, Child} = OK when is_pid(Child) ->
            {reply, OK, started(Child, {Mod, Fun, NArgs}, State)};
        {ok, Child, _} = OK when is_pid(Child) ->
            {reply, OK, started(Child, {Mod, Fun, NArgs}, State)};
        ignore ->
            {reply, {ok, undefined}, fire(State)};
        {error, _} = Error ->
            {reply, Error, fire(State)};
        Reason ->
            {reply, {error, Reason}, fire(State)}
    end.
%% started/3 records a successfully started child (with the MFA it was
%% actually started with) and resets the backoff to its initial delay.
started(Child, MFA, #state{backoff=Backoff} = State) ->
    {_, NBackoff} = backoff:succeed(Backoff),
    State#state{child=Child, child_mfargs=MFA, backoff=NBackoff}.
%% fire/1 arms the backoff timer at its current delay and stores the timer
%% reference so a fired timer can be matched in handle_timeout/2.
fire(#state{backoff=Backoff} = State) ->
    State#state{backoff_ref=backoff:fire(Backoff)}.
%% handle_terminate_child/2: terminating the current child shuts it down
%% synchronously and re-arms the backoff timer so a replacement is started
%% later. For an unrelated pid: ok if the process is already dead,
%% {error, not_found} if it is still alive (presumably treating a dead pid
%% as an already-terminated child). Non-pid arguments get the
%% simple_one_for_one error, as supervisor does for dynamic children.
handle_terminate_child(Child, #state{child=Child} = State) when is_pid(Child) ->
    Reply = shutdown(State),
    {reply, Reply, fire(State#state{child=undefined, child_mfargs=undefined})};
handle_terminate_child(Pid, State) when is_pid(Pid) ->
    case erlang:is_process_alive(Pid) of
        true ->
            {reply, {error, not_found}, State};
        false ->
            {reply, ok, State}
    end;
handle_terminate_child(_, State) ->
    {reply, {error, simple_one_for_one}, State}.
%% shutdown/1 synchronously stops the child, if one is running, using a
%% monitor to await its exit; a no-op when there is no child.
shutdown(#state{child=Child} = State) when is_pid(Child) ->
    Monitor = monitor(process, Child),
    shutdown(Monitor, State);
shutdown(#state{child=undefined}) ->
    ok.
%% shutdown/2 applies the configured shutdown strategy: brutal_kill sends
%% an unconditional kill and waits forever for `killed'; otherwise a
%% `shutdown' exit signal is sent and the child is given Timeout ms to
%% comply before being killed.
shutdown(Monitor, #state{shutdown=brutal_kill, child=Child} = State) ->
    exit(Child, kill),
    shutdown_await(Monitor, killed, infinity, State);
shutdown(Monitor, #state{shutdown=Timeout, child=Child} = State) ->
    exit(Child, shutdown),
    shutdown_await(Monitor, shutdown, Timeout, State).
%% shutdown_await/4 waits for the child to exit with the expected reason.
%% A `normal' exit is tolerated unless the child is permanent; any other
%% unexpected reason is logged as a shutdown_error supervisor report (and
%% that report's return value becomes this function's result).
shutdown_await(Monitor, Reason, Timeout,
               #state{child=Child, restart=Restart} = State) ->
    case do_shutdown_await(Monitor, Child, Reason, Timeout) of
        ok ->
            ok;
        {error, normal} when Restart =/= permanent ->
            ok;
        {error, Reason2} ->
            supervisor_report(shutdown_error, Reason2, State)
    end.
%% do_shutdown_await/4: ok when the monitored child exits with the expected
%% reason, {error, Other} for any other reason; on timeout escalate to a
%% kill via shutdown_timeout/2.
do_shutdown_await(Monitor, Pid, Reason, Timeout) ->
    receive
        {'DOWN', Monitor, _, _, Reason} ->
            ok;
        {'DOWN', Monitor, _, _, Other} ->
            {error, Other}
    after
        Timeout ->
            shutdown_timeout(Monitor, Pid)
    end.
%% shutdown_timeout/2 kills a child that ignored the shutdown request and
%% returns its actual exit reason as an error (so it gets reported).
shutdown_timeout(Monitor, Pid) ->
    exit(Pid, kill),
    receive
        {'DOWN', Monitor, _, _, Reason} ->
            {error, Reason}
    end.
%% supervisor_report/3 emits a supervisor_report via error_logger with the
%% same property shape (supervisor/errorContext/reason/offender) that OTP
%% supervisors use.
supervisor_report(Error, Reason, #state{name=Name} = State) ->
    Report = [{supervisor, Name}, {errorContext, Error}, {reason, Reason},
              {offender, child(State)}],
    error_logger:error_report(supervisor_report, Report).
%% child/1 builds the offender proplist for a supervisor error report,
%% preferring the MFA the child was actually started with (which may carry
%% extra args) and falling back to the spec's MFA when none was recorded.
child(State) ->
    #state{child=Pid, mfargs=SpecMFA, child_mfargs=ChildMFA, id=Id,
           restart=Restart, shutdown=Shutdown, child_type=ChildType} = State,
    MFA = case ChildMFA of
              undefined -> SpecMFA;
              _ -> ChildMFA
          end,
    [{pid, Pid}, {name, Id}, {mfargs, MFA}, {restart_type, Restart},
     {shutdown, Shutdown}, {child_type, ChildType}].
%% handle_which_children/1 returns a single-element list for a live child
%% (the name is always `undefined', as for simple_one_for_one children),
%% otherwise the empty list.
handle_which_children(#state{child=Child, child_type=ChildType,
                             modules=Modules} = State) when is_pid(Child) ->
    {reply, [{undefined, Child, ChildType, Modules}], State};
handle_which_children(#state{child=undefined} = State) ->
    {reply, [], State}.
%% handle_count_children/1: there is always exactly one spec; the remaining
%% counts depend on whether a child is alive.
handle_count_children(State) ->
    {reply, [{specs, 1} | do_count_children(State)], State}.
%% do_count_children/1 counts the (at most one) live child by its type.
do_count_children(#state{child=Child, child_type=supervisor})
  when is_pid(Child) ->
    [{active, 1}, {supervisors, 1}, {workers, 0}];
do_count_children(#state{child=Child, child_type=worker}) when is_pid(Child) ->
    [{active, 1}, {supervisors, 0}, {workers, 1}];
do_count_children(#state{child=undefined}) ->
    [{active, 0}, {supervisors, 0}, {workers, 0}].
%% handle_timeout/2 runs an automatic start attempt when the current
%% backoff timer fires. `ignore' grows the backoff and re-arms the timer;
%% any error (or crash/throw from the start function, via `catch') stops
%% the supervisor — unlike a manual start_child/2 attempt. A message from
%% a stale timer reference (already cancelled/replaced) is ignored.
handle_timeout(BRef, #state{backoff_ref=BRef, mfargs={Mod, Fun, Args}} = State)
  when is_reference(BRef) ->
    NState = State#state{backoff_ref=undefined},
    case catch apply(Mod, Fun, Args) of
        {ok, Child} when is_pid(Child) ->
            restarted(Child, NState);
        {ok, Child, _} when is_pid(Child) ->
            restarted(Child, NState);
        ignore ->
            backoff(NState);
        {error, Reason} ->
            failed(Reason, NState);
        Reason ->
            failed(Reason, NState)
    end;
handle_timeout(_, State) ->
    {noreply, State}.
%% restarted/2 records an automatically started child; automatic starts
%% always use the plain spec MFA (no extra args).
restarted(Child, #state{mfargs=MFA} = State) ->
    {noreply, started(Child, MFA, State)}.
%% backoff/1 increases the backoff delay after an `ignore' and re-arms the
%% timer for the next automatic start attempt.
backoff(#state{backoff=Backoff} = State) ->
    {_, NBackoff} = backoff:fail(Backoff),
    {noreply, fire(State#state{backoff=NBackoff})}.
%% failed/2 reports a start_error and stops the supervisor with a
%% {shutdown, {failed_to_start_child, Id, Reason}} reason, mirroring how
%% a supervisor gives up when a child cannot be started.
failed(Reason, #state{id=Id, mfargs=MFA} = State) ->
    supervisor_report(start_error, Reason, State#state{child_mfargs=MFA}),
    NReason = {shutdown, {failed_to_start_child, Id, Reason}},
    {stop, NReason, State}.
%% handle_exit/3: only the current child's exit is acted on; 'EXIT'
%% messages from any other linked process are silently ignored.
handle_exit(Child, Reason, #state{child=Child} = State) when is_pid(Child) ->
    terminated(Reason, State);
handle_exit(_, _, State) ->
    {noreply, State}.
%% terminated/2 reacts to a child exit according to its restart type.
%% Clause order matters: permanent children stop the supervisor for *any*
%% reason; normal/shutdown/{shutdown,_} exits are treated as clean for the
%% remaining restart types; an abnormal exit stops the supervisor for
%% transient children; temporary children are logged and then replaced
%% after a backoff delay.
terminated(Reason, #state{restart=permanent} = State) ->
    terminated_stop(Reason, State);
terminated(normal, State) ->
    terminated(State);
terminated(shutdown, State) ->
    terminated(State);
terminated({shutdown, _}, State) ->
    terminated(State);
terminated(Reason, #state{restart=transient} = State) ->
    terminated_stop(Reason, State);
terminated(Reason, #state{restart=temporary} = State) ->
    supervisor_report(child_terminated, Reason, State),
    terminated(State).
%% terminated_stop/2 logs the child's exit and stops the supervisor. The
%% reached_max_restart_intensity reason mirrors OTP supervisors, since
%% this supervisor effectively allows zero restarts.
terminated_stop(Reason, #state{id=Id} = State) ->
    supervisor_report(child_terminated, Reason, State),
    supervisor_report(shutdown, reached_max_restart_intensity, State),
    NReason = {shutdown, {reached_max_restart_intensity, Id, Reason}},
    {stop, NReason, State#state{child=undefined, child_mfargs=undefined}}.
%% terminated/1 clears the child from the state and arms the backoff timer
%% so a replacement is started after the current delay.
terminated(State) ->
    {noreply, fire(State#state{child=undefined, child_mfargs=undefined})}.
%% handle_code_change/3 validates and applies the new backoff and child
%% specifications produced by the callback's init/1 during a code change.
handle_code_change({_, _, _} = BackoffSpec, StartSpec, State) ->
    case check_backoff_spec(BackoffSpec) of
        ok ->
            NState = change_backoff_spec(BackoffSpec, State),
            change_start_spec(StartSpec, NState);
        {error, Reason} ->
            {error, {backoff_spec, Reason}}
    end;
handle_code_change(Other, _, _) ->
    {error, {bad_backoff_spec, Other}}.
%% change_backoff_spec/2: when start/max are unchanged, keep the current
%% backoff state (so an active delay is unaffected) and only update its
%% type; otherwise build a fresh backoff from the new parameters.
change_backoff_spec({BackoffType, Start, Max},
                    #state{start=Start, max=Max, backoff=Backoff} = State) ->
    State#state{backoff=backoff:type(Backoff, BackoffType)};
change_backoff_spec({BackoffType, Start, Max}, State) ->
    State#state{backoff=backoff_init(BackoffType, Start, Max)}.
%% change_start_spec/2: validate the single child spec via
%% supervisor:check_childspecs/1 and, if valid, copy its fields into the
%% supervisor state. Anything other than a one-element list is rejected.
change_start_spec([ChildSpec], State) ->
    case supervisor:check_childspecs([ChildSpec]) of
        ok ->
            {Id, MFA, Restart, Shutdown, ChildType, Modules} = ChildSpec,
            NState = State#state{id=Id, mfargs=MFA, restart=Restart,
                                 shutdown=Shutdown, child_type=ChildType,
                                 modules=Modules},
            {ok, NState};
        {error, Reason} ->
            {error, {start_spec, Reason}}
    end;
change_start_spec(Other, _) ->
    {stop, {bad_start_spec, Other}}. | src/backoff_supervisor.erl | 0.716219 | 0.441492 | backoff_supervisor.erl | starcoder |
%%%------------------------------------------------------------------------
%% Copyright 2019, OpenTelemetry Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%% Span behaviour.
%% @end
%%%-------------------------------------------------------------------------
-module(ot_span).
-export([start_span/3,
end_span/2,
get_ctx/2,
is_recording_events/2,
set_attribute/4,
set_attributes/3,
add_event/4,
add_events/3,
set_status/3,
update_name/3]).
-include("opentelemetry.hrl").
-type start_opts() :: #{parent => undefined | opentelemetry:span() | opentelemetry:span_ctx(),
attributes => opentelemetry:attributes(),
sampler => ot_sampler:sampler(),
sampling_hint => ot_sampler:sampling_decision(),
links => opentelemetry:links(),
is_recorded => boolean(),
start_time => wts:timestamp(),
kind => opentelemetry:span_kind()}.
-export_type([start_opts/0]).
-callback start_span(opentelemetry:span_name(), start_opts()) -> opentelemetry:span_ctx().
-callback end_span(opentelemetry:span_ctx()) -> boolean() | {error, term()}.
-callback get_ctx(opentelemetry:span()) -> opentelemetry:span_ctx().
-callback is_recording_events(opentelemetry:span_ctx()) -> boolean().
-callback set_attribute(opentelemetry:span_ctx(),
opentelemetry:attribute_key(),
opentelemetry:attribute_value()) -> boolean().
-callback set_attributes(opentelemetry:span_ctx(), opentelemetry:attributes()) -> boolean().
-callback add_event(opentelemetry:span_ctx(), unicode:unicode_binary(), opentelemetry:attributes()) -> boolean().
-callback add_events(opentelemetry:span_ctx(), opentelemetry:time_events()) -> boolean().
-callback set_status(opentelemetry:span_ctx(), opentelemetry:status()) -> boolean().
-callback update_name(opentelemetry:span_ctx(), opentelemetry:span_name()) -> boolean().
%% handy macros so we don't have function name typos
-define(DO(Tracer, SpanCtx, Args), do_span_function(?FUNCTION_NAME, Tracer, SpanCtx, Args)).
%% @doc Start a new span through the tracer's span implementation module.
-spec start_span(Tracer, SpanName, Opts) -> SpanCtx when
      Tracer :: opentelemetry:tracer(),
      SpanName :: opentelemetry:span_name(),
      Opts :: start_opts(),
      SpanCtx :: opentelemetry:span_ctx().
start_span(Tracer, SpanName, Opts) ->
    SpanModule = ot_tracer:span_module(Tracer),
    SpanModule:start_span(SpanName, Opts).

%% @doc Finish the given span.
-spec end_span(Tracer, SpanCtx) -> boolean() | {error, term()} when
      Tracer :: opentelemetry:tracer(),
      SpanCtx :: opentelemetry:span_ctx().
end_span(Tracer, SpanCtx) ->
    SpanModule = ot_tracer:span_module(Tracer),
    SpanModule:end_span(SpanCtx).

%% @doc Extract the span context from a span.
-spec get_ctx(Tracer, Span) -> SpanCtx when
      Tracer :: opentelemetry:tracer(),
      Span :: opentelemetry:span(),
      SpanCtx :: opentelemetry:span_ctx().
get_ctx(Tracer, Span) ->
    SpanModule = ot_tracer:span_module(Tracer),
    SpanModule:get_ctx(Span).

%% The remaining API functions dispatch through the ?DO macro, which
%% forwards ?FUNCTION_NAME to the tracer's span module (see
%% do_span_function/4 below), guarding against function-name typos.

%% @doc Whether events on this span are being recorded.
-spec is_recording_events(Tracer, SpanCtx) -> boolean() when
      Tracer :: opentelemetry:tracer(),
      SpanCtx :: opentelemetry:span_ctx().
is_recording_events(Tracer, SpanCtx) ->
    ?DO(Tracer, SpanCtx, []).

%% @doc Set a single attribute on the span.
-spec set_attribute(Tracer, SpanCtx, Key, Value) -> boolean() when
      Tracer :: opentelemetry:tracer(),
      Key :: opentelemetry:attribute_key(),
      Value :: opentelemetry:attribute_value(),
      SpanCtx :: opentelemetry:span_ctx().
set_attribute(Tracer, SpanCtx, Key, Value) ->
    ?DO(Tracer, SpanCtx, [Key, Value]).

%% @doc Set multiple attributes on the span.
-spec set_attributes(Tracer, SpanCtx, Attributes) -> boolean() when
      Tracer :: opentelemetry:tracer(),
      Attributes :: opentelemetry:attributes(),
      SpanCtx :: opentelemetry:span_ctx().
set_attributes(Tracer, SpanCtx, Attributes) ->
    ?DO(Tracer, SpanCtx, [Attributes]).

%% @doc Add a single named event to the span.
-spec add_event(Tracer, SpanCtx, Name, Attributes) -> boolean() when
      Tracer :: opentelemetry:tracer(),
      Name :: unicode:unicode_binary(),
      Attributes :: opentelemetry:attributes(),
      SpanCtx :: opentelemetry:span_ctx().
add_event(Tracer, SpanCtx, Name, Attributes) ->
    ?DO(Tracer, SpanCtx, [Name, Attributes]).

%% @doc Add a list of events to the span.
-spec add_events(Tracer, SpanCtx, Events) -> boolean() when
      Tracer :: opentelemetry:tracer(),
      Events :: opentelemetry:events(),
      SpanCtx :: opentelemetry:span_ctx().
add_events(Tracer, SpanCtx, Events) ->
    ?DO(Tracer, SpanCtx, [Events]).

%% @doc Set the span's status.
-spec set_status(Tracer, SpanCtx, Status) -> boolean() when
      Tracer :: opentelemetry:tracer(),
      Status :: opentelemetry:status(),
      SpanCtx :: opentelemetry:span_ctx().
set_status(Tracer, SpanCtx, Status) ->
    ?DO(Tracer, SpanCtx, [Status]).
%% @doc Rename an existing span.
%% Consistency fix: the function head previously bound the third
%% argument as `SpanName' while the spec calls it `Name'; the head now
%% uses `Name' so the spec and clause agree (behavior unchanged).
-spec update_name(Tracer, SpanCtx, Name) -> boolean() when
      Tracer :: opentelemetry:tracer(),
      Name :: opentelemetry:span_name(),
      SpanCtx :: opentelemetry:span_ctx().
update_name(Tracer, SpanCtx, Name) ->
    ?DO(Tracer, SpanCtx, [Name]).
%% internal functions

%% Resolve the tracer's span implementation module and dispatch
%% Function on it, with SpanCtx prepended to the remaining arguments.
do_span_function(Function, Tracer, SpanCtx, Args) ->
    apply_span_function(ot_tracer:span_module(Tracer), Function, [SpanCtx | Args]).
%% Short-circuit for the no-op span implementation: return ok without
%% calling through.
apply_span_function(ot_span_noop, _Function, _Args) ->
    ok;
%% Otherwise invoke SpanModule:Function(Args...) dynamically.
apply_span_function(SpanModule, Function, Args) ->
    erlang:apply(SpanModule, Function, Args). | src/ot_span.erl | 0.686055 | 0.450964 | ot_span.erl | starcoder |
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_btree).
-export([open/2, open/3, query_modify/4, add/2, add_remove/3]).
-export([fold/4, full_reduce/1, final_reduce/2, size/1, foldl/3, foldl/4]).
-export([fold_reduce/4, lookup/2, get_state/1, set_options/2]).
-export([less/3]).
-include("couch_db.hrl").
-define(CHUNK_THRESHOLD, 16#4ff).
%% Split a user value into its {Key, Value} pair using the tree's
%% configured extract function.
extract(#btree{extract_kv=Extract}, Value) ->
    Extract(Value).

%% Rebuild a user value from a {Key, Value} pair using the tree's
%% configured assemble function.
assemble(#btree{assemble_kv=Assemble}, Key, Value) ->
    Assemble(Key, Value).

%% Key comparison using the tree's configured ordering function.
less(#btree{less=Less}, A, B) ->
    Less(A, B).

% pass in 'nil' for State if a new Btree.
open(State, Fd) ->
    {ok, #btree{root=State, fd=Fd}}.

%% Fold option tuples into the #btree record; an unknown option crashes
%% with function_clause, surfacing misconfiguration early.
set_options(Bt, []) ->
    Bt;
set_options(Bt, [{split, Extract}|Rest]) ->
    set_options(Bt#btree{extract_kv=Extract}, Rest);
set_options(Bt, [{join, Assemble}|Rest]) ->
    set_options(Bt#btree{assemble_kv=Assemble}, Rest);
set_options(Bt, [{less, Less}|Rest]) ->
    set_options(Bt#btree{less=Less}, Rest);
set_options(Bt, [{reduce, Reduce}|Rest]) ->
    set_options(Bt#btree{reduce=Reduce}, Rest);
set_options(Bt, [{compression, Comp}|Rest]) ->
    set_options(Bt#btree{compression=Comp}, Rest).

%% Open with options applied on top of the fresh record.
open(State, Fd, Options) ->
    {ok, set_options(#btree{root=State, fd=Fd}, Options)}.

%% The opaque btree state is simply its root pointer.
get_state(#btree{root=Root}) ->
    Root.

%% Collapse partial reductions ({KVs, Reductions}) into one final
%% reduction value. Clause order matters: unwrap the #btree record
%% first, then the empty/singleton cases, then the general rereduce.
final_reduce(#btree{reduce=Reduce}, Val) ->
    final_reduce(Reduce, Val);
final_reduce(Reduce, {[], []}) ->
    Reduce(reduce, []);
final_reduce(_Bt, {[], [Red]}) ->
    Red;
final_reduce(Reduce, {[], Reductions}) ->
    Reduce(rereduce, Reductions);
final_reduce(Reduce, {KVs, Reductions}) ->
    Red = Reduce(reduce, KVs),
    final_reduce(Reduce, {[], [Red | Reductions]}).
%% Fold over the tree computing grouped reductions between start_key and
%% end_key (end_key_gt takes precedence and is exclusive). KeyGroupFun
%% decides which consecutive keys belong to the same group; Fun is
%% called once per completed group and may return {stop, Acc}, which
%% aborts the traversal via the throw caught below.
fold_reduce(#btree{root=Root}=Bt, Fun, Acc, Options) ->
    Dir = couch_util:get_value(dir, Options, fwd),
    StartKey = couch_util:get_value(start_key, Options),
    EndKey = case couch_util:get_value(end_key_gt, Options) of
        undefined -> couch_util:get_value(end_key, Options);
        LastKey -> LastKey
    end,
    KeyGroupFun = couch_util:get_value(key_group_fun, Options, fun(_,_) -> true end),
    %% In reverse traversal the two bounds swap roles.
    {StartKey2, EndKey2} =
    case Dir of
        rev -> {EndKey, StartKey};
        fwd -> {StartKey, EndKey}
    end,
    try
        {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
            reduce_stream_node(Bt, Dir, Root, StartKey2, EndKey2, undefined, [], [],
                KeyGroupFun, Fun, Acc),
        %% Flush the final, still-open group (if any).
        if GroupedKey2 == undefined ->
            {ok, Acc2};
        true ->
            case Fun(GroupedKey2, {GroupedKVsAcc2, GroupedRedsAcc2}, Acc2) of
            {ok, Acc3} -> {ok, Acc3};
            {stop, Acc3} -> {ok, Acc3}
            end
        end
    catch
        throw:{stop, AccDone} -> {ok, AccDone}
    end.

%% Reduction of the whole tree: an empty tree reduces the empty list;
%% otherwise the root pointer already carries the full reduction.
full_reduce(#btree{root=nil,reduce=Reduce}) ->
    {ok, Reduce(reduce, [])};
full_reduce(#btree{root=Root}) ->
    {ok, element(2, Root)}.

%% On-disk size of the tree in bytes; 'nil' for pre-1.2 root pointers
%% that did not record a size.
size(#btree{root = nil}) ->
    0;
size(#btree{root = {_P, _Red}}) ->
    % pre 1.2 format
    nil;
size(#btree{root = {_P, _Red, Size}}) ->
    Size.
% wraps a 2 arity function with the proper 3 arity function
%% Normalise user fold callbacks to the internal 4-arity
%% (visit/traverse) form. 2- and 3-arity funs only see visited KVs and
%% accept every traversal step.
convert_fun_arity(Fun) when is_function(Fun, 2) ->
    fun
        (visit, KV, _Reds, AccIn) -> Fun(KV, AccIn);
        (traverse, _K, _Red, AccIn) -> {ok, AccIn}
    end;
convert_fun_arity(Fun) when is_function(Fun, 3) ->
    fun
        (visit, KV, Reds, AccIn) -> Fun(KV, Reds, AccIn);
        (traverse, _K, _Red, AccIn) -> {ok, AccIn}
    end;
convert_fun_arity(Fun) when is_function(Fun, 4) ->
    Fun. % Already arity 4

%% Build the predicate deciding whether a key is still inside the
%% fold's end bound: end_key is inclusive, end_key_gt is exclusive, and
%% the comparisons flip for reverse traversal.
make_key_in_end_range_function(#btree{less=Less}, fwd, Options) ->
    case couch_util:get_value(end_key_gt, Options) of
    undefined ->
        case couch_util:get_value(end_key, Options) of
        undefined ->
            fun(_Key) -> true end;
        LastKey ->
            fun(Key) -> not Less(LastKey, Key) end
        end;
    EndKey ->
        fun(Key) -> Less(Key, EndKey) end
    end;
make_key_in_end_range_function(#btree{less=Less}, rev, Options) ->
    case couch_util:get_value(end_key_gt, Options) of
    undefined ->
        case couch_util:get_value(end_key, Options) of
        undefined ->
            fun(_Key) -> true end;
        LastKey ->
            fun(Key) -> not Less(Key, LastKey) end
        end;
    EndKey ->
        fun(Key) -> Less(EndKey, Key) end
    end.
%% foldl/3: fold over the whole tree with default options.
foldl(Bt, Fun, Acc) ->
    foldl(Bt, Fun, Acc, []).

%% foldl/4: alias for fold/4, kept for API symmetry with lists:foldl.
foldl(Bt, Fun, Acc, Options) ->
    fold(Bt, Fun, Acc, Options).
%% Fold over the tree in key order (or reverse). Returns
%% {ok, {KVs, Reds}, Acc}: the reduction context at the point the fold
%% stopped, plus the user accumulator.
fold(#btree{root=nil}, _Fun, Acc, _Options) ->
    {ok, {[], []}, Acc};
fold(#btree{root=Root}=Bt, Fun, Acc, Options) ->
    Dir = couch_util:get_value(dir, Options, fwd),
    InRange = make_key_in_end_range_function(Bt, Dir, Options),
    Result =
    case couch_util:get_value(start_key, Options) of
    undefined ->
        stream_node(Bt, [], Bt#btree.root, InRange, Dir,
                convert_fun_arity(Fun), Acc);
    StartKey ->
        stream_node(Bt, [], Bt#btree.root, StartKey, InRange, Dir,
                convert_fun_arity(Fun), Acc)
    end,
    case Result of
    {ok, Acc2} ->
        %% Ran to completion: the root reduction covers everything seen.
        FullReduction = element(2, Root),
        {ok, {[], [FullReduction]}, Acc2};
    {stop, LastReduction, Acc2} ->
        {ok, LastReduction, Acc2}
    end.
%% Insert a list of values (no deletions, no lookups).
add(Bt, InsertKeyValues) ->
    add_remove(Bt, InsertKeyValues, []).

%% Insert and delete in one pass; no lookups are requested.
add_remove(Bt, InsertKeyValues, RemoveKeys) ->
    {ok, [], Bt2} = query_modify(Bt, [], InsertKeyValues, RemoveKeys),
    {ok, Bt2}.

%% Perform lookups, inserts and removals in a single tree traversal.
%% Actions are sorted by key, and for equal keys by operation priority
%% (fetch < remove < insert), so a fetch observes the pre-modification
%% value and a remove+insert pair behaves as a replace.
query_modify(Bt, LookupKeys, InsertValues, RemoveKeys) ->
    #btree{root=Root} = Bt,
    InsertActions = lists:map(
        fun(KeyValue) ->
            {Key, Value} = extract(Bt, KeyValue),
            {insert, Key, Value}
        end, InsertValues),
    RemoveActions = [{remove, Key, nil} || Key <- RemoveKeys],
    FetchActions = [{fetch, Key, nil} || Key <- LookupKeys],
    SortFun =
    fun({OpA, A, _}, {OpB, B, _}) ->
        case A == B of
        % A and B are equal, sort by op.
        true -> op_order(OpA) < op_order(OpB);
        false ->
            less(Bt, A, B)
        end
    end,
    Actions = lists:sort(SortFun, lists:append([InsertActions, RemoveActions, FetchActions])),
    {ok, KeyPointers, QueryResults} = modify_node(Bt, Root, Actions, []),
    {ok, NewRoot} = complete_root(Bt, KeyPointers),
    {ok, QueryResults, Bt#btree{root=NewRoot}}.
% for ordering different operations with the same key.
% fetch < remove < insert
%% Priority of operations that share the same key: fetches run first,
%% then removes, then inserts.
op_order(Op) ->
    case Op of
        fetch -> 1;
        remove -> 2;
        insert -> 3
    end.
%% Batch point lookups. Keys are sorted so the tree is walked once;
%% results are re-ordered back to the caller's key order at the end.
lookup(#btree{root=Root, less=Less}=Bt, Keys) ->
    SortedKeys = lists:sort(Less, Keys),
    {ok, SortedResults} = lookup(Bt, Root, SortedKeys),
    % We want to return the results in the same order as the keys were input
    % but we may have changed the order when we sorted. So we need to put the
    % order back into the results.
    couch_util:reorder_results(Keys, SortedResults).

%% Recursive lookup: an empty subtree misses every key; otherwise
%% dispatch on the node type read from disk.
lookup(_Bt, nil, Keys) ->
    {ok, [{Key, not_found} || Key <- Keys]};
lookup(Bt, Node, Keys) ->
    Pointer = element(1, Node),
    {NodeType, NodeList} = get_node(Bt, Pointer),
    case NodeType of
    kp_node ->
        lookup_kpnode(Bt, list_to_tuple(NodeList), 1, Keys, []);
    kv_node ->
        lookup_kvnode(Bt, list_to_tuple(NodeList), 1, Keys, [])
    end.

%% Inner-node lookup: binary-search for the first child whose max key
%% is >= the first wanted key, recurse into it with the keys it covers,
%% then continue with the remaining keys.
lookup_kpnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
    {ok, lists:reverse(Output)};
lookup_kpnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
    {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
lookup_kpnode(Bt, NodeTuple, LowerBound, [FirstLookupKey | _] = LookupKeys, Output) ->
    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), FirstLookupKey),
    {Key, PointerInfo} = element(N, NodeTuple),
    SplitFun = fun(LookupKey) -> not less(Bt, Key, LookupKey) end,
    case lists:splitwith(SplitFun, LookupKeys) of
    {[], GreaterQueries} ->
        lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, Output);
    {LessEqQueries, GreaterQueries} ->
        {ok, Results} = lookup(Bt, PointerInfo, LessEqQueries),
        lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, lists:reverse(Results, Output))
    end.

%% Leaf-node lookup: binary-search each key into the sorted KV tuple
%% and test for equality with the tree's ordering function.
lookup_kvnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
    {ok, lists:reverse(Output)};
lookup_kvnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
    % keys not found
    {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
lookup_kvnode(Bt, NodeTuple, LowerBound, [LookupKey | RestLookupKeys], Output) ->
    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), LookupKey),
    {Key, Value} = element(N, NodeTuple),
    case less(Bt, LookupKey, Key) of
    true ->
        % LookupKey is less than Key
        lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, not_found} | Output]);
    false ->
        case less(Bt, Key, LookupKey) of
        true ->
            % LookupKey is greater than Key
            lookup_kvnode(Bt, NodeTuple, N+1, RestLookupKeys, [{LookupKey, not_found} | Output]);
        false ->
            % LookupKey is equal to Key
            lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, {ok, assemble(Bt, LookupKey, Value)}} | Output])
        end
    end.

%% Collapse a list of key-pointers into a single root, writing new
%% kp_nodes level by level until one pointer (or none) remains.
complete_root(_Bt, []) ->
    {ok, nil};
complete_root(_Bt, [{_Key, PointerInfo}])->
    {ok, PointerInfo};
complete_root(Bt, KPs) ->
    {ok, ResultKeyPointers} = write_node(Bt, kp_node, KPs),
    complete_root(Bt, ResultKeyPointers).
%%%%%%%%%%%%% The chunkify function sucks! %%%%%%%%%%%%%
% It is inaccurate as it does not account for compression when blocks are
% written. Plus with the "case byte_size(term_to_binary(InList)) of" code
% it's probably really inefficient.
%% Split a node list into chunks of roughly ?CHUNK_THRESHOLD serialized
%% bytes each, so no single on-disk node grows unbounded. Small lists
%% are returned as one chunk.
chunkify(InList) ->
    case ?term_size(InList) of
    Size when Size > ?CHUNK_THRESHOLD ->
        NumberOfChunksLikely = ((Size div ?CHUNK_THRESHOLD) + 1),
        ChunkThreshold = Size div NumberOfChunksLikely,
        chunkify(InList, ChunkThreshold, [], 0, []);
    _Else ->
        [InList]
    end.

%% Accumulate elements into the current chunk; the element that pushes
%% the running size over the threshold closes the chunk (and is
%% included in it), so every chunk has at least one element.
chunkify([], _ChunkThreshold, [], 0, OutputChunks) ->
    lists:reverse(OutputChunks);
chunkify([], _ChunkThreshold, OutList, _OutListSize, OutputChunks) ->
    lists:reverse([lists:reverse(OutList) | OutputChunks]);
chunkify([InElement | RestInList], ChunkThreshold, OutList, OutListSize, OutputChunks) ->
    case ?term_size(InElement) of
    Size when (Size + OutListSize) > ChunkThreshold andalso OutList /= [] ->
        chunkify(RestInList, ChunkThreshold, [], 0, [lists:reverse([InElement | OutList]) | OutputChunks]);
    Size ->
        chunkify(RestInList, ChunkThreshold, [InElement | OutList], OutListSize + Size, OutputChunks)
    end.
%% Apply a key-sorted action list to a subtree. A nil pointer stands
%% for an empty kv_node. Returns the (possibly split) replacement
%% key-pointers plus accumulated fetch results; if nothing changed the
%% original pointer is reused and no node is rewritten.
modify_node(Bt, RootPointerInfo, Actions, QueryOutput) ->
    case RootPointerInfo of
    nil ->
        NodeType = kv_node,
        NodeList = [];
    _Tuple ->
        Pointer = element(1, RootPointerInfo),
        {NodeType, NodeList} = get_node(Bt, Pointer)
    end,
    NodeTuple = list_to_tuple(NodeList),
    {ok, NewNodeList, QueryOutput2} =
    case NodeType of
    kp_node -> modify_kpnode(Bt, NodeTuple, 1, Actions, [], QueryOutput);
    kv_node -> modify_kvnode(Bt, NodeTuple, 1, Actions, [], QueryOutput)
    end,
    case NewNodeList of
    [] ->  % no nodes remain
        {ok, [], QueryOutput2};
    NodeList ->  % nothing changed
        {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
        {ok, [{LastKey, RootPointerInfo}], QueryOutput2};
    _Else2 ->
        {ok, ResultList} = write_node(Bt, NodeType, NewNodeList),
        {ok, ResultList, QueryOutput2}
    end.

%% Reduce a freshly (re)written node's contents; no-op without a
%% configured reduce fun.
reduce_node(#btree{reduce=nil}, _NodeType, _NodeList) ->
    [];
reduce_node(#btree{reduce=R}, kp_node, NodeList) ->
    R(rereduce, [element(2, Node) || {_K, Node} <- NodeList]);
reduce_node(#btree{reduce=R}=Bt, kv_node, NodeList) ->
    R(reduce, [assemble(Bt, K, V) || {K, V} <- NodeList]).

%% Sum the subtree sizes below a node, returning 'nil' as soon as any
%% child uses the pre-1.2 pointer format without a recorded size.
reduce_tree_size(kv_node, NodeSize, _KvList) ->
    NodeSize;
reduce_tree_size(kp_node, NodeSize, []) ->
    NodeSize;
reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red}} | _]) ->
    % pre 1.2 format
    nil;
reduce_tree_size(kp_node, _NodeSize, [{_K, {_P, _Red, nil}} | _]) ->
    nil;
reduce_tree_size(kp_node, NodeSize, [{_K, {_P, _Red, Sz}} | NodeList]) ->
    reduce_tree_size(kp_node, NodeSize + Sz, NodeList).

%% Read a {NodeType, NodeList} term back from the append-only file.
get_node(#btree{fd = Fd}, NodePos) ->
    {ok, {NodeType, NodeList}} = couch_file:pread_term(Fd, NodePos),
    {NodeType, NodeList}.

%% Chunk an oversized node list and append each chunk to the file,
%% yielding {LastKey, {Pointer, Reduction, SubtreeSize}} per chunk.
write_node(#btree{fd = Fd, compression = Comp} = Bt, NodeType, NodeList) ->
    % split up nodes into smaller sizes
    NodeListList = chunkify(NodeList),
    % now write out each chunk and return the KeyPointer pairs for those nodes
    ResultList = [
        begin
            {ok, Pointer, Size} = couch_file:append_term(
                Fd, {NodeType, ANodeList}, [{compression, Comp}]),
            {LastKey, _} = lists:last(ANodeList),
            SubTreeSize = reduce_tree_size(NodeType, Size, ANodeList),
            {LastKey, {Pointer, reduce_node(Bt, NodeType, ANodeList), SubTreeSize}}
        end
    ||
        ANodeList <- NodeListList
    ],
    {ok, ResultList}.

%% Route actions to the children of an inner node. Any actions with
%% keys beyond the node's last child fall through to that last child so
%% new maximal keys are still inserted.
modify_kpnode(Bt, {}, _LowerBound, Actions, [], QueryOutput) ->
    modify_node(Bt, nil, Actions, QueryOutput);
modify_kpnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
    {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
            tuple_size(NodeTuple), [])), QueryOutput};
modify_kpnode(Bt, NodeTuple, LowerBound,
        [{_, FirstActionKey, _}|_]=Actions, ResultNode, QueryOutput) ->
    Sz = tuple_size(NodeTuple),
    N = find_first_gteq(Bt, NodeTuple, LowerBound, Sz, FirstActionKey),
    case N =:= Sz of
    true ->
        % perform remaining actions on last node
        {_, PointerInfo} = element(Sz, NodeTuple),
        {ok, ChildKPs, QueryOutput2} =
            modify_node(Bt, PointerInfo, Actions, QueryOutput),
        NodeList = lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
            Sz - 1, ChildKPs)),
        {ok, NodeList, QueryOutput2};
    false ->
        {NodeKey, PointerInfo} = element(N, NodeTuple),
        SplitFun = fun({_ActionType, ActionKey, _ActionValue}) ->
                not less(Bt, NodeKey, ActionKey)
            end,
        {LessEqQueries, GreaterQueries} = lists:splitwith(SplitFun, Actions),
        {ok, ChildKPs, QueryOutput2} =
            modify_node(Bt, PointerInfo, LessEqQueries, QueryOutput),
        ResultNode2 = lists:reverse(ChildKPs, bounded_tuple_to_revlist(NodeTuple,
                LowerBound, N - 1, ResultNode)),
        modify_kpnode(Bt, NodeTuple, N+1, GreaterQueries, ResultNode2, QueryOutput2)
    end.
%% Prepend Tuple elements Start..End onto Tail so that element(End)
%% ends up at the head — i.e. the slice appears reversed before Tail.
bounded_tuple_to_revlist(_Tuple, Start, End, Tail) when Start > End ->
    Tail;
bounded_tuple_to_revlist(Tuple, Start, End, Tail) ->
    lists:foldl(fun(Idx, Acc) -> [element(Idx, Tuple) | Acc] end,
                Tail, lists:seq(Start, End)).
%% List of Tuple elements Start..End (in order), followed by Tail.
bounded_tuple_to_list(_Tuple, Start, End, Tail) when Start > End ->
    Tail;
bounded_tuple_to_list(Tuple, Start, End, Tail) ->
    [element(Start, Tuple) | bounded_tuple_to_list(Tuple, Start + 1, End, Tail)].
%% Binary search: index of the first tuple slot in [Start, End] whose
%% key is >= Key (returns End when every key is smaller).
find_first_gteq(_Bt, _Tuple, Start, End, _Key) when Start == End ->
    End;
find_first_gteq(Bt, Tuple, Start, End, Key) ->
    Mid = Start + ((End - Start) div 2),
    {TupleKey, _} = element(Mid, Tuple),
    case less(Bt, TupleKey, Key) of
    true ->
        find_first_gteq(Bt, Tuple, Mid+1, End, Key);
    false ->
        find_first_gteq(Bt, Tuple, Start, Mid, Key)
    end.
%% Apply key-sorted actions to a leaf node, walking the stored KVs and
%% the actions in lock-step: inserts replace equal keys, removes drop
%% them, fetches report {ok, KV} or not_found.
modify_kvnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
    {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, tuple_size(NodeTuple), [])), QueryOutput};
%% Past the end of the node: every remaining action targets a key
%% larger than anything stored here.
modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], ResultNode, QueryOutput) when LowerBound > tuple_size(NodeTuple) ->
    case ActionType of
    insert ->
        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
    remove ->
        % just drop the action
        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, QueryOutput);
    fetch ->
        % the key/value must not exist in the tree
        modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
    end;
modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], AccNode, QueryOutput) ->
    N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), ActionKey),
    {Key, Value} = element(N, NodeTuple),
    %% Copy over the untouched KVs that sort before the action key.
    ResultNode = bounded_tuple_to_revlist(NodeTuple, LowerBound, N - 1, AccNode),
    case less(Bt, ActionKey, Key) of
    true ->
        case ActionType of
        insert ->
            % ActionKey is less than the Key, so insert
            modify_kvnode(Bt, NodeTuple, N, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
        remove ->
            % ActionKey is less than the Key, just drop the action
            modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, QueryOutput);
        fetch ->
            % ActionKey is less than the Key, the key/value must not exist in the tree
            modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
        end;
    false ->
        % ActionKey and Key are maybe equal.
        case less(Bt, Key, ActionKey) of
        false ->
            case ActionType of
            insert ->
                modify_kvnode(Bt, NodeTuple, N+1, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
            remove ->
                modify_kvnode(Bt, NodeTuple, N+1, RestActions, ResultNode, QueryOutput);
            fetch ->
                % ActionKey is equal to the Key, insert into the QueryOuput, but re-process the node
                % since an identical action key can follow it.
                modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{ok, assemble(Bt, Key, Value)} | QueryOutput])
            end;
        true ->
            modify_kvnode(Bt, NodeTuple, N + 1, [{ActionType, ActionKey, ActionValue} | RestActions], [{Key, Value} | ResultNode], QueryOutput)
        end
    end.
%% Stream one subtree for fold_reduce: nil means an empty tree,
%% otherwise dispatch on the node type read from disk.
reduce_stream_node(_Bt, _Dir, nil, _KeyStart, _KeyEnd, GroupedKey, GroupedKVsAcc,
        GroupedRedsAcc, _KeyGroupFun, _Fun, Acc) ->
    {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
reduce_stream_node(Bt, Dir, Node, KeyStart, KeyEnd, GroupedKey, GroupedKVsAcc,
        GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
    P = element(1, Node),
    case get_node(Bt, P) of
    {kp_node, NodeList} ->
        reduce_stream_kp_node(Bt, Dir, NodeList, KeyStart, KeyEnd, GroupedKey,
                GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc);
    {kv_node, KVs} ->
        reduce_stream_kv_node(Bt, Dir, KVs, KeyStart, KeyEnd, GroupedKey,
                GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc)
    end.

%% Clip a leaf's KVs to [KeyStart, KeyEnd] (both inclusive here) before
%% grouping.
reduce_stream_kv_node(Bt, Dir, KVs, KeyStart, KeyEnd,
                        GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
                        KeyGroupFun, Fun, Acc) ->
    GTEKeyStartKVs =
    case KeyStart of
    undefined ->
        KVs;
    _ ->
        lists:dropwhile(fun({Key,_}) -> less(Bt, Key, KeyStart) end, KVs)
    end,
    KVs2 =
    case KeyEnd of
    undefined ->
        GTEKeyStartKVs;
    _ ->
        lists:takewhile(
            fun({Key,_}) ->
                not less(Bt, KeyEnd, Key)
            end, GTEKeyStartKVs)
    end,
    reduce_stream_kv_node2(Bt, adjust_dir(Dir, KVs2), GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
                        KeyGroupFun, Fun, Acc).

%% Group consecutive KVs with KeyGroupFun; each completed group is
%% handed to Fun, and a {stop, Acc} reply aborts the whole fold via
%% throw (caught in fold_reduce/4).
reduce_stream_kv_node2(_Bt, [], GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
        _KeyGroupFun, _Fun, Acc) ->
    {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
reduce_stream_kv_node2(Bt, [{Key, Value}| RestKVs], GroupedKey, GroupedKVsAcc,
        GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
    case GroupedKey of
    undefined ->
        reduce_stream_kv_node2(Bt, RestKVs, Key,
                [assemble(Bt,Key,Value)], [], KeyGroupFun, Fun, Acc);
    _ ->
        case KeyGroupFun(GroupedKey, Key) of
        true ->
            reduce_stream_kv_node2(Bt, RestKVs, GroupedKey,
                [assemble(Bt,Key,Value)|GroupedKVsAcc], GroupedRedsAcc, KeyGroupFun,
                Fun, Acc);
        false ->
            case Fun(GroupedKey, {GroupedKVsAcc, GroupedRedsAcc}, Acc) of
            {ok, Acc2} ->
                reduce_stream_kv_node2(Bt, RestKVs, Key, [assemble(Bt,Key,Value)],
                    [], KeyGroupFun, Fun, Acc2);
            {stop, Acc2} ->
                throw({stop, Acc2})
            end
        end
    end.

%% Clip an inner node's children to the key range; the first child past
%% KeyEnd is kept because it may still contain in-range keys.
reduce_stream_kp_node(Bt, Dir, NodeList, KeyStart, KeyEnd,
                        GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
                        KeyGroupFun, Fun, Acc) ->
    Nodes =
    case KeyStart of
    undefined ->
        NodeList;
    _ ->
        lists:dropwhile(
            fun({Key,_}) ->
                less(Bt, Key, KeyStart)
            end, NodeList)
    end,
    NodesInRange =
    case KeyEnd of
    undefined ->
        Nodes;
    _ ->
        {InRange, MaybeInRange} = lists:splitwith(
            fun({Key,_}) ->
                less(Bt, Key, KeyEnd)
            end, Nodes),
        InRange ++ case MaybeInRange of [] -> []; [FirstMaybe|_] -> [FirstMaybe] end
    end,
    reduce_stream_kp_node2(Bt, Dir, adjust_dir(Dir, NodesInRange), KeyStart, KeyEnd,
        GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc).

%% Descend children in order. With no open group, recurse into the
%% first child; otherwise reuse whole-child reductions for children
%% whose max key still belongs to the current group, and only descend
%% into the first child that might start a new group.
reduce_stream_kp_node2(Bt, Dir, [{_Key, NodeInfo} | RestNodeList], KeyStart, KeyEnd,
                        undefined, [], [], KeyGroupFun, Fun, Acc) ->
    {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
        reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, KeyEnd, undefined,
            [], [], KeyGroupFun, Fun, Acc),
    reduce_stream_kp_node2(Bt, Dir, RestNodeList, KeyStart, KeyEnd, GroupedKey2,
            GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
reduce_stream_kp_node2(Bt, Dir, NodeList, KeyStart, KeyEnd,
        GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
    {Grouped0, Ungrouped0} = lists:splitwith(fun({Key,_}) ->
        KeyGroupFun(GroupedKey, Key) end, NodeList),
    {GroupedNodes, UngroupedNodes} =
    case Grouped0 of
    [] ->
        {Grouped0, Ungrouped0};
    _ ->
        %% The last "grouped" child may straddle a group boundary, so
        %% it is moved to the ungrouped side for a full descent.
        [FirstGrouped | RestGrouped] = lists:reverse(Grouped0),
        {RestGrouped, [FirstGrouped | Ungrouped0]}
    end,
    GroupedReds = [element(2, Node) || {_, Node} <- GroupedNodes],
    case UngroupedNodes of
    [{_Key, NodeInfo}|RestNodes] ->
        {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
            reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, KeyEnd, GroupedKey,
                GroupedKVsAcc, GroupedReds ++ GroupedRedsAcc, KeyGroupFun, Fun, Acc),
        reduce_stream_kp_node2(Bt, Dir, RestNodes, KeyStart, KeyEnd, GroupedKey2,
                GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
    [] ->
        {ok, Acc, GroupedReds ++ GroupedRedsAcc, GroupedKVsAcc, GroupedKey}
    end.
%% Orient a key-ordered list for the requested traversal direction.
adjust_dir(Dir, List) ->
    case Dir of
        fwd -> List;
        rev -> lists:reverse(List)
    end.
%% Stream a subtree starting at StartKey.
stream_node(Bt, Reds, Node, StartKey, InRange, Dir, Fun, Acc) ->
    Pointer = element(1, Node),
    {NodeType, NodeList} = get_node(Bt, Pointer),
    case NodeType of
    kp_node ->
        stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc);
    kv_node ->
        stream_kv_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc)
    end.

%% Stream an entire subtree (no start key to seek to).
stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc) ->
    Pointer = element(1, Node),
    {NodeType, NodeList} = get_node(Bt, Pointer),
    case NodeType of
    kp_node ->
        stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc);
    kv_node ->
        stream_kv_node2(Bt, Reds, [], adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc)
    end.

%% Walk an inner node's children; the user fun's 'traverse' callback
%% may return {skip, Acc} to prune a whole subtree while its reduction
%% is still accumulated into Reds.
stream_kp_node(_Bt, _Reds, [], _InRange, _Dir, _Fun, Acc) ->
    {ok, Acc};
stream_kp_node(Bt, Reds, [{Key, Node} | Rest], InRange, Dir, Fun, Acc) ->
    Red = element(2, Node),
    case Fun(traverse, Key, Red, Acc) of
    {ok, Acc2} ->
        case stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc2) of
        {ok, Acc3} ->
            stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc3);
        {stop, LastReds, Acc3} ->
            {stop, LastReds, Acc3}
        end;
    {skip, Acc2} ->
        stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc2)
    end.
%% Skip leading key-pointers whose max key sorts before StartKey,
%% pushing the reductions of everything skipped onto Reds (most
%% recently skipped first, matching the original recursion order).
drop_nodes(Bt, Reds, StartKey, KPs) ->
    {Dropped, Remaining} = lists:splitwith(
        fun({NodeKey, _Node}) -> less(Bt, NodeKey, StartKey) end, KPs),
    DroppedReds = [element(2, Node) || {_Key, Node} <- Dropped],
    {lists:reverse(DroppedReds, Reds), Remaining}.
%% Seek within an inner node: forward traversal drops children sorting
%% before StartKey; reverse traversal keeps them plus the first child
%% sorting after, accumulating the reductions of everything skipped.
stream_kp_node(Bt, Reds, KPs, StartKey, InRange, Dir, Fun, Acc) ->
    {NewReds, NodesToStream} =
    case Dir of
    fwd ->
        % drop all nodes sorting before the key
        drop_nodes(Bt, Reds, StartKey, KPs);
    rev ->
        % keep all nodes sorting before the key, AND the first node to sort after
        RevKPs = lists:reverse(KPs),
        case lists:splitwith(fun({Key, _Pointer}) -> less(Bt, Key, StartKey) end, RevKPs) of
        {_RevsBefore, []} ->
            % everything sorts before it
            {Reds, KPs};
        {RevBefore, [FirstAfter | Drop]} ->
            {[element(2, Node) || {_K, Node} <- Drop] ++ Reds,
                [FirstAfter | lists:reverse(RevBefore)]}
        end
    end,
    case NodesToStream of
    [] ->
        {ok, Acc};
    [{_Key, Node} | Rest] ->
        %% Only the first streamed child needs the StartKey seek; the
        %% rest are streamed whole.
        case stream_node(Bt, NewReds, Node, StartKey, InRange, Dir, Fun, Acc) of
        {ok, Acc2} ->
            Red = element(2, Node),
            stream_kp_node(Bt, [Red | NewReds], Rest, InRange, Dir, Fun, Acc2);
        {stop, LastReds, Acc2} ->
            {stop, LastReds, Acc2}
        end
    end.

%% Seek within a leaf: split at StartKey; keys before it are assembled
%% into the PrevKVs context so reductions can still account for them.
stream_kv_node(Bt, Reds, KVs, StartKey, InRange, Dir, Fun, Acc) ->
    DropFun =
    case Dir of
    fwd ->
        fun({Key, _}) -> less(Bt, Key, StartKey) end;
    rev ->
        fun({Key, _}) -> less(Bt, StartKey, Key) end
    end,
    {LTKVs, GTEKVs} = lists:splitwith(DropFun, KVs),
    AssembleLTKVs = [assemble(Bt,K,V) || {K,V} <- LTKVs],
    stream_kv_node2(Bt, Reds, AssembleLTKVs, GTEKVs, InRange, Dir, Fun, Acc).
%% Visit leaf KVs until InRange says stop or the user fun stops;
%% {PrevKVs, Reds} is the reduction context reported on stop.
stream_kv_node2(_Bt, _Reds, _PrevKVs, [], _InRange, _Dir, _Fun, Acc) ->
    {ok, Acc};
stream_kv_node2(Bt, Reds, PrevKVs, [{K,V} | RestKVs], InRange, Dir, Fun, Acc) ->
    case InRange(K) of
    false ->
        {stop, {PrevKVs, Reds}, Acc};
    true ->
        AssembledKV = assemble(Bt, K, V),
        case Fun(visit, AssembledKV, {PrevKVs, Reds}, Acc) of
        {ok, Acc2} ->
            stream_kv_node2(Bt, Reds, [AssembledKV | PrevKVs], RestKVs, InRange, Dir, Fun, Acc2);
        {stop, Acc2} ->
            {stop, {PrevKVs, Reds}, Acc2}
        end
    end. | src/couchdb/couch_btree.erl | 0.551091 | 0.451447 | couch_btree.erl | starcoder |
%% -------------------------------------------------------------------
%%
%% riak_core: Core Riak Application
%%
%% Copyright (c) 2007-2015 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc The default functions used for claiming partition ownership. Generally,
%% a wants_claim function should return either {yes, Integer} or 'no' where
%% Integer is the number of additional partitions wanted by this node. A
%% choose_claim function should return a riak_core_ring with more
%% partitions claimed by this node than in the input ring.
%% The usual intention for partition ownership assumes relative heterogeneity of
%% capacity and connectivity. Accordingly, the standard claim functions attempt
%% to maximize "spread" -- expected distance between partitions claimed by each
%% given node. This is in order to produce the expectation that for any
%% reasonably short span of consecutive partitions, there will be a minimal
%% number of partitions owned by the same node.
%% The exact amount that is considered tolerable is determined by the
%% application env variable "target_n_val". The functions in riak_core_claim
%% will ensure that all sequences up to target_n_val long contain no repeats if
%% at all possible. The effect of this is that when the number of nodes in the
%% system is smaller than target_n_val, a potentially large number of partitions
%% must be moved in order to safely add a new node. After the cluster has grown
%% beyond that size, a minimal number of partitions (1/NumNodes) will generally
%% be moved.
%% If the number of nodes does not divide evenly into the number of partitions,
%% it may not be possible to perfectly achieve the maximum spread constraint.
%% In that case, Riak will minimize the cases where the constraint is violated
%% and they will all exist near the origin point of the ring.
-module(riak_core_claim).
-type ring() :: riak_core_ring:riak_core_ring().
-export([claim/1,
claim/3,
claim_until_balanced/2,
claim_until_balanced/4]).
-export([default_wants_claim/1,
default_wants_claim/2,
default_choose_claim/1,
default_choose_claim/2,
default_choose_claim/3,
never_wants_claim/1,
never_wants_claim/2,
random_choose_claim/1,
random_choose_claim/2,
random_choose_claim/3]).
-export([wants_claim_v2/1,
wants_claim_v2/2,
choose_claim_v2/1,
choose_claim_v2/2,
choose_claim_v2/3,
claim_rebalance_n/2,
claim_diversify/3,
claim_diagonal/3,
wants/1,
wants_owns_diff/2,
meets_target_n/2,
diagonal_stripe/2]).
-define(DEF_TARGET_N, 4).
%% @doc Run the claim algorithm for the complete ring.
%% @param Ring Ring the algorithm is run on.
%% @returns The ring after the claim algorithm has been applied.
-spec claim(Ring :: ring()) -> ring().
claim(Ring) -> claim(Ring, want, choose).

%% @doc Run the claim algorithm for the complete ring. The two mode
%% arguments are accepted for API compatibility only and are ignored.
%% @param Ring Ring the algorithm is run on.
%% @param Mode1 ignored.
%% @param Mode2 ignored.
%% @returns The ring after the claim algorithm has been applied.
-spec claim(Ring :: ring(), Mode1 :: any(),
            Mode2 :: any()) -> ring().
claim(Ring, _, _) ->
    %% Rebalance once per claiming member, threading the updated ring
    %% through each step.
    Claimants = riak_core_ring:claiming_members(Ring),
    lists:foldl(fun (Claimant, AccRing) ->
                        claim_until_balanced(AccRing, Claimant, want, choose)
                end,
                Ring,
                Claimants).
%% @doc Apply the claim algorithm until a given node owns enough partitions.
%% @param Ring Ring the algorithm is applied to.
%% @param Node Node name of the node to be balanced.
%% @returns The balanced ring.
-spec claim_until_balanced(Ring :: ring(),
                           Node :: term()) -> ring().
claim_until_balanced(Ring, Node) ->
    claim_until_balanced(Ring, Node, want, choose).

%% @doc Apply the claim algorithm until a given node owns enough partitions.
%% Loops choose_claim_v2/2 while wants_claim_v2/2 still reports a deficit.
%% @param Ring Ring the algorithm is applied to.
%% @param Node Node name of the node to be balanced.
%% @param want Fixed guard.
%% @param choose Fixed guard.
%% @returns The balanced ring.
-spec claim_until_balanced(Ring :: ring(),
                           Node :: term(), want, choose) -> ring().
claim_until_balanced(Ring, Node, want, choose) ->
    case wants_claim_v2(Ring, Node) of
        {yes, _NumToClaim} ->
            claim_until_balanced(choose_claim_v2(Ring, Node),
                                 Node, want, choose);
        no -> Ring
    end.
%% ===================================================================
%% Claim Function Implementations
%% ===================================================================

%% @doc Choose a partition at random for the local node.
%% Thin wrapper over {@link default_choose_claim/2} using `node()'.
%% @param Ring Ring to claim on.
%% @returns Updated ring.
-spec default_choose_claim(Ring :: ring()) -> ring().
default_choose_claim(Ring) ->
    default_choose_claim(Ring, node()).

%% @doc Choose a partition for a given node at random.
%% Delegates to {@link choose_claim_v2/2}, the current default strategy.
%% @param Ring Ring to claim on.
%% @param Node Node to claim for.
%% @returns Updated ring.
-spec default_choose_claim(Ring :: ring(),
                           Node :: term()) -> ring().
default_choose_claim(Ring, Node) ->
    choose_claim_v2(Ring, Node).

%% @doc Choose a partition for a given node according to the given parameters.
%% Delegates to {@link choose_claim_v2/3}.
%% @param Ring Ring to claim on.
%% @param Node Node to claim for.
%% @param Params Parameters to consider.
%% @returns Updated ring.
-spec default_choose_claim(Ring :: ring(),
                           Node :: term(), Params :: [term()]) -> ring().
default_choose_claim(Ring, Node, Params) ->
    choose_claim_v2(Ring, Node, Params).
%% @doc Want a partition if we currently have less than floor(ringsize/nodes).
%% @param Ring Ring to claim on.
%% @returns `{yes, Difference}' or `no'.
-spec default_wants_claim(Ring :: ring()) -> {yes,
                                              integer()} |
                                             no.
default_wants_claim(Ring) ->
    default_wants_claim(Ring, node()).

%% @doc Like {@link default_wants_claim/1} with a given node.
%% Delegates to {@link wants_claim_v2/2}, the current default strategy.
%% @param Node Node to decide balance for.
-spec default_wants_claim(Ring :: ring(),
                          Node :: term()) -> {yes, integer()} | no.
default_wants_claim(Ring, Node) ->
    wants_claim_v2(Ring, Node).
%% @doc Decide if the local node needs more partitions.
%% @param Ring Ring to claim on.
%% @returns `{yes, Difference}' or `no'.
-spec wants_claim_v2(Ring :: ring()) -> {yes, integer()} | no.
wants_claim_v2(Ring) -> wants_claim_v2(Ring, node()).

%% @doc Like {@link wants_claim_v2/1} for another node.
%% A node wants partitions while it owns fewer than
%% floor(RingSize / NodeCount).
%% @param Node Node to decide balance for.
-spec wants_claim_v2(Ring :: ring(),
                     Node :: term()) -> {yes, integer()} | no.
wants_claim_v2(Ring, Node) ->
    Members = riak_core_ring:claiming_members(Ring),
    AllOwners = riak_core_ring:all_owners(Ring),
    OwnedCounts = get_counts(Members, AllOwners),
    Avg = riak_core_ring:num_partitions(Ring) div
              erlang:length(Members),
    Owned = proplists:get_value(Node, OwnedCounts, 0),
    if
        Owned < Avg -> {yes, Avg - Owned};
        true -> no
    end.
%% @doc Provide default choose parameters if none given.
-spec default_choose_params() -> [term()].
default_choose_params() -> default_choose_params([]).

%% @doc Ensure the claim parameter list carries a `target_n_val' entry,
%% falling back to the riak_core application environment and finally to
%% the compiled-in default.
%% @param Params List of claim parameters.
%% @returns List of claim parameters containing target NVal.
-spec default_choose_params(Params :: term()) -> [term()].
default_choose_params(Params) ->
    case proplists:get_value(target_n_val, Params) of
        undefined ->
            Default = application:get_env(riak_core,
                                          target_n_val,
                                          ?DEF_TARGET_N),
            [{target_n_val, Default} | Params];
        _Present -> Params
    end.
%% @doc Choose a partition the local node should claim.
%% @param Ring Ring to claim on.
%% @returns Updated ring.
-spec choose_claim_v2(Ring :: ring()) -> ring().
choose_claim_v2(Ring) -> choose_claim_v2(Ring, node()).

%% @doc Like {@link choose_claim_v2/1} with a specified node.
%% Uses the default claim parameters (see default_choose_params/0).
%% @param Node Specified node that claims a partition.
-spec choose_claim_v2(Ring :: ring(),
                      Node :: term()) -> ring().
choose_claim_v2(Ring, Node) ->
    Params = default_choose_params(),
    choose_claim_v2(Ring, Node, Params).
%% @doc Like {@link choose_claim_v2/2} with specified parameters.
%% @param Params0 Claim parameter list; a missing `target_n_val' entry
%%        is filled in from the application environment.
-spec choose_claim_v2(Ring :: ring(), Node :: term(),
                      Params0 :: [term()]) -> ring().
choose_claim_v2(Ring, Node, Params0) ->
    Params = default_choose_params(Params0),
    %% Active::[node()]
    Active = riak_core_ring:claiming_members(Ring),
    %% Owners::[{index(), node()}]
    Owners = riak_core_ring:all_owners(Ring),
    %% Counts::[node(), non_neg_integer()]
    Counts = get_counts(Active, Owners),
    RingSize = riak_core_ring:num_partitions(Ring),
    NodeCount = erlang:length(Active),
    %% Deltas::[node(), integer()]
    Deltas = get_deltas(RingSize,
                        NodeCount,
                        Owners,
                        Counts),
    {_, Want} = lists:keyfind(Node, 1, Deltas),
    TargetN = proplists:get_value(target_n_val, Params),
    %% Pair every partition index with its ordinal position (Nth) in
    %% the ring.
    AllIndices = lists:zip(lists:seq(0, length(Owners) - 1),
                           [Idx || {Idx, _} <- Owners]),
    %% NOTE: `and' binds tighter than `or' in Erlang, so this reads
    %% as: NodeCount > TargetN, or (NodeCount == TargetN and RingSize
    %% divisible by TargetN).
    EnoughNodes = (NodeCount > TargetN) or
        (NodeCount == TargetN) and (RingSize rem TargetN =:= 0),
    case EnoughNodes of
        true ->
            %% If we have enough nodes to meet target_n, then we prefer to
            %% claim indices that are currently causing violations, and then
            %% fallback to indices in linear order. The filtering steps below
            %% will ensure no new violations are introduced.
            Violated = lists:flatten(find_violations(Ring,
                                                     TargetN)),
            Violated2 = [lists:keyfind(Idx, 2, AllIndices)
                         || Idx <- Violated],
            %% `++'/`--' are right-associative, so this is
            %% Violated2 ++ (AllIndices -- Violated2).
            Indices = Violated2 ++ AllIndices -- Violated2;
        false ->
            %% If we do not have enough nodes to meet target_n, then we prefer
            %% claiming the same indices that would occur during a
            %% re-diagonalization of the ring with target_n nodes, falling
            %% back to linear offsets off these preferred indices when the
            %% number of indices desired is less than the computed set.
            Padding = lists:duplicate(TargetN, undefined),
            Expanded = lists:sublist(Active ++ Padding, TargetN),
            PreferredClaim = riak_core_claim:diagonal_stripe(Ring,
                                                             Expanded),
            PreferredNth = [begin
                                {Nth, Idx} = lists:keyfind(Idx, 2, AllIndices),
                                Nth
                            end
                            || {Idx, Owner} <- PreferredClaim, Owner =:= Node],
            Offsets = lists:seq(0,
                                RingSize div length(PreferredNth)),
            AllNth = lists:sublist([(X + Y) rem RingSize
                                    || Y <- Offsets, X <- PreferredNth],
                                   RingSize),
            Indices = [lists:keyfind(Nth, 1, AllIndices)
                       || Nth <- AllNth]
    end,
    %% Filter out indices that conflict with the node's existing ownership
    Indices2 = prefilter_violations(Ring,
                                    Node,
                                    AllIndices,
                                    Indices,
                                    TargetN,
                                    RingSize),
    %% Claim indices from the remaining candidate set
    Claim = select_indices(Owners,
                           Deltas,
                           Indices2,
                           TargetN,
                           RingSize),
    Claim2 = lists:sublist(Claim, Want),
    NewRing = lists:foldl(fun (Idx, Ring0) ->
                                  riak_core_ring:transfer_node(Idx, Node, Ring0)
                          end,
                          Ring,
                          Claim2),
    RingChanged = [] /= Claim2,
    %% meets_target_n/2 returns `false' or `{true, WrapViolations}';
    %% only an exact `false' triggers the fallback below.
    RingMeetsTargetN = meets_target_n(NewRing, TargetN),
    case {RingChanged, EnoughNodes, RingMeetsTargetN} of
        {false, _, _} ->
            %% Unable to claim, fallback to re-diagonalization
            sequential_claim(Ring, Node, TargetN);
        {_, true, false} ->
            %% Failed to meet target_n, fallback to re-diagonalization
            sequential_claim(Ring, Node, TargetN);
        _ -> NewRing
    end.
%% @private For each node in Owners return a tuple of owner and delta,
%% where delta is an integer expressing by how many partitions the
%% owner's ownership needs to change. Positive means the owner needs
%% that many more partitions; negative means the owner can lose that
%% many partitions.
-spec get_deltas(RingSize :: pos_integer(),
                 NodeCount :: pos_integer(),
                 Owners :: [{Index :: non_neg_integer(), node()}],
                 Counts :: [{node(), non_neg_integer()}]) ->
          Deltas :: [{node(), integer()}].
get_deltas(RingSize, NodeCount, Owners, Counts) ->
    Mean = RingSize / NodeCount,
    %% The most partitions any single node should own.
    Max = ceiling(Mean),
    ActiveDeltas = [{Member, Owned, normalise_delta(Mean - Owned)}
                    || {Member, Owned} <- Counts],
    Rebalanced = rebalance_deltas(ActiveDeltas, Max, RingSize),
    %% Owners missing from Counts get a default delta of 0.
    add_default_deltas(Owners, Rebalanced, 0).
%% @private A node can only claim whole partitions, but if RingSize
%% rem NodeCount /= 0 a delta will be a float. Decide whether that
%% float should be floored or ceilinged.
-spec normalise_delta(float()) -> integer().
normalise_delta(Delta) when Delta >= 0 ->
    %% A node wanting partitions asks for the fewest (floor) so that
    %% movement is minimal.
    trunc(Delta);
normalise_delta(Delta) ->
    %% A node with too many partitions (negative delta) gives up the
    %% most it can; any excess is corrected by rebalance_deltas/3.
    -ceiling(abs(Delta)).
%% @private so that we don't end up with an imbalanced ring where one
%% node has more vnodes than it should (e.g. [{n1, 6}, {n2, 6}, {n3,
%% 6}, {n4, 8}, {n5, 6}]) we rebalance the deltas so that select_indices
%% doesn't leave some node not giving up enough partitions.
%% NOTE(review): there is no clause for a positive remainder (aggregate
%% wants exceeding RingSize) - presumably unreachable given how deltas
%% are derived; confirm before relying on it.
-spec rebalance_deltas([{node(), integer()}],
                       pos_integer(), pos_integer()) -> [{node(), integer()}].
rebalance_deltas(NodeDeltas, Max, RingSize) ->
    %% Total ownership if every delta were applied as-is.
    AppliedDeltas = [Own + Delta
                     || {_, Own, Delta} <- NodeDeltas],
    case lists:sum(AppliedDeltas) - RingSize of
        0 ->
            %% Already adds up exactly; drop the ownership counts.
            [{Node, Delta} || {Node, _Cnt, Delta} <- NodeDeltas];
        N when N < 0 -> increase_keeps(NodeDeltas, N, Max, [])
    end.
%% @private increases the delta for (some) nodes giving away
%% partitions to the max they can keep.
%% BUG FIX: the accumulator previously built the 2-tuple
%% {Node, Own + Delta + Additive} instead of the 3-tuple
%% {Node, Own, Delta + Additive}; the termination clause's
%% comprehension pattern {Node, _Own, Delta} silently dropped such
%% entries, losing the node from the returned deltas (and crashing
%% choose_claim_v2/3 on lists:keyfind/3 later).
-spec increase_keeps(Deltas :: [{node(), integer()}],
                     WantsError :: integer(), Max :: pos_integer(),
                     Acc :: [{node(), integer()}]) -> Rebalanced :: [{node(),
                                                                      integer()}].
increase_keeps(Rest, 0, _Max, Acc) ->
    %% Error fully consumed: strip ownership counts and return.
    [{Node, Delta}
     || {Node, _Own, Delta}
            <- lists:usort(lists:append(Rest, Acc))];
increase_keeps([], N, Max, Acc) when N < 0 ->
    %% Keepers exhausted but error remains: let takers absorb it.
    increase_takes(lists:reverse(Acc), N, Max, []);
increase_keeps([{Node, Own, Delta} | Rest], N, Max, Acc)
    when Delta < 0 ->
    WouldOwn = Own + Delta,
    %% Only let the node keep one more if it stays within Max.
    Additive = case WouldOwn + 1 =< Max of
                   true -> 1;
                   false -> 0
               end,
    increase_keeps(Rest,
                   N + Additive,
                   Max,
                   [{Node, Own, Delta + Additive} | Acc]);
increase_keeps([NodeDelta | Rest], N, Max, Acc) ->
    increase_keeps(Rest, N, Max, [NodeDelta | Acc]).

%% @private increases the delta for (some) nodes taking partitions to the max
%% they can ask for.
-spec increase_takes(Deltas :: [{node(), integer()}],
                     WantsError :: integer(), Max :: pos_integer(),
                     Acc :: [{node(), integer()}]) -> Rebalanced :: [{node(),
                                                                      integer()}].
increase_takes(Rest, 0, _Max, Acc) ->
    %% Error fully consumed: strip ownership counts and return.
    [{Node, Delta}
     || {Node, _Own, Delta}
            <- lists:usort(lists:append(Rest, Acc))];
increase_takes([], N, _Max, Acc) when N < 0 ->
    %% Ran out of candidates with error left over; return best effort.
    [{Node, Delta}
     || {Node, _Own, Delta} <- lists:usort(Acc)];
increase_takes([{Node, Own, Delta} | Rest], N, Max, Acc)
    when Delta > 0 ->
    WouldOwn = Own + Delta,
    %% Only let the node take one more if it stays within Max.
    Additive = case WouldOwn + 1 =< Max of
                   true -> 1;
                   false -> 0
               end,
    increase_takes(Rest,
                   N + Additive,
                   Max,
                   [{Node, Own, Delta + Additive} | Acc]);
increase_takes([NodeDelta | Rest], N, Max, Acc) ->
    increase_takes(Rest, N, Max, [NodeDelta | Acc]).
%% @doc Check if the given ring can provide enough owners for each node to meet
%% the target NVal.
%% @param Ring Ring to check.
%% @param TargetN NVal to check.
%% @returns `false' if the ring body violates the spacing requirement;
%% `{true, Parts}' otherwise, where `Parts' lists partitions that
%% violate the spacing only across the ring's wrap-around point.
%% FIX: the previous spec advertised `boolean()', which did not match
%% the helper's actual `{true, [integer()]} | false' return value.
-spec meets_target_n(Ring :: ring(),
                     TargetN :: pos_integer()) ->
          false | {true, [integer()]}.
meets_target_n(Ring, TargetN) ->
    Owners = lists:keysort(1,
                           riak_core_ring:all_owners(Ring)),
    meets_target_n(Owners, TargetN, 0, [], []).
%% @private
%% @doc Helper function for {@link meets_target_n/2}. Walks the
%% ownership list tracking, per node, the first index seen (`First')
%% and the most recent index seen (`Last').
%% Returns `false' on the first mid-ring spacing violation; otherwise
%% returns `{true, Parts}' where `Parts' are the partitions violating
%% `TargetN' only across the wrap-around point.
%% FIX: the previous spec advertised `boolean()', which did not match
%% the `{true, [integer()]} | false' values actually returned below.
-spec meets_target_n(Owners :: [{integer(), term()}],
                     TargetN :: pos_integer(), Index :: non_neg_integer(),
                     First :: [{integer(), term()}],
                     Last :: [{integer(), term()}]) ->
          false | {true, [integer()]}.
meets_target_n([{Part, Node} | Rest], TargetN, Index,
               First, Last) ->
    case lists:keytake(Node, 1, Last) of
        {value, {Node, LastIndex, _}, NewLast} ->
            if Index - LastIndex >= TargetN ->
                   %% node repeat respects TargetN
                   meets_target_n(Rest,
                                  TargetN,
                                  Index + 1,
                                  First,
                                  [{Node, Index, Part} | NewLast]);
               true ->
                   %% violation of TargetN
                   false
            end;
        false ->
            %% haven't seen this node yet
            meets_target_n(Rest,
                           TargetN,
                           Index + 1,
                           [{Node, Index} | First],
                           [{Node, Index, Part} | Last])
    end;
meets_target_n([], TargetN, Index, First, Last) ->
    %% start through end guarantees TargetN
    %% compute violations at wrap around, but don't fail
    %% because of them: handle during reclaim
    Violations = lists:filter(fun ({Node, L, _}) ->
                                      {Node, F} = proplists:lookup(Node, First),
                                      Index - L + F < TargetN
                              end,
                              Last),
    {true, [Part || {_, _, Part} <- Violations]}.
%% @doc Claim diversify tries to build a perfectly diverse ownership list that
%% meets target N. It uses wants to work out which nodes want partitions,
%% but does not honor the counts currently. The algorithm incrementally
%% builds the ownership list, updating the adjacency matrix needed to
%% compute the diversity score as each node is added and uses it to drive
%% the selection of the next nodes.
%% @param Wants List of Node names and the respective number of partitions they
%% want to claim.
%% @param Owners List of indices and the name of their owning node.
%% @param Params Parameters; only `target_n_val' is read.
%% @returns New owner list and a list of attributes, in this case `diversified'.
-spec claim_diversify(Wants :: [{term(), integer()}],
                      Owners :: [{integer(), term()}],
                      Params :: [term()]) -> {[{integer(), term()}],
                                              [atom()]}.
claim_diversify(Wants, Owners, Params) ->
    TN = proplists:get_value(target_n_val,
                             Params,
                             ?DEF_TARGET_N),
    Q = length(Owners),
    %% Only nodes that actually want partitions take part.
    Claiming = [N || {N, W} <- Wants, W > 0],
    {ok, NewOwners, _AM} =
        riak_core_claim_util:construct(riak_core_claim_util:gen_complete_len(Q),
                                       Claiming,
                                       TN),
    {NewOwners, [diversified]}.
%% @doc Claim nodes in seq a,b,c,a,b,c trying to handle the wraparound case to
%% meet target N.
%% @param Wants List of Node names and the respective number of partitions they
%% want to claim.
%% @param Owners List of indices and the name of their owning node.
%% @param Params Parameters; only `target_n_val' is read.
%% @returns Diagonalized list of owners and a list of attributes, in this case
%% `diagonalized'.
-spec claim_diagonal(Wants :: [{term(), integer()}],
                     Owners :: [{integer(), term()}],
                     Params :: [term()]) -> {[term()], [atom()]}.
claim_diagonal(Wants, Owners, Params) ->
    TN = proplists:get_value(target_n_val,
                             Params,
                             ?DEF_TARGET_N),
    Claiming = lists:sort([N || {N, W} <- Wants, W > 0]),
    S = length(Claiming),
    Q = length(Owners),
    %% Number of complete passes over the claiming nodes.
    Reps = Q div S,
    %% Handle the ring wraparound case. If possible try to pick nodes
    %% that are not within the first TN of Claiming, if enough nodes
    %% are available.
    Tail = Q - Reps * S,
    Last = case S >= TN + Tail of
               true -> % If number wanted can be filled excluding first TN nodes
                   lists:sublist(lists:nthtail(TN - Tail, Claiming), Tail);
               _ -> lists:sublist(Claiming, Tail)
           end,
    {lists:flatten([lists:duplicate(Reps, Claiming), Last]),
     [diagonalized]}.
%% @private fall back to diagonal striping vnodes across nodes in a
%% sequential round robin (eg n1 | n2 | n3 | n4 | n5 | n1 | n2 | n3
%% etc) However, different to `claim_rebalance_n', this function
%% attempts to eliminate tail violations (for example a ring that
%% starts/ends n1 | n2 | ...| n3 | n4 | n1)
%% NOTE(review): assumes RingSize >= NodeCount so that
%% CompleteSequences > 0; otherwise ceiling/1 below divides by zero -
%% confirm against callers.
-spec sequential_claim(riak_core_ring:riak_core_ring(),
                       node(), integer()) -> riak_core_ring:riak_core_ring().
sequential_claim(Ring, Node, TargetN) ->
    Nodes = lists:usort([Node
                         | riak_core_ring:claiming_members(Ring)]),
    NodeCount = length(Nodes),
    RingSize = riak_core_ring:num_partitions(Ring),
    %% Partitions left over after striping whole node sequences.
    Overhang = RingSize rem NodeCount,
    HasTailViolation = Overhang > 0 andalso
                           Overhang < TargetN,
    %% How many extra nodes the tail segment needs to reach TargetN.
    Shortfall = TargetN - Overhang,
    CompleteSequences = RingSize div NodeCount,
    MaxFetchesPerSeq = NodeCount - TargetN,
    MinFetchesPerSeq = ceiling(Shortfall /
                                   CompleteSequences),
    CanSolveViolation = CompleteSequences * MaxFetchesPerSeq
                            >= Shortfall,
    Zipped = case HasTailViolation andalso CanSolveViolation
                 of
                 true ->
                     Partitions = lists:sort([I
                                              || {I, _}
                                                     <- riak_core_ring:all_owners(Ring)]),
                     Nodelist = solve_tail_violations(RingSize,
                                                      Nodes,
                                                      Shortfall,
                                                      MinFetchesPerSeq),
                     lists:zip(Partitions, lists:flatten(Nodelist));
                 false -> diagonal_stripe(Ring, Nodes)
             end,
    lists:foldl(fun ({P, N}, Acc) ->
                        riak_core_ring:transfer_node(P, N, Acc)
                end,
                Ring,
                Zipped).
%% @private every module has a ceiling function.
%% Rounds towards positive infinity. FIX: the previous `F - T == 0'
%% check incremented for negative fractional inputs as well (e.g.
%% ceiling(-1.5) gave 0 instead of -1); comparing F > trunc(F) is
%% correct for all signs. Callers in this module only pass
%% non-negative values, so behaviour for them is unchanged.
-spec ceiling(float()) -> integer().
ceiling(F) ->
    T = trunc(F),
    case F > T of
        true -> T + 1;
        false -> T
    end.
%% @private solve_tail_violations increases the tail so that there is
%% no wrap around preflist violation, by taking a `Shortfall' number
%% of nodes from earlier in the preflist.
-spec solve_tail_violations(integer(), [node()],
                            integer(), integer()) -> [node()].
solve_tail_violations(RingSize, Nodes, Shortfall,
                      MinFetchesPerSeq) ->
    %% 1-based position of the node starting the incomplete tail segment.
    StartingNode = RingSize rem length(Nodes) + 1,
    build_nodelist(RingSize,
                   Nodes,
                   Shortfall,
                   StartingNode,
                   MinFetchesPerSeq,
                   []).
%% @private build the node list by building tail to satisfy TargetN, then removing
%% the added nodes from earlier segments.
-spec build_nodelist(integer(), [node()], integer(),
                     integer(), integer(), [node()]) -> [node()].
build_nodelist(RingSize, Nodes, _Shortfall = 0,
               _NodeCounter, _MinFetchesPerSeq, Acc) ->
    %% Finished shuffling, backfill if required
    ShuffledRing = lists:flatten(Acc),
    backfill_ring(RingSize,
                  Nodes,
                  (RingSize - length(ShuffledRing)) div length(Nodes),
                  Acc);
build_nodelist(RingSize, Nodes, Shortfall, NodeCounter,
               MinFetchesPerSeq, _Acc = []) ->
    %% Build the tail with sufficient nodes to satisfy TargetN
    NodeCount = length(Nodes),
    LastSegLength = RingSize rem NodeCount + Shortfall,
    NewSeq = lists:sublist(Nodes, 1, LastSegLength),
    build_nodelist(RingSize,
                   Nodes,
                   Shortfall,
                   NodeCounter,
                   MinFetchesPerSeq,
                   NewSeq);
build_nodelist(RingSize, Nodes, Shortfall, NodeCounter,
               MinFetchesPerSeq, Acc) ->
    %% Build rest of list, subtracting minimum of MinFetchesPerSeq, Shortfall
    %% or (NodeCount - NodeCounter) each time
    NodeCount = length(Nodes),
    NodesToRemove = min(min(MinFetchesPerSeq, Shortfall),
                        NodeCount - NodeCounter),
    %% Drop the fetched nodes from this (earlier) segment; they were
    %% already used to extend the tail.
    RemovalList = lists:sublist(Nodes,
                                NodeCounter,
                                NodesToRemove),
    NewSeq = lists:subtract(Nodes, RemovalList),
    NewNodeCounter = NodeCounter + NodesToRemove,
    build_nodelist(RingSize,
                   Nodes,
                   Shortfall - NodesToRemove,
                   NewNodeCounter,
                   MinFetchesPerSeq,
                   [NewSeq | Acc]).
%% @private Backfill the ring by prepending `Remaining' complete
%% copies of the node sequence onto the accumulated (partial) ring.
-spec backfill_ring(integer(), [node()], integer(),
                    [node()]) -> [node()].
backfill_ring(_RingSize, Nodes, Remaining, Acc) ->
    lists:duplicate(Remaining, Nodes) ++ Acc.
%% @doc Rebalance the expected load on nodes using a diagonal stripe.
%% @param Ring Ring to rebalance.
%% @param Node Node to rebalance from.
%% @returns Rebalanced ring.
%% @see diagonal_stripe/2.
-spec claim_rebalance_n(Ring :: ring(),
                        Node :: term()) -> ring().
claim_rebalance_n(Ring, Node) ->
    Members = lists:usort([Node
                           | riak_core_ring:claiming_members(Ring)]),
    Assignment = diagonal_stripe(Ring, Members),
    %% Apply every {Partition, Owner} transfer in turn.
    lists:foldl(fun ({Partition, Owner}, AccRing) ->
                        riak_core_ring:transfer_node(Partition, Owner, AccRing)
                end,
                Ring,
                Assignment).
%% @doc Creates a diagonal stripe of the given nodes over the
%% partitions of the ring.
%% @param Ring Ring on which the stripes are built.
%% @param Nodes Nodes that are to be distributed.
%% @returns List of indices and assigned nodes.
-spec diagonal_stripe(Ring :: ring(),
                      Nodes :: [term()]) -> [{integer(), term()}].
diagonal_stripe(Ring, Nodes) ->
    %% diagonal stripes guarantee most disperse data
    Partitions = lists:sort([I
                             || {I, _} <- riak_core_ring:all_owners(Ring)]),
    %% Repeat the node sequence enough times to cover every partition,
    %% then truncate to exactly the number of partitions.
    Copies = 1 + length(Partitions) div length(Nodes),
    OwnersStream = lists:flatten(lists:duplicate(Copies, Nodes)),
    Owners = lists:sublist(OwnersStream, 1, length(Partitions)),
    lists:zip(Partitions, Owners).
%% @doc Choose a random partition for the local node.
%% @param Ring Ring to claim on.
%% @returns Updated ring.
-spec random_choose_claim(Ring :: ring()) -> ring().
random_choose_claim(Ring) ->
    random_choose_claim(Ring, node()).

%% @doc Like {@link random_choose_claim/1} with a specified node.
%% @param Node Node to choose a partition for.
-spec random_choose_claim(Ring :: ring(),
                          Node :: term()) -> ring().
random_choose_claim(Ring, Node) ->
    random_choose_claim(Ring, Node, []).

%% @doc Like {@link random_choose_claim/2} with specified parameters.
%% @param Params List of parameters, currently ignored.
-spec random_choose_claim(Ring :: ring(),
                          Node :: term(), Params :: [term()]) -> ring().
random_choose_claim(Ring, Node, _Params) ->
    %% Transfer one randomly chosen partition (owned elsewhere) to Node.
    riak_core_ring:transfer_node(riak_core_ring:random_other_index(Ring),
                                 Node,
                                 Ring).
%% @doc For use by nodes that should not claim any partitions.
%% @returns Always `no'.
-spec never_wants_claim(ring()) -> no.
never_wants_claim(_) -> no.

%% @doc Like {@link never_wants_claim/1}; the node argument is ignored.
-spec never_wants_claim(ring(), term()) -> no.
never_wants_claim(_, _) -> no.
%% ===================================================================
%% Private
%% ===================================================================

%% @private
%%
%% @doc Determines indices that violate the given target_n spacing
%% property. Returns a list of [PrevIdx, Idx] pairs whose owners
%% repeat within fewer than TargetN positions.
find_violations(Ring, TargetN) ->
    Owners = riak_core_ring:all_owners(Ring),
    %% Append the first TargetN-1 owners so wrap-around violations are
    %% also caught by the sliding window.
    Suffix = lists:sublist(Owners, TargetN - 1),
    Owners2 = Owners ++ Suffix,
    %% Use a sliding window to determine violations
    {Bad, _} = lists:foldl(fun (P = {Idx, Owner},
                                {Out, Window}) ->
                                   Window2 = lists:sublist([P | Window],
                                                           TargetN - 1),
                                   case lists:keyfind(Owner, 2, Window) of
                                       {PrevIdx, Owner} ->
                                           {[[PrevIdx, Idx] | Out], Window2};
                                       false -> {Out, Window2}
                                   end
                           end,
                           {[], []},
                           Owners2),
    lists:reverse(Bad).
%% @private
%% @doc Counts up the number of partitions owned by each node in
%% `Nodes', including explicit zero entries for nodes owning nothing.
-spec get_counts([node()],
                 [{integer(), _}]) -> [{node(), non_neg_integer()}].
get_counts(Nodes, Ring) ->
    Zeroed = dict:from_list([{Node, 0} || Node <- Nodes]),
    Tally = fun ({_Idx, Owner}, Acc) ->
                    %% Only tally owners we were asked about.
                    case lists:member(Owner, Nodes) of
                        true -> dict:update_counter(Owner, 1, Acc);
                        false -> Acc
                    end
            end,
    dict:to_list(lists:foldl(Tally, Zeroed, Ring)).
%% @private
%% @doc Add a default delta entry for every owner missing from the
%% delta list; existing entries take precedence (ukeysort keeps the
%% first tuple per key).
-spec add_default_deltas(IdxOwners :: [{integer(),
                                        term()}],
                         Deltas :: [{term(), integer()}],
                         Default :: integer()) -> [{term(), integer()}].
add_default_deltas(IdxOwners, Deltas, Default) ->
    UniqueOwners = lists:usort([Owner || {_Idx, Owner} <- IdxOwners]),
    Defaults = [{Owner, Default} || Owner <- UniqueOwners],
    lists:ukeysort(1, Deltas ++ Defaults).
%% @private
%% @doc Filter out candidate indices that would violate target_n given a node's
%% current partition ownership.
-spec prefilter_violations(Ring :: ring(),
                           Node :: term(), AllIndices :: [{term(), integer()}],
                           Indices :: [{term(), integer()}],
                           TargetN :: pos_integer(),
                           RingSize :: non_neg_integer()) -> [{term(),
                                                               integer()}].
prefilter_violations(Ring, Node, AllIndices, Indices,
                     TargetN, RingSize) ->
    CurrentIndices = riak_core_ring:indices(Ring, Node),
    %% Positions (Nth) of the partitions the node already owns.
    CurrentNth = [lists:keyfind(Idx, 2, AllIndices)
                  || Idx <- CurrentIndices],
    %% Keep only candidates that sit at least TargetN positions away
    %% from every currently-owned position.
    [{Nth, Idx}
     || {Nth, Idx} <- Indices,
        lists:all(fun ({CNth, _}) ->
                          spaced_by_n(CNth, Nth, TargetN, RingSize)
                  end,
                  CurrentNth)].
%% @private
%% @doc Select indices from a given candidate set, according to two
%% goals.
%%
%% 1. Ensure greedy/local target_n spacing between indices. Note that this
%%    goal intentionally does not reject overall target_n violations.
%%
%% 2. Select indices based on the delta between current ownership and
%%    expected ownership. In other words, if A owns 5 partitions and
%%    the desired ownership is 3, then we try to claim at most 2 partitions
%%    from A.
-spec select_indices(Owners :: [],
                     Deltas :: [{term(), integer()}],
                     Indices :: [{term(), integer()}],
                     TargetN :: pos_integer(),
                     RingSize :: pos_integer()) -> [integer()].
select_indices(_Owners, _Deltas, [], _TargetN,
               _RingSize) ->
    [];
select_indices(Owners, Deltas, Indices, TargetN,
               RingSize) ->
    OwnerDT = dict:from_list(Owners),
    {FirstNth, _} = hd(Indices),
    %% The `First' symbol indicates whether or not this is the first
    %% partition to be claimed by this node. This assumes that the
    %% node doesn't already own any partitions. In that case it is
    %% _always_ safe to claim the first partition that another owner
    %% is willing to part with. It's the subsequent partitions
    %% claimed by this node that must not break the target_n invariant.
    {Claim, _, _, _} = lists:foldl(fun ({Nth, Idx},
                                        {Out, LastNth, DeltaDT, First}) ->
                                           Owner = dict:fetch(Idx, OwnerDT),
                                           %% Negative delta: Owner holds more
                                           %% partitions than it should keep.
                                           Delta = dict:fetch(Owner, DeltaDT),
                                           MeetsTN = spaced_by_n(LastNth,
                                                                 Nth,
                                                                 TargetN,
                                                                 RingSize),
                                           case (Delta < 0) and
                                                    (First or MeetsTN)
                                               of
                                               true ->
                                                   %% Claim Idx and credit one
                                                   %% partition back to Owner.
                                                   NextDeltaDT =
                                                       dict:update_counter(Owner,
                                                                           1,
                                                                           DeltaDT),
                                                   {[Idx | Out],
                                                    Nth,
                                                    NextDeltaDT,
                                                    false};
                                               false ->
                                                   {Out,
                                                    LastNth,
                                                    DeltaDT,
                                                    First}
                                           end
                                   end,
                                   {[], FirstNth, dict:from_list(Deltas), true},
                                   Indices),
    lists:reverse(Claim).
%% @private
%% @doc Determine if two positions in the ring meet target_n spacing,
%% measuring both forward and backward distance around the ring.
-spec spaced_by_n(Ntha :: integer(), NthB :: integer(),
                  TargetN :: pos_integer(),
                  RingSize :: pos_integer()) -> boolean().
spaced_by_n(NthA, NthB, TargetN, RingSize) ->
    {Forward, Backward} =
        case NthA > NthB of
            true -> {NthA - NthB, NthB - NthA + RingSize};
            false -> {NthA - NthB + RingSize, NthB - NthA}
        end,
    Forward >= TargetN andalso Backward >= TargetN.
%% @doc For each node in wants, work out how many more partitions each node
%% wants (positive) or is overloaded by (negative) compared to what it owns.
%% Nodes absent from `Owns' are treated as owning zero partitions.
%% @param Wants List of node names and their target number of partitions.
%% @param Owns List of node names and their actual number of partitions.
-spec wants_owns_diff(Wants :: [{term(), integer()}],
                      Owns :: [{term(), integer()}]) -> [{term(), integer()}].
wants_owns_diff(Wants, Owns) ->
    [{Node, Want - proplists:get_value(Node, Owns, 0)}
     || {Node, Want} <- Wants].
%% @doc Given a ring, work out how many partitions each node wants to be
%% considered balanced.
%% @param Ring Ring to figure out wants for.
%% @returns List of node names and the number of wanted partitions.
-spec wants(Ring :: ring()) -> [{term(), integer()}].
wants(Ring) ->
    Active =
        lists:sort(riak_core_ring:claiming_members(Ring)),
    %% Members that are not claiming want nothing.
    Inactive = riak_core_ring:all_members(Ring) -- Active,
    Q = riak_core_ring:num_partitions(Ring),
    ActiveWants = lists:zip(Active,
                            wants_counts(length(Active), Q)),
    InactiveWants = [{N, 0} || N <- Inactive],
    lists:sort(ActiveWants ++ InactiveWants).
%% @private
%% @doc Given a number of nodes `S' and ring size `Q', return a list of
%% desired ownership counts, S entries long, that sum to exactly Q.
-spec wants_counts(S :: non_neg_integer(),
                   Q :: non_neg_integer()) -> [integer()].
wants_counts(S, Q) ->
    Max = roundup(Q / S),
    %% How far S copies of Max overshoot the ring size.
    case S * Max - Q of
        0 -> lists:duplicate(S, Max);
        Excess ->
            %% Give `Excess' nodes one partition less so totals equal Q.
            lists:duplicate(Excess, Max - 1) ++
                lists:duplicate(S - Excess, Max)
    end.
%% @private
%% @doc Round a non-negative number up to the next whole integer (ceil).
%% CLEANUP: the former `Neg when Neg < 0 -> T' clause was unreachable -
%% with the guard `I >= 0', `I - trunc(I)' is never negative.
-spec roundup(float()) -> integer().
roundup(I) when I >= 0 ->
    T = erlang:trunc(I),
    case I - T of
        Pos when Pos > 0 -> T + 1;
        _ -> T
    end.
%% ===================================================================
%% Unit tests
%% ===================================================================
-ifdef(TEST).

%% export_all is acceptable here: this section only compiles for tests.
-compile(export_all).

-include_lib("eunit/include/eunit.hrl").

%% Sanity check: with the standard mock ring, the local node should
%% want exactly one additional partition.
wants_claim_test() ->
    riak_core_ring_manager:setup_ets(test),
    riak_core_test_util:setup_mockring1(),
    {ok, Ring} = riak_core_ring_manager:get_my_ring(),
    ?assertEqual({yes, 1}, (default_wants_claim(Ring))),
    riak_core_ring_manager:cleanup_ets(test),
    riak_core_ring_manager:stop().
%% @private console helper function to return node lists for claiming
%% partitions
-spec gen_diag(pos_integer(), pos_integer()) -> [Node ::
                                                 atom()].
gen_diag(RingSize, NodeCount) ->
    %% Fabricate node names n_1 .. n_NodeCount (test-only helper, so
    %% list_to_atom/1 on generated strings is acceptable here).
    Nodes = [list_to_atom(lists:concat(["n_", N]))
             || N <- lists:seq(1, NodeCount)],
    {HeadNode, RestNodes} = {hd(Nodes), tl(Nodes)},
    %% Build a fresh ring owned by the head node, then add the rest.
    R0 = riak_core_ring:fresh(RingSize, HeadNode),
    RAdded = lists:foldl(fun (Node, Racc) ->
                                 riak_core_ring:add_member(HeadNode, Racc, Node)
                         end,
                         R0,
                         RestNodes),
    Diag = diagonal_stripe(RAdded, Nodes),
    %% Only the per-partition owner list is of interest.
    {_P, N} = lists:unzip(Diag),
    N.
%% @private call with result of gen_diag/2 only: reports whether the
%% striped node list has a tail violation against a hardcoded target-n
%% of 4, returning true if so and false otherwise.
-spec has_violations([Node :: atom()]) -> boolean().
has_violations(Diag) ->
    RingSize = length(Diag),
    NodeCount = length(lists:usort(Diag)),
    Overhang = RingSize rem NodeCount,
    %% hardcoded target n of 4
    Overhang > 0 andalso Overhang < 4.
-endif.
%%%-------------------------------------------------------------------
%%% @author <NAME>
%%% @copyright (C) 2018 ACK CYFRONET AGH
%%% This software is released under the MIT license cited in 'LICENSE.txt'.
%%% @end
%%%-------------------------------------------------------------------
%%% @doc
%%% This module contains utilities for verifying DNS configuration.
%%% It is used to ensure that DNS contains records necessary
%%% for the cluster to work correctly.
%%% @end
%%%-------------------------------------------------------------------
-module(onepanel_dns).
-author("<NAME>").
-include("modules/errors.hrl").
-include("modules/onepanel_dns.hrl").
-include_lib("ctool/include/logging.hrl").
-export([check_any/4, check_all/4, make_results_stub/2]).
-export([build_bind_record/3]).
% allowed query types
-type dns_type() :: a | txt.
% names used in queries
-type dns_name() :: binary() | string().
% IP tuple for A queries, binary for TXT queries
-type dns_value() :: inet:ip4_address() | binary().
% Result type
-type dns_check() :: #dns_check{}.
%% Possible results of dns check:
%% unresolvable - no values, expected or not, could be resolved
%% bad_records - none of the expected values are found in the resolved ones
%% missing_records - some, but not all, expected IPs are present in resolved ones
%% ok - all of expected values are found in the resolved ones
-type summary() :: ok | unresolvable | bad_records | missing_records.
-export_type([summary/0, dns_check/0, dns_type/0, dns_name/0, dns_value/0]).
%%%===================================================================
%%% Public API
%%%===================================================================
%%--------------------------------------------------------------------
%% @doc
%% Queries all given DNS servers and compares result with the expected
%% list. Returns the most optimistic result among DNS servers.
%% @end
%%--------------------------------------------------------------------
-spec check_any(Expected :: [dns_value()], Names :: [dns_name()], Type :: dns_type(),
    Servers :: [inet:ip4_address()]) -> dns_check().
check_any(Expected, Names, Type, Servers) ->
    Diffs = check(Expected, Names, Type, Servers),
    %% compare/2 sorts checks in ascending quality (cf. check_all/4
    %% taking the head as the worst), so the last element is the best.
    Best = lists:last(lists:sort(fun compare/2, Diffs)),
    log_result(Names, Type, Servers, Best),
    Best.
%%--------------------------------------------------------------------
%% @doc
%% Queries all given DNS servers and compares result with the expected
%% list. Returns the most pessimistic result among responsive DNS servers.
%% @end
%%--------------------------------------------------------------------
-spec check_all(Expected :: [dns_value()], Names :: [dns_name()], Type :: dns_type(),
    Servers :: [inet:ip4_address()]) -> dns_check().
check_all(Expected, Names, Type, Servers) ->
    Diffs = check(Expected, Names, Type, Servers),
    %% compare/2 sorts checks in ascending quality, so the head is the
    %% worst result among the servers that responded.
    Worst = hd(lists:sort(fun compare/2, Diffs)),
    log_result(Names, Type, Servers, Worst),
    Worst.
%%--------------------------------------------------------------------
%% @doc
%% Creates dns_check record for when no actual results could be
%% obtained. Every (deduplicated) expected value is reported as missing.
%% @end
%%--------------------------------------------------------------------
-spec make_results_stub(Summary :: summary(), Expected :: [dns_value()]) -> dns_check().
make_results_stub(Summary, Expected) ->
    Unique = lists:usort(Expected),
    #dns_check{summary = Summary, expected = Unique, missing = Unique}.
%%--------------------------------------------------------------------
%% @doc
%% Build DNS record in the popular BIND server zone format.
%% @end
%%--------------------------------------------------------------------
-spec build_bind_record(Domain :: binary(), Type :: atom(),
    Value :: inet:ip4_address () | binary()) -> binary().
%% An A record given as an ip4 tuple is first rendered as a binary.
build_bind_record(Domain, a, IP) when is_tuple(IP) ->
    build_bind_record(Domain, a, onepanel_ip:ip4_to_binary(IP));
%% TXT record values must be enclosed in double quotes in zone files.
build_bind_record(Domain, txt, Value) ->
    QuotedValue = <<$", Value/binary, $">>,
    onepanel_utils:join([Domain, <<"IN">>, <<"TXT">>, QuotedValue], <<" ">>);
%% Generic case: "<domain> IN <TYPE> <value>" with the type upper-cased.
build_bind_record(Domain, Type, Value) ->
    TypeBin = string:uppercase(onepanel_utils:convert(Type, binary)),
    onepanel_utils:join([Domain, <<"IN">>, TypeBin, Value], <<" ">>).
%%%===================================================================
%%% Internal functions
%%%===================================================================
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Performs DNS check on each DNS server. Filters out servers
%% which could not be contacted.
%% @end
%%--------------------------------------------------------------------
-spec check(Expected :: [dns_value()], Names :: [dns_name()], Type :: dns_type(),
    Servers :: [inet:ip4_address() | default]) -> [dns_check()].
%% An empty server list means "use the system default resolver". Note
%% this clause is matched before the empty-expectations clause below.
check(Expected, Names, Type, []) ->
    check(Expected, Names, Type, [default]);
%% Nothing expected and nothing to resolve - trivially successful.
check([], [], _Type, _Servers) ->
    [#dns_check{summary = ok, expected = [], got = []}];
check(Expected, Names, Type, Servers) ->
    %% Query all servers in parallel; unreachable ones yield 'error'.
    Results = lists_utils:pmap(fun(Server) ->
        check_on_server(Expected, Names, Type, Server)
    end, Servers),
    WithoutErrors = lists:filter(fun
        (error) -> false;
        (#dns_check{}) -> true
    end, Results),
    case WithoutErrors of
        [] -> throw(?ERROR_DNS_SERVERS_UNREACHABLE(Servers));
        _ -> WithoutErrors
    end.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Queries given Server for given Names using DNS query type Type.
%% Compares results with the list of Expected values.
%% @end
%%--------------------------------------------------------------------
-spec check_on_server(Expected :: [dns_value()], Names :: [dns_name()], Type :: dns_type(),
    Servers :: inet:ip4_address() | default) -> dns_check() | error.
check_on_server(Expected, Names, Type, ServerIP) ->
    case lookup(Names, Type, ServerIP) of
        %% Connection failure - propagated so the caller can drop this server.
        error -> error;
        Resolved ->
            Correct = lists_utils:intersect(Resolved, Expected),
            Missing = lists_utils:subtract(Expected, Resolved),
            Additional = lists_utils:subtract(Resolved, Expected),
            %% Classify from worst to best: no answers at all, answers but
            %% none correct, some expected values absent, everything found.
            Summary = if
                Resolved == [] -> unresolvable;
                Correct == [] -> bad_records;
                Missing /= [] -> missing_records;
                true -> ok
            end,
            #dns_check{summary = Summary,
                expected = lists:usort(Expected), got = lists:usort(Resolved),
                missing = Missing, excessive = Additional}
    end.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Returns aggregated results of DNS queries for all Names.
%% If any of the queries causes connection error, all results
%% are discarded and 'error' is returned.
%% @end
%%--------------------------------------------------------------------
%% Fixed spec parameter name: was the typo "Quries", now matches the
%% function head's "Names".
-spec lookup(Names :: [dns_name()], Type :: dns_type(),
    DnsServerIP :: inet:ip4_address() | default) -> [dns_value()] | error.
lookup(Names, Type, DnsServerIP) ->
    %% Resolve all names in parallel; each worker returns a list of values,
    %% [] for NXDOMAIN-like answers, or the atom 'error' on connection
    %% failure. flatten/1 merges the per-name value lists.
    Results = lists:flatten(lists_utils:pmap(fun(Name) ->
        NameStr = onepanel_utils:convert(Name, list),
        Opts = case DnsServerIP of
            default -> [];
            IP -> [{nameservers, [{IP, 53}]}]
        end,
        Resolved = inet_res:resolve(NameStr, in, Type, Opts),
        case Resolved of
            %% A DNS-level error (e.g. nxdomain) simply means no records.
            {error, {_Reason, _DnsMsg}} -> [];
            %% Any other error indicates the server could not be reached.
            {error, Reason} ->
                ?warning("Error querying server ~p for DNS check ~p of name ~p: ~p",
                    [DnsServerIP, Type, NameStr, Reason]),
                error;
            {ok, Msg} ->
                Answers = inet_dns:msg(Msg, anlist),
                % filter_answer will convert TXT query results from string to binary
                % preventing them from being caught by lists:flatten.
                filter_answer(Type, Answers)
        end
    end, Names)),
    %% A single connection error poisons the whole lookup.
    case lists:any(fun(error) -> true; (_) -> false end, Results) of
        true -> error;
        false -> Results
    end.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Filters query response by expected class and type.
%% This is similar to inet_res:lookup, which was not used
%% in order to expose server connection errors.
%% @end
%%--------------------------------------------------------------------
-spec filter_answer(Type :: dns_type(), Anlist :: [DnsRR :: term()]) -> [dns_value()].
filter_answer(Type, Anlist) ->
    %% Keep only records of class 'in' and the requested type, normalizing
    %% each record's data field to the expected value representation.
    [normalize_dns_data(Type, inet_dns:rr(RR, data)) || RR <- Anlist,
        inet_dns:rr(RR, class) == in,
        inet_dns:rr(RR, type) == Type].
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Converts dns_data value returned by inet_res to expected format.
%% @end
%%--------------------------------------------------------------------
-spec normalize_dns_data(Type :: dns_type(), Data :: term()) -> dns_value().
normalize_dns_data(txt, Chars) ->
    %% TXT answers arrive as character lists; convert them to binaries.
    list_to_binary(Chars);
normalize_dns_data(a, AddressTuple) ->
    %% A records are already inet-style ip4 tuples; pass them through.
    AddressTuple.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Comparator for the check results.
%% When summaries are equal, result with higher number of excessive
%% records is considered worse.
%% @end
%%--------------------------------------------------------------------
%% Orders results worse-first: returns true when the first argument is
%% at least as bad as the second (suitable for lists:sort/2).
-spec compare(dns_check(), dns_check()) -> boolean().
compare(#dns_check{summary = SameSummary, excessive = Exc1},
    #dns_check{summary = SameSummary, excessive = Exc2}) ->
    length(Exc1) >= length(Exc2);
compare(#dns_check{summary = Result1}, #dns_check{summary = Result2}) ->
    summary_to_integer(Result1) < summary_to_integer(Result2).
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Provides results summary ordering with lower numbers indicating
%% more problematic results.
%% @end
%%--------------------------------------------------------------------
-spec summary_to_integer(summary()) -> integer().
summary_to_integer(Summary) ->
    %% Severity rank: 1 is worst, 4 means everything checked out.
    case Summary of
        unresolvable -> 1;
        bad_records -> 2;
        missing_records -> 3;
        ok -> 4
    end.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Logs message describing negative results of the check.
%% @end
%%--------------------------------------------------------------------
-spec log_result(Names :: [dns_name()], Type :: dns_type(),
    Servers :: [inet:ip4_address()], Result :: dns_check()) -> ok.
%% A fully successful check produces no log output.
log_result(_Names, _Type, _Servers, #dns_check{summary = ok}) -> ok;
log_result(Names, Type, Servers,
    #dns_check{summary = Summary, expected = Expected, got = Got}) ->
    %% Removed stray dataset metadata that was fused onto the final line.
    ?warning("DNS check for records ~s named ~p failed with \"~s\":~n"
    "Servers used: ~p~nExpected values: ~p~nObtained values: ~p",
        [string:uppercase(atom_to_list(Type)), Names, Summary, Servers, Expected, Got]).
%% -------------------------------------------------------------------
%%
%% Copyright <2013-2018> <
%% Technische Universität Kaiserslautern, Germany
%% Université Pierre et Marie Curie / Sorbonne-Université, France
%% Universidade NOVA de Lisboa, Portugal
%% Université catholique de Louvain (UCL), Belgique
%% INESC TEC, Portugal
%% >
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either expressed or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% List of the contributors to the development of Antidote: see AUTHORS file.
%% Description and complete License: see LICENSE file.
%% -------------------------------------------------------------------
%% @doc Provides an API to interact with DC nodes. Interactions include:
%% - Getting descriptors.
%% - Connecting Nodes to a DC.
%% - Check the DC start status.
%% - Check node restart status.
%% - Tell nodes to drop ping messages.
-module(inter_dc_manager).
-include("antidote.hrl").
-include("inter_dc_repl.hrl").
%% ===================================================================
%% Public API
%% ===================================================================
-define(DC_CONNECT_RETRIES, 5).
-define(DC_CONNECT_RETY_SLEEP, 1000).
-include_lib("kernel/include/logger.hrl").
-export([
get_descriptor/0,
start_bg_processes/1,
observe_dcs_sync/1,
dc_successfully_started/0,
check_node_restart/0,
forget_dcs/1,
drop_ping/1
]).
%% @doc Builds this DC's descriptor: its id, partition count and the
%% publisher / log-reader socket addresses of every node in the DC.
-spec get_descriptor() -> {ok, descriptor()}.
get_descriptor() ->
    %% Wait until all needed vnodes are spawned, so that the heartbeats are already being sent
    ok = dc_utilities:ensure_all_vnodes_running_master(inter_dc_log_sender_vnode_master),
    Nodes = dc_utilities:get_my_dc_nodes(),
    %% Collect the advertised addresses from every node via rpc.
    Publishers = lists:map(
        fun(Node) -> rpc:call(Node, inter_dc_pub, get_address_list, []) end, Nodes
    ),
    LogReaders = lists:map(
        fun(Node) -> rpc:call(Node, inter_dc_query_router, get_address_list, []) end, Nodes
    ),
    {ok, #descriptor{
        dcid = dc_utilities:get_my_dc_id(),
        partition_num = dc_utilities:get_partitions_num(),
        publishers = Publishers,
        logreaders = LogReaders
    }}.
%% This will connect the list of local nodes to the DC given by the descriptor
%% When a connecting to a new DC, Nodes will be all the nodes in the local DC
%% Otherwise this will be called with a single node that is reconnecting (for example after one of the nodes in the DC crashes and restarts)
%% Note this is an internal function, to instruct the local DC to connect to a new DC the observe_dcs_sync(Descriptors) function should be used
-spec observe_dc(descriptor(), [node()]) -> ok | inter_dc_conn_err().
observe_dc(
    Desc = #descriptor{
        dcid = DCID,
        partition_num = PartitionsNumRemote,
        publishers = Publishers,
        logreaders = LogReaders
    },
    Nodes
) ->
    %% Both DCs must use the same number of partitions, otherwise the
    %% partitioned log streams cannot be matched up.
    PartitionsNumLocal = dc_utilities:get_partitions_num(),
    case PartitionsNumRemote == PartitionsNumLocal of
        false ->
            ?LOG_ERROR("Cannot observe remote DC: partition number mismatch"),
            {error, {partition_num_mismatch, PartitionsNumRemote, PartitionsNumLocal}};
        true ->
            %% A DC never subscribes to itself.
            case DCID == dc_utilities:get_my_dc_id() of
                true ->
                    ok;
                false ->
                    ?LOG_INFO("Observing DC ~p", [DCID]),
                    dc_utilities:ensure_all_vnodes_running_master(inter_dc_log_sender_vnode_master),
                    %% Announce the new publisher addresses to all subscribers in this DC.
                    %% Equivalently, we could just pick one node in the DC and delegate all the subscription work to it.
                    %% But we want to balance the work, so all nodes take part in subscribing.
                    connect_nodes(Nodes, DCID, LogReaders, Publishers, Desc, ?DC_CONNECT_RETRIES)
            end
    end.
%% Connects each node in turn to the remote DC's log readers and
%% publishers, retrying each node up to the given number of times.
%% The retry budget is reset after each successfully connected node;
%% once a node exhausts it, all partially established connections to
%% that DC are dropped via forget_dcs/1.
-spec connect_nodes(
    [node()], dcid(), [socket_address()], [socket_address()], descriptor(), non_neg_integer()
) ->
    ok | {error, connection_error}.
connect_nodes([], _DCID, _LogReaders, _Publishers, _Desc, _Retries) ->
    ok;
%% Retry budget exhausted: roll back and report failure.
connect_nodes(_Nodes, _DCID, _LogReaders, _Publishers, Desc, 0) ->
    ok = forget_dcs([Desc]),
    {error, connection_error};
connect_nodes([Node | Rest], DCID, LogReaders, Publishers, Desc, Retries) ->
    case rpc:call(Node, inter_dc_query_dealer, add_dc, [DCID, LogReaders], ?COMM_TIMEOUT) of
        ok ->
            case rpc:call(Node, inter_dc_sub, add_dc, [DCID, Publishers], ?COMM_TIMEOUT) of
                ok ->
                    %% Node fully connected; move on with a fresh retry budget.
                    connect_nodes(Rest, DCID, LogReaders, Publishers, Desc, ?DC_CONNECT_RETRIES);
                _ ->
                    timer:sleep(?DC_CONNECT_RETY_SLEEP),
                    ?LOG_ERROR("Unable to connect to publisher ~p", [DCID]),
                    connect_nodes([Node | Rest], DCID, LogReaders, Publishers, Desc, Retries - 1)
            end;
        _ ->
            timer:sleep(?DC_CONNECT_RETY_SLEEP),
            ?LOG_ERROR("Unable to connect to log reader ~p", [DCID]),
            connect_nodes([Node | Rest], DCID, LogReaders, Publishers, Desc, Retries - 1)
    end.
%% This should not be called until the local dc's ring is merged
%% Starts meta-data senders on every node of the DC and the heartbeat
%% timers of all logging vnodes. Crashes (via failed matches) if any
%% node or vnode fails to come up.
-spec start_bg_processes(atom()) -> ok.
start_bg_processes(MetaDataName) ->
    %% Start the meta-data senders
    Nodes = dc_utilities:get_my_dc_nodes(),
    %% Ensure vnodes are running and meta_data
    ok = dc_utilities:ensure_all_vnodes_running_master(inter_dc_log_sender_vnode_master),
    ok = dc_utilities:ensure_all_vnodes_running_master(clocksi_vnode_master),
    ok = dc_utilities:ensure_all_vnodes_running_master(logging_vnode_master),
    ok = dc_utilities:ensure_all_vnodes_running_master(materializer_vnode_master),
    lists:foreach(
        fun(Node) ->
            true = wait_init:wait_ready(Node),
            ok = rpc:call(Node, dc_utilities, check_registered, [meta_data_sender_sup]),
            ok = rpc:call(Node, dc_utilities, check_registered, [meta_data_manager_sup]),
            ok = rpc:call(Node, dc_utilities, check_registered_global, [
                stable_meta_data_server:generate_server_name(Node)
            ]),
            ok = rpc:call(Node, meta_data_sender, start, [MetaDataName])
        end,
        Nodes
    ),
    %% Load the internal meta-data
    ok = dc_meta_data_utilities:store_meta_data_name(MetaDataName),
    %% Start the timers sending the heartbeats
    ?LOG_INFO("Starting heartbeat sender timers"),
    Responses = dc_utilities:bcast_vnode_sync(logging_vnode_master, {start_timer, undefined}),
    %% Be sure they all started ok, crash otherwise
    %% (the clause only matches {_, ok}; anything else is a function_clause crash).
    ok = lists:foreach(
        fun({_, ok}) ->
            ok
        end,
        Responses
    ),
    ok.
%% This should be called once the DC is up and running successfully
%% It sets a flag on disk to true. When this is true on fail and
%% restart the DC will load its state from disk
%% Simply delegates to dc_meta_data_utilities, which records the flag.
-spec dc_successfully_started() -> ok.
dc_successfully_started() ->
    dc_meta_data_utilities:dc_start_success().
%% Checks is the node is restarting when it had already been running
%% If it is then all the background processes and connections are restarted
%% Returns true when a restart was detected and handled, false otherwise.
-spec check_node_restart() -> boolean().
check_node_restart() ->
    case dc_meta_data_utilities:is_restart() of
        true ->
            ?LOG_INFO("This node was previously configured, will restart from previous config"),
            MyNode = node(),
            %% Load any env variables
            ok = dc_utilities:check_registered_global(
                stable_meta_data_server:generate_server_name(MyNode)
            ),
            ok = dc_meta_data_utilities:load_env_meta_data(),
            %% Ensure vnodes are running and meta_data
            ok = dc_utilities:ensure_local_vnodes_running_master(inter_dc_log_sender_vnode_master),
            ok = dc_utilities:ensure_local_vnodes_running_master(clocksi_vnode_master),
            ok = dc_utilities:ensure_local_vnodes_running_master(logging_vnode_master),
            ok = dc_utilities:ensure_local_vnodes_running_master(materializer_vnode_master),
            wait_init:wait_ready(MyNode),
            %% Verify all inter-DC processes of this node are registered.
            ok = dc_utilities:check_registered(meta_data_sender_sup),
            ok = dc_utilities:check_registered(meta_data_manager_sup),
            ok = dc_utilities:check_registered(inter_dc_query_router),
            ok = dc_utilities:check_registered(inter_dc_sub),
            ok = dc_utilities:check_registered(inter_dc_pub),
            ok = dc_utilities:check_registered(inter_dc_query_response_sup),
            ok = dc_utilities:check_registered(inter_dc_query_dealer),
            {ok, MetaDataName} = dc_meta_data_utilities:get_meta_data_name(),
            ok = meta_data_sender:start(MetaDataName),
            %% Start the timers sending the heartbeats
            ?LOG_INFO("Starting heartbeat sender timers"),
            Responses = dc_utilities:bcast_my_vnode_sync(
                logging_vnode_master, {start_timer, undefined}
            ),
            %% Be sure they all started ok, crash otherwise
            ok = lists:foreach(
                fun({_, ok}) ->
                    ok
                end,
                Responses
            ),
            %% Reconnect this node to other DCs
            OtherDCs = dc_meta_data_utilities:get_dc_descriptors(),
            Responses3 = reconnect_dcs_after_restart(OtherDCs, MyNode),
            %% Ensure all connections were successful, crash otherwise
            %% (assertive comprehension: X is already bound by the generator,
            %% so 'X = ok' crashes with badmatch on any failed reconnection).
            Responses3 = [X = ok || X <- Responses3],
            true;
        false ->
            false
    end.
%% Drops the node's stale connections to the given DCs and re-establishes
%% them, returning the per-descriptor connection results.
-spec reconnect_dcs_after_restart([descriptor()], node()) -> [ok | inter_dc_conn_err()].
reconnect_dcs_after_restart(Descriptors, MyNode) ->
    ok = forget_dcs(Descriptors, [MyNode]),
    observe_dcs_sync(Descriptors, [MyNode]).
%% This should be called when connecting the local DC to a new external DC
-spec observe_dcs_sync([descriptor()]) -> [ok | inter_dc_conn_err()].
observe_dcs_sync(Descriptors) ->
    %% Connect every node of the local DC to the given DCs.
    Nodes = dc_utilities:get_my_dc_nodes(),
    observe_dcs_sync(Descriptors, Nodes).
%% Connects the given nodes to each descriptor's DC, then, for each DC
%% connected successfully, waits until the local stable snapshot has
%% advanced past its pre-connection value for that DC (i.e. until data
%% from the DC is actually flowing) before persisting the descriptor.
%% Returns the per-descriptor connection results in input order.
-spec observe_dcs_sync([descriptor()], [node()]) -> [ok | inter_dc_conn_err()].
observe_dcs_sync(Descriptors, Nodes) ->
    %% Snapshot taken before connecting, used as the baseline to wait on.
    {ok, SS} = dc_utilities:get_stable_snapshot(),
    DCs = lists:map(
        fun(DC) ->
            {observe_dc(DC, Nodes), DC}
        end,
        Descriptors
    ),
    lists:foreach(
        fun({Res, Desc = #descriptor{dcid = DCID}}) ->
            case Res of
                ok ->
                    Value = vectorclock:get(DCID, SS),
                    wait_for_stable_snapshot(DCID, Value),
                    ok = dc_meta_data_utilities:store_dc_descriptors([Desc]);
                _ ->
                    ok
            end
        end,
        DCs
    ),
    [Result1 || {Result1, _DC1} <- DCs].
%% Disconnects the given nodes from the DC in the descriptor by removing
%% its log-reader and publisher registrations. The local DC id is a no-op.
-spec forget_dc(descriptor(), [node()]) -> ok.
forget_dc(#descriptor{dcid = DCID}, Nodes) ->
    case DCID == dc_utilities:get_my_dc_id() of
        true ->
            ok;
        false ->
            ?LOG_NOTICE("Forgetting DC ~p", [DCID]),
            lists:foreach(
                fun(Node) -> ok = rpc:call(Node, inter_dc_query_dealer, del_dc, [DCID]) end, Nodes
            ),
            lists:foreach(fun(Node) -> ok = rpc:call(Node, inter_dc_sub, del_dc, [DCID]) end, Nodes)
    end.
%% Disconnects every node of the local DC from all of the given DCs.
-spec forget_dcs([descriptor()]) -> ok.
forget_dcs(Descriptors) ->
    Nodes = dc_utilities:get_my_dc_nodes(),
    forget_dcs(Descriptors, Nodes).
%% Disconnects the given nodes from all of the given DCs.
-spec forget_dcs([descriptor()], [node()]) -> ok.
forget_dcs(Descriptors, Nodes) ->
    lists:foreach(
        fun(Descriptor) ->
            forget_dc(Descriptor, Nodes)
        end,
        Descriptors
    ).
%% Tell nodes within the DC to drop heartbeat ping messages from other
%% DCs, used for debugging
-spec drop_ping(boolean()) -> ok.
drop_ping(DropPing) ->
    Responses = dc_utilities:bcast_vnode_sync(inter_dc_dep_vnode_master, {drop_ping, DropPing}),
    %% Be sure they all returned ok, crash otherwise
    %% (the clause only matches {_, ok}; any other response crashes).
    ok = lists:foreach(
        fun({_, ok}) ->
            ok
        end,
        Responses
    ).
%%%%%%%%%%%%%
%% Utils

%% Blocks until the stable snapshot's entry for DCID exceeds MinValue,
%% i.e. until at least one update/heartbeat from that DC has been applied
%% locally since the baseline was taken. Polls once per second. Returns
%% immediately for the local DC.
%% (Removed stray dataset metadata that was fused onto the final line.)
wait_for_stable_snapshot(DCID, MinValue) ->
    case DCID == dc_utilities:get_my_dc_id() of
        true ->
            ok;
        false ->
            {ok, SS} = dc_utilities:get_stable_snapshot(),
            Value = vectorclock:get(DCID, SS),
            case Value > MinValue of
                true ->
                    ?LOG_INFO("Connected to DC ~p", [DCID]),
                    ok;
                false ->
                    ?LOG_INFO("Waiting for DC ~p", [DCID]),
                    timer:sleep(1000),
                    wait_for_stable_snapshot(DCID, MinValue)
            end
    end.
-module(rstar_search).
-export([search_within/2, search_near/3]).
-ifdef(TEST).
-compile(export_all).
-endif.
-include("../include/rstar.hrl").
% Searches the tree for any geometries within or intersecting
% with the given geometry
search_within(Root, Geo) -> search_within(Root, Geo, []).
% Leaf node: every entry that intersects the query region is a result.
search_within(#geometry{value=Value}, Geo, Results) when is_record(Value, leaf) ->
    lists:foldl(fun(Child, Res) ->
        case rstar_geometry:intersect(Child, Geo) of
            undefined -> Res;
            % Add the child to the result set
            _ -> [Child | Res]
        end
    end, Results, Value#leaf.entries);
% Inner node: only descend into children whose bounding box intersects
% the query region.
search_within(Node, Geo, Results) ->
    lists:foldl(fun(Child, Res) ->
        case rstar_geometry:intersect(Child, Geo) of
            undefined -> Res;
            % Recurse into the matching node
            _ -> search_within(Child, Geo, Res)
        end
    end, Results, Node#geometry.value#node.children).
% Returns the K nearest points to the given Geometry
% The candidate set is kept as a gb_set of {Distance, Geometry} pairs so
% the current worst match can be found/evicted via gb_sets:largest/1.
search_near(Node, Geo, K) ->
    Results = search_near_recursive(Node, Geo, K, gb_sets:new()),
    [R || {_, R} <- gb_sets:to_list(Results)].
% Leaf node: fold each entry into the K-best set.
search_near_recursive(#geometry{value=Value}, Geo, K, Results) when is_record(Value, leaf) ->
    lists:foldl(fun(C, Res) ->
        % Compute the center the distance to the target geometry
        Distance = rstar_geometry:min_dist(Geo, C),
        case gb_sets:size(Res) of
            % Handle the case where we have less than K matches
            L when L < K -> gb_sets:add({Distance, C}, Res);
            % Handle when we have K matches already
            _ ->
                {MaxDist, _} = MaxNN = gb_sets:largest(Res),
                if
                    % Evict the current worst match if this entry is closer.
                    MaxDist > Distance -> gb_sets:add({Distance, C}, gb_sets:delete(MaxNN, Res));
                    true -> Res
                end
        end
    end, Results, Value#leaf.entries);
search_near_recursive(Node, Geo, K, Results) ->
    % Create an Active Branch List based on our children sorted by their
    % minimum distance from the query region
    Children = Node#geometry.value#node.children,
    Sorted = lists:sort([{rstar_geometry:min_dist(Geo, C), C} || C <- Children]),
    ActiveBranchList = [B || {_, B} <- Sorted],
    % Prune the children and iterate over the branches
    Pruned = prune_branches(Geo, K, ActiveBranchList, Results),
    search_near_branches(Pruned, Geo, K, Results).
% Helper to iterate through the Active Branch List gathering results
% Return the result set when there are no further branches
search_near_branches([], _, _, Results) -> Results;
% Check each branch and prune after updating the results
search_near_branches([Branch | ABL], Geo, K, Results) ->
    NewResults = search_near_recursive(Branch, Geo, K, Results),
    Pruned = prune_branches(Geo, K, ABL, NewResults),
    search_near_branches(Pruned, Geo, K, NewResults).
% Prunes the search branches based on the existing results
% A branch whose minimum distance exceeds the distance of the current
% K-th nearest neighbor can never yield a closer match, so it is dropped.
% (Removed stray dataset metadata that was fused onto the final line.)
prune_branches(Geo, K, Branches, Results) ->
    case gb_sets:size(Results) of
        % Do not prune if we don't have K neighbors yet
        L when L < K -> Branches;
        _ ->
            {MaxDist, _} = gb_sets:largest(Results),
            lists:filter(fun(B) -> rstar_geometry:min_dist(Geo, B) =< MaxDist end, Branches)
    end.
-module(poly_SUITE).
-include_lib("eunit/include/eunit.hrl").
-export([all/0, init_per_testcase/2, end_per_testcase/2]).
-export(
[
eval_test/1,
from_fr_test/1,
interpolate_with_fr_test/1,
zeroize_test/1,
self_subtract_test/1,
add_zero_test/1,
sub_zero_test/1,
mul_poly_test/1,
add_different_sizes_poly_test/1,
negative_cmp_test/1,
f_of_x_test/1,
serde_test/1
]
).
%% Lists every test case executed by this Common Test suite.
all() ->
    [
        eval_test,
        from_fr_test,
        interpolate_with_fr_test,
        zeroize_test,
        self_subtract_test,
        add_zero_test,
        sub_zero_test,
        mul_poly_test,
        add_different_sizes_poly_test,
        negative_cmp_test,
        f_of_x_test,
        serde_test
    ].
%% No per-testcase setup or teardown is required; the config is passed
%% through unchanged in both directions.
init_per_testcase(_TestCase, Config) ->
    Config.
end_per_testcase(_TestCase, Config) ->
    Config.
%% Evaluates a known cubic at fixed points and checks that interpolation
%% from those samples recovers the polynomial.
eval_test(_Config) ->
    %% poly = 5x³ + x - 2.
    Poly = tc_poly:from_coeffs([-2, 1, 0, 5]),
    Samples = [{-1, -8}, {2, 40}, {3, 136}, {5, 628}],
    %% check f(a) = b
    ?assert(
        lists:all(
            fun({Point, Answer}) ->
                AnswerFr = tc_fr:into(Answer),
                EvalFr = tc_poly:eval(Poly, Point),
                tc_fr:cmp(AnswerFr, EvalFr)
            end,
            Samples
        )
    ),
    %% poly can be interpolated because num_sample >= degree + 1
    ?assert(tc_poly:cmp(Poly, tc_poly:interpolate(Samples))),
    ?assertEqual(3, tc_poly:degree(Poly)),
    ok.
%% Same checks as eval_test, but the polynomial is constructed from field
%% elements (frs) instead of raw integer coefficients.
from_fr_test(_Config) ->
    %% poly = 5x³ + x - 2.
    Coeffs = [-2, 1, 0, 5],
    Frs = [tc_fr:into(I) || I <- Coeffs],
    Poly = tc_poly:from_frs(Frs),
    Samples = [{-1, -8}, {2, 40}, {3, 136}, {5, 628}],
    %% check f(a) = b
    ?assert(
        lists:all(
            fun({Point, Answer}) ->
                AnswerFr = tc_fr:into(Answer),
                EvalFr = tc_poly:eval(Poly, Point),
                tc_fr:cmp(AnswerFr, EvalFr)
            end,
            Samples
        )
    ),
    %% poly can be interpolated because num_sample >= degree + 1
    ?assert(tc_poly:cmp(Poly, tc_poly:interpolate(Samples))),
    ?assertEqual(3, tc_poly:degree(Poly)),
    ok.
%% Interpolation must give the same polynomial whether the samples are
%% supplied as integers or as field elements (frs).
interpolate_with_fr_test(_Config) ->
    %% poly = 5x³ + x - 2.
    Poly = tc_poly:from_coeffs([-2, 1, 0, 5]),
    Samples = [{-1, -8}, {2, 40}, {3, 136}, {5, 628}],
    FrSamples = [{tc_fr:into(A), tc_fr:into(B)} || {A, B} <- Samples],
    %% check f(a) = b
    ?assert(
        lists:all(
            fun({Point, Answer}) ->
                AnswerFr = tc_fr:into(Answer),
                EvalFr = tc_poly:eval(Poly, Point),
                tc_fr:cmp(AnswerFr, EvalFr)
            end,
            Samples
        )
    ),
    %% poly can be interpolated because num_sample >= degree + 1
    ?assert(tc_poly:cmp(Poly, tc_poly:interpolate(Samples))),
    %% we should also be able to interpolate poly using fr values of samples
    ?assert(tc_poly:cmp(Poly, tc_poly:interpolate_from_fr(FrSamples))),
    ?assertEqual(3, tc_poly:degree(Poly)),
    ok.
%% Zeroizing a (bi)polynomial must make it the zero polynomial: its
%% commitment changes and evaluates to the zero group element everywhere.
zeroize_test(_Config) ->
    %% random_poly -> zeroize -> is_zero
    ?assert(tc_poly:is_zero(tc_poly:zeroize(tc_poly:random(4)))),
    BiPoly = tc_bipoly:random(3),
    BiCommitment = tc_bipoly:commitment(BiPoly),
    ZeroBiPoly = tc_bipoly:zeroize(BiPoly),
    ZeroBiCommitment = tc_bipoly:commitment(ZeroBiPoly),
    %% The zeroized commitment must differ from the original one.
    ?assertEqual(false, tc_bicommitment:cmp(ZeroBiCommitment, BiCommitment)),
    %% Evaluating the zeroized commitment at a random point yields zero.
    ?assert(
        tc_g1:cmp(
            tc_g1:zero(),
            tc_bicommitment:eval(
                ZeroBiCommitment,
                rand:uniform(100),
                rand:uniform(100)
            )
        )
    ),
    ok.
%% Basic algebraic identities on random polynomials.
self_subtract_test(_Config) ->
    %% f(x) - f(x) = 0
    P = tc_poly:random(2),
    ?assert(tc_poly:cmp(tc_poly:zero(), tc_poly:sub(P, P))).
add_zero_test(_Config) ->
    %% f(x) + 0 = f(x)
    P = tc_poly:random(2),
    ?assert(tc_poly:cmp(P, tc_poly:add_scalar(0, P))).
sub_zero_test(_Config) ->
    %% f(x) - 0 = f(x)
    P = tc_poly:random(2),
    ?assert(tc_poly:cmp(P, tc_poly:sub_scalar(0, P))).
%% Polynomial multiplication is checked both structurally (coefficients)
%% and pointwise (evaluation at x = 5).
mul_poly_test(_Config) ->
    %% p1 = (x² + 1)
    %% p2 = (x - 1)
    %% p1 * p2 = p3 = x³ - x² + x - 1
    %% p1(p) * p2(p) = p3(p)
    P1 = tc_poly:from_coeffs([1, 0, 1]),
    P2 = tc_poly:from_coeffs([-1, 1]),
    P3 = tc_poly:from_coeffs([-1, 1, -1, 1]),
    ?assert(tc_poly:cmp(P3, tc_poly:mul(P1, P2))),
    P1Eval = tc_poly:eval(P1, 5),
    P2Eval = tc_poly:eval(P2, 5),
    P3Eval = tc_poly:eval(P3, 5),
    %% 26 * 4 = 104, matching p1(5) * p2(5) = p3(5).
    ?assert(tc_fr:cmp(tc_fr:into(26), P1Eval)),
    ?assert(tc_fr:cmp(tc_fr:into(4), P2Eval)),
    ?assert(tc_fr:cmp(tc_fr:into(104), P3Eval)),
    ok.
%% Adding polynomials of different degrees yields the larger degree, and
%% subtracting one addend recovers the other exactly.
add_different_sizes_poly_test(_Config) ->
    P1 = tc_poly:random(5),
    P2 = tc_poly:random(8),
    AddedPoly = tc_poly:add(P1, P2),
    %% result should be of degree 8
    ?assertEqual(8, tc_poly:degree(AddedPoly)),
    %% if we subtract B from the result, we should get back A with degree 5
    SubPoly = tc_poly:sub(AddedPoly, P2),
    ?assertEqual(5, tc_poly:degree(SubPoly)),
    ?assert(tc_poly:cmp(P1, SubPoly)),
    ok.
%% Comparison must distinguish a polynomial from its double.
negative_cmp_test(_Config) ->
    P1 = tc_poly:random(5),
    P2 = tc_poly:add(P1, P1),
    %% since P1 /= 2*P1
    ?assertEqual(false, tc_poly:cmp(P1, P2)),
    ok.
%% Sanity check of single-point evaluation.
f_of_x_test(_Config) ->
    %% f(x) = 5x², f(2) = 5 * 2 * 2
    P = tc_poly:from_coeffs([0, 0, 5]),
    Eval = tc_poly:eval(P, 2),
    ?assert(tc_fr:cmp(tc_fr:into(5 * 2 * 2), Eval)),
    ok.
%% A polynomial must survive a serialize/deserialize round trip: the
%% deserialized copy evaluates identically to the original.
%% (Removed stray dataset metadata that was fused onto the final line.)
serde_test(_Config) ->
    %% f(x) = 5x², f(2) = 5 * 2 * 2
    P = tc_poly:from_coeffs([0, 0, 5]),
    SerializedPoly = tc_poly:serialize(P),
    DeserializePoly = tc_poly:deserialize(SerializedPoly),
    ?assert(tc_fr:cmp(tc_fr:into(5 * 2 * 2), tc_poly:eval(DeserializePoly, 2))),
    ok.
%% @copyright 2011 <NAME>
%% @author <NAME> <<EMAIL>>
%% @version {@date} {@time}
%% @doc Function call timing analysis. Parts lifted from hipe_timer.erl (thanks OTP)
%% @end
-module(funalysis).
-export([advanced/2,
par_advanced/3]).
-export([par_proc_iterations/2,
analyse/2]).
% Times a single call of F in microseconds, subtracting the measured
% overhead of timing an empty fun; the result is clamped at zero.
t(F) ->
    NullTime = empty_time(),
    {Time, _} = timer:tc(F, []),
    erlang:max(Time - NullTime, 0).
% -spec empty_time() -> microseconds().
% Baseline overhead: the cost of timer:tc on a no-op fun.
empty_time() ->
    {Time, _} = timer:tc(fun () -> ok end, []),
    Time.
% Runs Fun Iterations times and returns the timing statistics from
% analyse/2. Requires at least 2 iterations (the variance computation
% divides by N-1); returns false otherwise.
advanced(_Fun, I) when I < 2 -> false;
advanced(Fun, Iterations) ->
    Measurements = [t(Fun) || _ <- lists:seq(1, Iterations)],
    analyse(Measurements, Iterations).
% Like advanced/2 but distributes the iterations over Procs worker
% processes. Each worker times its share of the calls and sends the
% measurements back tagged with its pid and a shared ref; results are
% gathered in spawn order before being analysed.
par_advanced(Fun, Procs, Iterations) when Iterations > Procs, Procs > 0 ->
    Master = self(),
    Ref = make_ref(),
    Pids = [ spawn( fun () -> timing_loop(Master, Ref, Fun, ProcIters ) end )
             || ProcIters <- par_proc_iterations(Procs, Iterations) ],
    Measurements = lists:flatmap( fun (Pid) ->
                                          receive
                                              {Pid, Ref, M} -> M
                                          end
                                  end,
                                  Pids),
    analyse(Measurements, Iterations).
% Splits Iterations as evenly as possible over Procs workers: the first
% (Iterations rem Procs) workers receive one extra iteration.
par_proc_iterations(Procs, Iterations) ->
    Base = Iterations div Procs,
    Extra = Iterations rem Procs,
    lists:map(
      fun(N) when N =< Extra -> Base + 1;
         (_) -> Base
      end,
      lists:seq(1, Procs)).
% Worker body for par_advanced/3: times Fun ProcIters times and sends
% the list of measurements back to Master, tagged with self() and Ref.
timing_loop(Master, Ref, Fun, ProcIters) ->
    Master ! {self(), Ref, [t(Fun) || _ <- lists:seq(1, ProcIters) ]},
    exit(normal).
% Computes summary statistics (min/max/mean/median/variance/stddev/
% coefficient of variation/sum) over the wall-clock measurements and
% returns them as a nested proplist together with the iteration count.
analyse(Measurements, Iterations) ->
    Wallclock = Measurements,
    WMin = lists:min(Wallclock),
    WMax = lists:max(Wallclock),
    WMean = mean(Wallclock),
    WMedian = median(Wallclock),
    WVariance = variance(Wallclock),
    WStddev = stddev(Wallclock),
    % Coefficient of variation, as a percentage of the mean.
    WVarCoff = 100 * WStddev / WMean,
    WSum = lists:sum(Wallclock),
    [{wallclock,[{min, WMin},
                 {max, WMax},
                 {mean, WMean},
                 {median, WMedian},
                 {variance, WVariance},
                 {stdev, WStddev},
                 {varcoff, WVarCoff},
                 {sum, WSum},
                 {values, Wallclock}]},
     {iterations, Iterations}].
% Unzips a list of {Wallclock, Runtime} pairs into two lists; both
% output lists come back in reverse input order.
split(Pairs) ->
    split(Pairs, [], []).

split([], WallAcc, RunAcc) ->
    {WallAcc, RunAcc};
split([{Wall, Runtime} | Rest], WallAcc, RunAcc) ->
    split(Rest, [Wall | WallAcc], [Runtime | RunAcc]).
% Arithmetic mean of a non-empty list of numbers; exits with
% 'empty_list' when given an empty list.
mean([]) ->
    exit(empty_list);
mean(Values) ->
    lists:sum(Values) / length(Values).
% Median of a non-empty list. For an even number of samples the mean of
% the two middle values is returned; for an odd number, the middle value.
median(L) ->
    S = length(L),
    SL = lists:sort(L),
    case even(S) of
        true ->
            (lists:nth((S div 2), SL) + lists:nth((S div 2) + 1, SL)) / 2;
        false ->
            % Fixed off-by-one: lists:nth/2 is 1-based, so the middle
            % element of an odd-length list is at (S div 2) + 1. The old
            % index (S div 2) picked the element below the median and
            % crashed for single-element lists.
            lists:nth((S div 2) + 1, SL)
    end.
% True when S is an even integer.
even(S) ->
    (S band 1) =:= 0.
%% diffs(L, V) ->
%% [X - V || X <- L].
% Squared deviation of every element from the given reference value.
square_diffs(Values, Reference) ->
    lists:map(fun(X) -> Diff = X - Reference, Diff * Diff end, Values).
% Unbiased sample variance (divides by N-1); exits with
% 'too few values' for lists shorter than two elements.
variance(L) ->
    Mean = mean(L),
    N = length(L),
    if N > 1 ->
        lists:sum(square_diffs(L,Mean)) / (N-1);
    true -> exit('too few values')
    end.
% Sample standard deviation.
% (Removed stray dataset metadata that was fused onto the final line.)
stddev(L) ->
    math:sqrt(variance(L)).
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(ejson).
-export([encode/1, decode/1]).
-on_load(init/0).
% on_load hook: locates the ejson NIF shared object (falling back to a
% relative priv path when the app's priv_dir is not resolvable) and
% attempts to load it. Load failures are tolerated (caught) because the
% module falls back to a pure-Erlang mochijson2 implementation.
init() ->
    SoName = case code:priv_dir(ejson) of
    {error, bad_name} ->
        case filelib:is_dir(filename:join(["..", priv])) of
        true ->
            filename:join(["..", priv, ejson]);
        false ->
            filename:join([priv, ejson])
        end;
    Dir ->
        filename:join(Dir, ejson)
    end,
    (catch erlang:load_nif(SoName, 0)),
    % R13B03's on_load expects 'true'; later releases expect 'ok'.
    case erlang:system_info(otp_release) of
    "R13B03" -> true;
    _ -> ok
    end.
% Decodes JSON iodata into EJSON terms. Prefers the NIF path; when the
% NIF stubs exit with ejson_nif_not_loaded, falls back to the pure
% Erlang (mochijson2) decoder.
decode(undefined) ->
    throw({invalid_json, undefined});
decode(IoList) ->
    try
        nif_decode(IoList)
    catch exit:ejson_nif_not_loaded ->
        erl_decode(IoList)
    end.
% Encodes an EJSON term into a JSON binary, with the same NIF-first,
% pure-Erlang-fallback strategy as decode/1.
encode(EJson) ->
    try
        nif_encode(EJson)
    catch exit:ejson_nif_not_loaded ->
        erl_encode(EJson)
    end.
% NIF-backed decode: tokenizes the input in reverse via the NIF, then
% rebuilds the EJSON term in Erlang with make_ejson/2.
nif_decode(IoList) ->
    case reverse_tokens(IoList) of
    {ok, ReverseTokens} ->
        [[EJson]] = make_ejson(ReverseTokens, [[]]),
        EJson;
    Error ->
        throw({invalid_json, {Error, IoList}})
    end.
% Pure Erlang decode via mochijson2; objects are represented as
% {Proplist} one-tuples via the object_hook.
erl_decode(IoList) ->
    try
        (mochijson2:decoder([{object_hook, fun({struct, L}) -> {L} end}]))(IoList)
    catch _Type:Error ->
        throw({invalid_json, {Error, IoList}})
    end.
% NIF-backed encode: builds a reversed, flattened fragment list in
% Erlang, then lets the NIF render strings/floats and join the output.
nif_encode(EJson) ->
    RevList = encode_rev(EJson),
    final_encode(lists:reverse(lists:flatten([RevList]))).
% Pure Erlang encode via mochijson2.
erl_encode(EJson) ->
    Opts = [{handler, fun mochi_encode_handler/1}],
    iolist_to_binary((mochijson2:encoder(Opts))(EJson)).
% Adapts the {Proplist} object representation back to mochijson2's
% {struct, Proplist}; any other unhandled term is an encode error.
mochi_encode_handler({L}) when is_list(L) ->
    {struct, L};
mochi_encode_handler(Bad) ->
    exit({json_encode, {bad_term, Bad}}).
% Encode the json into a reverse list that's almost an iolist
% everything in the list is the final output except for tuples with
% {0, Strings} and {1, Floats}, which are to be converted to strings
% inside the NIF.
encode_rev(true) ->
    <<"true">>;
encode_rev(false) ->
    <<"false">>;
encode_rev(null) ->
    <<"null">>;
encode_rev(I) when is_integer(I) ->
    list_to_binary(integer_to_list(I));
encode_rev(S) when is_binary(S) ->
    {0, S};
encode_rev(S) when is_atom(S) ->
    {0, list_to_binary(atom_to_list(S))};
encode_rev(F) when is_float(F) ->
    {1, F};
% {Proplist} one-tuples are JSON objects; plain lists are JSON arrays.
encode_rev({Props}) when is_list(Props) ->
    encode_proplist_rev(Props, [<<"{">>]);
encode_rev(Array) when is_list(Array) ->
    encode_array_rev(Array, [<<"[">>]);
encode_rev(Bad) ->
    throw({json_encode, {bad_term, Bad}}).
% Accumulates array elements in reverse, inserting commas between them.
encode_array_rev([], Acc) ->
    [<<"]">> | Acc];
encode_array_rev([Val | Rest], [<<"[">>]) ->
    encode_array_rev(Rest, [encode_rev(Val), <<"[">>]);
encode_array_rev([Val | Rest], Acc) ->
    encode_array_rev(Rest, [encode_rev(Val), <<",">> | Acc]).
% Accumulates object members in reverse as key:value pairs, inserting
% commas between members; keys are coerced to binaries via as_binary/1.
encode_proplist_rev([], Acc) ->
    [<<"}">> | Acc];
encode_proplist_rev([{Key,Val} | Rest], [<<"{">>]) ->
    encode_proplist_rev(
        Rest, [encode_rev(Val), <<":">>, {0, as_binary(Key)}, <<"{">>]);
encode_proplist_rev([{Key,Val} | Rest], Acc) ->
    encode_proplist_rev(
        Rest, [encode_rev(Val), <<":">>, {0, as_binary(Key)}, <<",">> | Acc]).
% Coerces an object key (binary, atom or string/iolist) to a binary.
as_binary(Bin) when is_binary(Bin) ->
    Bin;
as_binary(Atom) when is_atom(Atom) ->
    atom_to_binary(Atom, latin1);
as_binary(List) when is_list(List) ->
    list_to_binary(List).
% Rebuild an EJson term from the reversed event stream produced by the
% reverse_tokens/1 NIF. Because events arrive in reverse order, the
% "end" events (1/3) push a fresh collection on the stack and the
% "start" events (0/2) fold the finished collection into its parent.
% NOTE(review): the event encoding here ({0,_} integer strings,
% {1,_} float strings, {3,_} object keys) is defined by the NIF and is
% distinct from the encode_rev/1 markers above — confirm against the
% NIF's C source.
make_ejson([], Stack) ->
    Stack;
make_ejson([0 | RevEvs], [ArrayValues, PrevValues | RestStack]) ->
    % 0 ArrayStart
    make_ejson(RevEvs, [[ArrayValues | PrevValues] | RestStack]);
make_ejson([1 | RevEvs], Stack) ->
    % 1 ArrayEnd
    make_ejson(RevEvs, [[] | Stack]);
make_ejson([2 | RevEvs], [ObjValues, PrevValues | RestStack]) ->
    % 2 ObjectStart
    make_ejson(RevEvs, [[{ObjValues} | PrevValues] | RestStack]);
make_ejson([3 | RevEvs], Stack) ->
    % 3 ObjectEnd
    make_ejson(RevEvs, [[] | Stack]);
make_ejson([{0, Value} | RevEvs], [Vals | RestStack] = _Stack) ->
    % {0, IntegerString}
    make_ejson(RevEvs, [[list_to_integer(binary_to_list(Value)) | Vals] | RestStack]);
make_ejson([{1, Value} | RevEvs], [Vals | RestStack] = _Stack) ->
    % {1, FloatString}
    make_ejson(RevEvs, [[list_to_float(binary_to_list(Value)) | Vals] | RestStack]);
make_ejson([{3, String} | RevEvs], [[PrevValue|RestObject] | RestStack] = _Stack) ->
    % {3 , ObjectKey} - pairs the key with the value decoded just before it
    make_ejson(RevEvs, [[{String, PrevValue}|RestObject] | RestStack]);
make_ejson([Value | RevEvs], [Vals | RestStack] = _Stack) ->
    make_ejson(RevEvs, [[Value | Vals] | RestStack]).
% Stub implementations that are replaced when the ejson NIF library is
% loaded; reaching either of them means the NIF is not loaded.
reverse_tokens(_) ->
    exit(ejson_nif_not_loaded).
final_encode(_) ->
    exit(ejson_nif_not_loaded).
-module(nanometer).
-type piece() :: atom() | integer() | binary().
-type name() :: [piece()].
-type options() :: proplists:proplist().
-type values() :: [{piece(), number()}].
-type type() :: counter | meter | gauge | histogram | external.
-type error() :: {error, any()}.
-callback create(name(), options()) -> any().
-callback notify(name(), number()) -> any().
-callback acquire(name()) -> values().
-callback release(name()) -> any().
-callback reset(name()) -> any().
-callback exists(name()) -> boolean().
-callback list() -> [name()].
-callback count() -> non_neg_integer().
%% low-level API
-export([create/3, notify/3, acquire/2, release/2, reset/2, list/1, types/0, callback_module/1, stats/0]).
%% convenience API
-export([count/2, meter/2, gauge/2, histogram/2, histogram/3, time/2, time/3]).
-export_type([name/0, error/0, options/0, values/0, type/0]).
-type result() :: ok | not_started | {error, any()}.
%% don't die!
-define(SAVE_OUR_SOULS(X),
case catch(begin X end) of
{'EXIT', {badarg, [{ets, _, _, _} | _]}} ->
not_started;
{'EXIT', {Error, _}} ->
{error, Error};
_ ->
ok
end).
%% @doc Create a metric of Type named Name. Failures (e.g. nanometer
%% not started) are absorbed into the result() by ?SAVE_OUR_SOULS.
-spec create(type(), name(), options()) -> result().
create(Type, Name, Options) ->
    Mod = callback_module(Type),
    ?SAVE_OUR_SOULS(Mod:create(Name, Options)).
%% @doc Record the observation Number against metric Name; errors are
%% absorbed into the result().
-spec notify(type(), name(), number()) -> result().
notify(Type, Name, Number) ->
    Mod = callback_module(Type),
    ?SAVE_OUR_SOULS(Mod:notify(Name, Number)).
%% @doc Snapshot the current values of a metric. Unlike create/notify
%% this is NOT wrapped in ?SAVE_OUR_SOULS, so errors propagate.
-spec acquire(type(), name()) -> values().
acquire(Type, Name) ->
    Mod = callback_module(Type),
    Mod:acquire(Name).
%% @doc Remove a metric; the callback's return value is discarded.
-spec release(type(), name()) -> ok.
release(Type, Name) ->
    Mod = callback_module(Type),
    Mod:release(Name),
    ok.
%% @doc Reset a metric to its initial state; the callback's return
%% value is discarded.
-spec reset(type(), name()) -> ok.
reset(Type, Name) ->
    Mod = callback_module(Type),
    Mod:reset(Name),
    ok.
%% @doc List the names of all metrics of the given type.
-spec list(type()) -> [name()].
list(Type) ->
    Mod = callback_module(Type),
    Mod:list().
%% @doc All metric types known to nanometer.
-spec types() -> [type()].
types() ->
    [counter, meter, gauge, histogram, external].
%% @doc Map a metric type onto the module implementing its callbacks.
-spec callback_module(type()) -> module().
callback_module(Type) when Type =:= counter ->
    nanometer_counter;
callback_module(Type) when Type =:= meter ->
    nanometer_meter;
callback_module(Type) when Type =:= gauge ->
    nanometer_gauge;
callback_module(Type) when Type =:= histogram ->
    nanometer_histogram;
callback_module(Type) when Type =:= external ->
    nanometer_external.
%% @doc Per-type metric counts, e.g. [{counter, 3}, {meter, 0}, ...],
%% by asking each callback module for its count/0.
-spec stats() -> values().
stats() ->
    lists:map(
        fun(Type) ->
            Mod = callback_module(Type),
            {Type, Mod:count()}
        end, types()).
%% @doc Adjust counter Name by Delta (may be negative), creating the
%% counter with default options on first use. Errors are absorbed
%% into the result() by ?SAVE_OUR_SOULS.
-spec count(name(), integer()) -> result().
count(Name, Delta) when is_integer(Delta) ->
    ?SAVE_OUR_SOULS(
        begin
            nanometer_counter:create(Name, []),
            nanometer_counter:notify(Name, Delta)
        end
    ).
%% @doc Mark Delta events on meter Name, creating it with default
%% options on first use; errors are absorbed into the result().
-spec meter(name(), integer()) -> result().
meter(Name, Delta) when is_integer(Delta) ->
    ?SAVE_OUR_SOULS(
        begin
            nanometer_meter:create(Name, []),
            nanometer_meter:notify(Name, Delta)
        end
    ).
%% @doc Record Value on gauge Name, creating it with default options
%% on first use; errors are absorbed into the result().
-spec gauge(name(), number()) -> result().
gauge(Name, Value) ->
    ?SAVE_OUR_SOULS(
        begin
            nanometer_gauge:create(Name, []),
            nanometer_gauge:notify(Name, Value)
        end
    ).
%% @doc Record Value in histogram Name using default options.
-spec histogram(name(), number()) -> result().
histogram(Name, Value) ->
    histogram(Name, Value, []).
%% @doc Record Value in histogram Name, creating the histogram with
%% Options on first use; errors are absorbed into the result().
-spec histogram(name(), number(), options()) -> result().
histogram(Name, Value, Options) ->
    ?SAVE_OUR_SOULS(
        begin
            nanometer_histogram:create(Name, Options),
            nanometer_histogram:notify(Name, Value)
        end
    ).
%% @doc Like time/3 with default histogram options.
-spec time(name(), fun(() -> X)) -> X when X :: any().
time(Name, Fun) ->
    time(Name, Fun, []).
%% @doc Run Fun and record its duration (per
%% nanometer_compat:monotonic_us/0 — presumably microseconds; confirm
%% in that module) in histogram Name, returning Fun's result.
%% Note: if Fun throws, no sample is recorded.
-spec time(name(), fun(() -> X), options()) -> X when X :: any().
time(Name, Fun, Options) ->
    Time0 = nanometer_compat:monotonic_us(),
    Result = Fun(),
    Time1 = nanometer_compat:monotonic_us(),
    histogram(Name, Time1 - Time0, Options),
    Result.
%% =============================================================================
%% bondy_consistent_hashing.erl -
%% Copyright (c) 2016-2021 Leapsight. All rights reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% =============================================================================
%% -----------------------------------------------------------------------------
%% @doc
%% It uses Jump Consistent Hash algorithm described in
%% [A Fast, Minimal Memory, Consistent Hash Algorithm](https://arxiv.org/ftp/
%% arxiv/papers/1406/1406.2294.pdf).
%% @end
%% -----------------------------------------------------------------------------
-module(bondy_consistent_hashing).
-define(MAGIC, 16#27BB2EE687B0B0FD).
-define(MASK, 16#FFFFFFFFFFFFFFFF).
-export([bucket/2]).
-export([bucket/3]).
%% =============================================================================
%% API
%% =============================================================================
%% -----------------------------------------------------------------------------
%% @doc
%% @end
%% -----------------------------------------------------------------------------
-spec bucket(Key :: term(), Buckets :: pos_integer()) -> Bucket :: integer().
%% Convenience wrapper: defaults to the jump-consistent-hash algorithm.
bucket(Key, Buckets) ->
    bucket(Key, Buckets, jch).
%% -----------------------------------------------------------------------------
%% @doc
%% @end
%% -----------------------------------------------------------------------------
-spec bucket(Key :: term(), Buckets :: pos_integer(), Algo :: atom()) ->
    Bucket :: integer().
%% With a single bucket there is nothing to choose.
bucket(_, 1, _) ->
    0;
bucket(Key, Buckets, jch)
  when is_integer(Key) andalso is_integer(Buckets) andalso Buckets > 1 ->
    jump_consistent_hash(Key, Buckets);
%% Non-integer keys are first hashed to an integer, then dispatched
%% again. The guard restricts this clause to non-integer keys: without
%% it, an integer key combined with an unknown Algo recursed forever
%% (erlang:phash2/1 of an integer is again an integer, so no other
%% clause could ever match). Such calls now fail fast with
%% function_clause instead of looping.
bucket(Key, Buckets, Algo) when not is_integer(Key) ->
    bucket(erlang:phash2(Key), Buckets, Algo).
%% =============================================================================
%% PRIVATE
%% =============================================================================
%% Entry point: b = -1, j = 0, matching the reference implementation.
jump_consistent_hash(Key, N) ->
    jump_consistent_hash(Key, N, -1, 0).
%% -----------------------------------------------------------------------------
%% @private
%% @doc
%% The following is the C++ implementation in
%% A Fast, Minimal Memory, Consistent Hash Algorithm
%% https://arxiv.org/pdf/1406.2294.pdf
%%
%% @end
%% -----------------------------------------------------------------------------
%% static int32_t jump_consistent_hash(uint64_t key, int32_t num_buckets) {
%%     int64_t b = -1, j = 0;
%%     while (j < num_buckets) {
%%         b = j;
%%         key = key * 2862933555777941757ULL + 1;
%%         j = (b + 1) * ((double)(1LL << 31) / (double)((key >> 33) + 1));
%%     }
%%     return (int32_t)b;
%% }
%%
%% Each step advances the 64-bit LCG (?MAGIC multiplier, result masked
%% to 64 bits by ?MASK) and jumps J forward; the last J that stayed
%% below N is the chosen bucket.
jump_consistent_hash(Key, N, _, J0) when J0 < N ->
    %% B1 = J0,
    NewKey = (Key * ?MAGIC + 1) band ?MASK,
    J1 = trunc((J0 + 1) * ((1 bsl 31) / ((NewKey bsr 33) + 1)) ),
    jump_consistent_hash(NewKey, N, J0, J1);
jump_consistent_hash(_, _, B, _) ->
    B.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% Reference vectors for the jump-consistent-hash implementation,
%% including the single-bucket shortcut and a key beyond 2^63.
jch_test_() ->
    Cases =
        %% {Expect, Key, Buckets}
        [
         {0, 0, 1},
         {0, 3, 1},
         {0, 0, 2},
         {1, 4, 2},
         {0, 7, 2},
         {55, 1, 128},
         {120, 129, 128},
         {0, 0, 100000000},
         {38172097, 128, 100000000},
         {1644467860, 128, 2147483648},
         {92, 18446744073709551615, 128}
        ],
    [?_assertEqual(Expect, bucket(K, B, jch)) || {Expect, K, B} <- Cases].
-endif.
%% Copyright 2016-2017 TensorHub, Inc.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(guild_collector_protocol).
-export([new_input_buffer/0, input/2]).
-define(PART_DELIM, <<"\n\n">>).
%% A buffer is {Decoded, Working}: Working is 'undefined' or
%% {FirstSeenTimestamp, RevParts} for a message still being received.
new_input_buffer() -> {[], undefined}.
%% Feed one chunk of raw input into the buffer; returns
%% {DecodedMessages, NewBuffer} (via finalize_decoded/1 below). All
%% parts of a chunk share a single arrival timestamp.
input(Buf, Bin) ->
    Now = input_timestamp(),
    handle_parts(split_input(Bin), Now, Buf).
input_timestamp() ->
    erlang:system_time(milli_seconds).
%% Split a chunk on the "\n\n" part delimiter, keeping binaries.
split_input(Bin) ->
    re:split(Bin, ?PART_DELIM, [{return, binary}]).
%% Walk the delimiter-separated parts of one input chunk. Every part
%% except the last was terminated by the part delimiter and is
%% therefore complete, so it is finalized immediately; the trailing
%% part is only buffered, to be completed by a later chunk. A
%% singleton [<<>>] means the chunk was empty.
handle_parts([<<>>], _Now, Buf) ->
    finalize_decoded(Buf);
handle_parts([Part], Now, Buf) ->
    finalize_decoded(buffer_part(Part, Now, Buf));
handle_parts([Part|Rest], Now, Buf) ->
    %% Fix: finalize_parts/1 was previously applied twice here; the
    %% second application was always a no-op (the working buffer is
    %% already 'undefined' after the first), so it has been dropped.
    handle_parts(Rest, Now, finalize_parts(buffer_part(Part, Now, Buf))).
%% Add one raw part to the working buffer, stamping the time the first
%% part of the message arrived; parts accumulate in reverse order.
buffer_part(Part, Now, {Decoded, undefined}) ->
    {Decoded, {Now, [Part]}};
buffer_part(Part, _Now, {Decoded, {Time, Parts}}) ->
    {Decoded, {Time, [Part|Parts]}}.
%% Emit the accumulated decoded messages in arrival order, resetting
%% the decoded accumulator while keeping the unfinished working buffer.
finalize_decoded({Decoded, Working}) ->
    {lists:reverse(Decoded), {[], Working}}.
%% Decode a completed message (its parts reversed back into arrival
%% order) and move it, timestamped, onto the decoded accumulator.
finalize_parts({Decoded, {Time, Parts}}) ->
    {[{Time, decode(lists:reverse(Parts))}|Decoded], undefined};
finalize_parts({Decoded, undefined}) ->
    {Decoded, undefined}.
%% Decode one completed message body. A single empty part is the
%% end-of-stream marker; undecodable JSON is returned tagged 'invalid'
%% rather than crashing the collector.
decode([<<>>]) -> eof;
decode(Bin) ->
    case guild_json:try_decode(Bin) of
        {ok, Decoded} -> format_decoded(Decoded);
        {error, _Err} -> {invalid, Bin}
    end.
%% Classify a decoded JSON object by its protocol tag: "kv" carries
%% key/value pairs, "ktsv" key/time/step/value entries; anything else
%% is passed through tagged 'other'.
format_decoded({[{<<"kv">>, {KeyVals}}]}) ->
    {kv, KeyVals};
format_decoded({[{<<"ktsv">>, {KeyTimeStepVals}}]}) ->
    {ktsv, KeyTimeStepVals};
format_decoded(Other) ->
    {other, Other}.
-module(gen_rate_limiter).
-behaviour(gen_server).
%% API
-export([start_link/2, run/2]).
%% Gen Server
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
%% This module defines a generic rate limiter (rl) intended to be used with APIs that
%% only allow a certain number of requests per timespan.
%% ===================================================================
%% Rate Limiter Callback Module
%% ===================================================================
%% The callback module has to define 4 function that define the behaviour of the
%% rl:
-callback init(Args :: term()) ->
State :: term().
%% Called after startup. Args is the same as in
%% gen_rate_limiter:start_link(Module, Args) -> [See Gen Server start_link return]
-callback before_run(State :: term()) ->
State :: term().
%% Called every time before Module:run is called. Runs in the rl process.
-callback run(Args :: term(), State :: term()) ->
{Return :: term(), State :: term()}.
%% Called when gen_rate_limiter:run(Pid, Args) -> Return is called. Runs in
%% the caller's process, if this call fails the rl's old State will be kept.
-callback to_wait(State :: term()) ->
{ToWait :: non_neg_integer(), State :: term()}.
%% Called every time after Module:run was called (even if it failed). ToWait
%% (measured in milliseconds) determines how long the rl will sleep before
%% allowing Module:run to be called again. Runs in the rl process.
%% ===================================================================
%% Gen Rate Limiter
%% ===================================================================
%% Start a rate limiter driven by callback module Module; Args is
%% passed to Module:init/1.
start_link(Module, Args) ->
    gen_server:start_link(?MODULE, {Module, Args}, []).
%% Execute Module:run(Args, State) in the caller's process, gated by
%% the rate limiter Pid. The call blocks (infinity) until the limiter
%% grants a slot and hands over the module and current limiter state;
%% the new state is returned with a plain message. If this process
%% dies before replying, the limiter's monitor (see handle_call/3)
%% keeps the pre-run state.
run(Pid, Args) ->
    {M, RLState} = gen_server:call(Pid, {request, self()}, infinity),
    {Return, NewRLState} = M:run(Args, RLState),
    Pid ! {ok, NewRLState},
    Return.
%% @private Store the callback module together with its initial state.
init({Module, Args}) ->
    {ok, {Module, Module:init(Args)}}.
%% @private Grant the caller a slot: run Module:before_run/1, reply
%% early with the module and state so the caller can execute
%% Module:run/2 in its own process, then wait for the caller to send
%% the new state back. A monitor guards against the caller dying
%% mid-run, in which case the pre-run state is kept. Finally the
%% server sleeps Module:to_wait/1 milliseconds — blocking here is what
%% serializes and rate-limits subsequent requests.
handle_call({request, Pid}, From, {Module, RLState}) when is_pid(Pid), is_atom(Module) ->
    IntermediateRLState = Module:before_run(RLState),
    MRef = monitor(process, Pid),
    gen_server:reply(From, {Module, IntermediateRLState}),
    AfterRunRLState =
        receive
            {ok, S} ->
                demonitor(MRef, [flush]),
                S;
            {'DOWN', MRef, process, Pid, Reason} ->
                io:format("Gen Rate Limiter: Run of ~p failed with reason ~p.~n",
                    [Pid, Reason]),
                IntermediateRLState
        end,
    {ToWait, FinalRLState} = Module:to_wait(AfterRunRLState),
    if ToWait > 0 -> timer:sleep(ToWait);
        true -> ok end,
    {noreply, {Module, FinalRLState}};
handle_call(Msg, {Pid, _Tag} = _From, {Module, RLState} = State) ->
    %% Bug fix: the format string contains five "~p" placeholders but
    %% only four arguments were supplied, so logging an unexpected call
    %% crashed the server with badarg. The sender Pid is now included.
    io:format("Gen Rate Limiter ~p (callback module ~p): Received unexpected call ~p from ~p while in state ~p.",
        [self(), Module, Msg, Pid, RLState]),
    {noreply, State}.
%% @private Unexpected casts are logged and ignored.
handle_cast(Msg, State) ->
    log_unexpected("cast", Msg, State),
    {noreply, State}.
%% @private Unexpected raw messages are logged and ignored.
handle_info(Msg, State) ->
    log_unexpected("info", Msg, State),
    {noreply, State}.
%% @private No state migration is needed on upgrade.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
terminate(_Reason, _State) ->
    ok.
%% Internal: report a message the server has no handler for.
log_unexpected(Kind, Msg, {Module, RLState}) ->
    io:format("Gen Rate Limiter ~p (callback module ~p): Received unexpected ~s ~p while in state ~p.",
        [self(), Module, Kind, Msg, RLState]).
%%==============================================================================
%% Copyright 2013-2021 <NAME> <<EMAIL>>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%==============================================================================
%%%-------------------------------------------------------------------
%%% @doc
%%% A lazy lib.
%%%
%%% This library provides a simple pull oriented interface towards different
%%% data sources using a lazy data abstraction.
%%%
%%% The primary use case is the decoding of protocols where the sizes of parts
%%% are not known until they have been completely decoded and one wishes to
%%% decouple the handling of the data source and the protocol code.
%%%
%%% The user of the library provides a promise of data: a function that
%%% given a timeout returns either a data or eol (end of lazy) within
%%% the timespan given by the timeout. A promise can also have a state
%%% in which case the initial state has to be provided with the function.
%%% The stateful promise function must return a tuple of {Data, NewState}.
%%% If the timeout given is eol, the promise should return eol and any
%%% resources, such as streams and sockets, should be deallocated.
%%%
%%% The library provides the functions create/1 and create/2 to create
%%% lazy data structures given a promise. The structure can then be
%%% applied to a timeout to generate a tuple of data and new lazy data
%%% {Data, Lazy} or eol.
%%%
%%% Several utility functions are provided to construct new LazyData.
%%%
%%% One function is provided that creates an "empty" LazyData empty/0.
%%%
%%% Two functions to add a data to existing lazy data are provided:
%%% prepend/2 and append/2 that adds the data before or after the lazy data
%%% respectively.
%%%
%%% One function that combines lazy data is provided: concat/2.
%%%
%%% A number of utility functions are provided, functioning both as simple
%%% examples as well as convenience for basic uses:
%%% list_to_data/1, iolist_to_data/1
%%% tcp_to_data/2, tcp_to_data/3,tcp_to_data/4, tcp_socket_to_data/1
%%% tcp_reconnect_to_data/3
%%% file_to_data/2, file_stream_to_data/2
%%%
%%% N.B. This library relies heavily on the construction of lambda functions
%%% and for the sake of clarity and efficiency should be avoided if a
%%% more direct approach, one that does not suffer heavily from these very
%%% drawbacks itself, exists.
%%% @end
%%%
%% @author <NAME> <<EMAIL>>
%% @copyright (C) 2013-2021, <NAME> <<EMAIL>>
%%%-------------------------------------------------------------------
-module(lazy).
-copyright('<NAME> <<EMAIL>>').
%% Library functions
-export([create/1, create/2]).
-export([empty/0, prepend/2, append/2, concat/2]).
-export([list_to_data/1, iolist_to_data/1,
tcp_to_data/2, tcp_to_data/3,tcp_to_data/4, tcp_socket_to_data/1,
tcp_reconnect_to_data/3,
file_to_data/2, file_stream_to_data/2
]).
%% Types
-type data(Type) :: fun(([timeout() | eol]) -> {Type, data(Type)}) | eol.
-type promise(Type) :: fun(([timeout() | eol]) -> Type | eol).
-type promise(Type, State) ::
fun(([timeout() | eol], State) -> {Type, State} | eol).
%% Exported Types
-export_type([data/1, promise/1, promise/2]).
%% ===================================================================
%% Library functions.
%% ===================================================================
%%--------------------------------------------------------------------
%% Function: create(Promise) -> LazyData
%% @doc
%%   Wraps a stateless promise into lazy data: applying the result to
%%   a timeout yields {Data, NextLazy}, or eol when the promise is
%%   exhausted (or the timeout itself is eol).
%% @end
%%--------------------------------------------------------------------
-spec create(promise(Type)) -> data(Type).
%%--------------------------------------------------------------------
create(Promise) ->
    fun(Timeout) ->
            Result = Promise(Timeout),
            case Result of
                eol -> eol;
                Data -> {Data, create(Promise)}
            end
    end.
%%--------------------------------------------------------------------
%% Function: create(Promise, State) -> LazyData
%% @doc
%%   Wraps a stateful promise and its initial state into lazy data.
%%   Each application threads the state: the promise returns
%%   {Data, NewState} or eol.
%% @end
%%--------------------------------------------------------------------
-spec create(promise(Type, State), State) -> data(Type).
%%--------------------------------------------------------------------
create(Promise, State0) ->
    fun(Timeout) ->
            case Promise(Timeout, State0) of
                eol -> eol;
                {Data, State1} -> {Data, create(Promise, State1)}
            end
    end.
%%--------------------------------------------------------------------
%% Function: empty() -> LazyData.
%% @doc
%%   Lazy data with no elements: every application yields eol.
%% @end
%%--------------------------------------------------------------------
-spec empty() -> data(_).
%%--------------------------------------------------------------------
empty() -> fun(_Timeout) -> eol end.
%%--------------------------------------------------------------------
%% Function: prepend(Data, LazyData) -> LazyData.
%% @doc
%%   Lazy data that, when consumed, yields Data first and then
%%   everything in Lazy. Applying the result to eol forwards the eol
%%   to Lazy so its resources are released.
%% @end
%%--------------------------------------------------------------------
-spec prepend(Type, data(Type)) -> data(Type).
%%--------------------------------------------------------------------
prepend(Data, Lazy) ->
    fun(eol) -> Lazy(eol);
       (_Timeout) -> {Data, Lazy}
    end.
%%--------------------------------------------------------------------
%% Function: append(Data, LazyData) -> LazyData.
%% @doc
%%   Lazy data that, when consumed, yields everything in Lazy followed
%%   by Data. Applying the result to eol forwards the eol to Lazy.
%% @end
%%--------------------------------------------------------------------
-spec append(Type, data(Type)) -> data(Type).
%%--------------------------------------------------------------------
append(Data, Lazy) ->
    fun(eol) -> Lazy(eol);
       (Timeout) ->
            case Lazy(Timeout) of
                eol -> {Data, empty()};
                {Next, Rest} -> {Next, append(Data, Rest)}
            end
    end.
%%--------------------------------------------------------------------
%% Function: concat(LazyData1, LazyData2) -> LazyData.
%% @doc
%%   Lazy data that, when consumed, yields everything in Lazy1 followed
%%   by everything in Lazy2. Applying the result to eol forwards eol
%%   to both so any resources they hold are released.
%% @end
%%--------------------------------------------------------------------
-spec concat(data(Type), data(Type)) -> data(Type).
%%--------------------------------------------------------------------
concat(First, Second) ->
    fun(eol) ->
            First(eol),
            Second(eol);
       (Timeout) ->
            case First(Timeout) of
                eol -> Second(Timeout);
                {Data, Rest} -> {Data, concat(Rest, Second)}
            end
    end.
%%--------------------------------------------------------------------
%% Function: list_to_data(DataList) -> LazyData
%% @doc
%%   Lazy data yielding the elements of List one at a time; the
%%   remaining tail is the promise state.
%% @end
%%--------------------------------------------------------------------
-spec list_to_data([Type]) -> data(Type).
%%--------------------------------------------------------------------
list_to_data(List) ->
    Next = fun(eol, _) -> eol;
              (_, []) -> eol;
              (_, [Head | Tail]) -> {Head, Tail}
           end,
    create(Next, List).
%%--------------------------------------------------------------------
%% Function: iolist_to_data(IOList) -> LazyBinary
%% @doc
%%   A lazy binary constructed from an iolist: binary elements are
%%   yielded as-is, nested iolist elements are flattened to a binary,
%%   and a bare binary tail is yielded as the final element.
%% @end
%%--------------------------------------------------------------------
-spec iolist_to_data(iolist()) -> data(binary()).
%%--------------------------------------------------------------------
iolist_to_data(List) ->
    Next = fun(eol, _) -> eol;
              (_, []) -> eol;
              (_, Bin) when is_binary(Bin) -> {Bin, []};
              (_, [Bin | Tail]) when is_binary(Bin) -> {Bin, Tail};
              (_, [Head | Tail]) -> {iolist_to_binary(Head), Tail}
           end,
    create(Next, List).
%%--------------------------------------------------------------------
%% Function: tcp_to_data(Host, Port) -> LazyBinary | Error
%% @doc
%%   A lazy binary is constructed from the socket obtained by opening a
%%   TCP connection to the host in binary mode with packet size 0. If
%%   an error occurs during connection an error is returned.
%% @end
%%--------------------------------------------------------------------
-spec tcp_to_data(HostName, Port) -> data(binary()) | {error, inet:posix()} when
      HostName:: inet:ip_address() | inet:hostname(),
      Port ::inet:port_number().
%%--------------------------------------------------------------------
tcp_to_data(HostName, Port) -> tcp_to_data(HostName, Port, infinity).
%%--------------------------------------------------------------------
%% Function: tcp_to_data(Host, Port, Timeout) -> LazyBinary | Error
%% @doc
%%   As tcp_to_data/2, with an explicit connect timeout. If an error or
%%   timeout occurs during connection an error is returned.
%% @end
%%--------------------------------------------------------------------
-spec tcp_to_data(HostName, Port, timeout()) ->
          data(binary()) | {error, inet:posix()} when
      HostName:: inet:ip_address() | inet:hostname(),
      Port ::inet:port_number().
%%--------------------------------------------------------------------
tcp_to_data(HostName, Port, Timeout) -> tcp_to_data(HostName, Port, Timeout,[]).
%%--------------------------------------------------------------------
%% Function: tcp_to_data(Host, Port, Timeout, TCPOptions) -> LazyBinary | Error
%% @doc
%%   As tcp_to_data/3, with extra options passed to gen_tcp:connect/4.
%%   If an error or timeout occurs during connection an error is
%%   returned.
%%   If the options provided are inconsistent with:
%%     {packet, 0}, binary, {active, false}
%%   unexpected and undefined behaviour will be the result.
%% @end
%%--------------------------------------------------------------------
-spec tcp_to_data(HostName, Port, timeout(), [gen_tcp:connect_option()]) ->
          data(binary()) | {error, inet:posix()} when
      HostName:: inet:ip_address() | inet:hostname(),
      Port ::inet:port_number().
%%--------------------------------------------------------------------
tcp_to_data(HostName, Port, Timeout, OptionsIn) ->
    %% Defaults required by the lazy promise come first; OptionsIn is
    %% appended after them (hence the warning above about conflicts).
    Options = [{packet, 0}, binary, {active, false} | OptionsIn],
    case gen_tcp:connect(HostName, Port, Options, Timeout) of
        {ok, Socket} -> tcp_socket_to_data(Socket);
        Error = {error, _} -> Error
    end.
%%--------------------------------------------------------------------
%% Function: tcp_socket_to_data(Socket) -> LazyBinary
%% @doc
%%   A lazy binary is constructed from the socket; it is expected to be
%%   connected to the host in binary mode with packet size 0.
%%   On errors/closure reading from the socket results in the closure
%%   and eol is returned. Timeout in reading gives an empty binary.
%% @end
%%--------------------------------------------------------------------
-spec tcp_socket_to_data(inet:socket()) -> data(binary()).
%%--------------------------------------------------------------------
tcp_socket_to_data(Socket) ->
    Promise = fun(eol) -> gen_tcp:close(Socket), eol;
                 (Timeout) ->
                     case gen_tcp:recv(Socket, 0, Timeout) of
                         {ok, Packet} -> Packet;
                         %% A receive timeout is not fatal: yield an
                         %% empty binary and keep the socket open.
                         {error, timeout} -> <<>>;
                         %% Peer closed: the socket is already gone.
                         {error, closed} -> eol;
                         %% Any other error: close and end the stream.
                         {error, _} -> gen_tcp:close(Socket), eol
                     end
              end,
    create(Promise).
%%--------------------------------------------------------------------
%% Function: tcp_reconnect_to_data(Host, Port, Timeout) -> LazyBinary
%% @doc
%% A lazy binary is constructed from the socket obtained by opening a TCP
%% connection to the host in binary mode with packet size 0. If an error
%% occurs during connection an empty lazy is returned. If the connection
%% is closed it is reconnected.
%% @end
%%--------------------------------------------------------------------
-spec tcp_reconnect_to_data(HostName, Port, timeout()) -> data(binary()) when
      HostName:: inet:ip_address() | inet:hostname(),
      Port ::inet:port_number().
%%--------------------------------------------------------------------
tcp_reconnect_to_data(HostName, Port, Timeout) ->
    case tcp_to_data(HostName, Port, Timeout) of
        Lazy when is_function(Lazy, 1) ->
            %% Once the current connection is exhausted, lazily
            %% reconnect and continue with the fresh stream.
            concat(Lazy,
                   fun(eol) -> eol;
                      (CallTimeout) ->
                           Lazy1 =
                               tcp_reconnect_to_data(HostName, Port, Timeout),
                           Lazy1(CallTimeout)
                   end);
        _ ->
            %% Connection failed: degrade to an empty lazy. Note that
            %% the error reason from tcp_to_data/3 is discarded here.
            empty()
    end.
%%--------------------------------------------------------------------
%% Function: file_to_data(Mode, FileName) -> LazyBinary
%% @doc
%%   A lazy binary constructed from the named file, opened in binary
%%   raw mode with read_ahead. Mode selects linewise reading ('line')
%%   or chunks of Mode bytes. An open failure is returned as-is.
%% @end
%%--------------------------------------------------------------------
-spec file_to_data(line | integer(), file:filename()) ->
          data(binary()) | {error, file:posix() | badarg | system_limit}.
%%--------------------------------------------------------------------
file_to_data(Type, Name) ->
    OpenModes = [read, binary, raw, read_ahead],
    case file:open(Name, OpenModes) of
        {ok, Stream} -> file_stream_to_data(Type, Stream);
        {error, _} = Error -> Error
    end.
%%--------------------------------------------------------------------
%% Function: file_stream_to_data(Mode, Stream) -> LazyBinary
%% @doc
%%   A lazy binary is constructed from the stream; it is expected to be
%%   opened in binary raw mode with read_ahead. Read errors and eof
%%   close the stream and end the lazy data. The mode determines if
%%   the data is read linewise ('line') or in chunks of Mode bytes.
%% @end
%%--------------------------------------------------------------------
-spec file_stream_to_data(line | integer(), file:io_device()) -> data(binary()).
%%--------------------------------------------------------------------
file_stream_to_data(line, Stream) ->
    stream_to_data(Stream, fun(S) -> file:read_line(S) end);
file_stream_to_data(ChunkSize, Stream) ->
    stream_to_data(Stream, fun(S) -> file:read(S, ChunkSize) end).

%% Shared promise construction for the two read modes (the two clauses
%% previously duplicated this fun verbatim). Read yields {ok, Data},
%% eof or {error, _}; anything but data closes the stream and ends the
%% lazy, as does applying the lazy to eol.
stream_to_data(Stream, Read) ->
    Promise = fun(eol) -> file:close(Stream), eol;
                 (_) ->
                     case Read(Stream) of
                         {ok, Data} -> Data;
                         {error, _} -> file:close(Stream), eol;
                         eof -> file:close(Stream), eol
                     end
              end,
    create(Promise).
%% ===================================================================
%% Internal functions.
%% =================================================================== | src/lazy.erl | 0.591251 | 0.612252 | lazy.erl | starcoder |
%% @doc Module for converting ip formats and results
-module(ipmangle).
-export([ip_results_to_json/1, verify_address/1]).
-include("defs.hrl").
%% @doc Raises blacklist_net for networks whose first octet is on the
%% blacklist (currently first octet 169).
-spec check_blacklist(byte()) -> ok.
check_blacklist(169) ->
    erlang:error(blacklist_net);
check_blacklist(_FirstOctet) ->
    ok.
%% @doc Verifies input is a valid /24 network address given as a
%% binary, e.g. <<"127.0.0.0">>, and returns it as a list with the
%% final "0" dropped, e.g. "127.0.0.". This is consistent with input
%% required for the scanner.
%% The caller should catch exceptions.
%% @end
-spec verify_address(binary()) -> [byte()].
verify_address(Network) ->
    %% The inet parsing functions require list (string) addresses.
    AddressString = binary_to_list(Network),
    {ok, Parsed} = inet:parse_ipv4strict_address(AddressString),
    %% Require a /24 network address: the last octet must be zero.
    {FirstOctet, _, _, 0} = Parsed,
    check_blacklist(FirstOctet),
    %% "127.0.0.0" -> "127.0.0."
    lists:droplast(AddressString).
%% @doc Converts scan results to the JSON format expected by the React
%% front end: a list of {"address": ..., "stat": ...} objects.
-spec ip_results_to_json([{inet:ip4_address(), scan_result()}]) ->
    jiffy:json_value().
ip_results_to_json(Results) ->
    Objects = lists:map(fun(Result) -> {single_convert(Result)} end, Results),
    jiffy:encode(Objects).
%% @doc Builds the key/value pairs of a single result object.
-spec single_convert({inet:ip4_address(), scan_result()}) ->
    list({ binary(), binary() | scan_result() }).
single_convert({Address, Stat}) ->
    AddressBin = list_to_binary(inet:ntoa(Address)),
    [{<<"address">>, AddressBin}, {<<"stat">>, Stat}].
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% A valid /24 network is accepted and returned without its final "0".
verify_address_valid_test() ->
    ?assertEqual("127.0.0.", verify_address(<<"127.0.0.0">>)).
%% Out-of-range octet values must raise.
verify_address_stupid_test() ->
    ?assertError(_, verify_address(<<"127.324324234.43432.12321">>)).
%% A host address (non-zero last octet) is not a /24 network.
verify_address_fullip_test() ->
    ?assertError(_, verify_address(<<"127.0.0.1">>)).
%% Results serialise to the exact JSON shape the front end expects.
ip_results_to_json_test() ->
    ?assertEqual(ip_results_to_json([{{127, 0, 0, 253}, not_vulnerable}, {{127, 0, 0, 252}, not_vulnerable}]),
        <<"[{\"address\":\"127.0.0.253\",\"stat\":\"not_vulnerable\"},{\"address\":\"127.0.0.252\",\"stat\":\"not_vulnerable\"}]">>).
-endif.
%% -------------------------------------------------------------------
%%
%% riak_ql_quanta.erl - a library for quantising time for Time Series
%%
%% @doc This module serves to generate time quanta on multi - (day, hour, minute,
%% second) boundaries. The quantum are based on an origin time of Jan 1, 1970
%% 00:00:00 (Unix Epoch).
%% The function <em>quantum/3</em> takes a time in milliseconds to bucketize,
%% a size of the quantum, and the units of said quantum.
%% For instance, the following call would create buckets for timestamps on 15
%% minute boundaries: <em>quantum(Time, 15, m)</em>. The quantum time is returned in
%% milliseconds since the Unix epoch.
%% The function <em>quanta/4</em> takes 2 times in milliseconds and size of the quantum
%% and the of units of said quantum and returns a list of quantum boundaries that span the time
%%
%% Copyright (c) 2015-2016 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(riak_ql_quanta).
-export([
quantum/3,
quanta/4,
timestamp_to_ms/1,
ms_to_timestamp/1,
unit_to_millis/2
]).
-type time_ms() :: non_neg_integer().
%% A timestamp in millisconds representing number of millisconds from Unix epoch
-type time_unit() :: d | h | m | s | ms.
%% The units of quantization available to quantum/3
-type err() :: {error, term()}.
%% @doc The Number of Days from Jan 1, 0 to Jan 1, 1970
%% We need this to compute years and months properly including leap years and variable length
%% months.
-define(DAYS_FROM_0_TO_1970, 719528).
-ifdef(TEST).
-ifdef(EQC).
-include_lib("eqc/include/eqc.hrl").
-define(QC_OUT(P),
eqc:on_output(fun(Str, Args) ->
io:format(user, Str, Args) end, P)).
-compile(export_all).
-endif.
-endif.
%% @clear
%% @end
%% @doc given an upper and lower bound for time, returns a tuple consisting of
%% * the number of slices
%% * a list of all the quantum boundaries
%% - the length of the list is the number of slices - 1
%% Descending bounds are handled by swapping the arguments; an {error, _}
%% from quantum/3 (invalid unit) is propagated unchanged.
-spec quanta(time_ms(), time_ms(), non_neg_integer(), time_unit()) -> {integer(), [integer()]} | {error, any()}.
quanta(StartTime, EndTime, QuantaSize, Unit) when StartTime > EndTime ->
    %% cheap trick to handle descending timestamps, reverse the arguments
    quanta(EndTime, StartTime, QuantaSize, Unit);
quanta(StartTime, EndTime, QuantaSize, Unit) ->
    %% Align the start down onto its quantum boundary, then count how many
    %% whole slices are needed to reach EndTime.
    Start = quantum(StartTime, QuantaSize, Unit),
    case Start of
        {error, _} = E -> E;
        _Other -> End = EndTime,
                  Diff = End - Start,
                  Slice = unit_to_ms(Unit) * QuantaSize,
                  NSlices = accommodate(Diff, Slice),
                  %% Interior boundaries only: NSlices - 1 entries.
                  Quanta = gen_quanta(NSlices, Start, Slice, []),
                  {NSlices, Quanta}
    end.
%% compute ceil(Length / Unit): the number of Unit-sized slices needed to
%% cover Length (Length >= 0, Unit > 0).
%% The previous version wrote the else-branch of its `if` as the
%% always-true guard `el/=se` (the atoms 'el' and 'se' compared unequal);
%% replaced with an explicit integer ceiling division.
-spec accommodate(non_neg_integer(), pos_integer()) -> non_neg_integer().
accommodate(Length, Unit) ->
    (Length + Unit - 1) div Unit.
%% Prepend the interior quantum boundaries to Acc, in ascending order:
%% Start+Slice, Start+2*Slice, ..., Start+(N-1)*Slice.
%% N = 1 means a single slice, i.e. no interior boundaries.
gen_quanta(1, _Start, _Slice, Acc) ->
    Acc;
gen_quanta(N, Start, Slice, Acc) when is_integer(N) andalso N > 1 ->
    Boundaries = [Start + K * Slice || K <- lists:seq(1, N - 1)],
    Boundaries ++ Acc.
%% @doc Given the time in milliseconds since the unix epoch and a time range and unit eg (15, m),
%% generate the starting timestamp of the range (quantum) in milliseconds since the epoch where the
%% time belongs. Note that Time - Quanta is less than or equal to QuantaSize * Unit (in milliseconds).
%%
%% Fix: the previous guard `... andalso Unit == d; Unit == h; ...` parsed
%% as alternative guard sequences, so the is_integer/1 checks were skipped
%% for every unit except `d`. The unit membership test is now grouped
%% explicitly; any invalid argument falls through to the error clause
%% (which reports the unit — for non-integer Time/QuantaSize the unit may
%% itself be valid, but the caller still gets a clean error tuple instead
%% of a badarith crash).
-spec quantum(time_ms(), non_neg_integer(), time_unit()) -> time_ms() | err().
quantum(Time, QuantaSize, Unit) when is_integer(Time) andalso
                                     is_integer(QuantaSize) andalso
                                     (Unit =:= d orelse
                                      Unit =:= h orelse
                                      Unit =:= m orelse
                                      Unit =:= s orelse
                                      Unit =:= ms) ->
    Ms = unit_to_ms(Unit),
    Diff = Time rem (QuantaSize*Ms),
    Time - Diff;
quantum(_, _, Unit) ->
    {error, {invalid_unit, Unit}}.
%% Convert an integer and a time unit in binary to millis, assumed from the unix
%% epoch.
-spec unit_to_millis(Value::integer(), Unit::binary() | time_unit()) -> integer() | error.
%% Dispatch on the unit: a binary is normalised to the equivalent atom
%% first, then each atom clause applies its millisecond multiplier.
%% `ms' passes the value through untouched (no multiplication), exactly
%% as before; anything unrecognised yields the atom `error'.
unit_to_millis(V, U) when is_binary(U) ->
    case U of
        <<"ms">> -> unit_to_millis(V, ms);
        <<"s">> -> unit_to_millis(V, s);
        <<"m">> -> unit_to_millis(V, m);
        <<"h">> -> unit_to_millis(V, h);
        <<"d">> -> unit_to_millis(V, d);
        _ -> error
    end;
unit_to_millis(V, ms) -> V;
unit_to_millis(V, s) -> V * 1000;
unit_to_millis(V, m) -> V * 1000 * 60;
unit_to_millis(V, h) -> V * 1000 * 60 * 60;
unit_to_millis(V, d) -> V * 1000 * 60 * 60 * 24;
unit_to_millis(_, _) -> error.
%% @doc Return the time in milliseconds since 00:00 GMT Jan 1, 1970 (Unix Epoch)
-spec timestamp_to_ms(erlang:timestamp()) -> time_ms().
%% Milliseconds since the Unix epoch for an {MegaSecs, Secs, MicroSecs}
%% triple (os:timestamp/0 format); sub-millisecond precision is truncated.
timestamp_to_ms({MegaSecs, Secs, MicroSecs}) ->
    TotalSecs = MegaSecs * 1000000 + Secs,
    TotalSecs * 1000 + MicroSecs div 1000.
%% @doc Return an erlang:timestamp() given the time in milliseconds since
%% the Unix Epoch. The previous version always returned 0 for the
%% megaseconds field, producing a non-normalised triple (seconds could
%% exceed 1_000_000); the seconds are now split so the result is a
%% well-formed {MegaSecs, Secs, MicroSecs}. Consumers that only feed the
%% result to the calendar module are unaffected, since those functions sum
%% MegaSecs*1_000_000 + Secs either way.
-spec ms_to_timestamp(non_neg_integer()) -> erlang:timestamp().
ms_to_timestamp(Time) ->
    TotalSecs = Time div 1000,
    MicroSecs = (Time rem 1000) * 1000,
    {TotalSecs div 1000000, TotalSecs rem 1000000, MicroSecs}.
-spec unit_to_ms(time_unit()) -> time_ms().
%% Milliseconds in one unit of the given granularity; constants folded
%% rather than derived recursively (1 s = 1000 ms, 1 m = 60 s,
%% 1 h = 60 m, 1 d = 24 h).
unit_to_ms(ms) -> 1;
unit_to_ms(s) -> 1000;
unit_to_ms(m) -> 60000;
unit_to_ms(h) -> 3600000;
unit_to_ms(d) -> 86400000.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% Assert that quantising "now" to a Quanta-minute boundary lands on one
%% of the expected minute-of-hour values.
assert_minutes(Quanta, OkTimes) ->
    Time = timestamp_to_ms(os:timestamp()),
    QuantaMs = quantum(Time, Quanta, m),
    {_, {_, M, _}} = calendar:now_to_universal_time(ms_to_timestamp(QuantaMs)),
    ?assert(lists:member(M, OkTimes)).
quantum_minutes_test() ->
    assert_minutes(15, [0, 15, 30, 45]),
    %% 75-minute quanta also land on 15-minute marks (75 rem 60 = 15).
    assert_minutes(75, [0, 15, 30, 45]),
    assert_minutes(5, [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55]),
    assert_minutes(6, [0, 6, 12, 18, 24, 30, 36, 42, 48, 54]).
%% Same idea for hour-sized quanta.
assert_hours(Quanta, OkTimes) ->
    Time = timestamp_to_ms(os:timestamp()),
    QuantaMs = quantum(Time, Quanta, h),
    {_, {H, _, _}} = calendar:now_to_universal_time(ms_to_timestamp(QuantaMs)),
    ?assert(lists:member(H, OkTimes)).
quantum_hours_test() ->
    assert_hours(12, [0, 12]),
    assert_hours(24, [0]).
%% A Days-sized quantum must start at most Days-1 days before today and
%% never in the future.
assert_days(Days) ->
    Now = os:timestamp(),
    Time = timestamp_to_ms(Now),
    QuantaMs = quantum(Time, Days, d),
    {NowDate, _} = calendar:now_to_universal_time(Now),
    {QuantaDate, _} = calendar:now_to_universal_time(ms_to_timestamp(QuantaMs)),
    NowDays = calendar:date_to_gregorian_days(NowDate),
    QuantaDays = calendar:date_to_gregorian_days(QuantaDate),
    ?assert((NowDays - QuantaDays) < Days),
    ?assert((NowDays - QuantaDays) >= 0).
quantum_days_test() ->
    assert_days(1),
    assert_days(10),
    assert_days(15),
    assert_days(28),
    assert_days(29),
    assert_days(30),
    assert_days(31).
%%
%% test Quanta
%%
-define(MIN, 60*1000). % minute in miliseconds
%% Start and end fall inside the same 15-minute quantum:
%% one slice, no interior boundaries.
single_quanta_test() ->
    Start = 1 * ?MIN,
    End = 5 * ?MIN,
    {N, Quanta} = quanta(Start, End, 15, m),
    Length = length(Quanta),
    ?assertEqual(1, N),
    ?assertEqual(N - 1, Length),
    ?assertEqual([], Quanta).
%% The range crosses exactly one quantum boundary (at 15 minutes).
two_quanta_test() ->
    Start = 1 * ?MIN,
    End = 16 * ?MIN,
    {N, Quanta} = quanta(Start, End, 15, m),
    Length = length(Quanta),
    ?assertEqual(2, N),
    ?assertEqual(N -1, Length),
    ?assertEqual([15 * ?MIN], Quanta).
%% A short range straddling a boundary still produces two slices.
split_quanta_test() ->
    Start = 14 * ?MIN,
    End = 16 * ?MIN,
    {N, Quanta} = quanta(Start, End, 15, m),
    Length = length(Quanta),
    ?assertEqual(2, N),
    ?assertEqual(N - 1, Length),
    ?assertEqual([15 * ?MIN], Quanta).
-ifdef(EQC).
%% Run the bounded-quantum property 1000 times under EQC from eunit.
prop_quantum_bounded_test() ->
    ?assertEqual(
       true,
       eqc:quickcheck(
         eqc:numtests(1000, prop_quantum_bounded()))
      ).
%% Ensure that Quantas are always bounded, meaning that any time is no more
%% than one quantum ahead of the quantum start.
prop_quantum_bounded() ->
    ?FORALL(
       {Date, Time, {Quanta, Unit}},
       {date_gen(), time_gen(), quantum_gen()},
       begin
           DateTime = {Date, Time},
           %% Gregorian seconds -> Unix-epoch seconds -> milliseconds.
           SecondsFrom0To1970 = ?DAYS_FROM_0_TO_1970 * (unit_to_ms(d) div 1000),
           DateMs = (calendar:datetime_to_gregorian_seconds(DateTime) - SecondsFrom0To1970)*1000,
           QuantaMs = quantum(DateMs, Quanta, Unit),
           QuantaSize = quantum_in_ms(Quanta, Unit),
           (DateMs - QuantaMs) =< QuantaSize
       end).
%% Helper: quantise a calendar datetime and return it as an erlang timestamp.
quantum_now_from_datetime(DateTime, Quanta, Unit) ->
    SecondsFrom0To1970 = ?DAYS_FROM_0_TO_1970 * (unit_to_ms(d) div 1000),
    DateMs = (calendar:datetime_to_gregorian_seconds(DateTime) - SecondsFrom0To1970)*1000,
    QuantaMs = quantum(DateMs, Quanta, Unit),
    ms_to_timestamp(QuantaMs).
%% Size of one quantum in milliseconds.
quantum_in_ms(Quanta, Unit) ->
    Quanta*unit_to_ms(Unit).
%% EQC Generators
%% Valid dates between 1970 and 2015 (invalid day/month combos rejected).
date_gen() ->
    ?SUCHTHAT(Date, {choose(1970, 2015), choose(1, 12), choose(1, 31)}, calendar:valid_date(Date)).
time_gen() ->
    {choose(0, 23), choose(0, 59), choose(0, 59)}.
%% We expect quanta to be bigger than their cardinality
%% A quantum of 100 minutes is perfectly reasonable
quantum_gen() ->
    oneof([
           {choose(1, 1000), d},
           {choose(1, 1000), h},
           {choose(1, 1000), m},
           {choose(1, 1000), s}
          ]).
-endif.
-endif.
%% @doc This is the behaviour definition for a credential provider
%% module and it iterates over a list of providers. You may set the
%% `credential_providers` Erlang environment variable if you want to
%% restrict checking only a certain subset of the default list.
%%
%% Default order of checking for credentials is:
%% <ol>
%% <li>Erlang application environment</li>
%% <li>OS environment</li>
%% <li>Credentials from AWS file</li>
%% <li>ECS Task credentials</li>
%% <li>EC2 credentials</li>
%% </ol>
%%
%% Providers are expected to implement a function called `fetch/1' which
%% takes as its argument a proplist of options which may influence the
%% operation of the provider. The fetch/1 function should return either
%% `{ok, Credentials, Expiration}' or `{error, Reason}'.
%%
%% If a provider returns {ok, ...} then evaluation stops at that provider.
%% If it returns {error, ...} then the next provider is executed in order
%% until either a set of credentials are returned or the tuple
%% `{error, no_credentials}' is returned.
%%
%% If a new provider is desired, the behaviour interface should be
%% implemented and its module name added to the default list.
%% @end
-module(aws_credentials_provider).
-export([fetch/0, fetch/1]).
-type options() :: #{provider() => map()}.
-type expiration() :: binary() | pos_integer() | infinity.
-type provider() :: aws_credentials_env
| aws_credentials_file
| aws_credentials_ecs
| aws_credentials_ec2.
-export_type([ options/0, expiration/0 ]).
-callback fetch(options()) ->
{ok, aws_credentials:credentials(), expiration()} | {error, any()}.
-include_lib("kernel/include/logger.hrl").
-define(DEFAULT_PROVIDERS, [aws_credentials_env,
aws_credentials_file,
aws_credentials_ecs,
aws_credentials_ec2]).
%% @doc Fetch credentials using the default (or configured) provider list
%% with no per-provider options.
-spec fetch() ->
        {ok, aws_credentials:credentials(), expiration()} |
        {'error', 'no_credentials'}.
fetch() ->
    fetch(#{}).
%% @doc Same as fetch/0 but passing per-provider options through to each
%% provider's fetch/1 callback. The provider list can be overridden with
%% the `credential_providers' application environment key.
-spec fetch(options()) ->
        {ok, aws_credentials:credentials(), expiration()} |
        {'error', 'no_credentials'}.
fetch(Options) ->
    Providers = get_env(credential_providers, ?DEFAULT_PROVIDERS),
    evaluate_providers(Providers, Options).
%% Try each provider in order; the first {ok, Credentials, Expiration}
%% wins. A provider error is logged (domain aws_credentials) and the next
%% provider is tried; an exhausted list yields {error, no_credentials}.
%% NOTE(review): the spec admits {provider(), options()} tuples in the
%% list, but this clause only handles bare module atoms — a tuple would
%% crash on Provider:fetch/1; confirm which form callers actually use.
-spec evaluate_providers([provider() | {provider(), options()}], options()) ->
        {ok, aws_credentials:credentials(), expiration()} |
        {'error', no_credentials}.
evaluate_providers([], _Options) -> {error, no_credentials};
evaluate_providers([ Provider | Providers ], Options) ->
    case Provider:fetch(Options) of
        {error, _} = Error ->
            ?LOG_ERROR("Provider ~p reports ~p",
                       [Provider, Error],
                       #{domain => [aws_credentials]}),
            evaluate_providers(Providers, Options);
        {ok, Credentials, Expiration} ->
            {ok, Credentials, Expiration}
    end.
%% @doc Read Key from the aws_credentials application environment, falling
%% back to Default when unset. Uses application:get_env/3 instead of
%% hand-rolling the undefined/{ok, V} case; the previous spec also
%% restricted Default to a provider list although fetch/1 relies on
%% arbitrary defaults, so it is widened to term().
-spec get_env(atom(), term()) -> term().
get_env(Key, Default) ->
    application:get_env(aws_credentials, Key, Default).
%% Puzzle:
%%
%% transparent paper folding
%% https://adventofcode.com/2021/day/17
%%
%% explanation:
%% https://blog.beerriot.com/2021/12/17/advent-of-code-day-17/
-module(puzzle17).
-export([
solveA/0,
solveB/0,
parse_input/1,
find_ys/2,
find_xs/2,
find_solutions/4
]).
%% - highest Y velocity for target below start is abs(lower_end)-1,
%% because it will trace same steps up and down, and then makes one
%% more step, which needs to hit bottom end
%%
%% - highest Y reached during that point is (Y * (Y+1))/2
solveA() ->
    %% Only the lower Y bound matters for the peak-height answer.
    [_MinX, _MaxX, MinY, _MaxY] = load_file(),
    Velocity = abs(MinY) - 1,
    (Velocity * (Velocity + 1)) div 2.
%% Part B: count every distinct initial velocity pair that hits the target.
solveB() ->
    [MinX, MaxX, MinY, MaxY] = load_file(),
    length(puzzle17:find_solutions(MinX, MaxX, MinY, MaxY)).
%% Read and parse the puzzle input from the local puzzles directory.
load_file() ->
    {ok, Data} = file:read_file("puzzles/puzzle17-input.txt"),
    parse_input(Data).
%% Regex for the puzzle input, e.g. "target area: x=20..30, y=-10..-5";
%% captures MinX, MaxX, MinY, MaxY (the Y bounds must be negative).
-define(RE,
        "target area: x=([0-9]+)\\.\\.([0-9]+), y=(-[0-9]+)\\.\\.(-[0-9]+)").

%% Parse the raw puzzle input into [MinX, MaxX, MinY, MaxY] as integers.
parse_input(Input) ->
    {match, Captured} =
        re:run(Input, ?RE, [{capture, [1, 2, 3, 4], binary}]),
    lists:map(fun erlang:binary_to_integer/1, Captured).
%% Notes for Part 2:
%%
%% - Solve X and Y independently: keep a list of which steps for each
%% velocity end up in the target space, then pair up the two, by
%% matching the steps. Match X to Y, because of the special case
%% noted below.
%%
%% - Solving X:
%% - Skip everything that would have a zero velocity before the target
%% - Stop searching when the first step of a velocity is past the target
%% - Special case: some velocities might stop in the target, so mark
%% that they match every step greater
%%
%% - Solving Y:
%% - Start with highest positive velocity that hits target
%% - Work downward until the first velocity has a first step below target
%% Enumerate every initial Y velocity that can hit the band [Min, Max]
%% (target strictly below the origin) as {Velocity, Step} pairs, where
%% Step is a step number on which the probe is inside the band.
%% Velocities are scanned from the theoretical maximum abs(Min)-1 downward.
find_ys(Min, Max) ->
    Highest = abs(Min)-1,
    %% {velocity, step}: the highest shot lands on the bottom row exactly
    %% one step after re-crossing y=0, i.e. at step 2*Highest + 2.
    InitPoints = [{Highest, Highest * 2 + 2}],
    find_ys(Min, Max, Highest-1, InitPoints).
%% The slowest shot is velocity Min itself: straight to the bottom row on
%% step 1; anything slower overshoots below the target immediately.
find_ys(Min, _Max, Min, Points) ->
    [{Min, 1}|Points];
find_ys(Min, Max, Vel, Points) ->
    %% A positive shot mirrors its ascent: it next sits below y=0 at step
    %% 2*Vel+2 with position -(Vel+1), so only the descent is simulated.
    case Vel > 0 of
        true ->
            FirstStepBelow0 = Vel * 2 + 2,
            InitVelocityBelow0 = -(Vel+1);
        false ->
            FirstStepBelow0 = 1,
            InitVelocityBelow0 = Vel
    end,
    find_ys(Min, Max, Vel-1,
            [{Vel, Step} || Step <- steps_in_target_y(Min, Max,
                                                      FirstStepBelow0,
                                                      InitVelocityBelow0,
                                                      InitVelocityBelow0-1)]
            ++ Points).
%% Walk the probe downward: Y is the position at Step, and gravity reduces
%% the velocity by one each step. Collect every step number at which the
%% position lies inside [Min, Max]; stop once Y drops below Min.
steps_in_target_y(Min, _Max, _Step, Y, _Vel) when Y < Min ->
    [];
steps_in_target_y(Min, Max, Step, Y, Vel) when Y =< Max ->
    %% Inside the band: record this step and keep falling.
    [Step | steps_in_target_y(Min, Max, Step + 1, Y + Vel, Vel - 1)];
steps_in_target_y(Min, Max, Step, Y, Vel) ->
    %% Still above the band: keep falling without recording.
    steps_in_target_y(Min, Max, Step + 1, Y + Vel, Vel - 1).
%% Enumerate every initial X velocity that can reach [Min, Max] as
%% {Velocity, Step} pairs; Step may be the marker {all_gte, S} when the
%% probe comes to rest inside the target (hits on every step >= S).
%% NOTE(review): if find_first_xs/3 returns {error, _} the head match
%% below crashes with badmatch — acceptable for a puzzle, but be aware.
find_xs(Min, Max) ->
    Slowest = find_first_xs(Min, Max, 1),
    [{StartVel,_}|_] = Slowest,
    find_xs(Min, Max, StartVel, Slowest).
%% Find the smallest velocity whose total travel Vel*(Vel+1)/2 comes to
%% rest inside the target; slower velocities stall short of it.
find_first_xs(Min, Max, Vel) ->
    case (Vel * (Vel + 1)) div 2 of
        In when Min =< In, In =< Max ->
            [{Vel, Step} || Step <- steps_in_target_x(Min, Max, 1, Vel, Vel-1)];
        In when In > Max ->
            %% Even velocity 1 overshoots: no velocity can rest inside.
            {error, unexpected_slowest_too_fast};
        _ ->
            find_first_xs(Min, Max, Vel+1)
    end.
%% Horizontal analogue of steps_in_target_y/5: drag shrinks Vel toward 0,
%% and once Vel reaches 0 the probe never moves again. If it comes to
%% rest inside [Min, Max] it matches this step and every later one, which
%% is encoded as the marker {all_gte, Step}.
steps_in_target_x(_Min, Max, _Step, X, _Vel) when X > Max ->
    [];
steps_in_target_x(Min, Max, Step, X, 0) when X >= Min, X =< Max ->
    %% At rest inside the target: hits forever from here on.
    [{all_gte, Step}];
steps_in_target_x(_Min, _Max, _Step, _X, 0) ->
    %% At rest short of the target: never arrives.
    [];
steps_in_target_x(Min, Max, Step, X, Vel) when X >= Min ->
    [Step | steps_in_target_x(Min, Max, Step + 1, X + Vel, Vel - 1)];
steps_in_target_x(Min, Max, Step, X, Vel) ->
    steps_in_target_x(Min, Max, Step + 1, X + Vel, Vel - 1).
%% Scan all remaining velocities up to Max (beyond Max the very first
%% step already overshoots), accumulating their {Velocity, Step} hits.
find_xs(_Min, Max, Vel, Points) when Vel > Max ->
    Points;
find_xs(Min, Max, Vel, Points) ->
    find_xs(Min, Max, Vel+1,
            [{Vel, Step} || Step <- steps_in_target_x(Min, Max, 1, Vel, Vel-1)]
            ++ Points).
%% Pair every X hit with every Y hit sharing the same step number; usort
%% removes duplicates arising when one {XVel, YVel} pair is inside the
%% target on several consecutive steps.
find_solutions(MinX, MaxX, MinY, MaxY) ->
    Xs = find_xs(MinX, MaxX),
    Ys = find_ys(MinY, MaxY),
    lists:usort(
      lists:foldl(fun({XVel, Step}, Acc) ->
                      [{XVel, YVel}
                       || YVel <- ys_at_step(Step, Ys)]
                      ++ Acc
                  end,
                  [],
                  Xs)).
%% Select the Y velocities whose hit-step pairs with the given X hit-step.
%% An X marker {all_gte, Step} (probe at rest inside the target) pairs
%% with any Y velocity arriving at Step or later; a plain integer step
%% must match exactly.
ys_at_step({all_gte, Step}, Ys) ->
    lists:filtermap(fun({YVel, S}) when S >= Step -> {true, YVel};
                       (_) -> false
                    end, Ys);
ys_at_step(Step, Ys) ->
    lists:filtermap(fun({YVel, S}) when S == Step -> {true, YVel};
                       (_) -> false
                    end, Ys).
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_limiter_schema).
-include_lib("typerefl/include/types.hrl").
-export([ roots/0, fields/1, to_rate/1
, to_bucket_rate/1, minimum_period/0]).
-define(KILOBYTE, 1024).
-type limiter_type() :: bytes_in
| message_in
| connection
| message_routing.
-type bucket_name() :: atom().
-type zone_name() :: atom().
-type rate() :: infinity | float().
-type bucket_rate() :: list(infinity | number()).
-typerefl_from_string({rate/0, ?MODULE, to_rate}).
-typerefl_from_string({bucket_rate/0, ?MODULE, to_bucket_rate}).
-reflect_type([ rate/0
, bucket_rate/0
]).
-export_type([limiter_type/0, bucket_name/0, zone_name/0]).
-import(emqx_schema, [sc/2, map/2]).
%% Root of this config schema: everything lives under "emqx_limiter".
roots() -> [emqx_limiter].
%% One limiter subtree per rate-limited dimension.
fields(emqx_limiter) ->
    [ {bytes_in, sc(ref(limiter), #{})}
    , {message_in, sc(ref(limiter), #{})}
    , {connection, sc(ref(limiter), #{})}
    , {message_routing, sc(ref(limiter), #{})}
    ];
%% A limiter: a global rate, per-zone rates, and named token buckets.
fields(limiter) ->
    [ {global, sc(rate(), #{})}
    , {zone, sc(map("zone name", rate()), #{})}
    , {bucket, sc(map("bucket id", ref(bucket)),
                  #{desc => "Token Buckets"})}
    ];
%% A bucket: the zone it belongs to plus aggregated and per-client rates.
fields(bucket) ->
    [ {zone, sc(atom(), #{desc => "the zone which the bucket in"})}
    , {aggregated, sc(bucket_rate(), #{})}
    , {per_client, sc(bucket_rate(), #{})}
    ].
%% minimum period is 100ms
%% All parsed rates in this schema are normalised to tokens per this period.
minimum_period() ->
    100.
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
%% Shorthand for a hocon reference to a field group in this module.
ref(Field) -> hoconsc:ref(?MODULE, Field).
%% Parse a rate description: either "infinity" or "Quota / Interval".
%% The result is normalised to tokens per minimum_period() (100 ms).
to_rate(Str) ->
    Tokens = [string:trim(T) || T <- string:tokens(Str, "/")],
    case Tokens of
        ["infinity"] ->
            {ok, infinity};
        [Quota, Interval] ->
            %% NOTE(review): a malformed quota makes this match crash with
            %% badmatch rather than returning {error, Str}; confirm whether
            %% the typerefl layer is expected to absorb that crash.
            {ok, Val} = to_quota(Quota),
            case emqx_schema:to_duration_ms(Interval) of
                {ok, Ms} when Ms > 0 ->
                    {ok, Val * minimum_period() / Ms};
                _ ->
                    {error, Str}
            end;
        _ ->
            {error, Str}
    end.
%% Parse a bucket rate description into {ok, [Rate, Capacity]}.
%% Two accepted shapes:
%%   "infinity, Capacity"          - unlimited rate, finite capacity
%%   "Quota / Interval, Capacity"  - rate derived from quota per interval,
%%                                   normalised to minimum_period() ticks
%% Returns {error, Str} on malformed input.
%% Fix: the two-token form previously asserted
%% `{ok, infinity} = to_quota(Rate)`, which crashed with badmatch on a
%% finite rate (or a parse error) instead of reporting {error, Str}.
to_bucket_rate(Str) ->
    Tokens = [string:trim(T) || T <- string:tokens(Str, "/,")],
    case Tokens of
        [Rate, Capa] ->
            %% Only an infinite rate is legal in the two-token form, and
            %% then the capacity must be finite.
            case {to_quota(Rate), to_quota(Capa)} of
                {{ok, infinity}, {ok, CapaVal}} when CapaVal =/= infinity ->
                    {ok, [infinity, CapaVal]};
                _ ->
                    {error, Str}
            end;
        [Quota, Interval, Capacity] ->
            %% NOTE(review): as in to_rate/1, a malformed quota/capacity
            %% here still crashes with badmatch (behaviour kept unchanged).
            {ok, Val} = to_quota(Quota),
            case emqx_schema:to_duration_ms(Interval) of
                {ok, Ms} when Ms > 0 ->
                    {ok, CapaVal} = to_quota(Capacity),
                    {ok, [Val * minimum_period() / Ms, CapaVal]};
                _ ->
                    {error, Str}
            end;
        _ ->
            {error, Str}
    end.
%% Parse a quota token: either "infinity" or a positive integer with an
%% optional unit suffix ("", "kb", "mb", "gb", case-insensitive).
%% Returns {ok, Value} or {error, Str}; an unknown unit suffix throws.
%% Fix: the unit character class was `[a-zA-z]`, which also matches the
%% ASCII characters between 'Z' and 'a' ("[\]^_`"); corrected to [a-zA-Z]
%% so e.g. "1_" is now a clean parse error instead of reaching apply_unit/2.
%% NOTE(review): "\s" inside an Erlang string literal is a literal space,
%% not the PCRE \s class, so only spaces (not tabs) are trimmed —
%% presumably intentional; confirm before widening.
to_quota(Str) ->
    {ok, MP} = re:compile("^\s*(?:(?:([1-9][0-9]*)([a-zA-Z]*))|infinity)\s*$"),
    Result = re:run(Str, MP, [{capture, all_but_first, list}]),
    case Result of
        {match, [Quota, Unit]} ->
            Val = erlang:list_to_integer(Quota),
            Unit2 = string:to_lower(Unit),
            {ok, apply_unit(Unit2, Val)};
        {match, [Quota]} ->
            %% Number with no unit suffix captured.
            {ok, erlang:list_to_integer(Quota)};
        {match, []} ->
            %% The literal "infinity" alternative matched.
            {ok, infinity};
        _ ->
            {error, Str}
    end.
%% Multiply a value by its (lower-cased) byte-unit suffix; units not in
%% the known set are rejected with a throw.
apply_unit("", Val) -> Val;
apply_unit("kb", Val) -> Val * ?KILOBYTE;
apply_unit("mb", Val) -> Val * ?KILOBYTE * ?KILOBYTE;
apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE;
apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit).
%% Copyright (c) 2018 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% @doc This module manages an opaque collection of statistics data used to
%% force garbage collection on `self()' process when hitting thresholds.
%% Namely:
%% (1) Total number of messages passed through
%% (2) Total data volume passed through
%% @end
-module(emqx_gc).
-export([init/1, inc/2, reset/0]).
-type st() :: #{ cnt => {integer(), integer()}
, oct => {integer(), integer()}
}.
-define(disabled, disabled).
-define(ENABLED(X), (is_integer(X) andalso X > 0)).
%% @doc Initialize force GC parameters.
%% Stores the per-process GC state in the calling process's dictionary
%% under this module's name: one entry per enabled threshold, each holding
%% an {Initial, Remaining} budget. A threshold that is not a positive
%% integer — or a non-map argument such as `false' — disables that check.
-spec init(false | map()) -> ok.
init(#{count := Count, bytes := Bytes}) ->
    Cnt = [{cnt, {Count, Count}} || ?ENABLED(Count)],
    Oct = [{oct, {Bytes, Bytes}} || ?ENABLED(Bytes)],
    erlang:put(?MODULE, maps:from_list(Cnt ++ Oct)),
    ok;
init(_) -> erlang:put(?MODULE, #{}), ok.
%% @doc Increase count and bytes stats in one call,
%% ensure gc is triggered at most once, even if both thresholds are hit.
-spec inc(pos_integer(), pos_integer()) -> ok.
inc(Cnt, Oct) ->
    mutate_pd_with(fun(St) -> inc(St, Cnt, Oct) end).
%% @doc Reset counters to zero.
-spec reset() -> ok.
reset() ->
    mutate_pd_with(fun(St) -> reset(St) end).
%% ======== Internals ========
%% mutate gc stats numbers in process dict with the given function
%% (read state, transform with F, write back; always returns ok).
mutate_pd_with(F) ->
    St = F(erlang:get(?MODULE)),
    erlang:put(?MODULE, St),
    ok.
%% Increase count and bytes stats in one call,
%% ensure gc is triggered at most once, even if both thresholds are hit.
%% If the count threshold fires, the byte increment is intentionally
%% skipped for this call: do_gc/1 has already reset both counters.
-spec inc(st(), pos_integer(), pos_integer()) -> st().
inc(St0, Cnt, Oct) ->
    case do_inc(St0, cnt, Cnt) of
        {true, St} ->
            St;
        {false, St1} ->
            {_, St} = do_inc(St1, oct, Oct),
            St
    end.
%% Reset counters to zero.
reset(St) -> reset(cnt, reset(oct, St)).
%% Deduct Num from the remaining budget for Key; returns {GcTriggered, St}.
%% A missing key means that threshold is disabled; an exhausted budget
%% forces a GC and restores both budgets.
-spec do_inc(st(), cnt | oct, pos_integer()) -> {boolean(), st()}.
do_inc(St, Key, Num) ->
    case maps:get(Key, St, ?disabled) of
        ?disabled ->
            {false, St};
        {Init, Remain} when Remain > Num ->
            {false, maps:put(Key, {Init, Remain - Num}, St)};
        _ ->
            {true, do_gc(St)}
    end.
%% Force a garbage collection of the calling process, then restore both
%% counters to their initial budgets.
do_gc(St) ->
    erlang:garbage_collect(),
    reset(St).
%% Restore one counter to its configured initial budget; a key that is
%% absent (that threshold is disabled) leaves the state untouched.
reset(Key, St) ->
    case maps:find(Key, St) of
        {ok, {Init, _Remain}} -> St#{Key := {Init, Init}};
        error -> St
    end.
%%--------------------------------------------------------------------
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% Tests that use error injection should go here, to avoid polluting
%% the logs and scaring people
-module(ekka_mnesia_error_injection_SUITE).
-compile(export_all).
-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
%% Run every t_* function in this module as a Common Test case.
all() -> ekka_ct:all(?MODULE).
init_per_suite(Config) ->
    Config.
end_per_suite(_Config) ->
    ok.
%% Inject random crashes into the ekka rlog agent while a counter
%% transaction stream runs, then verify the replicant still bootstraps and
%% all nodes converge to the same table contents.
%% Fix: the middle core node was bound as `N2' but never used, which
%% triggers an "unused variable" compiler warning; renamed to `_N2'.
t_agent_restart(_) ->
    Cluster = ekka_ct:cluster([core, core, replicant], ekka_mnesia_test_util:common_env()),
    CounterKey = counter,
    ?check_trace(
       try
           Nodes = [N1, _N2, N3] = ekka_ct:start_cluster(ekka, Cluster),
           ekka_mnesia_test_util:wait_shards(Nodes),
           ekka_mnesia_test_util:stabilize(1000),
           %% Everything in ekka agent will crash
           ?inject_crash( #{?snk_meta := #{domain := [ekka, rlog, agent|_]}}
                        , snabbkaffe_nemesis:random_crash(0.4)
                        ),
           ok = rpc:call(N1, ekka_transaction_gen, counter, [CounterKey, 100, 100]),
           ekka_mnesia_test_util:stabilize(5100),
           ekka_mnesia_test_util:compare_table_contents(test_tab, Nodes),
           N3
       after
           ekka_ct:teardown_cluster(Cluster)
       end,
       fun(N3, Trace) ->
           ?assert(ekka_rlog_props:replicant_bootstrap_stages(N3, Trace)),
           %ekka_rlog_props:counter_import_check(CounterKey, N3, Trace),
           %% More than one injected crash must actually have happened.
           ?assert(length(?of_kind(snabbkaffe_crash, Trace)) > 1)
       end).
%% Same scenario as t_agent_restart/1 but with crashes injected across the
%% whole ekka rlog domain at a lower probability.
%% Fix: the middle core node was bound as `N2' but never used, which
%% triggers an "unused variable" compiler warning; renamed to `_N2'.
t_rand_error_injection(_) ->
    Cluster = ekka_ct:cluster([core, core, replicant], ekka_mnesia_test_util:common_env()),
    CounterKey = counter,
    ?check_trace(
       try
           Nodes = [N1, _N2, N3] = ekka_ct:start_cluster(ekka, Cluster),
           ekka_mnesia_test_util:wait_shards(Nodes),
           ekka_mnesia_test_util:stabilize(1000),
           %% Everything in ekka RLOG will crash
           ?inject_crash( #{?snk_meta := #{domain := [ekka, rlog|_]}}
                        , snabbkaffe_nemesis:random_crash(0.1)
                        ),
           ok = rpc:call(N1, ekka_transaction_gen, counter, [CounterKey, 300, 100]),
           ekka_mnesia_test_util:stabilize(5000),
           ekka_mnesia_test_util:compare_table_contents(test_tab, Nodes),
           N3
       after
           ekka_ct:teardown_cluster(Cluster)
       end,
       fun(N3, Trace) ->
           ?assert(ekka_rlog_props:replicant_bootstrap_stages(N3, Trace)),
           ?assert(ekka_rlog_props:counter_import_check(CounterKey, N3, Trace) > 0),
           %% More than one injected crash must actually have happened.
           ?assert(length(?of_kind(snabbkaffe_crash, Trace)) > 1)
       end).
-module(borda).
%% API exports
-export([
rankings/2
]).
-include("include/common.hrl").
-include("include/elections.hrl").
%%====================================================================
%% Guard Macros
%%====================================================================
-define(IS_VALID_LABEL(Label), Label =:= base0 orelse
Label =:= base1 orelse
Label =:= dowdell orelse
Label =:= nauru).
%%====================================================================
%% API functions
%%====================================================================
% https://en.wikipedia.org/wiki/Borda_count#Example
%% Compute a Borda ranking for the given counting scheme over a non-empty
%% ballot list; the result is sorted by descending value. An unrecognised
%% Label yields a human-readable binary instead of an error tuple.
%% NOTE(review): all ballots are assumed to rank the same candidate set as
%% the first ballot — confirm against ballot:candidates/1.
-spec rankings(label(), [ballot(), ...]) -> [{candidate_name(), number()}, ...] | binary().
rankings(Label, Ballots) when ?IS_VALID_LABEL(Label) ->
    Candidates = ballot:candidates(hd(Ballots)),
    Map = rankings(Label, Ballots, length(Candidates), #{}),
    L = maps:to_list(Map),
    lists:sort(fun by_votes/2, L);
rankings(_, _) -> <<"Please restrict the first argument to base0, base1, dowdell, or nauru.">>.
%%====================================================================
%% Internal functions
%%====================================================================
%% Fold one ballot's candidate list into the tally, crediting each
%% candidate with one vote at their 1-based ballot position.
-spec add_votes(label(), [candidate(), ...], map()) -> map().
add_votes(Label, [C | Cs], Acc) when is_map(Acc) -> add_votes(Label, [C | Cs], Acc, 1).
%% Position counts up as we walk down the ballot; Label is threaded
%% through unchanged (only used later by value_of_votes/3).
add_votes(_, [], Acc, _) -> Acc;
add_votes(Label, [C | Cs], Acc, Position) ->
    CN = candidate:name(C),
    Acc2 = add_votes_at_position(CN, Position, Acc),
    add_votes(Label, Cs, Acc2, Position + 1).
-spec add_votes_at_position(candidate_name(), pos_integer(), map()) -> map().
%% Record one vote for candidate CN at 1-based ballot position Position in
%% the nested tally #{CandidateName => #{Position => Count}}.
add_votes_at_position(CN, Position, Acc) ->
    PositionCounts = maps:get(CN, Acc, #{}),
    Updated = maps:update_with(Position, fun(N) -> N + 1 end, 1, PositionCounts),
    Acc#{CN => Updated}.
%% Convert the accumulated per-position vote counts into the final
%% candidate -> Borda value map; keys are consumed one at a time from
%% `From' until the key list is exhausted.
-spec borda_values(label(), pos_integer(), map()) -> map().
borda_values(Label, CandCount, Acc) when is_atom(Label) andalso is_map(Acc) ->
    borda_values(Label, CandCount, maps:keys(Acc), Acc, #{}).
%% Note: the `#{}' in this head is a map *pattern* and matches any map;
%% termination is driven by the empty key list, not by From being empty.
borda_values(_Label, _CandCount, [], #{}, Final) -> Final;
borda_values(Label, CandCount, [CandName | CNs], From, Acc) ->
    Votes = maps:get(CandName, From),
    From2 = maps:without([CandName], From),
    Acc2 = maps:put(CandName, value_of_votes(Label, CandCount, Votes), Acc),
    borda_values(Label, CandCount, CNs, From2, Acc2).
%% Sort comparator for {Candidate, Value} pairs: larger Borda value first.
by_votes({_, V1}, {_, V2}) -> V1 > V2.
%% Accumulate every ballot into the tally, then turn it into Borda values.
rankings(Label, [], CandCount, Acc) -> borda_values(Label, CandCount, Acc);
rankings(Label, [B | Bs], CandCount, Acc) when is_map(Acc) ->
    Acc2 = add_votes(Label, ballot:candidates(B), Acc),
    rankings(Label, Bs, CandCount, Acc2).
%% Point value of finishing at 1-based Position in a field of CandCount
%% candidates under the given counting scheme:
%%   base0  - CandCount - Position (last place scores 0)
%%   base1  - CandCount - Position + 1 (last place scores 1)
%%   nauru  - 1/Position (the Dowdall system used in Nauru); `dowdell' is
%%            kept as an alias for backwards compatibility with callers.
%% Fix: the specs previously declared non_neg_integer(), but the nauru
%% clauses return floats for Position > 1; widened to number().
-spec value(base0 | base1 | dowdell | nauru, pos_integer(), pos_integer()) -> number().
value(base0, Position, CandCount) -> CandCount - Position;
value(base1, Position, CandCount) -> value(base0, Position, CandCount) + 1;
% Cf. https://en.wikipedia.org/wiki/Borda_count#Dowdall_system_(Nauru)
value(dowdell, Position, CandCount) -> value(nauru, Position, CandCount);
value(nauru, 1, _CandCount) -> 1;
value(nauru, Position, _CandCount) -> 1.0 / Position.
%% Total Borda value of one candidate's votes; Votes maps each 1-based
%% position to the number of ballots ranking the candidate there.
-spec value_of_votes(base0 | base1 | dowdell | nauru, pos_integer(), map()) -> number().
value_of_votes(Label, CandCount, Votes) when is_map(Votes) ->
    Values = lists:map(fun(Idx) ->
                           EachValue = value(Label, Idx, CandCount),
                           VoteCount = maps:get(Idx, Votes),
                           EachValue * VoteCount
                       end, maps:keys(Votes)),
    lists:sum(Values).
%% @doc Model for fetching and updating the Git clone of Zotonic.
%% The Git checkout is located in "priv/data/zotonic-git".
%%
%% This file is a model. You will see functions like 'm_get' that
%% implement the model behaviour. Models are always located in the
%% directory 'models' and their filename always start with 'm_'.
%%
%% @author <NAME> <<EMAIL>>
%% @copyright 2020 <NAME>
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% Note that the filename starts with m_, followed by the module name.
%% This ensures that there are no unexpected name clashes, which could
%% have easily occured if the module was named something like 'm_git'.
-module(m_zotonicwww2_git).
%% Model behaviour, from zotonic_core/src/behaviours/
%% Only the m_get/3 function is required.
-behaviour(zotonic_model).
-export([
m_get/3,
m_post/3,
task_rebuild/1,
build_edoc/1,
build_doc/1,
clone/1,
hash/1,
pull/1,
git_dir/1,
edoc_dir/1,
doc_dir/1
]).
%% Include the central definitions of Zotonic. Useful for macro
%% definitions like ?DEBUG.
-include_lib("zotonic_core/include/zotonic.hrl").
%% @doc Handle GET requests for this model. Can be called from the
%% templates (m.zotonicwww2_git), the API (/api/model/zotonicwww2_git/get/...)
%% or via MQTT (topic model/zotonicwww2_git/get).
%%
%% The first argument is the split path of the request (after 'get').
%%
%% The second argument is the MQTT messages, if any. For template calls
%% this could be 'undefined'. API calls also construct a MQTT message, as
%% the API routes calls via the MQTT tree and the zotonic_model.erl (in
%% zotonic_core/src/support).
%%
%% The m_get function consumes as much of the Path as is needed, it must
%% return its result together with the unconsumed part of the path. The
%% zotonic_model functions will do a further lookup of the Path remainder
%% in the return value.
%%
%% On an error an error tuple should be returned. For the template routines
%% this maps to 'undefined' and is ignored. The API and MQTT will return
%% a payload with the error to the caller.
%% NOTE(review): there is deliberately no catch-all clause, so any other
%% path raises function_clause rather than returning an error tuple;
%% confirm this matches the other models in this site.
-spec m_get( Path :: list(), zotonic_model:opt_msg(), z:context() ) -> zotonic_model:return().
m_get( [ <<"hash">> | Rest ], _Payload, Context) ->
    % Only for authenticated users, as we are running a command line
    % program and don't want to do that to anonymous bots and users.
    case z_auth:is_auth(Context) of
        true ->
            % Return the hash of the current checkout
            case hash(Context) of
                {ok, Hash} ->
                    % Note the 'Rest', this is the non-consumed part of the
                    % path. It is often used by templates for further lookups
                    % in returned maps or other structured values.
                    {ok, {Hash, Rest}};
                {error, _} = Error ->
                    Error
            end;
        false ->
            {error, eacces}
    end;
m_get( [ <<"rebuild">>, <<"hash">> | Rest ], _Payload, Context) ->
    % The task_rebuild stores the hash of the last rebuild in the
    % config key site.rebuild_hash.
    Hash = m_config:get_value(site, rebuild_hash, Context),
    {ok, {Hash, Rest}}.
%% This is a post handler for HTTP posts to `/api/model/zototonicwww2_git/post`
%% and MQTT publish to "model/zotonicwww2_git/post"
%% Note that a post handler always consumes the whole path.
-spec m_post( Path :: list( binary() ), zotonic_model:opt_msg(), z:context() ) -> {ok, term()} | ok | {error, term()}.
m_post( [ <<"rebuild">>, Secret ], _Payload, Context) ->
    % Compare the value in the config tables with the passed secret.
    % Config values can be set with m_config:set_value/4 or in the
    % admin on "/admin/config"
    %% NOTE(review): this is a plain pattern-match equality, not a
    %% constant-time comparison — a timing side channel is theoretically
    %% possible; confirm whether that matters for this deployment.
    case m_config:get_value(site, rebuild_secret, Context) of
        Secret ->
            % As a build takes a long time we schedule a build task.
            % The task is slightly delayed so that repetitive pushes
            % are started after a small period of inactivity.
            z_pivot_rsc:insert_task_after(
                10,           % Seconds or a date
                ?MODULE,      % This module
                task_rebuild, % Prepend task functions with 'task_'
                <<>>,         % Give a key for multiple tasks with same mod:fun
                [],           % Arguments, non needed for this task
                Context),
            {ok, <<"queued">>};
        _ ->
            % Log a message to the lager logs
            lager:info("Docs rebuild request with wrong secret from ~p", [ m_req:get(peer, Context) ]),
            % Try to use posix error codes
            {error, eacces}
    end.
%% @doc Documentation rebuild task scheduled by the 'm_post' handler for
%% 'docs-rebuild' above. This task is executed by z_pivot_rsc. Only a single
%% task is executed in parallel.
%% The pipeline is: git pull -> build the html docs -> import them ->
%% build the edoc -> record the new checkout hash in site.rebuild_hash.
-spec task_rebuild( z:context() ) -> ok | {error, term()}.
task_rebuild(Context) ->
    z_utils:pipeline([
        fun pull/1,
        fun build_doc/1,
        fun zotonicwww2_parse_docs:import/1,
        fun build_edoc/1,
        %% NOTE(review): this final fun is arity 0 while the others are
        %% arity 1 -- confirm that z_utils:pipeline accepts mixed arities,
        %% otherwise this step could fail with badarity.
        fun() ->
            {ok, Hash} = hash(Context),
            lager:info("Rebuild of docs success for '~s'", [ Hash ]),
            m_config:set_value(site, rebuild_hash, Hash, Context),
            ok
        end
    ],
    [ Context ]).
%% @doc Build Zotonic, the html docs and move them to the doc_dir. Returns
%% an error or the current hash. This command takes a long time to run.
-spec build_doc( z:context() ) -> {ok, binary()} | {error, term()}.
build_doc(Context) ->
    ok = z_filelib:ensure_dir(doc_dir(Context)),
    Cmds = [
        "rm -rf doc/_build",
        "make docs",
        "rm -rf ../doc/html",
        "mv doc/_build/html ../doc/."
    ],
    run_gitcmds(Cmds, Context).
%% @doc Clone a fresh checkout of the zotonic repository. Only running if
%% the priv/data/zotonic-git directory is not present.
-spec clone( z:context() ) -> {ok, binary()} | {error, term()}.
clone(Context) ->
    Dir = git_dir(Context),
    case filelib:is_file(Dir) of
        true ->
            % A checkout is already present -- refuse to clobber it.
            {error, eexist};
        false ->
            % Clone runs from the parent data directory; git creates the
            % 'zotonic-git' subdirectory itself.
            DataDir = unicode:characters_to_list( filename:dirname(Dir) ),
            ok = z_filelib:ensure_dir(Dir),
            Cmd = "git clone https://github.com/zotonic/zotonic.git zotonic-git",
            Options = [
                sync,           % wait for the command to finish
                stdout,         % capture standard output
                {cd, DataDir}
            ],
            lager:info("Command: \"~s\"", [ Cmd ]),
            case exec:run(Cmd, Options) of
                {ok, [ {stdout, Output} ]} ->
                    % The escape-filtered output is only used for logging;
                    % the raw output is what gets returned.
                    % NOTE(review): returning iolist_to_binary(Output1)
                    % may have been intended -- confirm.
                    Output1 = filter_output(Output),
                    lager:info("Command output: ~s", [ Output1 ]),
                    {ok, iolist_to_binary(Output)};
                {ok, []} ->
                    % Command produced no stdout at all.
                    lager:info("Command output: "),
                    {ok, <<>>};
                {error, _} = Error ->
                    lager:error("Command \"~s\" error: ~p", [ Cmd, Error ]),
                    Error
            end
    end.
%% Remove escape sequences, especially from the rebar3 output which
%% adds color escapes. Tab, newline and carriage return are kept, as is
%% every byte >= 32; all other control bytes are dropped.
filter_output(Output) ->
    << <<C>> || <<C>> <= iolist_to_binary(Output),
                C =:= $\t orelse C =:= $\n orelse C =:= $\r orelse C >= 32 >>.
%% @doc Build Zotonic, the edoc and move them to the edoc_dir. Returns
%% an error or the current hash. This command takes a long time to run.
-spec build_edoc( z:context() ) -> {ok, binary()} | {error, term()}.
build_edoc(Context) ->
    ok = z_filelib:ensure_dir(edoc_dir(Context)),
    Cmds = [
        "rm -rf doc/_build",
        "make",
        "make edocs",
        "rm -rf ../doc/edoc",
        "mv doc/_build/edoc ../doc/."
    ],
    run_gitcmds(Cmds, Context).
%% @doc Return the current checkout hash of the git repo, i.e. the full
%% commit hash of HEAD in the local zotonic checkout.
-spec hash( z:context() ) -> {ok, binary()} | {error, term()}.
hash(Context) ->
    Cmd = "git log -1 --pretty=format:%H",
    run_gitcmd(Cmd, Context).
%% @doc Pull changes in the 'priv/data/zotonic-git' directory. Returns the
%% new current hash.
-spec pull( z:context() ) -> {ok, binary()} | {error, term()}.
pull(Context) ->
    %% 'git clean -f' removes stray files so the pull cannot conflict.
    %% run_gitcmds stops at the first failing command, so this behaves
    %% exactly like running clean and pull with explicit error checks.
    case run_gitcmds(["git clean -f", "git pull"], Context) of
        {ok, _} ->
            hash(Context);
        {error, _} = Error ->
            Error
    end.
%% @doc Run a series of commands in the 'priv/data/zotonic-git' directory.
%% Commands after a failing one are skipped; the result is the output of
%% the last command, or the first error encountered.
-spec run_gitcmds( list( string() ), z:context() ) -> {ok, binary()} | {error, term()}.
run_gitcmds(Cmds, Context) ->
    run_gitcmds_1(Cmds, {ok, <<>>}, Context).

%% @doc Tail-recursive driver that short-circuits on the first error.
run_gitcmds_1([], Result, _Context) ->
    Result;
run_gitcmds_1(_Cmds, {error, _} = Error, _Context) ->
    Error;
run_gitcmds_1([Cmd | Rest], {ok, _}, Context) ->
    run_gitcmds_1(Rest, run_gitcmd(Cmd, Context), Context).
%% @doc Run a supervised command in the 'priv/data/zotonic-git' directory.
%% Runs synchronously via the 'exec' module (erlexec), capturing stdout,
%% and returns the captured output as a binary.
-spec run_gitcmd( string(), z:context() ) -> {ok, binary()} | {error, term()}.
run_gitcmd(Cmd, Context) ->
    Dir = git_dir(Context),
    Options = [
        sync,                                   % wait for the command to finish
        stdout,                                 % capture standard output
        {cd, unicode:characters_to_list(Dir)}   % run inside the git checkout
    ],
    lager:info("Command: \"~s\"", [ Cmd ]),
    case exec:run(Cmd, Options) of
        {ok, [ {stdout, Output} ]} ->
            lager:info("Command output: ~s", [ Output ]),
            {ok, iolist_to_binary(Output)};
        {ok, []} ->
            % Command produced no stdout at all.
            lager:info("Command output: "),
            {ok, <<>>};
        {error, _} = Error ->
            lager:error("Command \"~s\" error: ~p", [ Cmd, Error ]),
            Error
    end.
%% @doc Return the directory of the git checkout.
-spec git_dir( z:context() ) -> file:filename_all().
git_dir(Context) ->
    filename:join(base_dir(Context), <<"zotonic-git">>).
%% @doc Return the directory for the generated edoc documentation (from the erlang sources)
-spec edoc_dir( z:context() ) -> file:filename_all().
edoc_dir(Context) ->
    DocRoot = filename:join(base_dir(Context), <<"doc">>),
    filename:join(DocRoot, <<"edoc">>).
%% @doc Return the directory for the generated html documentation (from ReStructuredText)
-spec doc_dir( z:context() ) -> file:filename_all().
doc_dir(Context) ->
    DocRoot = filename:join(base_dir(Context), <<"doc">>),
    filename:join(DocRoot, <<"html">>).
%% @doc Return the base directory for all data. (The z_path call name
%% suggests the 'data' files subdirectory is created when missing --
%% confirm against z_path.)
-spec base_dir( z:context() ) -> file:filename_all().
base_dir(Context) ->
    z_path:files_subdir_ensure(<<"data">>, Context).
%% @doc: If you wish to implement your own backend for storing
%% registers, your module needs to implement these interfaces. The
%% backend modules have quite a lot of responsibility (detailed below)
%% to allow for backend-specific optimizations.
-module(hyper_register).

%% @doc: Creates a new instance of the backend. The return value of
%% this function will be passed to all functions in this module.
-callback new(P :: hyper:precision()) ->
    hyper:registers().

%% @doc: Set the register to the given value, *only* if the value
%% already stored is lower than the new value. The backend needs to
%% ensure the register value is only allowed to increase.
-callback set(Index :: integer(),
              Value :: integer(),
              hyper:registers()) ->
    hyper:registers().

%% @doc: Compact is always called before any attempt at reading (sum,
%% zero count, etc) or merging. It is intended to give backends that
%% buffer the writes a chance to flush the buffer before the registers
%% are needed.
-callback compact(hyper:registers()) ->
    hyper:registers().

%% @doc: Merge any number of registers, used to calculate the
%% union. For two register values at the same index, the max value
%% must be in the resulting register.
-callback max_merge([hyper:registers()]) ->
    hyper:registers().

%% @doc: Same as max_merge/1 but used when we know only two filters
%% are merged.
-callback max_merge(hyper:registers(),
                    hyper:registers()) ->
    hyper:registers().

%% @doc: Reduce the precision of the registers. Used for mixed-precision
%% union by first reducing the precision to the lowest of all filters.
-callback reduce_precision(hyper:precision(),
                           hyper:registers()) ->
    hyper:registers().

%% @doc: Sum of 2^-R where R is the value in each register.
-callback register_sum(hyper:registers()) ->
    float().

%% @doc: Count of registers set to 0.
-callback zero_count(hyper:registers()) ->
    integer().

%% @doc: Encode and decode are called to convert the in-memory
%% representation of the backend to the serialized format. Must return
%% one binary where each register is encoded as an 8-bit integer.
-callback encode_registers(hyper:registers()) ->
    binary().

-callback decode_registers(binary(), hyper:precision()) ->
    hyper:registers().

%% @doc: Size in bytes used to represent the registers in memory.
-callback bytes(hyper:registers()) ->
    integer().
%%%------------------------------------------------------------------------
%% Copyright 2019, OpenTelemetry Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc The Accumulator receives metric updates and when `collect' is called
%% it sweeps through the instruments calling `checkpoint' with the defined
%% aggregator for each and submitting to the Integrator.
%% @end
%%%-------------------------------------------------------------------------
-module(ot_metric_accumulator).

-behaviour(gen_server).

-export([start_link/1,
         active_table/0,
         record/2,
         record/3,
         observe/3,
         collect/0,
         lookup_active/2]).

-export([init/1,
         handle_call/3,
         handle_cast/2]).

-include_lib("stdlib/include/ms_transform.hrl").
-include_lib("opentelemetry_api/include/opentelemetry.hrl").
-include("ot_meter.hrl").

-define(ACTIVE_TAB, active_instrument_updates).

%% Match spec: look up the active instrument for {Name, LabelSet} and
%% return its number kind together with its aggregator module.
-define(active_ms(Name, LabelSet),
        ets:fun2ms(fun(#active_instrument{key=Key,
                                          instrument=#instrument{number_kind=InputType},
                                          aggregator=Aggregator}) when Key =:= {Name, LabelSet} ->
                           {InputType, Aggregator}
                   end)).

-record(state, {}).

start_link(Opts) ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, Opts, []).

%% @doc Name of the public ETS table holding the active instruments.
active_table() ->
    ?ACTIVE_TAB.

%% @doc Record a measurement against an already-bound instrument; the
%% bound form carries the aggregator so no table lookup is needed.
-spec record(ot_meter:bound_instrument(), number()) -> boolean().
record({Key, {InputType, Aggregator}}, Number) ->
    Aggregator:update(?ACTIVE_TAB, Key, InputType, Number);
record(#active_instrument{key=Key,
                          instrument=#instrument{number_kind=InputType},
                          aggregator=Aggregator}, Number) ->
    Aggregator:update(?ACTIVE_TAB, Key, InputType, Number).

%% @doc Record a measurement by instrument name and label set, creating
%% the active instrument on first use.
-spec record(ot_meter:name(), ot_meter:label_set(), number()) -> boolean() | unknown_instrument.
record(Name, LabelSet, Number) ->
    case lookup_active(Name, LabelSet) of
        unknown_instrument ->
            %% an Instrument must exist to create an Active Instrument
            unknown_instrument;
        {InputType, Aggregator} ->
            Aggregator:update(?ACTIVE_TAB, {Name, LabelSet}, InputType, Number)
    end.

%% @doc Record an observation from an observer callback. Observers
%% always use the last-value aggregator.
observe(#instrument{name=Name}, Number, LabelSet) ->
    %% Ensure an active instrument exists for this {Name, LabelSet};
    %% lookup_active creates one as a side effect when it is missing.
    _ = lookup_active(Name, LabelSet),
    ot_metric_aggregator_last_value:update(?ACTIVE_TAB, {Name, LabelSet}, observer, Number),
    ok.

%% @doc Find (or lazily create) the active instrument for the given
%% name/label set and return its number kind and aggregator module.
-spec lookup_active(instrument() | ot_meter:name(), ot_meter:label_set())
                   -> {ot_meter:number_kind(), module()} | unknown_instrument.
lookup_active(Instrument=#instrument{name=Name}, LabelSet) ->
    MatchSpec = ?active_ms(Name, LabelSet),
    case ets:select(?ACTIVE_TAB, MatchSpec) of
        [{InputType, Aggregator}] ->
            {InputType, Aggregator};
        [] ->
            add_active_instrument(Instrument, Name, LabelSet)
    end;
lookup_active(Name, LabelSet) ->
    MatchSpec = ?active_ms(Name, LabelSet),
    case ets:select(?ACTIVE_TAB, MatchSpec) of
        [{InputType, Aggregator}] ->
            {InputType, Aggregator};
        [] ->
            case ot_meter_default:lookup_instrument(Name) of
                unknown_instrument ->
                    unknown_instrument;
                Instrument ->
                    add_active_instrument(Instrument, Name, LabelSet)
            end
    end.

%% @doc Run all observer callbacks and checkpoint every active
%% instrument so the Integrator can read a consistent snapshot.
collect()->
    gen_server:call(?MODULE, collect).

init(_Opts) ->
    %% This ETS table is required for other parts to not crash so we create
    %% it in init and not in a handle_continue or whatever else.
    %% No heir is needed since active metrics are created dynamically.
    _ = ets:new(?ACTIVE_TAB, [named_table,
                              public,
                              {keypos, #active_instrument.key},
                              {write_concurrency, true},
                              ordered_set]),
    {ok, #state{}}.

handle_call(collect, _From, State) ->
    %% TODO: should observers just checkpoint in the first place?
    %% TODO: should have a timeout on observer callbacks
    run_observers(ets:tab2list(ot_meter_default:observer_tab())),
    MS = ets:fun2ms(fun(#active_instrument{key=Key,
                                           aggregator=Aggregator}) ->
                            {Key, Aggregator}
                    end),
    run_checkpoints(ets:select(?ACTIVE_TAB, MS, 20)),
    {reply, ok, State};
handle_call(_Msg, _From, State) ->
    %% NOTE(review): {noreply, ...} on an unknown call leaves the caller
    %% blocked until its timeout -- confirm this is intended.
    {noreply, State}.

handle_cast(_Msg, State) ->
    {noreply, State}.

%%

%% Checkpoint every active instrument, following the ets select
%% continuation so that *all* batches are processed. The previous code
%% returned `ets:select(Continuation)' instead of recursing, which
%% silently dropped every match beyond the first batch of 20.
run_checkpoints('$end_of_table') ->
    ok;
run_checkpoints({Matches, Continuation}) ->
    [Aggregator:checkpoint(?ACTIVE_TAB, Key) || {Key, Aggregator} <- Matches],
    run_checkpoints(ets:select(Continuation)).

run_observers([]) ->
    ok;
run_observers([Observer | Rest]) ->
    run_observer(Observer),
    run_observers(Rest).

run_observer(#observer{instrument=ObserverInstrument,
                       callback=Callback}) ->
    %% A crashing observer callback must not abort the whole sweep;
    %% its failure is intentionally swallowed here.
    try Callback(ObserverInstrument)
    catch _:_ ->
            %% TODO: log an error
            ok
    end.

%% Map an instrument kind to its default aggregator implementation.
%% NOTE(review): kinds other than these three crash with function_clause
%% here -- confirm no other kinds can reach add_active_instrument.
aggregator(#instrument{kind=ot_counter}) ->
    ot_metric_aggregator_sum;
aggregator(#instrument{kind=ot_sum_observer}) ->
    ot_metric_aggregator_last_value;
aggregator(#instrument{kind=ot_value_recorder}) ->
    ot_metric_aggregator_mmsc.

%% Insert a fresh active instrument initialized to its aggregator's
%% initial value; insert_new keeps a concurrent insert from clobbering.
add_active_instrument(Instrument=#instrument{number_kind=InputType}, Name, LabelSet) ->
    Aggregator = aggregator(Instrument),
    InitialValue = Aggregator:initial_value(InputType),
    ActiveInstrument = #active_instrument{key={Name, LabelSet},
                                          instrument=Instrument,
                                          aggregator=Aggregator,
                                          current=InitialValue},
    _ = ets:insert_new(?ACTIVE_TAB, ActiveInstrument),
    {InputType, Aggregator}.
%%
%% Copyright (c) dushin.net
%% All rights reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
-module(rational).

-export([add/2, subtract/2, multiply/2, divide/2, normalize/1, simplify/1, reduce/1, to_decimal/2, round/1]).

-type numerator() :: integer().
-type denominator() :: integer().
-type fraction() :: {numerator(), denominator()}.
-type composite() :: {integer(), fraction()}.
-type rational() :: integer() | fraction() | composite().

%%
%% @param A rational
%% @param B rational
%% @return A + B as a (possibly top-heavy) fraction.
%%
-spec add(A::rational(), B::rational()) -> fraction().
add(A, B) ->
    add_rational(normalize(A), normalize(B)).

%% @private Cross-multiply onto the common denominator D1*D2.
add_rational({N1, D1}, {N2, D2}) ->
    {N1 * D2 + N2 * D1, D1 * D2}.

%%
%% @param A rational
%% @param B rational
%% @return A - B as a (possibly top-heavy) fraction.
%%
subtract(A, B) ->
    subtract_rational(normalize(A), normalize(B)).

%% @private
subtract_rational({N1, D1}, {N2, D2}) ->
    {N1 * D2 - N2 * D1, D1 * D2}.

%%
%% @param A rational
%% @param B rational
%% @return A * B as a (possibly top-heavy) fraction.
%%
multiply(A, B) ->
    multiply_rational(normalize(A), normalize(B)).

%% @private
multiply_rational({N1, D1}, {N2, D2}) ->
    {N1 * N2, D1 * D2}.

%%
%% @param A rational
%% @param B rational
%% @return A / B as a (possibly top-heavy) fraction, the integer 0 when
%% A is zero, or 'undefined' when dividing by zero.
%%
divide(A, B) ->
    divide_rational(normalize(A), normalize(B)).

%% @private
divide_rational({0, _}, _) ->
    0;
divide_rational(_, {0, _}) ->
    undefined;
divide_rational({N1, D1}, {N2, D2}) ->
    {N1 * D2, D1 * N2}.

%%
%% @param R rational
%% @return (possibly top-heavy) fraction.
%%
-spec normalize(R::rational()) -> fraction().
normalize(X) when is_integer(X) -> {X, 1};
normalize({I, {N, D}}) when is_integer(I) andalso is_integer(N) andalso is_integer(D) ->
    {I * D + N, D};
normalize({N, D} = R) when is_integer(N) andalso is_integer(D) ->
    R.

%%
%% @param F (possibly top-heavy) fraction
%% @return simplified rational, a fraction, integer or composite.
%%
-spec simplify(F::fraction()) -> rational().
% simplify({N, 1}) ->
%     N;
% simplify({N, N}) ->
%     1;
% simplify({0, D}) ->
%     0;
% simplify({_N, 0}) ->
%     undefined;
simplify({N, D}) when is_integer(D) andalso N > D ->
    case N rem D of
        0 ->
            {N div D, {0, D}};
        R ->
            {N div D, reduce({R, D})}
    end;
simplify(X) ->
    X.

%%
%% @param F (possibly top-heavy) fraction
%% @return reduced fraction (possibly divided by gcd).
%%
-spec reduce(F::fraction()) -> fraction().
reduce({N, D}) when is_integer(N) andalso is_integer(D) ->
    case gcd(N, D) of
        1 ->
            {N, D};
        G ->
            {N div G, D div G}
    end.

%% @private Euclid's algorithm.
gcd(A, B) when B > A ->
    gcd(B, A);
gcd(A, B) ->
    case A rem B of
        0 -> B;
        R ->
            gcd(B, R)
    end.

%%
%% @param F (possibly top-heavy) fraction
%% @param P desired precision
%% @return equivalent fraction whose denominator is 10^{Precision}.
%% @doc Note that no rounding is performed on the last digit,
%% and that in general the returned fraction is an estimate.
%%
-spec to_decimal(F::fraction(), P::non_neg_integer()) -> rational().
to_decimal({N, D}, Precision) when is_integer(N) andalso is_integer(D) ->
    %% The patterns below use fresh variables N1/D1: simplify/1 may
    %% reduce the denominator (e.g. {10,4} -> {2,{1,2}}), so re-using
    %% the bound D here used to raise case_clause for such inputs.
    case simplify({N, D}) of
        {I, {N1, D1}} ->
            {I, to_decimal({N1, D1}, Precision)};
        {N1, D1} ->
            Digits = long_division(D1, N1, Precision, []),
            {to_number(Digits), pow(10, Precision)};
        %% The two clauses below correspond to the commented-out
        %% simplify/1 clauses above and are currently unreachable.
        undefined ->
            undefined;
        N ->
            {N, {0, 10}}
    end.

%% @private Produce Precision decimal digits of N1/D, most significant
%% digit last in the accumulator (to_number/1 weighs them accordingly).
long_division(_D, _N1, 0, Accum) ->
    Accum;
long_division(D, N1, Precision, Accum) ->
    N2 = N1 * 10,
    X = N2 div D,
    long_division(D, N2 - X * D, Precision - 1, [X | Accum]).

%%
%% @param R rational
%% @return Rounded off integer
%% @doc Rounds half up. The fractional part is assumed to be proper
%% (numerator < denominator); call simplify/1 first for top-heavy
%% fractions. Uses 2*N < D (not N < D div 2) so odd denominators
%% round correctly, e.g. 1/3 -> 0 rather than 1.
%%
-spec round(R::rational()) -> integer().
round({I, {N, D} = F}) when is_integer(I) andalso is_integer(N) andalso is_integer(D) ->
    I + ?MODULE:round(F);
round({N, D}) when is_integer(N) andalso is_integer(D) ->
    case 2 * N < D of
        true -> 0;
        _ -> 1
    end;
round(I) when is_integer(I) ->
    I.

%% @private
to_number(Digits) ->
    to_number(Digits, 1, 0).

%% @private
to_number([], _I, Accum) ->
    Accum;
to_number([D|T], I, Accum) ->
    to_number(T, I*10, D * I + Accum).

%% @private
pow(_B, 0) ->
    1;
pow(B, N) ->
    B * pow(B, N - 1).
%%% -*- mode: Erlang; fill-column: 80; comment-column: 75; -*-
%%% vi:ts=4 sw=4 et
%%% The MIT License
%%%
%%% Copyright (c) 2007 <NAME>
%%%
%%% Permission is hereby granted, free of charge, to any person obtaining a copy
%%% of this software and associated documentation files (the "Software"), to deal
%%% in the Software without restriction, including without limitation the rights
%%% to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
%%% copies of the Software, and to permit persons to whom the Software is
%%% furnished to do so, subject to the following conditions:
%%%
%%% The above copyright notice and this permission notice shall be included in
%%% all copies or substantial portions of the Software.
%%%
%%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
%%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
%%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
%%% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
%%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
%%% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
%%% THE SOFTWARE.
%%%---------------------------------------------------------------------------
%%% @author <NAME>
%%% @copyright 2007 <NAME> freeyourmind ++ [$@|gmail.<EMAIL>]
%%% @doc
%%% plists is a drop-in replacement for module <a
%%% href="http://www.erlang.org/doc/man/lists.html">lists</a>, making
%%% most list operations parallel. It can operate on each element in
%%% parallel, for IO-bound operations, on sublists in parallel, for
%%% taking advantage of multi-core machines with CPU-bound operations,
%%% and across erlang nodes, for parallizing inside a cluster. It
%%% handles errors and node failures. It can be configured, tuned, and
%%% tweaked to get optimal performance while minimizing overhead.
%%%
%%% Almost all the functions are identical to equivalent functions in
%%% lists, returning exactly the same result, and having both a form
%%% with an identical syntax that operates on each element in parallel
%%% and a form which takes an optional "malt", a specification for how
%%% to parallelize the operation.
%%%
%%% fold is the one exception, parallel fold is different from linear
%%% fold. This module also include a simple mapreduce implementation,
%%% and the function runmany. All the other functions are implemented
%%% with runmany, which is a generalization of parallel list
%%% operations.
%%%
%%% Malts
%%% =====
%%%
%%% A malt specifies how to break a list into sublists, and can optionally
%%% specify a timeout, which nodes to run on, and how many processes to start
%%% per node.
%%%
%%% Malt = MaltComponent | [MaltComponent]
%%% MaltComponent = SubListSize::integer() | {processes, integer()} |
%%% {processes, schedulers} |
%%% {timeout, Milliseconds::integer()} | {nodes, [NodeSpec]}<br/>
%%%
%%% NodeSpec = Node::atom() | {Node::atom(), NumProcesses::integer()} |
%%% {Node::atom(), schedulers}
%%%
%%% An integer can be given to specify the exact size for sublists. 1
%%% is a good choice for IO-bound operations and when the operation on
%%% each list element is expensive. Larger numbers minimize overhead
%%% and are faster for cheap operations.
%%%
%%% If the integer is omitted, and you have specified a `{processes,
%%% X}`, the list is split into X sublists. This is only useful when
%%% the time to process each element is close to identical and you
%%% know exactly how many lines of execution are available to you.
%%%
%%% If neither of the above applies, the sublist size defaults to 1.
%%%
%%% You can use `{processes, X}` to have the list processed by `X`
%%% processes on the local machine. A good choice for `X` is the
%%% number of lines of execution (cores) the machine provides. This
%%% can be done automatically with {processes, schedulers}, which sets
%%% the number of processes to the number of schedulers in the erlang
%%% virtual machine (probably equal to the number of cores).
%%%
%%% `{timeout, Milliseconds}` specifies a timeout. This is a timeout
%%% for the entire operation, both operating on the sublists and
%%% combining the results. exit(timeout) is evaluated if the timeout
%%% is exceeded.
%%%
%%% `{nodes, NodeList}` specifies that the operation should be done
%%% across nodes. Every element of NodeList is of the form
%%% `{NodeName, NumProcesses}` or NodeName, which means the same as
%%% `{NodeName, 1}`. plists runs NumProcesses processes on NodeName
%%% concurrently. A good choice for NumProcesses is the number of
%%% lines of execution (cores) a node provides plus one. This ensures
%%% the node is completely busy even when fetching a new sublist. This
%%% can be done automatically with `{NodeName, schedulers}`, in which
%%% case plists uses a cached value if it has one, and otherwise finds
%%% the number of schedulers in the remote node and adds one. This
%%% will ensure at least one busy process per core (assuming the node
%%% has a scheduler for each core).
%%%
%%% plists is able to recover if a node goes down. If all nodes go
%%% down, exit(allnodescrashed) is evaluated.
%%%
%%% Any of the above may be used as a malt, or may be combined into a
%%% list. `{nodes, NodeList}` and {processes, X} may not be combined.
%%%
%%% Examples
%%% ========
%%%
%%% %%start a process for each element (1-element sublists)<
%%% 1
%%%
%%% %% start a process for each ten elements (10-element sublists)
%%% 10
%%%
%%% %% split the list into two sublists and process in two processes
%%% {processes, 2}
%%%
%%% %% split the list into X sublists and process in X processes,
%%% %% where X is the number of cores in the machine
%%% {processes, schedulers}
%%%
%%% %% split the list into 10-element sublists and process in two processes
%%% [10, {processes, 2}]
%%%
%%% %% timeout after one second. Assumes that a process should be started
%%% %% for each element.<br/>
%%% {timeout, 1000}
%%%
%%% %% Runs 3 processes at a time on apple@desktop, and 2 on orange@laptop
%%% %% This is the best way to utilize all the CPU-power of a dual-core<br/>
%%% %% desktop and a single-core laptop. Assumes that the list should be<br/>
%%% %% split into 1-element sublists.<br/>
%%% {nodes, [{apple@desktop, 3}, {orange@laptop, 2}]}
%%%
%%% %% Like above, but makes plists figure out how many processes to use.
%%% {nodes, [{apple@desktop, schedulers}, {orange@laptop, schedulers}]}
%%%
%%% %% Gives apple and orange three seconds to process the list as<br/>
%%% %% 100-element sublists.<br/>
%%% [100, {timeout, 3000}, {nodes, [{apple@desktop, 3}, {orange@laptop, 2}]}]
%%%
%%% Aside: Why Malt?
%%% ================
%%%
%%% I needed a word for this concept, so maybe my subconsciousness
%%% gave me one by making me misspell multiply. Maybe it is an acronym
%%% for Malt is A List Tearing Specification. Maybe it is a beer
%%% metaphor, suggesting that code only runs in parallel if bribed
%%% with spirits. It's jargon, learn it or you can't be part of the
%%% in-group.
%%%
%%% Messages and Errors
%%% ===================
%%%
%%% plists assures that no extraneous messages are left in or will
%%% later enter the message queue. This is guaranteed even in the
%%% event of an error.
%%%
%%% Errors in spawned processes are caught and propagated to the
%%% calling process. If you invoke
%%%
%%% plists:map(fun (X) -> 1/X end, [1, 2, 3, 0]).
%%%
%%% you get a badarith error, exactly like when you use lists:map.
%%%
%%% plists uses monitors to watch the processes it spawns. It is not a
%%% good idea to invoke plists when you are already monitoring
%%% processes. If one of them does a non-normal exit, plists receives
%%% the 'DOWN' message believing it to be from one of its own
%%% processes. The error propagation system goes into effect, which
%%% results in the error occurring in the calling process.
%%%
-module(ec_plists).
-export([all/2, all/3,
any/2, any/3,
filter/2, filter/3,
fold/3, fold/4, fold/5,
foreach/2, foreach/3,
map/2, map/3,
ftmap/2, ftmap/3,
partition/2, partition/3,
sort/1, sort/2, sort/3,
usort/1, usort/2, usort/3,
mapreduce/2, mapreduce/3, mapreduce/5,
runmany/3, runmany/4]).
-export_type([malt/0, malt_component/0, node_spec/0, fuse/0, fuse_fun/0]).
%%============================================================================
%% types
%%============================================================================
-type malt() :: malt_component() | [malt_component()].
-type malt_component() :: SubListSize :: integer()
| {processes, integer()}
| {processes, schedulers}
| {timeout, Milliseconds :: integer()}
| {nodes, [node_spec()]}.
-type node_spec() :: Node :: atom()
| {Node :: atom(), NumProcesses :: integer()}
| {Node :: atom(), schedulers}.
-type fuse_fun() :: fun((term(), term()) -> term()).
-type fuse() :: fuse_fun() | {recursive, fuse_fun()} | {reverse, fuse_fun()}.
-type el_fun() :: fun((term()) -> term()).
%%============================================================================
%% API
%%============================================================================
%% Everything here is defined in terms of runmany.
%% The following methods are convenient interfaces to runmany.
%% @doc Same semantics as in module
%% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
%% Equivalent to all(Fun, List, 1): a malt of 1, i.e. one process
%% per list element.
-spec all(el_fun(), list()) -> boolean().
all(Fun, List) ->
    all(Fun, List, 1).
%% @doc Same semantics as in module
%% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
%% Each sublist is checked in parallel; the first sublist that fails
%% throws, which aborts the run and yields false.
-spec all(el_fun(), list(), malt()) -> boolean().
all(Fun, List, Malt) ->
    Check = fun(Sub) ->
                    case lists:all(Fun, Sub) of
                        true -> nil;
                        false -> erlang:throw(notall)
                    end
            end,
    try runmany(Check, fun(_Left, _Right) -> nil end, List, Malt) of
        _ -> true
    catch
        throw:notall -> false
    end.
%% @doc Same semantics as in module
%% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
%% Equivalent to any(Fun, List, 1): one process per list element.
-spec any(fun(), list()) -> boolean().
any(Fun, List) ->
    any(Fun, List, 1).
%% @doc Same semantics as in module
%% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
%% The first sublist containing a match throws, which aborts the run
%% and yields true.
-spec any(fun(), list(), malt()) -> boolean().
any(Fun, List, Malt) ->
    Check = fun(Sub) ->
                    case lists:any(Fun, Sub) of
                        true -> erlang:throw(any);
                        false -> nil
                    end
            end,
    try runmany(Check, fun(_Left, _Right) -> nil end, List, Malt) of
        _ -> false
    catch
        throw:any -> true
    end.
%% @doc Same semantics as in module
%% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
%% Equivalent to filter(Fun, List, 1): one process per list element.
-spec filter(fun(), list()) -> list().
filter(Fun, List) ->
    filter(Fun, List, 1).
%% @doc Same semantics as in module
%% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
%% Sublists are filtered in parallel and the surviving elements are
%% concatenated back in the original order.
-spec filter(fun(), list(), malt()) -> list().
filter(Fun, List, Malt) ->
    Keep = fun(Sub) -> lists:filter(Fun, Sub) end,
    Fuse = {reverse, fun(Left, Right) -> Left ++ Right end},
    runmany(Keep, Fuse, List, Malt).
%% Note that with parallel fold there is not foldl and foldr,
%% instead just one fold that can fuse Accumulators.

%% @doc Like below, but assumes 1 as the Malt. This function is almost useless,
%% and is intended only to aid converting code from using lists to plists.
%% Fun doubles as the fuse, so it must also accept two accumulators.
-spec fold(fun(), InitAcc :: term(), list()) -> term().
fold(Fun, InitAcc, List) ->
    fold(Fun, Fun, InitAcc, List, 1).
%% @doc Like below, but uses the Fun as the Fuse by default.
%% Only sensible when Fun can also combine two accumulators.
-spec fold(fun(), InitAcc :: term(), list(), malt()) -> term().
fold(Fun, InitAcc, List, Malt) ->
    fold(Fun, Fun, InitAcc, List, Malt).
%% @doc fold is more complex when made parallel. There is no foldl and
%% foldr, accumulators aren't passed in any defined order. The list
%% is split into sublists which are folded together. Fun is identical
%% to the function passed to lists:fold[lr], it takes (an element, and
%% the accumulator) and returns -> a new accumulator. It is used for
%% the initial stage of folding sublists. Fuse fuses together the
%% results, it takes (Results1, Result2) and returns -> a new result.
%% By default sublists are fused left to right, each result of a fuse
%% being fed into the first element of the next fuse. The result of
%% the last fuse is the result.
%%
%% Fusing may also run in parallel using a recursive algorithm,
%% by specifying the fuse as {recursive, Fuse}. See
%% the discussion in {@link runmany/4}.
%%
%% Malt is the malt for the initial folding of sublists, and for the
%% possible recursive fuse.
-spec fold(fun(), fuse(), InitAcc :: term(), list(), malt()) -> term().
fold(Fun, Fuse, InitAcc, List, Malt) ->
    FoldSub = fun(Sub) -> lists:foldl(Fun, InitAcc, Sub) end,
    runmany(FoldSub, Fuse, List, Malt).
%% @doc Similar to foreach in module
%% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>
%% except it makes no guarantee about the order it processes list elements.
%% Equivalent to foreach(Fun, List, 1): one process per list element.
-spec foreach(fun(), list()) -> ok.
foreach(Fun, List) ->
    foreach(Fun, List, 1).
%% @doc Similar to foreach in module
%% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>
%% except it makes no guarantee about the order it processes list elements.
-spec foreach(fun(), list(), malt()) -> ok.
foreach(Fun, List, Malt) ->
    Each = fun(Sub) -> lists:foreach(Fun, Sub) end,
    runmany(Each, fun(_Left, _Right) -> ok end, List, Malt).
%% @doc Same semantics as in module
%% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
%% Equivalent to map(Fun, List, 1): one process per list element.
-spec map(fun(), list()) -> list().
map(Fun, List) ->
    map(Fun, List, 1).
%% @doc Same semantics as in module
%% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
%% Sublists are mapped in parallel and re-concatenated in order.
-spec map(fun(), list(), malt()) -> list().
map(Fun, List, Malt) ->
    MapSub = fun(Sub) -> [Fun(El) || El <- Sub] end,
    Fuse = {reverse, fun(Left, Right) -> Left ++ Right end},
    runmany(MapSub, Fuse, List, Malt).
%% @doc Fault-tolerant map: successful results are returned as
%% {value, term()}, elements whose application crashes are returned as
%% {error, {Class, Reason}} instead of aborting the whole map.
%% Delegates to ftmap/3 with a malt of 1, consistent with every other
%% arity-2 function in this module (the previous code duplicated the
%% wrapping lambda of ftmap/3 inline).
-spec ftmap(fun(), list()) -> list().
ftmap(Fun, List) ->
    ftmap(Fun, List, 1).
%% @doc values are returned as {value, term()}.
%% Elements whose application crashes yield {error, {Class, Reason}}
%% instead of aborting the whole map.
-spec ftmap(fun(), list(), malt()) -> list().
ftmap(Fun, List, Malt) ->
    Safe = fun(El) ->
                   try
                       {value, Fun(El)}
                   catch
                       Class:Reason ->
                           {error, {Class, Reason}}
                   end
           end,
    map(Safe, List, Malt).
%% @doc Same semantics as in module
%% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
%% Equivalent to partition(Fun, List, 1): one process per list element.
-spec partition(fun(), list()) -> {list(), list()}.
partition(Fun, List) ->
    partition(Fun, List, 1).
%% @doc Same semantics as in module
%% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
%% Sublists are partitioned in parallel; the matching and non-matching
%% halves are concatenated back in the original order.
-spec partition(fun(), list(), malt()) -> {list(), list()}.
partition(Fun, List, Malt) ->
    Split = fun(Sub) -> lists:partition(Fun, Sub) end,
    Fuse = {reverse, fun({TrueL, FalseL}, {TrueR, FalseR}) ->
                             {TrueL ++ TrueR, FalseL ++ FalseR}
                     end},
    runmany(Split, Fuse, List, Malt).
%% SORTMALT needs to be tuned
-define(SORTMALT, 100).

%% @doc Same semantics as lists:sort/1 (default =< ordering).
-spec sort(list()) -> list().
sort(List) ->
    sort(fun erlang:'=<'/2, List).

%% @doc Same semantics as lists:sort/2, using the default sort malt.
-spec sort(fun(), list()) -> list().
sort(Fun, List) ->
    sort(Fun, List, ?SORTMALT).

%% @doc This version lets you specify your own malt for sort.
%%
%% The list is split into sublists which are sorted in parallel (one
%% process per sublist); sorted results are then merged pairwise, each
%% merge in its own process ({recursive, ...} fuse). The default malt of
%% 100 splits the list into 100-element sublists.
-spec sort(fun(), list(), malt()) -> list().
sort(Fun, List, Malt) ->
    SortPart = fun(Sublist) -> lists:sort(Fun, Sublist) end,
    Merge = fun(Left, Right) -> lists:merge(Fun, Left, Right) end,
    runmany(SortPart, {recursive, Merge}, List, Malt).
%% @doc Same semantics as lists:usort/1 (default =< ordering,
%% duplicates removed).
-spec usort(list()) -> list().
usort(List) ->
    usort(fun erlang:'=<'/2, List).

%% @doc Same semantics as lists:usort/2, using the default sort malt.
-spec usort(fun(), list()) -> list().
usort(Fun, List) ->
    usort(Fun, List, ?SORTMALT).

%% @doc This version lets you specify your own malt for usort.
%%
%% The list is split into sublists which are sorted in parallel; sorted
%% results are then merged pairwise, each merge in its own process
%% ({recursive, ...} fuse). The default malt of 100 splits the list into
%% 100-element sublists.
%%
%% usort removes duplicate elements while it sorts.
-spec usort(fun(), list(), malt()) -> list().
usort(Fun, List, Malt) ->
    SortPart = fun(Sublist) -> lists:usort(Fun, Sublist) end,
    Merge = fun(Left, Right) -> lists:umerge(Fun, Left, Right) end,
    runmany(SortPart, {recursive, Merge}, List, Malt).
%% @doc Like below, assumes default MapMalt of 1.
-ifdef(namespaced_types).
-spec mapreduce(MapFunc, list()) -> dict:dict() when
      MapFunc :: fun((term()) -> DeepListOfKeyValuePairs),
      DeepListOfKeyValuePairs :: [DeepListOfKeyValuePairs] | {Key :: term(), Value :: term()}.
-else.
-spec mapreduce(MapFunc, list()) -> dict() when
      MapFunc :: fun((term()) -> DeepListOfKeyValuePairs),
      DeepListOfKeyValuePairs :: [DeepListOfKeyValuePairs] | {Key :: term(), Value :: term()}.
-endif.
mapreduce(MapFunc, List) ->
    mapreduce(MapFunc, List, 1).

%% @doc Like below, but uses a default reducer (add_key/3) that collects
%% all {Key, Value} pairs into a dict, with values {Key, [Value1, Value2...]}.
%% This dict is returned as the result.
mapreduce(MapFunc, List, MapMalt) ->
    mapreduce(MapFunc, List, dict:new(), fun add_key/3, MapMalt).
%% @doc This is a very basic mapreduce. You won't write a
%% Google-rivaling search engine with it. It has no equivalent in
%% lists. Each element in the list is run through the MapFunc, which
%% produces either a {Key, Value} pair, or a lists of key value pairs,
%% or a list of lists of key value pairs...etc. A reducer process runs
%% in parallel with the mapping processes, collecting the key value
%% pairs. It starts with a state given by InitState, and for each
%% {Key, Value} pair that it receives it invokes ReduceFunc(OldState,
%% Key, Value) to compute its new state. mapreduce returns the
%% reducer's final state.
%%
%% MapMalt is the malt for the mapping operation, with a default value of 1,
%% meaning each element of the list is mapped by a separate process.
%%
%% mapreduce requires OTP R11B, or it may leave monitoring messages in the
%% message queue.
-ifdef(namespaced_types).
-spec mapreduce(MapFunc, list(), InitState :: term(), ReduceFunc, malt()) -> dict:dict() when
      MapFunc :: fun((term()) -> DeepListOfKeyValuePairs),
      DeepListOfKeyValuePairs :: [DeepListOfKeyValuePairs] | {Key :: term(), Value :: term()},
      ReduceFunc :: fun((OldState :: term(), Key :: term(), Value :: term()) -> NewState :: term()).
-else.
-spec mapreduce(MapFunc, list(), InitState :: term(), ReduceFunc, malt()) -> dict() when
      MapFunc :: fun((term()) -> DeepListOfKeyValuePairs),
      DeepListOfKeyValuePairs :: [DeepListOfKeyValuePairs] | {Key :: term(), Value :: term()},
      ReduceFunc :: fun((OldState :: term(), Key :: term(), Value :: term()) -> NewState :: term()).
-endif.
mapreduce(MapFunc, List, InitState, ReduceFunc, MapMalt) ->
    Parent = self(),
    {Reducer, ReducerRef} =
        erlang:spawn_monitor(fun() ->
                                     reducer(Parent, 0, InitState, ReduceFunc)
                             end),
    %% Each mapper sends its mapped sublist to the reducer and returns 1;
    %% summing these 1s counts how many messages the reducer must expect.
    MapFunc2 = fun(L) ->
                       Reducer ! lists:map(MapFunc, L),
                       1
               end,
    SentMessages = try
                       runmany(MapFunc2, fun(A, B) -> A + B end, List, MapMalt)
                   catch
                       exit:Reason ->
                           %% Mapping failed: stop the reducer and re-raise.
                           erlang:demonitor(ReducerRef, [flush]),
                           Reducer ! die,
                           exit(Reason)
                   end,
    Reducer ! {mappers, done, SentMessages},
    Results = receive
                  {Reducer, Results2} ->
                      Results2;
                  {'DOWN', _, _, Reducer, Reason2} ->
                      exit(Reason2)
              end,
    %% Consume the reducer's normal 'DOWN' so it does not leak into the
    %% caller's mailbox.
    receive
        {'DOWN', _, _, Reducer, normal} ->
            nil
    end,
    Results.
%% The reducer process: folds each incoming key/value message into State
%% with Func. The {mappers, done, N} clause pattern-matches on the bound
%% NumReceived, so (via selective receive) it only fires once exactly N
%% mapper messages have been folded in; the final state is then sent to
%% Parent. A 'die' message aborts silently (sent when mapping failed).
reducer(Parent, NumReceived, State, Func) ->
    receive
        die ->
            nil;
        {mappers, done, NumReceived} ->
            Parent ! {self(), State};
        Keys ->
            reducer(Parent, NumReceived + 1, each_key(State, Func, Keys), Func)
    end.
%% Fold Func over a deep structure of {Key, Value} pairs: a bare pair is
%% applied directly; a list is walked element by element, recursing into
%% nested lists.
each_key(State, Func, {Key, Value}) ->
    Func(State, Key, Value);
each_key(State, Func, [Head | Tail]) ->
    each_key(each_key(State, Func, Head), Func, Tail);
each_key(State, _Func, []) ->
    State.

%% Default reducer: accumulate values per key in a dict, appending in
%% arrival order (first value creates the [Value] entry).
add_key(Dict, Key, Value) ->
    dict:update(Key, fun(Existing) -> Existing ++ [Value] end, [Value], Dict).
%% @doc Like below, but assumes a Malt of 1,
%% meaning each element of the list is processed by a separate process.
-spec runmany(fun(), fuse(), list()) -> term().
runmany(Fun, Fuse, List) ->
    runmany(Fun, Fuse, List, 1).

%% Begin internal stuff (though runmany/4 is exported).

%% @doc All of the other functions are implemented with runmany. runmany
%% takes a List, splits it into sublists, and starts processes to operate on
%% each sublist, all done according to Malt. Each process passes its sublist
%% into Fun and sends the result back.
%%
%% The results are then fused together to get the final result. There are two
%% ways this can operate, linearly and recursively. If Fuse is a function,
%% a fuse is done linearly left-to-right on the sublists, the results
%% of processing the first and second sublists being passed to Fuse, then
%% the result of the first fuse and processing the third sublist, and so on. If
%% Fuse is {reverse, FuseFunc}, then a fuse is done right-to-left, the results
%% of processing the second-to-last and last sublists being passed to FuseFunc,
%% then the results of processing the third-to-last sublist and
%% the results of the first fuse, and so forth.
%% Both methods preserve the original order of the list's elements.
%%
%% To do a recursive fuse, pass Fuse as {recursive, FuseFunc}.
%% The recursive fuse makes no guarantee about the order the results of
%% sublists, or the results of fuses are passed to FuseFunc. It
%% continues fusing pairs of results until it is down to one.
%%
%% Recursive fuse is done in parallel with processing the sublists, and a
%% process is spawned to fuse each pair of results. It is a parallelized
%% algorithm. Linear fuse is done after all results of processing sublists
%% have been collected, and can only run in a single process.
%%
%% Even if you pass {recursive, FuseFunc}, a recursive fuse is only done if
%% the malt contains {nodes, NodeList} or {processes, X}. If this is not the
%% case, a linear fuse is done.
-spec runmany(fun(([term()]) -> term()), fuse(), list(), malt()) -> term().
runmany(Fun, Fuse, List, Malt)
  when erlang:is_list(Malt) ->
    runmany(Fun, Fuse, List, local, no_split, Malt);
runmany(Fun, Fuse, List, Malt) ->
    %% A bare malt term is wrapped into a singleton malt list.
    runmany(Fun, Fuse, List, [Malt]).
%% runmany/6 is the malt interpreter: Nodes is 'local' or a list of nodes,
%% Split is 'no_split' or the sublist size, and the malt list is consumed
%% term by term until empty, at which point the work is dispatched.
runmany(Fun, Fuse, List, Nodes, no_split, [MaltTerm | Malt])
  when erlang:is_integer(MaltTerm) ->
    %% A bare integer in the malt is the sublist size.
    runmany(Fun, Fuse, List, Nodes, MaltTerm, Malt);
runmany(Fun, Fuse, List, local, Split, [{processes, schedulers} | Malt]) ->
    %% run a process for each scheduler
    S = erlang:system_info(schedulers),
    runmany(Fun, Fuse, List, local, Split, [{processes, S} | Malt]);
runmany(Fun, Fuse, List, local, no_split, [{processes, X} | _] = Malt) ->
    %% Split the list into X sublists, where X is the number of processes.
    %% BUGFIX: the split size must be an integer. The previous code used
    %% 'L / X', which yields a float; split/3 only terminates on the
    %% integer pattern 0, so a float size dropped the whole list into a
    %% single sublist and silently defeated the parallelism.
    L = erlang:length(List),
    case (L rem X) of
        0 ->
            runmany(Fun, Fuse, List, local, (L div X), Malt);
        _ ->
            runmany(Fun, Fuse, List, local, (L div X) + 1, Malt)
    end;
runmany(Fun, Fuse, List, local, Split, [{processes, X} | Malt]) ->
    %% run X processes on the local machine
    Nodes = lists:duplicate(X, node()),
    runmany(Fun, Fuse, List, Nodes, Split, Malt);
runmany(Fun, Fuse, List, Nodes, Split, [{timeout, X} | Malt]) ->
    %% Guard the whole run with a timer process. On expiry it sends
    %% {timerrang, Timer}; receivefrom/1 and cluster_runmany/6 translate
    %% that message into a timeout error.
    Parent = erlang:self(),
    Timer = proc_lib:spawn(fun() ->
                                   receive
                                       stoptimer ->
                                           Parent ! {timerstopped, erlang:self()}
                                   after X ->
                                           Parent ! {timerrang, erlang:self()},
                                           receive
                                               stoptimer ->
                                                   Parent ! {timerstopped, erlang:self()}
                                           end
                                   end
                           end),
    Ans = try
              runmany(Fun, Fuse, List, Nodes, Split, Malt)
          catch
              %% we really just want the after block, the syntax
              %% makes this catch necessary.
              willneverhappen ->
                  nil
          after
              Timer ! stoptimer,
              cleanup_timer(Timer)
          end,
    Ans;
runmany(Fun, Fuse, List, local, Split, [{nodes, NodeList} | Malt]) ->
    %% Expand the node spec into a flat worker list: {Node, schedulers}
    %% contributes schedulers+1 workers on Node, {Node, X} contributes X,
    %% a bare Node contributes one.
    Nodes = lists:foldl(fun({Node, schedulers}, A) ->
                                X = schedulers_on_node(Node) + 1,
                                lists:reverse(lists:duplicate(X, Node), A);
                           ({Node, X}, A) ->
                                lists:reverse(lists:duplicate(X, Node), A);
                           (Node, A) ->
                                [Node | A]
                        end,
                        [], NodeList),
    runmany(Fun, Fuse, List, Nodes, Split, Malt);
runmany(Fun, {recursive, Fuse}, List, local, Split, []) ->
    %% local recursive fuse, for when we weren't invoked with {processes, X}
    %% or {nodes, NodeList}. Degenerates recursive fuse into linear fuse.
    runmany(Fun, Fuse, List, local, Split, []);
runmany(Fun, Fuse, List, Nodes, no_split, []) ->
    %% by default, operate on each element separately
    runmany(Fun, Fuse, List, Nodes, 1, []);
runmany(Fun, Fuse, List, local, Split, []) ->
    List2 = splitmany(List, Split),
    local_runmany(Fun, Fuse, List2);
runmany(Fun, Fuse, List, Nodes, Split, []) ->
    List2 = splitmany(List, Split),
    cluster_runmany(Fun, Fuse, List2, Nodes).
%% Drain timer messages after telling the timer to stop, so no stray
%% {timerrang, _}/{timerstopped, _} is left behind in the mailbox.
cleanup_timer(Timer) ->
    receive
        {timerrang, Timer} ->
            cleanup_timer(Timer);
        {timerstopped, Timer} ->
            nil
    end.

%% Scheduler count of a remote node, cached per-process in the process
%% dictionary under ec_plists_schedulers_on_nodes (a dict of Node -> N).
schedulers_on_node(Node) ->
    case erlang:get(ec_plists_schedulers_on_nodes) of
        undefined ->
            X = determine_schedulers(Node),
            erlang:put(ec_plists_schedulers_on_nodes,
                       dict:store(Node, X, dict:new())),
            X;
        Dict ->
            case dict:is_key(Node, Dict) of
                true ->
                    dict:fetch(Node, Dict);
                false ->
                    X = determine_schedulers(Node),
                    erlang:put(ec_plists_schedulers_on_nodes,
                               dict:store(Node, X, Dict)),
                    X
            end
    end.

%% Ask Node for its scheduler count by spawning a probe process there.
%% Returns 0 if the probe dies abnormally (e.g. the node is unreachable).
determine_schedulers(Node) ->
    Parent = erlang:self(),
    Child = proc_lib:spawn(Node, fun() ->
                                         Parent ! {self(), erlang:system_info(schedulers)}
                                 end),
    erlang:monitor(process, Child),
    receive
        {Child, X} ->
            %% Consume the normal 'DOWN' so it cannot leak into later receives.
            receive
                {'DOWN', _, _, Child, _Reason} ->
                    nil
            end,
            X;
        {'DOWN', _, _, Child, Reason} when Reason =/= normal ->
            0
    end.
%% @doc local runmany, for when we weren't invoked with {processes, X}
%% or {nodes, NodeList}. Every sublist is processed in parallel.
local_runmany(Fun, Fuse, List) ->
    Parent = self(),
    %% One monitored worker per sublist; each sends {Pid, Result} back.
    Pids = lists:map(fun(L) ->
                             F = fun() ->
                                         Parent ! {self(), Fun(L)}
                                 end,
                             {Pid, _} = erlang:spawn_monitor(F),
                             Pid
                     end,
                     List),
    %% Collect answers in spawn order; receivefrom/1 throws on worker
    %% failure or timeout, which handle_error/3 turns into an exit after
    %% killing and draining the remaining workers.
    Answers = try
                  lists:map(fun receivefrom/1, Pids)
              catch
                  throw:Message ->
                      {BadPid, Reason} = Message,
                      handle_error(BadPid, Reason, Pids)
              end,
    %% Consume every worker's normal 'DOWN' message.
    lists:foreach(fun(Pid) ->
                          normal_cleanup(Pid)
                  end, Pids),
    fuse(Fuse, Answers).

%% Wait for one worker's answer. Throws {Pid, Reason} if the worker died
%% abnormally, and {nil, timeout} if the {timeout, _} malt timer rang first.
receivefrom(Pid) ->
    receive
        {Pid, R} ->
            R;
        {'DOWN', _, _, Pid, Reason} when Reason =/= normal ->
            erlang:throw({Pid, Reason});
        {timerrang, _} ->
            erlang:throw({nil, timeout})
    end.
%% Convert List into [{Number, Sublist}] so the linear fuse can restore
%% the original order later. The indexed list comes out reversed, which
%% the final sort in cluster_runmany/6 compensates for.
cluster_runmany(Fun, Fuse, List, Nodes) ->
    Numbered = number_reversed(List, 0, []),
    cluster_runmany(Fun, Fuse, Numbered, Nodes, [], []).

%% Pair each element with its 0-based index, accumulating in reverse.
number_reversed([], _N, Acc) ->
    Acc;
number_reversed([H | T], N, Acc) ->
    number_reversed(T, N + 1, [{N, H} | Acc]).
%% cluster_runmany/6 arguments: pending TaskList ({Num, Sublist} or
%% {fuse, R1, R2}), free Nodes, Running workers ({Pid, Node, Task}), and
%% collected Results ({Num, R} or {fuse, R}).

%% @doc Add a pair of results into the TaskList as a fusing task
cluster_runmany(Fun, {recursive, Fuse}, [], Nodes, Running,
                [{_, R1}, {_, R2} | Results]) ->
    cluster_runmany(Fun, {recursive, Fuse}, [{fuse, R1, R2}], Nodes,
                    Running, Results);
cluster_runmany(_, {recursive, _Fuse}, [], _Nodes, [], [{_, Result}]) ->
    %% recursive fuse done, return result
    Result;
cluster_runmany(_, {recursive, _Fuse}, [], _Nodes, [], []) ->
    %% edge case where we are asked to do nothing
    [];
cluster_runmany(_, Fuse, [], _Nodes, [], Results) ->
    %% We're done, now we just have to [linear] fuse the results.
    %% Sort by sublist number to restore the original order first.
    fuse(Fuse, lists:map(fun({_, R}) ->
                                 R
                         end,
                         lists:sort(fun({A, _}, {B, _}) ->
                                            A =< B
                                    end,
                                    lists:reverse(Results))));
cluster_runmany(Fun, Fuse, [Task | TaskList], [N | Nodes], Running, Results) ->
    %% We have a ready node and a sublist or fuse to be processed, so we start
    %% a new process
    Parent = erlang:self(),
    case Task of
        {Num, L2} ->
            Fun2 = fun() ->
                           Parent ! {erlang:self(), Num, Fun(L2)}
                   end;
        {fuse, R1, R2} ->
            {recursive, FuseFunc} = Fuse,
            Fun2 = fun() ->
                           Parent ! {erlang:self(), fuse, FuseFunc(R1, R2)}
                   end
    end,
    %% runmany_wrap converts exceptions in the worker into {Pid, error, _}
    %% messages so the receive loop below can abort cleanly.
    Fun3 = fun() -> runmany_wrap(Fun2, Parent) end,
    Pid = proc_lib:spawn(N, Fun3),
    erlang:monitor(process, Pid),
    cluster_runmany(Fun, Fuse, TaskList, Nodes, [{Pid, N, Task} | Running], Results);
cluster_runmany(Fun, Fuse, TaskList, Nodes, Running, Results) when length(Running) > 0 ->
    %% We can't start a new process, but can watch over already running ones
    receive
        {_Pid, error, Reason} ->
            RunningPids = lists:map(fun({Pid, _, _}) ->
                                            Pid
                                    end,
                                    Running),
            handle_error(junkvalue, Reason, RunningPids);
        {Pid, Num, Result} ->
            %% throw out the exit message, Reason should be
            %% normal, noproc, or noconnection
            receive
                {'DOWN', _, _, Pid, _Reason} ->
                    nil
            end,
            %% The finished worker's node goes back into the free pool.
            {Running2, FinishedNode, _} = delete_running(Pid, Running, []),
            cluster_runmany(Fun, Fuse, TaskList,
                            [FinishedNode | Nodes], Running2, [{Num, Result} | Results]);
        {timerrang, _} ->
            RunningPids = lists:map(fun({Pid, _, _}) ->
                                            Pid
                                    end,
                                    Running),
            handle_error(nil, timeout, RunningPids);
        %% node failure
        {'DOWN', _, _, Pid, noconnection} ->
            %% Requeue the dead worker's task; its node is NOT returned to
            %% the free pool.
            {Running2, _DeadNode, Task} = delete_running(Pid, Running, []),
            cluster_runmany(Fun, Fuse, [Task | TaskList], Nodes,
                            Running2, Results);
        %% could a noproc exit message come before the message from
        %% the process? we are assuming it can't.
        %% this clause is unlikely to get invoked due to cluster_runmany's
        %% spawned processes. It will still catch errors in mapreduce's
        %% reduce process, however.
        {'DOWN', _, _, BadPid, Reason} when Reason =/= normal ->
            RunningPids = lists:map(fun({Pid, _, _}) ->
                                            Pid
                                    end,
                                    Running),
            handle_error(BadPid, Reason, RunningPids)
    end;
cluster_runmany(_, _, [_Non | _Empty], [] = _Nodes, [] = _Running, _) ->
    %% We have data, but no nodes either available or occupied
    erlang:exit(allnodescrashed).
%% Run the worker fun, converting any exception into a {Pid, error, Reason}
%% message to Parent. An exit with reason 'siblingdied' is swallowed: it
%% means another worker failed first and handle_error/3 is killing us.
%%
%% BUGFIX: the try body must CALL the fun — 'try Fun' merely evaluated the
%% fun term without executing it, so the worker never ran, never sent its
%% result, and cluster_runmany/6 deadlocked waiting for it.
-ifdef(fun_stacktrace).
%% Pre-OTP-21 variant using the deprecated erlang:get_stacktrace/0.
runmany_wrap(Fun, Parent) ->
    try
        Fun()
    catch
        exit:siblingdied ->
            ok;
        exit:Reason ->
            Parent ! {erlang:self(), error, Reason};
        error:R ->
            Parent ! {erlang:self(), error, {R, erlang:get_stacktrace()}};
        throw:R ->
            Parent ! {erlang:self(), error, {{nocatch, R}, erlang:get_stacktrace()}}
    end.
-else.
runmany_wrap(Fun, Parent) ->
    try
        Fun()
    catch
        exit:siblingdied ->
            ok;
        exit:Reason ->
            Parent ! {erlang:self(), error, Reason};
        error:R:Stacktrace ->
            Parent ! {erlang:self(), error, {R, Stacktrace}};
        throw:R:Stacktrace ->
            Parent ! {erlang:self(), error, {{nocatch, R}, Stacktrace}}
    end.
-endif.
%% Remove Pid's entry from the running-task list, returning the remaining
%% entries (order not preserved) plus the node and task Pid was handling.
%% Crashes with function_clause if Pid is absent, which callers guarantee
%% cannot happen.
delete_running(Pid, [{Pid, Node, Task} | Rest], Seen) ->
    {Rest ++ Seen, Node, Task};
delete_running(Pid, [Other | Rest], Seen) ->
    delete_running(Pid, Rest, [Other | Seen]).
%% Kill every sibling worker with reason 'siblingdied', drain their
%% pending messages, then exit the caller with the original failure reason.
handle_error(BadPid, Reason, Pids) ->
    lists:foreach(fun(Pid) ->
                          erlang:exit(Pid, siblingdied)
                  end, Pids),
    lists:foreach(fun(Pid) ->
                          error_cleanup(Pid, BadPid)
                  end, Pids),
    erlang:exit(Reason).

%% Drain all remaining messages from Pid until its 'DOWN' arrives. The
%% worker that originally failed (BadPid) is skipped: its 'DOWN' was
%% already consumed when the failure was detected.
error_cleanup(BadPid, BadPid) ->
    ok;
error_cleanup(Pid, BadPid) ->
    receive
        {Pid, _} ->
            error_cleanup(Pid, BadPid);
        {Pid, _, _} ->
            error_cleanup(Pid, BadPid);
        {'DOWN', _, _, Pid, _Reason} ->
            ok
    end.

%% Consume the 'DOWN' message of a worker that finished normally.
normal_cleanup(Pid) ->
    receive
        {'DOWN', _, _, Pid, _Reason} ->
            ok
    end.
%% Fold the collected sublist results into one value. A plain fun fuses
%% linearly left-to-right; {reverse, FuseFunc} fuses right-to-left with
%% the argument order swapped. An empty result list yields [].
fuse(_, []) ->
    [];
fuse({reverse, _} = Fuse, Results) ->
    [Seed | Remaining] = lists:reverse(Results),
    fuse(Fuse, Remaining, Seed);
fuse(Fuse, [Seed | Remaining]) ->
    fuse(Fuse, Remaining, Seed).

%% Accumulator loop for fuse/2.
fuse({reverse, FuseFunc} = Fuse, [Next | Remaining], Acc) ->
    fuse(Fuse, Remaining, FuseFunc(Next, Acc));
fuse(FuseFunc, [Next | Remaining], Acc) ->
    fuse(FuseFunc, Remaining, FuseFunc(Acc, Next));
fuse(_, [], Acc) ->
    Acc.
%% @doc Splits a list into a list of sublists, each of size Size,
%% except for the last element which is less if the original list
%% could not be evenly divided into Size-sized lists.
%% (Fix: removed trailing dataset residue that had been fused onto the
%% final line and broke compilation.)
splitmany(List, Size) ->
    splitmany(List, [], Size).

splitmany([], Acc, _) ->
    lists:reverse(Acc);
splitmany(List, Acc, Size) ->
    {Top, NList} = split(Size, List),
    splitmany(NList, [Top | Acc], Size).

%% @doc Like lists:split/2, except a list shorter than Size yields
%% {List, []} instead of raising badarg.
split(Size, List) ->
    split(Size, List, []).

split(0, List, Acc) ->
    {lists:reverse(Acc), List};
split(Size, [H | List], Acc) ->
    split(Size - 1, List, [H | Acc]);
split(_, [], Acc) ->
    {lists:reverse(Acc), []}.
%% @author <NAME>
%% @copyright Copyright (c) 2008-2009 <NAME>, Copyright (c) 2009 <NAME>
%%
%% @doc Conversion functions for all kinds of data types. Changes to
%% Rusty's version: added date conversion, undefined handling and more
%% to_bool cases.
%% Copyright 2009-2012 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(z_convert).
-author("<NAME>").
-author("<NAME> <<EMAIL>>").
-author("<NAME> <<EMAIL>>").
-define(ST_JUTTEMIS, {{9999,8,17}, {12,0,0}}).
-export ([
clean_lower/1,
to_list/1,
to_flatlist/1,
to_atom/1,
to_binary/1,
to_binary/2,
to_integer/1,
to_float/1,
to_bool_strict/1,
to_bool/1,
to_utc/1,
to_localtime/1,
to_datetime/1,
to_date/1,
to_time/1,
to_isotime/1,
to_json/1,
unicode_to_utf8/1,
convert_json/1,
ip_to_list/1,
ip_to_long/1,
long_to_ip/1
]).
%%% CONVERSION %%%

%% @doc Convert to lower case, strip surrounding whitespace.
-spec clean_lower(binary()|list()|atom()) -> binary().
clean_lower(L) -> z_string:trim(z_string:to_lower(L)).

%% @doc Convert (almost) any value to a list. undefined and the empty
%% binary become []; {rsc_list, L} unwraps to L; floats are rendered via
%% the mochinum project helper.
-spec to_list(term()) -> string().
to_list(undefined) -> [];
to_list(<<>>) -> [];
to_list({rsc_list, L}) -> L;
to_list(L) when is_list(L) -> L;
to_list(A) when is_atom(A) -> atom_to_list(A);
to_list(B) when is_binary(B) -> binary_to_list(B);
to_list(I) when is_integer(I) -> integer_to_list(I);
to_list(F) when is_float(F) -> mochinum:digits(F).
%% @doc Flatten list and convert to string. A list that is already a
%% readable string is returned as-is; any other list (e.g. an iolist) is
%% flattened through a binary round trip.
-spec to_flatlist(term()) -> string().
to_flatlist(L) when is_list(L) ->
    case z_string:is_string(L) of
        true -> L;
        false -> lists:flatten(to_list(iolist_to_binary(L)))
    end;
to_flatlist(L) ->
    lists:flatten(to_list(L)).
%% @doc Convert (almost) any value to an atom; <<>> and [] map to undefined.
%% NOTE(review): this uses list_to_atom/1, which creates new atoms; atoms
%% are never garbage collected, so callers must not feed this unbounded
%% untrusted input (atom table exhaustion) — verify call sites.
-spec to_atom(term()) -> atom() | undefined.
to_atom(<<>>) -> undefined;
to_atom([]) -> undefined;
to_atom(A) when is_atom(A) -> A;
to_atom(B) when is_binary(B) -> to_atom(binary_to_list(B));
to_atom(I) when is_integer(I) -> to_atom(integer_to_list(I));
to_atom(L) when is_list(L) -> list_to_atom(binary_to_list(iolist_to_binary(L))).
%% @doc Convert (almost) any value to a binary; undefined becomes <<>>.
-spec to_binary(term()) -> binary().
to_binary(undefined) -> <<>>;
to_binary(A) when is_atom(A) -> to_binary(atom_to_list(A));
to_binary(B) when is_binary(B) -> B;
to_binary(I) when is_integer(I) -> to_binary(integer_to_list(I));
to_binary(F) when is_float(F) -> to_binary(to_list(F));
to_binary(L) when is_list(L) -> iolist_to_binary(L).

%% Specific Zotonic callback, please keep here.
%% Translatable {trans, _} values are resolved against Context first.
to_binary({trans, _} = Tr, Context) -> to_binary(z_trans:lookup_fallback(Tr, Context));
to_binary(A, _Context) -> to_binary(A).
%% @doc Convert (almost) any value to an integer; undefined and the empty
%% list map to undefined, floats are rounded.
-spec to_integer(term()) -> integer() | undefined.
to_integer(undefined) -> undefined;
to_integer([]) -> undefined;
to_integer(Value) when is_atom(Value) -> to_integer(erlang:atom_to_list(Value));
to_integer(Value) when is_binary(Value) -> to_integer(erlang:binary_to_list(Value));
to_integer(Value) when is_integer(Value) -> Value;
to_integer(Value) when is_float(Value) -> erlang:round(Value);
to_integer([C]) when is_integer(C) andalso (C > $9 orelse C < $0) ->
    %% A single non-digit character yields its character code.
    C;
to_integer(Value) when is_list(Value) -> erlang:list_to_integer(Value).

%% @doc Convert (almost) any value to a float; undefined and the empty
%% list map to undefined. An integer-looking string gets ".0" appended,
%% since list_to_float("1") raises badarg.
-spec to_float(term()) -> float() | undefined.
to_float(undefined) -> undefined;
to_float([]) -> undefined;
to_float(Value) when is_atom(Value) -> to_float(erlang:atom_to_list(Value));
to_float(Value) when is_binary(Value) -> to_float(erlang:binary_to_list(Value));
to_float(Value) when is_integer(Value) -> float(Value);
to_float(Value) when is_float(Value) -> Value;
to_float(Value) when is_list(Value) ->
    case lists:member($., Value) of
        true -> erlang:list_to_float(Value);
        false -> erlang:list_to_float(Value ++ ".0")
    end.
%% @doc Quite loose conversion of values to boolean. Recognizes a fixed
%% set of "no"-spellings in list and binary form (plus the single-NUL
%% list [0]); everything else is delegated to the strict Django rules.
-spec to_bool(term()) -> true | false.
to_bool(V) when is_list(V); is_binary(V) ->
    case is_falsy_word(V) of
        true -> false;
        false -> to_bool_strict(V)
    end;
to_bool(V) ->
    to_bool_strict(V).

%% The explicit false-spellings accepted by to_bool/1.
is_falsy_word([0]) -> true;
is_falsy_word(V) ->
    lists:member(V, ["false", "FALSE", "n", "N", "no", "NO",
                     "disabled", "DISABLED",
                     <<"false">>, <<"FALSE">>, <<"n">>, <<"N">>,
                     <<"no">>, <<"NO">>,
                     <<"disabled">>, <<"DISABLED">>]).

%% @doc Convert values to boolean values according to the Django rules:
%% undefined, false, numeric zero, empty containers, "0"/<<"0">>, and the
%% ?ST_JUTTEMIS sentinel datetime (any valid year-9999 timestamp) are
%% false; everything else is true.
-spec to_bool_strict(term()) -> true | false.
to_bool_strict(undefined) -> false;
to_bool_strict(false) -> false;
to_bool_strict(0) -> false;
to_bool_strict(0.0) -> false;
to_bool_strict(<<>>) -> false;
to_bool_strict(<<0>>) -> false;
to_bool_strict([]) -> false;
to_bool_strict({rsc_list, []}) -> false;
to_bool_strict({trans, []}) -> false;
to_bool_strict("0") -> false;
to_bool_strict(<<"0">>) -> false;
to_bool_strict({{9999, Mo, Dy}, {Hr, Mi, Se}}) % ?ST_JUTTEMIS sentinel
  when is_integer(Mo), is_integer(Dy),
       is_integer(Hr), is_integer(Mi), is_integer(Se),
       Mo >= 1, Mo =< 12, Dy >= 1, Dy =< 31, Hr >= 0, Hr =< 23,
       Mi >= 0, Mi =< 59, Se >= 0, Se =< 60 -> false;
to_bool_strict(_) -> true.
%% @doc Convert a local date time to utc. Any year-9999 date is normalized
%% to the ?ST_JUTTEMIS sentinel. Times that never existed locally (DST
%% gap) or that fail conversion are returned unchanged; ambiguous times
%% (DST overlap) use the DST interpretation.
to_utc(undefined) ->
    undefined;
to_utc({{9999,_,_}, _}) ->
    ?ST_JUTTEMIS;
to_utc(D) ->
    case catch calendar:local_time_to_universal_time_dst(D) of
        [] -> D; % This time never existed in the local time, just take it as-is
        [UTC] -> UTC;
        [DstUTC, _UTC] -> DstUTC;
        {'EXIT', _} -> D
    end.

%% @doc Convert a utc date time to local. Year-9999 dates normalize to the
%% ?ST_JUTTEMIS sentinel; conversion failures return the input unchanged.
to_localtime(undefined) ->
    undefined;
to_localtime({{9999,_,_},_}) ->
    ?ST_JUTTEMIS;
to_localtime(D) ->
    case catch calendar:universal_time_to_local_time(D) of
        {'EXIT', _} -> D;
        LocalD -> LocalD
    end.
%% @doc Convert an input to a (universal) datetime, using to_date/1 and
%% to_time/1. If the input is a string, it is expected to be in iso
%% 8601 format, although it can also handle timestamps without time
%% zones. The time component of the datetime is optional.
%% Any parse failure yields undefined rather than an exception.
to_datetime({{_,_,_},{_,_,_}} = DT) -> DT;
to_datetime({_,_,_} = D) -> {D, {0,0,0}};
to_datetime(B) when is_binary(B) ->
    to_datetime(binary_to_list(B));
to_datetime(L) when is_list(L) ->
    try
        case string:tokens(L, " T") of
            [Date,Time] ->
                %% Shift a timestamp with an explicit zone offset to UTC:
                %% Mul is -1 for '+HH:MM' zones and +1 for '-HH:MM' zones.
                WithTZ = fun(Tm, Tz, Mul) ->
                                 TZTime = to_time(Tz),
                                 Add = calendar:datetime_to_gregorian_seconds({{0,1,1},TZTime}),
                                 Secs = calendar:datetime_to_gregorian_seconds({to_date(Date), to_time(Tm)}),
                                 calendar:gregorian_seconds_to_datetime(Secs+(Mul*Add))
                         end,
                case string:tokens(Time, "+") of
                    [Time1, TZ] ->
                        %% Timestamp with positive time zone
                        WithTZ(Time1, TZ, -1);
                    _ ->
                        case string:tokens(Time, "-") of
                            [Time1, TZ] ->
                                %% Timestamp with negative time zone
                                WithTZ(Time1, TZ, 1);
                            _ ->
                                case lists:reverse(Time) of
                                    [$Z|Rest] ->
                                        %% Timestamp ending on Z (= UTC)
                                        {to_date(Date), to_time(lists:reverse(Rest))};
                                    _ ->
                                        %% Timestamp without time zone
                                        {to_date(Date), to_time(Time)}
                                end
                        end
                end;
            [Date] ->
                %% Date only: midnight is assumed.
                {to_date(Date), {0,0,0}}
        end
    catch
        _:_ -> undefined
    end;
to_datetime(undefined) ->
    undefined.
%% @doc Convert an input to a date. Input is expected to be YYYY-MM-DD
%% or YYYY/MM/DD; DD-MM-YYYY is also accepted when the last token has
%% four digits. Unparseable input yields undefined.
to_date({_,_,_} = D) -> D;
to_date(B) when is_binary(B) ->
    to_date(binary_to_list(B));
to_date([]) -> undefined;
to_date(L) when is_list(L) ->
    case string:tokens(L, "-/") of
        [D,M,Y] when length(Y) =:= 4 ->
            %% Day-first notation, e.g. "31-12-2020"
            {to_integer(Y),to_integer(M),to_integer(D)};
        [Y,M,D] ->
            {to_integer(Y),to_integer(M),to_integer(D)};
        _ ->
            undefined
    end.

%% @doc Convert an input to a time. Input is expected to be HH:MM:SS
%% or HH.MM.SS; missing parts default to 0, and a compact 4-character
%% "HHMM" form is accepted as well.
to_time({_,_,_} = D) -> D;
to_time(B) when is_binary(B) ->
    to_time(binary_to_list(B));
to_time([]) -> undefined;
to_time([H1,H2,M1,M2]) ->
    %% Compact "HHMM": insert a separator and retry.
    to_time([H1,H2,$:,M1,M2]);
to_time(L) when is_list(L) ->
    %% Pad with zeros so "HH" and "HH:MM" also produce a full triple.
    [H,I,S|_] = lists:flatten([[to_integer(X) ||X <- string:tokens(L, ":.")], 0, 0]),
    {H,I,S}.
%% @doc Convert a datetime (in universal time) to an ISO time string.
-spec to_isotime(calendar:datetime()) -> string().
to_isotime(DateTime) ->
    to_list(z_dateformat:format(DateTime, "x-m-d\\TH:i:s\\Z", en)).

%%
%% @doc Convert an Erlang structure to a format that can be serialized by mochijson.
%%
%% Simple values
to_json(undefined) ->
    null;
to_json(X) when is_atom(X) ->
    X;
to_json(X) when is_integer(X) ->
    X;
to_json(X) when is_float(X) ->
    X;
to_json(X) when is_binary(X) ->
    X;
%% Tuple values
%% A datetime tuple is rendered as a date string via z_dateformat.
to_json({{Y,M,D},{H,I,S}} = DateTime)
  when is_integer(Y), is_integer(M), is_integer(D),
       is_integer(H), is_integer(I), is_integer(S) ->
    z_dateformat:format(DateTime, "c", [{utc,DateTime}]);
to_json({array, X}) ->
    %% Explicit request for array (to prevent string conversion for some lists)
    {array, [to_json(V) || V <- X]};
to_json({struct, X}) ->
    %% Already a mochijson struct: passed through unchanged.
    {struct, X};
to_json({X, Y}) ->
    %% A bare pair becomes a one-entry object.
    {struct, to_json_struct([{X, Y}])};
to_json(X) when is_tuple(X) ->
    %% Any other tuple becomes a JSON array of its elements.
    {array, [to_json(V) || V <- tuple_to_list(X)]};
%% List values
to_json([{X, Y}]) when is_atom(X) ->
    {struct, to_json_struct([{X, Y}])};
to_json([{X, Y} | Z]) when is_atom(X) ->
    %% Atom-keyed proplists become objects.
    {struct, to_json_struct([{X, Y} | Z])};
to_json(X) when is_list(X) ->
    %% Readable strings stay strings; other lists become arrays.
    case z_string:is_string(X) of
        true ->
            X;
        false ->
            {array, [to_json(V) || V <- X]}
    end.
%% Handle structs specially
%% Convert a property list into mochijson struct pairs, sanitizing keys
%% and recursively converting values.
to_json_struct([]) ->
    [];
to_json_struct([{X,Y}|T]) ->
    [{to_json_struct_key(X), to_json(Y)} | to_json_struct(T)].

%% Atoms, integers, binaries and printable strings are acceptable object
%% keys; anything else is replaced with the atom 'invalid_key'.
to_json_struct_key(X) when is_atom(X) orelse is_integer(X) orelse is_binary(X) ->
    X;
to_json_struct_key(X) when is_list(X) ->
    case z_string:is_string(X) of
        true ->
            X;
        false ->
            invalid_key
    end;
to_json_struct_key(_) ->
    invalid_key.
%% Render an IP address tuple (optionally paired with a port, which is
%% ignored) as a string: dotted decimal for IPv4, colon-separated
%% lowercase hex for IPv6 with zero words rendered as empty segments.
ip_to_list({IP, Port}) when is_tuple(IP), is_integer(Port) ->
    ip_to_list(IP);
ip_to_list({A, B, C, D}) ->
    lists:flatten(io_lib:format("~b.~b.~b.~b", [A, B, C, D]));
ip_to_list(IPv6) when tuple_size(IPv6) =:= 8 ->
    Segments = [case Word of
                    0 -> "";
                    _ -> io_lib:format("~.16b", [Word])
                end || Word <- tuple_to_list(IPv6)],
    lists:flatten(lists:join(":", Segments)).
%% Taken from egeoip (http://code.google.com/p/egeoip/source/browse/trunk/egeoip/src/egeoip.erl?r=19)
%% @spec ip_to_long(Address) -> {ok, integer()} | {error, badmatch}
%% @doc Convert an IP address from a string, IPv4 tuple or IPv6 tuple to the
%% big endian integer representation.
%% Fix: the string case promised above was missing; address strings are
%% now parsed with inet:parse_address/1 (unparseable strings still yield
%% {error, badmatch}, as all lists did before).
ip_to_long(Address) when is_list(Address) ->
    case inet:parse_address(Address) of
        {ok, Tuple} -> ip_to_long(Tuple);
        {error, _} -> {error, badmatch}
    end;
ip_to_long({B3, B2, B1, B0}) ->
    {ok, (B3 bsl 24) bor (B2 bsl 16) bor (B1 bsl 8) bor B0};
ip_to_long({W7, W6, W5, W4, W3, W2, W1, W0}) ->
    {ok, (W7 bsl 112) bor (W6 bsl 96) bor (W5 bsl 80) bor (W4 bsl 64) bor
     (W3 bsl 48) bor (W2 bsl 32) bor (W1 bsl 16) bor W0};
ip_to_long(_) ->
    {error, badmatch}.

%% @doc Convert long int to IP address tuple. FIXME: ipv6
long_to_ip(L) ->
    {ok, {(L band (255 bsl 24)) bsr 24,
          (L band (255 bsl 16)) bsr 16,
          (L band (255 bsl 8)) bsr 8,
          L band 255}}.
%% @doc Convert json from mochijson {struct, PropList} flavour to an easy
%% to use format for zotonic templates: atom-keyed, recursively converted
%% proplists.
%% NOTE(review): binary keys go through z_convert:to_atom/1 which creates
%% new atoms; do not feed unbounded untrusted JSON through this (atom
%% table exhaustion) — verify call sites.
convert_json({K, V}) when is_binary(K) ->
    {z_convert:to_atom(K), convert_json(V)};
convert_json({struct, PropList}) when is_list(PropList) ->
    convert_json(PropList);
convert_json(L) when is_list(L) ->
    [convert_json(V) || V <- L];
convert_json(V) ->
    V.
%% @doc Encode a list of Unicode code points (or a single code point) as
%% a flat list of UTF-8 bytes.
%% (Fix: removed trailing dataset residue that had been fused onto the
%% final line and broke compilation.)
unicode_to_utf8(List) when is_list(List) -> lists:flatmap(fun unicode_to_utf8/1, List);
unicode_to_utf8(Ch) -> char_to_utf8(Ch).

%% @doc Encode one Unicode code point as a list of UTF-8 bytes.
%% Surrogates (16#D800..16#DFFF) and 16#FFFE/16#FFFF encode as "?", as
%% does anything >= 16#80000000; the 5- and 6-byte forms follow the
%% original pre-RFC-3629 UTF-8 definition.
char_to_utf8(Ch) when is_integer(Ch), Ch >= 0 ->
    if Ch < 128 ->
            %% 0yyyyyyy
            [Ch];
       Ch < 16#800 ->
            %% 110xxxxy 10yyyyyy
            [16#C0 + (Ch bsr 6),
             128+(Ch band 16#3F)];
       Ch < 16#10000 ->
            %% 1110xxxx 10xyyyyy 10yyyyyy
            if Ch < 16#D800; Ch > 16#DFFF, Ch < 16#FFFE ->
                    [16#E0 + (Ch bsr 12),
                     128+((Ch bsr 6) band 16#3F),
                     128+(Ch band 16#3F)];
               true -> [$?]
            end;
       Ch < 16#200000 ->
            %% 11110xxx 10xxyyyy 10yyyyyy 10yyyyyy
            [16#F0+(Ch bsr 18),
             128+((Ch bsr 12) band 16#3F),
             128+((Ch bsr 6) band 16#3F),
             128+(Ch band 16#3F)];
       Ch < 16#4000000 ->
            %% 111110xx 10xxxyyy 10yyyyyy 10yyyyyy 10yyyyyy
            [16#F8+(Ch bsr 24),
             128+((Ch bsr 18) band 16#3F),
             128+((Ch bsr 12) band 16#3F),
             128+((Ch bsr 6) band 16#3F),
             128+(Ch band 16#3F)];
       Ch < 16#80000000 ->
            %% 1111110x 10xxxxyy 10yyyyyy 10yyyyyy 10yyyyyy 10yyyyyy
            [16#FC+(Ch bsr 30),
             128+((Ch bsr 24) band 16#3F),
             128+((Ch bsr 18) band 16#3F),
             128+((Ch bsr 12) band 16#3F),
             128+((Ch bsr 6) band 16#3F),
             128+(Ch band 16#3F)];
       true -> [$?]
    end.
%% Copyright 2016-2017 TensorHub, Inc.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(port_io).
-behavior(gen_server).
-export([start_link/1]).
-export([init/1, handle_info/2, handle_cast/2, handle_call/3,
terminate/2, code_change/3]).
-record(state, {port, buf, buflen, readers}).
%% ===================================================================
%% Start / init
%% ===================================================================
%% @doc Start the server, spawning Exe as an external program over a port.
start_link(Exe) ->
    gen_server:start_link(?MODULE, [Exe], []).

%% Open the executable as a port delivering binaries and its exit status;
%% start with an empty input buffer and no pending readers.
init([Exe]) ->
    Port = erlang:open_port({spawn_executable, Exe}, [binary, exit_status]),
    {ok, #state{port=Port, buf=[], buflen=0, readers=[]}}.
%% Erlang I/O-protocol requests arrive as plain messages.
handle_info({io_request, From, Ref, Req}, State) ->
    handle_io_request(Req, From, Ref, State);
%% Output from the external program; Port is bound in the pattern, so
%% only messages from our own port match.
handle_info({Port, {data, Data}}, #state{port=Port}=State) ->
    handle_data(Data, State);
%% Program exit: clean stop on status 0, abnormal otherwise.
handle_info({Port, {exit_status, 0}}, #state{port=Port}=State) ->
    {stop, normal, State};
handle_info({Port, {exit_status, N}}, #state{port=Port}=State) ->
    {stop, {exit_status, N}, State}.
%% ===================================================================
%% IO request
%% ===================================================================

%% put_chars: forward the characters to the external program and ack.
handle_io_request({put_chars, latin1, Chars}, From, Ref, State) ->
    send_to_port(Chars, State),
    send_io_reply(From, Ref, ok),
    {noreply, State};
%% get_chars: queue the reader; it is answered as soon as enough bytes
%% have been buffered (possibly immediately).
handle_io_request({get_chars, _Prompt, Len}, From, Ref, State) ->
    Next = apply_readers(add_reader(Len, From, Ref, State)),
    {noreply, Next}.

send_to_port(Data, #state{port=Port}) ->
    erlang:port_command(Port, Data).

%% Reply per the Erlang I/O protocol: {io_reply, ReplyAs, Reply}.
send_io_reply(From, Ref, Reply) ->
    erlang:send(From, {io_reply, Ref, Reply}).

%% Append a pending reader {Len, From, Ref}; FIFO order is preserved.
add_reader(Len, From, Ref, #state{readers=Readers}=S) ->
    Reader = {Len, From, Ref},
    S#state{readers=Readers++[Reader]}.
apply_readers(State) ->
case pop_available_reader(State) of
{Reader, NextState} ->
apply_readers(apply_reader(Reader, NextState));
false ->
State
end.
%% Take the head reader off the queue when the buffer can satisfy it.
%% Only the head is considered, preserving request order.
pop_available_reader(#state{readers=[]}) -> false;
pop_available_reader(#state{readers=[Reader|Rest]}=State) ->
    case data_available(Reader, State) of
        true -> {Reader, State#state{readers=Rest}};
        false -> false
    end.
%% A reader can be served once at least Len bytes are buffered.
data_available({Len, _, _}, #state{buflen=Available}) ->
    Len =< Available.
%% Answer one reader: carve Len bytes off the buffer and reply with them.
%% Precondition: data_available/2 returned true for this reader.
apply_reader({Len, From, Ref}, #state{buf=Buf}=State) ->
    {Chars, NextBuf} = get_chars(Len, Buf),
    send_io_reply(From, Ref, {ok, Chars}),
    set_buffer(NextBuf, State).
%% Take Len bytes from the head of the buffer (a list of binaries).
%% If the first binary is too small, coalesce it with the next one and
%% retry. NOTE(review): assumes Len is no larger than the total buffered
%% size (guaranteed by data_available/2); otherwise the second clause
%% fails with function_clause on a single-element buffer.
get_chars(Len, [B1|RestBuf]) when Len =< byte_size(B1) ->
    {Chars, RestChars} = split_binary(B1, Len),
    {Chars, get_chars_finalize_buf(RestChars, RestBuf)};
get_chars(Len, [B1, B2|RestBuf]) ->
    get_chars(Len, [<<B1/binary, B2/binary>>|RestBuf]).
%% Push leftover bytes back onto the buffer, dropping an empty remainder.
get_chars_finalize_buf(Rest, Buf) ->
    case byte_size(Rest) of
        0 -> Buf;
        _ -> [Rest | Buf]
    end.
%% Replace the buffer and recompute its cached byte length.
set_buffer(Buf, S) ->
    S#state{buf=Buf, buflen=iolist_size(Buf)}.
%% ===================================================================
%% Handle data
%% ===================================================================
%% Append newly received output and serve any readers it unblocks.
handle_data(Data, State) ->
    Next = apply_readers(buffer(Data, State)),
    {noreply, Next}.
%% Append one binary chunk to the buffer, updating the cached length.
buffer(Data, #state{buf=Buf, buflen=Len}=S) ->
    S#state{buf=Buf++[Data], buflen=Len+byte_size(Data)}.
%% ===================================================================
%% gen_server boilerplate
%% ===================================================================
%% No casts are part of this server's protocol; ignore them.
handle_cast(_Msg, State) ->
    {noreply, State}.
%% No calls are part of this server's protocol.
%% NOTE(review): returning {noreply, _} leaves any caller blocked until
%% its gen_server:call timeout fires — consider replying with an error.
handle_call(_Msg, _From, State) ->
    {noreply, State}.
%% Nothing to clean up explicitly; the port is closed when its owning
%% process exits.
terminate(_Reason, _State) ->
    ok.
%% Stateless hot-code upgrade: carry the state through unchanged.
%% (Also removes stray non-Erlang text that trailed the final period.)
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
% @doc
% <a href="https://reference.digilentinc.com/reference/pmod/pmodgyro/reference-manual">
% PmodGYRO
% </a>
% module that gets the gyroscopes data via SPI.
%
% Start the driver with
% ```
% 1> grisp:add_device(spi1, pmod_gyro).
% '''
% @end
-module(pmod_gyro).
-behaviour(gen_server).
-include("grisp.hrl").
-include("pmod_gyro.hrl").
% API
-export([start_link/2]).
-export([read/0]).
% Callbacks
-export([init/1]).
-export([handle_call/3]).
-export([handle_cast/2]).
-export([handle_info/2]).
-export([code_change/3]).
-export([terminate/2]).
-define(SPI_MODE, #{clock => {low, leading}}).
%--- API -----------------------------------------------------------------------
% @private
% Start the gyro driver for the given GRiSP slot with driver options.
start_link(Slot, Opts) ->
    gen_server:start_link(?MODULE, [Slot, Opts], []).
% @doc Read the gyroscopes X, Y and Z values in degrees per second.
%
% === Example ===
% ```
% 2> pmod_gyro:read().
% {249.28279313922965,-26.078862235243843,12.764756149667337}
% '''
-spec read() -> {X::float(), Y::float(), Z::float()}.
read() ->
    % Look up the registered driver process for this module.
    Dev = grisp_devices:default(?MODULE),
    % The server replies with either a scaled sample or {error, Reason};
    % errors are re-raised in the caller.
    case gen_server:call(Dev#device.pid, read) of
        {error, Reason} -> error(Reason);
        Result -> Result
    end.
%--- Callbacks -----------------------------------------------------------------
% @private
% Probe the device, program the requested full-scale resolution
% (250/500/2000 dps) and power up the device with all three axes.
init([Slot, Opts]) ->
    Bus = grisp_spi:open(Slot),
    verify_device(Bus),
    Res = maps:get(resolution, Opts, 250),
    % Full-scale selection bits written into CTRL_REG4.
    ResOpt = case Res of
        250 -> 2#00000000;
        500 -> 2#00010000;
        2000 -> 2#00100000;
        _ -> error({invalid_option, Res})
    end,
    % Set the resolution
    <<>> = write(Bus, ?CTRL_REG4, ResOpt),
    % Enable the device and axis sensors
    <<>> = write(Bus, ?CTRL_REG1, 2#00001111),
    grisp_devices:register(Slot, ?MODULE),
    % unit_degree maps raw signed 16-bit samples onto deg/s: a reading
    % is divided by this factor in handle_call/3.
    {ok, #{bus => Bus, unit_degree => (32766 / Res)}}.
% @private
% Read all six output registers (X/Y/Z low+high) in one burst starting
% at OUT_X_L and scale the little-endian signed samples to deg/s.
handle_call(read, _From, #{bus := Bus, unit_degree := UnitDeg} = State) ->
    <<X:16/signed-little, Y:16/signed-little, Z:16/signed-little>>
        = read(Bus, ?OUT_X_L, 6),
    {reply, {X / UnitDeg, Y / UnitDeg, Z / UnitDeg}, State};
handle_call(Request, From, _State) ->
    error({unknown_request, Request, From}).
% @private
% No casts are part of this driver's protocol; fail fast.
handle_cast(Request, _State) -> error({unknown_cast, Request}).
% @private
% No out-of-band messages are expected; fail fast.
handle_info(Info, _State) -> error({unknown_info, Info}).
% @private
% Stateless upgrade: keep the state map unchanged.
code_change(_OldVsn, State, _Extra) -> {ok, State}.
% @private
% Nothing to clean up.
terminate(_Reason, _State) -> ok.
%--- Internal ------------------------------------------------------------------
% Check the WHO_AM_I register so we fail fast when the wrong (or no)
% device is plugged into the slot.
verify_device(Bus) ->
    case read(Bus, ?WHO_AM_I, 1) of
        <<?DEVID>> -> ok;
        Other -> error({device_mismatch, {who_am_i, Other}})
    end.
% Read Pad response bytes starting at register Reg; the ?MS_INCR flag
% requests address auto-increment for multi-byte reads.
read(Bus, Reg, Pad) ->
    transfer(Bus, {?SPI_MODE, <<?RW_READ:1, ?MS_INCR:1, Reg:6>>, 1, Pad}).
% Write a single byte to register Reg (?MS_SAME: no auto-increment).
write(Bus, Reg, Value) ->
    transfer(Bus, {?SPI_MODE, <<?RW_WRITE:1, ?MS_SAME:1, Reg:6, Value:8>>, 2, 0}).
% Run one SPI transfer and unwrap the single response binary.
% (Also removes stray non-Erlang text that trailed the final period.)
transfer(Bus, Message) ->
    [Response] = grisp_spi:transfer(Bus, [Message]),
    Response.
%%This file is licensed under the terms of the Modified BSD License.
%%This is a counter to keep track of simulated time. The timed
%%semantics work without it but having now() in ABS is useful. Global
%%time is also necessary to calculate the next deployment component
%%update barrier (distance_to_next_boundary/0).
-module(clock).
-behaviour(gen_server).
-export([start_link/2,stop/0,advance/1,advance_limit/1,is_at_limit/0,now/0,time_since_model_start/0,next_boundary/0,distance_to_next_boundary/0]).
-export([code_change/3,handle_call/3,handle_cast/2,handle_info/2,init/1,terminate/2]).
-record(state,{now, limit, start_real_time}).
%% Interface
%% Start the globally registered clock. Clocklimit is `none' or a
%% rational upper bound on simulated time; StartTime is the wall-clock
%% time (ms) at which the model started.
start_link(Clocklimit, StartTime) ->
    gen_server:start_link({global, clock}, ?MODULE, [Clocklimit, StartTime], []).
%% Stop the global clock process.
stop() ->
    gen_server:stop({global, clock}).
%% Advance simulated time by Amount (a rational). Replies
%% {ok, NewTime}, or {limit_reached, Limit} when clamped at the limit.
advance(Amount) ->
    gen_server:call({global, clock}, {advance, Amount}, infinity).
%% Raise the clock limit by a positive integer Amount.
advance_limit(Amount) ->
    %% Advancing the limit here does not cause waiting processes to
    %% start again; use cog_monitor:increase_clock_limit instead.
    gen_server:call({global, clock}, {advance_limit, Amount}, infinity).
%% True when simulated time has reached the configured limit.
is_at_limit() ->
    gen_server:call({global, clock}, is_at_limit, infinity).
%% Current simulated time as a rational.
now() ->
    gen_server:call({global, clock}, now, infinity).
%% Wall-clock milliseconds elapsed since the model was started.
time_since_model_start() ->
    gen_server:call({global, clock}, time_since_model_start, infinity).
distance_to_next_boundary() ->
    %% Returns relative time until the next resource refresh timepoint
    %% (served by the next_int clause of handle_call/3).
    gen_server:call({global, clock}, next_int, infinity).
next_boundary() ->
    %% Returns absolute time of the next resource refresh timepoint
    %% (the next integer boundary above the current simulated time).
    gen_server:call({global, clock}, next_boundary, infinity).
%% gen_server functions
%% Simulated time starts at rational zero.
init([Clocklimit, StartTime]) ->
    {ok, #state{now=rationals:to_r(0), limit=Clocklimit, start_real_time=StartTime}}.
%% Advance simulated time, clamping at the limit: replies {ok, NewTime}
%% or {limit_reached, Limit} (in which case `now' is set to the limit).
handle_call({advance, Amount},_From,State=#state{now=Time,limit=Limit}) ->
    Newtime = rationals:add(Time, Amount),
    Reply=case Limit of
              none -> {reply, {ok, Newtime}, State#state{now=Newtime}};
              _ -> case rationals:is_greater(Limit, Newtime) % Limit > Newtime
                   of
                       true -> {reply, {ok, Newtime}, State#state{now=Newtime}};
                       false -> {reply, {limit_reached, Limit},
                                 State#state{now=Limit}}
                   end
          end,
    Reply;
%% Raise the limit by a positive integer; an unlimited clock (`none')
%% stays unlimited.
handle_call({advance_limit, Amount},_From,State=#state{limit=Limit}) ->
    case is_integer(Amount) and (Amount > 0) of
        true ->
            case Limit of
                none -> {reply, {ok, none}, State};
                _ -> Newlimit=rationals:add(Limit, Amount),
                     {reply, {ok, Newlimit}, State#state{limit=Newlimit}}
            end;
        false ->
            {reply, {error, <<"Need positive integer increment">>}, State}
    end;
handle_call(is_at_limit,_From,State=#state{now=Now,limit=Limit}) ->
    {reply, cmp:eq(Now, Limit), State};
handle_call(now, _From, State=#state{now=Time}) ->
    {reply, Time, State};
handle_call(time_since_model_start, _From, State=#state{start_real_time=StartTime}) ->
    {reply, erlang:system_time(millisecond) - StartTime, State};
%% NOTE(review): this returns Time - trunc(Time), i.e. the fractional
%% part (time since the PREVIOUS integer boundary), while the caller
%% distance_to_next_boundary/0 documents "time until the next
%% boundary", which would be trunc(Time) + 1 - Time as computed by the
%% next_boundary clause below — confirm which semantics is intended.
handle_call(next_int, _From, State=#state{now=Time}) ->
    Distance = rationals:sub(Time, rationals:trunc(Time)),
    case rationals:is_zero(Distance) of
        true -> {reply, {1,1}, State};
        false -> {reply, Distance, State}
    end;
handle_call(next_boundary, _From, State=#state{now=Time}) ->
    %% Refill boundary is the next integer -- truncate current time and add 1
    %% since we don't have a round-up function, and in case we are at an
    %% integer we want the next one anyway.
    {reply,rationals:add(rationals:trunc(Time), {1, 1}) , State}.
%% No casts are part of the clock protocol.
handle_cast(_Msg,State) ->
    %% unused
    {noreply, State}.
%% No out-of-band messages are part of the clock protocol.
handle_info(_Info,State) ->
    %% unused
    {noreply, State}.
%% Nothing to clean up.
terminate(_Reason,_State) ->
    ok.
%% Hot code upgrade is deliberately unsupported for the clock.
%% (Also removes stray non-Erlang text that trailed the final period.)
code_change(_OldVsn,State,_Extra) ->
    %% not supported
    {error, State}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Copyright 2018 Pentland Edge Ltd.
%%
%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
%% use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
%% License for the specific language governing permissions and limitations
%% under the License.
%%
-module(coords).
-export([
lla_to_ecef/1,
signed_lon/1,
ecef_distance/2,
enu_distance/2,
deg_to_rad/1,
rad_to_deg/1,
ecef_to_enu/2,
calc_angle/3,
dot_product/2,
vec_mag/1,
dec_to_dms/1,
dms_to_dec/1]).
%% WGS84 constants.
-define(WGS84_A, 6378137).
-define(WGS84_B, 6356752.31424518).
%% Convert geodetic {Lat, Lon, Alt} (degrees, metres) to ECEF {X, Y, Z}
%% metres on the WGS84 ellipsoid.
lla_to_ecef({Lat, Lon, Alt}) ->
    Phi = deg_to_rad(Lat),
    Lambda = deg_to_rad(Lon),
    ASq = ?WGS84_A * ?WGS84_A,
    BSq = ?WGS84_B * ?WGS84_B,
    %% First eccentricity squared of the ellipsoid.
    ESq = (ASq - BSq) / ASq,
    SinPhi = math:sin(Phi),
    CosPhi = math:cos(Phi),
    SinLambda = math:sin(Lambda),
    CosLambda = math:cos(Lambda),
    %% Prime-vertical radius of curvature at this latitude.
    N = ?WGS84_A / math:sqrt(1 - ESq * SinPhi * SinPhi),
    X = (N + Alt) * CosPhi * CosLambda,
    Y = (N + Alt) * CosPhi * SinLambda,
    Z = ((BSq / ASq) * N + Alt) * SinPhi,
    {X, Y, Z}.
%% Normalise a [0, 360) longitude into signed degrees (-180, 180].
signed_lon(Lon) when Lon > 180.0 ->
    Lon - 360.0;
signed_lon(Lon) ->
    Lon.
%% @doc Euclidean distance between two points given in ECEF metres.
ecef_distance({X1, Y1, Z1}, {X2, Y2, Z2}) ->
    Dx = X2 - X1,
    Dy = Y2 - Y1,
    Dz = Z2 - Z1,
    math:sqrt(Dx * Dx + Dy * Dy + Dz * Dz).
%% @doc Euclidean distance between two points given in local ENU metres.
enu_distance(Pt1, Pt2) ->
    % Same computation as ECEF distance: both are 3-D vector magnitudes.
    ecef_distance(Pt1, Pt2).
%% Degrees to radians.
deg_to_rad(Degrees) ->
    Degrees * math:pi() / 180.
%% Radians to degrees.
rad_to_deg(Radians) ->
    Radians * 180 / math:pi().
%% Convert ECEF coordinates to ENU (East, North, Up) local plane.
%% The reference {Lat, Lon, Height} fixes the tangent-plane origin.
%% Based on the formulae at:
%% http://wiki.gis.com/wiki/index.php/Geodetic_system#From_WGS-84_to_ENU:_sample_code
ecef_to_enu({RefLat,RefLon,RefH}, {X, Y, Z}) ->
    % Find reference location in ECEF.
    {Xr, Yr, Zr} = lla_to_ecef({RefLat,RefLon,RefH}),
    % Convert the Lat, Lon into radians.
    LatRad = deg_to_rad(RefLat),
    LonRad = deg_to_rad(RefLon),
    SinRefLat = math:sin(LatRad),
    SinRefLon = math:sin(LonRad),
    CosRefLat = math:cos(LatRad),
    CosRefLon = math:cos(LonRad),
    % Offset of the target point relative to the reference, in ECEF.
    Xdiff = X - Xr,
    Ydiff = Y - Yr,
    Zdiff = Z - Zr,
    % Rotate the ECEF offset into the local tangent plane.
    E = -SinRefLon*Xdiff + CosRefLon*Ydiff,
    N = -SinRefLat*CosRefLon*Xdiff - SinRefLat*SinRefLon*Ydiff + CosRefLat*Zdiff,
    U = CosRefLat*CosRefLon*Xdiff + CosRefLat*SinRefLon*Ydiff + SinRefLat*Zdiff,
    {E, N, U}.
%% Calculate the angle between two points from a common origin. Return value
%% is in radians. Arguments must be supplied in ECEF format.
%% NOTE(review): if A or B coincides with Orig a magnitude is zero and
%% the division fails; rounding can also push the acos argument just
%% outside [-1, 1] for near-collinear vectors — confirm callers avoid this.
calc_angle({X1,Y1,Z1} = _Orig, {X2,Y2,Z2} = _A, {X3,Y3,Z3} = _B) ->
    % Create vectors OA and OB.
    OA = {X2-X1, Y2-Y1, Z2-Z1},
    OB = {X3-X1, Y3-Y1, Z3-Z1},
    % Calculate the dot product of the two vectors.
    DotProd = dot_product(OA, OB),
    % Calculate vector magnitudes.
    MagOA = vec_mag(OA),
    MagOB = vec_mag(OB),
    % Compute the angle.
    math:acos(DotProd / (MagOA * MagOB)).
%% Dot product of two 3-D vectors.
dot_product({Ax, Ay, Az}, {Bx, By, Bz}) ->
    Ax * Bx + Ay * By + Az * Bz.
%% Euclidean length of a 3-D vector.
vec_mag({X, Y, Z}) ->
    SumSq = X * X + Y * Y + Z * Z,
    math:sqrt(SumSq).
%% Convert a decimal coordinate to {degrees,minutes,seconds}.
%% The sign is carried on the degrees component only; minutes and
%% seconds are magnitudes.
%% NOTE(review): inputs in (-1.0, 0.0) lose their sign because trunc/1
%% yields integer 0 — confirm callers never pass such coordinates.
dec_to_dms(Dec) ->
    Deg = trunc(Dec),
    MinF = abs(Dec - Deg),
    Min = trunc(60.0 * MinF),
    SecF = MinF - (Min / 60.0),
    Sec = 3600.0 * SecF,
    {Deg,Min,Sec}.
%% Convert {Degrees, Minutes, Seconds} back to a decimal coordinate.
%% The sign of the result follows the degrees component; minutes and
%% seconds are treated as magnitudes (inverse of dec_to_dms/1).
%% (Also removes stray non-Erlang text that trailed the final period.)
dms_to_dec({H,M,S}) ->
    Mag = abs(H) + (M / 60.0) + (S / 3600.0),
    case H >= 0 of
        true -> Mag;
        false -> -Mag
    end.
-module(tql_either).
%% API exports
-export([ fold/2
, sequence/1
, traverse/2
, map/2
, is_ok/1
, is_error/1
, from_bool/3
, with_default/2
, and_/1
, oks/1
]).
-type either(Result, Reason) :: {ok, Result} | {error, Reason}.
-export_type([either/2]).
%%%-----------------------------------------------------------------------------
%%% API
%%%-----------------------------------------------------------------------------
%% @doc Thread an initial value through a pipeline of functions.
%%
%% Each function receives the unwrapped value of the previous step: an
%% `{ok, Result}' is unwrapped to `Result' and a bare value is passed
%% through as-is. The first `{error, Reason}' aborts the pipeline and is
%% returned unchanged. The final result is always wrapped, so a bare
%% value from the last function becomes `{ok, Result}'.
-spec fold(term(), [fun((Result) -> Return)]) -> either(Result, Reason) when
    Result :: term(),
    Reason :: term(),
    Return :: either(Result, Reason) | Result.
fold(Init, Fs) when is_list(Fs) ->
    fold_create(lists:foldl(fun fold_handle/2, Init, Fs)).
%% @doc Combine a list of result tuples into a single result.
%%
%% Returns `{ok, Values}' with all ok-values in their original order,
%% or the leftmost `{error, Reason}' if any element is an error.
-spec sequence([either(Result, Reason)]) -> either([Result], Reason) when
    Result :: term(),
    Reason :: term().
sequence(Eithers) ->
    Combine =
        fun ({error, Reason}, _Acc) -> {error, Reason};
            ({ok, _}, {error, _} = Err) -> Err;
            ({ok, Value}, {ok, Values}) -> {ok, [Value | Values]}
        end,
    lists:foldr(Combine, {ok, []}, Eithers).
%% @doc Map an either-returning function over a list, collecting results.
%%
%% Stops at the first `{error, Reason}' and returns it; otherwise
%% returns `{ok, Results}' in input order. Bare return values from F
%% are treated as successes.
-spec traverse(F, [A]) -> either([Result], Reason) when
    F :: fun ((A) -> either(Result, Reason)),
    A :: term(),
    Result :: term(),
    Reason :: term().
traverse(F, Items) ->
    traverse_help(F, Items, []).
%% @doc Apply F to the value inside an `{ok, _}'; pass errors through.
-spec map(F :: fun ((A) -> B), either(A, Reason)) -> either(B, Reason).
map(_F, {error, _} = Error) -> Error;
map(F, {ok, Value}) -> {ok, F(Value)}.
%% @doc True for `{ok, _}', false for `{error, _}'.
-spec is_ok(either(Result, Reason)) -> boolean() when
    Result :: term(),
    Reason :: term().
is_ok({error, _}) ->
    false;
is_ok({ok, _}) ->
    true.
%% @doc True for `{error, _}', false for `{ok, _}'.
-spec is_error(either(Result, Reason)) -> boolean() when
    Result :: term(),
    Reason :: term().
is_error({error, _}) ->
    true;
is_error({ok, _}) ->
    false.
%% @doc Lift a boolean into an either: `true' selects `{ok, Result}',
%% `false' selects `{error, Reason}'.
-spec from_bool(Result, Reason, boolean()) -> either(Result, Reason) when
    Result :: term(),
    Reason :: term().
from_bool(Result, Reason, Bool) ->
    case Bool of
        true -> {ok, Result};
        false -> {error, Reason}
    end.
%% @doc Wrap an either-returning function so that a success is paired
%% with the original input: `F(X) = {ok, Y}' becomes `{ok, {X, Y}}'.
%% Bare return values are treated as successes; errors pass through.
-spec and_(fun((X) -> either(Y, Err))) -> fun((X) -> either({X, Y}, Err)).
and_(F) ->
    fun (X) ->
            case F(X) of
                {error, _} = Err -> Err;
                {ok, Y} -> {ok, {X, Y}};
                Y -> {ok, {X, Y}}
            end
    end.
%% @doc Unwrap an `{ok, Value}', or return Default for an error.
-spec with_default(either(Result, Reason), Default) -> Result | Default when
    Result :: term(),
    Reason :: term(),
    Default :: term().
with_default({error, _}, Default) ->
    Default;
with_default({ok, Value}, _Default) ->
    Value.
%% @doc Keep only the values carried by `{ok, _}' tuples, in order.
-spec oks([either(Result, Reason)]) -> [Result] when
    Result :: term(),
    Reason :: term().
oks(Eithers) ->
    lists:filtermap(
        fun ({error, _}) -> false;
            ({ok, Value}) -> {true, Value}
        end,
        Eithers).
%%%-----------------------------------------------------------------------------
%%% Internal functions
%%%-----------------------------------------------------------------------------
%% Apply one pipeline step: short-circuit on errors, unwrap `{ok, _}',
%% and pass bare values straight to the function.
-spec fold_handle(fun((Result) -> Return), Return) -> Return when
    Result :: term(),
    Reason :: term(),
    Return :: either(Result, Reason) | Result.
fold_handle(_F, {error, Reason}) ->
    {error, Reason};
fold_handle(F, {ok, Value}) ->
    F(Value);
fold_handle(F, Value) ->
    F(Value).
%% Normalise a step result into a proper either tuple, wrapping bare
%% values in `{ok, _}'.
-spec fold_create(either(Result, Reason) | Result) -> either(Result, Reason) when
    Result :: term(),
    Reason :: term().
fold_create({ok, _} = Ok) ->
    Ok;
fold_create({error, _} = Err) ->
    Err;
fold_create(Value) ->
    {ok, Value}.
%% Tail-recursive worker for traverse/2: accumulate normalised results
%% in reverse, aborting on the first error.
-spec traverse_help(F, Xs, Acc) -> either(Result, Reason) when
    F :: fun ((A) -> either(Result, Reason)),
    Acc :: [Result],
    Xs :: [A],
    A :: term(),
    Result :: term(),
    Reason :: term().
traverse_help(_F, [], Acc) ->
    {ok, lists:reverse(Acc)};
traverse_help(F, [X | Rest], Acc) ->
    case fold_create(F(X)) of
        {error, _} = Err -> Err;
        {ok, Value} -> traverse_help(F, Rest, [Value | Acc])
    end.
%% Local variables:
%% mode: erlang
%% erlang-indent-level: 2
%% indent-tabs-mode: nil
%% fill-column: 80
%% coding: latin-1
%% End:
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_btree_copy).
-export([copy/3]).
-include("couch_db.hrl").
-record(acc, {
    % Source btree being copied.
    btree,
    % Destination file handle for the copy.
    fd,
    % {Fun, UserAcc}: callback applied to every KV before a leaf write.
    before_kv_write = {fun(Item, Acc) -> {Item, Acc} end, []},
    % Predicate deciding which items are copied at all.
    filter = fun(_) -> true end,
    compression = ?DEFAULT_COMPRESSION,
    % Serialized-size threshold at which a pending node is flushed.
    chunk_threshold,
    % Pending per-level node contents: Level -> [{Key, State}],
    % newest-first; level 1 holds leaf KVs.
    nodes = dict:from_list([{1, []}]),
    cur_level = 1,
    max_level = 1
}).
%% Copy Btree into Fd, rebuilding the tree bottom-up in a single fold.
%% Returns {ok, NewRootState, LastUserAcc}, where NewRootState points at
%% the copied root in Fd and LastUserAcc is the final accumulator of the
%% before_kv_write callback. Options: {before_kv_write, {Fun, Acc0}},
%% {filter, Fun}, override (truncate Fd first), {compression, Comp},
%% {chunk_threshold, Bytes}.
copy(Btree, Fd, Options) ->
    case lists:member(override, Options) of
        true ->
            ok = couch_file:truncate(Fd, 0);
        false ->
            ok
    end,
    Acc0 = #acc{
        btree = Btree,
        fd = Fd,
        chunk_threshold = Btree#btree.chunk_threshold
    },
    Acc = apply_options(Options, Acc0),
    % After the fold, fold_copy must have left us back at level 1.
    {ok, _, #acc{cur_level = 1} = FinalAcc0} = couch_btree:fold(
        Btree, fun fold_copy/3, Acc, []),
    {ok, CopyRootState, FinalAcc} = finish_copy(FinalAcc0),
    {_, LastUserAcc} = FinalAcc#acc.before_kv_write,
    {ok, CopyRootState, LastUserAcc}.
%% Fold the caller-supplied option list into the copy accumulator.
%% `override' is handled up-front in copy/3, so it is a no-op here.
apply_options(Options, Acc) ->
    lists:foldl(fun apply_option/2, Acc, Options).

%% Apply a single option to the accumulator record.
apply_option({before_kv_write, {Fun, UserAcc}}, Acc) ->
    Acc#acc{before_kv_write = {Fun, UserAcc}};
apply_option({filter, Fun}, Acc) ->
    Acc#acc{filter = Fun};
apply_option(override, Acc) ->
    Acc;
apply_option({compression, Comp}, Acc) ->
    Acc#acc{compression = Comp};
apply_option({chunk_threshold, Threshold}, Acc) ->
    Acc#acc{chunk_threshold = Threshold}.
%% Split a user item into its {Key, Value} pair via the btree callback.
extract(#acc{btree = #btree{extract_kv = Extract}}, Value) ->
    Extract(Value).
%% Rebuild a user item from a {Key, Value} pair via the btree callback.
assemble(#acc{btree = #btree{assemble_kv = Assemble}}, Key, Value) ->
    Assemble(Key, Value).
%% Run the user's before_kv_write callback over every KV about to be
%% written to a leaf, threading the user accumulator through. The
%% callback may rewrite the value but must preserve the key.
before_leaf_write(#acc{before_kv_write = {Fun, UserAcc0}} = Acc, KVs) ->
    {NewKVs, NewUserAcc} = lists:mapfoldl(
        fun({K, V}, UAcc) ->
            Item = assemble(Acc, K, V),
            {NewItem, UAcc2} = Fun(Item, UAcc),
            % Assert the callback did not change the key.
            {K, _NewValue} = NewKV = extract(Acc, NewItem),
            {NewKV, UAcc2}
        end,
        UserAcc0, KVs),
    {NewKVs, Acc#acc{before_kv_write = {Fun, NewUserAcc}}}.
%% Append a leaf node to the destination file.
%% Returns {ok, {Pos, Red, Size}} — the node pointer triple.
write_leaf(#acc{fd = Fd, compression = Comp}, Node, Red) ->
    {ok, Pos, Size} = couch_file:append_term(Fd, Node, [{compression, Comp}]),
    {ok, {Pos, Red, Size}}.
%% Append an interior (key-pointer) node to the destination file.
%% Re-reduces the children's reductions and accumulates their sizes so
%% the returned pointer {Pos, Red, TotalSize} describes the whole subtree.
write_kp_node(#acc{fd = Fd, btree = Bt, compression = Comp}, NodeList) ->
    {ChildrenReds, ChildrenSize} = lists:foldr(
        fun({_Key, {_P, Red, Sz}}, {AccR, AccSz}) ->
            {[Red | AccR], Sz + AccSz}
        end,
        {[], 0}, NodeList),
    Red = case Bt#btree.reduce of
        nil -> [];
        _ ->
            % Combine the child reductions into this node's reduction.
            couch_btree:final_reduce(Bt, {[], ChildrenReds})
    end,
    {ok, Pos, Size} = couch_file:append_term(
        Fd, {kp_node, NodeList}, [{compression, Comp}]),
    {ok, {Pos, Red, ChildrenSize + Size}}.
%% couch_btree:fold callback: collect filtered KVs into the level-1
%% pending node, flushing it as a leaf when it outgrows chunk_threshold.
fold_copy(Item, _Reds, #acc{nodes = Nodes, cur_level = 1, filter = Filter} = Acc) ->
    case Filter(Item) of
        false ->
            {ok, Acc};
        true ->
            {K, V} = extract(Acc, Item),
            LevelNode = dict:fetch(1, Nodes),
            % KVs are kept newest-first; reversed before writing.
            LevelNodes2 = [{K, V} | LevelNode],
            NextAcc = case ?term_size(LevelNodes2) >= Acc#acc.chunk_threshold of
                true ->
                    % Leaf is full: write it and push its pointer up one
                    % level (possibly cascading further up).
                    {LeafState, Acc2} = flush_leaf(LevelNodes2, Acc),
                    bubble_up({K, LeafState}, Acc2);
                false ->
                    Acc#acc{nodes = dict:store(1, LevelNodes2, Nodes)}
            end,
            {ok, NextAcc}
    end.
%% Insert a freshly written node's {LastKey, Pointer} into the level
%% above the current one.
bubble_up({Key, NodeState}, #acc{cur_level = Level} = Acc) ->
    bubble_up({Key, NodeState}, Level, Acc).
%% Clear the just-flushed Level and add the new pointer to Level + 1,
%% growing the tree when already at the top level and cascading kp-node
%% writes upward when the parent level itself exceeds chunk_threshold.
bubble_up({Key, NodeState}, Level, Acc) ->
    #acc{max_level = MaxLevel, nodes = Nodes} = Acc,
    Acc2 = Acc#acc{nodes = dict:store(Level, [], Nodes)},
    case Level of
        MaxLevel ->
            % The flushed node was at the top: open a new root level.
            Acc2#acc{
                nodes = dict:store(Level + 1, [{Key, NodeState}], Acc2#acc.nodes),
                max_level = Level + 1
            };
        _ when Level < MaxLevel ->
            NextLevelNodes = dict:fetch(Level + 1, Acc2#acc.nodes),
            NextLevelNodes2 = [{Key, NodeState} | NextLevelNodes],
            case ?term_size(NextLevelNodes2) >= Acc#acc.chunk_threshold of
                true ->
                    % Parent is full too: write it and keep bubbling.
                    {ok, NewNodeState} = write_kp_node(
                        Acc2, lists:reverse(NextLevelNodes2)),
                    bubble_up({Key, NewNodeState}, Level + 1, Acc2);
                false ->
                    Acc2#acc{
                        nodes = dict:store(Level + 1, NextLevelNodes2, Acc2#acc.nodes)
                    }
            end
    end.
%% Flush every partially filled level after the fold, walking from the
%% leaves up and returning the pointer to the copied root.
%%
%% Clause 1: single-level tree — the remaining KVs (if any) become the
%% root leaf; an empty tree copies to a nil root.
finish_copy(#acc{cur_level = 1, max_level = 1, nodes = Nodes} = Acc) ->
    case dict:fetch(1, Nodes) of
        [] ->
            {ok, nil, Acc};
        [{_Key, _Value} | _] = KvList ->
            {RootState, Acc2} = flush_leaf(KvList, Acc),
            {ok, RootState, Acc2}
    end;
%% Clause 2: reached the top level — a single pending pointer already
%% is the root; several pointers need one final kp-node.
finish_copy(#acc{cur_level = Level, max_level = Level, nodes = Nodes} = Acc) ->
    case dict:fetch(Level, Nodes) of
        [{_Key, {Pos, Red, Size}}] ->
            {ok, {Pos, Red, Size}, Acc};
        NodeList ->
            {ok, RootState} = write_kp_node(Acc, lists:reverse(NodeList)),
            {ok, RootState, Acc}
    end;
%% Clause 3: intermediate level — write out whatever is pending here
%% (leaf at level 1, kp-node above) and push its pointer up a level.
finish_copy(#acc{cur_level = Level, nodes = Nodes} = Acc) ->
    case dict:fetch(Level, Nodes) of
        [] ->
            Acc2 = Acc#acc{cur_level = Level + 1},
            finish_copy(Acc2);
        [{LastKey, _} | _] = NodeList ->
            {UpperNodeState, Acc2} = case Level of
                1 ->
                    flush_leaf(NodeList, Acc);
                _ when Level > 1 ->
                    {ok, KpNodeState} = write_kp_node(Acc, lists:reverse(NodeList)),
                    {KpNodeState, Acc}
            end,
            ParentNode = dict:fetch(Level + 1, Nodes),
            Acc3 = Acc2#acc{
                nodes = dict:store(Level + 1, [{LastKey, UpperNodeState} | ParentNode], Nodes),
                cur_level = Level + 1
            },
            finish_copy(Acc3)
    end.
%% Write the accumulated KVs (stored newest-first) out as one leaf:
%% run the user callback, compute the leaf reduction, append the node.
%% (Also removes stray non-Erlang text that trailed the final period.)
flush_leaf(KVs, #acc{btree = Btree} = Acc) ->
    {NewKVs, Acc2} = before_leaf_write(Acc, lists:reverse(KVs)),
    Red = case Btree#btree.reduce of
        nil -> [];
        _ ->
            % Reduce over the assembled user items, as couch_btree does.
            Items = lists:map(
                fun({K, V}) -> assemble(Acc2, K, V) end,
                NewKVs),
            couch_btree:final_reduce(Btree, {Items, []})
    end,
    {ok, LeafState} = write_leaf(Acc2, {kv_node, NewKVs}, Red),
    {LeafState, Acc2}.
%% ------------------------------------------------------------------
%% @doc LoraWAN Adaptive DataRate engine.
%%
%% This module exposes a simple adaptive DataRate algorithm loosely
%% based on Semtech's recommended algorithm [1,2].
%%
%% === Usage ===
%%
%% This module has a minimal API and almost zero coupling to external
%% code. Because of this, you have to bring your own IO (BYOIO). It
%% will not perform any packet reception, transmission, or
%% construction. Instead, you must call this module for every uplink
%% packet received and the returned terms tell you want actions you
%% need to take in order to properly regulate the end-device's
%% DataRate and transmit-power.
%%
%% Example:
%%
%% ```
%% %% Create a new handle with the device's region. See new/2 for more control.
%% State01 = lorawan_adr:new('US915'),
%%
%% %% First track all packet offers.
%% OfferN = #adr_offer{
%% packet_hash = RxOfferHash
%% },
%%
%% State09 = lorawan_adr:track_offer(State08, OfferN),
%%
%% %% Assume these RxPkt[X] values all come from uplink packet and
%% %% that we have already received several similar packets since State01.
%% Packet10 = #adr_packet{
%% rssi = RxPktRssi,
%% snr = RxPktSnr,
%% wants_adr = RxPktAdrBit == 1, % true
%% wants_adr_ack = RxPktAdrAckReqBit == 1, % false
%% datarate_config = {RxPktSpreading, RxPktBandwidth},
%% packet_hash = RxPktHash
%% },
%%
%% {State10, {NewDataRate, NewPower}} = lorawan_adr:track_packet(
%% State09,
%% Packet10
%% ),
%%
%% %%
%% %% ... here you handle sending a downlink packet with the
%% %% `LinkADRReq' MAC command.
%% %%
%%
%% %% If you receive an uplink response with the `LinkADRAns' MAC
%% %% command, inform the ADR engine so it can update its state:
%% Answer0 = #adr_answer{
%% channel_mask_ack = true,
%% datarate_ack = true,
%% power_ack = true
%% },
%% State11 = lorawan_adr:track_adr_answer(State10, Answer0),
%% '''
%%
%% ==== A rant about transmit power ====
%%
%% LoRaWAN requires us to set both DataRate and transmit power when
%% sending a `LinkADRReq' MAC command. Unfortunately, LoRaWAN's
%% designers forgot to give devices a mechanism for reporting their
%% current TX power. Furthermore, DataRate and transmit power changes
%% are communicated to end devices with values, not deltas (e.g., set
%% power to 1, not reduce power by 2 steps). Unlike DataRate, which is
%% absolute and reported by the forwarding gateway, power degrades
%% over distance and is not derivable by the receiving party. Because
%% of this design oversight, we track a device's TX power by:
%%
%% <dl>
%% <dt> Devices for which we have not yet received a `LinkADRAns': </dt>
%% <dd> Assume the device is transmitting at TXPower index 0 as
%% defined its region's 'TX Power Table' defined in the
%% regional parameters document. </dd>
%% <dt> Otherwise: </dt>
%% <dd> Lookup values sent in last `LinkADRReq' </dd>
%% </dl>
%%
%% ==== References ====
%%
%% <ol>
%% <li>LoRaWAN – simple rate adaptation recommended algorithm (Revision 1.0)</li>
%% <li>[https://www.thethingsnetwork.org/docs/lorawan/adaptive-data-rate.html]</li>
%% <li>[https://github.com/TheThingsNetwork/ttn/issues/265#issuecomment-329449935]</li>
%% </ol>
%% @end
%% ------------------------------------------------------------------
-module(lorawan_adr).
-include("lorawan_adr.hrl").
%% ==================================================================
%% Public API Exports
%% ==================================================================
-export([
datarate_entry/2,
max_datarate/1,
min_datarate/1,
new/1,
new/2,
track_adr_answer/2,
track_offer/2,
track_packet/2
]).
-export_type([
adjustment/0,
datarate_config/0,
handle/0
]).
-type datarate_config() :: {lorawan_utils:spreading(), lorawan_utils:bandwidth()}
%% A tuple of `{SpreadingFactor, Bandwidth}'.
.
-type datarate_entry() :: {pos_integer(), datarate_config()}
%% A tuple of `{DataRate, {SpreadingFactor, Bandwidth}}' returned by {@link
%% datarate_entry/2}.
.
-type adjustment() :: hold | {DataRate :: pos_integer(), TxPower :: pos_integer()}
%% An adjustment to send to an end-device, or not when `hold',
%% returned by {@link track_packet/2}.
.
%% ==================================================================
%% Internal Constants
%% ==================================================================
%% The default number of seconds we keep historic offers around.
%%
%% This value can be overridden by calling {@link new/2} with option
%% `{offer_lifetime_secs, NumberOfSeconds}'.
%%
%% We could get more precise about evicting old offers, but
%% compared to using time, it doesn't buy us anything and probably makes
%% things more complicated than it needs to be.
-define(DEFAULT_OFFER_LIFETIME_S, 10).
%% The default minimum number of packets we need in history before
%% eviction.
%%
%% This value can be overridden by calling {@link new/2} with option
%% `{adr_history_len, Length}'.
-define(DEFAULT_ADR_HISTORY_LEN, 20).
%% The closest we allow historical packet SNRs to approach the
%% theoretical minimum when determining whether or not change
%% spreading factor.
%%
%% This value can be overridden by calling {@link new/2} with option
%% `{snr_headroom, Decibels}'.
-define(DEFAULT_SNR_HEADROOM_dB, 10.0).
%% Default amount to adjust uplink TXPower for every available
%% adjustment step in dBm. Note, unlike DataRate, we adjust by dBm and
%% then find the closest TXPower index value to match.
-define(DEFAULT_TXPOWER_ADJUSTMENT_DBM_STEP_SIZE, 3).
%% A map of spreading factor to minimum signal-to-noise ratio needed
%% for reliable reception.
-define(SNR_THRESHOLD_dB, #{
7 => -7.5,
8 => -10.0,
9 => -12.5,
10 => -15.0,
11 => -17.5,
12 => -20.0
}).
%% ==================================================================
%% Internal Types
%% ==================================================================
-record(packet, {
%% Hash is used as a unique identifier.
hash :: binary(),
%% Number of gateways which heard this packet.
gateway_diversity :: pos_integer(),
%% Best signal-to-noise ratio for all the gateways that reported SNR.
best_snr :: float(),
%% Best signal strength for all the gateways that reported RSSI.
best_rssi :: float()
}).
%% {monotonic time (seconds), hash}
-type offer() :: {integer(), binary()}.
%% Region-specific Data Rate encoding table.
%%
%% TODO: bandwidth is not currently used because we're artificially
%% filtering the regional parameters for only 125kHz channels. Does
%% this make sense?
%%
%% [{DataRate, {Spreading, Bandwidth}}]
-type regional_datarates() ::
list(
{DataRate :: pos_integer(), datarate_config()}
).
-record(device, {
%% The region who's rules this device is operating under. Not
%% necessarily the region the device is physically in.
region :: atom(),
%% History of previously received packets.
packet_history :: list(#packet{}),
%% History of previously received offers.
offer_history :: list(offer()),
%% ADR awaiting an acknowledgment from the device via a
%% `LinkADRAns' MAC command.
pending_adjustments :: list(adjustment()),
%% ADR adjustments acknowledged by the device via a `LinkADRAns'
%% MAC command.
accepted_adjustments :: list(adjustment()),
%% Table of region-specific DataRate parameters.
datarates :: regional_datarates(),
%% Table of region-specific uplink power.
txpowers :: lorawan_mac_region:tx_power_table(),
%% Slowest DataRate available in this device's `datarates'.
%%
%% The word DataRate, as used by the regional parameters document,
%% is essentially an index into a table. It is probably safe to
%% remove this and assume 0, but let's track it for completeness
%% sake for the time being.
min_datarate :: pos_integer(),
%% Spreading factor for corresponding `min_datarate'.
%%
%% Q: Why would a `max_spreading' correspond to `min_datarate'?
%% A: Because DataRate, aka throughput, decreases as spreading
%% increases.
%%
%% TODO: maybe it's fine to always lookup this value in
%% `datarates' when needed.
max_spreading :: pos_integer(),
%% Fastest DataRate available in this device's `datarates'.
max_datarate :: pos_integer(),
%% Spreading factor for corresponding `max_datarate'.
%%
%% Q: Why would a `min_spreading' correspond to `max_datarate'?
%% A: Because DataRate, aka throughput, increases as spreading
%% factor decreases.
%%
%% TODO: maybe it's fine to always lookup this value in
%% `datarates' when needed.
min_spreading :: pos_integer(),
%% Index of strongest transmit power in regional parameters table.
%%
%% TODO: power _decreases_ as the index increases, so
%% `max_txpower_idx' is almost certainly index 0.
max_txpower_idx :: pos_integer(),
%% Value, in dBm, of transmit power at index `max_txpower_idx' in
%% the regional parameters document.
max_txpower_dbm :: number(),
%% Index of weakest transmit power in the regional parameters
%% document.
min_txpower_idx :: pos_integer(),
%% Value, in dBm, of transmit power at index `min_txpower_idx' in
%% the regional parameters document.
min_txpower_dbm :: number(),
%% --------------------------------------------------------------
%% Options
%% --------------------------------------------------------------
option_adr_history_len = ?DEFAULT_ADR_HISTORY_LEN :: pos_integer(),
option_offer_lifetime_sec = ?DEFAULT_OFFER_LIFETIME_S :: number(),
option_snr_headroom = ?DEFAULT_SNR_HEADROOM_dB :: float(),
option_txpower_adjustment_dbm_step_size = ?DEFAULT_TXPOWER_ADJUSTMENT_DBM_STEP_SIZE :: number()
}).
-opaque handle() :: #device{}
%% Obtain a `handle()' by calling {@link new/1} or {@link new/2}.
%%
%% `handle()' are the state object used for all ADR API calls.
.
%% ==================================================================
%% Public API
%% ==================================================================
%% ------------------------------------------------------------------
%% @doc Returns a new ADR handle with sane defaults suitable for use
%% in the specified region.
%%
%% Use {@link new/2} instead for more granular control of ADR
%% behavior.
%% @end
%% ------------------------------------------------------------------
-spec new(Region :: atom()) -> handle().
new(Region) ->
    %% Filter gotthardp's table down to only 125kHz uplink DataRates.
    FilterMapFn = fun
        ({_, _, down}) ->
            false;
        ({DataRate, {Spreading, 125 = Bandwidth}, _Direction}) ->
            {true, {DataRate, {Spreading, Bandwidth}}};
        (_) ->
            false
    end,
    Datarates = lists:filtermap(FilterMapFn, lorawan_mac_region:datars(Region)),
    %% min-max refer to #device.min_datarate docs: the table is ordered
    %% from the slowest DataRate (largest spreading) to the fastest.
    [{MinDataRate, {MaxSpreading, _}} | _] = Datarates,
    {MaxDataRate, {MinSpreading, _}} = lists:last(Datarates),
    %% Power table: the head entry is used as the strongest TX power and
    %% the last entry as the weakest (see #device field docs).
    TxPowers = lorawan_mac_region:uplink_power_table(Region),
    [{MaxTxPowerIdx, MaxTxPowerDBm} | _] = TxPowers,
    {MinTxPowerIdx, MinTxPowerDBm} = lists:last(TxPowers),
    #device{
        region = Region,
        offer_history = [],
        packet_history = [],
        pending_adjustments = [],
        accepted_adjustments = [],
        datarates = Datarates,
        txpowers = TxPowers,
        min_datarate = MinDataRate,
        max_spreading = MaxSpreading,
        max_datarate = MaxDataRate,
        min_spreading = MinSpreading,
        max_txpower_idx = MaxTxPowerIdx,
        max_txpower_dbm = MaxTxPowerDBm,
        min_txpower_idx = MinTxPowerIdx,
        min_txpower_dbm = MinTxPowerDBm
    }.
%% ------------------------------------------------------------------
%% @doc Returns a new ADR handle for the specified region.
%%
%% <h4>Options:</h4>
%% <dl>
%% <dt>{@type {offer_lifetime_secs, pos_integer()@}}</dt>
%% <dd>The maximum allowable age an offer can be for consideration
%% in gateway diversity calculations.</dd>
%%
%% <dt>{@type {adr_history_len, pos_integer()@}}</dt>
%% <dd>The minimum number of packets we need in history before
%% {@link track_packet/2} will return DataRate adjustments.</dd>
%%
%% <dt>{@type {snr_headroom, number()@}}</dt>
%%   <dd>The closest we allow historical packet SNRs to approach the
%%       theoretical minimum when determining whether or not to
%%       change DataRate.</dd>
%%
%% <dt>{@type {txpower_adjustment_dbm_step_size, number()@}}</dt>
%% <dd>After maxing out DataRate, adjust uplink transmit power by
%% these many dBm for each remaining step. The adjustment is
%% added to TxPower when remaining steps is negative, otherwise
%% it's subtracted. Read ADR reference algorithm
%% for definition of "step".</dd>
%% </dl>
%% @end
%% ------------------------------------------------------------------
-spec new(Region :: term(), Options :: proplists:proplist()) -> handle().
new(Region, Options) ->
    %% Fold each recognized option into the default handle built by
    %% new/1. Unknown options (or out-of-range values) crash with
    %% function_clause, which is intentionally assertive.
    ApplyOptionFn = fun
        ({offer_lifetime_secs, Secs}, Acc) when Secs > 0 ->
            Acc#device{option_offer_lifetime_sec = Secs};
        ({adr_history_len, Len}, Acc) when Len > 0 ->
            Acc#device{option_adr_history_len = Len};
        ({snr_headroom, Headroom}, Acc) when Headroom > 0.0 ->
            Acc#device{option_snr_headroom = Headroom};
        ({txpower_adjustment_dbm_step_size, StepSize}, Acc) when is_number(StepSize) ->
            Acc#device{option_txpower_adjustment_dbm_step_size = StepSize}
    end,
    lists:foldl(ApplyOptionFn, new(Region), Options).
%% ------------------------------------------------------------------
%% @doc Consider this new offer in future ADR calculations.
%%
%% Because we have not bought the packet yet, and can't see its
%% header, we do not yet know if this packet has its ADR bit
%% set. So we will hold onto it until:
%%
%% <ol>
%% <li>We purchase the packet. </li>
%% <li>The offer is older than `offer_lifetime_secs' seconds.</li>
%% </ol>
%% @end
%% ------------------------------------------------------------------
-spec track_offer(
    Device :: handle(),
    Offer :: #adr_offer{}
) -> handle().
track_offer(
    Device,
    Offer
) ->
    #device{offer_history = Offers} = Device,
    %% See note for OFFER_LIFETIME_S for an explanation of why we're
    %% using time and not length.
    %%
    %% `second' replaces the symbolic time unit `seconds', which has
    %% been deprecated since OTP 19 (same semantics).
    Now = erlang:monotonic_time(second),
    %% Offers are kept newest-first, so a single takewhile/2 pass
    %% drops every offer older than the configured lifetime.
    TrimFn = fun({TimeStamp, _}) -> TimeStamp + Device#device.option_offer_lifetime_sec > Now end,
    Trimmed = lists:takewhile(TrimFn, Offers),
    Device#device{offer_history = [{Now, Offer#adr_offer.packet_hash} | Trimmed]}.
%% ------------------------------------------------------------------
%% @doc Remember this packet and (possibly) return an ADR DataRate
%% adjustment.
%%
%% <h4> Returns </h4>
%% <dl>
%% <dt>`hold'</dt>
%% <dd> Returned when either:
%% <ul>
%% <li>Have not collected enough historic packets to make an ADR
%% adjustment.</li>
%% <li>DataRate can not be improved (likely too low historic
%% SNR).</li>
%% </ul>
%% </dd>
%%
%% <dt>{@type {NewDataRate, NewTxPower@}}</dt>
%% <dd>Returned when either:
%% <ul>
%% <li>Returned when current DataRate is not optimal based on
%% SNRs we've seen in historic packets.</li>
%%     <li>The `ADRAckReq' bit is set, regardless of whether we've
%%         collected enough packets yet.</li>
%% </ul>
%% </dd>
%% </dl>
%% @end
%% ------------------------------------------------------------------
-spec track_packet(
    handle(),
    #adr_packet{}
) -> {handle(), adjustment()}.
%% Clause 1: the packet's ADR bit is set, so the device has ceded
%% uplink-parameter control to us.
track_packet(
    Device0,
    Pkt
) when Pkt#adr_packet.wants_adr == true ->
    #device{
        offer_history = Offers0,
        packet_history = History0,
        pending_adjustments = PendingAdjustments0,
        accepted_adjustments = AcceptedAdjustments0,
        txpowers = TxPowerTable,
        max_txpower_idx = MaxTxPowerIdx,
        min_datarate = MinDataRate,
        max_datarate = MaxDataRate,
        option_adr_history_len = ConfiguredAdrHistoryLen,
        option_snr_headroom = ConfiguredSNRHeadroom,
        option_txpower_adjustment_dbm_step_size = TxPowerAdjustmentDBmStepSize
    } = Device0,
    #adr_packet{
        wants_adr_ack = WantsADRAck,
        rssi = PktRssi,
        snr = PktSnr,
        datarate_config = DataRateConfig,
        packet_hash = PktHash
    } = Pkt,
    %% Offers matching this packet's hash are other gateways that saw
    %% the same uplink; fold them into the diversity count.
    {OfferGatewayDiversity, Offers1} = count_and_prune_offers_for_hash(PktHash, Offers0),
    %% Keep at most ConfiguredAdrHistoryLen - 1 old entries so that,
    %% after prepending below, history never exceeds the cap.
    TrimmedHistory = lists:sublist(History0, ConfiguredAdrHistoryLen - 1),
    History1 =
        case TrimmedHistory of
            [H | T] when H#packet.hash == PktHash ->
                %% Previous diversity count + this packet + offer count
                %%
                %% Offer count is unlikely to be > 0 considering we've
                %% already accepted a different gateway's packet with
                %% this hash
                CumulativeDiversity = 1 + OfferGatewayDiversity + H#packet.gateway_diversity,
                BestSnr = erlang:max(H#packet.best_snr, PktSnr),
                [H#packet{gateway_diversity = CumulativeDiversity, best_snr = BestSnr} | T];
            _ ->
                NewHistoryHead = #packet{
                    hash = PktHash,
                    gateway_diversity = OfferGatewayDiversity + 1,
                    best_snr = PktSnr,
                    best_rssi = PktRssi
                },
                [NewHistoryHead | TrimmedHistory]
        end,
    {CurrentDataRate, {CurrentSpreading, _CurrentBandwidth}} =
        case datarate_entry(Device0, DataRateConfig) of
            %% TODO: what is the best strategy when a device sends a
            %% packet at a `{Spreading, Bandwidth}' not in our
            %% regional parameters table? Short term is to just
            %% lie to the DataRate adjuster that the device is
            %% at the minimum DataRate.
            undefined -> datarate_entry(Device0, MinDataRate);
            DataRateEntry -> DataRateEntry
        end,
    %% We don't (and can't!) know with great confidence what transmit
    %% power the device is using. We can however guess based on the
    %% most recently acknowledged ADR adjustment if we have any. If we
    %% haven't received any acknowledgments yet, we assume the device
    %% is using its region's maximum transmit power.
    {LastAcceptedDataRate, LastAcceptedTxPowerIdx} =
        case AcceptedAdjustments0 of
            [] -> {CurrentDataRate, MaxTxPowerIdx};
            [Accepted | _] -> Accepted
        end,
    %% If the device is transmitting at a DataRate other than the one
    %% it last acknowledged, our acceptance history is stale; drop it.
    AcceptedAdjustments1 =
        case CurrentDataRate == LastAcceptedDataRate of
            true ->
                AcceptedAdjustments0;
            false ->
                lager:warning("expected data rate ~p, got ~p", [
                    LastAcceptedDataRate,
                    CurrentDataRate
                ]),
                []
        end,
    Adjustment = adjust_uplink_params(
        CurrentDataRate,
        CurrentSpreading,
        MaxDataRate,
        History1,
        WantsADRAck,
        ConfiguredAdrHistoryLen,
        ConfiguredSNRHeadroom,
        TxPowerTable,
        TxPowerAdjustmentDBmStepSize,
        LastAcceptedTxPowerIdx
    ),
    %% We only want to put actual adjustments in
    %% `pending_adjustments', `hold' is not really an
    %% adjustment. Perhaps the `adjustment()' type should be
    %% refactored to indicate that.
    PendingAdjustments1 =
        case Adjustment of
            hold -> PendingAdjustments0;
            _ -> [Adjustment | PendingAdjustments0]
        end,
    Device1 = Device0#device{
        offer_history = Offers1,
        packet_history = History1,
        pending_adjustments = PendingAdjustments1,
        accepted_adjustments = AcceptedAdjustments1
    },
    lager:info(
        "~p, ADR ~p, ADRAck ~p, SNR ~p, RSSI ~p, diversity ~p," ++
            " pend adj len ~p, history len ~p, adjustment ~p",
        [
            datarate_entry(Device0, DataRateConfig),
            true,
            WantsADRAck,
            PktSnr,
            PktRssi,
            OfferGatewayDiversity,
            length(PendingAdjustments1),
            length(History1),
            Adjustment
        ]
    ),
    {Device1, Adjustment};
%% We clear all state when device doesn't want ADR control.
track_packet(
    Device,
    _Pkt
) ->
    case Device#device.packet_history of
        [] -> ok;
        _ -> lager:info("device turned off ADR, clearing history")
    end,
    {
        Device#device{
            packet_history = [],
            offer_history = [],
            pending_adjustments = [],
            accepted_adjustments = []
        },
        hold
    }.
%% ------------------------------------------------------------------
%% @doc Returns this device's minimum (slowest) DataRate index[1].
%%
%% 1: what regional parameters spec calls DataRate.
%% @end
%% ------------------------------------------------------------------
-spec min_datarate(handle()) -> pos_integer().
min_datarate(#device{min_datarate = MinDataRate}) ->
    MinDataRate.
%% ------------------------------------------------------------------
%% @doc Returns this device's maximum (fastest) DataRate index[1].
%%
%% 1: what regional parameters spec calls DataRate.
%% @end
%% ------------------------------------------------------------------
-spec max_datarate(handle()) -> pos_integer().
max_datarate(#device{max_datarate = MaxDataRate}) ->
    MaxDataRate.
%% ------------------------------------------------------------------
%% @doc Returns the {@link datarate_entry()} for the given
%% DataRate-index[1] or {@link datarate_config()}.
%%
%% Returns `undefined' when DataRate is not in the device's
%% table. This function always returns the full DataRate table
%% entry. One of the values in the outer tuple is redundant and will
%% be the same as the Index or Config you pass in.
%%
%% 1: what regional parameters spec calls DataRate.
%% @end
%% ------------------------------------------------------------------
-spec datarate_entry(
    Device :: handle(),
    DataRate :: pos_integer() | datarate_config()
) -> datarate_entry() | undefined.
datarate_entry(#device{datarates = Table}, Config) when is_tuple(Config) ->
    %% A `{Spreading, Bandwidth}' config lives in the second tuple slot.
    case lists:keyfind(Config, 2, Table) of
        false -> undefined;
        Entry -> Entry
    end;
datarate_entry(#device{datarates = Table}, DataRate) when is_integer(DataRate) ->
    %% A DataRate index lives in the first tuple slot.
    case lists:keyfind(DataRate, 1, Table) of
        false -> undefined;
        Entry -> Entry
    end.
%% ------------------------------------------------------------------
%% @doc Must be called when a device uplinks a `LinkADRAns' MAC
%% command.
%%
%% Failure to call this function for every `LinkADRAns' will result in
%% growing memory consumption and non-standards-compliant ADR
%% behavior.
%% @end
%% ------------------------------------------------------------------
-spec track_adr_answer(Device :: handle(), Answer :: #adr_answer{}) -> handle().
track_adr_answer(#device{pending_adjustments = []} = Device, _Answer) ->
    %% An answer with nothing pending means either buggy device
    %% firmware or a deliberate attempt to poison our state.
    lager:warning(
        "Device firmware bug or DoS attempt: received an ADR answer" ++
            " with no outstanding ADR requests"
    ),
    Device;
track_adr_answer(Device0, Answer) ->
    #device{
        %% TODO: how to handle the case where `PendingTail =/= []'?
        pending_adjustments = [{AcceptedDataRate, AcceptedTXPower} = PendingHead | PendingTail],
        accepted_adjustments = AcceptedAdjustments0
    } = Device0,
    #adr_answer{
        channel_mask_ack = ChannelMaskAck,
        datarate_ack = DataRateAck,
        power_ack = PowerAck
    } = Answer,
    Acceptance = fun
        (true) -> accepted;
        (false) -> rejected
    end,
    %% NOTE: channel mask is handled outside of this module. All we
    %% know is if it was accepted.
    lager:info(
        "device ~s DataRate ~p, ~s TxPower ~p, and ~s ChannelMask" ++
            " with ~p previously unanswered adjustments",
        [
            Acceptance(DataRateAck),
            AcceptedDataRate,
            Acceptance(PowerAck),
            AcceptedTXPower,
            Acceptance(ChannelMaskAck),
            length(PendingTail)
        ]
    ),
    %% Only a fully accepted adjustment (both DataRate AND TxPower) is
    %% remembered; any rejection simply clears the pending queue.
    case {ChannelMaskAck, DataRateAck, PowerAck} of
        {_, true, true} ->
            Device0#device{
                pending_adjustments = [],
                accepted_adjustments = [PendingHead | AcceptedAdjustments0]
            };
        _ ->
            Device0#device{pending_adjustments = []}
    end.
%% ==================================================================
%% Private API
%% ==================================================================
-spec snr_threshold(lorawan_utils:spreading()) -> number().
snr_threshold(Spreading) ->
    %% ?SNR_THRESHOLD_dB maps spreading factor -> threshold SNR in dB;
    %% an unknown spreading factor crashes with badkey (assertive).
    Thresholds = ?SNR_THRESHOLD_dB,
    maps:get(Spreading, Thresholds).
%% Returns the highest per-packet best SNR seen in the uplink history.
%% Crashes on an empty list, matching the nonempty_list/1 spec.
-spec best_snr(nonempty_list(#packet{})) -> number().
best_snr(History) ->
    lists:max([Packet#packet.best_snr || Packet <- History]).
-spec adjust_uplink_params(
    AdjSteps :: integer(),
    DataRate :: pos_integer(),
    MaxDataRate :: pos_integer(),
    TxPowerAdjustmentDBmStepSize :: number(),
    TxPowerDBm :: number(),
    MinTxPowerDBm :: number(),
    MaxTxPowerDBm :: number()
) -> {DataRate :: pos_integer(), TxPowerDBm :: number()}.
%% Spend positive steps first on raising DataRate, then on lowering
%% transmit power; spend negative steps on raising transmit power
%% (DataRate is never lowered).
adjust_uplink_params(StepsLeft, DR, MaxDR, StepDBm, PowerDBm, FloorDBm, CeilDBm) when
    (StepsLeft > 0), (DR < MaxDR)
->
    %% Positive headroom: consume as many steps as possible raising
    %% DataRate before touching transmit power.
    Take = erlang:min(MaxDR - DR, StepsLeft),
    adjust_uplink_params(StepsLeft - Take, DR + Take, MaxDR, StepDBm, PowerDBm, FloorDBm, CeilDBm);
adjust_uplink_params(StepsLeft, DR, MaxDR, StepDBm, PowerDBm, FloorDBm, CeilDBm) when
    (StepsLeft > 0), (PowerDBm > FloorDBm)
->
    %% DataRate is maxed out: trade each remaining step for a
    %% `StepDBm' reduction in transmit power, clamped at the floor.
    adjust_uplink_params(
        StepsLeft - 1,
        DR,
        MaxDR,
        StepDBm,
        max(PowerDBm - StepDBm, FloorDBm),
        FloorDBm,
        CeilDBm
    );
adjust_uplink_params(StepsLeft, DR, MaxDR, StepDBm, PowerDBm, FloorDBm, CeilDBm) when
    (StepsLeft < 0), (PowerDBm < CeilDBm)
->
    %% Negative headroom: raise transmit power toward the ceiling.
    %% NOTE: decreasing DataRate is not allowed.
    adjust_uplink_params(
        StepsLeft + 1,
        DR,
        MaxDR,
        StepDBm,
        min(PowerDBm + StepDBm, CeilDBm),
        FloorDBm,
        CeilDBm
    );
adjust_uplink_params(_StepsLeft, DR, _MaxDR, _StepDBm, PowerDBm, _FloorDBm, _CeilDBm) ->
    %% Either out of adjustment steps, or DataRate/TxPower are pegged
    %% at their respective limits.
    {DR, PowerDBm}.
-spec adjust_uplink_params(
    CurrentDataRate :: pos_integer(),
    CurrentSpreading :: lorawan_utils:spreading(),
    MaxDataRate :: pos_integer(),
    History :: nonempty_list(#packet{}),
    WantsADRAck :: boolean(),
    ConfiguredAdrHistoryLen :: pos_integer(),
    ConfiguredSNRHeadroom :: number(),
    TxPowerTable :: lorawan_mac_region:tx_power_table(),
    TxPowerAdjustmentDBmStepSize :: number(),
    CurrentTxPowerIdx :: number()
) -> adjustment().
%% Computes an ADR adjustment from the SNR margin seen in history.
%% Only runs when we have enough history OR the device demands an
%% answer via ADRAckReq; otherwise returns `hold' (second clause).
adjust_uplink_params(
    CurrentDataRate,
    CurrentSpreading,
    MaxDataRate,
    History,
    WantsADRAck,
    ConfiguredAdrHistoryLen,
    ConfiguredSNRHeadroom,
    TxPowerTable,
    TxPowerAdjustmentDBmStepSize,
    CurrentTxPowerIdx
) when (length(History) >= ConfiguredAdrHistoryLen) or (WantsADRAck == true) ->
    SNR_threshold = snr_threshold(CurrentSpreading),
    SNR_max = best_snr(History),
    %% TODO: add a bunch of extra headroom (e.g., perform more
    %% conservative adjustments, or maybe no adjustments) when
    %% we do not have enough history but WantsADRAck is true
    SNR_margin = SNR_max - SNR_threshold - ConfiguredSNRHeadroom,
    %% One adjustment step per 3 dB of margin (hardcoded divisor —
    %% presumably mirrors the reference ADR algorithm's step size;
    %% confirm before changing).
    AdjSteps = trunc(SNR_margin / 3),
    %% CurrentTxPowerIdx is already bound, so this match doubles as an
    %% assertion that the index exists in the power table.
    {CurrentTxPowerIdx, CurrentTxPowerDBm} = lists:keyfind(
        CurrentTxPowerIdx,
        1,
        TxPowerTable
    ),
    %% Table is ordered strongest power first.
    [{_MaxTxPowerIdx, MaxTxPowerDBm} | _] = TxPowerTable,
    {_MinTxPowerIdx, MinTxPowerDBm} = lists:last(TxPowerTable),
    {AdjustedDataRate, AdjustedTxPowerDBm} = adjust_uplink_params(
        AdjSteps,
        CurrentDataRate,
        MaxDataRate,
        TxPowerAdjustmentDBmStepSize,
        CurrentTxPowerDBm,
        MinTxPowerDBm,
        MaxTxPowerDBm
    ),
    %% This part is tricky. Most places deal with
    %% indices that are absolute, but for TX power, the
    %% adjustment algorithm deals in dBm. So we need to
    %% search through the transmit power table and find
    %% the TxPowerIdx with a dBm closest to the value
    %% calculated by the adjustment algorithm.
    TxPowerPredFn = fun({_, DBm}) ->
        DBm >= AdjustedTxPowerDBm
    end,
    %% Safe because AdjustedTxPowerDBm is clamped to at most
    %% MaxTxPowerDBm (the first table entry), so takewhile/2 keeps at
    %% least one element.
    {AdjustedTxPowerIdx, _ActualTxPowerDBm} = lists:last(
        lists:takewhile(TxPowerPredFn, TxPowerTable)
    ),
    %% An ADRAckReq always gets an answer, even a no-op one; otherwise
    %% only report a change if something actually changed.
    case
        WantsADRAck or
            (AdjustedDataRate /= CurrentDataRate) or
            (AdjustedTxPowerIdx /= CurrentTxPowerIdx)
    of
        true -> {AdjustedDataRate, AdjustedTxPowerIdx};
        false -> hold
    end;
adjust_uplink_params(
    _CurrentDataRate,
    _CurrentSpreading,
    _MaxDataRate,
    _History,
    _WantsADRAck,
    _ConfiguredAdrHistoryLen,
    _ConfiguredSNRHeadroom,
    _TxPowerTable,
    _TxPowerAdjustmentDBmStepSize,
    _CurrentTxPowerIdx
) ->
    %% Not enough history and no ADRAckReq: nothing to do yet.
    hold.
%% Removes all offers from the offer list with hashes matching
%% `PktHash'.
%%
%% Returns the leftover offers and a count of how many offers were
%% pruned. This count is needed to calculate gateway diversity (the
%% total number of gateways that saw a packet).
-spec count_and_prune_offers_for_hash(
    PktHash :: binary(),
    Offers0 :: list(offer())
) -> {integer(), list(offer())}.
count_and_prune_offers_for_hash(PktHash, Offers) ->
    %% Split the offer list on hash equality so matching offers are
    %% counted exactly once and never considered again.
    MatchesHashFn = fun({_Timestamp, OfferHash}) -> OfferHash == PktHash end,
    {Matched, Remaining} = lists:partition(MatchesHashFn, Offers),
    {length(Matched), Remaining}.
%% ==================================================================
%% Tests
%% ==================================================================
-ifdef(EUNIT).
-include_lib("eunit/include/eunit.hrl").
%% Test helper: raw packet history, newest first.
device_history(Device) ->
    Device#device.packet_history.
%% Test helper: number of packets currently in history.
device_history_len(Device) ->
    length(Device#device.packet_history).
%% Test helper: number of offers currently cached.
device_offers_len(#device{offer_history = Offers}) ->
    length(Offers).
%% Test helper: look up the `{Spread, Bandwidth}' config for a
%% DataRate index; crashes (badmatch) if the index is unknown.
spread_and_bandwidth(State, DataRate) ->
    {DataRate, Config} = lorawan_adr:datarate_entry(State, DataRate),
    Config.
%% Test helper: build an ADR-enabled uplink packet with a random hash.
%%
%% FIX: the hash was previously encoded as `<<RandNum>>', which
%% truncates the 32-bit random value to its low 8 bits. With only 256
%% distinct hashes, the 100-200 packet sweeps in these tests collide
%% often, merging unrelated packets in history and flaking
%% assertions. Encode the full value in 64 bits instead
%% (rand:uniform(4294967296) can return 2^32 itself, which needs more
%% than 32 bits).
gen_adr_packet(DRConfig, Snr, Rssi) ->
    RandNum = rand:uniform(4294967296),
    #adr_packet{
        packet_hash = <<RandNum:64>>,
        wants_adr = true,
        wants_adr_ack = false,
        datarate_config = DRConfig,
        snr = Snr,
        rssi = Rssi
    }.
%% Test helper: like gen_adr_packet/3, but with ADRAckReq set so
%% track_packet/2 must return an adjustment.
%%
%% FIX: encode the full random value (64 bits) instead of `<<RandNum>>',
%% which truncated it to 8 bits and made hash collisions likely.
gen_adr_ack(DRConfig, Snr, Rssi) ->
    RandNum = rand:uniform(4294967296),
    #adr_packet{
        packet_hash = <<RandNum:64>>,
        wants_adr = true,
        wants_adr_ack = true,
        datarate_config = DRConfig,
        snr = Snr,
        rssi = Rssi
    }.
%% Test helper: track one ADR packet and return only the new state.
post_packet_track(State, DRIdx, Snr, Rssi) ->
    Packet = gen_adr_packet(spread_and_bandwidth(State, DRIdx), Snr, Rssi),
    {State1, _Adjustment} = track_packet(State, Packet),
    State1.
%% Test helper: an ADRAckReq packet always yields an adjustment;
%% crashes (badmatch) if `hold' comes back instead.
get_packet_adr(State, DRIdx, Snr, Rssi) ->
    Packet = gen_adr_ack(spread_and_bandwidth(State, DRIdx), Snr, Rssi),
    {_State1, {AdjDataRate, AdjPower}} = track_packet(State, Packet),
    {AdjDataRate, AdjPower}.
%% Uniform random jitter in [0.0, Range).
adr_jitter(Range) ->
    rand:uniform() * Range.
%% Feed `Count' packets through track_packet with slightly decaying
%% SNR/RSSI, returning the final state.
exercise_packet_track(State, _DRIdx, Count, _Snr, _Rssi) when Count == 0 ->
    State;
exercise_packet_track(State, DRIdx, Count, Snr0, Rssi0) ->
    Snr = Snr0 - adr_jitter(0.1),
    Rssi = Rssi0 - adr_jitter(0.1),
    NextState = post_packet_track(State, DRIdx, Snr, Rssi),
    exercise_packet_track(NextState, DRIdx, Count - 1, Snr, Rssi).
% valid_exercise(DR, AdjustedDR, AdjustedPower) ->
% ?assert(AdjustedDR >= DR),
% ?assert(AdjustedPower >= 0),
% fin.
%% Run a packet sweep, then request an adjustment via ADRAckReq and
%% sanity check it before handing it back to the caller.
exercise_adr_state(State0, DRIdx, Count, Snr, Rssi) ->
    State1 = exercise_packet_track(State0, DRIdx, Count, Snr, Rssi),
    %% History is capped regardless of how many packets were fed in.
    ?assert(device_history_len(State1) < 21),
    {AdjDR, AdjPower} = get_packet_adr(State1, DRIdx, Snr, Rssi),
    ?assert(AdjDR >= 0),
    ?assert(AdjPower >= 0),
    {AdjDR, AdjPower}.
%% Run an exercise and pin the exact adjustment it must produce.
valid_exercise(State0, DRIdx, Count, Snr, Rssi, ExpectedDR, ExpectedPower) ->
    {ActualDR, ActualPower} = exercise_adr_state(State0, DRIdx, Count, Snr, Rssi),
    ?assertEqual(ExpectedDR, ActualDR),
    ?assertEqual(ExpectedPower, ActualPower).
% gen_range(Start, Step, Length) when is_number(Length) ->
% [Start + (Step * X) || X <- lists:seq(0, Length)].
%% Numeric range Start, Start+Step, ... whose element count is
%% derived by rounding (End - Start) / Step; always at least one
%% element.
gen_startend_range(Start, Step, End) ->
    Steps = round((End - Start) / Step),
    [Start + Step * N || N <- lists:seq(0, Steps)].
adr_harness_test_() ->
    %% Sweep RSSI over [-120.0, 0.0]; at SNR 7.0 starting from
    %% DataRate 0 in US915, the adjustment must always land on
    %% DataRate 3 / TxPower index 1.
    State0 = new('US915'),
    [
        ?_test(valid_exercise(State0, 0, 22, 7.0, Rssi, 3, 1))
     || Rssi <- gen_startend_range(-120.0, 0.1, 0.0)
    ].
%% Broad parameter sweeps: feed many packet counts, SNRs, and RSSIs
%% through the ADR machinery and pin the expected DataRate/TxPower
%% adjustment for each band of inputs.
adr_exercise_test_() ->
    %% DataRate 0 in US915 regional parameters.
    DataRate0 = 0,
    % Spreading0 = 10,
    % Bandwidth0 = 125,
    %% Snr ranges from 10 to -20
    Snr = 10.0,
    %% Rssi ranges from 0 to -120
    Rssi = 0.0,
    % DRConfig0 = {Spreading0, Bandwidth0},
    State0 = new('US915'),
    ?assertEqual(0, device_offers_len(State0)),
    ?assertEqual(0, device_history_len(State0)),
    State1 = post_packet_track(State0, DataRate0, Snr, Rssi),
    State2 = post_packet_track(State1, DataRate0, Snr, Rssi),
    ?assertEqual(2, device_history_len(State2)),
    {_AdjDataRate2, _AdjPower2} = get_packet_adr(State2, DataRate0, Snr, Rssi),
    %% io:format("NewSpreading2 ~8.16.0B~n", [NewSpreading2]),
    % io:format("AdjDataRate2 ~w~n", [AdjDataRate2]),
    % io:format("AdjPower2 ~w~n", [AdjPower2]),
    PacketLimit = 19,
    State3 = exercise_packet_track(State0, DataRate0, PacketLimit, Snr, Rssi),
    % ?assertEqual(19, device_history_len(State3)),
    ?assert(device_history_len(State3) < 21),
    {_AdjDataRate3, _AdjPower3} = get_packet_adr(State3, DataRate0, Snr, Rssi),
    % io:format("AdjDataRate3 ~w~n", [AdjDataRate3]),
    % io:format("AdjPower3 ~w~n", [AdjPower3]),
    %% Each entry below is an independent EUnit test. The first group
    %% varies packet count at strong SNR/RSSI; the expected result is
    %% always max DataRate 3 with TxPower index 3.
    TestList = [
        ?_test(begin
            valid_exercise(State0, DataRate0, 1, Snr, Rssi, 3, 3)
        end),
        ?_test(begin
            valid_exercise(State0, DataRate0, 3, Snr, Rssi, 3, 3)
        end),
        ?_test(begin
            valid_exercise(State0, DataRate0, 7, Snr, Rssi, 3, 3)
        end),
        ?_test(begin
            valid_exercise(State0, DataRate0, 17, Snr, Rssi, 3, 3)
        end),
        ?_test(begin
            valid_exercise(State0, DataRate0, 19, Snr, Rssi, 3, 3)
        end),
        ?_test(begin
            valid_exercise(State0, DataRate0, 20, Snr, Rssi, 3, 3)
        end),
        ?_test(begin
            valid_exercise(State0, DataRate0, 21, Snr, Rssi, 3, 3)
        end),
        ?_test(begin
            valid_exercise(State0, DataRate0, 22, Snr, Rssi, 3, 3)
        end),
        ?_test(begin
            valid_exercise(State0, DataRate0, 100, Snr, Rssi, 3, 3)
        end),
        ?_test(begin
            valid_exercise(State0, DataRate0, 200, Snr, Rssi, 3, 3)
        end),
        %% At the weakest SNR/RSSI, the starting DataRate must not
        %% move and TxPower stays at index 0.
        ?_test(begin
            valid_exercise(State0, 0, 22, -20.0, -120.0, 0, 0)
        end),
        ?_test(begin
            valid_exercise(State0, 1, 22, -20.0, -120.0, 1, 0)
        end),
        ?_test(begin
            valid_exercise(State0, 2, 22, -20.0, -120.0, 2, 0)
        end),
        ?_test(begin
            valid_exercise(State0, 3, 22, -20.0, -120.0, 3, 0)
        end),
        %% Weak-signal behavior must hold across all supported regions.
        [
            ?_test(begin
                valid_exercise(StateX, 0, 22, -20.0, -120.0, 0, 0)
            end)
         || StateX <- [new('US915'), new('EU868'), new('CN470'), new('AS923'), new('AU915')]
        ],
        [
            ?_test(begin
                valid_exercise(State0, 0, X, -20.0, -120.0, 0, 0)
            end)
         || X <- lists:seq(1, 200)
        ],
        %% SNR band sweeps: each band of SNR values maps to a fixed
        %% expected {DataRate, TxPowerIdx} adjustment.
        [
            ?_test(begin
                valid_exercise(State0, DataRate0, 22, X, -120.0, 0, 0)
            end)
         || X <- gen_startend_range(-20.0, 0.1, -2.5)
        ],
        [
            ?_test(begin
                valid_exercise(State0, DataRate0, 22, X, -120.0, 1, 0)
            end)
         || X <- gen_startend_range(-1.0, 0.1, 0.9)
        ],
        [
            ?_test(begin
                valid_exercise(State0, DataRate0, 22, X, -120.0, 2, 0)
            end)
         || X <- gen_startend_range(1.0, 0.1, 3.9)
        ],
        [
            ?_test(begin
                valid_exercise(State0, DataRate0, 22, X, -120.0, 3, 0)
            end)
         || X <- gen_startend_range(4.0, 0.1, 6.9)
        ],
        [
            ?_test(begin
                valid_exercise(State0, DataRate0, 22, X, -120.0, 3, 1)
            end)
         || X <- gen_startend_range(7.0, 0.1, 9.9)
        ],
        [
            ?_test(begin
                valid_exercise(State0, DataRate0, 22, X, -120.0, 3, 3)
            end)
         || X <- gen_startend_range(10.0, 0.1, 12.9)
        ],
        [
            ?_test(begin
                valid_exercise(State0, DataRate0, 22, X, -120.0, 3, 4)
            end)
         || X <- gen_startend_range(13.0, 0.1, 15.9)
        ],
        [
            ?_test(begin
                valid_exercise(State0, DataRate0, 22, X, -120.0, 3, 6)
            end)
         || X <- gen_startend_range(16.0, 0.1, 18.9)
        ],
        [
            ?_test(begin
                valid_exercise(State0, DataRate0, 22, X, -120.0, 3, 7)
            end)
         || X <- gen_startend_range(19.0, 0.1, 21.9)
        ],
        [
            ?_test(begin
                valid_exercise(State0, DataRate0, 22, X, -120.0, 3, 9)
            end)
         || X <- gen_startend_range(22.0, 0.1, 24.9)
        ],
        %% RSSI sweeps: RSSI alone should not change the expected
        %% adjustment at a fixed SNR.
        [
            ?_test(begin
                valid_exercise(State0, DataRate0, 22, 6.9, X, 3, 0)
            end)
         || X <- gen_startend_range(-120.0, 0.1, 0.0)
        ],
        [
            ?_test(begin
                valid_exercise(State0, DataRate0, 22, 7.0, X, 3, 1)
            end)
         || X <- gen_startend_range(-120.0, 0.1, 0.0)
        ]
    ],
    TestList.
%% Exercises offer caching, hash-based offer pruning, gateway
%% diversity accounting, and history clearing when ADR is disabled.
adr_history_test() ->
    State0 = new('US915'),
    ?assertEqual(0, device_offers_len(State0)),
    ?assertEqual(0, device_history_len(State0)),
    DataRateConfig = {10, 125},
    Offer0 = #adr_offer{
        packet_hash = <<0>>
    },
    Packet0WeakSNR = #adr_packet{
        packet_hash = <<0>>,
        wants_adr = true,
        wants_adr_ack = false,
        datarate_config = DataRateConfig,
        snr = 0.0,
        rssi = 0.0
    },
    Packet0StrongSNR = Packet0WeakSNR#adr_packet{snr = 10},
    Offer1 = #adr_offer{
        packet_hash = <<1>>
    },
    Packet1 = Packet0StrongSNR#adr_packet{packet_hash = <<1>>},
    %% Make sure offers are stored
    State1 = track_offer(State0, Offer0),
    ?assertEqual(1, device_offers_len(State1)),
    State2 = track_offer(State1, Offer0),
    ?assertEqual(2, device_offers_len(State2)),
    State3 = track_offer(State2, Offer1),
    ?assertEqual(3, device_offers_len(State3)),
    %% Tracking a packet should clear offers with matching hashes from
    %% the offer cache.
    %%
    %% NOTE: the third offer above has a different hash and will
    %% remain in the offer cache.
    {State4, hold} = track_packet(State3, Packet0WeakSNR),
    ?assertEqual(1, device_offers_len(State4)),
    ?assertEqual(1, device_history_len(State4)),
    [HistoryHead0 | _] = device_history(State4),
    ?assertEqual(Packet0WeakSNR#adr_packet.snr, HistoryHead0#packet.best_snr),
    %% Two pruned offers + the packet itself = diversity of 3.
    ?assertEqual(3, HistoryHead0#packet.gateway_diversity),
    %% Tracking the same packet but with a stronger SNR should be
    %% reflected in history and increase gateway diversity.
    {State5, hold} = track_packet(State4, Packet0StrongSNR),
    ?assertEqual(1, device_history_len(State5)),
    [HistoryHead1 | _] = State5#device.packet_history,
    ?assertEqual(Packet0StrongSNR#adr_packet.snr, HistoryHead1#packet.best_snr),
    ?assertEqual(4, HistoryHead1#packet.gateway_diversity),
    %% An interleaved offer to packet sequence, however
    %% unlikely, should work.
    {State6, hold} = track_packet(State5, Packet1),
    ?assertEqual(2, device_history_len(State6)),
    [HistoryHead2 | _] = State6#device.packet_history,
    ?assertEqual(Packet1#adr_packet.snr, HistoryHead2#packet.best_snr),
    ?assertEqual(2, HistoryHead2#packet.gateway_diversity),
    ?assertEqual(0, device_offers_len(State6)),
    %% We clear history when the device clears the ADR bit.
    {State7, hold} = track_packet(State6, Packet1#adr_packet{wants_adr = false}),
    ?assertEqual(0, device_offers_len(State7)),
    ?assertEqual(0, device_history_len(State7)),
    fin.
%% Test helper: feed ?DEFAULT_ADR_HISTORY_LEN strong-SNR packets and
%% return the final state plus the adjustment produced by the last
%% one.
adr_happy_path(State0, DRConfig) ->
    Packet0 = #adr_packet{
        rssi = 0,
        snr = 10,
        datarate_config = DRConfig,
        wants_adr = true,
        wants_adr_ack = false,
        packet_hash = <<0>>
    },
    %% Up to MIN_HISTORY_LEN - 1 packets, track_packet should still
    %% return 'hold'.
    %%
    %% NOTE(review): the second fun clause fires only on the first
    %% iteration — the initial accumulator is a #device record (a
    %% large tuple), which cannot match the 2-tuple pattern of the
    %% first clause; every later accumulator is the {State, Action}
    %% pair from track_packet/2 and matches the first clause.
    {State1, {AdjustedDataRate, AdjustedPowerIdx}} = lists:foldl(
        fun
            (N, {ADRn, _Action}) ->
                %% ?assertEqual(hold, Action),
                lorawan_adr:track_packet(ADRn, Packet0#adr_packet{
                    packet_hash = <<N>>
                });
            (N, State2) ->
                io:format("State0 ~w~n", [N]),
                lorawan_adr:track_packet(State2, Packet0#adr_packet{
                    packet_hash = <<N>>
                })
        end,
        State0,
        lists:seq(1, ?DEFAULT_ADR_HISTORY_LEN)
    ),
    {State1, {AdjustedDataRate, AdjustedPowerIdx}}.
%% Run a happy-path exercise and sanity check the adjustment against
%% the device's DataRate and power-table limits.
valid_happy_path(State0, DRConfig) ->
    {SpreadingBefore, BandwidthBefore} = DRConfig,
    {State1, {DR, PowerIdx}} = adr_happy_path(State0, DRConfig),
    {SpreadAfter, BandwidthAfter} = spread_and_bandwidth(State1, DR),
    ?assert(DR >= 0),
    %% An equal-or-faster DataRate means equal-or-lower spreading...
    ?assert(SpreadAfter =< SpreadingBefore),
    %% ...at an unchanged bandwidth.
    ?assertEqual(BandwidthAfter, BandwidthBefore),
    ?assertEqual(0, State1#device.max_txpower_idx),
    ?assert(PowerIdx >= State1#device.max_txpower_idx),
    ?assert(PowerIdx =< State1#device.min_txpower_idx).
% io:format("AdjustedDataRate ~w~n", [AdjustedDataRate]),
% io:format("AdjustedSpreading ~w~n", [AdjustedSpread]),
% io:format("AdjustedBandwidth ~w~n", [AdjustedBandwidth]),
% io:format("AdjustedPowerIdx ~w~n", [AdjustedPowerIdx]),
% io:format("Min PowerIdx ~w~n", [State1#device.min_txpower_idx]),
adr_happy_path_test_() ->
    %% Every 125kHz uplink DataRate config in US915 and EU868 should
    %% survive the happy path.
    Cases = [
        {'US915', {10, 125}},
        {'US915', {9, 125}},
        {'US915', {8, 125}},
        {'US915', {7, 125}},
        {'EU868', {12, 125}},
        {'EU868', {11, 125}},
        {'EU868', {10, 125}},
        {'EU868', {9, 125}},
        {'EU868', {8, 125}},
        {'EU868', {7, 125}}
    ],
    [
        ?_test(valid_happy_path(lorawan_adr:new(Region), DRConfig))
     || {Region, DRConfig} <- Cases
    ].
adr_ack_req_test() ->
    %% A packet with ADRAckReq set must yield an adjustment (not
    %% `hold'), even on the very first packet tracked.
    AckReqPacket = #adr_packet{
        rssi = 0,
        snr = 0,
        wants_adr = true,
        wants_adr_ack = true,
        datarate_config = {7, 125},
        packet_hash = <<0>>
    },
    Device0 = lorawan_adr:new('US915'),
    {Device1, {_AdjDataRate, _AdjPowerIdx}} = lorawan_adr:track_packet(Device0, AckReqPacket),
    ?assertEqual(1, device_history_len(Device1)),
    fin.
%% TODO: parameterize and test over a range packet SNRs.
%% End-to-end: enough strong-SNR packets must produce a real DataRate
%% increase, and bogus ADR answers must be handled without crashing.
adr_does_adr_test() ->
    %% DataRate 0 in US915 regional parameters.
    DataRate0 = 0,
    Spreading0 = 10,
    Bandwidth0 = 125,
    DataRateConfig0 = {Spreading0, Bandwidth0},
    Packet0 = #adr_packet{
        rssi = 0,
        snr = 6,
        datarate_config = DataRateConfig0,
        wants_adr = true,
        wants_adr_ack = false,
        packet_hash = <<0>>
    },
    %% Up to MIN_HISTORY_LEN - 1 packets, track_packet should still
    %% return 'hold'.
    {State1, {AdjustedDataRate, AdjustedPowerIdx}} = lists:foldl(
        fun
            (N, {ADRn, Action}) ->
                ?assertEqual(hold, Action),
                lorawan_adr:track_packet(ADRn, Packet0#adr_packet{
                    packet_hash = <<N>>
                });
            (N, State0) ->
                lorawan_adr:track_packet(State0, Packet0#adr_packet{
                    packet_hash = <<N>>
                })
        end,
        lorawan_adr:new('US915'),
        lists:seq(1, ?DEFAULT_ADR_HISTORY_LEN)
    ),
    {AdjustedDataRate, {AdjustedSpreading, AdjustedBandwidth}} = lorawan_adr:datarate_entry(
        State1,
        AdjustedDataRate
    ),
    ?assertEqual(?DEFAULT_ADR_HISTORY_LEN, device_history_len(State1)),
    %% The adjustment must actually improve on the starting DataRate.
    ?assert(AdjustedDataRate > DataRate0),
    ?assert(AdjustedSpreading < Spreading0),
    ?assertEqual(AdjustedBandwidth, Bandwidth0),
    ?assert(AdjustedPowerIdx >= State1#device.max_txpower_idx),
    ?assert(AdjustedPowerIdx =< State1#device.min_txpower_idx),
    %% Check that tracking a fake ADR answer from the device doesn't
    %% cause a crash.
    Answer0 = #adr_answer{
        channel_mask_ack = true,
        datarate_ack = true,
        power_ack = true
    },
    State2 = lorawan_adr:track_adr_answer(State1, Answer0),
    ?assertEqual([], State2#device.pending_adjustments),
    Answer1 = #adr_answer{
        channel_mask_ack = false,
        datarate_ack = true,
        power_ack = true
    },
    State3 = lorawan_adr:track_adr_answer(State1, Answer1),
    ?assertEqual([], State3#device.pending_adjustments),
    State4 = lorawan_adr:track_adr_answer(State2, Answer1),
    ?assertEqual([], State4#device.pending_adjustments),
    fin.
%% new/1 must produce a table matching the US915 regional parameters.
adr_new_test() ->
    State0 = lorawan_adr:new('US915'),
    %% See regional parameters document for the reference values
    %% asserted against below.
    MinDataRate = lorawan_adr:min_datarate(State0),
    MinDataRateEntryFromIndex = lorawan_adr:datarate_entry(State0, MinDataRate),
    ?assertEqual({0, {10, 125}}, MinDataRateEntryFromIndex),
    MinDataRateEntryFromConfig = lorawan_adr:datarate_entry(State0, {10, 125}),
    ?assertEqual({0, {10, 125}}, MinDataRateEntryFromConfig),
    MaxDataRate = lorawan_adr:max_datarate(State0),
    MaxDataRateEntryFromIndex = lorawan_adr:datarate_entry(State0, MaxDataRate),
    ?assertEqual({3, {7, 125}}, MaxDataRateEntryFromIndex),
    MaxDataRateEntryFromConfig = lorawan_adr:datarate_entry(State0, {7, 125}),
    ?assertEqual({3, {7, 125}}, MaxDataRateEntryFromConfig),
    %% Out-of-table lookups must return `undefined', not crash.
    InvalidDataRateConfig = lorawan_adr:datarate_entry(State0, MaxDataRate + 1),
    ?assertEqual(undefined, InvalidDataRateConfig),
    fin.
adr_resists_denial_of_service_test() ->
    %% A bogus `LinkADRAns' against a device with no pending
    %% adjustments must be ignored rather than crash or pollute the
    %% accepted-adjustments history.
    Fresh = lorawan_adr:new('US915'),
    BogusAnswer = #adr_answer{
        channel_mask_ack = true,
        datarate_ack = true,
        power_ack = true
    },
    Result = lorawan_adr:track_adr_answer(Fresh, BogusAnswer),
    ?assertEqual([], Result#device.accepted_adjustments),
    fin.
-endif.
%%==============================================================================
%% Copyright 2021 <NAME> <<EMAIL>>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%==============================================================================
%%%-------------------------------------------------------------------
%%% @doc
%% Implements prefix trees that allow you to find the value associated
%% with the longest prefix of a given key. All keys are utf8 binaries.
%%% @end
%%%
%% @author <NAME> <<EMAIL>>
%% @copyright (C) 2021, <NAME> <<EMAIL>>
%%%-------------------------------------------------------------------
-module(pb_tree).
-copyright('<NAME> <<EMAIL>>').
%% Library functions
-export([new/0,
is_pb_tree/1, is_empty/1,
add/3, add/4, adds/2, adds/3,
delete/2, delete/3, deletes/2, deletes/3,
member/2, find/2, find/3,
keys/1, values/1,
replace/3, replace/4,
to_list/1, from_list/1
]).
%% Records
%% Intermediate bucket used by bucket_sort/build while constructing a
%% balanced tree. 'rest' first accumulates {KeySuffix, Value} pairs and is
%% later replaced by the recursively bucket-sorted sub-buckets; the
%% original type annotation declared a single tuple, contradicting the []
%% default and every use site.
-record(bucket, {pivot :: value(),
                 rest = [] :: [{key(), value()}] | [#bucket{}],
                 values = [] :: [value()]}).
-record(pb_node, {pivot :: value(),
left = pb_nil :: pb_tree(),
next = pb_nil :: pb_tree(),
right = pb_nil :: pb_tree(),
value :: value()}).
%% Types
-opaque pb_tree() :: #pb_node{} | pb_nil.
-type key() :: [_].
-type value() :: _.
-type default() :: _.
-type flag() :: check | nocheck.
%% Exported Types
-export_type([pb_tree/0]).
%% Defines
%% ===================================================================
%% Library functions.
%% ===================================================================
%%--------------------------------------------------------------------
%% Function: new() -> Tree.
%% @doc
%% Creates an empty P-tree.
%% The empty tree is represented by the atom pb_nil.
%% @end
%%--------------------------------------------------------------------
-spec new() -> pb_tree().
%%--------------------------------------------------------------------
new() -> pb_nil.
%%--------------------------------------------------------------------
%% Function: is_pb_tree(X) -> Boolean().
%% @doc
%% Returns true if X is a P-tree (the empty-tree atom pb_nil or a
%% pb_node record), false for any other term.
%% @end
%%--------------------------------------------------------------------
-spec is_pb_tree(_) -> boolean().
%%--------------------------------------------------------------------
is_pb_tree(pb_nil) -> true;
is_pb_tree(Term) -> is_record(Term, pb_node).
%%--------------------------------------------------------------------
%% Function: is_empty(Tree) -> Boolean.
%% @doc
%% Returns true if the Tree is empty (i.e. is the atom pb_nil),
%% false otherwise.
%% @end
%%--------------------------------------------------------------------
-spec is_empty(_) -> boolean().
%%--------------------------------------------------------------------
is_empty(Tree) -> Tree =:= pb_nil.
%%--------------------------------------------------------------------
%% Function: add(Key, Value, Tree) -> Tree.
%% @doc
%% The Value is saved in Tree under the Key, if that key is present the
%% value associated with the key is replaced by Value.
%% add(Key, Value, Tree) is equivalent to add(Key, Value, Tree,nocheck).
%% @end
%%--------------------------------------------------------------------
-spec add(key(), value(), pb_tree()) -> pb_tree().
%%--------------------------------------------------------------------
%% Convenience wrapper for add/4 with the nocheck flag.
add(Key, Value, Tree) -> add(Key, Value, Tree, nocheck).
%%--------------------------------------------------------------------
%% Function: add(Prefix, Value, Tree, Flag) -> Tree.
%% @doc
%% The Value is saved in Tree under the Key, if that key is present the
%% value associated with the key is replaced by Value. If the flag is
%% check an exception is generated and if nocheck the value is replaced.
%% @end
%%--------------------------------------------------------------------
-spec add(key(), value(), pb_tree(), flag()) -> pb_tree().
%%--------------------------------------------------------------------
%% The tree is a ternary search tree walked one utf8 character at a
%% time: a matching pivot descends via 'next', smaller characters go
%% 'left', everything else goes 'right'.
%% Inserting into an empty subtree builds a chain of nodes, one per
%% remaining character:
add(<<H/utf8>>, Value, pb_nil, _) -> #pb_node{pivot = H, value = Value};
add(<<H/utf8, T/binary>>, Value, pb_nil, _) ->
#pb_node{pivot = H, next = add(T, Value, pb_nil, nocheck)};
%% Last character reached: nocheck always overwrites; with 'check' only
%% an unset (undefined) value may be filled in. With 'check' and an
%% already-set value no clause matches the exhausted key, so the
%% promised exception surfaces as a function_clause error.
add(<<H/utf8>>, Value, Tree = #pb_node{pivot = H}, nocheck) ->
Tree#pb_node{value = Value};
add(<<H/utf8>>, Value, Tree = #pb_node{pivot = H, value = undefined}, _) ->
Tree#pb_node{value = Value};
add(<<H/utf8, T/binary>>, Value, Tree = #pb_node{pivot = H, next =Next},Flag) ->
Tree#pb_node{next = add(T, Value, Next, Flag)};
add(I = <<H/utf8,_/binary>>,V,Tree=#pb_node{pivot=P,left=Left},Flag) when H<P ->
Tree#pb_node{left = add(I, V, Left, Flag)};
add(I, Value, Tree = #pb_node{right = Right}, Flag) ->
Tree#pb_node{right = add(I, Value, Right, Flag)}.
%%--------------------------------------------------------------------
%% Function: adds(Pairs, Tree) -> Tree.
%% @doc
%% For each {Key, Value} pair in Pairs the value is stored under the
%% key in the Tree. The adds(Pairs, Tree) call is equivalent to
%% adds(Pairs, Tree, nocheck).
%% @end
%%--------------------------------------------------------------------
-spec adds([{key(), value()}], pb_tree()) -> pb_tree().
%%--------------------------------------------------------------------
adds(Pairs, Tree) -> adds(Pairs, Tree, nocheck).
%%--------------------------------------------------------------------
%% Function: adds(Pairs, Tree, Flag) -> Tree.
%% @doc
%% For each {Key, Value} pair in Pairs the value is stored under the
%% key in the Tree. If a key already has a value associated with it
%% in the Tree the flag determines what happens. If the flag is check
%% an exception is generated if a key has a value associated with it,
%% if the flag is nocheck the values will be replaced.
%% @end
%%--------------------------------------------------------------------
-spec adds([{key(), value()}], pb_tree(), flag()) -> pb_tree().
%%--------------------------------------------------------------------
adds(Pairs, Tree, nocheck) ->
    %% ukeysort/ukeymerge keep only the first of two tuples with equal
    %% keys, so entries from Pairs override entries already in the Tree.
    %% (The previous keysort/keymerge kept both copies of a duplicate
    %% key, which made build/1 crash on a two-value bucket instead of
    %% replacing the value.)
    Pairs1 = lists:ukeymerge(1, lists:ukeysort(1, Pairs), to_list(Tree)),
    build(bucket_sort(Pairs1, []));
adds(Pairs, Tree, check) ->
    case lists:any(fun({I, _}) -> member(I, Tree) end, Pairs) of
        false -> adds(Pairs, Tree, nocheck);
        true -> erlang:error(badarg)
    end.
%%--------------------------------------------------------------------
%% Function: delete(Key, Tree) -> Tree.
%% @doc
%% If a value is associated the Key the Tree returned has that
%% association removed. The call delete(Key, Tree) is equivalent
%% to delete(Key, Tree, nocheck).
%% @end
%%--------------------------------------------------------------------
-spec delete(key(), pb_tree()) -> pb_tree().
%%--------------------------------------------------------------------
%% Convenience wrapper for delete/3 with the nocheck flag.
delete(Key, Tree) -> delete(Key, Tree, nocheck).
%%--------------------------------------------------------------------
%% Function: delete(Key, Tree, Flag) -> Tree.
%% @doc
%% If a value is associated the Key the Tree returned has that
%% association removed. If there is no value associated with the Key
%% in the Tree the flag determines what happens. If the flag is check
%% an exception is generated if no association exists, if the flag
%% is nocheck the unchanged tree is returned.
%% @end
%%--------------------------------------------------------------------
-spec delete(key(), pb_tree(), flag()) -> pb_tree().
%%--------------------------------------------------------------------
delete(_, pb_nil, check) -> erlang:error(badarg);
delete(_, pb_nil, nocheck) -> pb_nil;
delete(<<H/utf8>>, #pb_node{pivot = H, value = undefined}, check) ->
    erlang:error(badarg);
%% Last character matched: drop the node entirely when it has no
%% children, collapse to a single sibling when possible, otherwise just
%% clear the stored value.
delete(<<H/utf8>>, #pb_node{pivot = H, left = pb_nil, next = pb_nil, right = pb_nil}, _) ->
    pb_nil;
delete(<<H/utf8>>, #pb_node{pivot = H, left = Left, next = pb_nil, right = pb_nil}, _) ->
    Left;
delete(<<H/utf8>>, #pb_node{pivot = H, left = pb_nil, next = pb_nil, right = Right}, _) ->
    Right;
delete(<<H/utf8>>, Tree = #pb_node{pivot = H}, _) ->
    Tree#pb_node{value = undefined};
%% Recursive cases must rebuild the current node around the updated
%% subtree. (Previously the bare recursion result was returned, which
%% silently discarded every other branch of the tree.)
delete(<<H/utf8, T/binary>>, Tree = #pb_node{pivot = H, next = Next}, Flag) ->
    Tree#pb_node{next = delete(T, Next, Flag)};
delete(I = <<H/utf8, _/binary>>, Tree = #pb_node{pivot = P, left = Left}, Flag) when H < P ->
    Tree#pb_node{left = delete(I, Left, Flag)};
delete(I, Tree = #pb_node{right = Right}, Flag) ->
    Tree#pb_node{right = delete(I, Right, Flag)}.
%%--------------------------------------------------------------------
%% Function: deletes(Keys, Tree) -> Tree.
%% @doc
%% A tree that has all the associations for the keys removed.
%% The call deletes(Indces, Tree) is equivalent to
%% deletes(Keys, Tree, nocheck).
%% @end
%%--------------------------------------------------------------------
-spec deletes([key()], pb_tree()) -> pb_tree().
%%--------------------------------------------------------------------
%% Convenience wrapper for deletes/3 with the nocheck flag.
deletes(Keys, Tree) -> deletes(Keys, Tree, nocheck).
%%--------------------------------------------------------------------
%% Function: deletes(Key, Tree, Flag) -> Tree.
%% @doc
%% A tree that has all the associations for the keys removed.
%% If there is no value associated with any of the Keys
%% in the Tree, the flag determines what happens. If the flag is check
%% an exception is generated if, if the flag is nocheck a tree
%% is returned with the other associations removed.
%% @end
%%--------------------------------------------------------------------
-spec deletes([key()], pb_tree(), flag()) -> pb_tree().
%%--------------------------------------------------------------------
%% Bulk delete: flatten the tree to a sorted pair list, strip the keys
%% with deletes1/2, then rebuild a balanced tree.
deletes(Keys, Tree, nocheck) ->
build(bucket_sort(deletes1(lists:sort(Keys), to_list(Tree)), []));
deletes(Keys, Tree, check) ->
case lists:all(fun(I) -> member(I, Tree) end, Keys) of
true -> deletes(Keys, Tree, nocheck);
false -> erlang:error(badarg)
end.
%% deletes1(Keys, Pairs) removes from the ascending {Key, Value} list
%% Pairs every pair whose key occurs in the ascending key list Keys.
%% Both arguments must be sorted; the lists are merged in lock-step.
%% (The previous version had the two non-matching cases inverted: it
%% dropped pairs whose keys were NOT being deleted and skipped keys
%% whose pairs appeared later in the list.)
deletes1(_, []) -> [];
deletes1([], L) -> L;
deletes1([H | T1], [{H, _} | T2]) -> deletes1(T1, T2);
deletes1([H1 | _] = Keys, [{H2, _} = Pair | T2]) when H1 > H2 ->
    %% Current pair's key is smaller than the next key to delete:
    %% keep the pair and advance the pair list.
    [Pair | deletes1(Keys, T2)];
deletes1([_ | T1], L) ->
    %% Next key to delete is smaller than the current pair's key, so it
    %% cannot occur in the (sorted) remainder: drop the key.
    deletes1(T1, L).
%%--------------------------------------------------------------------
%% Function: member(Key, Tree) -> Boolean.
%% @doc
%% Returns true if there is a value associated with Key in the tree,
%% otherwise false.
%% @end
%%--------------------------------------------------------------------
-spec member(key(), pb_tree()) -> boolean().
%%--------------------------------------------------------------------
%% Walks the ternary tree like add/4; a key is a member only if the
%% node reached by its last character holds a non-undefined value.
member(_, pb_nil) -> false;
member(<<H/utf8>>, #pb_node{pivot = H, value = Value}) -> Value /= undefined;
member(<<H/utf8, T/binary>>, #pb_node{pivot = H, next=Next}) -> member(T, Next);
member(I = <<H/utf8, _/binary>>, #pb_node{pivot = P, left = L}) when H < P ->
member(I, L);
member(I, #pb_node{right = R}) ->
member(I, R).
%%--------------------------------------------------------------------
%% Function: find(Key, Tree) -> Value.
%% @doc
%% Returns the value associated with the longest prefix of the Key in the
%% Tree or undefined if no such association exists.
%% The call find(Key, Tree) is equivalent to find(Key, Tree, undefined).
%% @end
%%--------------------------------------------------------------------
-spec find(key(), pb_tree()) -> value() | undefined.
%%--------------------------------------------------------------------
find(Key, Tree) -> find(Key, Tree, undefined).
%%--------------------------------------------------------------------
%% Function: find(Key, Tree, Default) -> Value.
%% @doc
%% Returns the value associated with the longest prefix of the Key in the
%% Tree or Default if no such association exists.
%% @end
%%--------------------------------------------------------------------
-spec find(key(), pb_tree(), default()) -> value() | default().
%%--------------------------------------------------------------------
%% Prev carries the value of the longest prefix matched so far (it
%% starts as the caller's Default); each matched character may update it
%% via update/2, so running off the tree still returns the best match.
find(_, pb_nil, Prev) -> Prev;
find(<<H/utf8>>, #pb_node{pivot = H, value = V}, Prev) -> update(V, Prev);
find(<<H/utf8, T/binary>>, #pb_node{pivot = H, next = Next, value = V},Prev) ->
find(T, Next, update(V, Prev));
find(I = <<H/utf8, _/binary>>, #pb_node{pivot = P,left=Left},Prev) when H < P ->
find(I, Left, Prev);
find(I, #pb_node{right = Right}, Prev) ->
find(I, Right, Prev).
%% update(New, Fallback) picks New unless it is the marker 'undefined',
%% in which case the previously found value Fallback is kept.
update(undefined, Fallback) -> Fallback;
update(Latest, _Fallback) -> Latest.
%%--------------------------------------------------------------------
%% Function: keys(Tree) -> Keys.
%% @doc
%% Returns all the keys in ascending order.
%% @end
%%--------------------------------------------------------------------
-spec keys(pb_tree()) -> [key()].
%%--------------------------------------------------------------------
keys(Tree) -> keys(Tree, [], []).
%% Xiferp ("Prefix" reversed) is the reversed character prefix of the
%% current node; a key is emitted (by reversing Xiferp1 back into a
%% utf8 binary) only at nodes that hold a non-undefined value.
keys(pb_nil, _, Acc) -> Acc;
keys(#pb_node{pivot = P, left=L,next=N,right=R,value=undefined},Xiferp,Acc) ->
Xiferp1 = [P | Xiferp],
keys(L, Xiferp, keys(N, Xiferp1, keys(R, Xiferp, Acc)));
keys(#pb_node{pivot = P, left=L, next=N, right=R}, Xiferp, Acc) ->
Xiferp1 = [P | Xiferp],
keys(L,
Xiferp,
[<< <<X/utf8>> || X <- lists:reverse(Xiferp1)>> |
keys(N, Xiferp1, keys(R, Xiferp, Acc))]).
%%--------------------------------------------------------------------
%% Function: values(Tree) -> Values.
%% @doc
%% Returns all the values in ascending order of their keys.
%% @end
%%--------------------------------------------------------------------
%% Note: the spec previously claimed [key()]; this function returns the
%% stored values, not the keys.
-spec values(pb_tree()) -> [value()].
%%--------------------------------------------------------------------
values(Tree) -> values(Tree, []).
%% Accumulator walk in left < this-node < right order keeps key order;
%% a node whose value is 'undefined' holds no association and
%% contributes nothing.
values(pb_nil, Acc) -> Acc;
values(#pb_node{left = L, next = N, right = R, value = undefined}, Acc) ->
    values(L, values(N, values(R, Acc)));
values(#pb_node{left = L, next = N, right = R, value = V}, Acc) ->
    values(L, [V | values(N, values(R, Acc))]).
%%--------------------------------------------------------------------
%% Function: replace(Key, Value, Tree) -> Tree.
%% @doc
%% Replaces any existing value associated with Key in the tree,
%% otherwise adds a association for the value with the Key.
%% The call replace(Key, Value, Tree) is equivalent to
%% replace(Key, Value, Tree, nocheck).
%% @end
%%--------------------------------------------------------------------
-spec replace(key(), value(), pb_tree()) -> pb_tree().
%%--------------------------------------------------------------------
%% Convenience wrapper for replace/4 with the nocheck flag.
replace(Key, Value, Tree) -> replace(Key, Value, Tree, nocheck).
%%--------------------------------------------------------------------
%% Function: replace(Key, Values, Tree, Flag) -> Tree.
%% @doc
%% Replaces any existing value associated with Key in the tree,
%% otherwise the flag determines what happens. If the flag is check
%% an exception is generated, otherwise the value is added.
%% @end
%%--------------------------------------------------------------------
-spec replace(key(), value(), pb_tree(), flag()) -> pb_tree().
%%--------------------------------------------------------------------
%% nocheck is a plain add/4 (insert-or-replace); check delegates to
%% replace_check/3, which errors when the key is not already present.
replace(Key, Value, Tree, nocheck) -> add(Key, Value, Tree, nocheck);
replace(Key, Value, Tree, check) -> replace_check(Key, Value, Tree).
%% replace_check walks the tree like add/4 but raises badarg as soon as
%% it can tell the key has no existing association (empty subtree, or
%% the target node's value is undefined).
replace_check(_, _, pb_nil) -> erlang:error(badarg);
replace_check(<<H/utf8>>, _, #pb_node{pivot = H, value = undefined}) ->
erlang:error(badarg);
replace_check(<<H/utf8>>, Value, Tree = #pb_node{pivot = H}) ->
Tree#pb_node{value = Value};
replace_check(<<H/utf8, T/binary>>, Value, Tree =#pb_node{pivot=H,next=Next}) ->
Tree#pb_node{next = replace_check(T, Value, Next)};
replace_check(I = <<H/utf8, _/binary>>, Value, Tree=#pb_node{pivot=P,left=Left})
when H < P ->
Tree#pb_node{left = replace_check(I, Value, Left)};
replace_check(I, Value, Tree = #pb_node{right = Right}) ->
Tree#pb_node{right = replace_check(I, Value, Right)}.
%%--------------------------------------------------------------------
%% Function: to_list(Tree) -> Pairs.
%% @doc
%% From a P-tree a list of {Key, Value} pairs.
%% Pairs are produced in ascending key order.
%% @end
%%--------------------------------------------------------------------
-spec to_list(pb_tree()) -> [{key(), value()}].
%%--------------------------------------------------------------------
to_list(Tree) -> to_list(Tree, [], []).
%% Same traversal as keys/3, with Xiferp as the reversed prefix of the
%% current node, but emits {Key, Value} pairs for value-bearing nodes.
to_list(pb_nil, _, Acc) -> Acc;
to_list(#pb_node{pivot = P,left=L,next=N,right=R,value=undefined},Xiferp,Acc) ->
Xiferp1 = [P | Xiferp],
to_list(L, Xiferp, to_list(N, Xiferp1, to_list(R, Xiferp, Acc)));
to_list(#pb_node{pivot = P, left=L, next=N, right=R, value=V}, Xiferp, Acc) ->
Xiferp1 = [P | Xiferp],
to_list(L,
Xiferp,
[{<< <<X/utf8>> || X <- lists:reverse(Xiferp1)>>, V} |
to_list(N, Xiferp1, to_list(R, Xiferp, Acc))]).
%%--------------------------------------------------------------------
%% Function: from_list(Pairs) -> Tree.
%% @doc
%% For each {Key, Value} pair in Pairs the value is stored under the
%% key in the Tree.
%% The pairs are bucket-sorted by leading character and then built
%% into a balanced tree.
%% @end
%%--------------------------------------------------------------------
-spec from_list([{key(), value()}]) -> pb_tree().
%%--------------------------------------------------------------------
from_list(Plist) -> build(bucket_sort(Plist)).
%% ===================================================================
%% Internal functions.
%% ===================================================================
%% Sorts the pairs and groups them into #bucket{}s by leading character.
bucket_sort(L) -> bucket_sort(lists:sort(L), []).
%% bucket_sort(SortedPairs, Acc) groups an ascending {Key, Value} list
%% into one #bucket{} per leading utf8 character. A single-character
%% key lands in the bucket's 'values'; a longer key contributes its
%% {KeySuffix, Value} to 'rest'. Whenever a bucket is finished (the
%% leading character changes, or the input ends) its accumulated 'rest'
%% is reversed back into ascending order and bucket-sorted recursively,
%% so 'rest' ends up as a list of sub-buckets.
bucket_sort([], []) -> [];
bucket_sort([], [B = #bucket{rest = R} | Acc]) ->
lists:reverse([B#bucket{rest = bucket_sort(lists:reverse(R))} | Acc]);
%% First pair: open the initial bucket.
bucket_sort([{<<H/utf8>>, V} | T], []) ->
bucket_sort(T, [#bucket{pivot = H, values = [V]}]);
bucket_sort([{<<H/utf8, P/binary>>, V} | T], []) ->
bucket_sort(T, [#bucket{pivot = H, rest = [{P, V}]}]);
%% Same leading character as the open bucket: extend it.
bucket_sort([{<<H/utf8>>, V} | T], [B = #bucket{pivot = H, values = Vs}|Acc]) ->
bucket_sort(T, [B#bucket{values = [V | Vs]} | Acc]);
bucket_sort([{<<H/utf8, P/binary>>, V} |T],[B=#bucket{pivot=H,rest=T1} |Acc]) ->
bucket_sort(T, [B#bucket{rest = [{P, V} | T1]} | Acc]);
%% New leading character: finish the open bucket, open a new one.
bucket_sort([{<<H/utf8>>, V} | T], [B = #bucket{rest = R} | Acc]) ->
bucket_sort(T, [#bucket{pivot = H, values = [V]},
B#bucket{rest = bucket_sort(lists:reverse(R))} | Acc]);
bucket_sort([{<<H/utf8, P/binary>>, V} | T], [B = #bucket{rest = R} | Acc]) ->
bucket_sort(T, [#bucket{pivot = H, rest = [{P, V}]},
B#bucket{rest = bucket_sort(lists:reverse(R))} | Acc]).
%% build(Buckets) turns an ascending list of bucket-sorted #bucket{}s
%% into a balanced ternary tree: the median bucket becomes the root,
%% its recursively bucket-sorted 'rest' becomes the 'next' subtree, and
%% the buckets on either side become 'left'/'right'.
build([]) -> pb_nil;
build([#bucket{pivot = P, rest = R, values = [V]}]) ->
#pb_node{pivot = P, next = build(R), value = V};
build([#bucket{pivot = P1, rest = R1, values = [V1]},
#bucket{pivot = P2, rest = R2, values = [V2]}]) ->
#pb_node{pivot = P1, next = build(R1), value = V1,
right = #pb_node{pivot = P2, next = build(R2), value = V2}};
build(Bs) ->
Size = length(Bs),
%% Split at the median; values = [] means the bucket is a pure prefix
%% node with no value of its own.
case split(Size - 1 - ((Size - 1) div 2), Bs, []) of
{Left, #bucket{pivot = P, rest = Rest, values = []}, Right} ->
#pb_node{pivot = P,
next = build(Rest),
left = build(Left),
right = build(Right)};
{Left, #bucket{pivot = P, rest = Rest, values = [V]}, Right} ->
#pb_node{pivot = P,
next = build(Rest),
left = build(Left),
right = build(Right),
value = V}
end.
%% split(N, List, []) -> {Front, Elem, Back}: the first N elements of
%% List (in order), the (N+1)'th element, and the remaining tail.
%% Used by build/1 to pick the median bucket.
split(0, [H | T], Acc) -> {lists:reverse(Acc), H, T};
split(N, [H | T], Acc) -> split(N - 1, T, [H | Acc]).
%%%-------------------------------------------------------------------
%%% @author <NAME>
%%% @copyright (C) 2021 ACK CYFRONET AGH
%%% This software is released under the MIT license
%%% cited in 'LICENSE.txt'.
%%% @end
%%%-------------------------------------------------------------------
%%% @doc
%%% Helper module to ts_persistence operating on time series
%%% metric data node. Each time series metric data node is connected with
%%% singe metric. Values that exceed time series hub capacity for particular
%%% metric (ie. metric's head capacity) are stored in list of time series metric
%%% data nodes (capacity of single time series metric data node is also limited so more
%%% than one time series metric data node may be needed - see ts_persistence module).
%%% @end
%%%-------------------------------------------------------------------
-module(ts_metric_data_node).
-author("<NAME>").
%% API
-export([set/1, get/1]).
%% datastore_model callbacks
-export([get_ctx/0, get_record_struct/1]).
-record(ts_metric_data_node, {
value :: ts_metric:data_node()
}).
-type record() :: #ts_metric_data_node{}.
-type key() :: datastore:key().
-export_type([key/0]).
% Context used only by datastore to initialize internal structures.
% Context provided via time_series_collection module functions
% overrides it in other cases.
-define(CTX, #{
model => ?MODULE,
memory_driver => undefined,
disc_driver => undefined
}).
%%%===================================================================
%%% API
%%%===================================================================
%% Wraps a metric data node payload in a #ts_metric_data_node{} record
%% for persistence.
-spec set(ts_metric:data_node()) -> record().
set(Data) ->
#ts_metric_data_node{value = Data}.
%% Unwraps the metric data node payload from a #ts_metric_data_node{}
%% record.
-spec get(record()) -> ts_metric:data_node().
get(#ts_metric_data_node{value = Data}) ->
Data.
%%%===================================================================
%%% datastore_model callbacks
%%%===================================================================
%%--------------------------------------------------------------------
%% @doc
%% Returns model's context needed to initialize internal structure's
%% (it is not used to get or save document).
%% Both drivers are 'undefined' here; per the module note, the real
%% context is supplied via the time_series_collection functions.
%% @end
%%--------------------------------------------------------------------
-spec get_ctx() -> datastore:ctx().
get_ctx() ->
?CTX.
%% @doc Returns the record structure for the given record version, as
%% used by datastore_model for (de)serialization. The nested record
%% mirrors ts_metric:data_node(): windows (custom JSON codec via
%% ts_windows), the key of the next-older node, and its timestamp.
-spec get_record_struct(datastore_model:record_version()) ->
    datastore_model:record_struct().
get_record_struct(1) ->
    {record, [
        {value, {record, [
            {windows, {custom, json, {ts_windows, encode, decode}}},
            {older_node_key, string},
            {older_node_timestamp, integer}
        ]}}
    ]}.
% @copyright 2010-2014 Zuse Institute Berlin
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%% @author <NAME> <<EMAIL>>
%% @doc Integrationstests for the gossip and gossip_load modules.
%% @end
%% @version $Id$
-module(gossip_SUITE).
-author('<EMAIL>').
-vsn('$Id$').
-compile(export_all).
-include("unittest.hrl").
-include("scalaris.hrl").
-dialyzer({no_fail_call, test_request_histogram1/1}).
-define(NO_OF_NODES, 5).
%% Common Test callback: the list of test cases in this suite.
all() ->
[
test_no_load,
test_load,
test_request_histogram1,
test_request_histogram2
].
%% Common Test callback: suite-wide options (per-case timeout).
suite() ->
[
{timetrap, {seconds, 80}}
].
%% Common Test per-suite/per-group callbacks; group handling is
%% delegated to unittest_helper.
init_per_suite(Config) ->
Config.
end_per_suite(_Config) ->
ok.
init_per_group(Group, Config) ->
unittest_helper:init_per_group(Group, Config).
end_per_group(Group, Config) ->
unittest_helper:end_per_group(Group, Config).
%% Builds a fresh ring for every test case: one seed node at
%% ?MINUS_INFINITY plus ?NO_OF_NODES - 1 added nodes, with gossip
%% configured for fast rounds and quick convergence.
init_per_testcase(_TestCase, Config) ->
unittest_helper:make_ring_with_ids(
[?MINUS_INFINITY],
[{config, [
{monitor_perf_interval, 0}, % deactivate monitor_perf
{gossip_load_interval, 100}, % truncated to 0, i.e. immediate delivery
{gossip_load_convergence_count_new_round, 5},
{gossip_load_convergence_count_best_values, 1},
{gossip_log_level_warn, warn},
{gossip_log_level_error, error}
]
}]),
%% The match asserts the second element is [] (presumably "no nodes
%% failed to start" -- confirm against api_vm:add_nodes/1).
{_, []} = api_vm:add_nodes(?NO_OF_NODES - 1),
unittest_helper:wait_for_stable_ring_deep(),
[{stop_ring, true} | Config].
%% Nothing to clean up per test case; ring teardown is driven by the
%% {stop_ring, true} config entry added in init_per_testcase/2.
end_per_testcase(_TestCase, _Config) ->
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Testcases
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% With no entries written, gossiping must converge on zero load and a
%% ring size of ?NO_OF_NODES.
test_no_load(_Config) ->
% get values from gossiping (after round finishes)
wait_n_rounds(1),
send2gossip({cb_msg, {gossip_load, default}, {gossip_get_values_all, self()}}, 1),
% {load_info, avg, stddev, size_ldr, size_kr, minLoad, maxLoad, merged, other}
LoadInfoExpected ={load_info, 0.0, 0.0, 5.0, 5.0, 0, 0, whatever, []},
receive {gossip_get_values_all_response, {PrevLoadInfo, _, _}} ->
%% compare/2 ignores the 'merged' field ('whatever' here).
?compare(fun compare/2, PrevLoadInfo, LoadInfoExpected)
end.
%% Writes 100 entries, computes the expected avg/stddev/min/max load
%% directly from the dht_node states, and checks the gossiped load_info
%% converges to those values (within compare/2's 5% tolerance).
test_load(_Config) ->
write(100),
% request load from all nodes and calc expected values manually
Loads = lists:map(fun (State) -> dht_node_state:get(State, load) end, get_node_states()),
Avg = lists:sum(Loads)/?NO_OF_NODES,
Stddev = calc_stddev(Loads),
Size = ?NO_OF_NODES,
Min = lists:min(Loads),
Max = lists:max(Loads),
% {load_info, avg, stddev, size_ldr, size_kr, minLoad, maxLoad, merged, other}
LoadInfoExpected = {load_info, Avg, Stddev, Size, Size, Min, Max, dc, []},
% get values from gossiping (after round finishes)
% first round might be interrupted by node joins, thus wait two rounds
wait_n_rounds(1),
send2gossip({cb_msg, {gossip_load, default}, {gossip_get_values_all, self()}}, 1),
receive {gossip_get_values_all_response, {PrevLoadInfo, _, _}} ->
?compare(fun compare/2, PrevLoadInfo, LoadInfoExpected)
end.
%% request_histogram/2 must reject a bucket count of 0; the guard
%% failure surfaces as a function_clause error.
test_request_histogram1(_Config) ->
?expect_exception(gossip_load:request_histogram(0, comm:this()), error, function_clause).
%% Writes 100 entries, builds a 10-bucket load histogram by hand from
%% every node's local state, then requests the gossiped histogram and
%% checks the two agree (within compare/2's 5% tolerance).
test_request_histogram2(_Config) ->
write(100),
NoOfBuckets = 10,
%% get the states from all nodes and calc a histogram manually
DHTStates = get_node_states(),
Histos = lists:map(fun(State) -> init_histo(State, NoOfBuckets) end, DHTStates),
%% Per-bucket merge of {LoadSum, NodeCount} pairs; 'unknown' marks a
%% bucket outside a node's key range and is treated as neutral.
MergeFun =
fun(Histo, Acc) ->
Combine = fun({Load1, N1}, {Load2, N2}) -> {Load1+Load2, N1+N2};
(unknown, {Load, N}) -> {Load, N};
({Load, N}, unknown) -> {Load, N} end,
lists:zipwith(Combine, Histo, Acc)
end,
InitialAcc = [ {0,0} || _X <- lists:seq(1, NoOfBuckets)],
MergedHisto = lists:foldl(MergeFun, InitialAcc, Histos),
% calculate the histogram from Sums and Numbers of Nodes
Histo = lists:map(fun({Sum, N}) -> Sum/N end, MergedHisto),
%% log:log("Histo: ~w", [Histo]),
gossip_load:request_histogram(NoOfBuckets, comm:this()),
%% GossipedHisto = receive {histogram, Histo} -> Histo end, % doesn't work, don't know why!
GossipedHisto = receive Msg -> element(2, Msg) end,
% remove the intervals from gossiped histo:
GossipedHisto1 = lists:map(fun({_Interval, Value}) -> Value end, GossipedHisto),
% calc the estimates
GossipedHisto2 = lists:map(fun({Value, Weight}) -> Value/Weight end, GossipedHisto1),
%% log:log("GossipedHisto: ~w", [GossipedHisto2]),
?compare(fun compare/2, GossipedHisto2, Histo).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Helper
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% @doc Sends a Msg to the gossip process
%% (the one registered in any pid group that has a 'gossip' member),
%% delayed by Delay seconds via msg_delay.
-spec send2gossip(Msg::comm:message(), Delay::non_neg_integer()) -> ok.
send2gossip(Msg, Delay) ->
Group = pid_groups:group_with(gossip),
Pid = pid_groups:pid_of(Group, gossip),
msg_delay:send_local(Delay, Pid, Msg).
%% @doc Waits n rounds, pulling the web_debug_info every second.
%% Reads the current gossip round and blocks until it has advanced by
%% NoOfRounds.
-spec wait_n_rounds(NoOfRounds::pos_integer()) -> ok.
wait_n_rounds(NoOfRounds) ->
Round = get_current_round(),
wait_for_round(Round+NoOfRounds).
%% @doc Helper for wait_n_rounds/1: polls the current round (each poll
%% takes ~1s, see get_current_round/0) until it reaches or passes
%% TargetRound.
-spec wait_for_round(TargetRound::pos_integer()) -> ok.
wait_for_round(TargetRound) ->
    Round = get_current_round(),
    %% log:log("CurrentRound: ~w, TargetRound: ~w", [Round, TargetRound]),
    %% The original 'if' used two overlapping guards whose correctness
    %% depended on clause order; a case on the single condition is
    %% exhaustive by construction.
    case Round >= TargetRound of
        true -> ok;
        false -> wait_for_round(TargetRound)
    end.
%% @doc Get the current round (with one second delay).
%% Polls the gossip process's web_debug_info and extracts "cur_round";
%% retries when the key is missing.
-spec get_current_round() -> non_neg_integer().
get_current_round() ->
send2gossip({web_debug_info, self()}, 1),
receive {web_debug_info_reply, KeyValueList} ->
case lists:keyfind("cur_round", 1, KeyValueList) of
{"cur_round", Round} -> Round;
false -> get_current_round() % happens if cb module not yet initiated
end
end.
%% @doc Writes the given number of dummy entries to scalaris.
%% Keys/values are "k<N>"/"v<N>" iolists from io_lib:format; commit
%% results are collected only for optional debug logging.
-spec write(NoOfEntries::pos_integer()) -> ok.
write(NoOfEntries) ->
_CommitLog = [ begin
K = io_lib:format("k~w", [N]),
V = io_lib:format("v~w", [N]),
api_tx:write(K,V)
end || N <- lists:seq(1,NoOfEntries) ],
%% log:log("CommitLog: ~w", [_CommitLog]),
ok.
%% @doc Get the states from all dht nodes.
%% One gen_component state per dht_node process in this VM.
-spec get_node_states() -> [dht_node_state:state(), ...].
get_node_states() ->
% request load from all nodes and calc expected values manually
[gen_component:get_state(Pid) || Pid <- pid_groups:find_all(dht_node)].
%% @doc Builds a initial histogram, i.e. a histogram for a single node.
%% The full key space is split into NoOfBuckets intervals; each bucket
%% becomes {Load, 1} or 'unknown' (see get_load_for_interval/3).
-spec init_histo(DHTNodeState::dht_node_state:state(), NoOfBuckets::pos_integer()) -> [{non_neg_integer(), 1}, ...].
init_histo(DHTNodeState, NoOfBuckets) ->
DB = dht_node_state:get(DHTNodeState, db),
MyRange = dht_node_state:get(DHTNodeState, my_range),
Buckets = intervals:split(intervals:all(), NoOfBuckets),
_Histo = [ get_load_for_interval(BucketInterval, MyRange, DB) || BucketInterval <- Buckets ],
%% log:log("Histo ~w", [_Histo]),
_Histo.
%% @doc Gets the load for a given interval as {Load, 1}, or 'unknown'
%% when the bucket does not intersect this node's key range.
%% (The spec previously omitted the 'unknown' return.)
-spec get_load_for_interval(BucketInterval::intervals:interval(),
        MyRange::intervals:interval(), DB::db_dht:db()) ->
            {non_neg_integer(), 1} | unknown.
get_load_for_interval(BucketInterval, MyRange, DB) ->
    case intervals:is_empty(intervals:intersection(BucketInterval, MyRange)) of
        true -> unknown;
        false ->
            Load = db_dht:get_load(DB, BucketInterval),
            {Load, 1}
    end.
%% @doc Compares LoadInfo records and Histo. Returns true if the difference is less than 5 %.
%% For load_info records, each listed field pair is compared via
%% calc_diff/2; the 'merged' counter is deliberately excluded. For
%% histograms, the lists are compared element-wise (zipwith requires
%% equal lengths).
-spec compare(LoadInfo1::gossip_load:load_info(), LoadInfo1::gossip_load:load_info()) -> boolean();
(Histo1::[float(),...], Histo2::[float(),...]) -> boolean().
compare(LoadInfo1, LoadInfo2) when is_tuple(LoadInfo1) andalso is_tuple(LoadInfo2) ->
%% log:log("LoadInfo1: ~w~n", [LoadInfo1]),
%% log:log("LoadInfo2: ~w~n", [LoadInfo2]),
Fun = fun(Key, {Acc, LI1, LI2}) ->
Value1 = gossip_load:load_info_get(Key, LI1),
Value2 = gossip_load:load_info_get(Key, LI2),
{Acc andalso calc_diff(Value1, Value2) < 5.0, LI1, LI2}
end,
% merged counter is excluded from comparison
element(1, lists:foldl(Fun, {true, LoadInfo1, LoadInfo2},
[avgLoad, stddev, size_ldr, size_kr, minLoad, maxLoad]));
compare(Histo1, Histo2) when is_list(Histo1) andalso is_list(Histo2) ->
Fun = fun(Val1, Val2) -> calc_diff(Val1, Val2) < 5.0 end,
ComparisonResult = lists:zipwith(Fun, Histo1, Histo2),
lists:all(fun(X) -> X end, ComparisonResult).
%% @doc Calculates the difference in percent from one value to another value.
%% 'unknown' on either side, or a zero reference value, yields the
%% maximum difference of 100.0; identical (exactly equal) values yield 0.0.
-spec calc_diff(Value1::float()|unknown, Value2::float()|unknown) -> float().
calc_diff(unknown, _Value2) -> 100.0;
calc_diff(Same, Same) -> 0.0;
calc_diff(_Value1, unknown) -> 100.0;
calc_diff(Value1, _Value2) when Value1 == 0 -> 100.0;
calc_diff(Value1, Value2) ->
    ((Value1 + abs(Value2 - Value1)) * 100.0 / Value1) - 100.
%% @doc Calculate the (population) standard deviation for a given list
%% of numbers. Returns 0.0 for the empty list.
%% The original divided by ?NO_OF_NODES regardless of the list's actual
%% length, which was only correct when called with exactly one load per
%% node; using length(Loads) is equivalent for the existing caller
%% (test_load/1) and correct for any other list.
-spec calc_stddev(Loads::[number()]) -> float().
calc_stddev([]) -> 0.0;
calc_stddev(Loads) ->
    N = length(Loads),
    Avg = lists:sum(Loads) / N,
    SquaredDeviations = [math:pow(Avg - Load, 2) || Load <- Loads],
    math:sqrt(lists:sum(SquaredDeviations) / N).
%%% @doc
%%% A utility to create small memory buffers for an application without needing to allocate
%%% one big memory buffer. For example, maybe you need to reference data by bytes at random
%%% locations but you don't know which locations you'll use. Or, you're processing
%%% chunks that may live at different byte positions. Rather than trying to create 1 large
%%% buffer to handle this, you can use memory_page to allocate only the buffers needed.
%%%
%%% Say you have data at bit positions: 10, 1048, and will have others at higher ranges. You
%%% could use this:
%%% ```
%%% 1 big buffer
%%% [ | | | | | | ... ]
%%% 0 4k
%%% '''
%%% Even though you may not need it all. Or you could do this: use smaller buffers at
%%% different offsets on demand:
%%%
%%% ```
%%% [0] -> [ | | ...]
%%% 0 1023
%%% '''
%%% ```
%%% [1] -> [ | | ...]
%%% 1024 2048
%%% '''
%%%
%%% Memory Pager does the latter.
%%% @end
-module(memory_pager).
-export([
new/0,
new/1,
get/2,
set/3,
num_of_pages/1,
pagesize_in_bytes/1,
pagenum_for_byte_index/2,
collect/1,
truncate_buffer/3
]).
%% Default page size
-define(PAGE_SIZE, 1024).
%% Initial capacity of the page list
-define(NUM_PAGES, 32768).
-type state() :: {Pages :: [page()], PageSize :: pos_integer()}.
-type page() :: {Offset :: pos_integer(), Buffer :: binary()}.
%% @doc Create a new memory pager with a default page size of 1024 bytes
-spec new() -> state().
new() ->
new(?PAGE_SIZE).
%% @doc Create a new memory pager with the given page size. Note:
%% 'PageSize' must be a power of 2 in 'bytes' or an error occurs.
%% NOTE(review): the spec advertises erlang:throw(...) but the failure
%% path below raises via erlang:error/1, so callers observe class
%% 'error', not 'throw' - confirm which is intended.
-spec(new(PageSize :: pos_integer()) -> state() | erlang:throw({badarg, not_power_of_two})).
new(PageSize) ->
    case power_of_two(PageSize) of
        true ->
            {
                %% ?NUM_PAGES is only the initial capacity; {fixed, false}
                %% lets the array grow on demand, and 'nil' marks an
                %% unallocated page slot (see get/2).
                array:new([{size, ?NUM_PAGES}, {fixed, false}, {default, nil}]),
                PageSize
            };
        _ ->
            erlang:error({badarg, not_power_of_two})
    end.
%% @doc Get a page by page num. Returns {none, State} when the slot at
%% 'PageNum' has never been populated (the array default is 'nil').
-spec get(PageNum :: non_neg_integer(), State) ->
    {none, State} | {ok, Page :: tuple(), State}
        when State :: {array:array(), pos_integer()}.
get(PageNum, {Pages, _PageSize} = State) ->
    Slot = array:get(PageNum, Pages),
    if
        Slot =:= nil -> {none, State};
        true -> {ok, Slot, State}
    end.
%% @doc Set a buffer at the given page number. A buffer whose size
%% differs from the configured page size is truncated or zero-padded to
%% fit (see truncate_buffer/3). The stored page records its absolute
%% byte offset, i.e. PageNum * PageSize.
-spec set(PageNum :: non_neg_integer(), Buffer :: binary(),
          State :: {array:array(), pos_integer()}) ->
    {ok, {array:array(), pos_integer()}}.
set(PageNum, Buffer, {Pages, PageSize}) ->
    Fitted = truncate_buffer(Buffer, byte_size(Buffer), PageSize),
    ByteOffset = PageNum * PageSize,
    NewPages = array:set(PageNum, {ByteOffset, Fitted}, Pages),
    {ok, {NewPages, PageSize}}.
%% @doc Return the logical page count. Note that array:sparse_size/1
%% yields the index of the last populated slot plus one, so unpopulated
%% gaps below the highest page are counted as well.
-spec num_of_pages({array:array(), pos_integer()}) -> non_neg_integer().
num_of_pages({Pages, _PageSize}) ->
    array:sparse_size(Pages).

%% @doc Return the page number that contains the given byte index.
-spec pagenum_for_byte_index(Index :: non_neg_integer(),
                             {array:array(), pos_integer()}) -> non_neg_integer().
pagenum_for_byte_index(ByteIndex, {_Pages, PageSize}) ->
    ByteIndex div PageSize.

%% @doc Return the page size in bytes; this is fixed at creation time.
-spec pagesize_in_bytes({array:array(), pos_integer()}) -> pos_integer().
pagesize_in_bytes({_Pages, PageSize}) ->
    PageSize.
%% @doc Return every populated page as an ordered list of
%% {PageNum, {ByteOffset, Buffer}} pairs; unpopulated slots are skipped.
-spec collect({array:array(), pos_integer()}) ->
    [{PageNum :: non_neg_integer(), Page :: tuple()}].
collect({Pages, _PageSize}) ->
    array:sparse_to_orddict(Pages).
%% @doc Determine if 'Value' is a power of 2.
%% Guards against zero and negative input: for 0 the classic bit trick
%% (V band (V - 1) =:= 0) would wrongly answer 'true', which would let
%% new/1 build a pager whose page size later divides by zero.
-spec(power_of_two(Value :: integer()) -> boolean()).
power_of_two(Value) when Value > 0 ->
    Value band (Value - 1) =:= 0;
power_of_two(_Value) ->
    false.
%% @private
%% Normalize 'Buffer' to exactly 'PageSize' bytes:
%%   - equal size: returned unchanged;
%%   - larger: only the first PageSize bytes are kept, the rest is
%%     discarded;
%%   - smaller: zero bytes are appended until the buffer reaches
%%     PageSize.
truncate_buffer(Buffer, PageSize, PageSize) ->
    %% Already the configured size - nothing to do.
    Buffer;
truncate_buffer(Buffer, BufferSize, PageSize) when BufferSize > PageSize ->
    %% Keep only the leading PageSize bytes.
    binary:part(Buffer, 0, PageSize);
truncate_buffer(Buffer, _BufferSize, PageSize) ->
    %% Pad with zero bytes up to the page size.
    PadBits = (PageSize - byte_size(Buffer)) * 8,
    <<Buffer/binary, 0:PadBits>>.
%%%-------------------------------------------------------------------
%%% @doc
%%% A set of optics specific to tuples.
%%% @end
%%%-------------------------------------------------------------------
-module(optic_tuples).
%% API
-export([all/0,
all/1,
element/1,
element/2,
field/3,
field/4]).
%%%===================================================================
%%% API
%%%===================================================================
%% @see all/1
-spec all() -> optic:optic().
all() ->
all(#{}).
%% @doc
%% Focus on all elements of a tuple.
%%
%% Example:
%%
%% ```
%% > optic:get([optic_tuples:all()], {1,2,3}).
%% {ok,[1,2,3]}
%% '''
%% @end
%% @param Options Common optic options.
%% @returns An opaque optic record.
-spec all(Options) -> optic:optic() when
Options :: optic:variations().
all(Options) ->
Fold =
fun (Fun, Acc, Tuple) when is_tuple(Tuple) ->
{ok, lists:foldl(Fun, Acc, tuple_to_list(Tuple))};
(_, _, _) ->
{error, undefined}
end,
MapFold =
fun (Fun, Acc, Tuple) when is_tuple(Tuple) ->
{NewList, NewAcc} = lists:mapfoldl(Fun, Acc, tuple_to_list(Tuple)),
{ok, {list_to_tuple(NewList), NewAcc}};
(_, _, _) ->
{error, undefined}
end,
New =
fun (_Data, _Template) ->
{}
end,
Optic = optic:new(MapFold, Fold),
optic:variations(Optic, Options, New).
%% @see element/2
-spec element(N) -> optic:optic() when
N :: pos_integer().
element(N) ->
optic_tuples:element(N, #{}).
%% @doc
%% Focus on the nth element of a tuple. As with `erlang:element/2',
%% indexing begins at 1.
%%
%% Example:
%%
%% ```
%% > optic:get([optic_tuples:element(1)], {1,2,3}).
%% {ok,[1]}
%% '''
%% @end
%% @param N The index of the tuple element to focus on.
%% @param Options Common optic options.
%% @returns An opaque optic record.
-spec element(N, Options) -> optic:optic() when
    N :: pos_integer(),
    Options :: optic:variations().
element(N, Options) ->
    %% Read-only traversal: visit the Nth element when it exists. The
    %% `N =< tuple_size(Tuple)' guard also rejects non-tuples, because
    %% a failing guard BIF silently falls through to the error clause.
    Fold =
        fun (Fun, Acc, Tuple) when N =< tuple_size(Tuple) ->
                Nth = erlang:element(N, Tuple),
                {ok, Fun(Nth, Acc)};
            (_, _, _) ->
                {error, undefined}
        end,
    %% Read-write traversal: same focus, but the mapped value is written
    %% back with erlang:setelement/3.
    MapFold =
        fun (Fun, Acc, Tuple) when N =< tuple_size(Tuple) ->
                Nth = erlang:element(N, Tuple),
                {NewNth, NewAcc} = Fun(Nth, Acc),
                {ok, {erlang:setelement(N, Tuple, NewNth), NewAcc}};
            (_, _, _) ->
                {error, undefined}
        end,
    %% Constructor: grow an existing tuple (or build a new one) with
    %% copies of Template until index N exists. Presumably invoked by
    %% optic:variations/3 when a 'create' option is set - confirm
    %% against the optic module.
    New =
        fun (Tuple, Template) when is_tuple(Tuple) ->
                list_to_tuple(tuple_to_list(Tuple) ++
                              lists:duplicate(N - tuple_size(Tuple), Template));
            (_Data, Template) ->
                list_to_tuple(lists:duplicate(N, Template))
        end,
    Optic = optic:new(MapFold, Fold),
    optic:variations(Optic, Options, New).
%% @see field/4
-spec field(Tag, Size, N) -> optic:optic() when
Tag :: atom(),
Size :: pos_integer(),
N :: pos_integer().
field(Tag, Size, N) ->
field(Tag, Size, N, #{}).
%% @doc
%% Focus on a record field. As records are a compiler construct, this
%% depends on the `?OPTIC_FIELD' macro in `include/optic_tuples.hrl'
%% to construct the required arguments from the record definition.
%%
%% Given the record definition:
%%
%% ```
%% -include_lib("optic/include/optic_tuples.hrl").
%% -record(example, {first}).
%% '''
%%
%% Example:
%%
%% ```
%% > optic:get([optic_tuples:field(?OPTIC_FIELD(example, first))],
%% #example{first=1}).
%% {ok,[1]}
%% '''
%% @end
%% @param Tag The expected record tag.
%% @param Size The expected record size.
%% @param N The index of the field in the record tuple.
%% @param Options Common optic options.
%% @returns An opaque optic record.
-spec field(Tag, Size, N, Options) -> optic:optic() when
Tag :: atom(),
Size :: pos_integer(),
N :: pos_integer(),
Options :: optic:variations().
field(Tag, Size, N, Options) ->
Fold =
fun (Fun, Acc, Tuple) when erlang:element(1, Tuple) == Tag,
Size == tuple_size(Tuple),
N > 1,
N =< tuple_size(Tuple) ->
Nth = erlang:element(N, Tuple),
{ok, Fun(Nth, Acc)};
(_, _, _) ->
{error, undefined}
end,
MapFold =
fun (Fun, Acc, Tuple) when erlang:element(1, Tuple) == Tag,
Size == tuple_size(Tuple),
N > 1,
N =< tuple_size(Tuple) ->
Nth = erlang:element(N, Tuple),
{NewNth, NewAcc} = Fun(Nth, Acc),
{ok, {erlang:setelement(N, Tuple, NewNth), NewAcc}};
(_, _, _) ->
{error, undefined}
end,
New =
fun (_Data, Template) ->
list_to_tuple([Tag] ++ lists:duplicate(Size - 1, Template))
end,
Optic = optic:new(MapFold, Fold),
optic:variations(Optic, Options, New). | src/optic_tuples.erl | 0.635336 | 0.533094 | optic_tuples.erl | starcoder |
%% Copyright (c) 2021 <NAME>
%%
%% Permission is hereby granted, free of charge, to any person obtaining a
%% copy of this software and associated documentation files (the "Software"),
%% to deal in the Software without restriction, including without limitation
%% the rights to use, copy, modify, merge, publish, distribute, sublicense,
%% and/or sell copies of the Software, and to permit persons to whom the
%% Software is furnished to do so, subject to the following conditions:
%%
%% The above copyright notice and this permission notice shall be included in
%% all copies or substantial portions of the Software.
%%
%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
%% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
%% FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
%% DEALINGS IN THE SOFTWARE.
%%
%% locus is an independent project and has not been authorized, sponsored,
%% or otherwise approved by MaxMind.
%% @private
-module(locus_shared_bitarray).
-include_lib("stdlib/include/assert.hrl").
%% ------------------------------------------------------------------
%% API Function Exports
%% ------------------------------------------------------------------
-export([new/1,
set/2,
is_set/2,
get_positions_set_at_cell/2]).
%% ------------------------------------------------------------------
%% Macro Definitions
%% ------------------------------------------------------------------
-define(BITSHIFT, 6).
%% ------------------------------------------------------------------
%% Type Definitions
%% ------------------------------------------------------------------
-opaque t() :: atomics:atomics_ref().
-export_type([t/0]).
%% ------------------------------------------------------------------
%% API Function Definitions
%% ------------------------------------------------------------------
-spec new(pos_integer()) -> t().
new(Length) ->
Size = ceil(Length / (1 bsl ?BITSHIFT)),
atomics:new(Size, [{signed, false}]).
-spec set(t(), non_neg_integer()) -> ok.
set(Array, Position) ->
OneBasedIndex = (Position bsr ?BITSHIFT) + 1,
Offset = Position band ((1 bsl ?BITSHIFT) - 1),
UpdateMask = 1 bsl Offset,
Cell = atomics:get(Array, OneBasedIndex),
set_recur(Array, Cell, OneBasedIndex, UpdateMask).
-spec is_set(t(), non_neg_integer()) -> boolean().
is_set(Array, Position) ->
OneBasedIndex = (Position bsr ?BITSHIFT) + 1,
Offset = Position band ((1 bsl ?BITSHIFT) - 1),
ReadMask = 1 bsl Offset,
try atomics:get(Array, OneBasedIndex) of
Cell ->
(Cell band ReadMask) =/= 0
catch
error:badarg when is_reference(Array), is_integer(Position), Position >= 0 ->
throw({position_out_of_bounds, Position})
end.
%% Decode all set bit positions stored in the cell at zero-based
%% 'Index'.
%% NOTE(review): the spec promises a non-empty list, but an all-zero
%% cell makes positions_set_at_cell/2 return [] - confirm intent.
-spec get_positions_set_at_cell(t(), non_neg_integer()) -> [non_neg_integer(), ...].
get_positions_set_at_cell(Array, Index) ->
    %% 'Index' is zero-based; atomics indices are 1-based.
    try atomics:get(Array, _OneBasedIndex = Index + 1) of
        Cell ->
            %% First absolute bit position covered by this cell.
            BasePosition = Index bsl ?BITSHIFT,
            positions_set_at_cell(BasePosition, Cell)
    catch
        error:badarg ->
            %% Sanity-check that the badarg really came from an
            %% out-of-range index before reporting it as such.
            #{size := Size} = atomics:info(Array),
            ?assertMatch({true, _, _}, {Index >= Size, Index, Size}),
            throw({index_out_of_bounds, Index})
    end.
%% ------------------------------------------------------------------
%% Internal Function Definitions
%% ------------------------------------------------------------------
%% Merge the bits of 'UpdateMask' into the cell at 'OneBasedIndex',
%% retrying until the update is known to be applied.
%%
%% Uses atomics:compare_exchange/4 rather than atomics:exchange/3: a
%% plain exchange would transiently overwrite bits set concurrently by
%% other writers between our read and our write, so a reader could
%% momentarily observe a previously-set bit as cleared. A compare-and-
%% swap only commits when the cell still holds the value our update was
%% based on, eliminating that window.
set_recur(Array, Cell, OneBasedIndex, UpdateMask) ->
    case Cell bor UpdateMask of
        Cell ->
            %% All requested bits are already set - nothing to write.
            ok;
        NewCell ->
            case atomics:compare_exchange(Array, OneBasedIndex, Cell, NewCell) of
                ok ->
                    ok;
                ActualCell ->
                    %% Lost a race; retry against the fresh value.
                    set_recur(Array, ActualCell, OneBasedIndex, UpdateMask)
            end
    end.
%% Decode the bit pattern in 'Cell' into the ascending list of absolute
%% positions whose bits are set, where bit 0 maps to 'BasePosition'.
positions_set_at_cell(BasePosition, Cell) ->
    positions_set_at_cell(BasePosition, Cell, []).

positions_set_at_cell(_Position, 0, Acc) ->
    lists:reverse(Acc);
positions_set_at_cell(Position, Cell, Acc) when Cell band 1 =:= 1 ->
    positions_set_at_cell(Position + 1, Cell bsr 1, [Position | Acc]);
positions_set_at_cell(Position, Cell, Acc) ->
    positions_set_at_cell(Position + 1, Cell bsr 1, Acc).
-module(gh3).
-export([to_polyfills/2, from_polyfill/1]).
%% @doc Transforms a pre-parsed GeoJSON map into a list of polyfills.
%%
%% Example:
%%
%% ```
%% %% First download a map from https://geojson-maps.ash.ms
%% {ok, JSONb} = file:read_file("custom.geo.json"),
%% JSON = jsx:decode(JSONb, []),
%% Poly = h3:to_polyfills(JSON, 8),
%% '''
-spec to_polyfills(JSON :: map(), Resolution :: h3:resolution()) ->
[[h3:h3index(), ...], ...].
to_polyfills(#{<<"features">> := Features}, Resolution) ->
lists:map(
fun (Feature) ->
to_polyfills(Feature, Resolution)
end,
Features
);
to_polyfills(#{<<"geometry">> := Geometry}, Resolution) ->
to_polyfills(Geometry, Resolution);
to_polyfills(
#{<<"type">> := <<"MultiPolygon">>, <<"coordinates">> := Coordinates},
Resolution
) ->
geojson_parse_polygons(Coordinates, Resolution);
to_polyfills(
#{<<"type">> := <<"Polygon">>, <<"coordinates">> := Coordinates},
Resolution
) ->
h3:polyfill(geojson_parse_polygon(Coordinates), Resolution).
geojson_parse_polygons(Polygons, Resolution) ->
lists:map(
fun (P) -> h3:polyfill(geojson_parse_polygon(P), Resolution) end,
Polygons
).
geojson_parse_polygon(OutlineAndHoles) ->
lists:map(fun (OH) -> geojson_transform_coordinates(OH) end, OutlineAndHoles).
%% Convert a list of GeoJSON positions into float tuples with swapped
%% element order. NOTE(review): GeoJSON positions are [Lon, Lat] per
%% RFC 7946, yet the pattern binds the first element as Lat; the net
%% result is {Lat, Lon} tuple order - presumably what h3:polyfill
%% expects, confirm against the h3 module.
geojson_transform_coordinates(CoordinateList) ->
    SwapToTuple = fun ([First, Second]) -> {float(Second), float(First)} end,
    lists:map(SwapToTuple, CoordinateList).
%% @doc Converts a polyfill into a GeoJSON map.
%%
%% Round-trip example:
%%
%% ```
%% %% First download a map from https://geojson-maps.ash.ms
%% {ok, JSONb} = file:read_file("custom.geo.json"),
%% JSON = jsx:decode(JSONb, []),
%% Polys = h3:to_polyfills(JSON, 9),
%% Unsorted = lists:flatten(Polys),
%% Sorted = lists:sort(fun(A, B) -> h3:get_resolution(A) < h3:get_resolution(B) end, Unsorted),
%% Compacted = h3:compact(Sorted),
%% NewJSON = h3:from_polyfill(Compacted),
%% NewJSONb = jsx:encode(NewJSON),
%% ok = file:write_file("custom.reconstructed.json", NewJSONb).
%% ## Try plotting at http://geojson.tools
%% '''
from_polyfill(Polyfill) ->
#{
<<"type">> => <<"MultiPolygon">>,
<<"coordinates">> => multi_polygon_to_geojson(h3:set_to_multi_polygon(Polyfill))
}.
%% Recursively convert h3's nested {Lat, Lon} tuple structure into
%% GeoJSON's nested [Lon, Lat] coordinate lists.
multi_polygon_to_geojson({Lat, Lon}) ->
    [Lon, Lat];
multi_polygon_to_geojson(Nested) ->
    lists:map(fun multi_polygon_to_geojson/1, Nested).
%%---------------------------------------------------------------------------
%% |
%% Module : String
%% Copyright : (c) 2020-2021 EMQ Technologies Co., Ltd.
%% License : BSD-style (see the LICENSE file)
%%
%% Maintainer : <NAME>, <EMAIL>
%% <NAME>, <EMAIL>
%% Stability : experimental
%% Portability : portable
%%
%% The UTF-8 String FFI module.
%%
%%---------------------------------------------------------------------------
-module('String').
-include("../Foreign/Maybe.hrl").
-export([ concat/2
, reverse/1
, replicate/2
, strlen/1
, equalIgnoreCase/2
, hasPrefix/2
, hasSuffix/2
, indexOf/2
, lastIndexOf/2
, find/2
, findLast/2
, replace/3
, replaceFirst/3
, replaceLast/3
, split/2
, lines/1
, words/1
, sliceTo/3
, pad/2
, padLeft/2
, padBoth/2
, trimChars/2
, trimLeft/1
, trimLeftChars/2
, trimRight/1
, trimRightChars/2
]).
-define(Whitespace, "\x{0009}\x{000B}\x{000C}\x{0020}\x{00A0}").
-define(LineFeed, "\x{000A}\x{000D}\x{2028}\x{2029}").
-type(prefix() :: string()).
-type(suffix() :: string()).
-type(pattern() :: string()).
-type(replacement() :: string()).
-spec(concat(string(), string()) -> string()).
concat(S1, S2) -> string:concat(S1, S2).
-spec(reverse(string()) -> string()).
reverse(String) -> lists:reverse(String).
%% @doc Build a string consisting of 'Count' copies of 'Str' joined
%% together.
-spec(replicate(pos_integer(), string()) -> string()).
replicate(Count, Str) ->
    lists:flatten([Str || _ <- lists:seq(1, Count)]).
-spec(strlen(string()) -> integer()).
strlen(String) -> string:length(String).
-spec(equalIgnoreCase(string(), string()) -> boolean()).
equalIgnoreCase(S1, S2) ->
string:equal(S1, S2, true).
-spec(hasPrefix(string(), prefix()) -> boolean()).
hasPrefix(String, Prefix) ->
string:prefix(String, Prefix) /= nomatch.
%% @doc Test whether 'String' ends with 'Suffix' (grapheme-aware via
%% string:length/1 and string:equal/2).
-spec(hasSuffix(string(), string()) -> boolean()).
hasSuffix(String, Suffix) ->
    Start = string:length(String) - string:length(Suffix),
    Start >= 0 andalso string:equal(string:slice(String, Start), Suffix).
-spec(indexOf(char(), string()) -> integer()).
indexOf(Char, String) -> string:chr(String, Char).
-spec(lastIndexOf(char(), string()) -> integer()).
lastIndexOf(Char, String) -> string:rchr(String, Char).
-spec(find(string(), pattern()) -> maybe(string())).
find(String, Pattern) ->
doFind(String, Pattern, leading).
-spec(findLast(string(), pattern()) -> maybe(string())).
findLast(String, Pattern) ->
doFind(String, Pattern, trailing).
doFind(String, Pattern, Dir) ->
case string:find(String, Pattern, Dir) of
nomatch -> ?Nothing;
SubStr -> ?Just(SubStr)
end.
%% @doc Replace all occurrences of 'Pattern' with 'Replacement'.
%% string:replace/4 returns a (possibly deep) chardata list, which does
%% not satisfy the advertised string() return type; the result is
%% therefore flattened into a proper character list.
-spec(replace(string(), string(), string()) -> string()).
replace(String, Pattern, Replacement) ->
    lists:flatten(string:replace(String, Pattern, Replacement, all)).

%% @doc Replace only the first (leftmost) occurrence of 'Pattern'.
-spec(replaceFirst(string(), string(), string()) -> string()).
replaceFirst(String, Pattern, Replacement) ->
    lists:flatten(string:replace(String, Pattern, Replacement, leading)).

%% @doc Replace only the last (rightmost) occurrence of 'Pattern'.
-spec(replaceLast(string(), string(), string()) -> string()).
replaceLast(String, Pattern, Replacement) ->
    lists:flatten(string:replace(String, Pattern, Replacement, trailing)).
-spec(split(string(), Sep :: string()) -> [string()]).
split(String, Sep) -> string:split(String, Sep, all).
-spec(lines(string()) -> [string()]).
lines(String) -> string:split(String, "\n", all).
-spec(words(string()) -> [string()]).
words(String) -> string:tokens(String, ?Whitespace ++ ?LineFeed).
%% @doc Return the substring from 'Start' (inclusive, zero-based) up to
%% 'End' (exclusive).
-spec(sliceTo(string(), pos_integer(), pos_integer()) -> string()).
sliceTo(String, Start, End) ->
    Length = End - Start,
    string:slice(String, Start, Length).
flat(X) ->
lists:flatten(X).
-spec(pad(string(), pos_integer()) -> string()).
pad(String, Len) ->
flat(string:pad(String, Len)).
-spec(padLeft(string(), pos_integer()) -> string()).
padLeft(String, Len) ->
flat(string:pad(String, Len, leading)).
-spec(padBoth(string(), pos_integer()) -> string()).
padBoth(String, Len) ->
flat(string:pad(String, Len, both)).
%% @doc Strip any of 'Chars' from both ends of 'Subject'.
-spec(trimChars(string(), string()) -> string()).
trimChars(Subject, Chars) ->
    string:trim(Subject, both, Chars).

%% @doc Strip leading whitespace.
-spec(trimLeft(string()) -> string()).
trimLeft(Subject) ->
    string:trim(Subject, leading).

%% @doc Strip any of 'Chars' from the front of 'Subject'.
-spec(trimLeftChars(string(), string()) -> string()).
trimLeftChars(Subject, Chars) ->
    string:trim(Subject, leading, Chars).

%% @doc Strip trailing whitespace.
-spec(trimRight(string()) -> string()).
trimRight(Subject) ->
    string:trim(Subject, trailing).

%% @doc Strip any of 'Chars' from the end of 'Subject'.
-spec(trimRightChars(string(), string()) -> string()).
trimRightChars(Subject, Chars) ->
    string:trim(Subject, trailing, Chars).
%% Copyright 2011 <NAME> <<EMAIL>>
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
-module(qrcode_mask).
-include("qrcode_params.hrl").
-export([generate/2, select/1]).
-define(PENALTY_RULE_1, 3).
-define(PENALTY_RULE_2, 3).
-define(PENALTY_RULE_3, 40).
-define(PENALTY_RULE_4, 10).
%% Generates all eight masked versions of the bit matrix
generate(#qr_params{dimension = Dim}, Matrix) ->
Sequence = lists:seq(0, 7),
Functions = [mask(X) || X <- Sequence],
Masks = [generate_mask(Dim, MF) || MF <- Functions],
[apply_mask(Matrix, Mask, []) || Mask <- Masks].
%% Selects the lowest penalty candidate from a list of bit matrices
select([H|T]) ->
Score = score_candidate(H),
select_candidate(T, 0, 0, Score, H).
%% Internal
%
generate_mask(Max, MF) ->
Sequence = lists:seq(0, Max - 1),
[generate_mask(Sequence, Y, MF) || Y <- Sequence].
generate_mask(Sequence, Y, MF) ->
[case MF(X, Y) of true -> 1; false -> 0 end || X <- Sequence].
%% XOR every data module (an integer) with its corresponding mask bit;
%% entries that are not integers (reserved/function modules) pass
%% through unchanged. Rows of the matrix and the mask are walked in
%% lock-step.
apply_mask([Row | Rows], [MaskRow | MaskRows], Acc) ->
	Masked = apply_mask0(Row, MaskRow, []),
	apply_mask(Rows, MaskRows, [Masked | Acc]);
apply_mask([], [], Acc) ->
	lists:reverse(Acc).

apply_mask0([Module | Modules], [Bit | Bits], Acc) when is_integer(Module) ->
	apply_mask0(Modules, Bits, [Module bxor Bit | Acc]);
apply_mask0([Module | Modules], [_Bit | Bits], Acc) ->
	apply_mask0(Modules, Bits, [Module | Acc]);
apply_mask0([], [], Acc) ->
	lists:reverse(Acc).
% Mask generator predicates from the QR spec (ISO/IEC 18004). In the
% spec's notation i is the module row and j is the module column; here
% Y is the row (i) and X is the column (j). Each fun answers whether
% the module at (X, Y) should be toggled.
% 000: (i + j) mod 2 = 0
mask(0) ->
	fun(X, Y) -> (X + Y) rem 2 =:= 0 end;
% 001: i mod 2 = 0
mask(1) ->
	fun(_X, Y) -> Y rem 2 =:= 0 end;
% 010: j mod 3 = 0
mask(2) ->
	fun(X, _Y) -> X rem 3 =:= 0 end;
% 011: (i + j) mod 3 = 0
mask(3) ->
	fun(X, Y) -> (X + Y) rem 3 =:= 0 end;
% 100: ((i div 2) + (j div 3)) mod 2 = 0
mask(4) ->
	fun(X, Y) -> (X div 3 + Y div 2) rem 2 =:= 0 end;
% 101: (i * j) mod 2 + (i * j) mod 3 = 0
mask(5) ->
	fun(X, Y) -> Sum = X * Y, Sum rem 2 + Sum rem 3 =:= 0 end;
% 110: ((i * j) mod 2 + (i * j) mod 3) mod 2 = 0
mask(6) ->
	fun(X, Y) -> Sum = X * Y, (Sum rem 2 + Sum rem 3) rem 2 =:= 0 end;
% 111: ((i * j) mod 3 + (i + j) mod 2) mod 2 = 0
mask(7) ->
	fun(X, Y) -> ((X * Y rem 3) + ((X + Y) rem 2)) rem 2 =:= 0 end.
select_candidate([H|T], Count, Mask, Score, C) ->
case score_candidate(H) of
X when X < Score ->
select_candidate(T, Count + 1, Count + 1, X, H);
_ ->
select_candidate(T, Count + 1, Mask, Score, C)
end;
select_candidate([], _, Mask, _Score, C) ->
%?TTY({selected, Mask, {score, Score}}),
{Mask, C}.
score_candidate(C) ->
Rule1 = apply_penalty_rule_1(C),
Rule2 = apply_penalty_rule_2(C),
Rule3 = apply_penalty_rule_3(C),
Rule4 = apply_penalty_rule_4(C),
Total = Rule1 + Rule2 + Rule3 + Rule4,
%?TTY({score, Total, [Rule1, Rule2, Rule3, Rule4]}),
Total.
%% Section 8.2.2
apply_penalty_rule_1(Candidate) ->
ScoreRows = rule1(Candidate, 0),
ScoreCols = rule1(rows_to_columns(Candidate), 0),
ScoreRows + ScoreCols.
%
rule1([Row|T], Score) ->
Score0 = rule1_row(Row, Score),
rule1(T, Score0);
rule1([], Score) ->
Score.
%
rule1_row(L = [H|_], Score) ->
F = fun
(1) when H =:= 1 ->
true;
(1) ->
false;
(_) when H =:= 0 orelse is_integer(H) =:= false ->
true;
(_) ->
false
end,
{H0,T0} = lists:splitwith(F, L),
case length(H0) of
Repeats when Repeats >= 5 ->
Penalty = ?PENALTY_RULE_1 + Repeats - 5,
rule1_row(T0, Score + Penalty);
_ ->
rule1_row(T0, Score)
end;
rule1_row([], Score) ->
Score.
%%
apply_penalty_rule_2(_M = [H, H0|T]) ->
% ?TTY(M),
Blocks = rule2(1, 1, H, H0, [H0|T], []),
Blocks0 = composite_blocks(Blocks, []),
Blocks1 = composite_blocks(Blocks0, []),
% ?TTY(Blocks1),
score_blocks(Blocks1, 0).
score_blocks([{_, {M, N}, _}|T], Acc) ->
Score = ?PENALTY_RULE_2 * (M - 1) * (N - 1),
score_blocks(T, Acc + Score);
score_blocks([], Acc) ->
Acc.
rule2(X, Y, [H, H|T], [H, H|T0], Rows, Acc) ->
rule2(X + 1, Y, [H|T], [H|T0], Rows, [{{X, Y}, {2, 2}, H}|Acc]);
rule2(X, Y, [_|T], [_|T0], Rows, Acc) ->
rule2(X + 1, Y, T, T0, Rows, Acc);
rule2(_, Y, [], [], [H, H0|T], Acc) ->
rule2(1, Y + 1, H, H0, [H0|T], Acc);
rule2(_, _, [], [], [_], Acc) ->
lists:reverse(Acc).
composite_blocks([H|T], Acc) ->
{H0, T0} = composite_block(H, T, []),
composite_blocks(T0, [H0|Acc]);
composite_blocks([], Acc) ->
lists:reverse(Acc).
composite_block(B, [H|T], Acc) ->
case combine_block(B, H) of
false ->
composite_block(B, T, [H|Acc]);
B0 ->
composite_block(B0, T, Acc)
end;
composite_block(B, [], Acc) ->
{B, lists:reverse(Acc)}.
% Does Block 0 contain the Block 1 coordinate?
combine_block(B = {{X, Y}, {SX, SY}, _}, B0 = {{X0, Y0}, _, _})
when X0 < X + SX orelse Y0 < Y + SY ->
combine_block0(B, B0);
combine_block(_, _) ->
false.
% are they same valued?
combine_block0(B = {_, _, V}, B0 = {_, _, V0})
when V =:= V0 orelse (V =/= 1 andalso V0 =/= 1) ->
combine_block1(B, B0);
combine_block0(_, _) ->
false.
% is B extended by B0 horizontally?
combine_block1({{X, Y}, {SX, SY}, V}, {{X0, Y}, {SX0, SY}, _}) when X0 =:= X + SX - 1 ->
{{X, Y}, {SX + SX0 - 1, SY}, V};
% is B extended by B0 vertically?
combine_block1({{X, Y}, {SX, SY}, V}, {{X, Y0}, {SX, SY0}, _}) when Y0 =:= Y + SY - 1 ->
{{X, Y}, {SX, SY + SY0 - 1}, V};
combine_block1(_, _) ->
false.
%%
apply_penalty_rule_3(Candidate) ->
RowScores = [rule3(Row, 0) || Row <- Candidate],
ColumnScores = [rule3(Col, 0) || Col <- rows_to_columns(Candidate)],
lists:sum(RowScores) + lists:sum(ColumnScores).
%
rule3(Row = [1|T], Score) ->
Ones = lists:takewhile(fun(X) -> X =:= 1 end, Row),
Scale = length(Ones),
case Scale * 7 of
Length when Length > length(Row) ->
rule3(T, Score);
Length ->
case is_11311_pattern(lists:sublist(Row, Length), Scale) of
true ->
rule3(T, Score + ?PENALTY_RULE_3);
false ->
rule3(T, Score)
end
end;
rule3([_|T], Score) ->
rule3(T, Score);
rule3([], Acc) ->
Acc.
%
is_11311_pattern(List, Scale) ->
List0 = lists:map(fun(X) when X =:= 1 -> 1; (_) -> 0 end, List),
Result = condense(List0, Scale, []),
Result =:= [1,0,1,1,1,0,1].
%
condense([], _, Acc) ->
lists:reverse(Acc);
condense(L, Scale, Acc) ->
{H, T} = lists:split(Scale, L),
case lists:sum(H) of
Scale ->
condense(T, Scale, [1|Acc]);
0 ->
condense(T, Scale, [0|Acc]);
_ ->
undefined
end.
%% Rule 4: penalize the deviation of the dark-module proportion from
%% 50%, in steps of five percentage points (?PENALTY_RULE_4 per step).
apply_penalty_rule_4(Candidate) ->
	DarkRatio = rule4(Candidate, 0, 0),
	Steps = trunc(abs(DarkRatio * 100 - 50)) div 5,
	?PENALTY_RULE_4 * Steps.

%% Tally dark modules (value 1) against the total module count, row by
%% row, and return the dark/total ratio.
rule4([Row | Rows], Dark, Total) ->
	RowDark = length([Module || Module <- Row, Module =:= 1]),
	rule4(Rows, Dark + RowDark, Total + length(Row));
rule4([], Dark, Total) ->
	Dark / Total.
%% Transpose a rectangular list of rows into a list of columns.
%% (Note: as in the original, the input must be non-ragged and
%% non-empty.)
rows_to_columns(Rows) ->
	rows_to_columns(Rows, []).

rows_to_columns([[] | _], Acc) ->
	lists:reverse(Acc);
rows_to_columns(Rows, Acc) ->
	Heads = [Head || [Head | _] <- Rows],
	Tails = [Tail || [_ | Tail] <- Rows],
	rows_to_columns(Tails, [Heads | Acc]).
%%%-------------------------------------------------------------------
%%% @author <NAME>
%%% @copyright (C) 2017: <NAME>
%%% This software is released under the MIT license cited in 'LICENSE.md'.
%%% @end
%%%-------------------------------------------------------------------
%%% @doc
%%% This module provides search functionality for leaf nodes of B+ tree.
%%% @end
%%%-------------------------------------------------------------------
-module(bp_tree_leaf).
-author("<NAME>").
-include("bp_tree.hrl").
%% API exports
-export([find_leftmost/1, find_key/2]).
%%====================================================================
%% API functions
%%====================================================================
%%--------------------------------------------------------------------
%% @doc
%% Returns leftmost leaf in a B+ tree.
%% @end
%%--------------------------------------------------------------------
-spec find_leftmost(bp_tree:tree()) ->
{{ok, bp_tree_node:id(), bp_tree:tree_node()} | {error, term()},
bp_tree:tree()}.
find_leftmost(Tree) ->
case bp_tree_store:get_root_id(Tree) of
{{ok, RootId}, Tree2} -> find_leftmost(RootId, Tree2);
{{error, Reason}, Tree2} -> {{error, Reason}, Tree2}
end.
%%--------------------------------------------------------------------
%% @doc
%% Returns key at given offset starting from leftmost node. Along with the key
%% a node holding it and its position within this node is returned.
%% @end
%%--------------------------------------------------------------------
-spec find_key(non_neg_integer(), bp_tree:tree()) -> {
{ok, bp_tree:key(), pos_integer(), bp_tree_node:id(), bp_tree:tree_node()} |
{error, term()
}, bp_tree:tree()}.
find_key(Offset, Tree) ->
case find_leftmost(Tree) of
{{ok, NodeId, Node}, Tree2} ->
find_key(Offset, NodeId, Node, Tree2);
{{error, Reason}, Tree2} ->
{{error, Reason}, Tree2}
end.
%%====================================================================
%% Internal functions
%%====================================================================
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Returns leftmost leaf in a B+ tree rooted in a node.
%% @end
%%--------------------------------------------------------------------
-spec find_leftmost(bp_tree_node:id(), bp_tree:tree()) ->
{{ok, bp_tree_node:id(), bp_tree:tree_node()} | {error, term()},
bp_tree:tree()}.
find_leftmost(NodeId, Tree) ->
{{ok, Node}, Tree2} = bp_tree_store:get_node(NodeId, Tree),
case bp_tree_node:leftmost_child(Node) of
{ok, NodeId2} -> find_leftmost(NodeId2, Tree2);
{error, not_found} -> {{ok, NodeId, Node}, Tree2}
end.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Returns key at given offset starting from a node: walks right along
%% the leaf level, skipping whole leaves while the remaining offset
%% exceeds the current leaf's size. Along with the key, the node
%% holding it and its 1-based position within that node are returned.
%% @end
%%--------------------------------------------------------------------
-spec find_key(non_neg_integer(), bp_tree_node:id(), bp_tree:tree_node(),
    bp_tree:tree()) -> {{ok, bp_tree:key(), pos_integer(), bp_tree_node:id(),
    bp_tree:tree_node()} | {error, term()}, bp_tree:tree()}.
find_key(Offset, NodeId, Node, Tree) ->
    NodeSize = bp_tree_node:size(Node),
    if
        Offset < NodeSize ->
            %% The target key lives in this node.
            {ok, Key} = bp_tree_node:key(Offset + 1, Node),
            {{ok, Key, Offset + 1, NodeId, Node}, Tree};
        true ->
            %% Skip this node entirely and continue in the right sibling.
            case bp_tree_node:right_sibling(Node) of
                {ok, NextId} ->
                    {{ok, NextNode}, Tree2} = bp_tree_store:get_node(NextId, Tree),
                    find_key(Offset - NodeSize, NextId, NextNode, Tree2);
                {error, Reason} ->
                    {{error, Reason}, Tree}
            end
    end.
%% @doc This is the behaviour definition for a credential provider module
%% and it iterates over a list of providers. You may set the `credential_providers`
%% Erlang environment variable if you want to restrict checking only a certain
%% subset of the default list.
%%
%% Default order of checking for credentials is:
%% <ol>
%% <li>Erlang application environment</li>
%% <li>OS environment</li>
%% <li>Credentials from AWS file</li>
%% <li>ECS Task credentials</li>
%% <li>EC2 credentials</li>
%% </ol>
%%
%% Providers are expected to implement a function called `fetch/1' which
%% takes as its argument a proplist of options which may influence the
%% operation of the provider. The fetch/1 function should return either
%% `{ok, Credentials, Expiration}' or `{error, Reason}'.
%%
%% If a provider returns {ok, ...} then evaluation stops at that provider.
%% If it returns {error, ...} then the next provider is executed in order
%% until either a set of credentials are returns or the tuple
%% `{error, no_credentials}' is returned.
%%
%% If a new provider is desired, the behaviour interface should be
%% implemented and its module name added to the default list.
%% @end
-module(aws_credentials_provider).
-export([fetch/0, fetch/1]).
-callback fetch ( Options :: proplists:proplist() ) -> {ok, Credentials :: map(),
Expiration :: binary() | pos_integer() | infinity}
| {error, Reason :: term()}.
-define(DEFAULT_PROVIDERS, [aws_credentials_env, aws_credentials_file, aws_credentials_ecs, aws_credentials_ec2]).
%% @doc Fetch credentials using the default (empty) provider options.
fetch() ->
    fetch([]).
%% @doc Fetch credentials, trying each configured provider in order.
%% The provider list defaults to ?DEFAULT_PROVIDERS and may be
%% overridden via the 'credential_providers' application environment
%% key of the aws_credentials application.
fetch(Options) ->
  Providers = get_env(credential_providers, ?DEFAULT_PROVIDERS),
  evaluate_providers(Providers, Options).
%% Try each provider module in order; the first one that returns
%% something other than {error, _} wins. Failures are logged and the
%% next provider is consulted; exhausting the list yields
%% {error, no_credentials}.
evaluate_providers([], _Options) ->
  {error, no_credentials};
evaluate_providers([Provider | Rest], Options) ->
  case Provider:fetch(Options) of
    {error, Reason} ->
      error_logger:error_msg("Provider ~p reports ~p", [Provider, {error, Reason}]),
      evaluate_providers(Rest, Options);
    Credentials ->
      Credentials
  end.
%% Read 'Key' from the aws_credentials application environment, falling
%% back to 'Default' when the key is unset.
get_env(Key, Default) ->
  case application:get_env(aws_credentials, Key) of
    {ok, Value} -> Value;
    undefined -> Default
  end.
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(vtree_bulk).
-include_lib("couch/include/couch_db.hrl").
-ifndef(makecheck).
-define(MAX_FILLED, 40).
-else.
-define(MAX_FILLED, 4).
-compile(export_all).
-endif.
-export([omt_load/2, omt_write_tree/2, bulk_load/4]).
-export([log_n_ceil/2]).
% XXX vmx: check if tree has correct meta information set for every node
% (type=inner/type=leaf)
% The seedtree is kept in memory, therefore it makes sense to restrict the
% maximum height of it.
-define(MAX_SEEDTREE_HEIGHT, 3).
% same as in vtree
-record(node, {
    % type = inner | leaf
    type=leaf}).

% Root of the in-memory seed tree: the tree itself, the nodes that did not
% fit into any of its leafs (outliers) and the (capped) height of the tree.
-record(seedtree_root, {
    tree = [] :: list(),
    outliers = [] :: list(),
    height = 0 :: integer()
}).
% Leaf of the seed tree: `orig' holds the original on-disk child positions,
% `new' collects the freshly inserted nodes awaiting a merge.
-record(seedtree_leaf, {
    orig = [] :: list(),
    new = [] :: list(),
    % position in file (of this node)
    pos = -1 :: integer()
}).

% A 2D minimum bounding rectangle as a 4-tuple of coordinates.
% NOTE(review): the exact axis order is not established by this file —
% confirm against the vtree module.
-type mbr() :: {number(), number(), number(), number()}.
% {MBR, meta (#node{} record), children (positions or nodes)}
-type vtree_node() :: {mbr(), tuple(), list()}.
-type seedtree_root() :: tuple().
-type seedtree_node() :: tuple().
-type omt_node() :: tuple().
-type omt_tree() :: [omt_node()] | [omt_tree()].
% @doc Bulk load a tree. Returns the position of the root node and the height
% of the tree
-spec bulk_load(Fd::file:io_device(), RootPos::integer(),
    TargetTreeHeight::integer(), Nodes::[vtree_node()]) ->
    {ok, integer(), integer()}.
% No nodes to load => the tree stays untouched
bulk_load(_Fd, RootPos, TargetTreeHeight, []) ->
    {ok, RootPos, TargetTreeHeight};
% Tree is empty => build a fresh tree from scratch with the OMT algorithm
bulk_load(Fd, _RootPos, TargetTreeHeight, Nodes) when TargetTreeHeight==0 ->
    % Tree is empty => bulk load it
    {Omt, TreeHeight} = omt_load(Nodes, ?MAX_FILLED),
    {ok, MbrAndPosList} = omt_write_tree(Fd, Omt),
    {_Mbrs, PosList} = lists:unzip(MbrAndPosList),
    NewNodes = load_nodes(Fd, PosList),
    {ok, NewPos} = case length(NewNodes) of
    % single node as root
    1 ->
        Result = couch_file:append_term(Fd, hd(NewNodes)),
        ok = couch_file:sync(Fd),
        Result;
    % multiple nodes => wrap them in a newly written parent/root node
    _ -> write_parent(Fd, NewNodes)
    end,
    {ok, NewPos, TreeHeight};
% General case: merge new nodes into an existing tree via a seed tree
bulk_load(Fd, RootPos, TargetTreeHeight, Nodes) ->
    % The seed tree covers (roughly) the upper half of the target tree
    SeedtreeHeight = floor(TargetTreeHeight/2),
    Seedtree = seedtree_init(Fd, RootPos, SeedtreeHeight),
    Seedtree2 = seedtree_insert_list(Seedtree, Nodes),
    % There might be a huge amount of outliers, insert them in a different
    % way. Create an OMT tree and insert the whole tree into the existing
    % one at the appropriate height.
    Outliers = Seedtree2#seedtree_root.outliers,
    OutliersNum = length(Outliers),
    % More than 100 outliers are handled separately via insert_outliers/5,
    % so drop them from the seed tree in that case.
    Seedtree3 = if
    OutliersNum > 100 ->
        Seedtree2#seedtree_root{outliers=[]};
    true ->
        Seedtree2
    end,
    {ok, Result, NewHeight, _HeightDiff} = seedtree_write(
        Fd, Seedtree3, TargetTreeHeight - Seedtree3#seedtree_root.height),
    {NewPos, NewHeight2, NewMbr} = case length(Result) of
    % single node as root
    1 ->
        {ok, ResultPos} = couch_file:append_term(Fd, hd(Result)),
        ok = couch_file:sync(Fd),
        {ResultPos, NewHeight, element(1, hd(Result))};
    % multiple nodes => a new root on top adds one level to the tree
    _ ->
        {ok, ResultPos} = write_parent(Fd, Result),
        {ResultPos, NewHeight+1, vtree:calc_nodes_mbr(Result)}
    end,
    % Insert outliers as whole subtree
    {NewPos2, NewHeight3} = if
    OutliersNum > 100 ->
        insert_outliers(Fd, NewPos, NewMbr, NewHeight2, Outliers);
    true ->
        {NewPos, NewHeight2}
    end,
    %?debugVal(NewPos2),
    {ok, NewPos2, NewHeight3}.
% @doc If there is a huge number of outliers, we bulk load them into a new
% tree and insert that tree directly to the original target tree. Returns
% the position of the root node in file and the height of the tree.
-spec insert_outliers(Fd::file:io_device(), TargetPos::integer(),
    TargetMbr::mbr(), TargetHeight::integer(),
    Nodes::[vtree_node()]) -> {integer(), integer()}.
insert_outliers(Fd, TargetPos, TargetMbr, TargetHeight, Nodes) ->
    % Bulk load the outliers into their own OMT tree first
    {Omt, OmtHeight} = omt_load(Nodes, ?MAX_FILLED),
    {ok, MbrAndPosList} = omt_write_tree(Fd, Omt),
    {Mbrs, PosList} = lists:unzip(MbrAndPosList),
    MergedMbr = vtree:calc_mbr([TargetMbr|Mbrs]),
    % OmtHeight - 1 because this node might contain several children
    case TargetHeight - (OmtHeight - 1) of
    % Both, the new bulk loaded tree and target tree have the same height
    % => create new common root
    Diff when Diff == 0 ->
        %?debugMsg("same height"),
        if
        % The target root plus all OMT roots fit into one node
        length(MbrAndPosList) + 1 =< ?MAX_FILLED ->
            % XXX vmx: is using type=inner always valid?
            NewRootNode = {MergedMbr, #node{type=inner}, [TargetPos|PosList]},
            {ok, NewOmtPos} = couch_file:append_term(Fd, NewRootNode),
            ok = couch_file:sync(Fd),
            {NewOmtPos, TargetHeight+1};
        % split the node and create new root node
        true ->
            % NOTE vmx: These are not a normal nodes. The last element in the
            % tuple is not the position of the children, but the position
            % of the node itself.
            % XXX vmx: is type=inner always right?
            NewChildren = [{Mbr, #node{type=inner}, Pos} || {Mbr, Pos} <-
                [{TargetMbr, TargetPos}|MbrAndPosList]],
            NodeToSplit = {MergedMbr, #node{type=inner}, NewChildren},
            {_SplittedMbr, Node1, Node2} = vtree:split_node(NodeToSplit),
            % Two new nodes plus a fresh root on top: the tree grows by 2
            {ok, NewOmtPos} = write_parent(Fd, [Node1|[Node2]]),
            {NewOmtPos, TargetHeight+2}
        end;
    % insert new tree into target tree
    Diff when Diff > 0 ->
        %?debugMsg("target tree higher"),
        {ok, _, SubPos, Inc} = insert_subtree(
            Fd, TargetPos, MbrAndPosList, Diff-1),
        {SubPos, TargetHeight+Inc};
    % insert target tree into new tree
    Diff when Diff < 0 ->
        %?debugMsg("target tree smaller"),
        % First give the OMT roots a common root node of their own
        {OmtRootMbrs, OmtRootPosList} = lists:unzip(MbrAndPosList),
        OmtRootNode = {vtree:calc_mbr(OmtRootMbrs), #node{type=inner},
            OmtRootPosList},
        {ok, NewOmtPos} = couch_file:append_term(Fd, OmtRootNode),
        ok = couch_file:sync(Fd),
        {ok, _, SubPos, Inc} = insert_subtree(
            Fd, NewOmtPos, [{TargetMbr, TargetPos}], abs(Diff)),
        {SubPos, OmtHeight+Inc}
    end.
% @doc OMT bulk loading. MaxNodes is the number of maximum children per node.
% Returns the OMT tree and the height of the tree
% Based on (but modified):
% OMT: Overlap Minimizing Top-down Bulk Loading Algorithm for R-tree
-spec omt_load(Nodes::[omt_node()], MaxNodes::integer()) ->
    {omt_tree(), integer()}.
omt_load([], _MaxNodes) ->
    % An empty input yields an empty tree of height 0. Returning a tuple
    % here (instead of the previous bare `[]') keeps the return shape
    % consistent with the -spec and with the non-empty clause, so callers
    % that match {Omt, Height} no longer crash on empty input.
    {[], 0};
omt_load(Nodes, MaxNodes) ->
    NodesNum = length(Nodes),
    % Height of the final tree
    Height = log_n_ceil(MaxNodes, NodesNum),
    % Recursively chunk the nodes; Height-1 is the depth of the leaf level
    Omt = omt_load(Nodes, MaxNodes, 0, Height-1, 0),
    {Omt, Height}.
% all nodes need to be on the same level
-spec omt_load(Nodes::[omt_node()], MaxNodes::integer(), Dimension::integer(),
    LeafDepth::integer(), Depth::integer()) -> omt_node().
% Few enough nodes for a single node: wrap them in one singleton list per
% remaining level so every leaf ends up at depth LeafDepth.
omt_load(Nodes, MaxNodes, _Dimension, LeafDepth, Depth) when
        length(Nodes) =< MaxNodes ->
    RemainingLevels = lists:seq(1, LeafDepth - Depth),
    lists:foldl(fun(_Level, Wrapped) -> [Wrapped] end, Nodes, RemainingLevels);
% Too many nodes: sort along the current dimension, cut into chunks and
% recurse into every chunk with the dimension alternated.
omt_load(Nodes, MaxNodes, Dimension, LeafDepth, Depth) ->
    Total = length(Nodes),
    % Height of the subtree that still has to hold all remaining nodes
    SubtreeHeight = log_n_ceil(MaxNodes, Total),
    % Capacity of one direct subtree of this subtree's root
    PerSubtree = math:pow(MaxNodes, SubtreeHeight-1),
    % Number of children the root of this subtree gets
    NumChildren = ceiling(Total/PerSubtree),
    % Number of nodes that go into each chunk
    ChunkSize = ceiling(Total/NumChildren),
    % NOTE vmx: currently all nodes have only 2 dimensions => "rem 2"
    Sorted = omt_sort_nodes(Nodes, (Dimension rem 2)+1),
    chunk_list(fun(Chunk) ->
        omt_load(Chunk, MaxNodes, Dimension+1, LeafDepth, Depth+1)
    end, Sorted, ChunkSize).
% @doc Write an OMT in memory tree to disk. Returns a list of tuples that
% contain the MBR and
% either:
% - the position of the children in the file
% or:
% - the actual node value (if the tree consists only of one node).
-spec omt_write_tree(Fd::file:io_device(), Tree::omt_tree()) ->
    {ok, [{mbr(), integer()}]} | {ok, [{mbr(), tuple()}]}.
omt_write_tree(Fd, Tree) ->
    case omt_write_tree(Fd, Tree, 0, []) of
        {level_done, LevelNodes} ->
            % The accumulator was built newest-first, so restore input order
            {ok, lists:reverse(LevelNodes)};
        {leaf_nodes, MbrAndPos} ->
            % Degenerate tree: a single leaf level, wrap it in a list
            {ok, lists:reverse([MbrAndPos])}
    end.
% Recursive worker for omt_write_tree/2. Despite the generic accumulator
% style, this helper never returns a plain {ok, _} tuple; callers dispatch
% on the tags below. (The previous -spec claimed {ok, integer()}, which
% never matched any actual return value.)
-spec omt_write_tree(Fd::file:io_device(), Tree::list(), Depth::integer(),
    Acc::list()) -> {no_siblings, list()} |
    {leaf_nodes, {mbr(), integer()}} |
    {level_done, list()}.
% no more siblings
omt_write_tree(_Fd, [], _Depth, Acc) ->
    {no_siblings, Acc};
% leaf node
omt_write_tree(Fd, [H|_T]=Leafs, _Depth, _Acc) when is_tuple(H) ->
    % Don't write leafs nodes to disk now, they will be written later on.
    % Instead return a list of of tuples with the node's MBR and the node
    % itself
    Mbr = vtree:calc_nodes_mbr(Leafs),
    % We can not only pass in single nodes, but also subtrees. If the third
    % element of the tuple is a tuple, it's a leaf node, if it is a list,
    % it's an inner node
    Node = case is_tuple(element(3, H)) of
    true ->
        {Mbr, #node{type=leaf}, Leafs};
    false ->
        % Subtree case: persist every child and keep only its position
        PosList = lists:map(fun(N) ->
            {ok, Pos} = couch_file:append_term(Fd, N),
            ok = couch_file:sync(Fd),
            Pos
        end, Leafs),
        {Mbr, #node{type=inner}, PosList}
    end,
    {ok, Pos} = couch_file:append_term(Fd, Node),
    ok = couch_file:sync(Fd),
    {leaf_nodes, {Mbr, Pos}};
% Inner node: descend into the first child, then handle the siblings
omt_write_tree(Fd, [H|T], Depth, Acc) ->
    {_, Acc2} = case omt_write_tree(Fd, H, Depth+1, []) of
    {no_siblings, Siblings} ->
        {ok, Siblings};
    {leaf_nodes, MbrAndPos} ->
        {ok, [MbrAndPos|Acc]};
    {level_done, Level} ->
        % A whole child level was written: wrap it in a new inner node
        % NOTE vmx: reversing Level is probably not neccessary
        {Mbrs, Children} = lists:unzip(lists:reverse(Level)),
        Mbr = vtree:calc_mbr(Mbrs),
        Meta = #node{type=inner},
        {ok, Pos} = couch_file:append_term(Fd, {Mbr, Meta, Children}),
        ok = couch_file:sync(Fd),
        {ok, [{Mbr, Pos}|Acc]}
    end,
    {_, Acc3} = omt_write_tree(Fd, T, Depth, Acc2),
    {level_done, Acc3}.
% @doc Sort nodes by a certain dimension (which is the first element of the
% node tuple)
-spec omt_sort_nodes(Nodes::[omt_node()], Dimension::integer()) ->
    [omt_node()].
% NOTE vmx: in the future the dimensions won't be stored in tuples, but
% in lists.
omt_sort_nodes(Nodes, Dimension) ->
    % Decorate-sort-undecorate: tag every node with the value of the
    % requested dimension of its MBR, keysort (stable), strip the tag.
    Tagged = [{element(Dimension, element(1, Node)), Node} || Node <- Nodes],
    [Node || {_SortKey, Node} <- lists:keysort(1, Tagged)].
% @doc Insert several new items into the seed tree
-spec seedtree_insert_list(Tree::seedtree_root(), _Nodes::[tuple()]) ->
    seedtree_root().
seedtree_insert_list(Root, Nodes) ->
    % Thread the root through a fold instead of explicit recursion; an
    % empty node list simply returns the root untouched.
    lists:foldl(fun(Node, AccRoot) ->
        seedtree_insert(AccRoot, Node)
    end, Root, Nodes).
% @doc Insert a new item into the seed tree
% Based on:
% Bulk insertion for R-trees by seeded clustering
-spec seedtree_insert(Tree::seedtree_root(), Node::tuple()) -> seedtree_root().
seedtree_insert(#seedtree_root{tree=Tree, outliers=Outliers}=Root, Node) ->
    % Try to push the node down into a seed tree leaf; if no leaf's MBR
    % contains it, record it as an outlier instead.
    case seedtree_insert_children([Tree], Node) of
    {ok, [Tree2]} ->
        Root#seedtree_root{tree=Tree2};
    {not_inserted, _} ->
        Root#seedtree_root{outliers=[Node|Outliers]}
    end.
% @doc Do the actual insertion of the nodes into the seedtree. Returns the
% (possibly updated) children tagged with `ok' when the node found a home,
% or tagged with `not_inserted' when no child's MBR contains it.
% (The previous -spec claimed a 0-arity `{not_inserted}' tuple and an `ok'
% tag only; both tags are in fact paired with a second element.)
-spec seedtree_insert_children(Children::[seedtree_node()] | tuple(),
    Node::seedtree_node()) ->
    {ok | not_inserted, [seedtree_node()] | tuple()}.
seedtree_insert_children([], Node) ->
    {not_inserted, Node};
% Reached a seed tree leaf: prepend the node to its `new' list.
% NOTE(review): the guard is redundant — a #seedtree_leaf{} record is a
% tuple, so `not is_list(Children)' always holds here.
seedtree_insert_children(#seedtree_leaf{new=Old}=Children, Node) when
        not is_list(Children) ->
    New = [Node|Old],
    Children2 = Children#seedtree_leaf{new=New},
    {ok, Children2};
seedtree_insert_children([H|T], Node) ->
    {Mbr, Meta, Children} = H,
    {NodeMbr, _, _Data} = Node,
    % Descend into the first child whose MBR fully contains the node,
    % otherwise keep trying the siblings.
    case vtree:within(NodeMbr, Mbr) of
    true ->
        {Status, Children2} = seedtree_insert_children(Children, Node),
        {Status, [{Mbr, Meta, Children2}|T]};
    false ->
        {Status, T2} = seedtree_insert_children(T, Node),
        {Status, [H|T2]}
    end.
% @doc Put an on disk tree into memory and prepare it to store new nodes in
% the leafs. If MaxDepth is bigger than the tree, the lowest possible
% level is returned.
-spec seedtree_init(Fd::file:io_device(), RootPos::integer(),
    MaxDepth::integer()) -> seedtree_root().
seedtree_init(Fd, RootPos, MaxDepth) ->
    % The recorded height is capped at ?MAX_SEEDTREE_HEIGHT no matter how
    % deep the caller asked for, mirroring the cap in seedtree_init/4.
    CappedHeight = erlang:min(MaxDepth, ?MAX_SEEDTREE_HEIGHT),
    Tree = seedtree_init(Fd, RootPos, MaxDepth, 0),
    #seedtree_root{tree = Tree, height = CappedHeight}.
-spec seedtree_init(Fd::file:io_device(), RootPos::integer(),
    MaxDepth::integer(), Depth::integer()) -> seedtree_node().
% It's "Depth+1" as the root already contains several nodes (it's the nature
% of a R-Tree).
% Reached the requested depth (or the hard seed tree cap): stop descending
% and turn this on-disk node into a #seedtree_leaf{}.
seedtree_init(Fd, RootPos, MaxDepth, Depth) when
        (Depth+1 == MaxDepth) or (Depth+1 == ?MAX_SEEDTREE_HEIGHT) ->
    %?debugVal(RootPos),
    {ok, Parent} = couch_file:pread_term(Fd, RootPos),
    {ParentMbr, ParentMeta, EntriesPos} = Parent,
    {ParentMbr, ParentMeta, #seedtree_leaf{orig=EntriesPos, pos=RootPos}};
% Still above the requested depth: load the node and recurse into children.
seedtree_init(Fd, RootPos, MaxDepth, Depth) ->
    {ok, Parent} = couch_file:pread_term(Fd, RootPos),
    {ParentMbr, ParentMeta, EntriesPos} = Parent,
    % Leaf entries are full tuples, inner entries are bare positions
    case is_tuple(hd(EntriesPos)) of
    % we reached leaf level => return
    true ->
        % create seedtree leaf
        {ParentMbr, ParentMeta, #seedtree_leaf{orig=EntriesPos, pos=RootPos}};
    false ->
        Children = lists:foldl(fun(EntryPos, Acc) ->
            Child = seedtree_init(Fd, EntryPos, MaxDepth, Depth+1),
            [Child|Acc]
        end, [], EntriesPos),
        {ParentMbr, ParentMeta, lists:reverse(Children)}
    end.
% @doc Write a new vtree. InsertHeight is the height where the seedtree
% should be inserted to. Returns the root nodes, the height of the tree and
% the height difference to the old tree.
-spec seedtree_write(Fd::file:io_device(), Seetree::seedtree_root(),
    InsertHeight::integer()) -> {ok, [vtree_node()], integer(), integer()}.
seedtree_write(Fd, Seedtree, InsertHeight) ->
    Tree = Seedtree#seedtree_root.tree,
    TargetHeight = InsertHeight + Seedtree#seedtree_root.height,
    {level_done, Level} = seedtree_write(Fd, [Tree], InsertHeight, []),
    % If the top level overflowed, bulk load it into an OMT tree so the
    % resulting root fan-out stays within ?MAX_FILLED.
    {RootNodes, NewHeight} = if
    length(Level) =< ?MAX_FILLED ->
        {Level, TargetHeight};
    true ->
        {Level2, Level2Height} = omt_load(Level, ?MAX_FILLED),
        {ok, ParentMbrAndPos} = omt_write_tree(Fd, Level2),
        % Read the freshly written parents back as full nodes
        Parents = lists:map(fun({_, Pos}) ->
            {ok, Node} = couch_file:pread_term(Fd, Pos),
            Node
        end, ParentMbrAndPos),
        {Parents, TargetHeight+Level2Height-1}
    end,
    {RootNodes2, HeightDiff} = case Seedtree#seedtree_root.outliers of
    [] ->
        {RootNodes, 0};
    Outliers ->
        % insert outliers by creating a temporary root node...
        {ok, TmpRootPos} = write_parent(Fd, RootNodes),
        % ...inserting every outlier one by one through vtree:insert/4...
        {TmpRootPos2, TmpHeight} = lists:foldl(fun(Outlier, {CurPos, _}) ->
            {Mbr, Meta, {DocId, {Geom, Value}}} = Outlier,
            {ok, _NewMbr, CurPos2, TreeHeight} = vtree:insert(
                Fd, CurPos, DocId, {Mbr, Meta, Geom, Value}),
            {CurPos2, TreeHeight}
        end, {TmpRootPos, 0}, Outliers),
        {ok, OldRoot} = couch_file:pread_term(Fd, TmpRootPos2),
        % ...and get the original children back again
        {_, _, RootNodes2Pos} = OldRoot,
        % TmpHeight-1 discounts the temporary root level again
        LevelDiff = TmpHeight-1-NewHeight,
        ChildrenNodes = load_nodes(Fd, RootNodes2Pos),
        {ChildrenNodes, LevelDiff}
    end,
    {ok, RootNodes2, NewHeight+HeightDiff, HeightDiff}.
% Recursive worker for seedtree_write/3. It walks the in-memory seed tree,
% merges the queued-up `new' nodes into the on-disk children of every
% #seedtree_leaf{} and rebuilds the inner levels on the way back up.
% Returns tagged tuples that callers dispatch on; the previous -spec
% claimed {ok, [vtree_node()], integer(), integer()}, which never matched
% any actual return value of this function.
-spec seedtree_write(Fd::file:io_device(),
    Seetree::seedtree_node() | [seedtree_node()] | tuple(),
    InsertHeight::integer(), Acc::[vtree_node()]) ->
    {no_more_siblings, [vtree_node()]} |
    {leafs, list()} |
    {new_leaf, [{mbr(), term()}]} |
    {level_done, [vtree_node()]}.
seedtree_write(_Fd, [], _InsertHeight, Acc) ->
    {no_more_siblings, Acc};
% No new nodes to insert
seedtree_write(_Fd, #seedtree_leaf{orig=Orig, new=[]}, _InsertHeight, _Acc) ->
    {leafs, Orig};
% New nodes to insert
% This function returns a new list of children, as some children were
% rewritten due to repacking. The MBR doesn't change (that's the nature of
% this algorithm).
seedtree_write(Fd, #seedtree_leaf{orig=Orig, new=New, pos=ParentPos},
        InsertHeight, _Acc) ->
    NewNum = length(New),
    % Height the new nodes would need as their own OMT tree
    OmtHeight = log_n_ceil(?MAX_FILLED, NewNum),
    HeightDiff = InsertHeight - (OmtHeight - 1),
    NewChildrenPos2 = if
    % Input tree can be inserted as-is into the target tree
    HeightDiff == 0 ->
        %?debugMsg("insert as is"),
        % Note: OmtHeight is already bound, so this match also asserts
        % that omt_load/2 agrees with the height computed above.
        {OmtTree, OmtHeight} = omt_load(New, ?MAX_FILLED),
        NewChildren = seedtree_write_insert(Fd, Orig, OmtTree, OmtHeight),
        _MbrAndPos = seedtree_write_finish(NewChildren);
    % insert tree is too small => expand seedtree
    HeightDiff > 0 ->
        %?debugMsg("insert is too small"),
        % Create new seedtree
        % HeightDiff+1 as we like to load the level of the children
        Seedtree = seedtree_init(Fd, ParentPos, HeightDiff+1),
        Seedtree2 = seedtree_insert_list(Seedtree, New),
        {ok, NodesList, _NewHeight, NewHeightDiff} = seedtree_write(
            Fd, Seedtree2, InsertHeight-Seedtree2#seedtree_root.height+1),
        % NOTE vmx (2011-01-04) This makes one test fail. I have to admit I
        % can't see the point of this code atm. The calculation of the
        % insertion was based on the height of the resulting OMT tree, so
        % why should some levels be stripped?
        % There was a node overflow. Though we are currently somewhere within
        % the tree, so we need to return a node that has the original height.
        % Therefore recreate the overflowed node.
        NodesList2 = case NewHeightDiff > 0 of
        false ->
            NodesList;
        true ->
            lists:foldl(fun(_I, Acc) ->
                PosList = lists:append([Pos || {_, _, Pos} <- Acc]),
                load_nodes(Fd, PosList)
            end, NodesList, lists:seq(1, NewHeightDiff))
        end,
        _MbrAndPos = [{Mbr, Pos} || {Mbr, _, Pos} <- NodesList2];
        %MbrAndPos = [{Mbr, Pos} || {Mbr, _, Pos} <- NodesList];
    % insert tree is too high => use its children
    HeightDiff < 0 ->
        %?debugMsg("insert is too high"),
        {OmtTree, OmtHeight} = omt_load(New, ?MAX_FILLED),
        % flatten the OMT tree until it has the expected height to be
        % inserted into the target tree (one subtree at a time).
        OmtTrees = lists:foldl(fun(_I, Acc) ->
            lists:append(Acc)
        end, OmtTree, lists:seq(1, abs(HeightDiff))),
        % (OmtHeight + HeightDiff) as OMT tree was flattened (HeighTDiff is
        % negative)
        MbrAndPos2 = seedtree_write_insert(Fd, Orig, OmtTrees,
            (OmtHeight + HeightDiff)),
        {NewMbrs, NewPos} = lists:unzip(MbrAndPos2),
        _MbrAndPos = seedtree_write_finish(lists:zip(NewMbrs, NewPos))
    end,
    {new_leaf, NewChildrenPos2};
% Inner seed tree node: recurse into the children, then into the siblings.
seedtree_write(Fd, [{Mbr, Meta, Children}|T], InsertHeight, Acc) ->
    {ok, Acc2} = case seedtree_write(Fd, Children, InsertHeight, []) of
    {no_more_siblings, Siblings} ->
        {ok, Siblings};
    {leafs, Orig} ->
        {ok, [{Mbr, Meta, Orig}|Acc]};
    % This one might return more that one new node => level_done needs
    % to handle a possible overflow
    {new_leaf, Nodes} ->
        Nodes2 = lists:foldl(fun({NodeMbr, NodePos}, Acc2) ->
            [{NodeMbr, Meta, NodePos}|Acc2]
        end, Acc, Nodes),
        {ok, Nodes2};
    % Node can be written as-is, as the number of children is low enough
    {level_done, Level} when length(Level) =< ?MAX_FILLED->
        ParentMbr = vtree:calc_nodes_mbr(Level),
        ChildrenPos = write_nodes(Fd, Level),
        % XXX vmx: Not sure if type=inner is always right
        Parent = {ParentMbr, #node{type=inner}, ChildrenPos},
        {ok, [Parent|Acc]};
    % Overflowed level: re-chunk it with the OMT algorithm first
    {level_done, Level} when length(Level) > ?MAX_FILLED ->
        {Level2, Level2Height} = omt_load(Level, ?MAX_FILLED),
        Level3 = if
        Level2Height > 2 ->
            % Lets's try to flatten the list in case it is too deep
            lists:foldl(fun(_I, ToAppend) ->
                lists:append(ToAppend)
            end, Level2, lists:seq(3, Level2Height));
        true ->
            Level2
        end,
        Parents = lists:foldl(fun(Level4, LevelAcc) ->
            ParentMbr = vtree:calc_nodes_mbr(Level4),
            ChildrenPos = write_nodes(Fd, Level4),
            % XXX vmx: Not sure if type=inner is always right
            Parent = {ParentMbr, #node{type=inner}, ChildrenPos},
            [Parent|LevelAcc]
        end, Acc, Level3),
        {ok, Parents}
    end,
    {_, Acc3} = seedtree_write(Fd, T, InsertHeight, Acc2),
    {level_done, Acc3}.
% @doc Writes new nodes into the existing tree. The nodes, resp. the
% height of the OMT tree, must fit into the target tree.
% "Orig" are the original child nodes, "New" are the nodes that should be
% inserted.
% Returns a tuple with the MBR and the position of the root node in the file.
% The result may be a node that overflows. seedtree_write_finish/1 will fix
% that.
-spec seedtree_write_insert(Fd::file:io_device(), Orig::[vtree_node()],
    OmtTree::omt_node(), OmtHeight::integer()) -> [{mbr(), integer()}].
% Leaf node. We won't do the repacking dance, as we are on the lowest
% level already. Thus we just insert the new nodes
seedtree_write_insert(_Fd, Orig, OmtTree, OmtHeight) when is_tuple(hd(Orig)) ->
    OmtTree2 = if
    OmtHeight > 1 ->
        % Lets's try to flatten the list in case it is too deep
        lists:foldl(fun(_I, ToAppend) ->
            lists:append(ToAppend)
        end, OmtTree, lists:seq(2, OmtHeight));
    true ->
        OmtTree
    end,
    NewNodes = Orig ++ OmtTree2,
    % create a list with tuples consisting of MBR and the actual node
    [{Mbr, Node} || {Mbr, _, _}=Node <- NewNodes];
% Inner node, do some repacking.
seedtree_write_insert(Fd, Orig, OmtTree, _OmtHeight) ->
    % Write the OmtTree to to disk.
    % The tree might contain more than the maximum
    % number of allowed nodes per node. This overflow will be solved
    % at the end, when all child nodes get merged.
    {ok, OmtMbrAndPos} = omt_write_tree(Fd, OmtTree),
    % Get the enclosing MBR of the OMT nodes
    {OmtMbrs, OmtPosList} = lists:unzip(OmtMbrAndPos),
    OmtMbr = vtree:calc_mbr(OmtMbrs),
    % We do some repacking for better perfomance. First get the children
    % of the target node where the new nodes will be inserted in
    OrigNodes = load_nodes(Fd, Orig),
    % Transform the original nodes from normal ones to tuples where the
    % meta information is replaced with the position of the node in
    % the file.
    OrigMbrAndPos = lists:zipwith(fun({Mbr, _, Children}, Pos) ->
        {Mbr, Pos, Children}
    end, OrigNodes, Orig),
    % Get all nodes that are within or intersect with the input tree
    % root node(s)
    {Disjoint, NotDisjoint} = lists:partition(fun({Mbr, _Pos, _}) ->
        vtree:disjoint(Mbr, OmtMbr)
    end, OrigMbrAndPos),
    % For better performance, we now take the children of the nodes that will
    % be inserted, as well as all the children from the nodes that are not
    % disjoint. With this pool of nodes, we create new nodes that enclose
    % them more tightly.
    % First get the position of children of the not disjoint nodes
    NdChildrenPos = lists:append(
        [Children || {_Mbr, _Pos, Children} <- NotDisjoint]),
    % Get the position of the children of the nodes that will be inserted
    % as well.
    OmtChildren = load_nodes(Fd, OmtPosList),
    OmtChildrenPos = lists:append(
        [Children || {_Mbr, _Pos, Children} <- OmtChildren]),
    NewChildrenPos = NdChildrenPos ++ OmtChildrenPos,
    % Transform the new children's position to a tuple containing their MBR
    % and the position.
    NewChildrenMbrAndPos = case is_tuple(hd(NewChildrenPos)) of
    % We might have hit leaf nodes. They already contain the MBR we need to
    % find.
    true ->
        [{Mbr, Node} || {Mbr, _, _}=Node <- NewChildrenPos];
    % Else we need to load the nodes to get their MBR
    false ->
        NewChildrenChildren = load_nodes(Fd, NewChildrenPos),
        lists:zipwith(fun({Mbr, _, _}, Pos) ->
            {Mbr, Pos}
        end, NewChildrenChildren, NewChildrenPos)
    end,
    % Create nodes that are packed to the maximum. This might return a several
    % levels deep OMT tree if there is a huge number of nodes.
    {NewNodesOmt, NewNodesHeight} = omt_load(
        NewChildrenMbrAndPos, ?MAX_FILLED),
    % Normalize the OMT result to a flat list of chunks (one chunk per
    % future parent node), regardless of the OMT tree's depth.
    NewNodes = case NewNodesHeight of
    1 ->
        [NewNodesOmt];
    2 ->
        NewNodesOmt;
    _ ->
        lists:foldl(fun(_I, Acc) ->
            lists:append(Acc)
        end, NewNodesOmt, lists:seq(3, NewNodesHeight))
    end,
    % Write those new nodes
    NewChildren = lists:map(fun(Nodes) ->
        {Mbrs, Pos} = lists:unzip(Nodes),
        ParentMbr = vtree:calc_mbr(Mbrs),
        % Child entries that are tuples are actual leaf nodes; bare
        % positions mean this parent is an inner node.
        Meta = case is_tuple(hd(Pos)) of
        true -> #node{type=leaf};
        false -> #node{type=inner}
        end,
        {ok, ParentPos} = couch_file:append_term(Fd, {ParentMbr, Meta, Pos}),
        ok = couch_file:sync(Fd),
        {ParentMbr, ParentPos}
    end, NewNodes),
    % Untouched (disjoint) children keep their original position on disk
    DisjointMbrAndPos = [{Mbr, Pos} || {Mbr, Pos, _} <- Disjoint],
    DisjointMbrAndPos ++ NewChildren.
% @doc Creates new parent nodes out of a list of tuples containing MBRs and
% positions (splits as needed). Returns MBR and position in file of the
% new parent node(s)
-spec seedtree_write_finish(NewChildren::[{mbr(), integer()}]) ->
    [{mbr(), integer()}].
seedtree_write_finish(NewChildren) ->
    % If there are too many children for one parent, chunk them with the
    % OMT algorithm; flatten any levels beyond two so we end up with a
    % plain list of chunks (one chunk per parent).
    NewChildren3 = case length(NewChildren) > ?MAX_FILLED of
    true ->
        {NewChildren2, OmtHeight} = omt_load(NewChildren, ?MAX_FILLED),
        if
        OmtHeight > 2 ->
            % Lets's try to flatten the list in case it is too deep
            lists:foldl(fun(_I, ToAppend) ->
                lists:append(ToAppend)
            end, NewChildren2, lists:seq(3, OmtHeight));
        true ->
            NewChildren2
        end;
    false ->
        [NewChildren]
    end,
    % Calculate the common MBR. The result is a tuple of the MBR and
    % the children positions in file it encloses
    NewChildrenMbrAndPos = lists:reverse(lists:foldl(fun(Nodes, Acc) ->
        {Mbrs, PosList} = lists:unzip(Nodes),
        % Poss might be actual nodes, if it is a leaf node. Then they
        % are already wrapped in a list.
        PosList2 = if
        is_list(hd(PosList)) -> hd(PosList);
        true -> PosList
        end,
        Mbr = vtree:calc_mbr(Mbrs),
        [{Mbr, PosList2}|Acc]
    end, [], NewChildren3)),
    % Return new nodes (might be several ones)
    NewChildrenMbrAndPos.
% @doc Load the nodes stored at the given file positions, preserving the
% order of the position list.
-spec load_nodes(Fd::file:io_device(), Positions::[integer()]) ->
    [vtree_node()].
load_nodes(Fd, Positions) ->
    load_nodes(Fd, Positions, []).

% Accumulator-based worker; the result is reversed once at the end.
-spec load_nodes(Fd::file:io_device(), Positions::[integer()],
    Acc::[vtree_node()]) -> [vtree_node()].
load_nodes(_Fd, [], Loaded) ->
    lists:reverse(Loaded);
load_nodes(Fd, [Pos | Rest], Loaded) ->
    {ok, Node} = couch_file:pread_term(Fd, Pos),
    load_nodes(Fd, Rest, [Node | Loaded]).
% @doc Append every node to the file (syncing after each write) and return
% their positions in the same order as the input.
-spec write_nodes(Fd::file:io_device(), Nodes::[vtree_node()]) -> [integer()].
write_nodes(Fd, Nodes) ->
    write_nodes(Fd, Nodes, []).

% Accumulator-based worker; the position list is reversed once at the end.
-spec write_nodes(Fd::file:io_device(), Nodes::[vtree_node()],
    Acc::[vtree_node()]) -> [integer()].
write_nodes(_Fd, [], Positions) ->
    lists:reverse(Positions);
write_nodes(Fd, [Node | Rest], Positions) ->
    {ok, Pos} = couch_file:append_term(Fd, Node),
    ok = couch_file:sync(Fd),
    write_nodes(Fd, Rest, [Pos | Positions]).
% @doc Write a list of nodes to disk together with a new inner parent node
% that encloses them. Returns the position of the parent node in the file.
-spec write_parent(Fd::file:io_device(), Nodes::[vtree_node()]) ->
    {ok, integer()}.
write_parent(Fd, Nodes) ->
    % Children are persisted first so the parent can reference their
    % positions; the parent's MBR encloses all of them.
    ChildPositions = write_nodes(Fd, Nodes),
    Parent = {vtree:calc_nodes_mbr(Nodes), #node{type=inner}, ChildPositions},
    {ok, ParentPos} = couch_file:append_term(Fd, Parent),
    ok = couch_file:sync(Fd),
    {ok, ParentPos}.
% XXX vmx: insert_subtree and potentially other functions should be moved
% from the vtree_bulk to the vtree module
% @doc inserts a subtree into an vtree at a specific level. Returns the
% MBR, position in the file of the new root node and the increase in height
% of the tree.
-spec insert_subtree(Fd::file:io_device(), RootPos::integer(),
    Subtree::[{mbr(), integer()}], Level::integer()) ->
    {ok, mbr(), integer(), integer()}.
insert_subtree(Fd, RootPos, Subtree, Level) ->
    case insert_subtree(Fd, RootPos, Subtree, Level, 0) of
        {ok, _Mbr, _Pos, _Inc} = Done ->
            Done;
        {splitted, SplitMbr, {_LeftMbr, LeftPos}, {_RightMbr, RightPos}, Inc} ->
            % The root was split: wrap both halves in a fresh root node,
            % which grows the tree by one additional level.
            NewRoot = {SplitMbr, #node{type=inner}, [LeftPos, RightPos]},
            {ok, NewRootPos} = couch_file:append_term(Fd, NewRoot),
            ok = couch_file:sync(Fd),
            {ok, SplitMbr, NewRootPos, Inc+1}
    end.
% @doc Returns either ok and MBR, position in file and the height of the tree,
% or splitted and the enclosing MBR, MBR and position in file of the
% individual nodes and the increase in height of the tree.
-spec insert_subtree(Fd::file:io_device(), RootPos::integer(),
    Subtree::[{mbr(), integer()}], Level::integer(), Depth::integer()) ->
    {ok, mbr(), integer(), integer()} |
    {splitted, mbr(), {mbr(), integer()}, {mbr(), integer()}, integer()}.
% Reached the insertion level: splice the subtree roots into this node's
% children, splitting the node when it overflows.
insert_subtree(Fd, RootPos, Subtree, Level, Depth) when Depth==Level ->
    {SubtreeMbrs, SubtreePosList} = lists:unzip(Subtree),
    {ok, Parent} = couch_file:pread_term(Fd, RootPos),
    {ParentMbr, ParentMeta, EntriesPos} = Parent,
    MergedMbr = vtree:calc_mbr([ParentMbr|SubtreeMbrs]),
    ChildrenPos = SubtreePosList ++ EntriesPos,
    if
    length(ChildrenPos) =< ?MAX_FILLED ->
        NewNode = {MergedMbr, ParentMeta, ChildrenPos},
        {ok, Pos} = couch_file:append_term(Fd, NewNode),
        ok = couch_file:sync(Fd),
        {ok, MergedMbr, Pos, 0};
    true ->
        Children = load_nodes(Fd, ChildrenPos),
        % Transform the nodes, so that we don't need to write them again.
        % They are now a tuple with their MBR, Meta and *their* position
        % in the file (as opposed to the position of their children)
        Children2 = [{Mbr, Meta, Pos} || {{Mbr, Meta, _}, Pos} <-
            lists:zip(Children, ChildrenPos)],
        % The maximum number of nodes can be 2*MAX_FILLED, therefore we can't
        % use the Ang/Tan split algorithm, but use the OMT algorithm to split
        % the nodes into two partitions.
        {[Part1, Part2], _OmtHeight} = omt_load(Children2, ?MAX_FILLED),
        Mbr1 = vtree:calc_nodes_mbr(Part1),
        Mbr2 = vtree:calc_nodes_mbr(Part2),
        % Get the original position in file of the nodes
        Part1Pos = [Pos || {_Mbr, _Meta, Pos} <- Part1],
        Part2Pos = [Pos || {_Mbr, _Meta, Pos} <- Part2],
        Node1 = {Mbr1, #node{type=inner}, Part1Pos},
        Node2 = {Mbr2, #node{type=inner}, Part2Pos},
        {ok, Pos1} = couch_file:append_term(Fd, Node1),
        {ok, Pos2} = couch_file:append_term(Fd, Node2),
        ok = couch_file:sync(Fd),
        {splitted, MergedMbr, {Mbr1, Pos1}, {Mbr2, Pos2}, 0}
    end;
% Still above the insertion level: descend into the child that needs the
% least MBR expansion, then rewrite this node with the updated child.
insert_subtree(Fd, RootPos, Subtree, Level, Depth) ->
    %{SubtreeMbr, SubtreePos} = Subtree,
    {SubtreeMbrs, _SubtreePosList} = lists:unzip(Subtree),
    % Calculate the MBR that enclosed both nodes, as both nodes should end up
    % in the same target node
    SubtreeMbr = vtree:calc_mbr(SubtreeMbrs),
    {ok, Parent} = couch_file:pread_term(Fd, RootPos),
    {ParentMbr, _ParentMeta, EntriesPos} = Parent,
    {{_LeastMbr, LeastPos}, LeastRest} = least_expansion(
        Fd, SubtreeMbr, EntriesPos),
    LeastRestPos = [Pos || {_Mbr, Pos} <- LeastRest],
    case insert_subtree(Fd, LeastPos, Subtree, Level, Depth+1) of
    {ok, NewMbr, NewPos, Inc} ->
        MergedMbr = vtree:merge_mbr(ParentMbr, NewMbr),
        NewNode = {MergedMbr, #node{type=inner}, [NewPos|LeastRestPos]},
        {ok, Pos} = couch_file:append_term(Fd, NewNode),
        ok = couch_file:sync(Fd),
        {ok, NewMbr, Pos, Inc};
    {splitted, ChildMbr, {Child1Mbr, ChildPos1}, {Child2Mbr, ChildPos2}, Inc} ->
        MergedMbr = vtree:merge_mbr(ParentMbr, ChildMbr),
        % NOTE(review): LeastRestPos is already bound above; this is a
        % redundant (but harmless) re-match against the same value.
        LeastRestPos = [Pos || {_Mbr, Pos} <- LeastRest],
        if
        % Both nodes of the split fit in the current inner node
        %length(EntriesPos)+2 =< ?MAX_FILLED ->
        length(LeastRestPos)+2 =< ?MAX_FILLED ->
            ChildrenPos = [ChildPos1, ChildPos2] ++ LeastRestPos,
            NewNode = {MergedMbr, #node{type=inner}, ChildrenPos},
            {ok, Pos} = couch_file:append_term(Fd, NewNode),
            ok = couch_file:sync(Fd),
            {ok, MergedMbr, Pos, Inc};
        % We need to split the inner node
        true ->
            Child1 = {Child1Mbr, #node{type=inner}, ChildPos1},
            Child2 = {Child2Mbr, #node{type=inner}, ChildPos2},
            % NOTE vmx: for vtree:split/1 the nodes need to have a special
            % format a tuple with their MBR, Meta and *their* position in
            % the file (as opposed to the position of their children)
            Children = [{Mbr, Meta, Pos} || {{Mbr, Meta, _}, Pos} <-
                lists:zip(load_nodes(Fd, LeastRestPos), LeastRestPos)],
            Children2 = [Child1, Child2] ++ Children,
            {SplittedMbr, {Node1Mbr, _, _}=Node1, {Node2Mbr, _, _}=Node2}
                = vtree:split_node({MergedMbr, #node{type=inner}, Children2}),
            {ok, Pos1} = couch_file:append_term(Fd, Node1),
            {ok, Pos2} = couch_file:append_term(Fd, Node2),
            ok = couch_file:sync(Fd),
            {splitted, SplittedMbr, {Node1Mbr, Pos1}, {Node2Mbr, Pos2}, Inc}
        end
    end.
% XXX vmx: needs tests
% @doc Find the node that needs least expansion for the given MBR and returns
% the MBR and position of the node together with the remaining children MBRs
% and positions in the file.
-spec least_expansion(Fd::file:io_device(), NewMbr::mbr(),
    PosList::[integer()]) -> {{mbr, integer()}, [{mbr, integer()}]}.
least_expansion(Fd, NewMbr, PosList) ->
    Nodes = load_nodes(Fd, PosList),
    NodesAndPos = lists:zip(Nodes, PosList),
    MbrAndPos = [{Mbr, Pos} || {{Mbr, _, _}, Pos} <- NodesAndPos],
    % Find the node that matches best (least expansion of MBR). The
    % accumulator is {MinExp, BestIdx, Cnt}: the smallest expansion seen so
    % far, the 0-based index of the node it belongs to, and the running
    % index (MinExp == -1 marks "nothing seen yet").
    {_, Nth, _} = lists:foldl(fun({Mbr, _Pos}, {MinExp, Nth2, Cnt}) ->
        MergedMbr = vtree:merge_mbr(NewMbr, Mbr),
        % if there is an element which needs less expansion, put the info
        % into the accumulator
        case vtree:area(MergedMbr) - vtree:area(Mbr) of
        Exp when Exp < MinExp orelse (MinExp==-1) ->
            {Exp, Cnt, Cnt+1};
        _ ->
            {MinExp, Nth2, Cnt+1}
        end
    end, {-1, 0, 0}, MbrAndPos),
    % Remove the child where the node will be inserted. Nth is the 0-based
    % index of the best node, so splitting at Nth leaves it at the head of
    % the second part. (Previously the code extracted the running counter
    % from the accumulator and split at Nth-1, which always selected the
    % *last* child regardless of expansion.)
    {C1, C2} = lists:split(Nth, MbrAndPos),
    {hd(C2), C1 ++ tl(C2)}.
%%%%% Helpers %%%%%

% @doc Returns the ceiling of log_Base(X). Returns 1 for X==1 so that a
% single node always yields a tree of height 1.
-spec log_n_ceil(N::integer(), X::integer()) -> integer().
log_n_ceil(_Base, 1) ->
    1;
log_n_ceil(Base, X) ->
    % Change of base: log_Base(X) == ln(X) / ln(Base)
    ceiling(math:log(X) / math:log(Base)).
% From http://www.trapexit.org/Floating_Point_Rounding (2010-10-18)
% @doc Round towards positive infinity: the smallest integer >= X.
-spec ceiling(X::number()) -> integer().
ceiling(X) ->
    % trunc/1 rounds towards zero, so it only under-shoots for positive
    % non-integers; bump by one exactly in that case.
    Truncated = trunc(X),
    if
        X > Truncated -> Truncated + 1;
        true -> Truncated
    end.
% From http://www.trapexit.org/Floating_Point_Rounding (2010-10-18)
% @doc Round towards negative infinity: the largest integer =< X.
% NOTE(review): on OTP 20+ this clashes with the auto-imported floor/1 BIF;
% compiling there needs -compile({no_auto_import, [floor/1]}) — confirm the
% targeted OTP release.
-spec floor(X::number()) -> integer().
floor(X) ->
    % trunc/1 rounds towards zero, so it only over-shoots for negative
    % non-integers; drop by one exactly in that case.
    Truncated = trunc(X),
    if
        X < Truncated -> Truncated - 1;
        true -> Truncated
    end.
% @doc Split List into chunks of Size elements (the last chunk may be
%     shorter), apply Fun to every chunk and collect the results in order.
%     A Size of 0 returns the whole list as one single, untransformed chunk.
-spec chunk_list(Fun::fun(), List::list(), Size::integer()) -> [list()].
chunk_list(_Fun, List, Size) when Size == 0 ->
    % Degenerate case: no chunking at all (note that Fun is *not* applied)
    [List];
chunk_list(Fun, List, Size) ->
    chunk_list(Fun, List, Size, 0, [], []).

% Internal worker. Count is the fill level of Current (which is kept in
% reverse order); Done collects the already-transformed chunks, newest
% first, and is reversed once at the end.
chunk_list(Fun, [], _Size, _Count, [_|_] = Current, Done) ->
    % Input exhausted with a partial chunk left over: flush it
    lists:reverse([Fun(lists:reverse(Current)) | Done]);
chunk_list(_Fun, [], _Size, _Count, [], Done) ->
    lists:reverse(Done);
chunk_list(Fun, [Elem | Rest], Size, Count, Current, Done) when Count < Size ->
    chunk_list(Fun, Rest, Size, Count + 1, [Elem | Current], Done);
chunk_list(Fun, [Elem | Rest], Size, _Count, Current, Done) ->
    % Current chunk is full: transform it and start a new one with Elem
    chunk_list(Fun, Rest, Size, 1, [Elem],
        [Fun(lists:reverse(Current)) | Done]).
%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License
%% at http://www.mozilla.org/MPL/
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and
%% limitations under the License.
%%
%% The Original Code is Erlando.
%%
%% The Initial Developer of the Original Code is VMware, Inc.
%% Copyright (c) 2011-2013 VMware, Inc. All rights reserved.
%%
%% This is the Omega monad, which is like the list monad but performs
%% neither depth-first nor breadth-first traversal. This implementation
%% is based on <NAME>'s Control.Monad.Omega module for Haskell
%% (http://hackage.haskell.org/packages/archive/control-monad-omega/latest/doc/html/Control-Monad-Omega.html). As
%% the documentation there states:
%%
%% Warning: Omega is only a monad when the results are interpreted
%% as a set; that is, a valid transformation according to the monad
%% laws may change the order of the results. However, the same set
%% of results will always be reachable.
%%
-module(omega_m).
-behaviour(monad).
-export(['>>='/2, return/1, fail/1]).
-export([diagonal/1]).
-behaviour(monad_plus).
-export([mzero/0, mplus/2]).
-spec '>>='([A], fun( (A) -> [B] )) -> [B].
%% Bind: apply Fun to every element and interleave the resulting lists
%% fairly via the diagonal traversal (instead of plain concatenation).
'>>='(Results, Fun) ->
    diagonal([Fun(Elem) || Elem <- Results]).
-spec return(A) -> [A].
%% Inject a single value into the monad: a singleton result set.
return(Value) -> [Value].
-spec fail(any()) -> [_A].
%% A failed computation has no results; the reason is discarded.
fail(_Reason) -> [].
-spec mzero() -> [_A].
%% Identity element of mplus/2: the empty result set.
mzero() -> [].
-spec mplus([A], [A]) -> [A].
%% Combine two result sets by concatenation.
mplus(Xs, Ys) ->
    Xs ++ Ys.
%% Given
%% [[a, b, c, d],
%%  [e, f, g, h],
%%  [i, j, k, l],
%%  [m, n, o, p]],
%%
%% diagonal traverses diagonally from the north-west corner, heading east
%% then south-west. I.e.
%% [a, b, e, c, f, i, d, g, j, m, h, k, n, l, o, p]
-spec diagonal([[A]]) -> [A].
diagonal(LoL) -> lists:append(stripe(LoL)).

%% stripe returns the anti-diagonals of a list of lists: the Nth sublist
%% of the result holds one element from each input row that reaches that
%% diagonal.
stripe([]) -> [];
stripe([[] | Xss]) -> stripe(Xss);
stripe([[X | Xs] | Xss]) -> [[X] | zip_cons(Xs, stripe(Xss))].

%% zip_cons prepends the elements of Xs pairwise onto the lists in Ys,
%% extending with fresh singleton lists when Ys runs out first.
zip_cons([], Ys) -> Ys;
zip_cons(Xs, []) -> [[X] || X <- Xs];
zip_cons([X | Xs], [Y | Ys]) -> [[X | Y] | zip_cons(Xs, Ys)].
%%%-------------------------------------------------------------------
%%% Copyright: (c) 2007-2010 Gemini Mobile Technologies, Inc. All rights reserved.
%%%
%%% Licensed under the Apache License, Version 2.0 (the "License");
%%% you may not use this file except in compliance with the License.
%%% You may obtain a copy of the License at
%%%
%%% http://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing, software
%%% distributed under the License is distributed on an "AS IS" BASIS,
%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%%% See the License for the specific language governing permissions and
%%% limitations under the License.
%%%
%%% File : brick_hash.erl
%%% Purpose : brick hashing upper layer.
%%%-------------------------------------------------------------------
%% @doc An upper (?) layer for distributed storage hashing.
%%
%% I've attempted to split my grand vision for distributed storage into
%% two different layers:
%% <ul>
%% <li> A lower storage level of an individual brick. Each brick should
%% be completely unaware of other bricks, to the full extent possible. </li>
%% <li> An upper storage level that uses hashing techniques to determine
%% which keys are stored on any particular storage brick. </li>
%% </ul>
%%
%% This upper layer has two mostly-separate parts:
%% <ul>
%% <li> A mapping from key -> chain name. </li>
%% <li> A mapping from chain name -> individual bricks. </li>
%% </ul>
%%
%% == What is a #hash_r record? ==
%%
%% A `#hash_r' record is used for two things:
%% <ul>
%% <li> The algorithm used to choose what part of the key will be used
%% for hashing: e.g. fixed-length prefix, variable length prefix </li>
%% <li> The algorithm for the actual hashing: e.g. naive. </li>
%% </ul>
%%
%% NOTE: Those two tasks should be quite separate and, at the moment,
%% are blurred together in (sometimes) bad ways. TODO: refactor!
%%
%% == Note #hash_r init functions ==
%%
%% The #hash_r record initialization functions, e.g. naive_init() and
%% var_prefix_init(), must provide a couple of well-known arities to
%% external users:
%% <ul>
%% <li> (ChainList) ... specifying only the chains' definition. All
%% other options, if any, are set to default. </li>
%% <li> (via_proplist, ChainList, PropList) ... The proplist method is
%% used (at first) by brick_admin:add_table(). Such (ab)use of
%% proplists is a common technique for presenting a fixed-arity
%% API while allowing extremely flexibility in implementing same. </li>
%% </ul>
%%
%% == What is a #g_hash_r record? ==
%%
%% A `#g_hash_r' record contains "global" hashing info for a
%% <b>single GDSS table</b>. (Perhaps "global" is a bad word? {shrug})
%%
%% The `#g_hash_r' is a collection of two `#hash_r' records, plus some
%% extra stuff. The two `#hash_r' records describe:
%% <ul>
%% <li> The hashing scheme used in the current hash configuration. </li>
%% <li> The hashing scheme used in the <b>new</b> hash configuration. </li>
%% </ul>
%%
%% Most of the time, the current and new hash configurations are the
%% same. However, if the cluster configuration changes, then the
%% difference between the current & new hashes is used to "migrate"
%% data from the current scheme to the new scheme.
%%
%% The `#g_hash_r' is data that is distributed to all the
%% `brick_simple' gen_servers (one per client node), to assist with
%% `brick_simple' calculation of {table + key} => brick mapping.
%% The `#g_hash_r' is also distributed to all bricks, so that they can
%% independently verify that that brick is truly responsible for the
%% keys sent to it. If the client's `#g_hash_r' info is stale, that's
%% OK: the incorrect brick will forward the request to the correct
%% brick.
%%
%% == Note on #g_hash_r.phase State Names ==
%%
%% The two valid state names for #g_hash_r.phase are:
%% <ul>
%% <li> pre ... the brick is prepared to sweep itself and to receive sweep
%% updates from other bricks, but should initiate no migration
%% action.</li>
%% <li> migrating ... The brick is actively migrating. It can enter this
%% state (from 'pre') by:
%% <ul>
%% <li> Being explicitly told to start sweeping itself. </li>
%% <li> Receiving a sweep update from another brick B, because
%% that brick got the "start sweeping" command before we did. </li>
%% </ul>
%% </li>
%% </ul>
%% Bugs found with Quickcheck:
%% * discovered URI parsing prefix set to $_ (ASCII 95) instead of
%% expected $/ (ASCII 47), after configuring tab1 to use 2 chains instead
%% of the default 1 chain.
-module(brick_hash).
-include("applog.hrl").
-include("brick.hrl").
-include("brick_hash.hrl").
-include("brick_public.hrl").
-ifdef(debug_hash).
-define(gmt_debug, true).
-endif.
-include("gmt_debug.hrl").
-define(PHASH2_BIGGEST_VAL, 134217727). % (2^27)-1
-define(SMALLEST_SIGNIFICANT_FLOAT_SIZE, 0.1e-12).
%% This is one level of hash management:
%% mapping from key -> chain name.
%% Exports for first hash method, 'naive'.
-export([naive_init/1, naive_init/2, naive_key_to_chain/3]).
%% Exports for second hash method, 'var_prefix'.
%% It hashes the same way as 'naive', but only operates on a
%% variable-length prefix string.
-export([var_prefix_init/1, var_prefix_init/2, var_prefix_init/3,
var_prefix_key_to_chain/3]).
%% Exports another hash method, 'fixed_prefix_numhack'.
%% It hashes similarly to 'var_prefix', but only operates on a
%% uses a constant-length prefix *and*
-export([fixed_prefix_init/1, fixed_prefix_init/3,
fixed_prefix_key_to_chain/3]).
%% API for consistent hashing. We'll do what we should've done
%% with previous efforts: separate:
%% * mapping of key -> raw hash val
%% * mapping of raw hash val -> chain name or brick name.
-export([chash_init/3, chash_key_to_chain/3]).
-export([chash_make_float_map/1, chash_make_float_map/2,
chash_float_map_to_nextfloat_list/1,
chash_nextfloat_list_to_gb_tree/1, chash_gb_next/2,
chash_scale_to_int_interval/2, % debugging func
chash_extract_new_float_map/1, chash_extract_old_float_map/1,
chash_extract_new_chainweights/1, chash_extract_old_props/1]).
%% This is the other level of hash management:
%% mapping from chain names -> individual bricks.
%%
%% This layer does *not* make any key -> chain name mapping decisions.
-export([add_migr_dict_if_missing/1,
init_global_hash_state/7, init_global_hash_state/8,
key_to_brick/3,
key_to_chain/2, % Not for general use
chain2brick/3, chain2brick/4
]).
%% API for miscellaneous/helper functions.
-export([set_chain_sweep_key/3,
all_chains/2,
update_chain_dicts/3, desc_substitute_chain/3,
verify_chain_list/2
]).
%% Useful mostly for debugging: invent a list of chains that will all
%% live on the same node.
-export([invent_nodelist/2, invent_nodelist/4]).
%% Chain descriptor
-record(chaindesc_r, {
name,
length,
head,
middles,
tail
}).
%% Opaque record for naive method.
%% TODO? Move naive functions & records into separate module?
-record(naive, {
num_chains,
map
}).
%% Opaque record for the variable-length prefix method.
-record(var_prefix, {
num_chains,
map,
separator,
num_separators
}).
%% Opaque record for the fixed-length prefix method.
-record(fixed_prefix, {
num_chains,
map,
prefix_len,
prefix_is_integer_hack_p
}).
%% Opaque record for consistent hashing method.
-record(chash, {
num_chains,
map,
%% Key prefix mash-up
prefix_method, % all | var_prefix | fixed_prefix,
separator, % for var prefix
num_separators, % for var prefix
prefix_len, % for fixed-length prefix
prefix_is_integer_hack_p, % for fixed-length prefix
%% chash guts
chash_map, % tuple(ChainName)
old_float_map, % float_map()
new_float_map, % float_map()
new_chainweights, % chainweight_list()
%% administrivia
old_props % proplist()
}).
-export_type([chash_r/0, fixed_prefix_r/0, naive_r/0, var_prefix_r/0]).
%% declare types for easy ref showing _r as record
-type naive_r() :: #naive{}.
-type var_prefix_r() :: #var_prefix{}.
-type fixed_prefix_r() :: #fixed_prefix{}.
-type chash_r() :: #chash{}.
%% QQQ Re-write call sig!
-type brick() :: {atom(),atom()}.
-type bricklist() :: [brick()].
-type chainlist() :: [{atom(),bricklist()}].
-type phase() :: pre | migrating | post | phase_unused | unused.
-type chain_error() :: chain2brick_error | chain_is_zero_length.
-spec add_migr_dict_if_missing(#g_hash_r{}) -> #g_hash_r{}.
-spec all_chains(#g_hash_r{}, current | new ) -> [{any(),any()}].
-spec chain2brick(atom(),read|write,#g_hash_r{}) -> chain_error() |brick().
-spec chain2brick(atom(),read|write,#g_hash_r{},current|new) -> chain_error()|brick().
-spec chash_extract_old_props(#hash_r{}|#g_hash_r{}) -> [{any(),any()}].
-spec chash_init(via_proplist,[{atom(),[atom()]}],[{any(),any()}]) -> #hash_r{}.
-spec desc_substitute_chain(#hash_r{},atom(),[atom()]) -> #hash_r{}.
-spec doo_iter_chain([{any(),float()}],integer(),integer()) -> ok.
-spec fixed_prefix_init(chainlist()) -> #hash_r{}.
-spec fixed_prefix_init(via_proplist,chainlist(),[{any(),any()}]) -> #hash_r{}.
-spec init_global_hash_state(atom(), phase(), integer(),#hash_r{},chainlist(),#hash_r{},chainlist()) -> #g_hash_r{}.
-spec init_global_hash_state(atom(), phase(), integer(),
#hash_r{}, chainlist(),#hash_r{},chainlist(),true|false) -> #g_hash_r{}.
-spec key_to_brick(read|write,term(),#g_hash_r{}) -> chain_error()| brick().
-spec key_to_chain(term(),#g_hash_r{}) -> atom().
-spec naive_init(chainlist(),true|false) -> #hash_r{}.
-spec set_chain_sweep_key(atom(),term(),#g_hash_r{}) -> #g_hash_r{}.
-spec update_chain_dicts(#g_hash_r{},atom(),[any()]) -> #g_hash_r{}.
-spec verify_chain_list(chainlist()) -> ok.
-spec verify_chain_list(chainlist(),true|false) -> ok.
%% @doc Convenience wrapper for init_global_hash_state/8: zero-length
%% chains are disallowed.
init_global_hash_state(MigratingP, Phase, CurrentRev,
                       CurrentHashDesc, CurrentChainList,
                       NewHashDesc, NewChainList) ->
    init_global_hash_state(MigratingP, Phase, CurrentRev,
                           CurrentHashDesc, CurrentChainList,
                           NewHashDesc, NewChainList, false).
%% @doc Build the global hash record for a single table: verify both
%% chain lists, normalize the migration phase, and construct the
%% per-chain lookup dictionaries. A migration dictionary is only
%% allocated when the table is actually migrating.
init_global_hash_state(MigratingP, Phase, CurrentRev,
                       CurrentHashDesc, CurrentChainList,
                       NewHashDesc, NewChainList, ZeroLengthOK)
  when is_integer(CurrentRev), CurrentRev > 0,
       is_record(CurrentHashDesc, hash_r),
       is_record(NewHashDesc, hash_r) ->
    ok = verify_chain_list(CurrentChainList, ZeroLengthOK),
    ok = verify_chain_list(NewChainList, ZeroLengthOK),
    %% Any unknown phase value degrades to 'pre'.
    EffPhase = case lists:member(Phase, [pre, migrating, post]) of
                   true  -> Phase;
                   false -> pre
               end,
    MigrDict = case MigratingP of
                   true -> dict:new();
                   _    -> undefined
               end,
    #g_hash_r{migrating_p = MigratingP,
              phase = EffPhase,
              current_rev = CurrentRev,
              current_h_desc = CurrentHashDesc,
              new_h_desc = NewHashDesc,
              current_chain_dict = chain_list2dict(CurrentChainList),
              new_chain_dict = chain_list2dict(NewChainList),
              migr_dict = MigrDict}.
%% @doc Ensure the global hash carries a migration dictionary, creating
%% an empty one if it is currently undefined.
add_migr_dict_if_missing(#g_hash_r{migr_dict = undefined} = GH) ->
    GH#g_hash_r{migr_dict = dict:new()};
add_migr_dict_if_missing(GH) when is_record(GH, g_hash_r) ->
    GH.
%% @spec (read | write, term(), g_hash_r()) -> {brick(), node()} | error | exit
%% @doc Resolve Key to the brick currently responsible for it, taking any
%% in-progress migration sweep into account.
key_to_brick(ReadOrWrite, Key, GH) ->
    key_to_chainsweep(ReadOrWrite, Key, GH, fun chain2brick/4).
%% @doc Core routing: map Key to a chain using the current hash scheme;
%% if a migration is in progress and the new scheme maps the key to a
%% different chain, consult the per-chain sweep position to decide
%% whether the key has already been migrated (behind the sweep -> new
%% chain) or not yet (in front of the sweep -> current chain).
%% FinalFun/4 converts the chosen chain name into the caller's desired
%% result (e.g. a brick via chain2brick/4).
key_to_chainsweep(ReadOrWrite, Key, GH, FinalFun)
  when ReadOrWrite == read; ReadOrWrite == write ->
    #g_hash_r{current_rev = CurRev, minor_rev = MinorRev, current_h_desc = H} =
        GH,
    %% Hash under the *current* scheme.
    ChainName = (H#hash_r.mod):(H#hash_r.func)(
                  ReadOrWrite, Key, H#hash_r.opaque),
    if GH#g_hash_r.migrating_p == false orelse
       (GH#g_hash_r.migrating_p == true andalso GH#g_hash_r.phase == pre) ->
            %% No active migration: the current mapping is authoritative.
            B = FinalFun(ChainName, ReadOrWrite, GH, current),
            Debs = [{migrating_p, GH#g_hash_r.migrating_p},
                    {phase, GH#g_hash_r.phase}],
            ?DBG_HASHx({ChainName, Key, no_mig, CurRev, MinorRev, B, Debs}),
            B;
       true ->
            %% Migrating: also hash under the *new* scheme.
            HNew = GH#g_hash_r.new_h_desc,
            ChainNameNew = (HNew#hash_r.mod):(HNew#hash_r.func)(
                             ReadOrWrite, Key, HNew#hash_r.opaque),
            if ChainName == ChainNameNew ->
                    %% Same chain under both schemes: no ambiguity.
                    B = FinalFun(ChainName, ReadOrWrite, GH, current),
                    ?DBG_HASHx({Key, mig_same, CurRev, MinorRev, B}),
                    B;
               true ->
                    case where_is_key_relative_to_sweep(Key, ChainName, GH) of
                        in_front ->
                            %% Sweep has not reached this key yet.
                            B = FinalFun(ChainName, ReadOrWrite, GH, current),
                            ?DBG_HASHx({Key, mig_current, CurRev, MinorRev, B}),
                            B;
                        behind ->
                            %% Key has already been migrated.
                            B = FinalFun(ChainNameNew, ReadOrWrite, GH, new),
                            ?DBG_HASHx({Key, mig_new, CurRev, MinorRev, B}),
                            B
                    end
            end
    end.
%% @spec (term(), g_hash_r()) -> atom() | error | exit
%% @doc Like key_to_brick/3 but stop at the chain name: the final
%% conversion step simply returns the chain chosen by the sweep logic.
key_to_chain(Key, GH) ->
    key_to_chainsweep(read, Key, GH,
                      fun(Chain, _RW, _GHash, _Which) -> Chain end).
%% @doc Resolve ChainName to a brick using the current chain dictionary.
chain2brick(ChainName, ReadOrWrite, GH) ->
    chain2brick(ChainName, ReadOrWrite, GH, current).
%% @doc Resolve ChainName to the brick serving ReadOrWrite requests:
%% reads go to the chain's tail, writes to its head. Returns
%% chain2brick_error when the chain is unknown in the selected
%% (current | new) dictionary.
chain2brick(ChainName, ReadOrWrite, GH, WhichDict) ->
    Dict = case WhichDict of
               current -> GH#g_hash_r.current_chain_dict;
               new     -> GH#g_hash_r.new_chain_dict
           end,
    case dict:find(ChainName, Dict) of
        {ok, CH} ->
            %% TODO: What to do if CH#chaindesc_r.length == 0?
            case ReadOrWrite of
                read -> CH#chaindesc_r.tail;
                _    -> CH#chaindesc_r.head
            end;
        error ->
            chain2brick_error
    end.
%% @spec (atom(), term(), g_hash_r()) -> g_hash_r()
%% @doc Record Key as the new migration sweep position for ChainName in
%% the global hash's migration dictionary.
%% Any non-record third argument (typically 'undefined', when the global
%% hash is not yet known) is passed through untouched: crashing here
%% would be worse than ignoring the update.
set_chain_sweep_key(ChainName, Key, #g_hash_r{migr_dict = MigrDict} = GH) ->
    GH#g_hash_r{migr_dict = dict:store(ChainName, Key, MigrDict)};
set_chain_sweep_key(_ChainName, _Key, Other) ->
    Other.
%% @spec (g_hash_r(), current | new) -> list({atom(), list()})
%% @doc Return every {chain name, brick list} pair (active bricks only!)
%% from the chosen hash descriptor of a global hash record.
all_chains(GH, WhichDescriptor) ->
    Dict = case WhichDescriptor of
               current -> GH#g_hash_r.current_chain_dict;
               new     -> GH#g_hash_r.new_chain_dict
           end,
    [{ChainName,
      case Desc of
          %% Single-brick chain: head and tail are the same brick.
          #chaindesc_r{head = Same, tail = Same} ->
              [Same];
          #chaindesc_r{head = Head, middles = Middles, tail = Tail} ->
              [Head] ++ Middles ++ [Tail]
      end}
     || {ChainName, Desc} <- dict:to_list(Dict)].
%% @doc Rebuild the descriptor for ChainName from BrickList and install
%% it into whichever of the current/new chain dictionaries already know
%% the chain, bumping the minor revision.
update_chain_dicts(GH, ChainName, BrickList) ->
    Desc = make_chaindesc(ChainName, BrickList),
    ReplaceIfPresent =
        fun(Dict) ->
                case dict:is_key(ChainName, Dict) of
                    true  -> dict:store(ChainName, Desc, Dict);
                    false -> Dict
                end
        end,
    GH#g_hash_r{minor_rev = GH#g_hash_r.minor_rev + 1,
                current_chain_dict =
                    ReplaceIfPresent(GH#g_hash_r.current_chain_dict),
                new_chain_dict =
                    ReplaceIfPresent(GH#g_hash_r.new_chain_dict)}.
%% @doc Rebuild the #hash_r descriptor Desc with ChainName's brick list
%% replaced by BrickList. If ChainName is not part of the descriptor's
%% healthy chain list, Desc is returned unchanged. Otherwise the
%% descriptor is re-initialized from scratch with the same
%% method-specific options it was originally created with, so that any
%% internal maps stay consistent with the substituted chain.
desc_substitute_chain(Desc, ChainName, BrickList) ->
    case proplists:get_value(ChainName, Desc#hash_r.healthy_chainlist) of
        undefined ->
            Desc;
        _ ->
            NewCL = lists:keyreplace(
                      ChainName, 1, Desc#hash_r.healthy_chainlist,
                      {ChainName, BrickList}),
            %% Dispatch on the hashing method; each branch digs the
            %% method-specific options out of the opaque record and
            %% re-runs that method's init.
            if Desc#hash_r.method == naive ->
                    naive_init(NewCL);
               Desc#hash_r.method == var_prefix ->
                    Separator = (Desc#hash_r.opaque)#var_prefix.separator,
                    NumSeps = (Desc#hash_r.opaque)#var_prefix.num_separators,
                    var_prefix_init(NewCL, false, Separator, NumSeps);
               Desc#hash_r.method == fixed_prefix ->
                    Len = (Desc#hash_r.opaque)#fixed_prefix.prefix_len,
                    HackP = (Desc#hash_r.opaque)#fixed_prefix.prefix_is_integer_hack_p,
                    fixed_prefix_init(via_proplist, NewCL,
                                      [{prefix_length, Len},
                                       {prefix_is_integer_hack, HackP}]);
               Desc#hash_r.method == chash ->
                    Props = (Desc#hash_r.opaque)#chash.old_props,
                    chash_init(via_proplist, NewCL, Props)
            end
    end.
%% @doc Determine whether Key has already been covered by ChainName's
%% migration sweep: 'behind' means the sweep has passed the key (use the
%% new mapping), 'in_front' means it has not (use the current mapping).
%% A chain with no recorded sweep position has not started sweeping, so
%% every key is still in front of it.
where_is_key_relative_to_sweep(Key, ChainName, GH) ->
    case dict:find(ChainName, GH#g_hash_r.migr_dict) of
        {ok, SweepKey} ->
            ?DBG_HASHx({relative_to_sweep, Key, ChainName, found, SweepKey}),
            %% GET_MANY_LAST / GET_MANY_FIRST act as sentinels: every key
            %% is behind a finished sweep, none is behind one that is at
            %% the very beginning.
            if SweepKey == ?BRICK__GET_MANY_LAST -> behind;
               SweepKey == ?BRICK__GET_MANY_FIRST -> in_front;
               Key =< SweepKey -> behind;
               true -> in_front
            end;
        _ ->
            ?DBG_HASHx({relative_to_sweep, Key, ChainName, not_found}),
            in_front
    end.
%% QQQ Hrm, I'm thinking that it's going to be difficult to standardize the
%% API for a specific hash algorithm's init() func?
%% @spec (list({brick_name(), list(node_name())})) -> hash_r()
%% @doc Build a 'naive' hash descriptor; zero-length chains disallowed.
naive_init(ChainList) ->
    naive_init(ChainList, false).
%% @doc Build a 'naive' hash descriptor: the whole key is hashed with
%% erlang:phash2/1 and mapped onto the chain list.
naive_init(ChainList, ZeroLengthOK) ->
    ok = verify_chain_list(ChainList, ZeroLengthOK),
    %% The chain list is stored as a tuple for O(1) element/2 access.
    Opaque = #naive{num_chains = length(ChainList),
                    map = list_to_tuple(ChainList)},
    #hash_r{method = naive,
            mod = ?MODULE,
            %% For debugging only, swap in naive_key_to_chain_DEBUG here.
            func = naive_key_to_chain,
            healthy_chainlist = ChainList,
            opaque = Opaque}.
%% Map Key onto one of the chains by hashing the entire key.
naive_key_to_chain(_ReadOrWrite, Key, Opaque) ->
    Idx = (naive_hash(Key) rem Opaque#naive.num_chains) + 1,
    {ChainName, _Members} = element(Idx, Opaque#naive.map),
    ChainName.
%% Hash any Erlang term to a non-negative integer (phash2 range).
naive_hash(Key) ->
    erlang:phash2(Key).
%% Debugging variant of naive_key_to_chain/3: instead of hashing, it
%% interprets the last byte of a binary Key as an ASCII digit and uses
%% that digit to pick the chain, making key placement predictable.
%% Not exported; referenced only by the commented-out 'func' value in
%% naive_init/2.
naive_key_to_chain_DEBUG(_ReadOrWrite, Key, Opaque) ->
    IgnoreBytes = size(Key) - 1,
    <<_:IgnoreBytes/binary, Last:8>> = Key,
    N = Last - $0,
    ChainI = (N rem Opaque#naive.num_chains) + 1,
    {ChainName, _ChainMembers} = element(ChainI, Opaque#naive.map),
    ChainName.
%% @spec (list({brick_name(), list(node_name())})) -> hash_r()
%% @doc Build a var_prefix hash descriptor with all-default options.
var_prefix_init(ChainList) ->
    var_prefix_init(ChainList, false).
%% @doc Build a var_prefix hash descriptor using the default separator
%% byte and a single separator in the hashed prefix.
var_prefix_init(ChainList, ZeroLengthOK) ->
    var_prefix_init(ChainList, ZeroLengthOK, ?HASH_PREFIX_SEPARATOR, 1).
%% @spec (via_proplist, ChainList::chain_list(), Props::prop_list()) -> hash_r()
%% @doc Return a #hash_r record for a variable-length prefix hashing scheme,
%% throwing an exception if sanity checks fail.
%%
%% When using the via_proplist version of this function, the following
%% properties are examined:
%% <ul>
%% <li> zero_length_ok ... should never be true when defining a
%%      new table. </li>
%% <li> prefix_separator ... the single byte ASCII value of the byte
%%      that separates the key's prefix from the rest of the key.
%%      Default is ?HASH_PREFIX_SEPARATOR. </li>
%% <li> num_separators ... number of prefix_separator components that are
%%      included in the key's prefix.
%%      Default is 2 (assuming key looks like "/prefix/not-prefix-stuff").</li>
%% </ul>
var_prefix_init(via_proplist, ChainList, Props) ->
    ZeroLengthOK = proplists:get_value(zero_length_ok, Props, false),
    Separator = proplists:get_value(prefix_separator, Props,
                                    ?HASH_PREFIX_SEPARATOR),
    NumSeps = proplists:get_value(num_separators, Props, 2),
    var_prefix_init(ChainList, ZeroLengthOK, Separator, NumSeps).
%% @doc Build a var_prefix hash descriptor with explicit separator byte
%% and separator count.
var_prefix_init(ChainList, ZeroLengthOK, PrefixSeparator, NumSeps) ->
    ok = verify_chain_list(ChainList, ZeroLengthOK),
    %% The chain list is stored as a tuple for O(1) element/2 access.
    Cfg = #var_prefix{num_chains = length(ChainList),
                      map = list_to_tuple(ChainList),
                      separator = PrefixSeparator,
                      num_separators = NumSeps},
    #hash_r{method = var_prefix,
            mod = ?MODULE,
            func = var_prefix_key_to_chain,
            healthy_chainlist = ChainList,
            opaque = Cfg}.
%% Map Key onto a chain by hashing only its variable-length prefix.
var_prefix_key_to_chain(_ReadOrWrite, Key, Opaque) ->
    Hash = var_prefix_hash(Key, Opaque#var_prefix.separator,
                           Opaque#var_prefix.num_separators),
    Idx = (Hash rem Opaque#var_prefix.num_chains) + 1,
    {ChainName, _Members} = element(Idx, Opaque#var_prefix.map),
    ChainName.
%% Hash the prefix of Key delimited by NumSeps occurrences of Separator;
%% keys without enough separators are hashed in their entirety.
var_prefix_hash(Key, Separator, NumSeps) ->
    erlang:phash2(find_var_prefix(Key, Separator, 0, NumSeps)).
%% @spec (list({brick_name(), list(node_name())})) -> hash_r()
%% @doc Build a fixed_prefix hash descriptor with all-default options.
fixed_prefix_init(ChainList) ->
    fixed_prefix_init(via_proplist, ChainList, []).
%% @spec (via_proplist, chain_list(), prop_list()) -> hash_r()
%% @doc Return a #hash_r record for a fixed-length prefix hashing scheme,
%% throwing an exception if sanity checks fail.
%% (Doc fix: this is the FIXED-length scheme; the old text said
%% "variable-length", copy-pasted from var_prefix_init/3.)
%%
%% When using the via_proplist version of this function, the following
%% properties are examined:
%% <ul>
%% <li> zero_length_ok ... should never be true when defining a
%%      new table. </li>
%% <li> prefix_length ... length of the prefix, default = 4 bytes. </li>
%% <li> prefix_is_integer_hack ... if true, the prefix should be interpreted
%%      as an ASCII representation of a base 10 integer for use as the
%%      hash calculation. </li>
%% </ul>
fixed_prefix_init(via_proplist, ChainList, Props) ->
    ZeroLengthOK = proplists:get_value(zero_length_ok, Props, false),
    Len = proplists:get_value(prefix_length, Props, 4),
    PrefixIsIntegerP = proplists:get_value(prefix_is_integer_hack, Props, false),
    ok = verify_chain_list(ChainList, ZeroLengthOK),
    %% The chain list is stored as a tuple for O(1) element/2 access.
    Opaque = #fixed_prefix{num_chains = length(ChainList),
                           map = list_to_tuple(ChainList),
                           prefix_len = Len,
                           prefix_is_integer_hack_p = PrefixIsIntegerP},
    #hash_r{method = fixed_prefix,
            mod = ?MODULE,
            func = fixed_prefix_key_to_chain,
            healthy_chainlist = ChainList,
            opaque = Opaque}.
%% Map Key onto a chain by hashing only its fixed-length prefix.
fixed_prefix_key_to_chain(_ReadOrWrite, Key, Opaque) ->
    Hash = fixed_prefix_hash(Key, Opaque#fixed_prefix.prefix_len,
                             Opaque#fixed_prefix.prefix_is_integer_hack_p),
    Idx = (Hash rem Opaque#fixed_prefix.num_chains) + 1,
    {ChainName, _Members} = element(Idx, Opaque#fixed_prefix.map),
    ChainName.
%% Hash the first PrefixLen bytes (binary) or characters (list) of Key;
%% shorter keys are hashed whole. With IntegerHackP the prefix is parsed
%% as a base-10 ASCII integer and used directly as the hash value.
fixed_prefix_hash(Key, PrefixLen, IntegerHackP) ->
    Prefix =
        if is_binary(Key), byte_size(Key) > PrefixLen ->
                <<P:PrefixLen/binary, _/binary>> = Key,
                P;
           is_list(Key), length(Key) > PrefixLen ->
                lists:sublist(Key, PrefixLen);
           true ->
                Key
        end,
    case IntegerHackP of
        true -> list_to_integer(gmt_util:list_ify(Prefix));
        _    -> erlang:phash2(Prefix)
    end.
%% @spec (via_proplist, chain_list(), prop_list()) -> hash_r()
%% @doc Return a #hash_r record for the consistent hashing scheme,
%% throwing an exception if sanity checks fail.
%%
%% The following properties are examined for key preprocessing:
%% <ul>
%% <li> prefix_method ... all | var_prefix | fixed_prefix. This property
%%      is mandatory. Its value will affect whether or not the other
%%      properties in this section must also be present. </li>
%% <li> zero_length_ok ... should never be true when defining a
%%      new table. If undefined, assumed to be false. </li>
%% <li> prefix_length ... length of the prefix, default = 4 bytes. </li>
%% <li> prefix_is_integer_hack ... if true, the prefix should be interpreted
%%      as an ASCII representation of a base 10 integer for use as the
%%      hash calculation. </li>
%% <li> prefix_separator ... the single byte ASCII value of the byte
%%      that separates the key's prefix from the rest of the key.
%%      Default is ?HASH_PREFIX_SEPARATOR. </li>
%% <li> num_separators ... number of prefix_separator components that are
%%      included in the key's prefix.
%%      Default is 2 (assuming key looks like "/prefix/not-prefix-stuff").</li>
%% </ul>
%%
%% The following properties are used for the consistent hash map creation:
%% <ul>
%% <li> old_float_map ... The old/current float_map. If missing, we assume
%%      we're starting from scratch and will use [{unused, 1.0}]. </li>
%% <li> new_chainweights ... list({ChainName::atom(), Weight::integer()}).
%%      This item is mandatory: an obscure exception will be thrown if
%%      it is missing. </li>
%% </ul>
%%
chash_init(via_proplist, ChainList, Props) ->
    ZeroLengthOK = proplists:get_value(zero_length_ok, Props, false),
    ok = verify_chain_list(ChainList, ZeroLengthOK),
    ChainListChains = lists:usort([Ch || {Ch, _Brs} <- ChainList]),
    NewWeights = proplists:get_value(new_chainweights, Props, 'missing_bad!'),
    WeightChains = lists:usort([Ch || {Ch, _Wt} <- NewWeights]),
    %% Deliberate crash (if_clause) when the chains in new_chainweights
    %% do not exactly match the chains in ChainList.
    if ChainListChains == WeightChains -> ok end, % sanity
    %% Key prefix config items
    Method = case proplists:get_value(prefix_method, Props) of
                 M when M == all; M == var_prefix; M == fixed_prefix -> M
             end,
    Sep = proplists:get_value(prefix_separator, Props, ?HASH_PREFIX_SEPARATOR),
    NumSeps = proplists:get_value(num_separators, Props, 2),
    Len = proplists:get_value(prefix_length, Props, 4),
    PrefixIsIntegerP = proplists:get_value(prefix_is_integer_hack, Props, false),
    %% Start the real work: derive the new float map from the old one and
    %% the desired weights, then precompute the interval lookup tree.
    OldFloatMap = proplists:get_value(old_float_map, Props, []),
    NewFloatMap = chash_make_float_map(OldFloatMap, NewWeights),
    NextFloatList = chash_float_map_to_nextfloat_list(NewFloatMap),
    Opaque = #chash{num_chains = length(ChainList),
                    map = list_to_tuple(ChainList),
                    prefix_method = Method,
                    separator = Sep,
                    num_separators = NumSeps,
                    prefix_len = Len,
                    prefix_is_integer_hack_p = PrefixIsIntegerP,
                    chash_map = chash_nextfloat_list_to_gb_tree(NextFloatList),
                    old_float_map = OldFloatMap,
                    new_float_map = NewFloatMap,
                    new_chainweights = NewWeights,
                    old_props = Props
                   },
    #hash_r{method = chash,
            mod = ?MODULE,
            func = chash_key_to_chain,
            healthy_chainlist = ChainList,
            opaque = Opaque}.
%% @doc Map Key onto a chain via consistent hashing: reduce the key to a
%% float in [0.0, 1.0] (integer hash divided by phash2's maximum value),
%% honoring the configured prefix extraction method, then look up the
%% owning chain in the precomputed gb_tree of interval boundaries.
chash_key_to_chain(_ReadOrWrite, Key, Opaque) ->
    N = if Opaque#chash.prefix_method == all ->
                naive_hash(Key) / ?PHASH2_BIGGEST_VAL;
           Opaque#chash.prefix_method == var_prefix ->
                var_prefix_hash(Key, Opaque#chash.separator,
                                Opaque#chash.num_separators) / ?PHASH2_BIGGEST_VAL;
           Opaque#chash.prefix_method == fixed_prefix ->
                X = fixed_prefix_hash(Key, Opaque#chash.prefix_len,
                                      Opaque#chash.prefix_is_integer_hack_p),
                X / ?PHASH2_BIGGEST_VAL;
           true ->
                %% Should be unreachable for records built by
                %% chash_init/3; log loudly, pause, then crash.
                ?APPLOG_ALERT(?APPLOG_APPM_050,"pid ~p: Unknown Opaque: ~p (~p)\n",
                              [self(), element(1, Opaque), Opaque]),
                timer:sleep(1000),
                exit({unknown_prefix_method, Opaque#chash.prefix_method})
        end,
    {_Float, Chain} = chash_gb_next(N, Opaque#chash.chash_map),
    Chain.
%% @doc Not used directly, but can give a developer an idea of how well
%% chash_float_map_to_nextfloat_list will do for a given value of Max.
%%
%% For example:
%% <verbatim>
%% NewFloatMap = chash_make_float_map([{unused, 1.0}],
%%                                    [{a,100}, {b, 100}, {c, 10}]),
%% ChashMap = chash_scale_to_int_interval(NewFloatMap, 100),
%% io:format("QQQ: int int = ~p\n", [ChashMap]),
%% -> [{a,1,47},{b,48,94},{c,94,100}]
%% </verbatim>
%%
%% Interpretation: out of the 100 slots:
%% <ul>
%% <li> 'a' uses the slots 1-47 </li>
%% <li> 'b' uses the slots 48-94 </li>
%% <li> 'c' uses the slots 95-100 </li>
%% </ul>
%% (Fixed: the example printed an undefined variable; a commented-out
%% duplicate implementation below was removed.)
chash_scale_to_int_interval(NewFloatMap, Max) ->
    chash_scale_to_int_interval(NewFloatMap, 0, Max).

%% The final entry always runs to Max; note its lower bound can overlap
%% the previous entry's upper bound by one slot due to truncation.
chash_scale_to_int_interval([{Ch, _Wt}], Cur, Max) ->
    [{Ch, Cur, Max}];
chash_scale_to_int_interval([{Ch, Wt}|T], Cur, Max) ->
    Int = trunc(Wt * Max),
    [{Ch, Cur + 1, Cur + Int}|chash_scale_to_int_interval(T, Cur + Int, Max)].
%% TODO: Would it ever be necessary to extract new_h_desc from a g_hash_r()?
%% @spec (hash_r() | g_hash_r()) -> float_map()
%% @doc Given a consistent-hash-flavored #hash_r or #g_hash_r, return the
%% new float_map list (i.e. the float_map used for weighting calculations).
chash_extract_new_float_map(#hash_r{opaque = Opaque}) ->
    Opaque#chash.new_float_map;
chash_extract_new_float_map(#g_hash_r{current_h_desc = Desc}) ->
    chash_extract_new_float_map(Desc).
%% @spec (hash_r() | g_hash_r()) -> float_map()
%% @doc Given a consistent-hash-flavored #hash_r or #g_hash_r, return the
%% old float_map list (i.e. the "prior" float_map that was used as an input
%% to chash_make_float_map/2 to create the "new" float_map).
chash_extract_old_float_map(#hash_r{opaque = Opaque}) ->
    Opaque#chash.old_float_map;
chash_extract_old_float_map(#g_hash_r{current_h_desc = Desc}) ->
    chash_extract_old_float_map(Desc).
%% @spec (hash_r() | g_hash_r()) -> chainweight_list()
%% @doc Given a consistent-hash-flavored #hash_r or #g_hash_r, return the
%% new chain weights list that was used to create the new float_map list
%% (i.e. the float_map used for weighting calculations).
chash_extract_new_chainweights(#hash_r{opaque = Opaque}) ->
    Opaque#chash.new_chainweights;
chash_extract_new_chainweights(#g_hash_r{current_h_desc = Desc}) ->
    chash_extract_new_chainweights(Desc).
%% @spec (hash_r() | g_hash_r()) -> proplist()
%% @doc Given a consistent-hash-flavored #hash_r or #g_hash_r, return the
%% old_props properties list, used for initializing the #hash_r.
chash_extract_old_props(#hash_r{opaque = Opaque}) ->
    Opaque#chash.old_props;
chash_extract_old_props(#g_hash_r{current_h_desc = Desc}) ->
    chash_extract_old_props(Desc).
%% @spec (list({chain_name, list({brick_name(), node_name()})}))
%%    -> ok | {error, term()}
%% @doc This is a naive chain single (!) list checker; zero-length
%% chains are not allowed.
verify_chain_list(ChainList) ->
    verify_chain_list(ChainList, false).

%% @doc Same as verify_chain_list/1 but optionally tolerating chains
%% with no bricks. Any exit raised by the checker is converted into an
%% {error, Reason} return.
verify_chain_list(ChainList, ZeroLengthOK) ->
    case (catch verify_chain_list_2(ChainList, ZeroLengthOK)) of
        {'EXIT', Reason} -> {error, Reason};
        Other            -> Other
    end.
%% The actual checker: exits with {error, Term} at the first problem.
%% Per chain it checks: atom chain name, non-empty member list (unless
%% ZeroLengthOK), no duplicate bricks, and each brick being a
%% {BrickAtom, NodeAtom} pair. Across chains it checks unique chain
%% names and globally unique brick names.
verify_chain_list_2(ChainList, ZeroLengthOK)
  when is_list(ChainList), length(ChainList) > 0 ->
    lists:map(
      fun({ChainName, ChainMembers}) when is_atom(ChainName) ->
              SortedChainMembers = lists:sort(ChainMembers),
              if length(ChainMembers) < 1, not ZeroLengthOK ->
                      exit({error, ChainName});
                 true ->
                      ok
              end,
              %% A sorted list equals its dedup'ed self iff no duplicates.
              case {SortedChainMembers, list_uniq(SortedChainMembers)} of
                  {Sl, Sl} -> ok;
                  _ -> exit({error, duplicate_bricks_in_chain})
              end,
              %% Check for valid 2-tuples for brick name.
              lists:map(
                fun({Br, Nd}) when is_atom(Br), is_atom(Nd) ->
                        ok;
                   (X) ->
                        exit({error, X})
                end, ChainMembers),
              ok;
         (X) ->
              exit({error, X})
      end, ChainList),
    ChainNames = [Ch || {Ch, _} <- ChainList],
    SortedChainNames = lists:sort(ChainNames),
    case {SortedChainNames, list_uniq(SortedChainNames)} of
        {Sl0, Sl0} -> ok;
        _ -> exit({error, duplicate_chain_names})
    end,
    BrickNames = [Br || {_, Brs} <- ChainList, Br <- Brs],
    SortedBrickNames = lists:sort(BrickNames),
    case {SortedBrickNames, list_uniq(SortedBrickNames)} of
        {Sl1, Sl1} -> ok;
        _ -> exit({error, duplicate_brick_names})
    end,
    ok;
verify_chain_list_2(ChainList, true)
  when is_list(ChainList), length(ChainList) =:= 0 ->
    %% Added this case when brick_admin server is started very early,
    %% and none of the chains are currently available.
    %% TODO: This is harmless/a good idea, right?
    ok;
verify_chain_list_2(Bad, _) ->
    %% NOTE(review): unlike the clauses above, this one returns the error
    %% tuple directly rather than exiting; verify_chain_list/2 passes
    %% either form through to the caller.
    {error, {bad_list, Bad}}.
%% Build a dict mapping chain name -> #chaindesc_r for fast lookup.
chain_list2dict(ChainList) ->
    lists:foldl(
      fun({ChainName, []}, Dict) ->
              %% Zero-length chain: placeholder head/tail sentinels.
              Desc = #chaindesc_r{name = ChainName, length = 0,
                                  head = no_such_head, middles = [],
                                  tail = no_such_tail},
              dict:store(ChainName, Desc, Dict);
         ({ChainName, Members}, Dict) ->
              dict:store(ChainName, make_chaindesc(ChainName, Members), Dict)
      end, dict:new(), ChainList).
%% Build a #chaindesc_r from an ordered brick list. A zero-length chain
%% gets the sentinel atom chain_is_zero_length for both head and tail.
make_chaindesc(ChainName, []) ->
    #chaindesc_r{name = ChainName, length = 0,
                 head = chain_is_zero_length, middles = [],
                 tail = chain_is_zero_length};
make_chaindesc(ChainName, [Head | _] = Members) ->
    Len = length(Members),
    Tail = lists:last(Members),
    Middles = case Len > 2 of
                  true  -> lists:sublist(Members, 2, Len - 2);
                  false -> []
              end,
    #chaindesc_r{name = ChainName, length = Len,
                 head = Head, middles = Middles, tail = Tail}.
%% Fabricate NumChains test chains of ChainLen bricks each, hosted on the
%% local node and numbered from 0.
invent_nodelist(NumChains, ChainLen) ->
    invent_nodelist(NumChains, ChainLen, node(), 0).

%% Fabricate chains named test_ch<I>, for I in
%% [BaseChainNum .. BaseChainNum + NumChains - 1], on the given Node.
invent_nodelist(NumChains, ChainLen, Node, BaseChainNum) ->
    LastChainNum = BaseChainNum + NumChains - 1,
    [invent_nodelist2(ChainLen, "test_ch" ++ integer_to_list(I), Node)
     || I <- lists:seq(BaseChainNum, LastChainNum)].
%% Build one {ChainName, Bricks} tuple for a fabricated chain.  A length-1
%% chain gets a single standalone brick; longer chains get a head brick,
%% N-2 numbered middle bricks, and a tail brick -- all on Node.
invent_nodelist2(1, NameBase, Node) ->
    {list_to_atom(NameBase), [{list_to_atom(NameBase ++ "_stand"), Node}]};
invent_nodelist2(N, NameBase, Node) ->
    MiddleCount = N - 2,
    Middles = case MiddleCount of
                  0 ->
                      [];
                  _ ->
                      [{list_to_atom(NameBase ++ "_middle" ++
                                         integer_to_list(I)), Node}
                       || I <- lists:seq(1, MiddleCount)]
              end,
    {list_to_atom(NameBase),
     [{list_to_atom(NameBase ++ "_head"), Node} | Middles] ++
         [{list_to_atom(NameBase ++ "_tail"), Node}]}.
%% Return the prefix of the binary Key ending just before the Num-th
%% occurrence of the Separator byte, scanning left to right.  N is the
%% candidate prefix length being probed; note that Separator is already
%% bound, so the binary pattern below matches that exact byte value.
%% If fewer than Num separators exist (or Num =< 0), the whole Key is
%% returned via the final clause.
find_var_prefix(Key, Separator, N, Num) when N < size(Key), Num > 0 ->
    case Key of
        <<Prefix:N/binary, Separator, _/binary>> ->
            %% Found a separator at offset N.
            if Num == 1 ->
                    Prefix;
               true ->
                    find_var_prefix(Key, Separator, N + 1, Num - 1)
            end;
        _ ->
            find_var_prefix(Key, Separator, N + 1, Num)
    end;
find_var_prefix(Key, _, _, _) ->
    Key.
%% Collapse runs of equal adjacent elements (like Unix uniq); applied to a
%% sorted list this removes all duplicates.
list_uniq([X | [X | _] = Rest]) -> list_uniq(Rest);
list_uniq([X | Rest])           -> [X | list_uniq(Rest)];
list_uniq([])                   -> [].
%%
%% Consistent hashing API and helper funcs
%%
%% Create a float map (chain -> fraction of the unit interval) from the
%% given chain weights.  With no previous map, start from a single
%% all-unused interval; otherwise compute a diff against the old map so
%% that existing assignments move as little as possible.
chash_make_float_map(NewChainWeights) ->
    chash_make_float_map([], NewChainWeights).

chash_make_float_map([], NewChainWeights) ->
    Sum = add_all_weights(NewChainWeights),
    %% Fresh map: each chain simply gets its normalized share.
    DiffMap = [{Ch, Wt/Sum} || {Ch, Wt} <- NewChainWeights],
    chash_make_float_map2([{unused, 1.0}], DiffMap, NewChainWeights);
chash_make_float_map(OldFloatMap, NewChainWeights) ->
    NewSum = add_all_weights(NewChainWeights),
    %% Normalize to unit interval
    %% NewChainWeights2 = [{Ch, Wt / NewSum} || {Ch, Wt} <- NewChainWeights],
    %% Reconstruct old chain weights (will be normalized to unit interval)
    SumOldFloatsDict =
        lists:foldl(fun({Ch, Wt}, OrdDict) ->
                            orddict:update_counter(Ch, Wt, OrdDict)
                    end, orddict:new(), OldFloatMap),
    OldChainWeights = orddict:to_list(SumOldFloatsDict),
    OldSum = add_all_weights(OldChainWeights),
    OldChs = [Ch || {Ch, _} <- OldChainWeights],
    NewChs = [Ch || {Ch, _} <- NewChainWeights],
    OldChsOnly = OldChs -- NewChs,
    %% Mark any space in by a deleted chain as unused.
    OldFloatMap2 = lists:map(
                     fun({Ch, Wt} = ChWt) ->
                             case lists:member(Ch, OldChsOnly) of
                                 true ->
                                     {unused, Wt};
                                 false ->
                                     ChWt
                             end
                     end, OldFloatMap),
    %% Create a diff map of changing chains and added chains
    DiffMap = lists:map(fun({Ch, NewWt}) ->
                                case orddict:find(Ch, SumOldFloatsDict) of
                                    {ok, OldWt} ->
                                        %% Surviving chain: difference of its
                                        %% new and old normalized shares.
                                        {Ch, (NewWt / NewSum) -
                                             (OldWt / OldSum)};
                                    error ->
                                        %% Brand-new chain.
                                        {Ch, NewWt / NewSum}
                                end
                        end, NewChainWeights),
    chash_make_float_map2(OldFloatMap2, DiffMap, NewChainWeights).
%% Apply the weight-diff map to the old float map, then tidy the result by
%% merging adjacent segments owned by the same chain and folding leftover
%% unused space into its left neighbor.
chash_make_float_map2(OldFloatMap, DiffMap, _NewChainWeights) ->
    combine_neighbors(
      collapse_unused_in_float_map(
        apply_diffmap(DiffMap, OldFloatMap))).
%% Split the diff map into shrinking chains (negative diffs, applied as
%% subtractions that free interval space) and growing/new chains (positive
%% diffs, applied as additions into unused space), in that order.
apply_diffmap(DiffMap, FloatMap) ->
    Shrink = [{Ch, -Diff} || {Ch, Diff} <- DiffMap, Diff < 0],
    Grow = [ChDiff || {_Ch, Diff} = ChDiff <- DiffMap, Diff > 0],
    iter_diffmap_add(Grow, iter_diffmap_subtract(Shrink, FloatMap)).
%% Sum the weights of a [{Chain, Weight}] list.  The 0.0 seed guarantees a
%% float result even for the empty list.
add_all_weights(ChainWeights) ->
    sum_weights(ChainWeights, 0.0).

%% Tail-recursive accumulator walk (same left-to-right accumulation order
%% as the lists:foldl/3 it replaces).
sum_weights([{_Chain, Weight} | Rest], Acc) ->
    sum_weights(Rest, Acc + Weight);
sum_weights([], Acc) ->
    Acc.
%% Apply each subtraction in the diff map, one chain at a time.
iter_diffmap_subtract([{Ch, Diff}|T], FloatMap) ->
    iter_diffmap_subtract(T, apply_diffmap_subtract(Ch, Diff, FloatMap));
iter_diffmap_subtract([], FloatMap) ->
    FloatMap.

%% Apply each addition in the diff map, one chain at a time.
iter_diffmap_add([{Ch, Diff}|T], FloatMap) ->
    iter_diffmap_add(T, apply_diffmap_add(Ch, Diff, FloatMap));
iter_diffmap_add([], FloatMap) ->
    FloatMap.
%% Remove Diff worth of interval from chain Ch, turning the removed space
%% into {unused, _} segments in place.  The `==' tests are deliberate float
%% comparisons; the three branches cover exact fit, partial consumption of
%% one segment, and spilling over into later segments of the same chain.
apply_diffmap_subtract(Ch, Diff, [{Ch, Wt}|T]) ->
    if Wt == Diff ->
            %% Segment consumed exactly.
            [{unused, Wt}|T];
       Wt > Diff ->
            %% Shrink this segment; the freed part becomes unused.
            [{Ch, Wt - Diff}, {unused, Diff}|T];
       Wt < Diff ->
            %% Whole segment freed; keep subtracting from later segments.
            [{unused, Wt}|apply_diffmap_subtract(Ch, Diff - Wt, T)]
    end;
apply_diffmap_subtract(Ch, Diff, [H|T]) ->
    [H|apply_diffmap_subtract(Ch, Diff, T)];
apply_diffmap_subtract(_Ch, _Diff, []) ->
    [].
%% Give Diff worth of interval to chain Ch by claiming {unused, _} segments
%% left to right.  Mirrors apply_diffmap_subtract/3: exact fit, partial
%% claim of one unused segment, or spilling over into later unused space.
apply_diffmap_add(Ch, Diff, [{unused, Wt}|T]) ->
    if Wt == Diff ->
            %% Unused segment claimed exactly.
            [{Ch, Wt}|T];
       Wt > Diff ->
            %% Claim part of the segment; the remainder stays unused.
            [{Ch, Diff}, {unused, Wt - Diff}|T];
       Wt < Diff ->
            %% Claim the whole segment and keep looking for more space.
            [{Ch, Wt}|apply_diffmap_add(Ch, Diff - Wt, T)]
    end;
apply_diffmap_add(Ch, Diff, [H|T]) ->
    [H|apply_diffmap_add(Ch, Diff, T)];
apply_diffmap_add(_Ch, _Diff, []) ->
    [].
%% Merge adjacent segments that belong to the same chain into a single
%% segment carrying their combined weight.
combine_neighbors([{Ch, W1} | [{Ch, W2} | Rest]]) ->
    combine_neighbors([{Ch, W1 + W2} | Rest]);
combine_neighbors([Segment | Rest]) ->
    [Segment | combine_neighbors(Rest)];
combine_neighbors([]) ->
    [].
%% Fold {unused, _} space into the chain segment immediately to its left.
%% A leading unused segment (no left neighbor) is passed over unchanged by
%% the third clause, and a map consisting solely of unused space is kept
%% as-is (degenerate case).
collapse_unused_in_float_map([{Ch, Wt1}, {unused, Wt2}|T]) ->
    collapse_unused_in_float_map([{Ch, Wt1 + Wt2}|T]);
collapse_unused_in_float_map([{unused, _}] = L) ->
    L; % Degenerate case only
collapse_unused_in_float_map([H|T]) ->
    [H|collapse_unused_in_float_map(T)];
collapse_unused_in_float_map([]) ->
    [].
%% Convert a float map into a list of {CumulativeSum, ChainName} pairs with
%% strictly increasing first elements -- the form consumed by
%% chash_nextfloat_list_to_gb_tree/1.
chash_float_map_to_nextfloat_list(FloatMap) when length(FloatMap) > 0 ->
    %% QuickCheck found a bug ... need to weed out stuff smaller than
    %% ?SMALLEST_SIGNIFICANT_FLOAT_SIZE here.
    FM1 = [P || {_X, Y} = P <- FloatMap, Y > ?SMALLEST_SIGNIFICANT_FLOAT_SIZE],
    {_Sum, NFs0} = lists:foldl(fun({Name, Amount}, {Sum, List}) ->
                                       {Sum+Amount, [{Sum+Amount, Name}|List]}
                               end, {0, []}, FM1),
    lists:reverse(NFs0).
%% Build a balanced gb_tree from a nextfloat list, for fast successor
%% lookups via chash_gb_next/2.
chash_nextfloat_list_to_gb_tree([]) ->
    gb_trees:balance(gb_trees:from_orddict([]));
chash_nextfloat_list_to_gb_tree(NextFloatList) ->
    {_FloatPos, Name} = lists:last(NextFloatList),
    %% QuickCheck found a bug ... it really helps to add a catch-all item
    %% at the far "right" of the list ... 42.0 is much greater than 1.0.
    NFs = NextFloatList ++ [{42.0, Name}],
    gb_trees:balance(gb_trees:from_orddict(orddict:from_list(NFs))).
%% Find the {Key, Chain} entry owning point X.  The argument is a gb_trees
%% structure {Size, RootNode}; we discard the size and walk the raw nodes.
chash_gb_next(X, {_, GbTree}) ->
    chash_gb_next1(X, GbTree).

%% Successor search over raw gb_trees node tuples {Key, Val, Left, Right}:
%% returns the entry with the smallest Key strictly greater than X, or nil.
%% NOTE: this depends on the internal representation of gb_trees, which is
%% not a documented public API.
chash_gb_next1(X, {Key, Val, Left, _Right}) when X < Key ->
    case chash_gb_next1(X, Left) of
        nil ->
            %% Nothing smaller in the left subtree beats this node.
            {Key, Val};
        Res ->
            Res
    end;
chash_gb_next1(X, {Key, _Val, _Left, Right}) when X >= Key ->
    chash_gb_next1(X, Right);
chash_gb_next1(_X, nil) ->
    nil.
%% @type float_map() = list({brick(), float()}). A float_map is a
%% definition of brick assignments over the unit interval [0.0, 1.0].
%% The sum of all floats must be 1.0.
%% For example, [{{br1, nd1}, 0.25}, {{br2, nd1}, 0.5}, {{br3, nd1}, 0.25}].
%% @type nextfloat_list() = list({float(), brick()}). A nextfloat_list
%% differs from a float_map in two respects: 1) nextfloat_list contains
%% tuples with the brick name in 2nd position, 2) the float() at each
%% position I_n > I_m, for all n, m such that n > m.
%% For example, a nextfloat_list of the float_map example above,
%% [{0.25, {br1, nd1}}, {0.75, {br2, nd1}}, {1.0, {br3, nd1}}].
%%
%% Doodling
%%
%% Build Len chains named a, b, c, ... (single-character atoms), each with
%% weight 100.
doo_make_chain_weights(Len) ->
    NameChars = lists:seq($a, $a + Len - 1),
    lists:map(fun(C) -> {list_to_atom([C]), 100} end, NameChars).
%% doo_make_chain(Weights) ->
%% [{Name, [{x,y}]} || {Name, _Wt} <- Weights].
%% Repeatedly grow the chain set by one chain per iteration and re-derive
%% the float map each time, to observe how fragmented the map becomes
%% after many migrations (see the measurement notes below).
doo_iter_chain(StartFloatMap, StartLen, Iters) ->
    lists:foldl(
      fun(ChainLen, FloatMap) ->
              NewWeights = doo_make_chain_weights(ChainLen),
              %% NewChain = doo_make_chain(NewWeights),
              chash_make_float_map(FloatMap, NewWeights)
      end, StartFloatMap, lists:seq(StartLen + 1, StartLen + Iters)).
%% length(brick_hash:doo_iter_chain([], 0, 100)) -> 4265
%% And, to see the smallest subdivisions:
%% io:format("~P\n", [lists:keysort(2, brick_hash:doo_iter_chain([], 0, 100)), 150]).
%% [{'\257',3.46945e-18},
%% {'\230',3.46945e-18},
%% {'\223',1.04083e-17},
%% ...
%% {'\246',9.36751e-17},
%% {'\221',9.71445e-17},
%% {'\276',1.04083e-16},
%% ...
%% {'\257',7.26849e-16},
%% {'\300',3.01631e-7},
%% {'\301',3.38759e-7},
%%
%% So, even after 100 migrations of equal size, the smallest
%% significant partition is still as large as 3.01e-7. The sum of all
%% partitions smaller than that (less or equal to 1.04083e-16) is only
%% 1.32810e-14, so they're pretty safe to ignore. I've added the
%% constant ?SMALLEST_SIGNIFICANT_FLOAT_SIZE to keep track of this.
%% Hand-picked example weight map for doodling; the weights sum to 1.0.
doo_weight1() ->
    [{a, 0.1}, {b, 0.025}, {c, 0.125}, {d, 0.125}, {e, 0.025}, {f, 0.1},
     {g, 0.125}, {h, 0.125}, {i, 0.05}, {j, 0.075}, {k, 0.125}].
%%% @doc This module implements a simple registry to keep track of the number of FSMs.
%%%
%%% The `aesc_limits' server monitors each FSM, and inserts an entry into an ETS table.
%%% The size of the ETS table reflects the number of active FSMs in the node.
%%% We define two entry points in order to distinguish between reestablish attempts
%%% (`register_returning()'), for channels which we have already agreed to serve, and
%%% attempts to create new channels (`allow_new()'), where a limit check is actually
%%% imposed. This means that in certain corner cases, if lots of channels happen to
%%% leave and later reestablish roughly simultaneously, we might overshoot and end
%%% up with more channels than the configured limit (theoretically, many times as
%%% many). To avoid this, a count of channels which are currently 'on leave' would
%%% need to be kept. This could perhaps be kept current by the state cache.
%%%
-module(aesc_limits).
-behavior(gen_server).
-export([ allow_new/0
, register_returning/0 ]).
-export([ start_link/0
, init/1
, handle_call/3
, handle_cast/2
, handle_info/2
, terminate/2
, code_change/3
]).
-record(st, { }).
-define(PIDS, aesc_limits_pids).
%% @doc Ask permission to create a brand-new channel FSM for the calling
%% process.  Subject to the configured channel-count limit.
allow_new() ->
    gen_server:call(?MODULE, allow_new).

%% @doc Register a reestablishing (returning) FSM for the calling process.
%% Not subject to the limit check -- we already agreed to serve it.
register_returning() ->
    gen_server:call(?MODULE, register_returning).
%% @doc Create the (named, public) registry table and start the server.
%% The table is created in the caller's process (normally the supervisor),
%% so it survives server restarts.
start_link() ->
    %% We make the ets table public, since it's created by the supervisor.
    %% An alternative would be to set an `heir' option and use `ets:give_away/3`
    %% to give the newly (re-)started server control; this way, the ets table
    %% could be private. In either case, if the server restarts, it will have to
    %% either establish new monitors for all pids in the surviving table, or
    %% find all running FSMs, e.g. via gproc, to set up new monitors.
    %%
    %% Only create the table if it does not already exist: on a restart the
    %% named table survives (it is owned by the caller, not the server), and
    %% an unconditional ets:new/2 would crash with badarg -- defeating the
    %% restart handling that init/1 explicitly implements.
    case ets:info(?PIDS, name) of
        undefined ->
            ets:new(?PIDS, [set, public, named_table]);
        _ ->
            ok
    end,
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
%% @doc gen_server init callback.  The ETS table already exists (created in
%% start_link/0 in the caller's process).  On a restart, re-monitor every
%% FSM pid recorded in the surviving table so that 'DOWN' messages reach
%% the new server instance.
init([]) ->
    %% If this is a restart, ensure that all existing pids have working mrefs
    ets:foldl(fun refresh_monitor/2, ok, ?PIDS),
    {ok, #st{}}.
%% @doc Serialized registration requests.
%%
%% `register_returning': always admit the caller (a reestablish of a
%% channel we already agreed to serve); only guard against double
%% registration of the same pid.
%%
%% `allow_new': admit the caller only while the table size is below the
%% configured limit.
%%
%% In both paths a monitor is taken before the insert.  If the insert
%% loses (pid already registered), the monitor is dropped again with the
%% `flush' option so that a 'DOWN' message that may already have been
%% delivered does not linger in the mailbox.
handle_call(register_returning, {Pid, _}, S) ->
    MRef = monitor(process, Pid),
    case ets:insert_new(?PIDS, {Pid, MRef}) of
        true ->
            lager:debug("Returning session (~p) allowed", [Pid]),
            {reply, ok, S};
        false ->
            lager:debug("Returning session (~p) denied: already exists", [Pid]),
            demonitor(MRef, [flush]),
            {reply, {error, exists}, S}
    end;
handle_call(allow_new, {Pid,_}, S) ->
    Limit = get_limit(),
    case ets:info(?PIDS, size) of
        Sz when Sz >= Limit ->
            lager:debug("New session (~p) denied; Sz = ~p, Limit = ~p",
                        [Pid, Sz, Limit]),
            {reply, {error, channel_count_limit_exceeded}, S};
        Sz ->
            MRef = monitor(process, Pid),
            case ets:insert_new(?PIDS, {Pid, MRef}) of
                true ->
                    lager:debug("New session (~p) allowed; Size = ~p, Limit = ~p",
                                [Pid, Sz, Limit]),
                    {reply, ok, S};
                false ->
                    lager:debug("New session (~p) denied: already exists", [Pid]),
                    demonitor(MRef, [flush]),
                    {reply, {error, exists}, S}
            end
    end;
handle_call(_, _, S) ->
    {reply, {error, unknown_call}, S}.
%% No casts are part of the protocol; ignore them.
handle_cast(_Msg, S) ->
    {noreply, S}.

%% An FSM we monitor died: drop its table entry.  The match spec pins both
%% Pid and MRef, so a stale 'DOWN' (e.g. for a ref that was never inserted)
%% deletes nothing and is harmless.
handle_info({'DOWN', MRef, process, Pid, _Reason}, S) ->
    NDeleted = ets:select_delete(?PIDS, [{ {Pid, MRef}, [], [true] }]),
    lager:debug("'DOWN' received; ~p entries deleted for ~p", [NDeleted, Pid]),
    {noreply, S};
handle_info(_, S) ->
    {noreply, S}.

%% Nothing to clean up; the table is owned by the supervisor.
terminate(_, _) ->
    ok.

code_change(_FromVsn, S, _Extra) ->
    {ok, S}.
%% This could possibly be optimized. The `find_config/2` function normally doesn't
%% do more than fetching the whole user config (as a map) and extracting an element
%% from it. If no limit has been specified (which may be a common scenario here), the
%% `schema_config' option will be pursued, which involves fetching and traversing the
%% JSON-Schema (hopefully not from disk, as it should be pre-loaded).
%% An issue with caching the value would be that we don't know if it gets changed
%% dynamically. An API function in this module for dynamically updating the limit
%% would be one way of addressing that.
%%
%% Fetch the configured maximum number of concurrent channels, falling back
%% through user config -> schema default -> hard-coded 1000.
get_limit() ->
    {ok, Max} = aeu_env:find_config([ <<"channels">>, <<"max_count">> ] , [ user_config
                                                                          , schema_default
                                                                          , {value, 1000} ]),
    Max.
%% ets:foldl/3 callback used at (re)start: replace each stored monitor ref
%% with a fresh one owned by the current server process.  The stale ref is
%% not demonitored -- it belonged to the previous (now dead) server.
refresh_monitor({Pid, _StaleMRef}, ok) ->
    NewMRef = monitor(process, Pid),
    ets:update_element(?PIDS, Pid, {2, NewMRef}),
    ok.
%%-----------------------------------------------------------------------------
%% @copyright (C) 2019, <NAME>
%% @author <NAME>
%% @doc Interface for the xor16 filter.
%%
%% Shorthand for the `exor_filter' module. For indepth documentation, see
%% that module.
%%
%% Example usage:
%% ```
%% Filter = xor16:new(["cat", "dog", "mouse"]),
%% true = xor16:contain(Filter, "cat"),
%% false = xor16:contain(Filter, "goose"),
%% '''
%% @end
%%-----------------------------------------------------------------------------
-module(xor16).
-export([
new/1,
new/2,
new_buffered/1,
new_buffered/2,
new_empty/0,
add/2,
finalize/1,
contain/2,
contain/3,
to_bin/1,
from_bin/1
]).
%%-----------------------------------------------------------------------------
%% @doc Initializes the xor filter, and runs the default hash function on
%% each of the elements in the list. This should be fine for the general case.
%% @end
%%-----------------------------------------------------------------------------
-spec new(list()) -> {reference(), atom()} | {error, atom()}.
new(List) ->
    exor_filter:xor16(List).

%%-----------------------------------------------------------------------------
%% @doc Initializes the xor filter, and runs the specified hash on each of
%% the elements in the list.
%%
%% The option `default_hash' uses the `erlang:phash2/1' function.
%% The option `none' is for prehashed data.
%% A fun can be passed that will be applied to each element.
%% @end
%%-----------------------------------------------------------------------------
-spec new(list(), exor_filter:hash_function()) ->
    {reference(), exor_filter:hash_function()} | {error, atom()}.
new(List, HashFunction) ->
    exor_filter:xor16(List, HashFunction).

%%-----------------------------------------------------------------------------
%% @doc Initializes the xor filter, and runs the default hash function on
%% each of the elements in the list. This is the buffered version, meant for
%% large filters.
%% @end
%%-----------------------------------------------------------------------------
-spec new_buffered(list()) -> {reference(), atom()} | {error, atom()}.
new_buffered(List) ->
    exor_filter:xor16_buffered(List).

%%-----------------------------------------------------------------------------
%% @doc Initializes an empty filter. Can be filled incrementally, and is
%% more memory efficient than storing entire data set in the Erlang VM.
%% Initializes the filter to 64 elements, but will be dynamically expanded
%% if more elements are added.
%% @end
%%-----------------------------------------------------------------------------
-spec new_empty() -> {builder, reference()}.
new_empty() ->
    exor_filter:exor_empty().

%%-----------------------------------------------------------------------------
%% @doc Adds elements to the filter, and applies the default hashing
%% mechanism.  Dynamically re-sizes the filter if needed.
%% @end
%%-----------------------------------------------------------------------------
-spec add({builder, reference()}, list()) -> {builder, reference()}.
add(Filter, Elements) ->
    SortedElements = lists:sort(Elements),
    exor_filter:exor_add(Filter, SortedElements).

%%-----------------------------------------------------------------------------
%% @doc Initializes filter internally, and frees data buffer. Equivalent to
%% calling `xor16:new'.
%% Deduplication is not done, `finalize' will fail if duplicates are inserted.
%% @end
%%-----------------------------------------------------------------------------
-spec finalize({builder, reference()}) -> reference().
finalize(Filter) ->
    exor_filter:xor16_finalize(Filter).

%%-----------------------------------------------------------------------------
%% @doc Initializes the xor filter, and runs the specified hash function on
%% each of the elements in the list. This is the buffered version, meant for
%% large filters. See the `xor16:new/2' or `exor_filter:xor16_new/2' functions
%% for more in-depth documentation.
%% @end
%%-----------------------------------------------------------------------------
-spec new_buffered(list(), exor_filter:hash_function())
    -> {reference(), exor_filter:hash_function()} | {error, atom()}.
new_buffered(List, HashFunction) ->
    exor_filter:xor16_buffered(List, HashFunction).
%%-----------------------------------------------------------------------------
%% @doc Tests to see if the passed argument is in the filter. The first
%% argument must be the pre-initialized filter.
%%
%% A filter previously serialized by `to_bin' is allowed.
%% @end
%%-----------------------------------------------------------------------------
-spec contain({reference() | binary(), exor_filter:hash_function()}, term()) -> true | false.
contain(Filter, Key) ->
    exor_filter:xor16_contain(Filter, Key).

%%-----------------------------------------------------------------------------
%% @doc Tests to see if the passed argument is in the filter. The first
%% argument must be the pre-initialized filter.
%%
%% A filter previously serialized by `to_bin' is allowed.
%%
%% Will return the third argument if the element doesn't exist in the filter.
%% @end
%%-----------------------------------------------------------------------------
-spec contain({reference() | binary(), exor_filter:hash_function()}, term(), any()) -> true | any().
contain(Filter, Key, ReturnValue) ->
    exor_filter:xor16_contain(Filter, Key, ReturnValue).

%%-----------------------------------------------------------------------------
%% @doc Serializes the filter to a binary that can later be deserialized with
%% `from_bin/1'.
%%
%% Returns `{binary(), hash_function()}'.
%% @end
%%-----------------------------------------------------------------------------
-spec to_bin({reference(), exor_filter:hash_function()}) -> {binary(), exor_filter:hash_function()}.
to_bin(Filter) ->
    exor_filter:xor16_to_bin(Filter).
%%-----------------------------------------------------------------------------
%% @doc Deserializes a filter previously serialized with `to_bin'.
%% Accepts the `{Binary, HashFunction}' pair produced by `to_bin/1' and
%% returns a usable `{Reference, HashFunction}' filter handle.
%% @end
%%-----------------------------------------------------------------------------
-spec from_bin({binary(), exor_filter:hash_function()})
    -> {reference(), exor_filter:hash_function()}.
from_bin({_Filter, _Hash} = Packed) ->
    exor_filter:xor16_from_bin(Packed).
%% @author Couchbase <<EMAIL>>
%% @copyright 2017-Present Couchbase, Inc.
%%
%% Use of this software is governed by the Business Source License included
%% in the file licenses/BSL-Couchbase.txt. As of the Change Date specified
%% in that file, in accordance with the Business Source License, use of this
%% software will be governed by the Apache License, Version 2.0, included in
%% the file licenses/APL2.txt.
%%
%% Generic programming framework loosely based on the paper "Scrap Your
%% Boilerplate: A Practical Design Pattern for Generic Programming" by Ralf
%% Lammel and <NAME>.
%%
%% Link to the paper:
%% https://www.microsoft.com/en-us/research/wp-content/uploads/2003/01/hmap.pdf
%%
-module(generic).
-include("generic.hrl").
-ifdef(TEST).
-include("triq.hrl").
-endif.
-export([transformb/2, transformb/3,
transformt/2, transformt/3,
matching/2, matching/3,
universe/1, universe/2,
maybe_transform/2, maybe_transform/3,
query/3]).
%% Apply a transformation everywhere in bottom-up manner: children are
%% rewritten (recursively) first, then Fun is applied to the rebuilt
%% parent.  Fun :: fun((Term, State) -> {NewTerm, NewState}).
transformb(Fun, State, Term) ->
    {NewTerm, NewState} = gmap(fun (T, S) ->
                                       transformb(Fun, S, T)
                               end, State, Term),
    Fun(NewTerm, NewState).

%% Stateless convenience wrapper for transformb/3.
transformb(Fun, Term) ->
    ignoring_state(fun transformb/3, Fun, Term).

%% Apply a transformation everywhere in top-down manner: Fun is applied to
%% the parent first, then the traversal descends into the (possibly
%% rewritten) children.  Implemented as maybe_transform/3 with an
%% unconditional 'continue'.
transformt(Fun, State, Term) ->
    maybe_transform(fun (T, S) ->
                            {NewT, NewS} = Fun(T, S),
                            {continue, NewT, NewS}
                    end, State, Term).

%% Stateless convenience wrapper for transformt/3.
transformt(Fun, Term) ->
    ignoring_state(fun transformt/3, Fun, Term).
%% Return the subterms matching a predicate, in traversal order.
matching(Pred, Term) ->
    matching(Pred, Term, fun transformb/3).

%% Same, with an explicit traversal (fun transformb/3 or fun transformt/3).
%% The traversal is used purely for its visiting order; terms are passed
%% through unmodified while matches are accumulated.
matching(Pred, Term, Traversal) ->
    {_, Result} =
        Traversal(fun (T, Acc) ->
                          NewAcc = case Pred(T) of
                                       true ->
                                           [T | Acc];
                                       false ->
                                           Acc
                                   end,
                          {T, NewAcc}
                  end, [], Term),
    lists:reverse(Result).

%% Return all possible subterms (every subterm matches the constant-true
%% predicate, including Term itself).
universe(Term) ->
    universe(Term, fun transformb/3).

universe(Term, Traversal) ->
    matching(functools:const(true), Term, Traversal).
%% Apply a transformation everywhere in top-down manner. The 'Fun'
%% function may choose to stop the recursive descent early by
%% returning {stop, ResultTerm, ResultState}. Note, there's no 't'
%% suffix here, because short-cutting doesn't make much sense in
%% bottom-up traversal.
%% Fun :: fun((Term, State) -> {continue | stop, NewTerm, NewState}).
maybe_transform(Fun, State, Term) ->
    case Fun(Term, State) of
        {continue, NewTerm, NewState} ->
            %% Descend into the children of the rewritten term.
            gmap(fun (T, S) ->
                         maybe_transform(Fun, S, T)
                 end, NewState, NewTerm);
        {stop, NewTerm, NewState} ->
            {NewTerm, NewState}
    end.

%% Stateless convenience wrapper: Fun returns {Action, NewTerm} and the
%% unused state is threaded through internally.
maybe_transform(Fun, Term) ->
    do_ignoring_state(fun maybe_transform/3,
                      fun (T, S) ->
                              {Action, NewT} = Fun(T),
                              {Action, NewT, S}
                      end, Term).
%% Run a query on the term. The 'Fun' is called on each element in the term
%% and these values are then recombined by 'K' (a 2-argument fold
%% combiner).  The traversal order is top-down, left-to-right: Fun(Term)
%% seeds the fold, and each child's recursive query result is folded in.
query(K, Fun, Term) ->
    lists:foldl(K, Fun(Term),
                gmapq(fun (T) ->
                              query(K, Fun, T)
                      end, Term)).
%% internal

%% Generic fold over a term's immediate children.  The term is split into a
%% reconstruction tag and a child list; Fun receives the children, the
%% state, and a Recover closure that rebuilds the term from new children.
%% A failed rebuild (e.g. wrong number of children handed back) raises a
%% descriptive error carrying all the offending inputs.
gfold(Fun, State, Term) ->
    {Type, Children} = term_destructure(Term),
    Fun(Children, State,
        fun (NewChildren) ->
                try
                    term_recover(Type, NewChildren)
                catch
                    T:E:S ->
                        error({term_recover_failed,
                               {T, E, S},
                               {term, Term},
                               {type, Type},
                               {children, Children},
                               {new_children, NewChildren}})
                end
        end).
%% Split a term into a reconstruction tag and a flat list of its immediate
%% children.  Lists are viewed as cons cells, tuples and maps are exploded
%% into child lists, and anything else is a leaf carried inside the tag.
term_destructure([Head | Tail]) ->
    {cons, [Head, Tail]};
term_destructure(T) when is_tuple(T) ->
    {{tuple, tuple_size(T)}, tuple_to_list(T)};
term_destructure(M) when is_map(M) ->
    %% Flatten each {K, V} pair into two consecutive children.
    {map, lists:flatmap(fun({K, V}) -> [K, V] end, maps:to_list(M))};
term_destructure(Leaf) ->
    {{simple, Leaf}, []}.
%% Inverse of term_destructure/1: rebuild a term from its reconstruction
%% tag and the (possibly transformed) list of children.
term_recover(cons, [Head, Tail]) ->
    [Head | Tail];
term_recover({tuple, Size}, Children) ->
    Tuple = list_to_tuple(Children),
    %% Assert the transformation preserved the arity.
    Size = tuple_size(Tuple),
    Tuple;
term_recover(map, KVs) ->
    maps:from_list(pairs(KVs));
term_recover({simple, Leaf}, []) ->
    Leaf.

%% Regroup a flat [K1, V1, K2, V2, ...] list into [{K1, V1}, {K2, V2}, ...].
pairs([K, V | Rest]) ->
    [{K, V} | pairs(Rest)];
pairs([]) ->
    [].
%% Apply a transformation to direct children of a term, threading State
%% left-to-right, and rebuild the term from the transformed children.
gmap(Fun, State, Term) ->
    gfold(fun (Children, S, Recover) ->
                  {NewChildren, NewState} =
                      lists:foldl(
                        fun (Child, {AccChildren, AccS}) ->
                                {NewChild, NewAccS} = Fun(Child, AccS),
                                {[NewChild | AccChildren], NewAccS}
                        end, {[], S}, Children),
                  %% Children were accumulated in reverse; restore their
                  %% order before rebuilding the term.
                  {Recover(lists:reverse(NewChildren)), NewState}
          end, State, Term).

%% Run a query on all direct children of a term. Return results as a list.
%% The Recover closure is ignored; the term is never rebuilt.
gmapq(Fun, Term) ->
    {Result, unused} = gfold(fun (Children, State, _Recover) ->
                                     {lists:map(Fun, Children), State}
                             end, unused, Term),
    Result.
%% Adapt a stateless fun((T) -> T) for use with a stateful traversal.
ignoring_state(BaseFun, Fun, Term) ->
    do_ignoring_state(BaseFun,
                      fun (T, S) ->
                              {Fun(T), S}
                      end, Term).

%% Run a stateful traversal with a dummy state and discard it.
do_ignoring_state(BaseFun, WrappedFun, Term) ->
    {NewTerm, unused} = BaseFun(WrappedFun, unused, Term),
    NewTerm.
-ifdef(TEST).
%% test-related helpers

%% Triq generator: build a random nested term (lists, tuples, maps) whose
%% leaves, read left to right, are exactly the given Items.
random_term([]) ->
    oneof([{}, [], #{}]);
random_term([X]) ->
    oneof([[X], {X}, X, #{key => X}]);
random_term([X, Y | Rest] = Items) ->
    frequency([{2, Items},
               {2, list_to_tuple(Items)},
               {2, glue_terms(#{X => Y}, random_term(Rest))},
               {6, random_term_split(Items)}]).

%% Split Items at a random point and glue random terms built from each
%% half, preserving the left-to-right order of the items.
random_term_split(Items) ->
    ?LET(N, choose(0, length(Items)),
         begin
             {Front, Rear} = lists:split(N, Items),
             glue_terms(random_term(Front), random_term(Rear))
         end).

%% Combine two subterms into one container, X ordered before Y.
glue_terms(X, Y) ->
    oneof([{X, Y}, [X, Y], [X | Y], singleton_map(X, Y)]).

singleton_map(KSpec, VSpec) ->
    %% Typically triq instantiates the random specs lazily by walking through
    %% the spec at the very end. But at the time of this writing, it doesn't
    %% know how to deal with maps. So we need to explicitly instantiate the
    %% subterms when constructing maps.
    ?LET(K, KSpec,
         ?LET(V, VSpec, #{K => V})).
%% triq properties

%% Transforming with the identity function must leave any term unchanged.
prop_transform_id(Transform) ->
    ?FORALL(Term, any(), Transform(fun functools:id/1, Term) =:= Term).

prop_transformt_id() ->
    prop_transform_id(fun transformt/2).

prop_transformb_id() ->
    prop_transform_id(fun transformb/2).

%% traversal order is left to right, so the order of original elements must be
%% the same as in Items list
prop_transform_items_order(Transform) ->
    forall_terms(fun (Items, Term) ->
                         Items =:= matching(fun is_integer/1, Term, Transform)
                 end).

prop_transformt_items_order() ->
    prop_transform_items_order(fun transformt/3).

prop_transformb_items_order() ->
    prop_transform_items_order(fun transformb/3).

%% Both traversal orders must visit the same multiset of subterms.
prop_transforms_same_subterms() ->
    forall_terms(fun (_Items, Term) ->
                         AllT = universe(Term, fun transformt/3),
                         AllB = universe(Term, fun transformb/3),
                         lists:sort(AllT) =:= lists:sort(AllB)
                 end).

%% Transforming every integer leaf with Fun must be observationally the
%% same as mapping Fun over the original Items list.
prop_transforms_result(Transform) ->
    Props = ?FORALL(Fun, triq_utils:random_integer_fun(),
                    forall_terms(
                      fun (Items, Term) ->
                              TransFun = ?transform(I when is_integer(I), Fun(I)),
                              Items1 = lists:map(Fun, Items),
                              Term1 = Transform(TransFun, Term),
                              Items1 =:= matching(fun is_integer/1, Term1)
                      end)),
    %% each forall multiplies number of tests by 100 (by default), so we'd
    %% have to run 10^6 number of tests which is a bit too much; here we lower
    %% it to 22^3 (yes, it's somewhat confusing) which is approximately 10000
    triq:numtests(22, Props).

prop_transformt_result() ->
    prop_transforms_result(fun transformt/2).

prop_transformb_result() ->
    prop_transforms_result(fun transformb/2).

%% A query folded with QueryK/QueryFun must agree with ListFun applied to
%% the flat Items list.
prop_query_result(QueryK, QueryFun, ListFun) ->
    forall_terms(fun (Items, Term) ->
                         ListFun(Items) =:= query(QueryK, QueryFun, Term)
                 end).

prop_query_count() ->
    prop_query_result(fun functools:add/2, ?query(I when is_integer(I), 1, 0),
                      fun erlang:length/1).

prop_query_sum() ->
    prop_query_result(fun functools:add/2, ?query(I when is_integer(I), I, 0),
                      fun lists:sum/1).

%% Common harness: a random list of integers and a random nested term
%% containing exactly those integers as leaves.
forall_terms(Prop) ->
    ?FORALL(Items, list(int()),
            ?FORALL(Term, random_term(Items), Prop(Items, Term))).
-endif.
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 1999-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%% Purpose : Converts intermediate assembly code to final format.
-module(beam_flatten).
-export([module/2]).
-import(lists, [reverse/1,reverse/2]).
-spec module(beam_utils:module_code(), [compile:option()]) ->
                    {'ok',beam_utils:module_code()}.

%% Flatten every function's {block, Is} pseudo-instructions into the final
%% BEAM instruction format.
module({Mod,Exp,Attr,Fs,Lc}, _Opt) ->
    {ok,{Mod,Exp,Attr,[function(F) || F <- Fs],Lc}}.

%% Flatten one function's instruction stream.
function({function,Name,Arity,CLabel,Is0}) ->
    Is = block(Is0),
    {function,Name,Arity,CLabel,Is}.
%% Replace every {block, Is} wrapper in the instruction stream with its
%% flattened contents; all other instructions pass through unchanged.
block(Is) ->
    block(Is, []).

block([{block,Is0}|Is1], Acc) -> block(Is1, norm_block(Is0, Acc));
block([I|Is], Acc) -> block(Is, [I|Acc]);
block([], Acc) -> reverse(Acc).

%% Flatten one block into the (reversed) accumulator: alloc pseudo-ops are
%% expanded via norm_allocate/2; every other {set, ...} via norm/1.
norm_block([{set,[],[],{alloc,R,Alloc}}|Is], Acc0) ->
    norm_block(Is, reverse(norm_allocate(Alloc, R), Acc0));
norm_block([I|Is], Acc) ->
    norm_block(Is, [norm(I)|Acc]);
norm_block([], Acc) -> Acc.
%% Translate a single {set, Dsts, Srcs, Op} block element into its final
%% BEAM instruction tuple.  Purely a one-to-one syntactic rewrite; any
%% unknown element is a compiler bug and causes a function_clause error.
norm({set,[D],As,{bif,N,F}})            -> {bif,N,F,As,D};
norm({set,[D],As,{alloc,R,{gc_bif,N,F}}}) -> {gc_bif,N,F,R,As,D};
norm({set,[D],[S],move})                -> {move,S,D};
norm({set,[D],[S],fmove})               -> {fmove,S,D};
norm({set,[D],[S],fconv})               -> {fconv,S,D};
norm({set,[D],[S1,S2],put_list})        -> {put_list,S1,S2,D};
norm({set,[D],Els,put_tuple2})          -> {put_tuple2,D,{list,Els}};
norm({set,[D],[],{put_tuple,A}})        -> {put_tuple,A,D};
norm({set,[],[S],put})                  -> {put,S};
norm({set,[D],[S],{get_tuple_element,I}}) -> {get_tuple_element,S,I,D};
norm({set,[],[S,D],{set_tuple_element,I}}) -> {set_tuple_element,S,D,I};
norm({set,[D],[S],get_hd})              -> {get_hd,S,D};
norm({set,[D],[S],get_tl})              -> {get_tl,S,D};
norm({set,[D],[S|Puts],{alloc,R,{put_map,Op,F}}}) ->
    {put_map,F,Op,S,D,R,{list,Puts}};
norm({set,[],[],remove_message})        -> remove_message;
norm({set,[],[],{line,_}=Line})         -> Line.
%% Expand an allocation descriptor {ZeroMode, StackSlots, HeapWords, Inits}
%% into explicit allocation instructions.  Regs is the number of live
%% registers at the allocation point.
norm_allocate({_Zero, nostack, Nh, []}, Regs) ->
    %% No stack frame needed: just reserve heap words.
    [{test_heap, Nh, Regs}];
norm_allocate({nozero, Ns, Nh, Inits}, Regs) ->
    %% Stack frame of Ns slots; combine with the heap need when Nh > 0.
    Alloc = case Nh of
                0 -> {allocate, Ns, Regs};
                _ -> {allocate_heap, Ns, Nh, Regs}
            end,
    [Alloc | Inits].
%% The contents of this file are subject to the Erlang Public License, Version
%% 1.1, (the "License"); you may not use this file except in compliance with
%% the License. You should have received a copy of the Erlang Public License
%% along with this software. If not, it can be retrieved via the world wide web
%% at http://www.erlang.org/.
%%
%% Software distributed under the License is distributed on an "AS IS" basis,
%% WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
%% the specific language governing rights and limitations under the License.
%% Based on: Scalable Bloom Filters
%% <NAME>, <NAME>, <NAME>, <NAME>
%% Information Processing Letters
%% Volume 101, Issue 6, 31 March 2007, Pages 255-261
%%
%% Provides scalable bloom filters that can grow indefinitely while ensuring a
%% desired maximum false positive probability. Also provides standard
%% partitioned bloom filters with a maximum capacity. Bit arrays are
%% dimensioned as a power of 2 to enable reusing hash values across filters
%% through bit operations. Double hashing is used (no need for enhanced double
%% hashing for partitioned bloom filters).
%% Modified slightly by Justin Sheehy to make it a single file (incorporated
%% the array-based bitarray internally).
-module(hanoidb_bloom).
-author("<NAME> <<EMAIL>>").
-export([sbf/1, sbf/2, sbf/3, sbf/4,
bloom/1, bloom/2,
member/2, add/2,
size/1, capacity/1,
encode/1, decode/1]).
-import(math, [log/1, pow/2]).
-ifdef(TEST).
-ifdef(EQC).
-include_lib("eqc/include/eqc.hrl").
-endif.
-include_lib("eunit/include/eunit.hrl").
-endif.
-define(W, 27).
-type bitmask() :: array() | any().
-record(bloom, {
e :: float(), % error probability
n :: non_neg_integer(), % maximum number of elements
mb :: non_neg_integer(), % 2^mb = m, the size of each slice (bitvector)
size :: non_neg_integer(), % number of elements
a :: [bitmask()] % list of bitvectors
}).
-record(sbf, {
e :: float(), % error probability
r :: float(), % error probability ratio
s :: non_neg_integer(), % log 2 of growth ratio
size :: non_neg_integer(), % number of elements
b :: [#bloom{}] % list of plain bloom filters
}).
%% Constructors for (fixed capacity) bloom filters
%%
%% N - capacity
%% E - error probability
bloom(N) -> bloom(N, 0.001).

%% Size the filter from the requested capacity when N is large enough for
%% double hashing to be sound (N >= 4/E); otherwise fall back to a
%% fixed-size, bits-mode filter.
bloom(N, E) when is_number(N), N > 0,
                 is_float(E), E > 0, E < 1,
                 N >= 4/E -> % rule of thumb; due to double hashing
    bloom(size, N, E);
bloom(N, E) when is_number(N), N >= 0,
                 is_float(E), E > 0, E < 1 ->
    bloom(bits, 32, E).

%% Build a partitioned bloom filter with K slices of 2^Mb bits each.
%% In size mode, K and Mb are derived from the target capacity N and
%% error E; in bits mode, N is taken directly as Mb (the log2 slice size)
%% with a single slice.  D is the derived effective capacity stored in
%% #bloom.n.  Slice sizes are powers of two so hash values can be reused
%% across filters via bit operations (see module header).
bloom(Mode, N, E) ->
    K = case Mode of
            size -> 1 + trunc(log2(1/E));
            bits -> 1
        end,
    P = pow(E, 1 / K),
    Mb =
        case Mode of
            size ->
                1 + trunc(-log2(1 - pow(1 - P, 1 / N)));
            bits ->
                N
        end,
    M = 1 bsl Mb,
    D = trunc(log(1-P) / log(1-1/M)),
    #bloom{e=E, n=D, mb=Mb, size = 0,
           a = [bitmask_new(Mb) || _ <- lists:seq(1, K)]}.

%% Base-2 logarithm (math:log/1 is natural log).
log2(X) -> log(X) / log(2).
%% Constructors for scalable bloom filters
%%
%% N - initial capacity before expanding
%% E - error probability
%% S - growth ratio when full (log 2) can be 1, 2 or 3
%% R - tightening ratio of error probability
sbf(N) -> sbf(N, 0.001).
sbf(N, E) -> sbf(N, E, 1).
%% Each growth ratio maps to a recommended tightening ratio.
sbf(N, E, 1) -> sbf(N, E, 1, 0.85);
sbf(N, E, 2) -> sbf(N, E, 2, 0.75);
sbf(N, E, 3) -> sbf(N, E, 3, 0.65).
sbf(N, E, S, R) when is_number(N), N > 0,
                     is_float(E), E > 0, E < 1,
                     is_integer(S), S > 0, S < 4,
                     is_float(R), R > 0, R < 1,
                     N >= 4/(E*(1-R)) -> % rule of thumb; due to double hashing
    #sbf{e=E, s=S, r=R, size=0, b=[bloom(N, E*(1-R))]}.

%% Returns number of elements
size(#bloom{size=Size}) -> Size;
size(#sbf{size=Size}) -> Size.

%% Returns capacity (a scalable filter grows indefinitely).
capacity(#bloom{n=N}) -> N;
capacity(#sbf{}) -> infinity.

%% Test for membership
member(Elem, #bloom{mb=Mb}=B) ->
    Hashes = make_hashes(Mb, Elem),
    hash_member(Hashes, B);
member(Elem, #sbf{b=[H|_]}=Sbf) ->
    %% Hashes are derived from the head (newest) filter's slice size and
    %% reused for all filters via masking (slices are powers of 2, see
    %% the module header).
    Hashes = make_hashes(H#bloom.mb, Elem),
    hash_member(Hashes, Sbf).

hash_member(Hashes, #bloom{mb=Mb, a=A}) ->
    Mask = 1 bsl Mb -1,
    {I1, I0} = make_indexes(Mask, Hashes),
    all_set(Mask, I1, I0, A);
hash_member(Hashes, #sbf{b=B}) ->
    %% A member of the sbf is a member of any constituent filter.
    lists:any(fun(X) -> hash_member(Hashes, X) end, B).

%% One 32-bit hash provides enough material for slices up to 2^16 bits
%% (high/low halves are used as the two indexes); wider slices use two
%% independent 32-bit hashes.
make_hashes(Mb, E) when Mb =< 16 ->
    erlang:phash2({E}, 1 bsl 32);
make_hashes(Mb, E) when Mb =< 32 ->
    {erlang:phash2({E}, 1 bsl 32), erlang:phash2([E], 1 bsl 32)}.
%% make_indexes derives the two indexes used for double hashing from the
%% hash material produced by make_hashes/2, truncated to the slice size
%% given by Mask (Mask = 2^Mb - 1). Masks wider than 16 bits consume both
%% 32-bit hashes; narrower masks split a single hash into its high and
%% low 16-bit halves.
make_indexes(Mask, {Hash0, Hash1}) when Mask > 1 bsl 16 ->
    masked_pair(Mask, Hash0, Hash1);
make_indexes(Mask, {Hash0, _Unused}) ->
    make_indexes(Mask, Hash0);
make_indexes(Mask, Hash) ->
    masked_pair(Mask, Hash bsr 16, Hash).

%% Truncates two hash values into the index range of the bit vector.
masked_pair(Mask, A, B) ->
    {A band Mask, B band Mask}.
%% all_set checks that the bit is set in every slice, stepping the index
%% by I1 (modulo the slice size) between slices — the double hashing
%% scheme.
all_set(_Mask, _I1, _I, []) -> true;
all_set(Mask, I1, I, [H|T]) ->
    bitmask_get(I, H) andalso all_set(Mask, I1, (I+I1) band Mask, T).

%% Adds element to set
%%
add(Elem, #bloom{mb=Mb} = B) ->
    Hashes = make_hashes(Mb, Elem),
    hash_add(Hashes, B);
add(Elem, #sbf{size=Size, r=R, s=S, b=[H|T]=Bs}=Sbf) ->
    #bloom{mb=Mb, e=E, n=N, size=HSize} = H,
    Hashes = make_hashes(Mb, Elem),
    case hash_member(Hashes, Sbf) of
        true -> Sbf;
        false ->
            case HSize < N of
                true -> Sbf#sf_fix_never{} %% (placeholder removed below)
            end
    end.
%%%========== Dispatch to appropriate representation:

%% Picks a bitmap representation by size: sparse for 2^20 bits or more,
%% dense otherwise.
bitmask_new(LogN) ->
    if LogN >= 20 -> % Use sparse representation.
            hanoidb_sparse_bitmap:new(LogN);
       true -> % Use dense representation.
            hanoidb_dense_bitmap:new(1 bsl LogN)
    end.

%% Sets bit I, dispatching on the representation tag in element 1 of the
%% bitmask tuple.
bitmask_set(I, BM) ->
    case element(1,BM) of
        array -> bitarray_set(I, as_array(BM));
        sparse_bitmap -> hanoidb_sparse_bitmap:set(I, BM);
        dense_bitmap_ets -> hanoidb_dense_bitmap:set(I, BM);
        dense_bitmap ->
            %% Surprise - we need to mutate a built representation:
            hanoidb_dense_bitmap:set(I, hanoidb_dense_bitmap:unbuild(BM))
    end.
%%% Convert to external form.
%% bitmask_build converts a bitmask to its external (serializable) form.
%% Array and sparse representations already are external; an ETS-backed
%% dense bitmap is frozen with build/1. An already-built dense bitmap is
%% returned unchanged, making the conversion idempotent (previously a
%% second call crashed with case_clause).
bitmask_build(BM) ->
    case element(1, BM) of
        array -> BM;
        sparse_bitmap -> BM;
        dense_bitmap -> BM;
        dense_bitmap_ets -> hanoidb_dense_bitmap:build(BM)
    end.
%% Tests bit I, dispatching on the representation tag in element 1.
bitmask_get(I, BM) ->
    case element(1,BM) of
        array -> bitarray_get(I, as_array(BM));
        sparse_bitmap -> hanoidb_sparse_bitmap:member(I, BM);
        dense_bitmap_ets -> hanoidb_dense_bitmap:member(I, BM);
        dense_bitmap -> hanoidb_dense_bitmap:member(I, BM)
    end.

-spec as_array(bitmask()) -> array().
%% Assertion-style coercion: crashes with case_clause if BM is not an
%% array from the stdlib array module.
as_array(BM) ->
    case array:is_array(BM) of
        true -> BM
    end.

%%%========== Bitarray representation - suitable for sparse arrays ==========
%% NOTE(review): bitarray_new/1 appears unused within this module
%% (bitmask_new/1 only builds sparse or dense bitmaps); it is kept for the
%% array-based representation still accepted by bitmask_set/get — confirm.
bitarray_new(N) -> array:new((N-1) div ?W + 1, {default, 0}).

-spec bitarray_set( non_neg_integer(), array() ) -> array().
%% Sets bit I; the array stores ?W bits per slot.
bitarray_set(I, A1) ->
    A = as_array(A1),
    AI = I div ?W,
    V = array:get(AI, A),
    V1 = V bor (1 bsl (I rem ?W)),
    if V =:= V1 -> A; % The bit is already set
       true -> array:set(AI, V1, A)
    end.

-spec bitarray_get( non_neg_integer(), array() ) -> boolean().
bitarray_get(I, A) ->
    AI = I div ?W,
    V = array:get(AI, A),
    (V band (1 bsl (I rem ?W))) =/= 0.
%%%^^^^^^^^^^ Bitarray representation - suitable for sparse arrays ^^^^^^^^^^
%% encode serializes a (scalable) bloom filter into a compressed binary
%% after converting all bitmaps to their external form.
encode(Bloom) ->
    zlib:gzip(term_to_binary(bloom_build(Bloom))).

%% decode rebuilds a filter from the binary produced by encode/1.
%% NOTE(review): binary_to_term/1 on untrusted input can create atoms and
%% funs; this assumes the binary always comes from encode/1 — confirm
%% the callers.
decode(Bin) ->
    binary_to_term(zlib:gunzip(Bin)).

%%% Convert to external form.
bloom_build(Bloom=#bloom{a=Bitmasks}) ->
    Bloom#bloom{a=[bitmask_build(X) || X <- Bitmasks]};
bloom_build(Sbf=#sbf{b=Blooms}) ->
    Sbf#sbf{b=[bloom_build(X) || X <- Blooms]}.

%% UNIT TESTS
-ifdef(TEST).
-ifdef(EQC).
prop_bloom_test_() ->
    {timeout, 60, fun() -> ?assert(eqc:quickcheck(prop_bloom())) end}.

%% Generates non-empty lists of non-empty binary keys.
g_keys() ->
    non_empty(list(non_empty(binary()))).
%% prop_bloom checks that a bloom filter never yields false negatives:
%% every key added to the filter must subsequently test as a member.
%%
%% Fixes two defects in the previous version: the key list itself was
%% passed as the capacity argument of bloom/1 (a function_clause error,
%% since bloom/1,2 guard on is_number), and the keys were never added to
%% the filter before membership was checked.
prop_bloom() ->
    ?FORALL(Keys, g_keys(),
            begin
                Bloom0 = ?MODULE:bloom(length(Keys)),
                Bloom = lists:foldl(fun(Key, B) -> add(Key, B) end,
                                    Bloom0, Keys),
                F = fun(X) -> member(X, Bloom) end,
                lists:all(F, Keys)
            end).
-endif.
-endif.
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(ets_lru).
-behaviour(gen_server).
-vsn(2).
-export([
start_link/2,
stop/1,
insert/3,
lookup/2,
match/3,
match_object/3,
remove/2,
clear/1,
% Dirty functions read straight from
% the ETS tables which means there are
% race conditions with concurrent access.
lookup_d/2
]).
-export([
init/1,
terminate/2,
handle_call/3,
handle_cast/2,
handle_info/2,
code_change/3
]).
-define(DEFAULT_TIME_UNIT, millisecond).

-type time_value() :: integer().
%% A monotonic timestamp paired with a unique integer so two events can
%% never collide on the same ordered-set table key.
-type strict_monotonic_time() :: {time_value(), integer()}.

%% One cached object.
-record(entry, {
    key :: term(),
    val :: term(),
    atime :: strict_monotonic_time(), % last access time
    ctime :: strict_monotonic_time()  % creation time
}).

%% Server state: the objects table plus two ordered index tables keyed by
%% access time and creation time, and the configured eviction limits
%% (undefined means the corresponding policy is disabled).
-record(st, {
    objects,
    atimes,
    ctimes,
    max_objs :: non_neg_integer() | undefined,
    max_size :: non_neg_integer() | undefined,
    max_lifetime :: non_neg_integer() | undefined,
    time_unit = ?DEFAULT_TIME_UNIT :: atom()
}).

%% Starts the LRU server registered locally under Name; the backing ETS
%% tables are named after it.
start_link(Name, Options) when is_atom(Name) ->
    gen_server:start_link({local, Name}, ?MODULE, {Name, Options}, []).

stop(LRU) ->
    gen_server:cast(LRU, stop).

lookup(LRU, Key) ->
    gen_server:call(LRU, {lookup, Key}).

insert(LRU, Key, Val) ->
    gen_server:call(LRU, {insert, Key, Val}).

remove(LRU, Key) ->
    gen_server:call(LRU, {remove, Key}).
%% @doc match/3 provides an efficient way to retrieve parts of the
%% keys and values without copying or requiring circumvention of the
%% ets_lru API. The KeySpec and ValueSpec parameters are used as part
%% of one larger match spec so keep in mind that all capturing
%% placeholders will be aliased between the key and value parts.
-spec match(atom() | pid(), term(), term()) -> [[any()]].
match(LRU, KeySpec, ValueSpec) ->
    gen_server:call(LRU, {match, KeySpec, ValueSpec}).

%% @doc match_object/3 provides an efficient way to retrieve multiple
%% values using match conditions. The KeySpec and ValueSpec parameters
%% are used as part of one larger match spec so keep in mind that all
%% capturing placeholders will be aliased between the key and value
%% parts.
-spec match_object(atom() | pid(), term(), term()) -> [any()].
%% Named-server fast path: read the protected ETS table directly (a
%% dirty read, see the export list) and refresh access times via
%% asynchronous casts.
match_object(Name, KeySpec, ValueSpec) when is_atom(Name) ->
    Pattern = #entry{key=KeySpec, val=ValueSpec, _='_'},
    Entries = ets:match_object(obj_table(Name), Pattern),
    lists:map(fun(#entry{key=Key,val=Val}) ->
        gen_server:cast(Name, {accessed, Key}),
        Val
    end, Entries);
match_object(LRU, KeySpec, ValueSpec) ->
    gen_server:call(LRU, {match_object, KeySpec, ValueSpec}).

clear(LRU) ->
    gen_server:call(LRU, clear).

%% Dirty lookup against the named table; the access time is refreshed
%% via an async cast, so LRU order lags slightly behind reads.
lookup_d(Name, Key) when is_atom(Name) ->
    case ets:lookup(obj_table(Name), Key) of
        [#entry{val=Val}] ->
            gen_server:cast(Name, {accessed, Key}),
            {ok, Val};
        [] ->
            not_found
    end.
init({Name, Options}) ->
    St = set_options(#st{}, Options),
    %% Tables are protected + named so the dirty read paths (lookup_d/2,
    %% match_object/3 with an atom name) can read ETS directly.
    ObjOpts = [set, named_table, protected, {keypos, #entry.key}],
    TimeOpts = [ordered_set, named_table, protected],
    {ok, St#st{
        objects = ets:new(obj_table(Name), ObjOpts),
        atimes = ets:new(at_table(Name), TimeOpts),
        ctimes = ets:new(ct_table(Name), TimeOpts)
    }}.

terminate(_Reason, St) ->
    true = ets:delete(St#st.objects),
    true = ets:delete(St#st.atimes),
    true = ets:delete(St#st.ctimes),
    ok.
%% lookup: return the cached value and refresh its access time. The 0
%% timeout in the reply tuple forces an immediate timeout message, so
%% eviction (trim/1 via handle_info) runs right after the call.
handle_call({lookup, Key}, _From, St) ->
    Reply = case ets:lookup(St#st.objects, Key) of
        [#entry{val=Val} | _] ->
            accessed(Key, St),
            {ok, Val};
        [] ->
            not_found
    end,
    {reply, Reply, St, 0};
%% match_object: every matched entry counts as an access for LRU order.
handle_call({match_object, KeySpec, ValueSpec}, _From, St) ->
    Pattern = #entry{key=KeySpec, val=ValueSpec, _='_'},
    Entries = ets:match_object(St#st.objects, Pattern),
    Values = lists:map(fun(#entry{key=Key,val=Val}) ->
        accessed(Key, St),
        Val
    end, Entries),
    {reply, Values, St, 0};
%% match: does not touch access times.
handle_call({match, KeySpec, ValueSpec}, _From, St) ->
    Pattern = #entry{key=KeySpec, val=ValueSpec, _='_'},
    Values = ets:match(St#st.objects, Pattern),
    {reply, Values, St, 0};
handle_call({insert, Key, Val}, _From, St) ->
    NewATime = strict_monotonic_time(St#st.time_unit),
    Pattern = #entry{key=Key, atime='$1', _='_'},
    case ets:match(St#st.objects, Pattern) of
        [[ATime]] ->
            %% Existing key: replace the value and refresh the access
            %% time; the creation time is intentionally left unchanged.
            Update = {#entry.val, Val},
            true = ets:update_element(St#st.objects, Key, Update),
            true = ets:delete(St#st.atimes, ATime),
            true = ets:insert(St#st.atimes, {NewATime, Key});
        [] ->
            Entry = #entry{key=Key, val=Val, atime=NewATime, ctime=NewATime},
            true = ets:insert(St#st.objects, Entry),
            true = ets:insert(St#st.atimes, {NewATime, Key}),
            true = ets:insert(St#st.ctimes, {NewATime, Key})
    end,
    {reply, ok, St, 0};
handle_call({remove, Key}, _From, St) ->
    Pattern = #entry{key=Key, atime='$1', ctime='$2', _='_'},
    Reply = case ets:match(St#st.objects, Pattern) of
        [[ATime, CTime]] ->
            true = ets:delete(St#st.objects, Key),
            true = ets:delete(St#st.atimes, ATime),
            true = ets:delete(St#st.ctimes, CTime),
            ok;
        [] ->
            not_found
    end,
    {reply, Reply, St, 0};
handle_call(clear, _From, St) ->
    true = ets:delete_all_objects(St#st.objects),
    true = ets:delete_all_objects(St#st.atimes),
    true = ets:delete_all_objects(St#st.ctimes),
    % No need to timeout here and evict cache
    % entries because its now empty.
    {reply, ok, St};
handle_call(Msg, _From, St) ->
    {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
%% accessed casts are sent by the dirty read paths to refresh LRU order
%% asynchronously.
handle_cast({accessed, Key}, St) ->
    accessed(Key, St),
    {noreply, St, 0};
handle_cast(stop, St) ->
    {stop, normal, St};
handle_cast(Msg, St) ->
    {stop, {invalid_cast, Msg}, St}.

%% All eviction work funnels through here via the zero timeouts attached
%% to the reply tuples above.
handle_info(timeout, St) ->
    trim(St),
    {noreply, St, next_timeout(St)};
handle_info(Msg, St) ->
    {stop, {invalid_info, Msg}, St}.

code_change(_OldVsn, St, _Extra) ->
    {ok, St}.

%% Refreshes the access time of Key in both the objects table and the
%% atimes index. A miss is silently ignored: the entry may have been
%% evicted between a dirty read and this update.
accessed(Key, St) ->
    Pattern = #entry{key=Key, atime='$1', _='_'},
    case ets:match(St#st.objects, Pattern) of
        [[ATime]] ->
            NewATime = strict_monotonic_time(St#st.time_unit),
            Update = {#entry.atime, NewATime},
            true = ets:update_element(St#st.objects, Key, Update),
            true = ets:delete(St#st.atimes, ATime),
            true = ets:insert(St#st.atimes, {NewATime, Key}),
            ok;
        [] ->
            ok
    end.
%% Applies every configured eviction policy.
trim(St) ->
    trim_count(St),
    trim_size(St),
    trim_lifetime(St).

%% Evicts least-recently-used entries until the object count is within
%% max_objs (no-op when unconfigured).
trim_count(#st{max_objs=undefined}) ->
    ok;
trim_count(#st{max_objs=Max}=St) ->
    case ets:info(St#st.objects, size) > Max of
        true ->
            drop_lru(St, fun trim_count/1);
        false ->
            ok
    end.

%% Evicts least-recently-used entries until the objects table memory (as
%% reported by ets:info(_, memory)) is within max_size (no-op when
%% unconfigured).
trim_size(#st{max_size=undefined}) ->
    ok;
trim_size(#st{max_size=Max}=St) ->
    case ets:info(St#st.objects, memory) > Max of
        true ->
            drop_lru(St, fun trim_size/1);
        false ->
            ok
    end.

%% Evicts entries older than max_lifetime, walking the creation-time
%% index from oldest to newest until a young-enough entry is found
%% (no-op when unconfigured).
trim_lifetime(#st{max_lifetime=undefined}) ->
    ok;
trim_lifetime(#st{max_lifetime=Max}=St) ->
    Now = erlang:monotonic_time(St#st.time_unit),
    case ets:first(St#st.ctimes) of
        '$end_of_table' ->
            ok;
        CTime = {Time, _} ->
            case Now - Time > Max of
                true ->
                    [{CTime, Key}] = ets:lookup(St#st.ctimes, CTime),
                    Pattern = #entry{key=Key, atime='$1', _='_'},
                    [[ATime]] = ets:match(St#st.objects, Pattern),
                    true = ets:delete(St#st.objects, Key),
                    true = ets:delete(St#st.atimes, ATime),
                    true = ets:delete(St#st.ctimes, CTime),
                    trim_lifetime(St);
                false ->
                    ok
            end
    end.

%% Removes the entry with the oldest access time, then recurses through
%% Continue so the calling policy can re-check its limit.
drop_lru(St, Continue) ->
    case ets:first(St#st.atimes) of
        '$end_of_table' ->
            empty;
        ATime ->
            [{ATime, Key}] = ets:lookup(St#st.atimes, ATime),
            Pattern = #entry{key=Key, ctime='$1', _='_'},
            [[CTime]] = ets:match(St#st.objects, Pattern),
            true = ets:delete(St#st.objects, Key),
            true = ets:delete(St#st.atimes, ATime),
            true = ets:delete(St#st.ctimes, CTime),
            Continue(St)
    end.
%% Computes the gen_server timeout until the oldest entry expires, or
%% infinity when no lifetime limit is configured or the cache is empty.
next_timeout(#st{max_lifetime=undefined}) ->
    infinity;
next_timeout(St) ->
    case ets:first(St#st.ctimes) of
        '$end_of_table' ->
            infinity;
        {Time, _} ->
            Now = erlang:monotonic_time(St#st.time_unit),
            TimeDiff = Now - Time,
            erlang:max(St#st.max_lifetime - TimeDiff, 0)
    end.

%% Folds the option list into the state record; unknown options throw
%% {invalid_option, Opt}.
set_options(St, []) ->
    St;
set_options(St, [{max_objects, N} | Rest]) when is_integer(N), N >= 0 ->
    set_options(St#st{max_objs=N}, Rest);
set_options(St, [{max_size, N} | Rest]) when is_integer(N), N >= 0 ->
    set_options(St#st{max_size=N}, Rest);
set_options(St, [{max_lifetime, N} | Rest]) when is_integer(N), N >= 0 ->
    set_options(St#st{max_lifetime=N}, Rest);
set_options(St, [{time_unit, T} | Rest]) when is_atom(T) ->
    set_options(St#st{time_unit=T}, Rest);
set_options(_, [Opt | _]) ->
    throw({invalid_option, Opt}).
%% Names of the three ETS tables backing an LRU instance, derived from
%% the registered server name.
obj_table(Name) ->
    table_name(Name, "_objects").

at_table(Name) ->
    table_name(Name, "_atimes").

ct_table(Name) ->
    table_name(Name, "_ctimes").

%% Builds the table atom <Base><Suffix>. Only ever called with the small
%% fixed set of suffixes above, so atom creation is bounded.
table_name(Base, Suffix) ->
    Chars = atom_to_list(Base) ++ Suffix,
    list_to_atom(Chars).

%% A strictly monotonic timestamp: monotonic_time/1 alone can return the
%% same value twice, so it is paired with a monotonically increasing
%% unique integer to break ties.
-spec strict_monotonic_time(atom()) -> strict_monotonic_time().
strict_monotonic_time(Unit) ->
    Time = erlang:monotonic_time(Unit),
    Tiebreak = erlang:unique_integer([monotonic]),
    {Time, Tiebreak}.
%% @doc
%% Summary metric, to track the size of events and report quantiles
%% Based on prometheus_summary
%%
%% Example use cases for Summaries:
%% - Response latency;
%% - Request size;
%% - Response size.
%%
%% Example:
%% <pre lang="erlang">
%% -module(my_proxy_instrumenter).
%%
%% setup() ->
%% prometheus_quantile_summary:declare([{name, request_size_bytes},
%% {help, "Request size in bytes."}]),
%% prometheus_quantile_summary:declare([{name, response_size_bytes},
%% {help, "Response size in bytes."}]).
%%
%% observe_request(Size) ->
%% prometheus_quantile_summary:observe(request_size_bytes, Size).
%%
%% observe_response(Size) ->
%% prometheus_quantile_summary:observe(response_size_bytes, Size).
%% </pre>
%%
%% Reports:
%% request_size_bytes_size
%% request_size_bytes_count
%% request_size_bytes{quantile="0.5"}
%% request_size_bytes{quantile="0.9"}
%% request_size_bytes{quantile="0.95"}
%% @end
-module(prometheus_quantile_summary).
%%% metric
-export([new/1,
declare/1,
deregister/1,
deregister/2,
set_default/2,
observe/2,
observe/3,
observe/4,
observe_duration/2,
observe_duration/3,
observe_duration/4,
remove/1,
remove/2,
remove/3,
reset/1,
reset/2,
reset/3,
value/1,
value/2,
value/3,
values/2]).
%%% collector
-export([deregister_cleanup/1,
collect_mf/2,
collect_metrics/2]).
-include("prometheus.hrl").
-include_lib("quantile_estimator/include/quantile_estimator.hrl").
-behaviour(prometheus_metric).
-behaviour(prometheus_collector).
%%====================================================================
%% Macros
%%====================================================================
-define(TABLE, ?PROMETHEUS_QUANTILE_SUMMARY_TABLE).
-define(SUM_POS, 3).
-define(COUNTER_POS, 2).
-define(QUANTILE_POS, 4).
-define(WIDTH, 16).
%%====================================================================
%% Metric API
%%====================================================================
%% @doc Creates a summary using `Spec'.
%%
%% Raises `{missing_metric_spec_key, Key, Spec}' error if required `Spec' key
%% is missing.<br/>
%% Raises `{invalid_metric_name, Name, Message}' error if metric `Name'
%% is invalid.<br/>
%% Raises `{invalid_metric_help, Help, Message}' error if metric `Help'
%% is invalid.<br/>
%% Raises `{invalid_metric_labels, Labels, Message}' error if `Labels'
%% isn't a list.<br/>
%% Raises `{invalid_label_name, Name, Message}' error if `Name' isn't a valid
%% label name.<br/>
%% Raises `{invalid_value_error, Value, Message}' error if `duration_unit' is
%% unknown or doesn't match metric name.<br/>
%% Raises `{mf_already_exists, {Registry, Name}, Message}' error if a summary
%% with the same `Spec' already exists.
%% @end
new(Spec) ->
    Spec1 = validate_summary_spec(Spec),
    prometheus_metric:insert_new_mf(?TABLE, ?MODULE, Spec1).

%% @doc Creates a summary using `Spec'.
%% If a summary with the same `Spec' exists returns `false'.
%%
%% Raises `{missing_metric_spec_key, Key, Spec}' error if required `Spec' key
%% is missing.<br/>
%% Raises `{invalid_metric_name, Name, Message}' error if metric `Name'
%% is invalid.<br/>
%% Raises `{invalid_metric_help, Help, Message}' error if metric `Help'
%% is invalid.<br/>
%% Raises `{invalid_metric_labels, Labels, Message}' error if `Labels'
%% isn't a list.<br/>
%% Raises `{invalid_label_name, Name, Message}' error if `Name' isn't a valid
%% label name.<br/>
%% Raises `{invalid_value_error, Value, Message}' error if `duration_unit' is
%% unknown or doesn't match metric name.<br/>
%% @end
declare(Spec) ->
    Spec1 = validate_summary_spec(Spec),
    prometheus_metric:insert_mf(?TABLE, ?MODULE, Spec1).
%% @equiv deregister(default, Name)
deregister(Name) ->
    deregister(default, Name).

%% @doc
%% Removes all summary series with name `Name' and
%% removes Metric Family from `Registry'.
%%
%% After this call new/1 for `Name' and `Registry' will succeed.
%%
%% Returns `{true, _}' if `Name' was a registered summary.
%% Otherwise returns `{false, _}'.
%% @end
deregister(Registry, Name) ->
    MFR = prometheus_metric:deregister_mf(?TABLE, Registry, Name),
    NumDeleted = ets:select_delete(?TABLE, deregister_select(Registry, Name)),
    {MFR, NumDeleted > 0}.

%% @private
%% Seeds the label-less series with zeroed count/sum and an empty
%% quantile estimator.
%% NOTE(review): the result of ets:insert_new/2 is discarded, so an
%% already existing series is silently left untouched — confirm intended.
set_default(Registry, Name) ->
    Configuration = get_configuration(Registry, Name),
    #{compress_limit := CompressLimit} = Configuration,
    ets:insert_new(?TABLE, {
        key(Registry, Name, []),
        0,
        0,
        quantile(Configuration),
        CompressLimit}).

%% @equiv observe(default, Name, [], Value)
observe(Name, Value) ->
    observe(default, Name, [], Value).

%% @equiv observe(default, Name, LabelValues, Value)
observe(Name, LabelValues, Value) ->
    observe(default, Name, LabelValues, Value).

%% @doc Observes the given `Value'.
%%
%% Raises `{invalid_value, Value, Message}' if `Value'
%% isn't a number.<br/>
%% Raises `{unknown_metric, Registry, Name}' error if summary with named `Name'
%% can't be found in `Registry'.<br/>
%% Raises `{invalid_metric_arity, Present, Expected}' error if labels count
%% mismatch.
%% @end
observe(Registry, Name, LabelValues, Value) when is_number(Value) ->
    Key = key(Registry, Name, LabelValues),
    %% NOTE(review): the lookup/insert pair below is a non-atomic
    %% read-modify-write; concurrent observers landing on the same
    %% scheduler sub-key could lose an update — confirm acceptable.
    case ets:lookup(?TABLE, Key) of
        [] -> insert_metric(Registry, Name, LabelValues, Value, fun observe/4);
        [{Key, Count, S, Q, CompressLimit}] ->
            ets:insert(?TABLE, {Key, Count + 1, S + Value, quantile_add(Q, Value, CompressLimit), CompressLimit})
    end,
    ok;
observe(_Registry, _Name, _LabelValues, Value) ->
    erlang:error({invalid_value, Value, "observe accepts only numbers"}).
%% @equiv observe_duration(default, Name, [], Fun)
observe_duration(Name, Fun) ->
    observe_duration(default, Name, [], Fun).

%% @equiv observe_duration(default, Name, LabelValues, Fun)
observe_duration(Name, LabelValues, Fun) ->
    observe_duration(default, Name, LabelValues, Fun).

%% @doc Tracks the amount of time spent executing `Fun'.
%%
%% Raises `{unknown_metric, Registry, Name}' error if summary with named `Name'
%% can't be found in `Registry'.<br/>
%% Raises `{invalid_metric_arity, Present, Expected}' error if labels count
%% mismatch.
%% Raises `{invalid_value, Value, Message}' if `Fun'
%% isn't a function.<br/>
%% @end
%%
%% The duration is observed in native time units even when Fun raises,
%% thanks to the try/after.
observe_duration(Registry, Name, LabelValues, Fun) when is_function(Fun) ->
    Start = erlang:monotonic_time(),
    try
        Fun()
    after
        observe(Registry, Name, LabelValues, erlang:monotonic_time() - Start)
    end;
%% Fixed: the first argument was previously misspelled `_Regsitry'.
observe_duration(_Registry, _Name, _LabelValues, Fun) ->
    erlang:error({invalid_value, Fun, "observe_duration accepts only functions"}).
%% @equiv remove(default, Name, [])
remove(Name) ->
    remove(default, Name, []).

%% @equiv remove(default, Name, LabelValues)
remove(Name, LabelValues) ->
    remove(default, Name, LabelValues).

%% @doc Removes summary series identified by `Registry', `Name'
%% and `LabelValues'.
%%
%% Raises `{unknown_metric, Registry, Name}' error if summary with name `Name'
%% can't be found in `Registry'.<br/>
%% Raises `{invalid_metric_arity, Present, Expected}' error if labels count
%% mismatch.
%% @end
remove(Registry, Name, LabelValues) ->
    prometheus_metric:check_mf_exists(?TABLE, Registry, Name, LabelValues),
    %% The series is spread over one row per scheduler sub-key; take
    %% them all and report whether anything was actually stored.
    case lists:flatten([ets:take(?TABLE,
                                 {Registry, Name, LabelValues, Scheduler})
                        || Scheduler <- schedulers_seq()]) of
        [] -> false;
        _ -> true
    end.

%% @equiv reset(default, Name, [])
reset(Name) ->
    reset(default, Name, []).

%% @equiv reset(default, Name, LabelValues)
reset(Name, LabelValues) ->
    reset(default, Name, LabelValues).

%% @doc Resets the value of the summary identified by `Registry', `Name'
%% and `LabelValues'.
%%
%% Raises `{unknown_metric, Registry, Name}' error if summary with name `Name'
%% can't be found in `Registry'.<br/>
%% Raises `{invalid_metric_arity, Present, Expected}' error if labels count
%% mismatch.
%% @end
reset(Registry, Name, LabelValues) ->
    MF = prometheus_metric:check_mf_exists(?TABLE, Registry, Name, LabelValues),
    Configuration = prometheus_metric:mf_data(MF),
    %% update_element/3 is false for sub-keys never written; the usort
    %% collapses the per-scheduler results, so any true means at least
    %% one row was reset.
    case lists:usort([ets:update_element(?TABLE,
                                         {Registry, Name, LabelValues, Scheduler},
                                         [{?COUNTER_POS, 0}, {?SUM_POS, 0}, {?QUANTILE_POS, quantile(Configuration)}])
                      || Scheduler <- schedulers_seq()]) of
        [_, _] -> true;
        [true] -> true;
        _ -> false
    end.

%% @equiv value(default, Name, [])
value(Name) ->
    value(default, Name, []).

%% @equiv value(default, Name, LabelValues)
value(Name, LabelValues) ->
    value(default, Name, LabelValues).

%% @doc Returns the value of the summary identified by `Registry', `Name'
%% and `LabelValues'. If there is no summary for `LabelValues',
%% returns `undefined'.
%%
%% If duration unit set, sum will be converted to the duration unit.
%% {@link prometheus_time. Read more here.}
%%
%% Raises `{unknown_metric, Registry, Name}' error if summary named `Name'
%% can't be found in `Registry'.<br/>
%% Raises `{invalid_metric_arity, Present, Expected}' error if labels count
%% mismatch.
%% @end
value(Registry, Name, LabelValues) ->
    MF = prometheus_metric:check_mf_exists(?TABLE, Registry, Name, LabelValues),
    DU = prometheus_metric:mf_duration_unit(MF),
    #{quantiles := QNs} = prometheus_metric:mf_data(MF),
    %% Collect the [Count, Sum, Estimator] triples of every scheduler
    %% sub-key and merge them into one result.
    case ets:select(?TABLE, [{{{Registry, Name, LabelValues, '_'}, '$1', '$2', '$3', '_'},
                              [],
                              ['$$']}]) of
        [] -> undefined;
        Values -> {Count, Sum, QE} = reduce_values(Values),
                  {Count, prometheus_time:maybe_convert_to_du(DU, Sum), quantile_values(QE, QNs)}
    end.
%% Returns one {Labels, Count, Sum, Quantiles} tuple per label
%% combination of the metric, merging the per-scheduler rows first.
values(Registry, Name) ->
    case prometheus_metric:check_mf_exists(?TABLE, Registry, Name) of
        false -> [];
        MF ->
            DU = prometheus_metric:mf_duration_unit(MF),
            Labels = prometheus_metric:mf_labels(MF),
            #{quantiles := QNs} = Configuration = prometheus_metric:mf_data(MF),
            MFValues = load_all_values(Registry, Name),
            %% Merge the per-scheduler rows of each label combination.
            ReducedMap = lists:foldl(
                fun([L, C, S, QE], ResAcc) ->
                    {PrevCount, PrevSum, PrevQE} = maps:get(L, ResAcc, {0, 0, quantile(Configuration)}),
                    ResAcc#{L => {PrevCount + C, PrevSum + S, quantile_merge(PrevQE, QE)}}
                end,
                #{},
                MFValues),
            ReducedMapList = lists:sort(maps:to_list(ReducedMap)),
            lists:foldr(
                fun({LabelValues, {Count, Sum, QE}}, Acc) ->
                    [{lists:zip(Labels, LabelValues), Count,
                      prometheus_time:maybe_convert_to_du(DU, Sum),
                      quantile_values(QE, QNs)} | Acc]
                end,
                [],
                ReducedMapList)
    end.
%%====================================================================
%% Collector API
%%====================================================================

%% @private
%% Drops the registry's metric families and all of their stored series.
deregister_cleanup(Registry) ->
    prometheus_metric:deregister_mf(?TABLE, Registry),
    true = ets:match_delete(?TABLE, {{Registry, '_', '_', '_'}, '_', '_', '_', '_'}),
    ok.

%% @private
collect_mf(Registry, Callback) ->
    [Callback(create_summary(Name, Help, {CLabels, Labels, Registry, DU, Data})) ||
     [Name, {Labels, Help}, CLabels, DU, Data] <- prometheus_metric:metrics(?TABLE,
                                                                            Registry)],
    ok.

%% @private
%% Same per-label reduction as values/2, but emits prometheus model
%% summary metrics instead of plain tuples.
collect_metrics(Name, {CLabels, Labels, Registry, DU, Configuration}) ->
    #{quantiles := QNs} = Configuration,
    MFValues = load_all_values(Registry, Name),
    ReducedMap = lists:foldl(
        fun([L, C, S, QE], ResAcc) ->
            {PrevCount, PrevSum, PrevQE} = maps:get(L, ResAcc, {0, 0, quantile(Configuration)}),
            ResAcc#{L => {PrevCount + C, PrevSum + S, quantile_merge(PrevQE, QE)}}
        end,
        #{},
        MFValues),
    ReducedMapList = lists:sort(maps:to_list(ReducedMap)),
    lists:foldr(
        fun({LabelValues, {Count, Sum, QE}}, Acc) ->
            [prometheus_model_helpers:summary_metric(
               CLabels ++ lists:zip(Labels, LabelValues), Count,
               prometheus_time:maybe_convert_to_du(DU, Sum),
               quantile_values(QE, QNs)) | Acc]
        end,
        [],
        ReducedMapList).
%%====================================================================
%% Private Parts
%%====================================================================

%% Match spec selecting every stored series of the given metric, across
%% all label values and scheduler sub-keys.
deregister_select(Registry, Name) ->
    SeriesKey = {Registry, Name, '_', '_'},
    Head = {SeriesKey, '_', '_', '_', '_'},
    [{Head, [], [true]}].
%% Validates a metric spec and attaches the precomputed estimator
%% configuration (quantiles, invariant, compress limit) as mf data.
validate_summary_spec(Spec) ->
    Labels = prometheus_metric_spec:labels(Spec),
    validate_summary_labels(Labels),
    {Invariant, QNs} = invariant_and_quantiles_from_spec(Spec),
    CompressLimit = compress_limit_from_spec(Spec),
    [
     {data,
      #{quantiles => QNs,
        invariant => Invariant,
        compress_limit => CompressLimit}}
     | Spec
    ].

%% Rejects the reserved "quantile" label, which this collector emits
%% itself.
%% NOTE(review): the comparison is against the string "quantile"; a label
%% given in atom or binary form would pass — confirm what representations
%% prometheus_metric_spec:labels/1 can return.
validate_summary_labels(Labels) ->
    [raise_error_if_quantile_label_found(Label) || Label <- Labels].

raise_error_if_quantile_label_found("quantile") ->
    erlang:error({invalid_metric_label_name, "quantile",
                  "summary cannot have a label named \"quantile\""});
raise_error_if_quantile_label_found(Label) ->
    Label.

%% Creates the series row for a first observation. ets:insert_new/2
%% detects a concurrent insert of the same sub-key, in which case
%% ConflictCB retries the operation against the now-existing row.
insert_metric(Registry, Name, LabelValues, Value, ConflictCB) ->
    MF = prometheus_metric:check_mf_exists(?TABLE, Registry, Name, LabelValues),
    Configuration = prometheus_metric:mf_data(MF),
    #{compress_limit := CompressLimit} = Configuration,
    Quantile = quantile(Configuration, Value),
    case ets:insert_new(?TABLE, {key(Registry, Name, LabelValues), 1, Value, Quantile, CompressLimit}) of
        false -> %% some sneaky process already inserted
            ConflictCB(Registry, Name, LabelValues, Value);
        true ->
            ok
    end.
%% Loads [LabelValues, Count, Sum, Estimator] rows for every scheduler
%% sub-key of every series of the metric.
load_all_values(Registry, Name) ->
    ets:match(?TABLE, {{Registry, Name, '$1', '_'}, '$2', '$3', '$4', '_'}).

get_configuration(Registry, Name) ->
    MF = prometheus_metric:check_mf_exists(?TABLE, Registry, Name),
    prometheus_metric:mf_data(MF).

schedulers_seq() ->
    lists:seq(0, ?WIDTH-1).

%% Spreads writes for one series over ?WIDTH sub-keys, selected from the
%% current scheduler id, to reduce ETS write contention.
key(Registry, Name, LabelValues) ->
    X = erlang:system_info(scheduler_id),
    Rnd = X band (?WIDTH-1),
    {Registry, Name, LabelValues, Rnd}.

%% Sums counts and sums and merges the quantile estimators of all
%% sub-key rows of one series.
reduce_values(Values) ->
    {lists:sum([C || [C, _, _] <- Values]),
     lists:sum([S || [_, S, _] <- Values]),
     fold_quantiles([Q || [_C, _S, Q] <- Values])}.

create_summary(Name, Help, Data) ->
    prometheus_model_helpers:create_mf(Name, Help, summary, ?MODULE, Data).

default_compress_limit() -> 100.
%% Extracts the quantile targets from the spec (defaulting to
%% default_targets/0) and precomputes the estimator invariant plus the
%% list of quantiles to report.
invariant_and_quantiles_from_spec(Spec) ->
    Targets = prometheus_metric_spec:get_value(targets, Spec, default_targets()),
    validate_targets(Targets),
    {QNs, _} = lists:unzip(Targets),
    Invariant = quantile_estimator:f_targeted(Targets),
    {Invariant, QNs}.

compress_limit_from_spec(Spec) ->
    prometheus_metric_spec:get_value(compress_limit, Spec, default_compress_limit()).
%% validate_targets checks that quantile targets are a non-empty list of
%% {Quantile :: float(), Error :: float()} tuples, raising
%% {invalid_targets, Reason} otherwise. Returns ok on success.
%%
%% An empty list is now rejected explicitly: it previously slipped
%% through and produced an estimator configuration with no quantiles to
%% report.
validate_targets([]) ->
    erlang:error({invalid_targets, "targets cannot be empty"});
validate_targets(Targets) when is_list(Targets) ->
    lists:foreach(
      fun
          ({Q, _E}) when not is_float(Q) ->
              erlang:error({invalid_targets, "target quantile value should be float"});
          ({_Q, E}) when not is_float(E) ->
              erlang:error({invalid_targets, "target error value should be float"});
          ({_, _}) ->
              ok;
          (_) ->
              erlang:error({invalid_targets, "targets should be tuples of quantile and error"})
      end,
      Targets);
validate_targets(_Targets) ->
    erlang:error({invalid_targets, "targets should be a list of tuples"}).

%% Default targets: median within 2%, 90th percentile within 1%, 95th
%% percentile within 0.5%.
default_targets() ->
    [{0.5, 0.02}, {0.9, 0.01}, {0.95, 0.005}].
%% Fresh, empty quantile estimator for the metric's invariant.
quantile(#{invariant := Invariant}) ->
    quantile_estimator:new(Invariant).

%% Fresh estimator seeded with a single observation.
quantile(Configuration, Val) ->
    quantile_estimator:insert(Val, quantile(Configuration)).

%% Inserts a value, compressing the estimator first once more than
%% CompressLimit inserts have accumulated since the last compression.
quantile_add(Q = #quantile_estimator{inserts_since_compression = ISS}, Val, CompressLimit) ->
    Q1 = case ISS > CompressLimit of
        true -> quantile_estimator:compress(Q);
        false -> Q
    end,
    quantile_estimator:insert(Val, Q1).

%% Quantile estimator throws on empty stats
quantile_values(#quantile_estimator{data = []}, _QNs) ->
    [];
quantile_values(Q, QNs) ->
    [{QN, quantile_estimator:quantile(QN, Q)} || QN <- QNs].

%% Merges a list of estimators pairwise; yields the atom init for an
%% empty input list (callers guard against that case).
fold_quantiles(QList) ->
    lists:foldl(
      fun
          (Q, init) -> Q;
          (Q1, Q2) -> quantile_merge(Q1, Q2)
      end,
      init,
      QList).
%% quantile_merge combines two quantile estimators by concatenating their
%% sample data and compressing the result. The invariant of the first
%% estimator is kept; both estimators originate from the same metric
%% configuration and thus share the same invariant.
%%
%% (Also repairs the source: extraneous non-Erlang text fused onto the
%% closing line made the function unparsable.)
quantile_merge(QE1, QE2) ->
    #quantile_estimator{samples_count = N1, data = Data1, invariant = Invariant} = QE1,
    #quantile_estimator{samples_count = N2, data = Data2} = QE2,
    quantile_estimator:compress(#quantile_estimator{
        samples_count = N1 + N2,
        data = Data1 ++ Data2,
        invariant = Invariant
    }).
%% vim: set ai et sw=4 sts=4:
%% See LICENSE for licensing information.
% This module has been derived from:
% https://github.com/marco-m/mock_io
% Copyright (c) 2015, <NAME>
-module(solarized_capture).
-export([ output/1
, output/3
, result_and_output/1
, result_and_output/3
]).
-export_type([ geometry/0
]).
-type geometry() ::
pos_integer()
| enotsup
| {ok, pos_integer()}
| {error, enotsup}.
-type internal_geometry() ::
pos_integer()
| {error, enotsup}.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
%=======================================================================
%% State held by the mock group-leader process.
-record(state,
    { output = <<>>   % accumulated bytes written via put_chars requests
    , binary = false  % NOTE(review): not referenced in the visible code -- confirm use
    , columns = 80    % value replied to io:columns/0 (get_geometry) requests
    , rows = 25       % value replied to io:rows/0 (get_geometry) requests
    }).
%=======================================================================
-spec output(Test) -> Output
    when
        Test :: fun(() -> Result),
        Result :: term(),
        Output :: binary().
%% Run Test with a mock group leader and return everything it wrote,
%% using the real terminal's current columns/rows for the mock geometry.
output(Test) when is_function(Test, 0) ->
    output(Test, io:columns(), io:rows()).
%=======================================================================
-spec output(Test, Columns, Rows) -> Output
    when
        Test :: fun(() -> Result),
        Columns :: geometry(),
        Rows :: geometry(),
        Result :: term(),
        Output :: binary().
%% Run Test under a mock group leader with the given geometry, returning
%% only the captured output and discarding the test's result.
output(Test, Columns, Rows) when is_function(Test, 0) ->
    element(2, result_and_output(Test, Columns, Rows)).
%=======================================================================
-spec result_and_output(Test) -> {Result, Output}
    when
        Test :: fun(() -> Result),
        Result :: term(),
        Output :: binary().
%% Run Test under a mock group leader; return its result together with
%% everything it wrote. Geometry defaults to the real terminal's
%% io:columns/0 and io:rows/0 answers.
result_and_output(Test) when is_function(Test, 0) ->
    result_and_output(Test, io:columns(), io:rows()).
%=======================================================================
-spec result_and_output(Test, Columns, Rows) -> {Result, Output}
    when
        Test :: fun(() -> Result),
        Columns :: geometry(),
        Rows :: geometry(),
        Result :: term(),
        Output :: binary().
%% Run Test under a mock group leader that answers io:columns/0 and
%% io:rows/0 with the supplied geometry (see geometry/1 for the accepted
%% forms; anything else throws badarg before the test is run).
result_and_output(Test, Columns, Rows) when is_function(Test, 0) ->
    State = #state{columns = geometry(Columns), rows = geometry(Rows)},
    mock(Test, State).
%=======================================================================
%% Normalise a user-supplied geometry to the internal representation:
%% a positive integer, or {error, enotsup}. Both the bare and the
%% {ok, N} / enotsup wrapped forms are accepted; anything else throws
%% badarg.
-spec geometry(geometry()) -> internal_geometry().
geometry(Value) ->
    case Value of
        N when is_integer(N) andalso N > 0 ->
            N;
        enotsup ->
            {error, enotsup};
        {ok, N} when is_integer(N) andalso N > 0 ->
            N;
        {error, enotsup} = Error ->
            Error;
        _ ->
            throw(badarg)
    end.
%-----------------------------------------------------------------------
%% Swap this process's group leader for a fresh mock IO server, run Test,
%% then restore the original leader and collect what the mock captured.
mock(Test, State) ->
    Was = erlang:group_leader(),
    Mock = spawn_link(fun () -> mock_loop(State) end),
    true = erlang:group_leader(Mock, self()),
    Result = try
        Test()
    after
        %% Always restore the real group leader, even if Test throws.
        %% NOTE(review): if Test does throw, the exception propagates from
        %% here before the mock is stopped; the linked mock process is then
        %% torn down with its owner -- confirm that is the intended cleanup.
        true = erlang:group_leader(Was, self())
    end,
    {ok, Output} = mock_call(Mock, stop),
    {Result, Output}.
%=======================================================================
%% Mock IO server loop: serves one control message ({mock, ...}) or one
%% standard io-protocol request ({io_request, ...}) per iteration; the
%% handlers recurse back into mock_loop/1 except for the stop request.
mock_loop(State) ->
    receive
        {mock, From, Request} ->
            mock_request(From, Request, State);
        {io_request, From, Opaque, Request} ->
            io_request(From, Opaque, Request, State)
    end.
%-----------------------------------------------------------------------
%% Control-channel handler: 'stop' replies with the captured output and
%% lets the loop terminate (no recursion back into mock_loop/1).
mock_request(From, stop, State) ->
    mock_reply(From, {ok, State#state.output}),
    stop.
%-----------------------------------------------------------------------
%% Dispatch the subset of the Erlang io protocol the mock understands:
%% put_chars (direct and M/F/A forms) and get_geometry. Anything else --
%% including all input requests -- is answered with {error, enotsup}.
io_request(From, Opaque, {put_chars, Encoding, Chars}, State) ->
    io_put_chars(From, Opaque, Encoding, Chars, State);
io_request(From, Opaque, {put_chars, Encoding, M, F, A}, State) ->
    io_put_chars(From, Opaque, Encoding, M, F, A, State);
io_request(From, Opaque, {get_geometry, Geometry}, State) ->
    io_get_geometry(From, Opaque, Geometry, State);
io_request(From, Opaque, _Request, State) ->
    io_reply(From, Opaque, {error, enotsup}),
    mock_loop(State).
%-----------------------------------------------------------------------
%% Append the written characters to the captured output. Binaries are
%% appended directly; any other chardata is first flattened to a binary
%% and re-dispatched through the first clause.
io_put_chars(From, Opaque, _Encoding, Chars, State) when is_binary(Chars) ->
    io_reply(From, Opaque, ok),
    Output = <<(State#state.output)/binary, Chars/binary>>,
    mock_loop(State#state{output = Output});
io_put_chars(From, Opaque, Encoding, Chars, State) ->
    io_put_chars(From, Opaque, Encoding, iolist_to_binary(Chars), State).
%-----------------------------------------------------------------------
%% M/F/A form of put_chars. Only {io_lib, format, [Format, Data]} with
%% unicode encoding is handled; any other combination has no matching
%% clause and crashes the mock with function_clause.
%% NOTE(review): presumably that narrow match is intentional for a test
%% double -- confirm no caller uses other M/F/A put_chars forms.
io_put_chars(From, Opaque, unicode, io_lib, format, [Format, Data], State) ->
    Chars = io_lib:format(Format, Data),
    io_put_chars(From, Opaque, unicode, Chars, State).
%-----------------------------------------------------------------------
%% Answer io:columns/0 / io:rows/0 requests from the configured state;
%% the stored value is either a positive integer or {error, enotsup}
%% (see geometry/1). Unknown geometry kinds get {error, enotsup}.
io_get_geometry(From, Opaque, columns, State = #state{columns = Columns}) ->
    io_reply(From, Opaque, Columns),
    mock_loop(State);
io_get_geometry(From, Opaque, rows, State = #state{rows = Rows}) ->
    io_reply(From, Opaque, Rows),
    mock_loop(State);
io_get_geometry(From, Opaque, _Geometry, State) ->
    io_reply(From, Opaque, {error, enotsup}),
    mock_loop(State).
%-----------------------------------------------------------------------
%% Send a standard io-protocol reply back to the requesting process.
io_reply(To, Opaque, Reply) ->
    To ! {io_reply, Opaque, Reply},
    ok.
%-----------------------------------------------------------------------
%% Send a reply on the mock's own control channel, tagged with self().
mock_reply(To, Reply) ->
    To ! {mock, self(), Reply},
    ok.
%-----------------------------------------------------------------------
%% Synchronous call on the mock control channel; matches the reply by the
%% mock's pid and fails loudly after one second rather than hanging.
mock_call(Mock, Request) ->
    Mock ! {mock, self(), Request},
    receive
        {mock, Mock, Response} ->
            Response
    after
        1000 ->
            erlang:error({?MODULE, timeout})
    end.
%=======================================================================
-ifdef(TEST).
%-----------------------------------------------------------------------
%% Plain binary put_chars is captured as-is.
basic_test() ->
    Test = fun () -> io:put_chars(<<"Hello">>) end,
    Expect = <<"Hello">>,
    ?assertEqual(Expect, output(Test)).
%-----------------------------------------------------------------------
%% Nested iolists and io:format output are flattened into one binary.
mixed_test() ->
    Test = fun () ->
        io:put_chars([$H, ["el", $l], <<"o">>]),
        io:format(" ~p!~n", [42])
    end,
    Expect = <<"Hello 42!\n">>,
    ?assertEqual(Expect, output(Test)).
%-----------------------------------------------------------------------
%% Configured columns are reported; rows configured as enotsup error.
columns_test() ->
    Test = fun () ->
        {ok, Columns} = io:columns(),
        {error, Rows} = io:rows(),
        {Columns, Rows}
    end,
    Expect = {{40, enotsup}, <<>>},
    ?assertEqual(Expect, result_and_output(Test, {ok, 40}, enotsup)).
%-----------------------------------------------------------------------
%% Mirror image of columns_test: rows supported, columns not.
rows_test() ->
    Test = fun () ->
        {error, Columns} = io:columns(),
        {ok, Rows} = io:rows(),
        {Columns, Rows}
    end,
    Expect = {{enotsup, 12}, <<>>},
    ?assertEqual(Expect, result_and_output(Test, {error, enotsup}, 12)).
%-----------------------------------------------------------------------
%% Input requests are not supported by the mock and report enotsup.
input_test() ->
    Test = fun () ->
        io:get_chars(prompt, 10)
    end,
    Expect = {{error, enotsup}, <<>>},
    ?assertEqual(Expect, result_and_output(Test)).
%-----------------------------------------------------------------------
-endif.
%% -------------------------------------------------------------------
%%
%% cuttlefish_validator: models a cuttlefish validator
%%
%% Copyright (c) 2013 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(cuttlefish_validator).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-compile(export_all).
-endif.
%% A named validation rule: func is applied to a setting's value and
%% must return a boolean.
-record(validator, {
    name::string(),         % unique identifier used for merge/replace
    description::string(),  % human-readable explanation
    func::fun()             % the predicate itself
}).
-type validator() :: #validator{}.
%% Predicate applied to a candidate value.
-type validator_fun() :: fun((any()) -> boolean()).
%% Shape of a validator as it appears in a schema file, before parsing.
-type raw_validator() :: {validator, string(), string(), validator_fun()}.
-export_type([validator/0]).
-export([
parse/1,
parse_and_merge/2,
is_validator/1,
name/1,
description/1,
func/1,
replace/2]).
%% Convert a raw schema validator tuple into a #validator{} record.
%% Any other input yields {error, {validator_parse, Input}} rather than
%% crashing, so schema errors can be reported by the caller.
-spec parse(raw_validator()) -> validator() | cuttlefish_error:error().
parse({validator, Name, Description, Fun}) ->
    #validator{
        name = Name,
        description = Description,
        func = Fun
    };
parse(X) ->
    {error, {validator_parse, X}}.
%% This assumes it's run as part of a foldl over new schema elements
%% in which case, there's only ever one instance of a key in the list
%% so keyreplace works fine.
%% Note that parse/1's result is inserted unchecked: a malformed source
%% places an {error, _} tuple in the returned list, which is why the
%% spec admits error elements alongside validators.
-spec parse_and_merge(
    raw_validator(), [validator()]) -> [validator()|cuttlefish_error:error()].
parse_and_merge({validator, ValidatorName, _, _} = ValidatorSource, Validators) ->
    NewValidator = parse(ValidatorSource),
    case lists:keyfind(ValidatorName, #validator.name, Validators) of
        false ->
            [ NewValidator | Validators];
        _OldMapping ->
            lists:keyreplace(ValidatorName, #validator.name, Validators, NewValidator)
    end.
%% True when V looks like a validator: any tuple whose first element is
%% the atom 'validator' (records are such tuples at runtime).
-spec is_validator(any()) -> boolean().
is_validator(V) ->
    case V of
        Tuple when is_tuple(Tuple) ->
            element(1, Tuple) =:= validator;
        _ ->
            false
    end.
%% The validator's unique name.
-spec name(validator()) -> string().
name(V) -> V#validator.name.
%% The validator's human-readable description.
-spec description(validator()) -> string().
description(V) -> V#validator.description.
%% The validation predicate.
-spec func(validator()) -> fun().
func(V) -> V#validator.func.
%% Insert Validator into the list, replacing any existing validator that
%% carries the same name; otherwise prepend it.
-spec replace(validator(), [validator()]) -> [validator()].
replace(Validator, ListOfValidators) ->
    Key = name(Validator),
    case lists:keymember(Key, #validator.name, ListOfValidators) of
        true ->
            lists:keyreplace(Key, #validator.name, ListOfValidators, Validator);
        false ->
            [Validator | ListOfValidators]
    end.
-ifdef(TEST).
%% Render an error term to a flat string for comparison in tests.
-define(XLATE(X), lists:flatten(cuttlefish_error:xlate(X))).
%% parse/1 turns a raw tuple into a record with the fun intact.
parse_test() ->
    ValidatorDataStruct = {
        validator,
        "name",
        "description",
        fun(X) -> X*2 end
    },
    Validator = parse(ValidatorDataStruct),
    ?assertEqual("name", Validator#validator.name),
    ?assertEqual("description", Validator#validator.description),
    F = Validator#validator.func,
    ?assertEqual(4, F(2)),
    ok.
%% name/1, description/1 and func/1 read the record fields back.
getter_test() ->
    Validator = #validator{
        name = "name",
        description = "description",
        func = fun(X) -> X*2 end
    },
    ?assertEqual("name", name(Validator)),
    ?assertEqual("description", description(Validator)),
    Fun = func(Validator),
    ?assertEqual(4, Fun(2)),
    ok.
%% replace/2 swaps in the override for the validator with the same name.
replace_test() ->
    Element1 = #validator{
        name = "name18",
        description = "description18",
        func = fun(X) -> X*2 end
    },
    ?assertEqual(4, (Element1#validator.func)(2)),
    Element2 = #validator{
        name = "name1",
        description = "description1",
        func = fun(X) -> X*4 end
    },
    ?assertEqual(8, (Element2#validator.func)(2)),
    SampleValidators = [Element1, Element2],
    Override = #validator{
        name = "name1",
        description = "description1",
        func = fun(X) -> X*5 end
    },
    ?assertEqual(25, (Override#validator.func)(5)),
    NewValidators = replace(Override, SampleValidators),
    ?assertEqual([Element1, Override], NewValidators),
    ok.
%% parse_and_merge/2 replaces an existing entry instead of duplicating it.
remove_duplicates_test() ->
    Sample1 = #validator{
        name = "name1",
        description = "description1",
        func = fun(X) -> X*3 end
    },
    ?assertEqual(6, (Sample1#validator.func)(2)),
    Sample2 = #validator{
        name = "name1",
        description = "description1",
        func = fun(X) -> X*4 end
    },
    ?assertEqual(8, (Sample2#validator.func)(2)),
    SampleValidators = [Sample1, Sample2],
    [NewValidator|_] = parse_and_merge(
        {validator, "name1", "description2", fun(X) -> X*10 end},
        SampleValidators),
    F = func(NewValidator),
    ?assertEqual(50, F(5)),
    ?assertEqual("description2", description(NewValidator)),
    ?assertEqual("name1", name(NewValidator)),
    ok.
%% parse/1 reports malformed input as a translatable error tuple.
parse_error_test() ->
    {ErrorAtom, ErrorTerm} = parse(not_a_raw_validator),
    ?assertEqual(error, ErrorAtom),
    ?assertEqual(
        "Poorly formatted input to cuttlefish_validator:parse/1 : not_a_raw_validator",
        ?XLATE(ErrorTerm)),
    ok.
%% is_validator/1 accepts validator records and rejects everything else.
is_validator_test() ->
    ?assert(not(is_validator(not_a_validator))),
    V = #validator{
        name = "name1",
        description = "description1",
        func = fun(X) -> X*4 end
    },
    ?assertEqual(8, (V#validator.func)(2)),
    ?assert(is_validator(V)),
    ok.
-endif.
%% @author <NAME> <<EMAIL>>
%% @copyright 2014 <NAME>
%% @version 0.2.0
%%
%% @doc
%%
%% The HDR histogram library is an Erlang native interface function wrapper of
%% Mike Barker's C port of Gil Tene's HDR Histogram utility.
%%
%%
%% A high dynamic range histogram is one that supports recording and analyzing
%% sampled data points across a configurable range with configurable precision
%% within that range. The precision is expressed as a number of significant
%% figures in the recording.
%%
%% This HDR histogram implementation is designed for recording histograms of
%% value measurements in latency sensitive environments. Although the native
%% recording times can be as low as single digit nanoseconds there is added
%% overhead in this wrapper/binding due to both the frontend overhead of converting
%% from native C to the NIF interface, and the erlang overhead incurred calling
%% into the NIFs. C'est la vie, I suppose.
%%
%% A distinct advantage of this histogram implementation is constant space and
%% recording (time) overhead with an ability to recycle and reset instances whilst
%% reclaiming already allocated space for reuse thereby reducing allocation cost
%% and garbage collection overhead in the BEAM where repeated or continuous usage
%% is likely. For example, a gen_server recording metrics continuously and resetting
%% and logging histogram dumps on a periodic or other windowed basis.
%%
%% The code is released to the public domain, under the same terms as its
%% sibling projects, as explained in the LICENSE.txt and COPYING.txt in the
%% root of this repository, but normatively at:
%%
%% http://creativecommons.org/publicdomain/zero/1.0/
%%
%% For users of this code who wish to consume it under the "BSD" license
%% rather than under the public domain or CC0 contribution text mentioned
%% above, the code found under this directory is *also* provided under the
%% following license (commonly referred to as the BSD 2-Clause License). This
%% license does not detract from the above stated release of the code into
%% the public domain, and simply represents an additional license granted by
%% http://creativecommons.org/publicdomain/zero/1.0/
%%
%% -----------------------------------------------------------------------------
%% ** Beginning of "BSD 2-Clause License" text. **
%%
%% Copyright (c) 2012, 2013, 2014 <NAME>
%% Copyright (c) 2014 <NAME>
%% Copyright (c) 2014 <NAME>
%% All rights reserved.
%%
%% Redistribution and use in source and binary forms, with or without
%% modification, are permitted provided that the following conditions are met:
%%
%% 1. Redistributions of source code must retain the above copyright notice,
%% this list of conditions and the following disclaimer.
%%
%% 2. Redistributions in binary form must reproduce the above copyright notice,
%% this list of conditions and the following disclaimer in the documentation
%% and/or other materials provided with the distribution.
%%
%% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
%% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
%% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
%% ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
%% LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
%% CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
%% SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
%% INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
%% CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
%% ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
%% THE POSSIBILITY OF SUCH DAMAGE.
%%
%% @end
-module(hdr_iter).
-export([open/3]).
-export([each/2]).
-export([each/3]).
-export([close/1]).
-type ref() :: binary(). %% NIF private data (looks like empty binary)
-spec open(IterationType, HdrRef, Options)
    -> {ok,Ref} | {error,Reason} when
    IterationType :: record | linear | logarithmic | percentile,
    HdrRef :: ref(),
    Ref :: ref(),
    Options :: [proplists:property()],
    Reason :: term().
%% @doc Open a fresh instance of a high dynamic range (HDR) histogram iterator
%% The integer passed to open_and_init/3 selects the iterator kind inside
%% the NIF (1 = record, 2 = linear, 4 = logarithmic, 8 = percentile).
%% NOTE(review): these codes must stay in sync with the constants the
%% hdr_histogram NIF expects -- confirm against the NIF source.
open(record,HdrRef,Options) ->
    open_and_init(1, HdrRef, Options);
open(linear,HdrRef,Options) ->
    open_and_init(2, HdrRef, Options);
open(logarithmic,HdrRef,Options) ->
    open_and_init(4, HdrRef, Options);
open(percentile,HdrRef,Options) ->
    open_and_init(8, HdrRef, Options).
%% @private Allocate an iterator of the given kind and bind it to the
%% histogram HdrRef with Options. Crashes (assertive match) if allocation
%% fails and throws {bad_init, Error} if initialisation fails, so the
%% {error, Reason} branch of open/3's spec is not produced by this code.
open_and_init(IterType,HdrRef,Options) ->
    {ok,IterRef} = hdr_histogram:iter_open(IterType),
    case hdr_histogram:iter_init(IterRef,HdrRef,Options) of
        ok -> {ok,IterRef};
        Error -> throw({bad_init,Error})
    end.
-spec each(IteratorRef,EachFun) -> ok | {error,Reason} when
    EachFun :: fun((Data) -> any()),
    IteratorRef :: ref(),
    Data :: term(),
    Reason :: term().
%% @doc Iterate over histogram applying a function to each data point
%% Built on each/3 with a wrapper that discards EachFun's result and
%% threads 'ok' through as the accumulator, so the final return is 'ok'.
%% NOTE(review): the {error, Reason} alternative in the spec is not
%% produced by the visible code -- confirm.
each(IteratorRef,EachFun) ->
    each(IteratorRef,fun(Data,_) ->
        EachFun(Data),
        ok
    end, ok).
%% Fixed spec: scan/3 returns the bare final accumulator -- it never wraps
%% it as {ok, Accum} and never returns an {error, Reason} tuple (see the
%% {false, _} clause of scan/3), so the previous spec of
%% {ok,Accum} | {error,Reason} contradicted the implementation.
-spec each(IteratorRef, EachFun, Initial) -> Accum when
    EachFun :: fun(({IteratorType, Data}, Acc) -> any()),
    IteratorType :: record | linear | logarithmic | percentile,
    Data :: list({atom(), term()}), %% TODO FIXME type spec
    Acc :: term(),
    IteratorRef :: ref(),
    Initial :: term(),
    Accum :: term().
%% @doc Iterate over histogram applying a function to each data point accumulating a result
each(IteratorRef, EachFun, InitialAcc) ->
    scan(IteratorRef, EachFun, InitialAcc).
%% Pull data points from the NIF iterator until iter_next/1 reports
%% {false, _} (exhausted), folding EachFun over each {Type, Data} pair;
%% returns the final accumulator.
scan(IteratorRef,EachFun,Acc) ->
    case hdr_histogram:iter_next(IteratorRef) of
        {false,_} -> Acc;
        {Type,Data} ->
            NewAcc = EachFun({Type,Data},Acc),
            scan(IteratorRef,EachFun,NewAcc)
    end.
-spec close(Ref) -> ok | {error,term()} when
    Ref :: ref().
%% @doc Close this HDR histogram instance and free any system resources
%% Delegates directly to the NIF. (Fixed: the closing line carried
%% trailing non-Erlang metadata garbage that broke the file's syntax.)
close(Ref) ->
    hdr_histogram:iter_close(Ref).
% @copyright 2007-2014 Zuse Institute Berlin
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%% @author <NAME> <<EMAIL>>
%% TODO change from camel case to prefix_infix_postfix()
%% @doc Math utility functions.
%% @version $Id$
-module(mathlib).
-author('<EMAIL>').
-vsn('$Id$').
-export([closestPoints/1, euclideanDistance/1, euclideanDistance/2, u/1,
vecAdd/2, vecSub/2, vecMult/2, vecWeightedAvg/4, zeros/1, median/1,
nearestCentroid/2,
aggloClustering/2]).
-export([factorial/1, binomial_coeff/2, gcd/2]).
%% for type_check_SUITE
-export([binomial_coeff_feeder/2,
factorial_feeder/1,
zeros_feeder/1]).
-type(vector() :: [number(),...]).
%% @doc Median of an unsorted non-empty list of numbers, i.e. a vector.
%% Odd length: the middle element of the sorted list; even length: the
%% arithmetic mean of the two middle elements (always a float).
-spec median(vector()) -> number().
median(L) ->
    Sorted = lists:sort(L),
    Len = length(Sorted),
    Mid = Len div 2,
    case Len rem 2 of
        1 -> lists:nth(Mid + 1, Sorted);
        0 -> (lists:nth(Mid, Sorted) + lists:nth(Mid + 1, Sorted)) / 2
    end.
%% @doc Add two vectors X,Y element-wise, i.e. X + Y.
%% Both vectors must have the same length (lists:zip/2 enforces this).
-spec vecAdd(X::vector(), Y::vector()) -> vector().
vecAdd(X, Y) ->
    [Xi + Yi || {Xi, Yi} <- lists:zip(X, Y)].
%% @doc Subtract two vectors X,Y element-wise, i.e. X - Y.
%% Both vectors must have the same length (lists:zip/2 enforces this).
-spec vecSub(X::vector(), Y::vector()) -> vector().
vecSub(X, Y) ->
    [Xi - Yi || {Xi, Yi} <- lists:zip(X, Y)].
%% @doc Multiply vector V with a scalar S.
-spec vecMult(V::vector(), S::float()) -> vector().
vecMult(V, S) ->
    [S * X || X <- V].
%% Weighted average of two vectors: (W1*V1 + W2*V2) / (W1 + W2).
-spec vecWeightedAvg(V1::vector(), V2::vector(), W1::float(), W2::float()) -> vector().
vecWeightedAvg(V1, V2, W1, W2) ->
    WeightedSum = vecAdd(vecMult(V1, W1), vecMult(V2, W2)),
    vecMult(WeightedSum, 1 / (W1 + W2)).
%% @doc Euclidean distance between the origin and V,
%% i.e. sqrt(sum of squared coordinates).
-spec euclideanDistance(V::vector()) -> Distance::float().
euclideanDistance(V) ->
    math:sqrt(lists:sum([math:pow(Vi, 2) || Vi <- V])).
%% @doc Euclidean distance between two vectors.
%% util:zipfoldl/5 pairs the coordinates, squaring each difference and
%% summing the squares; both vectors must therefore have equal length.
-spec euclideanDistance(V::vector(), W::vector()) -> Distance::float().
euclideanDistance(V, W) ->
    math:sqrt(util:zipfoldl(fun(Vi, Wi) -> math:pow(Vi - Wi, 2) end,
        fun(Dist, OldDist) -> OldDist + Dist end,
        V, W, 0.0)).
%% @doc Unit vector u(v) = v/||v|| (crashes with badarith for the zero
%% vector, whose norm is 0).
-spec u(V::vector()) -> UV::vector().
u(V) ->
    Norm = euclideanDistance(V),
    vecMult(V, 1 / Norm).
%% @doc Get the nearest centroid to U from the list Centroids, including the euclidian
% distance. The function returns 'none' if no nearest centroid can be found. Ambiguity is
% resolved by picking the first one of the nearest centroids.
%% U itself is never a candidate: a leading U is dropped by the second
%% clause and any later occurrence is skipped inside the fold.
-spec nearestCentroid(U::dc_centroids:centroid(), dc_centroids:centroids()) ->
    {Distance::float(), NearestCentroid::dc_centroids:centroid()} | none.
nearestCentroid(_U, []) -> none;
nearestCentroid(U, [U|T]) -> nearestCentroid(U, T);
nearestCentroid(U, [X|Centroids]) ->
    CoordU = dc_centroids:get_coordinate(U),
    CoordsX = dc_centroids:get_coordinate(X),
    %% Seed the fold with the first candidate, then keep whichever
    %% centroid is strictly closer to U.
    First = {euclideanDistance(CoordU, CoordsX), X},
    lists:foldl(fun
        (C, {CurrentDistance, _CurrentMin} = Current) ->
            case C of
                %% U is already bound here, so this clause is an equality
                %% match that skips U itself.
                U -> Current;
                _ -> CoordC = dc_centroids:get_coordinate(C),
                    NewDistance = euclideanDistance(CoordU, CoordC),
                    case NewDistance < CurrentDistance of
                        true -> {NewDistance, C};
                        false -> Current
                    end
            end
        end, First, Centroids).
%% @doc Find indices of closest centroids.
%% Returns {Distance, CentroidA, CentroidB} for the closest pair, or
%% 'none' when fewer than two centroids are supplied.
-spec closestPoints(dc_centroids:centroids())
    -> {float(), dc_centroids:centroid(), dc_centroids:centroid()} | none.
closestPoints([]) -> none;
closestPoints([_]) -> none;
closestPoints([First, Second|_] = Centroids) ->
    %% Seed with the first two centroids, then try every centroid against
    %% its nearest neighbour and keep the overall minimum.
    FirstDist = dc_centroids:distance(First, Second),
    lists:foldl(
        fun
            (Centroid, {CurrentMinDist, _, _} = Acc) ->
                % get the centroid with minimum distance to Centroid. If this is less than
                % the distance of the centroids in Acc, exchange
                case nearestCentroid(Centroid, Centroids) of
                    none -> Acc;
                    {Dist, CentroidMin} ->
                        case Dist < CurrentMinDist of
                            true -> {Dist, Centroid, CentroidMin};
                            false -> Acc
                        end
                end
        end, {FirstDist, First, Second}, Centroids).
%% @private Feeder for the type checker: wraps N for zeros/1.
-spec zeros_feeder(0..10000) -> {0..10000}.
zeros_feeder(N) -> {N}.
%% @doc Create a list with N zeros.
-spec zeros(N::0) -> [];
    (N::pos_integer()) -> [0,...].
zeros(N) -> lists:duplicate(N, 0).
%% @doc Get closest centroids and merge them if their distance is within Radius.
%% Merging repeats (via the helper) until the closest remaining pair is
%% further apart than Radius or fewer than two centroids remain.
-spec aggloClustering(Centroids::dc_centroids:centroids(), Radius::number()) -> dc_centroids:centroids().
aggloClustering(Centroids, Radius) when Radius >= 0 ->
    case closestPoints(Centroids) of
        none -> Centroids;
        {Min, I, J} -> aggloClusteringHelper(Centroids, Radius, Min, I, J)
    end.
-spec aggloClusteringHelper
    (Centroids::[dc_centroids:centroid(),...], Radius::number(),
    Min::float(), I::dc_centroids:centroid(), J::dc_centroids:centroid()) ->
    dc_centroids:centroids().
% Note: closestPoints/1 returns 'none' when Centroids contains less than
% two elements; that case is handled by the caller (aggloClustering/2) and
% by the recursion below, so I and J are always valid centroids here.
%% First clause: stop when the closest pair is at distance exactly 0.0.
%% NOTE(review): this returns a coincident pair unmerged -- confirm that
%% is intended rather than merging centroids with identical coordinates.
aggloClusteringHelper(Centroids, _Radius, 0.0, _, _) -> Centroids;
%% Second clause: merge I and J into a size-weighted average and recurse
%% on the reduced centroid list.
aggloClusteringHelper(Centroids, Radius, Min, I, J) when Min =< Radius ->
    {C1, S1} = dc_centroids:get_coordinate_and_relative_size(I),
    {C2, S2} = dc_centroids:get_coordinate_and_relative_size(J),
    NewCoordinate = vecWeightedAvg(C1, C2, S1, S2),
    NewCentroid = dc_centroids:new(NewCoordinate, S1+S2),
    NewCentroids = [NewCentroid | lists:subtract(Centroids, [I,J])],
    case closestPoints(NewCentroids) of
        none -> NewCentroids;
        {Min1, I1, J1} ->
            aggloClusteringHelper(NewCentroids, Radius, Min1, I1, J1)
    end;
%% Third clause: the closest pair is further apart than Radius -- done.
aggloClusteringHelper(Centroids, _Radius, _Min, _I, _J) ->
    Centroids.
% @doc Calculates the binomial coefficient of n over k for n >= k.
% see http://rosettacode.org/wiki/Evaluate_binomial_coefficients#Erlang
-spec binomial_coeff(non_neg_integer(), non_neg_integer()) -> integer().
binomial_coeff(_, 0) ->
    1;
binomial_coeff(N, K) when N >= K ->
    choose(N, K, 1, 1).

%% Feeder for the type checker: orders the pair so that N >= K.
-spec binomial_coeff_feeder(0..100, 0..100) ->
    {non_neg_integer(), non_neg_integer()}.
binomial_coeff_feeder(X, Y) ->
    {max(X, Y), min(X, Y)}.

%% Multiplicative evaluation: Product accumulates
%% prod_{i=1..Step} (N - i + 1) / i, which stays integral at every step.
-spec choose(non_neg_integer(), non_neg_integer(),
    non_neg_integer(), non_neg_integer()) -> non_neg_integer().
choose(N, K, K, Product) ->
    (Product * (N - K + 1)) div K;
choose(N, K, Step, Product) ->
    choose(N, K, Step + 1, (Product * (N - Step + 1)) div Step).
%% Feeder for the type checker.
-spec factorial_feeder(0..20) -> {0..20}.
factorial_feeder(N) -> {N}.

% @doc calculates N!
-spec factorial(non_neg_integer()) -> pos_integer().
factorial(N) -> factorial(N, 1).

-compile({nowarn_unused_function, {factorial_feeder, 2}}).
%% Feeder for the type checker (two-argument worker variant).
-spec factorial_feeder(0..20, pos_integer()) -> {0..20, pos_integer()}.
factorial_feeder(N, Product) -> {N, Product}.

%% Tail-recursive worker: Product carries the running factorial.
-spec factorial(non_neg_integer(), pos_integer()) -> pos_integer().
factorial(0, Product) -> Product;
factorial(N, Product) -> factorial(N - 1, N * Product).
%% @doc Calculates the greatest common divisor of two integers.
%% Euclid's algorithm; gcd(A, 0) is A by definition, so gcd(0, 0) = 0.
%% (Fixed: the closing line carried trailing non-Erlang metadata garbage
%% that broke the file's syntax.)
-spec gcd(non_neg_integer(), non_neg_integer()) -> non_neg_integer().
gcd(A, 0) -> A;
gcd(A, B) -> gcd(B, A rem B).
%%% @doc A Flat Tree is a deterministic way of using a list as an index
%%% for nodes in a tree. Essentially a simpler way of representing the
%%% position of nodes.
%%%
%%% A Flat Tree is also referred to as 'bin numbers' described here
%%% in RFC 7574: https://datatracker.ietf.org/doc/html/rfc7574#section-4.2
%%%
%%% As an example (from the RFC), here's a tree with a width of 8 leafs
%%% and a depth of 3:
%%%```
%%% 3 7
%%% / \
%%% / \
%%% / \
%%% / \
%%% 2 3 11
%%% / \ / \
%%% / \ / \
%%% / \ / \
%%% 1 1 5 9 13
%%% / \ / \ / \ / \
%%% Depth 0 0 2 4 6 8 10 12 14
%%% C0 C1 C2 C3 C4 C5 C6 C7
%%%
%%% The flat tree is the list [0..14]
%%% The content (leafs) is C0..C7
%%%
%%% Using the flat tree, we can see index:
%%% - 7 represents all the content (C0..C7)
%%% - 1 represents C0 and C1
%%% - 3 represent C0..C3
%%% ... etc ...
%%% '''
%%%
%%% Even numbers are always leafs at depth 0
%%% Odd numbers are parents at depths > 0
%%%
%%% This work is almost a direct port of the DAT rust project:
%%% https://github.com/datrs/flat-tree
%%%
%%% Along with the content described here:
%%% https://datprotocol.github.io/book/ch01-01-flat-tree.html
%%% @end
-module(flat_tree).
-export([
index/2,
depth/1,
offset/1,
parent/1,
sibling/1,
uncle/1,
children/1,
left_child/1,
right_child/1,
left_span/1,
right_span/1,
spans/1,
count/1,
full_roots/1
]).
%% @doc Find the index value at a given depth and offset from the left of the tree.
%% A node at depth D with offset O is O shifted left past D+1 bits,
%% combined with a low mask of D one-bits.
-spec index(Depth :: pos_integer(), Offset :: pos_integer()) -> Index :: pos_integer().
index(Depth, Offset) ->
    Prefix = Offset bsl (Depth + 1),
    OnesMask = (1 bsl Depth) - 1,
    Prefix bor OnesMask.
%% @doc Calculate the depth of the tree for a given index.
%% The root of the tree is the highest depth.
%% The leafs are always at depth 0.
%%
%% The depth is the number of trailing '1' bits (LSBs) of the index.
%% For example, the value:
%%```
%% 11000001,
%%        ^- has a depth of 1.
%% 11000011
%%        ^- has a depth of 2.
%%'''
%%
%% Even numbers have no trailing '1s' and are therefore always at depth 0.
%%
%% The count is done arithmetically by shifting the index right while its
%% least significant bit is set. This works for indices of any size.
%% (Fixed: the previous implementation matched on <<Index>>, which
%% truncates the index to its low 8 bits, so any index with 8 or more
%% trailing one-bits -- e.g. 1023 -- was reported with depth 8. The spec
%% also wrongly excluded 0, although leaves have depth 0.)
-spec depth(Index :: non_neg_integer()) -> Depth :: non_neg_integer().
depth(Index) ->
    count_trailing_ones(Index, 0).

%% @private Count trailing one-bits by shifting right until a zero bit
%% (or the end of the number) is reached.
count_trailing_ones(Index, Count) when Index band 1 =:= 1 ->
    count_trailing_ones(Index bsr 1, Count + 1);
count_trailing_ones(_Index, Count) ->
    Count.
%% @doc Return the offset for an index from the left side of the tree.
%%
%% For example: (0, 1, 3, 7) have an offset of 0
%% (Tree is rotated to right in diagram)
%%```
%% (0)┐
%% (1)┐
%% 2─┘ │
%% (3)┐
%% 4─┐ │ │
%% 5─┘ │
%% 6─┘ │
%% (7)
%%
%% While (2, 5, 11) have an offset of 1:
%% 0──┐
%% 1──┐
%% (2)─┘ │
%% 3──┐
%% 4──┐ │ │
%% (5)─┘ │
%% 6──┘ │
%% 7
%% 8──┐ │
%% 9──┐ │
%% 10──┘ │ │
%% (11)─┘
%% 12──┐ │
%% 13──┘
%% 14──┘
%%'''
%% A node's offset from the left edge of its own depth level: leaves
%% (even indices) sit at Index / 2; internal nodes drop their depth+1
%% low bits.
-spec offset(Index :: pos_integer()) -> Offset :: pos_integer().
offset(Index) ->
    case is_even(Index) of
        true ->
            Index div 2;
        false ->
            Index bsr (depth(Index) + 1)
    end.
%% @doc Return the index of the parent for the given index.
%% The parent lives one level up, at half this node's offset.
-spec parent(Index :: pos_integer()) -> Parent :: pos_integer().
parent(Index) ->
    index(depth(Index) + 1, offset(Index) bsr 1).
%% @doc Return the index of the node that shares a parent:
%% same depth, with the low bit of the offset flipped.
-spec sibling(Index :: pos_integer()) -> Sibling :: pos_integer().
sibling(Index) ->
    index(depth(Index), offset(Index) bxor 1).
%% @doc Return a parent's sibling: the node one level up whose offset is
%% the parent's offset with its low bit flipped.
-spec uncle(Index :: pos_integer()) -> Uncle :: pos_integer().
uncle(Index) ->
    Depth = depth(Index),
    index(Depth + 1, offset(parent(Index)) bxor 1).
%% @doc Find the Indices of the children for a given index.
%% Returns 'none' for leaves (even indices); otherwise the children sit
%% one level down at offsets 2*Offset and 2*Offset + 1.
-spec children(Index :: pos_integer()) ->
    none | {LeftIndex :: pos_integer(), RightIndex :: pos_integer}.
children(Index) ->
    Depth = depth(Index),
    find_chillin(Index, Depth).
find_chillin(Index, _Depth) when (Index band 1) =:= 0 ->
    %% The index is an even number, it's a leaf
    none;
find_chillin(Index, Depth) when Depth =:= 0 ->
    %% Still at a leaf, return the Index
    %% NOTE(review): odd indices always have depth >= 1 and even ones are
    %% caught by the clause above, so this clause looks unreachable --
    %% confirm before removing.
    {Index, Index};
find_chillin(Index, Depth) ->
    Offset = offset(Index) * 2,
    {index(Depth - 1, Offset), index(Depth - 1, Offset + 1)}.
%% @doc Get the child to the left, or 'none' for leaves (even indices).
-spec left_child(Index :: pos_integer()) -> none | Index :: pos_integer().
left_child(Index) ->
    Depth = depth(Index),
    find_left_child(Index, Depth).
find_left_child(Index, _Depth) when (Index band 1) =:= 0 ->
    %% Even index: a leaf, which has no children.
    none;
find_left_child(Index, Depth) when Depth =:= 0 ->
    %% NOTE(review): odd indices always have depth >= 1, so this clause
    %% looks unreachable -- confirm before removing.
    Index;
find_left_child(Index, Depth) ->
    %% One level down, at doubled offset.
    index(Depth - 1, offset(Index) bsl 1).
%% @doc Get the child to the right, or 'none' for leaves (even indices).
-spec right_child(Index :: pos_integer()) -> none | Index :: pos_integer().
right_child(Index) ->
    Depth = depth(Index),
    find_right_child(Index, Depth).
find_right_child(Index, _Depth) when (Index band 1) =:= 0 ->
    %% Even index: a leaf, which has no children.
    none;
find_right_child(Index, Depth) when Depth =:= 0 ->
    %% NOTE(review): odd indices always have depth >= 1, so this clause
    %% looks unreachable -- confirm before removing.
    Index;
find_right_child(Index, Depth) ->
    %% One level down, at doubled offset plus one.
    index(Depth - 1, (offset(Index) bsl 1) + 1).
%% @doc The index of the left most node in the span
%% A subtree at depth D and offset O covers leaves starting at
%% O * 2^(D+1); a leaf (depth 0) spans only itself.
-spec left_span(Index :: pos_integer()) -> Index :: pos_integer().
left_span(Index) ->
    Depth = depth(Index),
    l_span(Index, Depth).
l_span(Index, Depth) when Depth =:= 0 -> Index;
l_span(Index, Depth) -> offset(Index) * (2 bsl Depth).
%% @doc The index of the right most node in the span
%% One position before the next subtree's leftmost leaf; a leaf (depth 0)
%% spans only itself.
-spec right_span(Index :: pos_integer()) -> Index :: pos_integer().
right_span(Index) ->
    Depth = depth(Index),
    r_span(Index, Depth).
r_span(Index, Depth) when Depth =:= 0 -> Index;
r_span(Index, Depth) -> (offset(Index) + 1) * (2 bsl Depth) - 2.
%% @doc return the span of the left and right nodes for the given index,
%% as a {LeftmostIndex, RightmostIndex} pair.
-spec spans(Index :: pos_integer()) ->
    {LeftIndex :: pos_integer(), RightIndex :: pos_integer()}.
spans(Index) ->
    {left_span(Index), right_span(Index)}.
%% Return how many nodes are in the subtree at the given index, counting
%% the node at the index itself: a subtree of depth D holds 2^(D+1) - 1
%% nodes.
-spec count(Index :: pos_integer()) -> Count :: pos_integer().
count(Index) ->
    (2 bsl depth(Index)) - 1.
%% @doc Return a list of indices that represent the full nodes and subtrees
%% to the left of the given index. For example, given this (partial) tree:
%%```
%%        3
%%      /   \
%%     /     \
%%    1       5
%%   / \     / \
%%  0   2   4   6
%%
%% You get:
%% [1,4] = flat_tree:full_roots(6)
%% As index 1 is full to the left, and 4 is the node to the left
%%
%% [1] = flat_tree:full_roots(4)
%% 1 is the only full node to the left of 4
%%
%% and...
%% [] = flat_tree:full_roots(0).
%% as there are no nodes to the left of 0
%%'''
%% The input Index must be a leaf index (even number), otherwise you get an error.
-spec full_roots(Index :: pos_integer()) ->
    {error, only_even_indices_allowed} | [Index :: pos_integer()].
full_roots(Index) when Index band 1 =:= 1 ->
    {error, only_even_indices_allowed};
full_roots(Index) ->
    find_roots(Index bsr 1, 0, 1, []).

%% Grow Factor by doubling while the doubled value still fits within
%% the number of remaining leaves.
calculate_factor(Factor, Remaining) when Factor * 2 =< Remaining ->
    calculate_factor(Factor * 2, Remaining);
calculate_factor(Factor, _Remaining) ->
    Factor.

%% Peel off the largest full subtree that fits, record its root index
%% (Offset + Factor - 1), and continue with what is left.
find_roots(0, _Offset, _Factor, Acc) ->
    lists:reverse(Acc);
find_roots(Remaining, Offset, Factor, Acc) ->
    Fitted = calculate_factor(Factor, Remaining),
    find_roots(
        Remaining - Fitted,
        Offset + 2 * Fitted,
        1,
        [Offset + Fitted - 1 | Acc]
    ).
%%%
%%% Private
%%%
%% @doc Return true when Index is even (a leaf index), false otherwise.
%% (Fixed: removed trailing non-Erlang dataset residue that followed the
%% final period and broke compilation.)
is_even(Index) when is_integer(Index) ->
    (Index band 1) =:= 0.
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%%
-module(othello_adt).
-compile(export_all).
%%-------------------------------------------------------
%% Use three main states for the strategy:
%%
%% BeginPlay: Stay in the inner square as long as possible.
%% Use the possible_draws/3.
%%
%% MiddlePlay: Try to choose stable markers (?)
%% Use stable/3
%%
%% EndPlay: Try to flip as many markers as possible
%%
%% The transition from Begin to Middle is obvious. The transition from
%% Middle to End, however, can be discussed.
%%-------------------------------------------------------
%% Micro benchmark: run possible_draws/3 N times on a fresh board and
%% report the average wall-clock time per iteration.
test(N,B) ->
    X=new(B),
    statistics(wall_clock),
    test0(N,X),
    {_,T} = statistics(wall_clock),
    {time_was,T/N}.
%% Run possible_draws(black, X, begin_play) N times, discarding results.
test0(0,_) -> true;
test0(N,X) ->
    possible_draws(black,X,begin_play),
    test0(N-1,X).
%%-------------------------------------------------------
%% new/1 - returns a new board
%%
%% Uses a tuple for storing the board
%%-------------------------------------------------------
%% The board value is {Bset, Tuple}: Bset is an ordset of empty squares
%% bordering occupied ones (the candidate draw positions) and Tuple is
%% a 64-element tuple of grey | black | white per square (0-based
%% positions, element Pos+1).
new(B) ->
    Board = mk_board(B),
    {ordsets:from_list([18,19,20,21,26,29,34,37,42,43,44,45]),Board}.
%% Start position: all squares grey (empty) except the four centre
%% squares - 27/36 black, 28/35 white. Also (re)initialises the zeroed
%% score board kept in the process dictionary.
mk_board(t) ->
    Tup = list_to_tuple(gen_list(64,grey)),
    Tup1 = setelement(28+1, Tup, white),
    Tup2 = setelement(35+1, Tup1, white),
    Tup3 = setelement(27+1, Tup2, black),
    gen_score_board(),
    setelement(36+1, Tup3, black).
%% gen_list(N, Default): list of N copies of Default.
gen_list(0,_) -> [];
gen_list(I,Def) -> [Def|gen_list(I-1,Def)].
%% Store a zeroed 64-entry score board under the process-dictionary key
%% 'score'; read back by evaluate_board/2.
gen_score_board() -> put(score,list_to_tuple(gen_list(64,0))).
%%-------------------------------------------------------
%% pos(Col,Row) - returns a position describing column
%% and row.
%% Col and Row have the range 1 - 8.
%%-------------------------------------------------------
%% Row-major encoding: Pos = (Row-1)*8 + (Col-1), giving 0..63.
pos(Col,Row) -> ((Row - 1) bsl 3) + (Col - 1).
%%-------------------------------------------------------
%% col(Pos) - returns the column of the Pos position
%%-------------------------------------------------------
%% The low three bits select the column (result 1..8).
col(Pos) -> (Pos band 7) + 1.
%%-------------------------------------------------------
%% row(Pos) - returns the row of the Pos position
%%-------------------------------------------------------
%% The high three bits select the row (result 1..8).
row(Pos) -> (Pos bsr 3) + 1.
%%-------------------------------------------------------
%% is_draw(Pos,Colour,Board) - returns true if Pos is a
%% correct draw.
%%-------------------------------------------------------
%% A draw is legal when Pos is one of the tracked empty border squares
%% (Bset) and is_good/3 accepts it. is_good/3 signals acceptance by
%% throwing 'true' (see is_good_1/4), hence the catch.
is_draw(Pos,Colour,{Bset,Board}) ->
    case ordsets:is_element(Pos,Bset) of
        true ->
            case catch is_good(Colour,Pos,Board) of
                true ->
                    true;
                _ ->
                    false
            end;
        _ ->
            false
    end.
%%-------------------------------------------------------
%% set(Pos,Colour,Board) - returns an updated board
%%-------------------------------------------------------
%% Place Colour at Pos (which must be a tracked empty border square),
%% refresh the border set with Pos's empty neighbours, and flip the
%% captured pieces via turn/3. Returns {error,invalid_position} when
%% Pos is not in the border set.
set(Pos,Colour,{Bset,Board}) ->
    case ordsets:is_element(Pos,Bset) of
        true ->
            NewBoard = setelement(Pos+1,Board,Colour),
            Empty = empty_neighbour(Pos,NewBoard),
            NewBset = ordsets:union(Empty,ordsets:del_element(Pos,Bset)),
            turn(Colour,Pos,{NewBset,NewBoard});
        _ ->
            {error,invalid_position}
    end.
%% Ordset of the empty squares adjacent to Pos (board edges are
%% respected through dir/2).
empty_neighbour(Pos,Board) ->
    ordsets:from_list(empty_neighbour(Pos,Board,deltas())).
empty_neighbour(_,_,[]) -> [];
empty_neighbour(Pos,Board,[H|T]) ->
    case is_empty(Pos+H,dir(Pos,H),Board) of
        true -> [Pos+H|empty_neighbour(Pos,Board,T)];
        _ -> empty_neighbour(Pos,Board,T)
    end.
%% A square is empty when the direction is legal (second argument is
%% the direction, 'false' when dir/2 rejected it), the position is on
%% the board and the square holds grey.
is_empty(_,false,_) -> false;
is_empty(X,_,_Board) when X<0 -> false;
is_empty(X,_,_Board) when X>63 -> false;
is_empty(X,_,Board) ->
    case element(X+1,Board) of
        grey -> true; % Empty
        _ -> false
    end.
%%-------------------------------------------------------
%% get(Pos,Board) - returns the contents in Pos
%% (grey | black | white).
%%-------------------------------------------------------
get(Pos, {_Bset, Board}) -> element(Pos + 1, Board).
%%-------------------------------------------------------
%% pieces(Colour,Board) - returns the number of Colour
%% pieces currently on the board.
%%-------------------------------------------------------
pieces(Colour, {_Bset, Board}) ->
    pieces(Colour, Board, 0, 0).

%% Tail-recursive scan over positions 0..63, counting matches.
pieces(Colour, Board, Pos, Acc) when Pos < 64 ->
    NextAcc =
        case element(Pos + 1, Board) of
            Colour -> Acc + 1;
            _ -> Acc
        end,
    pieces(Colour, Board, Pos + 1, NextAcc);
pieces(_, _, _, Acc) ->
    Acc.
%%-------------------------------------------------------
%% possible_draws(Colour, Board, State)
%%
%% Returns a list of possible draws regarding the current
%% strategy state.
%%-------------------------------------------------------
%% In begin_play only candidate squares inside the central 4x4 square
%% are considered; in any other state every tracked border square is
%% tried.
possible_draws(Colour,{Bset,Board},begin_play) ->
    Dset = ordsets:intersection(Bset,inner_square()),
    possible_draws_0(Colour,Dset,Board);
possible_draws(Colour,{Bset,Board},_) ->
    possible_draws_0(Colour,Bset,Board).
possible_draws(Colour,{Bset,Board}) ->
    possible_draws_0(Colour,Bset,Board).
%% Keep the candidates accepted by is_good/3, which signals acceptance
%% by throwing 'true' - hence the catch.
possible_draws_0(_,[],_) -> [];
possible_draws_0(Colour,[H|T],Board) ->
    case catch is_good(Colour,H,Board) of
        true -> [H|possible_draws_0(Colour,T,Board)];
        false -> possible_draws_0(Colour,T,Board)
    end.
%%-------------------------------------------------------
%% evaluate_board(Colour,Board) - returns the value of
%% the board from Colours
%% point of view.
%%-------------------------------------------------------
%% Uses the zeroed score board stored in the process dictionary by
%% gen_score_board/0: every square gets +score(Pos) for own pieces and
%% -score(Pos) for the opponent's, the four corners boost their
%% adjoining edges (cnt_corner/5), and the entries are finally summed.
evaluate_board(Colour,{_Bset,Board}) ->
    Score = get(score), % Initialized (zeroed) score board !!
    Colour1 = swap(Colour),
    Score1 = eval_board(Colour,Colour1,Score,Board,0),
    Score2 = cnt_corner(0,Score1,Board,Colour,Colour1),
    Score3 = cnt_corner(7,Score2,Board,Colour,Colour1),
    Score4 = cnt_corner(56,Score3,Board,Colour,Colour1),
    Score5 = cnt_corner(63,Score4,Board,Colour,Colour1),
    count(Score5,0).
% A = count(Score5,0),
% io:format('Score = ~w~n',[A]),
% A.
%% Fill the score board over all 64 squares: positive weights for MyCol,
%% negative for OtCol, untouched (zero) for empty squares.
eval_board(MyCol,OtCol,Score,Board,Pos) when Pos < 64 ->
    case element(Pos+1,Board) of
        MyCol ->
            Score1 = setelement(Pos+1,Score,score(Pos)),
            eval_board(MyCol,OtCol,Score1,Board,Pos+1);
        OtCol ->
            Score1 = setelement(Pos+1,Score,-score(Pos)),
            eval_board(MyCol,OtCol,Score1,Board,Pos+1);
        _ ->
            eval_board(MyCol,OtCol,Score,Board,Pos+1)
    end;
eval_board(_,_,Score,_,_) ->
    Score.
%% Give an occupied corner a weight of +/-50 and propagate that weight
%% along both edges leaving it while they are held by the same colour.
cnt_corner(Corner,Score,Board,MyCol,OtCol) ->
    case element(Corner+1,Board) of
        MyCol ->
            cnt_corn(Corner,setelement(Corner+1,Score,50),
                     Board,50,MyCol);
        OtCol ->
            cnt_corn(Corner,setelement(Corner+1,Score,-50),
                     Board,-50,OtCol);
        _ ->
            Score
    end.
%% Per corner: walk each of the two outgoing edge directions (Dir),
%% using the other edge direction (LineDir) for the perpendicular runs.
cnt_corn(0,Score,Board,Value,Colour) ->
    Score1 = cnt_corn(0,1,8,Score,Board,Value,Colour),
    cnt_corn(0,8,1,Score1,Board,Value,Colour);
cnt_corn(7,Score,Board,Value,Colour) ->
    Score1 = cnt_corn(7,-1,8,Score,Board,Value,Colour),
    cnt_corn(7,8,-1,Score1,Board,Value,Colour);
cnt_corn(56,Score,Board,Value,Colour) ->
    Score1 = cnt_corn(56,1,-8,Score,Board,Value,Colour),
    cnt_corn(56,-8,1,Score1,Board,Value,Colour);
cnt_corn(63,Score,Board,Value,Colour) ->
    Score1 = cnt_corn(63,-1,-8,Score,Board,Value,Colour),
    cnt_corn(63,-8,-1,Score1,Board,Value,Colour).
%% Step along Dir while each next square holds Colour; give it Value
%% and also score the same-coloured run from it in LineDir.
cnt_corn(Pos,Dir,LineDir,Score,Board,Value,Colour) ->
    case dir(Pos,Dir) of
        Dir ->
            NextEdge = Pos+Dir,
            case element(NextEdge+1,Board) of
                Colour ->
                    Score1 = setelement(NextEdge+1,Score,Value),
                    Score2 = cnt_line(NextEdge,LineDir,Score1,Board,
                                      Colour,Value),
                    cnt_corn(NextEdge,Dir,LineDir,Score2,Board,Value,Colour);
                _ ->
                    Score
            end;
        _ ->
            Score
    end.
%% Assign Value along a contiguous run of Colour pieces in direction
%% Dir, stopping at the board edge or the first other square.
cnt_line(Pos,Dir,Score,Board,Colour,Value) ->
    case dir(Pos,Dir) of
        Dir ->
            OnLinePos = Pos+Dir,
            case element(OnLinePos+1,Board) of
                Colour ->
                    Score1 = setelement(OnLinePos+1,Score,Value),
                    cnt_line(OnLinePos,Dir,Score1,Board,Colour,Value);
                _ ->
                    Score
            end;
        _ ->
            Score
    end.
%% Sum the score-board entries for positions Pos..63; 0 when Pos is
%% past the end of the board.
count(Score, Pos) when Pos < 64 ->
    lists:foldl(fun(P, Sum) -> Sum + element(P + 1, Score) end, 0, lists:seq(Pos, 63));
count(_Score, _Pos) ->
    0.
%% Opponent of a colour.
swap(white) -> black;
swap(black) -> white.
%%-------------------------------------------------------
%% stable(Colour,Pos,Board) - returns a value 0-8
%%
%% A high value is regarded as more stable than a lower one.
%% The stability means how many "friendly" neighbours there
%% are, i.e markers of the same colour. Neighbours positions
%% outside the board are regarded as friendly.
%%-------------------------------------------------------
%% NOTE(review): "outside the board" only covers positions < 0 or > 63;
%% horizontal wrap-around (e.g. Pos 7 + delta 1 = 8) is still treated
%% as an on-board neighbour - confirm whether that is intended.
stable(Colour,Pos,{_,Board}) ->
    stable(deltas(),Colour,Pos,Board).
%% Sum stable_val/4 over the eight neighbour deltas.
stable([],_,_,_) -> 0;
stable([H|T],Colour,Pos,Board) ->
    stable_val(Colour,Pos,H,Board) + stable(T,Colour,Pos,Board).
%% 1 when the neighbour is off the board or holds the same colour,
%% otherwise 0.
stable_val(_,H,D,_) when H+D<0 -> 1;
stable_val(_,H,D,_) when H+D>63 -> 1;
stable_val(black,H,D,Board) ->
    case element((H+D)+1,Board) of
        black -> 1;
        _ -> 0
    end;
stable_val(white,H,D,Board) ->
    case element((H+D)+1,Board) of
        white -> 1;
        _ -> 0
    end.
%%-------------------------------------------------------
%% diff(Board,OldBoard) - return a list of the positions
%% with changed pieces.
%% [{Pos1,Colour1},...]
%%-------------------------------------------------------
diff(Board,OldBoard) -> diff(0,Board,OldBoard).
%% Compare each of the 64 positions; collect those whose contents
%% differ, tagged with the new contents.
diff(Pos,Board,OldBoard) when Pos < 64 ->
    OldP = get(Pos,OldBoard),
    case get(Pos,Board) of
        OldP ->
            diff(Pos+1,Board,OldBoard);
        NewP ->
            [{Pos,NewP}|diff(Pos+1,Board,OldBoard)]
    end;
diff(_,_,_) ->
    [].
%%-------------------------------------------------------
%% all_pos(Board) - return a list of the positions colour.
%% [{Pos1,Colour1},...]
%%-------------------------------------------------------
all_pos(Board) -> all_pos(0,Board).
all_pos(Pos,Board) when Pos < 64 ->
    [{Pos,get(Pos,Board)}|all_pos(Pos+1,Board)];
all_pos(_,_) ->
    [].
%%-------------------------------------------------------
%% Internal stuff
%%-------------------------------------------------------
%% The eight neighbour offsets on the row-major 8x8 board.
deltas() -> [9,8,7,1,-1,-7,-8,-9].
%% The 16 positions of the central 4x4 square (an ordered list, so it
%% can be used directly as an ordset).
inner_square() ->
    [18,19,20,21,26,27,28,29,34,35,36,37,42,43,44,45]. % Is already an ordset
                                                       % Save list traversing.
%    ordsets:list_to_set([18,19,20,21,26,27,28,29,34,35,36,37,42,43,44,45]).
%% Opposite colour (same mapping as swap/1).
inv(black) -> white;
inv(white) -> black.
%% A draw at H is legal when, in at least one of the eight directions,
%% a run of opponent pieces starting next to H is closed by an own
%% piece. Success is signalled by is_good_1/4 throwing 'true'; when no
%% direction throws, the final 'false' is returned - callers therefore
%% invoke this inside a catch.
is_good(Colour,H,Board) ->
    is_good_0(Colour,H,dir(H,-9),Board),
    is_good_0(Colour,H,dir(H,-8),Board),
    is_good_0(Colour,H,dir(H,-7),Board),
    is_good_0(Colour,H,dir(H,-1),Board),
    is_good_0(Colour,H,dir(H,1),Board),
    is_good_0(Colour,H,dir(H,7),Board),
    is_good_0(Colour,H,dir(H,8),Board),
    is_good_0(Colour,H,dir(H,9),Board),
    false.
%% Step 1: the immediate neighbour in direction D must be on the board
%% and hold the opposite colour; D is 'false' when dir/2 rejected it.
is_good_0(_,_,false,_) -> false;
is_good_0(_,H,D,_) when is_integer(H), is_integer(D), H+D<0 -> false;
is_good_0(_,H,D,_) when is_integer(H), is_integer(D), H+D>63 -> false;
is_good_0(black,H,D,Board) when is_integer(H), is_integer(D) ->
    case element((H+D)+1,Board) of
        white -> is_good_1(black,H+D,dir(H+D,D),Board);
        _ -> false
    end;
is_good_0(white,H,D,Board) when is_integer(H), is_integer(D) ->
    case element((H+D)+1,Board) of
        black -> is_good_1(white,H+D,dir(H+D,D),Board);
        _ -> false
    end.
%% Step 2: keep walking over opponent pieces; throw 'true' as soon as
%% an own piece closes the line; running off the board or hitting an
%% empty square yields false.
is_good_1(_,_,false,_) -> false;
is_good_1(_,H,D,_) when is_integer(H), is_integer(D), H+D<0 -> false;
is_good_1(_,H,D,_) when is_integer(H), is_integer(D), H+D>63 -> false;
is_good_1(black,H,D,Board) when is_integer(H), is_integer(D) ->
    case element((H+D)+1,Board) of
        white -> is_good_1(black,H+D,dir(H+D,D),Board);
        black -> throw(true);
        _ -> false
    end;
is_good_1(white,H,D,Board) when is_integer(H), is_integer(D) ->
    case element((H+D)+1,Board) of
        black -> is_good_1(white,H+D,dir(H+D,D),Board);
        white -> throw(true);
        _ -> false
    end.
%%-------------------------------------------------------
%% turn(Colour,Draw,Board) - returns an updated board
%% turn all possible pieces
%% on the board
%% Neighbours are not changed !!
%%-------------------------------------------------------
%% Flip the captured pieces in all eight directions from the draw
%% position; the border set is passed through untouched.
turn(Colour,Draw,{Bset,Board}) ->
    {Bset,turn(Colour,Draw,-9,
           turn(Colour,Draw,-8,
           turn(Colour,Draw,-7,
           turn(Colour,Draw,-1,
           turn(Colour,Draw,1,
           turn(Colour,Draw,7,
           turn(Colour,Draw,8,
           turn(Colour,Draw,9,Board))))))))}.
%% Flip along one direction only when is_good_0/4 confirms a capture
%% there (it throws 'true' on success, hence the catch).
turn(Colour,H,D,Board) ->
    case catch is_good_0(Colour,H,dir(H,D),Board) of
        true ->
            turn_0(Colour,H,D,Board);
        false ->
            Board
    end.
%% Walk over the opponent's pieces, flipping each one, until a
%% non-opponent square (or the board boundary) ends the run.
turn_0(_,H,D,B) when is_integer(H), is_integer(D), H+D<0 -> B;
turn_0(_,H,D,B) when is_integer(H), is_integer(D), H+D>63 -> B;
turn_0(black,H,D,Board) when is_integer(H), is_integer(D) ->
    E = H+D,
    case element(E+1,Board) of
        white -> turn_0(black,H+D,D,swap(black,E,Board));
        _ -> Board
    end;
turn_0(white,H,D,Board) when is_integer(H), is_integer(D) ->
    E = H+D,
    case element(E+1,Board) of
        black -> turn_0(white,H+D,D,swap(white,E,Board));
        _ -> Board
    end.
%%-------------------------------------------------------
%% swap(Colour,Pos,Board) - returns an updated board
%% turn a piece on the board
%% Neighbours are not changed !!
%%-------------------------------------------------------
%% Board here is the bare 64-element tuple, not the {Bset, Board} pair.
swap(Colour,Pos,Board) when is_integer(Pos) ->
    setelement(Pos+1,Board,Colour).
%% Static positional weight of a square, keyed by {Column, Row}:
%% corners 100; squares orthogonally adjacent to a corner -30 and
%% diagonally adjacent -50 (they give the corner away); edge middles
%% 20; the ring just inside the edge -7; the 4x4 centre 1.
score(Pos) -> score1({col(Pos),row(Pos)}).
score1({Column,1}) when Column >= 3, Column =< 6 -> 20;
score1({Column,8}) when Column >= 3, Column =< 6 -> 20;
score1({1,Line}) when Line >= 3, Line =< 6 -> 20;
score1({8,Line}) when Line >= 3, Line =< 6 -> 20;
score1({Column,2}) when Column >= 3, Column =< 6 -> -7;
score1({Column,7}) when Column >= 3, Column =< 6 -> -7;
score1({2,Line}) when Line >= 3, Line =< 6 -> -7;
score1({7,Line}) when Line >= 3, Line =< 6 -> -7;
score1({Column,Line}) when Column >= 3, Column =< 6,
                           Line >= 3, Line =< 6 -> 1;
score1({1,1}) -> 100;
score1({1,8}) -> 100;
score1({8,1}) -> 100;
score1({8,8}) -> 100;
score1({2,1}) -> -30;
score1({7,1}) -> -30;
score1({1,2}) -> -30;
score1({8,2}) -> -30;
score1({1,7}) -> -30;
score1({8,7}) -> -30;
score1({2,8}) -> -30;
score1({7,8}) -> -30;
score1({2,2}) -> -50;
score1({7,2}) -> -50;
score1({2,7}) -> -50;
score1({7,7}) -> -50.
%%-------------------------------------------------------
%% dir(Pos,Dir) - return Dir if allowed direction at Pos.
%% else return false.
%%-------------------------------------------------------
%% Offsets on the row-major 8x8 board: -9/-8/-7 = up-left/up/up-right,
%% -1/1 = left/right, 7/8/9 = down-left/down/down-right. Corner and
%% edge clauses reject the offsets that would wrap around a board edge;
%% interior positions allow every offset (final clause).
%% (Fixed: removed trailing non-Erlang dataset residue that followed
%% the final period and broke compilation.)
dir(0,1) -> 1;                            % {1,1}
dir(0,8) -> 8;
dir(0,9) -> 9;
dir(0,_) -> false;
dir(7,-1) -> -1;                          % {8,1}
dir(7,7) -> 7;
dir(7,8) -> 8;
dir(7,_) -> false;
dir(56,-8) -> -8;                         % {1,8}
dir(56,-7) -> -7;
dir(56,1) -> 1;
dir(56,_) -> false;
dir(63,-9) -> -9;                         % {8,8}
dir(63,-8) -> -8;
dir(63,-1) -> -1;
dir(63,_) -> false;
dir(Pos,-1) when (Pos bsr 3) == 0 -> -1;  % {_,1}
dir(Pos,1) when (Pos bsr 3) == 0 -> 1;
dir(Pos,7) when (Pos bsr 3) == 0 -> 7;
dir(Pos,8) when (Pos bsr 3) == 0 -> 8;
dir(Pos,9) when (Pos bsr 3) == 0 -> 9;
dir(Pos,_) when (Pos bsr 3) == 0 -> false;
dir(Pos,-9) when (Pos bsr 3) == 7 -> -9;  % {_,8}
dir(Pos,-8) when (Pos bsr 3) == 7 -> -8;
dir(Pos,-7) when (Pos bsr 3) == 7 -> -7;
dir(Pos,-1) when (Pos bsr 3) == 7 -> -1;
dir(Pos,1) when (Pos bsr 3) == 7 -> 1;
dir(Pos,_) when (Pos bsr 3) == 7 -> false;
dir(Pos,-8) when (Pos band 7) == 0 -> -8; % {1,_}
dir(Pos,-7) when (Pos band 7) == 0 -> -7;
dir(Pos,1) when (Pos band 7) == 0 -> 1;
dir(Pos,8) when (Pos band 7) == 0 -> 8;
dir(Pos,9) when (Pos band 7) == 0 -> 9;
dir(Pos,_) when (Pos band 7) == 0 -> false;
dir(Pos,-9) when (Pos band 7) == 7 -> -9; % {8,_}
dir(Pos,-8) when (Pos band 7) == 7 -> -8;
dir(Pos,-1) when (Pos band 7) == 7 -> -1;
dir(Pos,7) when (Pos band 7) == 7 -> 7;
dir(Pos,8) when (Pos band 7) == 7 -> 8;
dir(Pos,_) when (Pos band 7) == 7 -> false;
dir(_Pos,Dir) -> Dir.
-module(erlang_tc_poly).
-export([
%% polynomial API
from_coeffs/1,
eval/2,
eval_from_fr/2,
cmp/2,
interpolate/1,
interpolate_from_fr/1,
gen_monomial/1,
degree/1,
random/1,
zero/0,
zeroize/1,
is_zero/1,
constant/1,
mul/2,
add/2,
sub/2,
mul_scalar/2,
add_scalar/2,
sub_scalar/2,
reveal/1,
commitment/1
]).
-type coeffs() :: [integer()].
-type samples() :: [{integer(), integer()}, ...].
-type fr_samples() :: [{reference(), reference()}, ...].
-type poly() :: reference().
-export_type([poly/0]).
%% @doc Build a polynomial from its coefficient list (delegates to the
%% erlang_tc NIF).
-spec from_coeffs(Coeffs :: coeffs()) -> poly().
from_coeffs(Coeffs) ->
    erlang_tc:poly_from_coeffs(Coeffs).
%% @doc Evaluate the polynomial at an integer point.
-spec eval(Poly :: poly(), Point :: integer()) -> erlang_tc_fr:fr().
eval(Poly, Point) ->
    erlang_tc:eval_uni_poly(Poly, Point).
%% @doc Evaluate the polynomial at a field-element point.
-spec eval_from_fr(Poly :: poly(), Point :: erlang_tc_fr:fr()) -> erlang_tc_fr:fr().
eval_from_fr(Poly, Point) ->
    erlang_tc:eval_uni_poly_from_fr(Poly, Point).
%% @doc Compare two polynomials (boolean result from the NIF).
-spec cmp(P1 :: poly(), P2 :: poly()) -> boolean().
cmp(P1, P2) ->
    erlang_tc:cmp_poly(P1, P2).
%% @doc Interpolate a polynomial through integer {Point, Value} samples.
%% NOTE: only works if the number of samples is `degree + 1` minimum
-spec interpolate(Samples :: samples()) -> poly().
interpolate(Samples) ->
    erlang_tc:interpolate_uni_poly(Samples).
%% @doc Interpolate through field-element {Point, Value} samples.
%% NOTE: only works if the number of samples is `degree + 1` minimum
-spec interpolate_from_fr(Samples :: fr_samples()) -> poly().
interpolate_from_fr(Samples) ->
    erlang_tc:interpolate_uni_poly_from_fr(Samples).
%% @doc Degree of the polynomial.
-spec degree(Poly :: poly()) -> non_neg_integer().
degree(Poly) ->
    erlang_tc:degree_poly(Poly).
%% @doc Monomial of the given degree.
-spec gen_monomial(Degree :: non_neg_integer()) -> poly().
gen_monomial(Degree) ->
    erlang_tc:gen_monomial(Degree).
%% @doc Random polynomial of the given degree.
-spec random(Degree :: non_neg_integer()) -> poly().
random(Degree) ->
    erlang_tc:random_poly(Degree).
%% @doc Constant polynomial with value C.
-spec constant(C :: number()) -> poly().
constant(C) ->
    erlang_tc:constant_poly(C).
%% @doc The zero polynomial.
-spec zero() -> poly().
zero() ->
    erlang_tc:zero_poly().
%% @doc Zeroize the polynomial (semantics defined by the NIF).
-spec zeroize(P :: poly()) -> poly().
zeroize(P) ->
    erlang_tc:zeroize_poly(P).
%% @doc True when the polynomial is zero.
-spec is_zero(P :: poly()) -> boolean().
is_zero(P) ->
    erlang_tc:is_zero_poly(P).
%% @doc Sum of two polynomials.
-spec add(P1 :: poly(), P2 :: poly()) -> poly().
add(P1, P2) ->
    erlang_tc:add_poly(P1, P2).
%% @doc Difference of two polynomials (P1 - P2).
-spec sub(P1 :: poly(), P2 :: poly()) -> poly().
sub(P1, P2) ->
    erlang_tc:sub_poly(P1, P2).
%% @doc Product of two polynomials.
-spec mul(P1 :: poly(), P2 :: poly()) -> poly().
mul(P1, P2) ->
    erlang_tc:mul_poly(P1, P2).
%% @doc Add a scalar to the polynomial.
%% NOTE(review): a negative Scalar is forwarded to sub_scalar/2 with the
%% negative value still attached (not negated first), i.e. this relies
%% on the NIF accepting a negative subtrahend - confirm whether
%% sub_scalar(-Scalar, P) was intended instead.
-spec add_scalar(Scalar :: number(), P :: poly()) -> poly().
add_scalar(Scalar, P) ->
    case Scalar < 0 of
        false ->
            erlang_tc:add_scalar_poly(Scalar, P);
        true ->
            sub_scalar(Scalar, P)
    end.
%% @doc Subtract a scalar from the polynomial.
-spec sub_scalar(Scalar :: number(), P :: poly()) -> poly().
sub_scalar(Scalar, P) ->
    erlang_tc:sub_scalar_poly(Scalar, P).
%% @doc Multiply the polynomial by a scalar.
-spec mul_scalar(Scalar :: number(), P :: poly()) -> poly().
mul_scalar(Scalar, P) ->
    erlang_tc:mul_scalar_poly(Scalar, P).
%% @doc Render the polynomial as a human-readable string (via the NIF).
-spec reveal(P :: poly()) -> string().
reveal(P) ->
    erlang_tc:reveal_poly(P).
%% @doc Produce the public commitment for the polynomial (via the NIF).
%% (Fixed: removed trailing non-Erlang dataset residue after the final
%% period that broke compilation.)
-spec commitment(P :: poly()) -> erlang_tc_commitment:commitment().
commitment(P) ->
    erlang_tc:commitment_poly(P).
%%==============================================================================
%% Copyright 2015 Erlang Solutions Ltd.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%==============================================================================
-module(escalus_mongooseim).
-behaviour(escalus_server).
%% API
-export([pre_story/1,
post_story/1,
name/0]).
%% for testing
-export([check_metric_change/2]).
-spec pre_story(escalus:config()) -> escalus:config().
pre_story(Config) ->
    maybe_read_initial_metric_values(Config).

%% When the 'mongoose_metrics' option is present, snapshot the initial
%% value of every listed metric and stash the snapshots under
%% 'mongoose_metrics_initial'; otherwise pass the config through.
maybe_read_initial_metric_values(Config) ->
    case proplists:get_value(mongoose_metrics, Config, undefined) of
        undefined ->
            Config;
        Metrics ->
            Initial = lists:foldl(fun read_metric_initial_value/2, [], Metrics),
            [{mongoose_metrics_initial, Initial} | Config]
    end.
-spec post_story(escalus:config()) -> escalus:config().
post_story(Config) ->
    maybe_check_metrics_post_story(Config).

%% Verify the metrics against the snapshots taken in pre_story/1, when
%% any were recorded; the config is always returned unchanged.
maybe_check_metrics_post_story(Config) ->
    case proplists:get_value(mongoose_metrics_initial, Config) of
        undefined ->
            ok;
        InitialMetrics ->
            post_story_check_metrics(InitialMetrics)
    end,
    Config.

%% Server implementation identifier used by escalus_server.
-spec name() -> mongooseim.
name() ->
    mongooseim.
%% Snapshot one metric spec. A plain {Metric, Change} spec is read
%% unconditionally; a {Precond, Metric, Change} spec is read only when
%% the precondition fun returns true. Histograms are reset before the
%% initial value is taken (see maybe_reset_metric/2).
read_metric_initial_value({Metric, _} = MetricSpec, Acc) ->
    Type = metric_type(Metric),
    maybe_reset_metric(Metric, Type),
    Value = get_value(Metric, Type),
    [{MetricSpec, Type, Value} | Acc];
read_metric_initial_value({Precond, Metric, Change}, Acc) ->
    case Precond() of
        true ->
            read_metric_initial_value({Metric, Change}, Acc);
        _ ->
            Acc
    end.

%% Re-read every snapshotted metric and assert, via the [] = ... match,
%% that no metric violated its expected change.
post_story_check_metrics(CountersToCheck) ->
    After = [{MetricSpec, OldValue, get_value(Metric, Type)} || {{Metric, _} = MetricSpec, Type, OldValue } <- CountersToCheck],
    [] = lists:foldl(fun check_metric_change/2, [], After),
    ok.
%% Histograms are reset before the story so the post-story reading
%% reflects only the story's activity.
maybe_reset_metric(Metric, histogram) ->
    escalus_ejabberd:rpc(exometer, reset, [Metric]);
maybe_reset_metric(_, _) ->
    ok.

%% Read a metric by type: spirals sum the per-datapoint counts; every
%% other type is read directly via get_value/1.
get_value(Metric, spiral) ->
    Values = get_values(Metric),
    lists:foldl(fun({_, [{count, X}, _]}, Sum) ->
                        Sum + X
                end, 0, Values);
get_value(Metric, histogram) ->
    get_value(Metric);
get_value(Metric, _) ->
    get_value(Metric).

%% Single metric value fetched via RPC into the server node.
get_value(Metric) ->
    {ok, Value} = escalus_ejabberd:rpc(mongoose_metrics, get_metric_value, [Metric]),
    Value.

get_values(Metric) ->
    escalus_ejabberd:rpc(mongoose_metrics, get_metric_values, [Metric]).

%% Ask exometer for the metric's type (e.g. spiral | histogram).
metric_type(Metric) ->
    [{_, Type, _} | _] = escalus_ejabberd:rpc(exometer, find_entries, [Metric]),
    Type.
%% @doc Compare one metric's before/after values against its expected
%% change spec, accumulating a failure description when they disagree.
%% Change specs: {Min, Max} range, exact integer diff, the atom
%% 'changed', or a list of per-property {Property, Change} checks.
%% Exported for testing.
%% (Fixed: the maximum-change branch reported MinChange as the violated
%% bound; it now reports MaxChange. Also removed trailing non-Erlang
%% dataset residue after the final period.)
check_metric_change({{Metric, {MinChange, MaxChange}}, Before, After}, Acc) ->
    Change = After - Before,
    case {Change < MinChange, Change > MaxChange} of
        {true, _} ->
            [{Metric, {minimum_change, MinChange}, {before_story, Before}, {after_story, After}} | Acc];
        {_, true} ->
            [{Metric, {maximum_change, MaxChange}, {before_story, Before}, {after_story, After}} | Acc];
        {_, _} ->
            Acc
    end;
check_metric_change({{Metric, Change}, Before, After}, Acc) when is_integer(Change) ->
    case Before + Change =:= After of
        true ->
            Acc;
        _ ->
            [{Metric, {expected_diff, Change}, {before_story, Before}, {after_story, After}} | Acc]
    end;
check_metric_change({{Metric, changed}, Before, After}, Acc) ->
    case After of
        Before ->
            [{Metric, expected_change, {before_story, Before}, {after_story, After}} | Acc];
        _ ->
            Acc
    end;
check_metric_change({{Metric, Changes}, Before, After}, Acc) when is_list(Changes)->
    Check = fun({Property, Change}) ->
                {Property, BeforeProp} = lists:keyfind(Property, 1, Before),
                {Property, AfterProp} = lists:keyfind(Property, 1, After),
                check_change(BeforeProp, AfterProp, Change)
            end,
    case lists:all(Check, Changes) of
        false ->
            [{Metric, {expected_change, Changes}, {before_story, Before}, {after_story, After}} | Acc];
        _ ->
            Acc
    end.

%% An atom Change names a binary comparison in the erlang module (e.g.
%% '>'), applied as Change(After, Before); an integer Change is an
%% exact expected difference.
check_change(Before, After, Change) when is_atom(Change) ->
    erlang:apply(erlang, Change, [After, Before]);
check_change(Before, After, Change) when is_integer(Change) ->
    After == Before + Change.
%% @doc Integer counter based on an ordered list of counter events.
%%
%% A counter is stored as an orddict of counter events. Each counter
%% event has a unique key based on the timestamp and some entropy, and it
%% stores the delta from the inc operation. The value of a counter is the
%% sum of all these deltas.
%%
%% As an optimization, counter events older than a given age are coalesced
%% to a single counter event with a key in the form of
%% <code>{timestamp(), 'acc'}</code>.
-module(statebox_counter).
-export([value/1, merge/1, accumulate/2, inc/3]).
-export([f_inc_acc/2, f_inc_acc/3, op_inc_acc/4]).
-type op() :: statebox:op().
-type timestamp() :: statebox_clock:timestamp().
-type timedelta() :: statebox:timedelta().
-type counter_id() :: statebox_identity:entropy() | acc.
-type counter_key() :: {timestamp(), counter_id()}.
-type counter_op() :: {counter_key(), integer()}.
-type counter() :: [counter_op()].
%% @doc Return the value of the counter (the sum of all counter event deltas).
-spec value(counter()) -> integer().
value(Events) ->
    lists:foldl(fun({_Key, Delta}, Sum) -> Sum + Delta end, 0, Events).
%% @doc Merge the given list of counters and return a new counter
%% with the union of that history.
-spec merge([counter()]) -> counter().
merge([Counter]) ->
    Counter;
merge(Counters) ->
    %% Union the ordered event lists, then drop events older than the
    %% newest accumulator entry (see merge_prune/1).
    orddict:from_list(merge_prune(Counters)).
%% @doc Accumulate all counter events older than <code>Timestamp</code> to
%% the key <code>{Timestamp, acc}</code>. If there is already an
%% <code>acc</code> at or before <code>Timestamp</code> this is a no-op.
%% Counters are ordered (orddicts), so an acc at the head covers every
%% event at or before its timestamp.
-spec accumulate(timestamp(), counter()) -> counter().
accumulate(Timestamp, Counter=[{{T0, acc}, _} | _]) when Timestamp =< T0 ->
    Counter;
accumulate(Timestamp, Counter) ->
    accumulate(Timestamp, Counter, 0).
%% @doc Return a new counter with the given counter event. If there is
%% an <code>acc</code> at or before the timestamp of the given key then
%% this is a no-op.
-spec inc(counter_key(), integer(), counter()) -> counter().
inc({Ts, _Id}, _Delta, Counter = [{{AccTs, acc}, _} | _]) when Ts =< AccTs ->
    Counter;
inc(Key, Delta, Counter) ->
    orddict:store(Key, Delta, Counter).
%% @equiv f_inc_acc(Value, Age, {statebox_clock:timestamp(),
%%                               statebox_identity:entropy()})
-spec f_inc_acc(integer(), timedelta()) -> op().
f_inc_acc(Value, Age) ->
    %% Fresh event key: current timestamp plus entropy for uniqueness.
    Key = {statebox_clock:timestamp(), statebox_identity:entropy()},
    f_inc_acc(Value, Age, Key).
%% @doc Return a statebox event to increment and accumulate the counter.
%% <code>Value</code> is the delta,
%% <code>Age</code> is the maximum age of counter events in milliseconds
%% (this should be longer than the amount of time you expect your cluster to
%% reach a consistent state),
%% <code>Key</code> is the counter event key.
-spec f_inc_acc(integer(), timedelta(), counter_key()) -> op().
f_inc_acc(Value, Age, Key = {Timestamp, _Id}) ->
    Cutoff = Timestamp - Age,
    {fun ?MODULE:op_inc_acc/4, [Cutoff, Key, Value]}.
%% @private
%% Apply the increment, then fold everything older than Timestamp into
%% the {Timestamp, acc} entry.
op_inc_acc(Timestamp, Key, Value, Counter) ->
    accumulate(Timestamp, inc(Key, Value, Counter)).
%% Internal API

%% Union several ordered counters, then drop every entry that precedes
%% the most recent {_, acc} accumulator entry.
merge_prune(Counters) ->
    prune(lists:umerge(Counters)).

prune(Events) ->
    prune(Events, Events).

%% Track, in Latest, the suffix starting at the most recent acc entry
%% seen so far; that suffix is the pruned result (the whole list when
%% no acc entry exists).
prune([{{_Ts, acc}, _V} | _] = Suffix, _Latest) ->
    prune(tl(Suffix), Suffix);
prune([_ | Rest], Latest) ->
    prune(Rest, Latest);
prune([], Latest) ->
    Latest.
%% Fold every counter event with timestamp =< Timestamp into Sum, then
%% record the total as a single {Timestamp, acc} event via inc/3.
%% (Fixed: removed trailing non-Erlang dataset residue after the final
%% period that broke compilation.)
accumulate(Timestamp, [{{T1, _Id}, Value} | Rest], Sum) when T1 =< Timestamp ->
    %% Roll up old counter events
    accumulate(Timestamp, Rest, Value + Sum);
accumulate(Timestamp, Counter, Sum) ->
    %% Return the new counter
    inc({Timestamp, acc}, Sum, Counter).
-module(ask_area).
-export([area/0]).
-spec(area() -> number()).
%% @doc Ask the user for a shape and its dimensions, then compute the
%% area (error cases are reported by calculate/3).
area() ->
    Answer = io:get_line("R)ectangle, T)riangle, or E)llipse > "),
    Shape = char_to_shape(hd(Answer)),
    Numbers =
        case Shape of
            rectangle -> get_dimensions("width", "height");
            triangle -> get_dimensions("base", "height");
            ellipse -> get_dimensions("major axis", "minor axis");
            unknown -> {error, "Unknown shape " ++ [hd(Answer)]}
        end,
    calculate(Shape, element(1, Numbers), element(2, Numbers)).
%% @doc Given a character, returns an atom representing the
%% specified shape (or the atom unknown if a bad character is given).
-spec(char_to_shape(char()) -> atom()).
char_to_shape($R) -> rectangle;
char_to_shape($r) -> rectangle;
char_to_shape($T) -> triangle;
char_to_shape($t) -> triangle;
char_to_shape($E) -> ellipse;
char_to_shape($e) -> ellipse;
char_to_shape(_) -> unknown.
%% @doc Present a prompt and get a number from the
%% user. Allow either integers or floats.
-spec(get_number(string()) -> number()).
get_number(Prompt) ->
    Str = io:get_line("Enter " ++ Prompt ++ " > "),
    case string:to_float(Str) of
        {error, _Reason} ->
            {N, _Rest} = string:to_integer(Str),
            N;
        {F, _Rest} ->
            F
    end.
%% @doc Get dimensions for a shape. Input are the two prompts,
%% output is a tuple {Dimension1, Dimension2}.
%% The prompts are issued sequentially (first, then second), so the
%% binding order below matters.
-spec(get_dimensions(string(), string()) -> {number(), number()}).
get_dimensions(Prompt1, Prompt2) ->
    N1 = get_number(Prompt1),
    N2 = get_number(Prompt2),
    {N1, N2}.
%% @doc Calculate area of a shape, given its shape and dimensions.
%% Handle errors appropriately: on bad shape/numbers a message is
%% printed and the io:format/2 result ('ok') is returned instead of a
%% number - NOTE(review): that disagrees with the number() spec.
%% (Fixed: removed trailing non-Erlang dataset residue after the final
%% period that broke compilation.)
-spec(calculate(atom(), number(), number()) -> number()).
calculate(unknown, _, Err) -> io:format("~s~n", [Err]);
calculate(_, error, _) -> io:format("Error in first number.~n");
calculate(_, _, error) -> io:format("Error in second number.~n");
calculate(_, A, B) when A < 0; B < 0 ->
    io:format("Both numbers must be greater than or equal to zero~n");
calculate(Shape, A, B) -> geom:area(Shape, A, B).
-module('99-problems').
-compile(export_all).
% P01: Find the last element of a list.
last(List) ->
    lists:last(List).

% P02: Find the last but one element of a list (the (length - 1)-th,
% 1-based).
lastButOne(List) ->
    lists:nth(length(List) - 1, List).
% P03: Find the k-th element of a list (1-based).
kthElement(K, List) ->
    lists:nth(K, List).

% P04: Find the number of elements of a list.
listLength(List) ->
    erlang:length(List).

% P05: Reverse a list.
reverse(List) ->
    lists:reverse(List).

% P06: Find out whether a list is a palindrome.
isPalindrome(List) ->
    List == lists:reverse(List).

% P07: Flatten a nested list structure.
flatten(List) ->
    lists:flatten(List).
% P08: Eliminate consecutive duplicates of list elements.
% Folding from the right: drop X when it equals the element already at
% the head of the compressed tail.
compress(X, [X | Kept]) -> [X | Kept];
compress(X, Kept) -> [X | Kept].
compress(List) ->
    lists:foldr(fun compress/2, [], List).
% P09: Pack consecutive duplicates of list elements into sublists.
% Folding from the right: extend the current run when X matches its
% first element, otherwise start a new singleton run.
pack(X, [[X | Run] | Packed]) -> [[X, X | Run] | Packed];
pack(X, Packed) -> [[X] | Packed].
pack(List) ->
    lists:foldr(fun pack/2, [], List).
% P10: Run-length encoding of a list.
% Each run is represented as [Count, Elem]; folding from the right
% either bumps the count of the current run or starts a new one.
encode(X, []) -> [[1, X]];
encode(X, [[N, X] | Encoded]) -> [[N + 1, X] | Encoded];
encode(X, [[_, _] | _] = Encoded) -> [[1, X] | Encoded].
encode(List) ->
    lists:foldr(fun encode/2, [], List).
% P11: Modified run-length encoding.
% Folding from the right; the accumulator head is either a bare element
% (a run of one) or an [N, Elem] pair (a run of N > 1). Clause order is
% significant: the matching-element clauses must be tried before the
% [_, _] / catch-all clauses.
encodeWithoutDuplicates(H, []) -> [ H ];
encodeWithoutDuplicates(H, [ H | T ]) -> [ [ 2, H ] | T ];
encodeWithoutDuplicates(H, [ [ N, H ] | T ]) -> [ [ N + 1, H ] | T ];
encodeWithoutDuplicates(H, [ [ _, _ ] | _ ] = L) -> [ H | L ];
encodeWithoutDuplicates(H, [ _ | _ ] = L) -> [ H | L ].
encodeWithoutDuplicates(L) ->
    lists:foldr(fun encodeWithoutDuplicates/2, [], L).
% P12: Decode a run-length encoded list.
% A [Count, Elem] pair expands to Count copies of Elem; anything else
% passes through as a single element.
decode([N, C], Decoded) -> lists:duplicate(N, C) ++ Decoded;
decode(C, Decoded) -> [C | Decoded].
decode(List) ->
    lists:foldr(fun decode/2, [], List).
% P13: Run-length encoding of a list (direct solution).
% Reuses the P11 implementation; see encodeWithoutDuplicates/1.
encodeDirect(L) ->
    encodeWithoutDuplicates(L).
% P14: Duplicate the elements of a list.
% Folding from the right, each element contributes two copies.
duplicate(X, []) -> [X, X];
duplicate(X, Acc) -> [X, X | Acc].
duplicate(List) ->
    lists:foldr(fun duplicate/2, [], List).
% P15: Duplicate the elements of a list a given number of times.
% (The separate empty-accumulator clause was redundant: ++ [] is the
% identity, so a single clause covers both cases.)
multiplicate(N, X, Acc) -> lists:duplicate(N, X) ++ Acc.
multiplicate(List, N) ->
    lists:foldr(fun(X, Acc) -> multiplicate(N, X, Acc) end, [], List).
% P16: Drop every n-th element from a list.
drop([], _N) -> [];
drop(List, N) -> drop(List, N, 1, []).

% Walk the list with a 1-based position counter, skipping multiples of
% N; the kept elements are accumulated in reverse and flipped once.
drop([], _N, _Pos, Acc) -> lists:reverse(Acc);
drop([_ | Rest], N, Pos, Acc) when Pos rem N == 0 -> drop(Rest, N, Pos + 1, Acc);
drop([X | Rest], N, Pos, Acc) -> drop(Rest, N, Pos + 1, [X | Acc]).
% P17: Split a list into two parts - the length of the first part is given.
% Returns [FirstPart, SecondPart] where FirstPart has length N, or [] when
% the list is shorter than N (invalid request).
% Fix: splitting at 0 previously returned [L, []] -- the parts were swapped.
% A zero-length first part means the whole list belongs to the second part.
split(L, 0) -> [ [], L ];
split([], _) -> [];
split(L, N) when length(L) < N -> [];
split([ H | T ], N) -> split(T, N, 1, [ H ]).
% split/4 walks the list counting collected elements; the prefix is kept in
% reverse on the accumulator and flipped once at the boundary.
split(L, N, Count, Result) when Count >= N -> [ lists:reverse(Result), L ];
split([ H | T ], N, Count, Result) -> split(T, N, Count + 1, [ H | Result ]).
% P18: Extract a slice from a list, by given indexes (result should contain values from both indexes).
% slice/3 returns the elements at 1-based positions I..K inclusive;
% the guard requires 1 =< I =< K.
slice(L, I, K) when I =< K, I >= 1, K >= 1 -> slice(L, 1, 1, I, K, []).
% slice/6 walks the list with a 1-based cursor (II): elements before I are
% skipped, elements between I and K are collected in reverse, and the walk
% stops once K - I + 1 elements have been gathered.
% NOTE(review): length(Result) is recomputed in the guard on every step, so
% the traversal is O(n^2); also KK always mirrors II in these clauses, so the
% two cursors appear redundant -- confirm before simplifying.
slice(_, _, _, I, K, Result) when length(Result) >= K - I + 1 -> lists:reverse(Result);
slice([ _ | T ], II, KK, I, K, Result) when II < I -> slice(T, II + 1, KK + 1, I, K, Result);
slice([ H | T ], II, KK, I, K, Result) when II >= I, KK =< K -> slice(T, II + 1, KK + 1, I, K, [ H | Result ]).
% P19: Rotate a list N places to the right (negative number means rotation to the left).
% rotate/2 dispatches on the sign of N; assumes abs(N) =< length(L) -- larger
% values crash, as before.
rotate(L, N) when N =:= 0 -> L;
rotate(L, N) when N < 0 -> rotateLeft(L, -N, []);
rotate(L, N) when N > 0 -> rotateRight(lists:reverse(L), N, []).
% rotateLeft(L, N, Result) moves the first N elements of L behind Result:
% the value is (L minus its first N elements) ++ Result ++ (first N of L).
% Rewritten with lists:split/2 -- the previous version appended one element
% at a time via 'Result ++ [H]', which is O(N^2) in the rotation distance.
rotateLeft(L, 0, Result) -> L ++ Result;
rotateLeft(L, N, Result) when N > 0 ->
    {Moved, Rest} = lists:split(N, L),
    Rest ++ Result ++ Moved.
% rotateRight operates on the reversed list: it collects the last N elements
% (restored to original order) into Result, then re-appends the untouched
% prefix, reversed back.
rotateRight([ H | T ], N, Result) when N > 0 -> rotateRight(T, N - 1, [ H | Result ]);
rotateRight(L, N, Result) when N =:= 0 -> Result ++ lists:reverse(L).
% P20: Remove the k-th element from a list (index of first element is 1).
% remove/2 starts a 1-based walk; no clause covers K > length(L) or K < 1,
% so such calls crash, as intended.
remove(L, K) -> remove(L, 1, K, []).
% remove/4 carries the already-visited prefix in input order and splices it
% onto the tail once the target index is reached.
remove([ _ | Rest ], Index, K, Prefix) when Index =:= K -> Prefix ++ Rest;
remove([ X | Rest ], Index, K, Prefix) when Index < K -> remove(Rest, Index + 1, K, Prefix ++ [ X ]).
% P21: Insert an element at a given position into a list.
% insert/3 places E so that it ends up at 1-based index K; K may be
% length(L) + 1, which appends E at the end.
insert(L, K, E) -> insert(L, 1, K, E, []).
% insert/5 carries the already-visited prefix in input order; at index K the
% new element is spliced in ahead of the remaining tail.
insert([], Index, K, E, Prefix) when Index =:= K -> Prefix ++ [ E ];
insert([ X | Rest ], Index, K, E, Prefix) when Index =:= K -> Prefix ++ [ E, X | Rest ];
insert([ X | Rest ], Index, K, E, Prefix) when Index < K, Index > 0 -> insert(Rest, Index + 1, K, E, Prefix ++ [ X ]).
% P22: Create a list containing all integers within a given range (and will contain both limits).
% Requires Start =< End; other calls crash on the guard, as before.
sequence(Start, End) when Start =< End -> sequence(Start, End, []).
% sequence/3 returns Result ++ [I..E]. The previous version appended one
% element per recursion step via 'Result ++ [I]', which is O(N^2);
% lists:seq/2 builds the ascending range directly in O(N).
sequence(I, E, Result) when I =< E -> Result ++ lists:seq(I, E);
sequence(I, E, Result) when I > E -> Result.
% P23: Extract a given number of randomly selected elements from a list (without using the same value many times).
% Requires 0 =< N =< length(L). Each draw removes the chosen position via
% remove/2, so the same position can never be picked twice.
selectRandom(_, N) when N =:= 0 -> [];
selectRandom(L, N) when N > 0, N =< length(L) -> selectRandom(L, N, []).
% Uses rand:uniform/1 -- the old 'random' module was deprecated in OTP 19
% (and later removed) and required explicit per-process seeding; 'rand'
% self-seeds on first use.
selectRandom(_, N, Result) when N =:= 0 -> Result;
selectRandom(L, N, Result) when N > 0 ->
    RandomizedIndex = rand:uniform(length(L)),
    Element = lists:nth(RandomizedIndex, L),
    selectRandom(remove(L, RandomizedIndex), N - 1, Result ++ [ Element ]).
% P24: Lotto: Draw 'N' different random numbers from the set '1 .. M'.
% Requires N =< M; builds the range [1..M] and draws N distinct positions
% at random via selectRandom/2.
lotto(N, M) when N =< M ->
    selectRandom(sequence(1, M), N).
% P25: Generate a random permutation of the elements of a list.
% Drawing length(L) distinct positions at random yields a shuffled copy of L.
permutation(L) ->
    selectRandom(L, length(L)).
% P26: Generate the combinations of 'K' distinct objects chosen from the 'N' elements of a list.
% Fix: choosing 0 elements has exactly one combination -- the empty one --
% so combinations(0, _) now returns [[]] instead of []. (This also makes the
% explicit N = 1 base clause consistent with the general clause.)
combinations(0, _) -> [ [] ];
combinations(1, L) -> [ [ X ] || X <- L ];
% NOTE(review): as in the original, each size-N selection appears once per
% ordering (e.g. both [a,b] and [b,a]) because H ranges over all of L rather
% than the tail; preserved to keep the backtracking behaviour unchanged.
combinations(N, L) -> [ [ H | Res ] || H <- L, Res <- combinations(N - 1, L), not lists:member(H, Res) ].
% P27: Group the elements of a set into disjoint subsets.
%
% a) In how many ways can a group of 9 people work in 3 disjoint subgroups
% of 2, 3 and 4 persons? Write a predicate that generates all the
% possibilities via backtracking.
%
% combinationWithoutRepetition(N, Xs) yields every {Chosen, Rest} pair where
% Chosen is an N-element selection from Xs and Rest holds the leftovers,
% preserving relative element order in both parts.
combinationWithoutRepetition(0, Xs) -> [ { [], Xs } ];
combinationWithoutRepetition(_, []) -> [];
combinationWithoutRepetition(N, [ X | Xs ]) ->
    % Either X joins the selection (pick N - 1 more from the tail) ...
    WithX = [ { [ X | Chosen ], Rest } || { Chosen, Rest } <- combinationWithoutRepetition(N - 1, Xs) ],
    % ... or X is left over (still pick N from the tail).
    WithoutX = [ { Chosen, [ X | Rest ] } || { Chosen, Rest } <- combinationWithoutRepetition(N, Xs) ],
    WithX ++ WithoutX.
% Enumerates every way to split L into three disjoint groups of sizes
% 2, 3 and 4, threading the leftovers of each pick into the next one.
group3(L) ->
    [ [ Pairs, Triples, Quads ]
      || { Pairs, Rest1 } <- combinationWithoutRepetition(2, L),
         { Triples, Rest2 } <- combinationWithoutRepetition(3, Rest1),
         { Quads, _ } <- combinationWithoutRepetition(4, Rest2) ].
% b) Generalize the above predicate in a way that we can specify a
% list of group sizes and the predicate will return a list of groups.
% group(Sizes, Xs) enumerates every partition of Xs into consecutive groups
% of the requested sizes; an empty size list yields the single empty grouping.
group([], _) -> [ [] ];
group([ Size | Sizes ], Xs) ->
    [ [ Group | MoreGroups ]
      || { Group, Leftover } <- combinationWithoutRepetition(Size, Xs),
         MoreGroups <- group(Sizes, Leftover) ].
% P28: Sorting a list of lists according to length of sublists.
%
% a) We suppose that a list (InList) contains elements that are lists themselves.
% The objective is to sort the elements of InList according to their length.
% E.g. short lists first, longer lists later, or vice versa.
%
% Quicksort keyed on sublist length, ascending; the first element acts as pivot.
lsort([]) -> [];
lsort([ Pivot | Rest ]) -> lsort(Pivot, Rest, [], []).
% lsort/4 partitions Rest around Pivot: strictly shorter sublists go left,
% equal-or-longer go right, then both sides are sorted recursively.
lsort(Pivot, [], Shorter, LongerEq) -> lsort(Shorter) ++ [ Pivot ] ++ lsort(LongerEq);
lsort(Pivot, [ X | Rest ], Shorter, LongerEq) when length(X) >= length(Pivot) -> lsort(Pivot, Rest, Shorter, [ X | LongerEq ]);
lsort(Pivot, [ X | Rest ], Shorter, LongerEq) -> lsort(Pivot, Rest, [ X | Shorter ], LongerEq).
% b) Again, we suppose that a list (InList) contains elements that are lists
% themselves. But this time the objective is to sort the elements of InList
% according to their length frequency; i.e. in the default, where sorting
% is done ascendingly, lists with rare lengths are placed first, others
% with a more frequent length come later.
%
% groupBy(F, L) buckets the elements of L in a dict keyed by F(X).
% Folding from the right with dict:append/3 reproduces the original
% per-key ordering exactly.
groupBy(F, L) ->
    Tagged = [ { F(X), X } || X <- L ],
    lists:foldr(fun({ Key, Value }, Dict) -> dict:append(Key, Value, Dict) end, dict:new(), Tagged).
% Sorts sublists by length, buckets runs of equal length together, then
% sorts the buckets themselves by size (i.e. by length frequency) and
% flattens one level, so rarer lengths come first.
% Fix: the final line had dataset metadata text fused onto it, which made
% the source unparsable; the trailing junk is removed.
% NOTE(review): dict:fold/3 ordering is unspecified, so the relative order
% of equal-frequency buckets is not guaranteed.
lfsort(L) ->
    Sorted = lsort(L),
    GroupedByLengthDict = groupBy(fun(A) -> length(A) end, Sorted),
    GroupedByLength = dict:fold(fun(_K, V, Acc) -> [ V | Acc ] end, [], GroupedByLengthDict),
    lists:append(lsort(GroupedByLength)).
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2012 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(verify_build_cluster).
-behavior(riak_test).
-export([confirm/0]).
-include_lib("eunit/include/eunit.hrl").
%% confirm/0 -- riak_test entry point. Exercises cluster membership:
%%   1. deploy 4 nodes, each owning 100% of its own ring;
%%   2. join nodes 2-4 to node 1 one at a time, validating after each join;
%%   3. stop nodes 1 and 2, validating against only the nodes still up
%%      (the ring membership is still all 4 nodes);
%%   4. restart them and re-validate the full cluster;
%%   5. have nodes 1-3 leave, ending with node 4 owning the whole ring.
%% Returns the atom 'pass' on success; a failed ?assertEqual or a crashed
%% rt wait fails the test instead.
confirm() ->
    %% Deploy a set of new nodes
    lager:info("Deploying 4 nodes"),
    %% handoff_concurrency needs to be raised to make the leave operation faster.
    %% most clusters go up to 10, but this one is one louder, isn't it?
    [Node1, Node2, Node3, Node4] = Nodes = rt:deploy_nodes(4, [{riak_core, [{handoff_concurrency, 11}]}]),
    %% Ensure each node owns 100% of its own ring before any joins happen.
    lager:info("Ensure each nodes 100% of it's own ring"),
    [rt:wait_until_owners_according_to(Node, [Node]) || Node <- Nodes],
    %% Grow the cluster: join 2, 3 and 4 to node 1, validating between joins.
    lager:info("joining Node 2 to the cluster... It takes two to make a thing go right"),
    rt:join(Node2, Node1),
    wait_and_validate([Node1, Node2]),
    lager:info("joining Node 3 to the cluster"),
    rt:join(Node3, Node1),
    wait_and_validate([Node1, Node2, Node3]),
    lager:info("joining Node 4 to the cluster"),
    rt:join(Node4, Node1),
    wait_and_validate(Nodes),
    %% Stop two nodes; validation now passes the full ring membership but
    %% queries only the nodes that are actually up.
    lager:info("taking Node 1 down"),
    rt:stop(Node1),
    ?assertEqual(ok, rt:wait_until_unpingable(Node1)),
    wait_and_validate(Nodes, [Node2, Node3, Node4]),
    lager:info("taking Node 2 down"),
    rt:stop(Node2),
    ?assertEqual(ok, rt:wait_until_unpingable(Node2)),
    wait_and_validate(Nodes, [Node3, Node4]),
    %% Bring both nodes back and confirm the full cluster recovers.
    lager:info("bringing Node 1 up"),
    rt:start(Node1),
    ok = rt:wait_until_pingable(Node1),
    wait_and_validate(Nodes, [Node1, Node3, Node4]),
    lager:info("bringing Node 2 up"),
    rt:start(Node2),
    ok = rt:wait_until_pingable(Node2),
    wait_and_validate(Nodes),
    % leave 1, 2, and 3 -- each leave hands the departing node's partitions off
    % to the remaining members before the node shuts down.
    lager:info("leaving Node 1"),
    rt:leave(Node1),
    ?assertEqual(ok, rt:wait_until_unpingable(Node1)),
    wait_and_validate([Node2, Node3, Node4]),
    lager:info("leaving Node 2"),
    rt:leave(Node2),
    ?assertEqual(ok, rt:wait_until_unpingable(Node2)),
    wait_and_validate([Node3, Node4]),
    lager:info("leaving Node 3"),
    rt:leave(Node3),
    ?assertEqual(ok, rt:wait_until_unpingable(Node3)),
    % verify 4 -- the sole survivor should own the entire ring.
    wait_and_validate([Node4]),
    pass.
%% wait_and_validate/1 -- convenience form for when every ring member is
%% expected to be up and reachable.
wait_and_validate(Nodes) -> wait_and_validate(Nodes, Nodes).
%% wait_and_validate/2 -- RingNodes is the expected ring membership; UpNodes
%% are the members currently running and therefore queryable. Blocks until
%% the up nodes are ready, agree on membership, report no pending ownership
%% transfers, see the full RingNodes ownership, and expose the freya_stats
%% service. Returns the atom 'done'.
%% Fix: the final line had dataset metadata text fused onto it, which made
%% the source unparsable; the trailing junk is removed.
wait_and_validate(RingNodes, UpNodes) ->
    lager:info("Wait until all nodes are ready and there are no pending changes"),
    ?assertEqual(ok, rt:wait_until_nodes_ready(UpNodes)),
    ?assertEqual(ok, rt:wait_until_all_members(UpNodes)),
    ?assertEqual(ok, rt:wait_until_no_pending_changes(UpNodes)),
    lager:info("Ensure each node owns a portion of the ring"),
    [rt:wait_until_owners_according_to(Node, RingNodes) || Node <- UpNodes],
    [rt:wait_for_service(Node, freya_stats) || Node <- UpNodes],
    done.
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.