code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
%
% reia_class: Build modules conforming to the gen_server behavior from Reia classes
% Copyright (C)2008 <NAME>
%
% Redistribution is permitted under the MIT license. See LICENSE for details.
%
-module(reia_class).
-export([build/1, build/2, ast/1, inst/3, call/2, cast/2]).
%% Convert a Reia class definition into a Reia module which conforms to the
%% gen_server behavior, then load it into the code server
%% @doc Build a gen_server module from a Reia class form and load it.
%% 'Object' is the root of the hierarchy and has no ancestor, so it skips
%% the inheritance machinery entirely.
build({class, _Line, 'Object', _, _Methods} = Class) ->
    % Object gets special case behavior as it has no ancestor
    reia_module:build(ast(Class)).

%% @doc Build a class with inheritance: fold the ancestor chain into a set of
%% inherited methods, inject the obj.class() reflection method, merge with
%% this class's own methods, then compile. OrigExprs is stored in the
%% generated module's 'code' attribute (read back by
%% build_parent_from_ancestry/1 when this class is later used as an ancestor).
build({class, Line, Name, Ancestor, Methods}, OrigExprs) ->
    % Generate the methods which are derived from this class's ancestors
    ParentMethods = build_inherited_methods(build_parent_from_ancestry(Ancestor)),
    % Generate the obj.class() method
    ClassMethod = parse_function("class({}, nil) -> {constant, '" ++ atom_to_list(Name) ++ "'}."),
    % Merge this class's methods with its parent
    FinalMethods = merge_with_parent([ClassMethod|Methods], ParentMethods),
    reia_module:build(ast({class, Line, Name, Ancestor, FinalMethods}), [{code, [OrigExprs]}]).
% Walk the ancestors living in the code server, combining them into a single
% unified parent class
%% An atom ancestor names an already-compiled module: ensure it is loaded and
%% pull the original class form out of its 'code' attribute (written by
%% build/2), then recurse on that form.
build_parent_from_ancestry(AncestorName) when is_atom(AncestorName) ->
    case code:ensure_loaded(AncestorName) of
        {module, _} -> void;
        Error -> throw(Error)
    end,
    AncestorClass = case [Code || {code, Code} <- AncestorName:module_info(attributes)] of
        [[Class]] -> Class;
        _ -> throw({error, {AncestorName, "lacks a code attribute (not a Reia module?)"}})
    end,
    build_parent_from_ancestry(AncestorClass);
%% 'Object' terminates the recursion: start from an empty method dict.
build_parent_from_ancestry({class, _Line, {constant, _, 'Object'}, Methods}) ->
    merge_ancestor_methods(dict:new(), 'Object', Methods);
%% A 4-tuple class form has no explicit ancestor; default it to 'Object'.
build_parent_from_ancestry({class, Line, Name, Methods}) ->
    build_parent_from_ancestry({class, Line, Name, {constant, Line, 'Object'}, Methods});
%% Merge this class's methods over those accumulated from its ancestors,
%% tagging each stored method with the class it came from.
build_parent_from_ancestry({class, _Line, {constant, _, Name}, {constant, _, AncestorName}, Methods}) ->
    merge_ancestor_methods(build_parent_from_ancestry(AncestorName), Name, Methods).
%% Fold a class's methods into the inherited-method dict. Later (more
%% derived) classes overwrite entries stored by their ancestors; each entry
%% is tagged {method, ClassName, Function} to remember its origin.
merge_ancestor_methods(AncestorMethods, ClassName, Methods) ->
    Store = fun({function, _, {identifier, _, MethodName}, _, _, _} = Fun, Acc) ->
                dict:store(MethodName, {method, ClassName, Fun}, Acc)
            end,
    lists:foldl(Store, AncestorMethods, Methods).
%% Overlay this class's own methods onto the inherited ones (the class's
%% definitions win on name collision) and return the merged functions as a
%% flat list, in dict order.
merge_with_parent(Methods, AncestorMethods) ->
    Override = fun({function, _, Name, _, _} = Fun, Dict) ->
                   dict:store(Name, Fun, Dict)
               end,
    Merged = lists:foldl(Override, AncestorMethods, Methods),
    [Fun || {_Name, Fun} <- dict:to_list(Merged)].
%% Compile the inherited-method dict down to plain Erlang functions, then
%% re-tag each compiled function with the ancestor class it originally came
%% from (looked up from the pre-compilation dict by name).
build_inherited_methods(MethodsDict) ->
    Methods = compile_inherited_methods(MethodsDict),
    lists:foldl(
        fun({function, _, Name, _, _} = Function, Dict) ->
            {method, Ancestor, _} = dict:fetch(Name, MethodsDict),
            dict:store(Name, {method, Ancestor, Function}, Dict)
        end,
        dict:new(),
        Methods
    ).

%% Run the inherited methods through the Reia compiler inside a throwaway
%% 'base_class' wrapper, skipping the 'dynamic' pass, and return the
%% resulting compiled functions.
compile_inherited_methods(MethodsDict) ->
    Methods = [Method || {_, {_, _, Method}} <- dict:to_list(MethodsDict)],
    Class = {class, 1, {constant, 1, 'base_class'}, Methods},
    Passes = [Pass || Pass <- reia_compiler:default_passes(), Pass /= dynamic],
    [{class, _, _, _, Functions}] = reia_compiler:compile([Class], Passes),
    Functions.
%% Compile a Reia class to an Erlang module
%% Produces the {module, Line, Name, Functions} form consumed by
%% reia_module:build/1,2; any other input yields an error tuple.
ast({class, Line, Name, _Ancestor, Methods}) ->
    Functions2 = build_functions(Name, Methods),
    {module, Line, Name, Functions2};
ast(_) ->
    {error, "invalid class"}.
%% Create an instance of a given class, passing the arguments on to its
%% initialize function
inst(Class, Arguments, Block) ->
    Obj = Class:spawn_link(),
    call(Obj, {'initialize', Arguments, Block}),
    Obj.

%% Call a method on a Reia object at the given Pid
%% Unwraps {ok, Value} replies to the bare value; {error, Error} replies are
%% re-thrown in the calling process.
call({object, {Pid, _Class}}, Request) ->
    call(Pid, Request);
call(Pid, {_Method, _Arguments, _Block} = Request) when is_pid(Pid) ->
    case gen_server:call(Pid, Request) of
        {ok, Value} -> Value;
        {error, Error} -> throw(Error)
    end.

%% Cast (fire-and-forget) a method to a given object; always returns nil.
cast({object, {Pid, _Class}}, Request) ->
    cast(Pid, Request);
cast(Pid, {_Method, _Arguments, _Block} = Request) when is_pid(Pid) ->
    ok = gen_server:cast(Pid, Request),
    nil.
%% Process incoming methods and build the functions for the resulting module
build_functions(Module, Methods) ->
    lists:flatten([
        start_functions(Module),
        default_functions(),
        method_functions(Methods)
    ]).

%% Build a dispatch_method function and functions for each of the mangled methods
method_functions(Methods) ->
    % Decompose the function clauses for methods into handle_call clauses
    {Clauses, Functions} = process_methods(Methods),
    [build_method_dispatch_function(Clauses)|Functions].

%% Process methods into a list of clauses for dispatch_method and functions
%% with mangled names for each method
process_methods(Methods) ->
    {NewClauses, NewFunctions} = lists:foldl(fun(Method, {Clauses, Functions}) ->
        {Name, Function} = extract_mangled_name_and_function(Method),
        {Clause, Function2} = build_dispatcher_clause_and_function(Function, Name),
        {[Clause|Clauses], [Function2|Functions]}
    end, {[],[]}, Methods),
    % The fold prepends; reverse both lists to restore source order.
    {lists:reverse(NewClauses), lists:reverse(NewFunctions)}.

%% Extract a method into its dispatch_method clause and mangled form
%% Inherited methods carry their ancestor into the mangled name; local
%% methods are mangled on the name alone.
extract_mangled_name_and_function({method, Ancestor, {function, _, Name, _, _} = Function}) ->
    {reia_mangle:method(Ancestor, Name), Function};
extract_mangled_name_and_function({function, _, Name, _, _} = Function) ->
    {reia_mangle:method(Name), Function}.
%% Construct the new function and dispatcher clause
%% The mangled function always has arity 4 — (Arguments, Block, Caller,
%% InstanceVariables) — matching the call emitted by dispatcher_clause/3.
build_dispatcher_clause_and_function({function, Line, Name, _Arity, Clauses}, MangledName) ->
    DispatcherClause = dispatcher_clause(Name, MangledName, Line),
    Function = {function, Line, MangledName, 4,
        [process_method_clause(Clause) || Clause <- Clauses]
    },
    {DispatcherClause, Function}.
%% Generate a clause for dispatch_method which thunks from a real method name
%% to the given mangled name. The clause matches
%% {RealName, arguments, block} plus the caller and instance variables, and
%% forwards all four values to the mangled arity-4 function.
dispatcher_clause(RealName, MangledName, Line) ->
    Arguments = {var, Line, 'arguments'},
    Block = {var, Line, 'block'},
    Caller = {var, Line, 'caller'},
    IVars = {var, Line, 'instance_variables'},
    Pattern = [{tuple, Line, [{atom, Line, RealName}, Arguments, Block]},
               Caller,
               IVars],
    Forward = {call, Line, {atom, Line, MangledName},
               [Arguments, Block, Caller, IVars]},
    {clause, Line, Pattern, [], [Forward]}.
%% Build a clause for dispatch_method from the original clauses for a method
%% A nullary clause is first normalized to an empty argument tuple and a nil
%% block, then handled by the general case.
process_method_clause({clause, Line, [], [], Expressions}) ->
    process_method_clause({clause, Line, [{tuple, Line, []}, nil], [], Expressions});
process_method_clause({clause, Line, [{tuple, _, Arguments}, Block], [], Expressions}) ->
    {clause, Line, [
        argument_list_cons(Arguments, Line),
        Block,
        {var, Line, '_caller'},
        {var, Line, '___instance_variables_0'}
    ], [], process_return_value(Line, Expressions)}.
%% Convert a method's return value into a gen_server reply
%% An empty body is treated as returning nil.
process_return_value(Line, []) ->
    process_return_value(Line, [{atom, Line, 'nil'}]);
process_return_value(Line, Expressions) ->
    [Result|Expressions2] = lists:reverse(Expressions),
    % Bind the final expression so it can be referenced inside the reply tuple.
    Result2 = {match, Line, {var, Line, '__method_return_value'}, Result},
    % {reply, {ok, Value}, FinalInstanceVariables}
    Result3 = {tuple, Line, [
        {atom, Line, reply},
        {tuple, Line, [{atom, Line, ok}, {var, Line, '__method_return_value'}]},
        {var, Line, final_ivars(Expressions)}
    ]},
    lists:reverse([Result3,Result2|Expressions2]).

%% Find the name of the last SSA-transformed ___instance_variables variable
%% present in a given function.
final_ivars(Expressions) ->
    {ok, Newest, _} = reia_visitor:transform(Expressions, 0, fun newest_ivars/2),
    Name = io_lib:format("~s~w", ["___instance_variables_", Newest]),
    list_to_atom(lists:flatten(Name)).
%% Locate the number of the last SSA transformation of the
%% __instance_variables variable in a given function. Visitor callback for
%% reia_visitor:transform/3: keeps the highest version number seen on any
%% ___instance_variables_N variable, never descending into var nodes.
newest_ivars(Newest, {var, _Line, Name} = Node) ->
    case atom_to_list(Name) of
        "___instance_variables_" ++ VersionStr ->
            {stop, max(Newest, list_to_integer(VersionStr)), Node};
        _ ->
            {stop, Newest, Node}
    end;
newest_ivars(Newest, Node) ->
    {walk, Newest, Node}.
%% Generate the abstract cons-list form for an argument list, with every
%% node annotated with the given line number.
argument_list_cons(Elements, Line) ->
    lists:foldr(fun(Element, Tail) -> {cons, Line, Element, Tail} end,
                {nil, Line},
                Elements).
%% Generate Erlang forms for the class's method dispatch function
build_method_dispatch_function(Clauses) ->
    % Add a clause which thunks to _ if no method responds
    CatchallFunc = "dispatch_method({Method, Args, Block}, Caller, State) -> dispatch_method({'_', [Method, Args], Block}, Caller, State).",
    % parse_function yields {function, _, _, _, Clauses}; only the clause
    % list is appended after the per-method dispatcher clauses.
    {function, _, _, _, CatchallClause} = parse_function(CatchallFunc),
    {function, 1, dispatch_method, 3, Clauses ++ CatchallClause}.

%% These functions are required for the generated modules to implement the
%% gen_server behavior
default_functions() ->
    [parse_function(Function) || Function <- [
        "init([]) -> {ok, dict:new()}.",
        "handle_call(Request, From, State) -> try dispatch_method(Request, From, State) catch throw:Error -> {reply, {error, Error}, State} end.",
        "handle_cast(Request, State) -> {reply, _, NewState} = dispatch_method(Request, cast, State), {noreply, NewState}.",
        "handle_info(Message, State) -> {reply, _, NewState} = dispatch_method({'handle_message', [Message], nil}, unknown, State), {noreply, NewState}.",
        "terminate(_Reason, _State) -> ok.",
        "code_change(_OldVsn, State, _Extra) -> {ok, State}."
    ]].
%% Functions for starting a new object
start_functions(Module) ->
    [start_function(Module, Function) || Function <- [{"spawn", "start"}, {"spawn_link", "start_link"}]].

%% Build one start wrapper: the Reia-side name delegates to the matching
%% gen_server start function and wraps the Pid as {object, {Pid, Class}}.
start_function(Module, {ReiaFunc, OtpFunc}) ->
    String = [
        ReiaFunc, "() -> {ok, Pid} = gen_server:", OtpFunc, "('", Module, "', [], [])," ++
        "{object, {Pid, '", Module, "'}}."
    ],
    parse_function(lists:concat(String)).
%% Parse a single Erlang function definition from a source string, returning
%% its abstract form. Crashes with a badmatch if the string does not scan or
%% parse as a complete form.
%% (Fix: stray dataset metadata fused onto the final line has been removed.)
parse_function(String) ->
    {ok, Scanned, _EndLocation} = erl_scan:string(String),
    {ok, Form} = erl_parse:parse_form(Scanned),
    Form.
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
%% --------------------------------------------------
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%% --------------------------------------------------
%% File : parse_trans_codegen.erl
%% @author : <NAME> <<EMAIL>>
%% @end
%%-------------------------------------------------------------------
%% @doc Parse transform for code generation pseudo functions
%%
%% <p>...</p>
%%
%% @end
-module(parse_trans_codegen).
-export([parse_transform/2]).
-export([format_error/1]).
%% @spec (Forms, Options) -> NewForms
%%
%% @doc
%% Searches for calls to pseudo functions in the module `codegen',
%% and converts the corresponding erlang code to a data structure
%% representing the abstract form of that code.
%%
%% The purpose of these functions is to let the programmer write
%% the actual code that is to be generated, rather than manually
%% writing abstract forms, which is more error prone and cannot be
%% checked by the compiler until the generated module is compiled.
%%
%% Supported functions:
%%
%% <h2>gen_function/2</h2>
%%
%% Usage: `codegen:gen_function(Name, Fun)'
%%
%% Substitutes the abstract code for a function with name `Name'
%% and the same behaviour as `Fun'.
%%
%% `Fun' can either be a anonymous `fun', which is then converted to
%% a named function, or it can be an `implicit fun', e.g.
%% `fun is_member/2'. In the latter case, the referenced function is fetched
%% and converted to an abstract form representation. It is also renamed
%% so that the generated function has the name `Name'.
%% <p/>
%% Another alternative is to wrap a fun inside a list comprehension, e.g.
%% <pre>
%% f(Name, L) ->
%% codegen:gen_function(
%% Name,
%% [ fun({'$var',X}) ->
%% {'$var', Y}
%% end || {X, Y} &lt;- L ]).
%% </pre>
%% <p/>
%% Calling the above with `f(foo, [{1,a},{2,b},{3,c}])' will result in
%% generated code corresponding to:
%% <pre>
%% foo(1) -> a;
%% foo(2) -> b;
%% foo(3) -> c.
%% </pre>
%%
%% <h2>gen_functions/1</h2>
%%
%% Takes a list of `{Name, Fun}' tuples and produces a list of abstract
%% data objects, just as if one had written
%% `[codegen:gen_function(N1,F1),codegen:gen_function(N2,F2),...]'.
%%
%% <h2>exprs/1</h2>
%%
%% Usage: `codegen:exprs(Fun)'
%%
%% `Fun' is either an anonymous function, or an implicit fun with only one
%% function clause. This "function" takes the body of the fun and produces
%% a data type representing the abstract form of the list of expressions in
%% the body. The arguments of the function clause are ignored, but can be
%% used to ensure that all necessary variables are known to the compiler.
%%
%% <h2>gen_module/3</h2>
%%
%% Generates abstract forms for a complete module definition.
%%
%% Usage: `codegen:gen_module(ModuleName, Exports, Functions)'
%%
%% `ModuleName' is either an atom or a <code>{'$var', V}</code> reference.
%%
%% `Exports' is a list of `{Function, Arity}' tuples.
%%
%% `Functions' is a list of `{Name, Fun}' tuples analogous to that for
%% `gen_functions/1'.
%%
%% <h2>Variable substitution</h2>
%%
%% It is possible to do some limited expansion (importing a value
%% bound at compile-time), using the construct <code>{'$var', V}</code>, where
%% `V' is a bound variable in the scope of the call to `gen_function/2'.
%%
%% Example:
%% <pre>
%% gen(Name, X) ->
%% codegen:gen_function(Name, fun(L) -> lists:member({'$var',X}, L) end).
%% </pre>
%%
%% After transformation, calling `gen(contains_17, 17)' will yield the
%% abstract form corresponding to:
%% <pre>
%% contains_17(L) ->
%% lists:member(17, L).
%% </pre>
%%
%% <h2>Form substitution</h2>
%%
%% It is possible to inject abstract forms, using the construct
%% <code>{'$form', F}</code>, where `F' is bound to a parsed form in
%% the scope of the call to `gen_function/2'.
%%
%% Example:
%% <pre>
%% gen(Name, F) ->
%% codegen:gen_function(Name, fun(X) -> X =:= {'$form',F} end).
%% </pre>
%%
%% After transformation, calling `gen(is_foo, {atom,0,foo})' will yield the
%% abstract form corresponding to:
%% <pre>
%% is_foo(X) ->
%% X =:= foo.
%% </pre>
%% @end
%%
%% Depth-first transform over the module's forms: each call to the
%% pseudo-module 'codegen' is rewritten into data representing abstract
%% forms (see the module documentation above for the supported calls).
%% The untransformed form list is carried as the accumulator so implicit
%% funs can be resolved against the original module.
parse_transform(Forms, Options) ->
    Context = parse_trans:initial_context(Forms, Options),
    {NewForms, _} =
        parse_trans:do_depth_first(
            fun xform_fun/4, _Acc = Forms, Forms, Context),
    parse_trans:return(parse_trans:revert(NewForms), Context).
%% Visitor for parse_trans:do_depth_first/4. Only 'application' nodes are
%% inspected; anything that is not a recognized codegen:* call passes
%% through unchanged. Acc is the original form list (never modified here).
xform_fun(application, Form, _Ctxt, Acc) ->
    MFA = erl_syntax_lib:analyze_application(Form),
    Anno = erl_syntax:get_pos(Form),
    L = erl_anno:line(Anno),
    case MFA of
        {codegen, {gen_module, 3}} ->
            [NameF, ExportsF, FunsF] =
                erl_syntax:application_arguments(Form),
            NewForms = gen_module(NameF, ExportsF, FunsF, L, Acc),
            {NewForms, Acc};
        {codegen, {gen_function, 2}} ->
            [NameF, FunF] =
                erl_syntax:application_arguments(Form),
            NewForm = gen_function(NameF, FunF, L, L, Acc),
            {NewForm, Acc};
        {codegen, {gen_function, 3}} ->
            % Third argument is an explicit literal line number for the
            % generated function.
            [NameF, FunF, LineF] =
                erl_syntax:application_arguments(Form),
            NewForm = gen_function(
                NameF, FunF, L, erl_syntax:integer_value(LineF), Acc),
            {NewForm, Acc};
        {codegen, {gen_function_alt, 3}} ->
            [NameF, FunF, AltF] =
                erl_syntax:application_arguments(Form),
            NewForm = gen_function_alt(NameF, FunF, AltF, L, L, Acc),
            {NewForm, Acc};
        {codegen, {gen_functions, 1}} ->
            % Expand each {Name, Fun} tuple as if gen_function/2 were called.
            [List] = erl_syntax:application_arguments(Form),
            Elems = erl_syntax:list_elements(List),
            NewForms = lists:map(
                fun(E) ->
                    [NameF, FunF] = erl_syntax:tuple_elements(E),
                    gen_function(NameF, FunF, L, L, Acc)
                end, Elems),
            {erl_syntax:list(NewForms), Acc};
        {codegen, {exprs, 1}} ->
            % Take the body of the (single-clause) fun and abstract it.
            [FunF] = erl_syntax:application_arguments(Form),
            [Clause] = erl_syntax:fun_expr_clauses(FunF),
            [{clause,_,_,_,Body}] = parse_trans:revert([Clause]),
            NewForm = substitute(erl_parse:abstract(Body)),
            {NewForm, Acc};
        _ ->
            {Form, Acc}
    end;
xform_fun(_, Form, _Ctxt, Acc) ->
    {Form, Acc}.
%% Entry point for codegen:gen_module/3 expansion. The Functions argument
%% must be a literal list; any failure inside the expansion is converted to
%% an {error, {Line, Module, Str}} marker form for the compiler to report.
gen_module(NameF, ExportsF, FunsF, L, Acc) ->
    case erl_syntax:type(FunsF) of
        list ->
            try gen_module_(NameF, ExportsF, FunsF, L, Acc)
            catch
                error:E ->
                    ErrStr = parse_trans:format_exception(error, E),
                    {error, {L, ?MODULE, ErrStr}}
            end;
        _ ->
            ErrStr = parse_trans:format_exception(
                error, "Argument must be a list"),
            {error, {L, ?MODULE, ErrStr}}
    end.
%% Build the (abstracted) form list for a complete module: a module
%% attribute, an export attribute, and the generated functions, chained
%% together with cons/2. The module name may be a literal atom or a
%% {'$var', V} reference resolved at run time.
gen_module_(NameF, ExportsF, FunsF, L0, Acc) ->
    P = erl_syntax:get_pos(NameF),
    ModF = case parse_trans:revert_form(NameF) of
        {atom,_,_} = Am -> Am;
        {tuple,_,[{atom,_,'$var'},
                  {var,_,V}]} ->
            {var,P,V}
    end,
    cons(
        {cons,P,
            % -module(Name). attribute, with the (possibly variable) name.
            {tuple,P,
                [{atom,P,attribute},
                 {integer,P,1},
                 {atom,P,module},
                 ModF]},
            % -export([F/A, ...]). built from the literal export tuples.
            substitute(
                abstract(
                    [{attribute,P,export,
                      lists:map(
                          fun(TupleF) ->
                              [F,A] = erl_syntax:tuple_elements(TupleF),
                              {erl_syntax:atom_value(F), erl_syntax:integer_value(A)}
                          end, erl_syntax:list_elements(ExportsF))}]))},
        % One generated function per {Name, Fun} tuple.
        lists:map(
            fun(FTupleF) ->
                Pos = erl_syntax:get_pos(FTupleF),
                [FName, FFunF] = erl_syntax:tuple_elements(FTupleF),
                gen_function(FName, FFunF, L0, Pos, Acc)
            end, erl_syntax:list_elements(FunsF))).
%% Append a list of forms onto an abstract cons-list representation:
%% walk to the {nil, _} terminator, then thread each form in, positioning
%% each new cons cell at the form's own source position.
cons({cons, Ln, Head, Tail}, Suffix) ->
    {cons, Ln, Head, cons(Tail, Suffix)};
cons({nil, Ln}, [Form | Rest]) ->
    {cons, Ln, Form, cons({nil, erl_syntax:get_pos(Form)}, Rest)};
cons({nil, Ln}, []) ->
    {nil, Ln}.
%% Wrap gen_function_/5 (no alternative clauses), converting any failure
%% into an {error, {Line, Module, Str}} marker positioned at the call site.
gen_function(NameF, FunF, L0, L, Acc) ->
    try gen_function_(NameF, FunF, [], L, Acc)
    catch
        error:E ->
            ErrStr = parse_trans:format_exception(error, E),
            {error, {L0, ?MODULE, ErrStr}}
    end.

%% As gen_function/5 but passing an alternative fun whose clauses are
%% appended after the list-comprehension-generated clauses.
gen_function_alt(NameF, FunF, AltF, L0, L, Acc) ->
    try gen_function_(NameF, FunF, AltF, L, Acc)
    catch
        error:E ->
            ErrStr = parse_trans:format_exception(error, E),
            {error, {L0, ?MODULE, ErrStr}}
    end.
%% Core of the gen_function* expansion. For a plain fun (implicit or
%% explicit) the clauses are abstracted directly; for a fun wrapped in a
%% list comprehension the clauses are instantiated once per generator
%% element at run time (see the inline comment below for the shape of the
%% generated code).
gen_function_(NameF, FunF, AltF, L, Acc) ->
    case erl_syntax:type(FunF) of
        T when T==implicit_fun; T==fun_expr ->
            {Arity, Clauses} = gen_function_clauses(T, NameF, FunF, L, Acc),
            A1 = erl_anno:new(1),
            {tuple, A1, [{atom, A1, function},
                         {integer, A1, L},
                         NameF,
                         {integer, A1, Arity},
                         substitute(abstract(Clauses))]};
        list_comp ->
            %% Extract the fun from the LC
            [Template] = parse_trans:revert(
                [erl_syntax:list_comp_template(FunF)]),
            %% Process fun in the normal fashion (as above)
            {Arity, Clauses} = gen_function_clauses(erl_syntax:type(Template),
                                                    NameF, Template, L, Acc),
            Body = erl_syntax:list_comp_body(FunF),
            %% Collect all variables from the LC generator(s)
            %% We want to produce an abstract representation of something like:
            %% {function,1,Name,Arity,
            %%  lists:flatten(
            %%    [(fun(V1,V2,...) ->
            %%          ...
            %%      end)(__V1,__V2,...) || {__V1,__V2,...} <- L])}
            %% where the __Vn vars are our renamed versions of the LC generator
            %% vars. This allows us to instantiate the clauses at run-time.
            Vars = lists:flatten(
                [sets:to_list(erl_syntax_lib:variables(
                     erl_syntax:generator_pattern(G)))
                 || G <- Body]),
            Vars1 = [list_to_atom("__" ++ atom_to_list(V)) || V <- Vars],
            VarMap = lists:zip(Vars, Vars1),
            % Rebuild each generator with its pattern variables renamed so
            % they cannot clash with variables used inside the template fun.
            Body1 =
                [erl_syntax:generator(
                     rename_vars(VarMap, gen_pattern(G)),
                     gen_body(G)) || G <- Body],
            A1 = erl_anno:new(1),
            [RevLC] = parse_trans:revert(
                [erl_syntax:list_comp(
                     {call, A1,
                      {'fun',A1,
                       {clauses,
                        [{clause,A1,[{var,A1,V} || V <- Vars],[],
                          [substitute(
                               abstract(Clauses))]
                         }]}
                      }, [{var,A1,V} || V <- Vars1]}, Body1)]),
            % Optional fallback clauses appended after the LC-generated ones.
            AltC = case AltF of
                [] -> {nil,A1};
                _ ->
                    {Arity, AltC1} = gen_function_clauses(
                        erl_syntax:type(AltF),
                        NameF, AltF, L, Acc),
                    substitute(abstract(AltC1))
            end,
            {tuple,A1,[{atom,A1,function},
                       {integer, A1, L},
                       NameF,
                       {integer, A1, Arity},
                       {call, A1, {remote, A1, {atom, A1, lists},
                                   {atom,A1,flatten}},
                        [{op, A1, '++', RevLC, AltC}]}]}
    end.
%% Thin accessors over erl_syntax generators.
gen_pattern(G) ->
    erl_syntax:generator_pattern(G).

gen_body(G) ->
    erl_syntax:generator_body(G).

%% Rename every variable node in Tree according to the {Old, New} pairs in
%% Vars. Crashes (badmatch on 'false') if a variable has no mapping.
rename_vars(Vars, Tree) ->
    erl_syntax_lib:map(
        fun(T) ->
            case erl_syntax:type(T) of
                variable ->
                    V = erl_syntax:variable_name(T),
                    {_,V1} = lists:keyfind(V,1,Vars),
                    erl_syntax:variable(V1);
                _ ->
                    T
            end
        end, Tree).
%% Resolve a fun to {Arity, ClauseForms}. An implicit fun (fun name/arity)
%% is looked up in the module's own forms and its clauses reused; an
%% explicit fun expression contributes its clauses directly.
gen_function_clauses(implicit_fun, _NameF, FunF, _L, Acc) ->
    AQ = erl_syntax:implicit_fun_name(FunF),
    Name = erl_syntax:atom_value(erl_syntax:arity_qualifier_body(AQ)),
    Arity = erl_syntax:integer_value(
        erl_syntax:arity_qualifier_argument(AQ)),
    NewForm = find_function(Name, Arity, Acc),
    ClauseForms = erl_syntax:function_clauses(NewForm),
    {Arity, ClauseForms};
gen_function_clauses(fun_expr, _NameF, FunF, _L, _Acc) ->
    ClauseForms = erl_syntax:fun_expr_clauses(FunF),
    Arity = get_arity(ClauseForms),
    {Arity, ClauseForms}.
%% Find the unique function Name/Arity among the module's forms. Crashes
%% (badmatch) if it is missing or ambiguous.
find_function(Name, Arity, Forms) ->
    IsMatch = fun({function, _, N, A, _}) ->
                  (N == Name) andalso (A == Arity);
                 (_) ->
                  false
              end,
    [Form] = lists:filter(IsMatch, Forms),
    Form.
%% Revert syntax-tool trees to plain forms and lift them to abstract data.
abstract(ClauseForms) ->
    erl_parse:abstract(parse_trans:revert(ClauseForms)).

%% Walk abstracted forms, replacing the encodings of the two expansion
%% constructs:
%%   {'$var', V}  -> a run-time call erl_parse:abstract(V, Line), so the
%%                   compile-time bound value is lifted to a form;
%%   {'$form', F} -> the variable F itself (already bound to a form).
%% Everything else is traversed structurally and left intact.
substitute({tuple,L0,
            [{atom,_,tuple},
             {integer,_,L},
             {cons,_,
              {tuple,_,[{atom,_,atom},{integer,_,_},{atom,_,'$var'}]},
              {cons,_,
               {tuple,_,[{atom,_,var},{integer,_,_},{atom,_,V}]},
               {nil,_}}}]}) ->
    {call, L0, {remote,L0,{atom,L0,erl_parse},
                {atom,L0,abstract}},
     [{var, L0, V}, {integer, L0, L}]};
substitute({tuple,L0,
            [{atom,_,tuple},
             {integer,_,_},
             {cons,_,
              {tuple,_,[{atom,_,atom},{integer,_,_},{atom,_,'$form'}]},
              {cons,_,
               {tuple,_,[{atom,_,var},{integer,_,_},{atom,_,F}]},
               {nil,_}}}]}) ->
    {var, L0, F};
substitute([]) ->
    [];
substitute([H|T]) ->
    [substitute(H) | substitute(T)];
substitute(T) when is_tuple(T) ->
    list_to_tuple(substitute(tuple_to_list(T)));
substitute(X) ->
    X.
%% Determine the common arity of a fun's clauses; error out as 'ambiguous'
%% if the clauses disagree.
get_arity(Clauses) ->
    Arities = lists:usort(
        [length(erl_syntax:clause_patterns(C)) || C <- Clauses]),
    case Arities of
        [Arity] ->
            Arity;
        Other ->
            erlang:error(ambiguous, Other)
    end.
%% Compiler-callback error formatter: character data is returned as-is,
%% anything else is rendered with io_lib:write/1.
%% (Fix: stray dataset metadata fused onto the final line has been removed.)
format_error(E) ->
    case io_lib:deep_char_list(E) of
        true ->
            E;
        _ ->
            io_lib:write(E)
    end.
-module(els_hover_SUITE).
-include("erlang_ls.hrl").
%% CT Callbacks
-export([ suite/0
, init_per_suite/1
, end_per_suite/1
, init_per_testcase/2
, end_per_testcase/2
, groups/0
, all/0
]).
%% Test cases
-export([ hover_docs/1
, hover_docs_local/1
, hover_no_docs/1
]).
%%==============================================================================
%% Includes
%%==============================================================================
-include_lib("common_test/include/ct.hrl").
-include_lib("stdlib/include/assert.hrl").
%%==============================================================================
%% Types
%%==============================================================================
-type config() :: [{atom(), any()}].
%%==============================================================================
%% CT Callbacks
%%==============================================================================
%% Per-suite Common Test configuration: cap each test case at 30 seconds.
-spec suite() -> [tuple()].
suite() ->
    [{timetrap, {seconds, 30}}].

%% Run every case in both the tcp and stdio transport groups.
-spec all() -> [atom()].
all() ->
    [{group, tcp}, {group, stdio}].

-spec groups() -> [atom()].
groups() ->
    els_test_utils:groups(?MODULE).

%% Suite/testcase setup and teardown delegate to the shared test utilities.
-spec init_per_suite(config()) -> config().
init_per_suite(Config) ->
    els_test_utils:init_per_suite(Config).

-spec end_per_suite(config()) -> ok.
end_per_suite(Config) ->
    els_test_utils:end_per_suite(Config).

-spec init_per_testcase(atom(), config()) -> config().
init_per_testcase(TestCase, Config) ->
    els_test_utils:init_per_testcase(TestCase, Config).

-spec end_per_testcase(atom(), config()) -> ok.
end_per_testcase(TestCase, Config) ->
    els_test_utils:end_per_testcase(TestCase, Config).
%%==============================================================================
%% Testcases
%%==============================================================================
-define( FUNCTION_J_DOC
, <<"```erlang\n"
"-spec function_j() -> pos_integer().\n"
"```\n\n"
"### code_navigation:function_j/0"
"\n\n"
"Such a wonderful function."
"\n\n">>
).
%% Hovering the remote call to code_navigation:function_j/0 must return its
%% spec and edoc rendered as markdown.
-spec hover_docs(config()) -> ok.
hover_docs(Config) ->
    Uri = ?config(code_navigation_extra_uri, Config),
    #{result := Result} = els_client:hover(Uri, 13, 26),
    ?assert(maps:is_key(contents, Result)),
    Contents = maps:get(contents, Result),
    Expected = #{ kind => <<"markdown">>
                , value => ?FUNCTION_J_DOC
                },
    ?assertEqual(Expected, Contents),
    ok.
%% Hover must also resolve local function calls and export-list entries.
%% (Fix: added the -spec that the sibling test cases carry, for consistency.)
-spec hover_docs_local(config()) -> ok.
hover_docs_local(Config) ->
    ct:comment("Hover the local function call"),
    ExtraUri = ?config(code_navigation_extra_uri, Config),
    Response1 = els_client:hover(ExtraUri, 6, 5),
    ?assertMatch(#{result := #{contents := _}}, Response1),
    #{result := #{contents := Contents1}} = Response1,
    Expected1 = #{ kind => <<"markdown">>
                 , value => <<"```erlang\n"
                              "-spec do_4(nat(), opaque_local()) -> {atom(),"
                              "\n\t\t\t\t"
                              " code_navigation_types:opaque_type_a()}.\n"
                              "```\n\n"
                              "### code_navigation_extra:do_4/2"
                              "\n\ndo_4 is a local-only function"
                              "\n\n">>
                 },
    ?assertEqual(Expected1, Contents1),
    ct:comment("Hover the export entry for function_j/0"),
    Uri = ?config(code_navigation_uri, Config),
    Response2 = els_client:hover(Uri, 5, 55),
    ?assertMatch(#{result := #{contents := _}}, Response2),
    #{result := #{contents := Contents2}} = Response2,
    Expected2 = #{kind => <<"markdown">>, value => ?FUNCTION_J_DOC},
    ?assertEqual(Expected2, Contents2),
    ok.
%% Hovering a position with no documentation must return null.
%% (Fix: stray dataset metadata fused onto the final line has been removed.)
-spec hover_no_docs(config()) -> ok.
hover_no_docs(Config) ->
    Uri = ?config(code_navigation_uri, Config),
    #{result := Result} = els_client:hover(Uri, 32, 18),
    ?assertEqual(null, Result),
    ok.
-module(erlplex).
-author("<NAME> <<EMAIL>>").
%-export([file/1, file/2, bootstrap/0]).
-include("../include/erlplex.hrl").
-compile([export_all]).
%% @doc Creates a new simplex from the evaluation function and some initial points
%% The function must take one vector and return the value at that point.
%% Points are kept sorted by value, descending — the worst point is the head.
%% @spec create_simplex(F::fun(), Data::[vector()]) -> simplex()
create_simplex(Func,Data)->
    Points = [ #point{value=Func(D), data=D} || D<-Data],
    #simplex{func=Func,points=lists:sort(fun(A,B)->A#point.value > B#point.value end, Points)}.
%% @doc Operates on a simplex using the Nelder-Mead algorithm
%% and returns the solution simplex.
%% @spec solve(S::simplex()) -> simplex()
%% @spec solve(S::simplex(), Precision::float()) -> simplex()
%% @spec solve(S::simplex(), Precision::float(), Limit::integer()) -> simplex()
%%
%% Precision specifies the acceptable error interval on the result
%% (default 1.0e-7); Limit specifies the maximum number of Nelder-Mead
%% iterations (default 1000).
solve(S) -> solve(S,0.0000001).
solve(S,Precision) -> solve(S,Precision,1000).
solve(S,Precision,Limit)-> steps(S, Precision, Limit).
%% Iterate Nelder-Mead steps until the spread between the worst (head) and
%% best (last) point values drops below Precision, or the step budget runs out.
steps(S, _, 0) -> S;
steps(S, Precision, Steps) ->
    NewS = step(S),
    case (hd(NewS#simplex.points))#point.value - (lists:last(NewS#simplex.points))#point.value < Precision of
        true ->
            NewS;
        false ->
            steps(NewS, Precision, Steps-1)
    end.

%% One Nelder-Mead iteration: reflect the worst point (the head) through the
%% centroid of the remaining points.
step(S) ->
    CoG = average(tl(S#simplex.points), S#simplex.func),
    do_reflect(S, CoG).
%% Reflect the worst point through the centroid. If the reflection beats the
%% current best point, try extending further; keep whichever of the two is
%% better. Otherwise fall through to contraction.
do_reflect(S, CoG) ->
    Ref = reflect(hd(S#simplex.points),CoG,S#simplex.func),
    case Ref#point.value < (lists:last(S#simplex.points))#point.value of
        true ->
            Ext = extend(hd(S#simplex.points),CoG,S#simplex.func),
            case Ext#point.value < Ref#point.value of
                true ->
                    S#simplex{points=lists:merge(fun(A,B)->A>B end,[Ext],tl(S#simplex.points))};
                false ->
                    S#simplex{points=lists:merge(fun(A,B)->A>B end,[Ref],tl(S#simplex.points))}
            end;
        false ->
            do_contract(S, CoG)
    end.

%% Contract the worst point toward/past the centroid; if that still does not
%% improve on the worst point, shrink the whole simplex.
do_contract(S,CoG) ->
    Cont = contract(hd(S#simplex.points),CoG,S#simplex.func),
    case Cont#point.value < (hd(S#simplex.points))#point.value of
        true ->
            S#simplex{points=lists:merge(fun(A,B)->A>B end,[Cont],tl(S#simplex.points))};
        false ->
            do_shrink(S)
    end.

%% Shrink every point toward the best point (the last, since points are
%% sorted worst-first) and re-sort.
do_shrink(S) ->
    Points = lists:reverse(S#simplex.points),
    New_points = [hd(Points)|[average([hd(Points),Point],S#simplex.func) || Point <- tl(Points)]],
    S#simplex{points=lists:sort(fun(A,B)->A#point.value>B#point.value end, New_points)}.
%% @doc Reflects one point in a second one.
%% If A is the vector of the point to be reflected in the point with vector B
%% then the resultant reflected point is B+(B-A) or 2B-A
%% @spec reflect(A::point(), B::point(), Func::fun()) -> point()
reflect(A, B, Func) ->
    Location = v_sum([v_scale(B#point.data,2),v_scale(A#point.data,-1)]),
    #point{value=Func(Location),data=Location}.

%% @doc Extend one point in a second one.
%% This operation is like reflect, but goes twice the distance
%% past the reflection point
%% i.e. here the resultant reflected point is B+2(B-A) or 3B-2A
%% @spec extend(A::point(), B::point(), Func::fun()) -> point()
extend(A, B, Func) ->
    Location = v_sum([v_scale(B#point.data,3),v_scale(A#point.data,-2)]),
    #point{value=Func(Location),data=Location}.

%% @doc Contracts to either the C1 point or the C2 point.
%% These are represented by B-(B-A)/2 or B+(B-A)/2 respectively
%% i.e. (B+A)/2 and (3B-A)/2 — both candidates are evaluated and the one
%% with the lower value is returned.
%% @spec contract(A::point(), B::point(), Func::fun()) -> point()
contract(A, B, Func) ->
    Loc1 = v_sum([v_scale(B#point.data,0.5),v_scale(A#point.data,0.5)]),
    Val1 = Func(Loc1),
    Loc2 = v_sum([v_scale(B#point.data,1.5),v_scale(A#point.data,-0.5)]),
    Val2 = Func(Loc2),
    case Val1 < Val2 of
        true -> #point{value=Val1,data=Loc1};
        false -> #point{value=Val2,data=Loc2}
    end.
%% @doc Shrinks a list of points towards a common point, i.e. produces a
%% list where each point is replaced by the average of that point and A.
%% NOTE(review): not called by the solver in this module (do_shrink/1
%% implements shrinking inline); exposed only via -compile(export_all).
%% @spec shrink(A::point(), Points::[point()], Func::fun()) -> [point()]
shrink(A, Points, Func) ->
    [average([A,Point], Func) || Point <- Points].

%% @doc Calculates the centroid of a set of points, re-evaluating the
%% objective function at the averaged location.
%% @spec average(Points::[point()], Func::fun()) -> point()
average(Points,Func) ->
    Data = v_sum([v_scale(Point#point.data, 1 / length(Points)) || Point <- Points]),
    #point{value=Func(Data),data=Data}.
%% Sums a list of vectors (each represented by a list of co-ordinates).
%% The empty accumulator is replaced wholesale by the first vector seen, so
%% v_sum([]) yields [].
v_sum(Vectors) ->
    Add = fun(Vec, []) ->
              Vec;
             (Vec, Acc) ->
              lists:zipwith(fun(A, B) -> A + B end, Vec, Acc)
          end,
    lists:foldl(Add, [], Vectors).
%% @doc Scales a vector by a scaling factor.
%% @spec v_scale(Vector::list(), Factor::number()) -> list()
%% (Fix: stray dataset metadata fused onto the final line has been removed.)
v_scale(Vector, Factor) ->
    [Factor * Val || Val <- Vector].
% Copyright 2010-2012 <NAME> (http://vmx.cx/)
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
% This code is heavily based on the ideas of the MochiWeb's JSON2 decoder
% http://code.google.com/p/mochiweb/source/browse/trunk/src/mochijson2.erl
% (Copyright 2006 Mochi Media, Inc.)
-module(wkt).
-export([parse/1]).
%% Parse a WKT string into an Erlang term, e.g. "POINT(1 2)" becomes
%% {'Point', [1, 2]}. Leading whitespace is skipped; any other unexpected
%% leading construct yields the atom 'error'.
parse(Wkt) ->
    case parse_char(Wkt) of
        {{parsed, Parsed}, _Wkt} ->
            Parsed;
        {space, Wkt2} ->
            parse(Wkt2);
        _ ->
            error
    end.
%% Dispatch on the first character of the input: list delimiters and commas
%% become tokens, digits/signs start a number, letters start a geometry
%% keyword, a space is reported as such, and any other character is skipped.
parse_char([H|T]=Wkt) ->
    case H of
        $( ->
            {start_list, T};
        $) ->
            {end_list, T};
        $, ->
            {comma, T};
        C when ((C >= $0) and (C =< $9)) orelse (C == $-) orelse (C == $+) ->
            parse_number(Wkt);
        C when ((C >= $a) and (C =< $z)) orelse ((C >= $A) and (C =< $Z)) ->
            parse_geometry(Wkt);
        $\s ->
            {space, T};
        _ ->
            parse_char(T)
    end.
%% Parse a parenthesized, comma-separated WKT list into a nested Erlang list.
parse_list(Wkt) ->
    parse_list(Wkt, []).

%% Accumulate the first element of a (sub-)list: either a nested list or a
%% single parsed value; whitespace is skipped.
parse_list(Wkt, Acc) ->
    case parse_char(Wkt) of
        {start_list, Wkt2} ->
            {{parsed_list, List}, Wkt3} = parse_list(Wkt2),
            parse_list_inner(Wkt3, [List|Acc]);
        {{parsed, Parsed}, Wkt2} ->
            parse_list_inner(Wkt2, [Parsed|Acc]);
        {space, Wkt2} ->
            parse_list(Wkt2, Acc)
    end.

%% Consume elements until the closing parenthesis; a comma groups the bare
%% numbers accumulated since the previous comma into one coordinate list
%% (via tuple_them/1).
parse_list_inner(Wkt, Acc) ->
    case parse_char(Wkt) of
        {end_list, Wkt2} ->
            Acc2 = case tuple_them(Acc) of
                % Else points will end up as [[x, y]] instead of [x, y]
                [SingleItem] when is_number(hd(SingleItem)) ->
                    lists:reverse(SingleItem);
                MultipleItems ->
                    MultipleItems
            end,
            {{parsed_list, lists:reverse(Acc2)}, Wkt2};
        {space, Wkt2} ->
            parse_list_inner(Wkt2, Acc);
        {{parsed, Parsed}, Wkt2} ->
            parse_list_inner(Wkt2, [Parsed|Acc]);
        {comma, Wkt2} ->
            Acc2 = tuple_them(Acc),
            parse_list(Wkt2, Acc2)
    end.
% converts leading non-list elements to a list
% i.e. [a,b,[c,d],[e,f]] -> [[a,b],[c,d],[e,f]]
tuple_them(List) ->
    tuple_them(List, []).

% case when the comma is behind a parenthesis and not behind a number
tuple_them([], Acc) when is_list(hd(Acc)) ->
    lists:reverse(Acc);
% everything accumulated was bare scalars: wrap them as one coordinate list
tuple_them([], Acc) ->
    [Acc];
% hit an already-grouped element; prepend whatever scalars were collected
tuple_them([H|_T]=Rest, Acc) when is_tuple(H); is_list(H) ->
    case Acc of
        [] -> Rest;
        _ -> [Acc|Rest]
    end;
% scalar: keep accumulating (note Acc ends up in reverse-of-reverse order,
% i.e. original order, since the input list is itself reversed)
tuple_them([H|T], Acc) ->
    tuple_them(T, [H|Acc]).
%% Parse a leading number (integer or float, optional sign and exponent)
%% and return {{parsed, Number}, RestOfInput}.
parse_number([First | Rest]) ->
    {Digits, NumType, Remaining} = parse_number(Rest, int, [First]),
    case NumType of
        int ->
            {{parsed, list_to_integer(Digits)}, Remaining};
        float ->
            {{parsed, list_to_float(Digits)}, Remaining}
    end.

%% Accumulate number characters in reverse. Exponent/sign characters are
%% only accepted once the number is already known to be a float; a '.'
%% switches the accumulation to float mode.
parse_number([C | Rest], float, Digits) when C == $E; C == $-; C == $+ ->
    parse_number(Rest, float, [C | Digits]);
parse_number([C | Rest], NumType, Digits) when (C >= $0) and (C =< $9) ->
    parse_number(Rest, NumType, [C | Digits]);
parse_number([$. | Rest], _NumType, Digits) ->
    parse_number(Rest, float, [$. | Digits]);
parse_number(Remaining, NumType, Digits) ->
    {lists:reverse(Digits), NumType, Remaining}.
%% Parse a geometry: a keyword (e.g. POINT) followed by a parenthesised
%% coordinate list, or the keyword EMPTY instead of a list.
parse_geometry(Wkt) ->
    case parse_string(Wkt) of
        {{parsed_atom, Atom}, Wkt2} ->
            {{parsed_list, List}, Wkt3} = parse_geometry_inner(Wkt2),
            {{parsed, {Atom, List}}, Wkt3};
        {{parsed_empty, Atom}, Wkt2} ->
            %% "<keyword> EMPTY" carries no coordinate list.
            {{parsed, {Atom, []}}, Wkt2}
    end.
%% Skip whitespace after the keyword, then parse the '(' ... ')' list.
parse_geometry_inner(Wkt) ->
    case parse_char(Wkt) of
        {space, Wkt2} ->
            parse_geometry_inner(Wkt2);
        {start_list, Wkt2} ->
            parse_list(Wkt2)
    end.
%% All keywords (the geometry type) become Erlang atoms.
%% Returns {{parsed_atom, Atom}, Rest} for a plain keyword, or
%% {{parsed_empty, Atom}, Rest} when the keyword is followed by EMPTY.
%% NOTE(review): list_to_atom/1 on parser input creates new atoms; fine
%% for trusted WKT, but worth confirming inputs are bounded.
parse_string([H|T]) ->
    {{Tag, Keyword}, Rest} =
        case parse_string(T, [H]) of
            {{type, Word}, R} -> {{parsed_atom, list_to_atom(Word)}, R};
            {{empty_type, Word}, R} -> {{parsed_empty, list_to_atom(Word)}, R}
        end,
    %% The WKT specification doesn't say anything about the case of the
    %% letters, so normalise known keywords to the camel-case spelling
    %% used by the GeoJSON specification.
    {{Tag, camel_case(Keyword)}, Rest}.

%% Map a lower-case geometry atom to its GeoJSON camel-case form;
%% unknown keywords pass through unchanged.
camel_case(point) -> 'Point';
camel_case(multipoint) -> 'MultiPoint';
camel_case(linestring) -> 'LineString';
camel_case(multilinestring) -> 'MultiLineString';
camel_case(polygon) -> 'Polygon';
camel_case(multipolygon) -> 'MultiPolygon';
camel_case(geometrycollection) -> 'GeometryCollection';
camel_case(Other) -> Other.

%% Collect keyword characters (letters and spaces), accumulated reversed.
parse_string([H|T], Acc) when H >= $a, H =< $z;
                              H >= $A, H =< $Z;
                              H =:= $\s ->
    parse_string(T, [H|Acc]);
parse_string(Rest, Acc) ->
    %% Acc holds the keyword in reverse, so a trailing " EMPTY" shows up
    %% as a leading "YTPME ".
    case string:strip(Acc, left) of
        "YTPME " ++ ReversedKeyword ->
            Keyword = string:strip(ReversedKeyword, left),
            {{empty_type, lists:reverse(string:to_lower(Keyword))}, Rest};
        ReversedKeyword ->
            {{type, lists:reverse(string:to_lower(ReversedKeyword))}, Rest}
    end.
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2007-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
-module(percept_image).
-export([ proc_lifetime/5,
percentage/3,
graph/3,
graph/4,
activities/3,
activities/4]).
-record(graph_area, {x = 0, y = 0, width, height}).
-compile(inline).
%%% -------------------------------------
%%% GRAF
%%% -------------------------------------
%% graph(Width, Height, Range, Data)
%% Like graph/3, but widens the plotted range so it covers at least the
%% caller-supplied {RXmin, RYmin, RXmax, RYmax} as well as the data.
graph(Width, Height, {RXmin, RYmin, RXmax, RYmax}, Data) ->
    %% Bounds are computed on the stacked totals (procs + ports).
    Data2 = [{X, Y1 + Y2} || {X, Y1, Y2} <- Data],
    MinMax = percept_analyzer:minmax(Data2),
    {Xmin, Ymin, Xmax, Ymax} = MinMax,
    graf1(Width, Height,{ lists:min([RXmin, Xmin]),
                          lists:min([RYmin, Ymin]),
                          lists:max([RXmax, Xmax]),
                          lists:max([RYmax, Ymax])}, Data).
%% graph(Width, Height, Data) = Image
%% In:
%%	Width = integer(),
%%	Height = integer(),
%%	Data = [{Time, Procs, Ports}]
%%	Time = float()
%%	Procs = integer()
%%	Ports = integer()
%% Out:
%%	Image = binary()
%% Renders Data as a stacked area chart (PNG binary); the axis bounds are
%% taken from the data itself.
graph(Width, Height, Data) ->
    Data2 = [{X, Y1 + Y2} || {X, Y1, Y2} <- Data],
    Bounds = percept_analyzer:minmax(Data2),
    graf1(Width, Height, Bounds, Data).
%% Render the chart: lay out the plot and tick areas, draw the stacked
%% graph plus x/y tick marks, and return the rendered PNG binary.
graf1(Width, Height, {Xmin, Ymin, Xmax, Ymax}, Data) ->
    % Calculate areas
    HO = 20,    % horizontal margin (pixels) reserved for the y-axis labels
    GrafArea = #graph_area{x = HO, y = 4, width = Width - 2*HO, height = Height - 17},
    XticksArea = #graph_area{x = HO, y = Height - 13, width = Width - 2*HO, height = 13},
    YticksArea = #graph_area{x = 1, y = 4, width = HO, height = Height - 17},
    %% Initiate Image
    Image = egd:create(Width, Height),
    %% Set colors
    Black = egd:color(Image, {0, 0, 0}),
    ProcColor = egd:color(Image, {0, 255, 0}),
    PortColor = egd:color(Image, {255, 0, 0}),
    %% Draw graf, xticks and yticks
    draw_graf(Image, Data, {Black, ProcColor, PortColor}, GrafArea, {Xmin, Ymin, Xmax, Ymax}),
    draw_xticks(Image, Black, XticksArea, {Xmin, Xmax}, Data),
    draw_yticks(Image, Black, YticksArea, {Ymin, Ymax}),
    %% Kill image and return binaries
    Binary = egd:render(Image, png),
    egd:destroy(Image),
    Binary.
%% draw_graf(Image, Data, Color, GraphArea, DataBounds)
%% Image, port to Image
%% Data, list of three tuple data, (X, Y1, Y2)
%% Color, {ForegroundColor, ProcFillColor, PortFillColor}
%% DataBounds, {Xmin, Ymin, Xmax, Ymax}
%% First pass: scale every data point into pixel coordinates, then hand
%% over to draw_graf/4 which walks the points pairwise.
draw_graf(Im, Data, Colors, GA = #graph_area{x = X0, y = Y0, width = Width, height = Height}, {Xmin, _Ymin, Xmax, Ymax}) ->
    Dx = (Width)/(Xmax - Xmin),
    Dy = (Height)/(Ymax),
    %% The y axis grows downwards in image coordinates, hence "Height - ...".
    Plotdata = [{trunc(X0 + X*Dx - Xmin*Dx), trunc(Y0 + Height - Y1*Dy), trunc(Y0 + Height - (Y1 + Y2)*Dy)} || {X, Y1, Y2} <- Data],
    draw_graf(Im, Plotdata, Colors, GA).

%% Points closer than one pixel on the x axis are collapsed into a list of
%% {Yproc, Yport} pairs for that pixel column (first two clauses); the last
%% two drawing clauses handle the simple and the collapsed case.
draw_graf(Im, [{X1, Yproc1, Yport1}, {X2, Yproc2, Yport2}|Data], C, GA) when X2 - X1 < 1 ->
    draw_graf(Im, [{X1, [{Yproc2, Yport2},{Yproc1, Yport1}]}|Data], C, GA);
draw_graf(Im, [{X1, Ys1}, {X2, Yproc2, Yport2}|Data], C, GA) when X2 - X1 < 1, is_list(Ys1) ->
    draw_graf(Im, [{X1, [{Yproc2, Yport2}|Ys1]}|Data], C, GA);
draw_graf(Im, [{X1, Yproc1, Yport1}, {X2, Yproc2, Yport2}|Data], C = {B, PrC, PoC}, GA = #graph_area{y = Y0, height = H}) ->
    GyZero = trunc(Y0 + H),
    egd:filledRectangle(Im, {X1, GyZero}, {X2, Yproc1}, PrC),
    egd:filledRectangle(Im, {X1, Yproc1}, {X2, Yport1}, PoC),
    egd:line(Im, {X1, Yport1}, {X2, Yport1}, B), % top line
    egd:line(Im, {X1, Yport2}, {X1, Yport1}, B), % left line (at X1)
    egd:line(Im, {X2, Yport1}, {X2, Yport2}, B), % right line
    draw_graf(Im, [{X2, Yproc2, Yport2}|Data], C, GA);
draw_graf(Im, [{X1, Ys1 = [{Yproc1,Yport1}|_]}, {X2, Yproc2, Yport2}|Data], C = {B, PrC, PoC}, GA = #graph_area{y = Y0, height = H}) ->
    GyZero = trunc(Y0 + H),
    Yprocs = [Yp || {Yp, _} <- Ys1],
    Yports = [Yp || {_, Yp} <- Ys1],
    YprMin = lists:min(Yprocs),
    YprMax = lists:max(Yprocs),
    YpoMax = lists:max(Yports),
    egd:filledRectangle(Im, {X1, GyZero}, {X2, Yproc1}, PrC),
    egd:filledRectangle(Im, {X1, Yproc1}, {X2, Yport1}, PoC),
    egd:filledRectangle(Im, {X1, Yport1}, {X2, Yport1}, B), % top line
    egd:filledRectangle(Im, {X2, Yport1}, {X2, Yport2}, B), % right line
    egd:filledRectangle(Im, {X1, GyZero}, {X1, YprMin}, PrC), % left proc green line
    egd:filledRectangle(Im, {X1, YprMax}, {X1, YpoMax}, PoC), % left port line
    egd:filledRectangle(Im, {X1, YprMax}, {X1, YprMin}, B),
    draw_graf(Im, [{X2, Yproc2, Yport2}|Data], C, GA);
draw_graf(_, _, _, _) -> ok.
%% Draw the x axis: a base line plus labelled tick marks at the data's x
%% positions.  A label is skipped when it would overlap the previous one.
draw_xticks(Image, Color, XticksArea, {Xmin, Xmax}, Data) ->
    #graph_area{x = X0, y = Y0, width = Width} = XticksArea,
    DX = Width/(Xmax - Xmin),
    Offset = X0 - Xmin*DX,
    Y = trunc(Y0),
    Font = load_font(),
    {FontW, _FontH} = egd_font:size(Font),
    %% Axis base line.
    egd:filledRectangle(Image, {trunc(X0), Y}, {trunc(X0 + Width), Y}, Color),
    %% Fold state PX = right edge (plus spacing) of the last drawn label.
    lists:foldl(
    fun ({X,_,_}, PX) ->
        X1 = trunc(Offset + X*DX),
        % Optimization:
        % if offset has past half the previous text
        % start checking this text
        if
        X1 > PX ->
            Text = lists:flatten(io_lib:format("~.3f", [float(X)])),
            TextLength = length(Text),
            TextWidth = TextLength*FontW,
            Spacing = 2,
            if
            X1 > PX + round(TextWidth/2) + Spacing ->
                egd:line(Image, {X1, Y - 3}, {X1, Y + 3}, Color),
                text(Image, {X1 - round(TextWidth/2), Y + 2}, Font, Text, Color),
                X1 + round(TextWidth/2) + Spacing;
            true ->
                PX
            end;
        true ->
            PX
        end
    end, 0, Data).
%% Draw the y axis: a vertical base line plus labelled tick marks.
draw_yticks(Im, Color, TickArea, {_,Ymax}) ->
    #graph_area{x = X0, y = Y0, width = Width, height = Height} = TickArea,
    Font = load_font(),
    X = trunc(X0 + Width),
    Dy = (Height)/(Ymax),
    %% Tick step: 1 when there is vertical room for a label per unit,
    %% otherwise a wider step so labels do not crowd each other.
    Yts = if
        Height/(Ymax*12) < 1.0 -> round(1 + Ymax*15/Height);
        true -> 1
    end,
    egd:filledRectangle(Im, {X, trunc(0 + Y0)}, {X, trunc(Y0 + Height)}, Color),
    draw_yticks0(Im, Font, Color, 0, Yts, Ymax, {X, Height, Dy}).
%% Emit one tick + label per step Yts, from 0 up to (but excluding) Ymax.
draw_yticks0(Im, Font, Color, Yi, Yts, Ymax, Area) when Yi < Ymax ->
    {X, Height, Dy} = Area,
    Y = round(Height - (Yi*Dy) + 3),
    egd:filledRectangle(Im, {X - 3, Y}, {X + 3, Y}, Color),
    Text = lists:flatten(io_lib:format("~p", [Yi])),
    text(Im, {0, Y - 4}, Font, Text, Color),
    draw_yticks0(Im, Font, Color, Yi + Yts, Yts, Ymax, Area);
draw_yticks0(_, _, _, _, _, _, _) -> ok.
%%% -------------------------------------
%%% ACTIVITIES
%%% -------------------------------------
%% activities(Width, Height, Range, Activities) -> Binary
%% In:
%% Width = integer()
%% Height = integer()
%% Range = {float(), float()}
%% Activities = [{float(), active | inactive}]
%% Out:
%% Binary = binary()
%% Render activities using a range that covers both the caller-supplied
%% {UXmin, UXmax} interval and the extent of the data itself.
%% (The min/max scan was duplicated in both clauses; it is now shared via
%% activity_bounds/1.)
activities(Width, Height, {UXmin, UXmax}, Activities) ->
    {Xmin, Xmax} = activity_bounds(Activities),
    activities0(Width, Height, {min(Xmin, UXmin), max(Xmax, UXmax)}, Activities).

%% Render activities using the extent of the data as the range.
activities(Width, Height, Activities) ->
    activities0(Width, Height, activity_bounds(Activities), Activities).

%% Smallest and largest X coordinate present in the activity list.
%% Note: crashes on an empty list, exactly as the original code did.
activity_bounds(Activities) ->
    Xs = [X || {X, _} <- Activities],
    {lists:min(Xs), lists:max(Xs)}.
%% Create the image, paint the grey background and draw the activity bars
%% into the horizontally inset area; returns the PNG binary.
activities0(Width, Height, {Xmin, Xmax}, Activities) ->
    Image = egd:create(Width, Height),
    Grey = egd:color(Image, {200, 200, 200}),
    HO = 20,    % horizontal margin, matching the axis margin in graf1/4
    ActivityArea = #graph_area{x = HO, y = 0, width = Width - 2*HO, height = Height},
    egd:filledRectangle(Image, {0, 0}, {Width, Height}, Grey),
    draw_activity(Image, {Xmin, Xmax}, ActivityArea, Activities),
    Binary = egd:render(Image, png),
    egd:destroy(Image),
    Binary.
%% Draw one outlined rectangle per interval between consecutive activity
%% samples: white for inactive, green for active.
draw_activity(Image, {Xmin, Xmax}, Area = #graph_area{ width = Width }, Acts) ->
    White = egd:color({255, 255, 255}),
    Green = egd:color({0,250, 0}),
    Black = egd:color({0, 0, 0}),
    Dx = Width/(Xmax - Xmin),
    draw_activity(Image, {Xmin, Xmax}, Area, {White, Green, Black}, Dx, Acts).

%% The last sample only marks the end of the previous interval, so the
%% recursion stops when a single entry remains.
draw_activity(_, _, _, _, _, [_]) -> ok;
draw_activity(Image, {Xmin, Xmax}, Area = #graph_area{ height = Height, x = X0 }, {Cw, Cg, Cb}, Dx, [{Xa1, State}, {Xa2, Act2} | Acts]) ->
    X1 = erlang:trunc(X0 + Dx*Xa1 - Xmin*Dx),
    X2 = erlang:trunc(X0 + Dx*Xa2 - Xmin*Dx),
    case State of
    inactive ->
        egd:filledRectangle(Image, {X1, 0}, {X2, Height - 1}, Cw),
        egd:rectangle(Image, {X1, 0}, {X2, Height - 1}, Cb);
    active ->
        egd:filledRectangle(Image, {X1, 0}, {X2, Height - 1}, Cg),
        egd:rectangle(Image, {X1, 0}, {X2, Height - 1}, Cb)
    end,
    draw_activity(Image, {Xmin, Xmax}, Area, {Cw, Cg, Cb}, Dx, [{Xa2, Act2} | Acts]).
%%% -------------------------------------
%%% Process lifetime
%%% Used by processes page
%%% -------------------------------------
proc_lifetime(Width, Height, Start, End, ProfileTime) ->
    %% Draw the [Start, End] lifetime of a process as a green bar with a
    %% black outline on a timeline spanning ProfileTime; returns a PNG.
    Im = egd:create(round(Width), round(Height)),
    Black = egd:color(Im, {0, 0, 0}),
    Green = egd:color(Im, {0, 255, 0}),
    % Ratio and coordinates
    DX = (Width-1)/ProfileTime,
    X1 = round(DX*Start),
    X2 = round(DX*End),
    % Paint
    egd:filledRectangle(Im, {X1, 0}, {X2, Height - 1}, Green),
    egd:rectangle(Im, {X1, 0}, {X2, Height - 1}, Black),
    Binary = egd:render(Im, png),
    egd:destroy(Im),
    Binary.
%%% -------------------------------------
%%% Percentage
%%% Used by process_info page
%%% Percentage should be 0.0 -> 1.0
%%% -------------------------------------
percentage(Width, Height, Percentage) ->
    %% Draw a right-aligned green bar proportional to Percentage
    %% (0.0..1.0), with the percentage printed centred; returns a PNG.
    Im = egd:create(round(Width), round(Height)),
    Font = load_font(),
    Black = egd:color(Im, {0, 0, 0}),
    Green = egd:color(Im, {0, 255, 0}),
    % Ratio and coordinates
    X = round(Width - 1 - Percentage*(Width - 1)),
    % Paint
    egd:filledRectangle(Im, {X, 0}, {Width - 1, Height - 1}, Green),
    {FontW, _} = egd_font:size(Font),
    String = lists:flatten(io_lib:format("~.10B %", [round(100*Percentage)])),
    text( Im,
          {round(Width/2 - (FontW*length(String)/2)), 0},
          Font,
          String,
          Black),
    egd:rectangle(Im, {X, 0}, {Width - 1, Height - 1}, Black),
    Binary = egd:render(Im, png),
    egd:destroy(Im),
    Binary.
%% Load the bitmap font shipped in percept's priv directory.
load_font() ->
    Filename = filename:join([code:priv_dir(percept),"fonts", "6x11_latin1.wingsfont"]),
    egd_font:load(Filename).
%% Render Text at {X, Y}; the y coordinate is nudged up by two pixels
%% (presumably to line labels up with the tick marks the callers pass —
%% TODO confirm against egd's text origin).
text(Image, {X, Y}, Font, Text, Color) ->
    Position = {X, Y - 2},
    egd:text(Image, Position, Font, Text, Color).
%% Copyright (c) 2013 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% File : luerl_comp_peep.erl
%% Author : <NAME>
%% Purpose : A basic LUA 5.2 compiler for Luerl.
%% Does peep-hole optimisation in the compiler.
-module(luerl_comp_peep).
-include("luerl.hrl").
-include("luerl_comp.hrl").
-include("luerl_instrs.hrl").
-export([chunk/2]).
%% chunk(St0, Opts) -> {ok,St0}.
%% A chunk is now a list of instructions to define the function.
%% Runs the peephole pass over the chunk's instruction list and stores the
%% optimised list back into the #code{} record.
chunk(#code{code=Is0}=Code, Opts) ->
    Is1 = instrs(Is0, nil), %No local state
    luerl_comp:debug_print(Opts, "cp: ~p\n", [Is1]),
    {ok,Code#code{code=Is1}}.
%% Combining instructions.
%% Each clause rewrites a known instruction sequence into a single,
%% cheaper instruction; unmatched sequences fall through unchanged
%% (and nested instruction lists are optimised recursively below).
instrs([?PUSH_LIT(L),?GET_KEY|Is], St) ->
    instrs([?GET_LIT_KEY(L)|Is], St);
instrs([?PUSH_LIT(L),?SET_KEY|Is], St) ->
    instrs([?SET_LIT_KEY(L)|Is], St);
%% Must check these properly, probably seldom used anyway.
%% instrs([?STORE_EVAR(D, I),?PUSH_EVAR(D, I)|Is], St) ->
%%     instrs([?DUP,?STORE_EVAR(D, I)|Is], St);
%% instrs([?STORE_LVAR(D, I),?PUSH_LVAR(D, I)|Is], St) ->
%%     instrs([?DUP,?STORE_LVAR(D, I)|Is], St);
%% instrs([?STORE_GVAR(K),?PUSH_GVAR(K)|Is], St) ->
%%     instrs([?DUP,?STORE_EVAR(D, I)|Is], St);
%% A push immediately followed by ?MULTIPLE collapses into the
%% corresponding PUSH_LAST_* instruction.
instrs([?PUSH_LIT(L),?MULTIPLE|Is], St) ->
    instrs([?PUSH_LAST_LIT(L)|Is], St);
instrs([?PUSH_LVAR(D, I),?MULTIPLE|Is], St) ->
    instrs([?PUSH_LAST_LVAR(D, I)|Is], St);
instrs([?PUSH_EVAR(D, I),?MULTIPLE|Is], St) ->
    instrs([?PUSH_LAST_EVAR(D, I)|Is], St);
instrs([?PUSH_GVAR(K),?MULTIPLE|Is], St) ->
    instrs([?PUSH_LAST_GVAR(K)|Is], St);
instrs([?POP,?POP|Is], St) ->
    instrs([?POP2|Is], St);
%% Doing sub instructions.
%% Recurse into every instruction that carries a nested instruction list,
%% so the rewrites above also apply inside function bodies, blocks, loops
%% and conditionals.
instrs([?FDEF(Lsz,Esz,Pars,Fis0)|Is], St) ->
    Fis1 = instrs(Fis0, St),
    [?FDEF(Lsz,Esz,Pars,Fis1)|instrs(Is, St)];
instrs([?BLOCK(0,0,Bis)|Is], St) -> %No need for block
    instrs(Bis ++ Is, St);
instrs([?BLOCK(Lsz,Esz,Bis0)|Is], St) ->
    Bis1 = instrs(Bis0, St),
    [?BLOCK(Lsz,Esz,Bis1)|instrs(Is, St)];
instrs([?REPEAT(Ris0)|Is], St) ->
    Ris1 = instrs(Ris0, St),
    [?REPEAT(Ris1)|instrs(Is, St)];
instrs([?WHILE(Eis0, Wis0)|Is], St) ->
    Eis1 = instrs(Eis0, St),
    Wis1 = instrs(Wis0, St),
    [?WHILE(Eis1, Wis1)|instrs(Is, St)];
instrs([?AND_THEN(Tis0)|Is], St) ->
    Tis1 = instrs(Tis0, St),
    [?AND_THEN(Tis1)|instrs(Is, St)];
instrs([?OR_ELSE(Fis0)|Is], St) ->
    Fis1 = instrs(Fis0, St),
    [?OR_ELSE(Fis1)|instrs(Is, St)];
instrs([?IF_TRUE(Tis0)|Is], St) ->
    Tis1 = instrs(Tis0, St),
    [?IF_TRUE(Tis1)|instrs(Is, St)];
instrs([?IF_FALSE(Fis0)|Is], St) ->
    Fis1 = instrs(Fis0, St),
    [?IF_FALSE(Fis1)|instrs(Is, St)];
%% One-armed ifs are rewritten to IF_TRUE / IF_FALSE.
instrs([?IF(Tis, [])|Is], St) ->
    instrs([?IF_TRUE(Tis)|Is], St);
instrs([?IF([], Fis)|Is], St) -> %This should never happen
    instrs([?IF_FALSE(Fis)|Is], St);
instrs([?IF(Tis0, Fis0)|Is], St) ->
    Tis1 = instrs(Tis0, St),
    Fis1 = instrs(Fis0, St),
    [?IF(Tis1, Fis1)|instrs(Is, St)];
instrs([?NFOR(V, Fis0)|Is], St) ->
    Fis1 = instrs(Fis0, St),
    [?NFOR(V, Fis1)|instrs(Is, St)];
instrs([?GFOR(Vs, Fis0)|Is], St) ->
    Fis1 = instrs(Fis0, St),
    [?GFOR(Vs, Fis1)|instrs(Is, St)];
%% Nothing to do.
instrs([I|Is], St) -> [I|instrs(Is, St)];
instrs([], _) -> []. | src/luerl_comp_peep.erl | 0.53048 | 0.545588 | luerl_comp_peep.erl | starcoder |
%% @copyright 2016 <NAME> <<EMAIL>>
%%
%% This software is released under the MIT License.
%% See the LICENSE file in the project root for full license information.
%%
%% @doc An Eventual Leader Election Library
%%
%% This module provides functionality to elect the leader
%% which will be eventually agreed by all member of the same distributed erlang cluster.
%%
%% ```
%% %%
%% %% Elects the leader
%% %%
%% > Leader = evel:elect(foo, self()).
%%
%% %%
%% %% Finds the leader of an election
%% %%
%% > {ok, Leader} = evel:find_leader(foo).
%% > error = evel:find_leader(bar).
%%
%% %%
%% %% Dismisses the leader
%% %%
%% > ok = evel:dismiss(foo).
%% > error = evel:find_leader(foo).
%% '''
%%
%% @end
-module(evel).
%%----------------------------------------------------------------------------------------------------------------------
%% Exported API
%%----------------------------------------------------------------------------------------------------------------------
-export([elect/2, elect/3]).
-export([dismiss/1, dismiss/2]).
-export([find_leader/1, find_leader/2]).
-export([known_leaders/0]).
-export([is_leader/1]).
-export([get_winner/1]).
-export([get_certificate/1]).
-export_type([election_id/0]).
-export_type([candidate/0]).
-export_type([leader/0]).
-export_type([winner/0]).
-export_type([certificate/0]).
-export_type([elect_option/0]).
-export_type([find_option/0]).
-export_type([dismiss_option/0]).
%%----------------------------------------------------------------------------------------------------------------------
%% Types
%%----------------------------------------------------------------------------------------------------------------------
-type election_id() :: term().
%% The identifier of an election.
%% In each election, only one leader is elected.
-type candidate() :: pid().
%% A candidate of an election.
-type winner() :: candidate().
%% The winner of an election.
-type certificate() :: pid().
%% The certificate to gurantee the legitimacy of a leader.
%%
%% If the certificate process is down, the corresponding candidate is no longer a leader.
-type leader() :: {winner(), certificate()}.
%% A candidate which wins the electoin and is certified as the leader.
-type elect_option() :: {priority, term()}
| {link, boolean()}
| find_option().
%% priority:
%% - The priority of the candidate.
%% - The smaller value means a higher priority.
%% - If conflict arises between multiple candidates in the same election, the highest priority one is eventually elected.
%% - The default value is `erlang:system_time(micro_seconds)'.
%%
%% link:
%% - If the value is `true', the candidate process and the certificate process will be linked.
%% - The default value is `true'.
-type find_option() :: {timeout, timeout()}
| {voter_count, pos_integer()}.
%% timeout:
%% - If some voter do not respond in the period, their votes are ignored.
%% - The default value is `100'.
%%
%% voter_count:
%% - The number of voters which vote for the election.
%% - The larger value is more tolerant to node failures and cluster member changes but more overhead occurs.
%% - Usually, but it is not mandatory, the same value in both {@link elect/2} and {@link find_leader/2} will be specified in the same election.
%% - The default value is `5'.
-type dismiss_option() :: {unlink, boolean()}
| {async, boolean()}.
%% unlink:
%% - If there is one, it removes the link between the candidate and the corresponding certificate process.
%% - Thus, the candidate process can survive after the dismissal.
%% - The default value is `false'.
%%
%% async:
%% - If the value is `true', the dismissal is processed by an asynchronous manner.
%% - The default value is `false'.
%%----------------------------------------------------------------------------------------------------------------------
%% Exported Functions
%%----------------------------------------------------------------------------------------------------------------------
%% @equiv elect(ElectionId, Candidate, [])
-spec elect(election_id(), candidate()) -> leader().
elect(ElectionId, Candidate) ->
    %% Convenience wrapper: run the election with default options.
    elect(ElectionId, Candidate, []).
%% @doc Elects the leader in the election
%%
%% If a leader have already been elected, it returns the leader.
%%
%% If conflict arises between multiple candidates in the same election,
%% the highest priority one is eventually elected (i.e., the leader is agreed by all member of the same erlang cluster).
%%
%% Point to notice is that temporary coexist of multiple leaders is not prohibited.
%% Some leaders will be eventually dismissed except highest priority one.
%%
%% If you are interested in the expiration of the term of office (or the dismissal) of a leader,
%% please you monitor the certificate process (i.e., `monitor(process, evel:get_certificate(Leader))').
%% The down of the certificate process indicates the retirement of the leader.
-spec elect(election_id(), candidate(), [elect_option()]) -> Leader :: leader().
%% Validate in the function head; a non-pid candidate or a non-list
%% option set raises the same badarg the explicit checks used to raise.
elect(ElectionId, Candidate, Options) when is_pid(Candidate), is_list(Options) ->
    evel_commission:elect(ElectionId, Candidate, Options);
elect(ElectionId, Candidate, Options) ->
    error(badarg, [ElectionId, Candidate, Options]).
%% @equiv dismiss(Leader, [])
-spec dismiss(leader()) -> ok.
dismiss(Leader) ->
    %% Convenience wrapper: defaults are unlink=false, async=false.
    dismiss(Leader, []).
%% @doc Dismisses the leader
%%
%% It kills (i.e., `exit(Pid, kill)') the corresponding certificate process.
%% As a result, the candidate process may exit if it have linked to the certificate process.
-spec dismiss(leader(), [dismiss_option()]) -> ok.
%% Dismisses the leader.  Validates the arguments, optionally removes the
%% candidate/certificate link, then asks the commission to dismiss.
dismiss(Leader, Options) ->
    _ = is_leader(Leader) orelse error(badarg, [Leader, Options]),
    _ = is_list(Options) orelse error(badarg, [Leader, Options]),
    _ = case proplists:get_value(unlink, Options, false) of
            false ->
                ok;
            true ->
                %% Best effort: the certificate process may already be
                %% gone, in which case the unlink request is ignored.
                %% (Replaces the old-style `catch Expr', which conflates
                %% throws/errors/exits and loses the stacktrace.)
                try
                    evel_agent:unlink_candidate(get_certificate(Leader))
                catch
                    _:_ -> ok
                end
        end,
    Async = proplists:get_value(async, Options, false),
    evel_commission:dismiss(Leader, Async).
%% @equiv find_leader(ElectionId, [])
-spec find_leader(election_id()) -> {ok, leader()} | error.
find_leader(ElectionId) ->
    %% Convenience wrapper: look up the leader with default options.
    find_leader(ElectionId, []).
%% @doc Finds the leader elected in the election
%%
%% If own node have already known the leader, this function will retrieve it from local ETS.
%% Otherwise it will try fetching the election result from remote nodes.
-spec find_leader(election_id(), [find_option()]) -> {ok, leader()} | error.
%% Validate in the function head; a non-list option set raises the same
%% badarg the explicit check used to raise.
find_leader(ElectionId, Options) when is_list(Options) ->
    evel_commission:find_leader(ElectionId, Options);
find_leader(ElectionId, Options) ->
    error(badarg, [ElectionId, Options]).
%% @doc Returns a list of locally known leaders
-spec known_leaders() -> [{election_id(), leader()}].
known_leaders() ->
    %% Delegates straight to evel_commission.
    evel_commission:known_leaders().
%% @doc Gets the winner part of `Leader'
-spec get_winner(leader()) -> winner().
%% Winner (first element) of a leader pair; badarg on anything that does
%% not pass is_leader/1.
get_winner(Leader) ->
    case is_leader(Leader) of
        true -> element(1, Leader);
        false -> error(badarg, [Leader])
    end.
%% @doc Gets the certificate part of `Leader'
-spec get_certificate(leader()) -> certificate().
%% Certificate (second element) of a leader pair; badarg on anything that
%% does not pass is_leader/1.
get_certificate(Leader) ->
    %% Fix: the original called error(badarg, [leader]) with the *atom*
    %% 'leader', so the badarg report never showed the offending value
    %% (get_winner/1 correctly passes [Leader]).
    _ = is_leader(Leader) orelse error(badarg, [Leader]),
    element(2, Leader).
%% @doc Returns `true' if `X' is a `leader()', otherwise `false'
-spec is_leader(X :: (leader() | term())) -> boolean().
is_leader({Winner, Certificate}) when is_pid(Winner), is_pid(Certificate) ->
    %% A leader is a pair of pids: {winner, certificate}.
    true;
is_leader(_Other) ->
    false.
%%%=============================================================================
%%% @doc Advent of code puzzle solution
%%% @end
%%%=============================================================================
-module(aoc2020_day24).
-behavior(aoc_puzzle).
-export([ parse/1
, solve1/1
, solve2/1
, info/0
]).
-include("aoc_puzzle.hrl").
%% This function is used in a call to lists:foldl/2, but does not
%% need its first argument.
-hank([{unnecessary_function_arguments, [{do_one_iter, 2, 1}]}]).
%%------------------------------------------------------------------------------
%% @doc info/0
%% Returns info about this puzzle.
%% @end
%%------------------------------------------------------------------------------
-spec info() -> aoc_puzzle().
info() ->
#aoc_puzzle{ module = ?MODULE
, year = 2020
, day = 24
, name = "Lobby Layout"
, expected = {300, 3466}
, has_input_file = true
}.
%%==============================================================================
%% Types
%%==============================================================================
-type coord() :: { X :: integer()
, Y :: integer()
, Z :: integer()
}.
-type hexgrid() :: #{ coord() => black }.
-type input_type() :: hexgrid().
-type result1_type() :: integer().
-type result2_type() :: result1_type().
%%------------------------------------------------------------------------------
%% @doc parse/1
%% Parses input file.
%% @end
%%------------------------------------------------------------------------------
-spec parse(Input :: binary()) -> input_type().
parse(Input) ->
flip_tiles(string:tokens(binary_to_list(Input), "\n\r")).
%%------------------------------------------------------------------------------
%% @doc solve1/1
%% Solves part 1. Receives parsed input as returned from parse/1.
%% @end
%%------------------------------------------------------------------------------
-spec solve1(Input :: input_type()) -> result1_type().
solve1(Tiles) ->
maps:size(Tiles).
%%------------------------------------------------------------------------------
%% @doc solve2/1
%% Solves part 2. Receives parsed input as returned from parse/1.
%% @end
%%------------------------------------------------------------------------------
-spec solve2(Tiles :: input_type()) -> result2_type().
solve2(Tiles) ->
N = 100,
Final = lists:foldl(fun do_one_iter/2, Tiles, lists:seq(1, N)),
maps:size(Final).
%%==============================================================================
%% Helpers
%%==============================================================================
%% Perform the initial tile flipping.
-spec flip_tiles(Input :: [string()]) -> hexgrid().
flip_tiles(Input) ->
lists:foldl(
fun(Line, Acc) ->
FinalCoord =
fold_coords(
fun(Dir, {X, Y, Z}) ->
%% Follow the conventions from
%% https://www.redblobgames.com/grids/hexagons/#neighbors
%% Note: sum of all the +1/-1 must be zero.
case Dir of
"ne" -> {X + 1, Y, Z - 1};
"e" -> {X + 1, Y - 1, Z };
"se" -> {X, Y - 1, Z + 1};
"sw" -> {X - 1, Y, Z + 1};
"w" -> {X - 1, Y + 1, Z };
"nw" -> {X, Y + 1, Z - 1}
end
end, {0, 0, 0}, Line),
%% Only store black keys, all other tiles are white
case maps:is_key(FinalCoord, Acc) of
true -> maps:remove(FinalCoord, Acc);
false -> maps:put(FinalCoord, black, Acc)
end
end, #{}, Input).
fold_coords(_Fun, State, []) ->
State;
fold_coords(Fun, State, [A, B|Rest]) when ([A, B] =:= "ne") orelse
([A, B] =:= "nw") orelse
([A, B] =:= "sw") orelse
([A, B] =:= "se") ->
fold_coords(Fun, Fun([A, B], State), Rest);
fold_coords(Fun, State, [A|Rest]) when ([A] =:= "e") orelse
([A] =:= "w") ->
fold_coords(Fun, Fun([A], State), Rest).
%% ======================================================================
%% Part 2 iteration code
%% ======================================================================
%% Represent coordinates using maps-as-sets.
get_coords_to_check(Tiles) ->
maps:fold(
fun(Coord, _, Acc) ->
maps:merge(
Acc,
maps:merge(#{Coord => ignore}, neighbors(Coord)))
end, #{}, Tiles).
do_one_iter(_N, Tiles) ->
maps:fold(
fun(Coord, ignore, Acc) ->
Neighbors = neighbors(Coord),
IsBlack = maps:is_key(Coord, Tiles),
case {IsBlack, count_black_neighbors(Neighbors, Tiles)} of
{true, 0} -> maps:remove(Coord, Acc);
{true, NB} when NB > 2 -> maps:remove(Coord, Acc);
{false, 2} -> maps:put(Coord, black, Acc);
_ -> Acc
end
end, Tiles, get_coords_to_check(Tiles)).
count_black_neighbors(Neighbors, Tiles) ->
maps:fold(
fun(Coord, ignore, N) ->
case maps:get(Coord, Tiles, white) of
black -> N + 1;
_ -> N
end
end, 0, Neighbors).
neighbors({X, Y, Z} = Coord) ->
%% Cache neighbor lists in the process dictionary (these are static;
%% the neighbors of a given tile will always be the same).
case get(Coord) of
undefined ->
Nbrs = maps:from_list(
[{{X + Dx, Y + Dy, Z + Dz}, ignore}
|| Dx <- [-1, 0, 1],
Dy <- [-1, 0, 1],
Dz <- [-1, 0, 1],
{Dy, Dx, Dz} =/= {0, 0, 0},
Dy + Dx + Dz == 0]),
put(Coord, Nbrs),
Nbrs;
Nbrs -> Nbrs
end.
%%%_* Emacs ====================================================================
%%% Local Variables:
%%% allout-layout: t
%%% erlang-indent-level: 2
%%% End: | src/2020/aoc2020_day24.erl | 0.609989 | 0.595669 | aoc2020_day24.erl | starcoder |
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2019 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc map of rcvd dots.
-module(rcvd).
-author("<NAME> <<EMAIL>>").
-include("camus.hrl").
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-export([new/0,
init/2,
add_dot/2,
is_unrcvd/2,
is_rcvd/2,
get_exceptions/1,
is_exception/2,
to_binary/1,
from_binary/1]).
-export_type([received/0, binary_rcvd/0]).
-type received() :: maps:map(id(), {counter(), [{counter(), counter()}]}).
-type binary_rcvd() :: binary().
%% @doc Create a rcvd map.
-spec new() -> received().
new() ->
    %% An empty map; per-id entries are added by init/2.
    #{}.
%% @doc Initialise an entry for each of the given ids in the rcvd map.
-spec init([id()], received()) -> received().
init(Ids, Rcvd) ->
    %% Give every id a fresh entry: nothing received yet (counter 0) and
    %% no exception ranges.  Existing entries for these ids are reset.
    Fresh = maps:from_list([{Id, {0, []}} || Id <- Ids]),
    maps:merge(Rcvd, Fresh).
%% @doc Add a dot to the rcvd map.
-spec add_dot(dot(), received()) -> received().
add_dot({Id, Ctr}, Rcvd) ->
    %% An exception range {A, B} means counters A+1..A+B are missing.
    {LastCtr, Exceptions} = maps:get(Id, Rcvd),
    Entry =
        if
            Ctr =:= LastCtr + 1 ->
                %% The expected next dot: just advance the counter.
                {Ctr, Exceptions};
            Ctr > LastCtr + 1 ->
                %% A gap: counters LastCtr+1..Ctr-1 are now missing.
                %% (The original stored {LastCtr, Ctr - LastCtr}, wrongly
                %% including the just-received Ctr in the missing range.)
                {Ctr, [{LastCtr, Ctr - 1 - LastCtr} | Exceptions]};
            true ->
                %% An old dot: remove it from its exception range, if any.
                %% Duplicates (Ctr =:= LastCtr, or not inside any range)
                %% leave the entry unchanged; the original fold either
                %% corrupted the entry to a bare list or crashed with
                %% case_clause in those situations.
                {LastCtr, drop_exception(Ctr, Exceptions)}
        end,
    maps:update(Id, Entry, Rcvd).

%% Remove counter Ctr from the single range that contains it, splitting
%% the range in two when Ctr falls strictly inside it.
drop_exception(_Ctr, []) ->
    [];
drop_exception(Ctr, [{A, B} | Rest]) when Ctr > A, Ctr =< A + B ->
    Left = case Ctr - 1 - A of
               0 -> [];
               LeftLen -> [{A, LeftLen}]
           end,
    Right = case A + B - Ctr of
                0 -> [];
                RightLen -> [{Ctr, RightLen}]
            end,
    Left ++ Right ++ Rest;
drop_exception(Ctr, [Range | Rest]) ->
    [Range | drop_exception(Ctr, Rest)].
%% @doc Check if a dot is an exception of the Rcvd.
-spec is_exception(dot(), [{counter(), counter()}]) -> boolean().
is_exception({_Id, Ctr}, UnRcvdList) ->
    %% A range {A, B} covers the missing counters A+1..A+B.  Test the
    %% ranges directly instead of materialising them: the original went
    %% through get_exceptions/1, which returns a *nested* list, so
    %% lists:member(Ctr, ...) never matched and this predicate always
    %% returned false.
    lists:any(fun({A, B}) -> Ctr > A andalso Ctr =< A + B end, UnRcvdList).
%% @doc Returns exceptions of the Rcvd.
-spec get_exceptions([{counter(), counter()}]) -> [dot()].
get_exceptions(UnRcvdList) ->
    %% NOTE(review): the -spec above says [dot()], but the result is a
    %% flat list of plain counters; the spec should read [counter()].
    %% lists:append/1 flattens one level: the original consed each
    %% lists:seq/2 result onto the accumulator ([Seq|Acc]), producing a
    %% nested list that lists:member(Ctr, ...) could never match against.
    lists:append([lists:seq(A + 1, A + B) || {A, B} <- UnRcvdList]).
%% @doc Check if a dot is in the Rcvd.
%% Note: raises {badkey, Id} when Id was never registered via init/2.
-spec is_rcvd(dot(), received()) -> boolean() | {error, term()}.
is_rcvd({Id, Ctr}=Dot, Rcvd) ->
    case maps:get(Id, Rcvd) of
        {LastCtr, UnRcvdList} when is_integer(LastCtr)->
            %% Received iff at or below the high-water mark and not one
            %% of the recorded exception gaps.
            Ctr =< LastCtr andalso not is_exception(Dot, UnRcvdList);
        Ret ->
            %% Malformed entry for Id; surface it instead of crashing.
            {error, Ret}
    end.
%% @doc Check if a dot is unrcvd.
-spec is_unrcvd(dot(), received()) -> boolean().
is_unrcvd({Id, Ctr}=Dot, Rcvd) ->
    case maps:get(Id, Rcvd, notfound) of
        notfound ->
            %% Unknown ids are treated as "not unreceived".
            false;
        {LastCtr, UnRcvdList} ->
            %% Unreceived iff beyond the high-water mark or inside a gap.
            Ctr > LastCtr orelse is_exception(Dot, UnRcvdList)
    end.
%% @doc an efficient format for disk / wire.
%% @see from_binary/1
-spec to_binary(received()) -> binary_rcvd().
to_binary(Rcvd) ->
    %% Plain external term format; decode with from_binary/1.
    erlang:term_to_binary(Rcvd).
%% @doc takes the output of `to_binary/1' and returns a Rcvd
-spec from_binary(binary_rcvd()) -> received().
from_binary(Bin) ->
    %% NOTE(review): binary_to_term/1 on untrusted input can create atoms
    %% and funs; if Bin can arrive from outside the cluster, consider the
    %% 'safe' decoding option.
    erlang:binary_to_term(Bin).
%% Copyright (c) 2018 <NAME>
%%
%% Permission is hereby granted, free of charge, to any person obtaining a
%% copy of this software and associated documentation files (the "Software"),
%% to deal in the Software without restriction, including without limitation
%% the rights to use, copy, modify, merge, publish, distribute, sublicense,
%% and/or sell copies of the Software, and to permit persons to whom the
%% Software is furnished to do so, subject to the following conditions:
%%
%% The above copyright notice and this permission notice shall be included in
%% all copies or substantial portions of the Software.
%%
%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO WORK SHALL THE
%% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
%% FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
%% DEALINGS IN THE SOFTWARE.
-module(stacktrace_compat_SUITE).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
%% ------------------------------------------------------------------
%% Boilerplate
%% ------------------------------------------------------------------
%% Common Test entry point: run every group declared by groups/0.
all() ->
    [{group, Name} || {Name, _Opts, _Cases} <- groups()].

%% A single group that runs all individual test cases.
groups() ->
    [{Name, [], individual_test_cases()} || Name <- [individual_tests]].

%% All exported arity-1 functions whose name ends in "_test".
individual_test_cases() ->
    [Name || {Name, Arity} <- ?MODULE:module_info(exports),
             Arity =:= 1,
             lists:suffix("_test", atom_to_list(Name))].
%% ------------------------------------------------------------------
%% Initialization
%% ------------------------------------------------------------------
%% Group fixture: make sure SASL (for crash reports) and the library
%% under test are running before any case in the group executes.
init_per_group(_Name, Config) ->
    {ok, _} = application:ensure_all_started(sasl),
    {ok, _} = application:ensure_all_started(stacktrace_compat),
    Config.

%% No per-group teardown needed; the started applications are shared.
end_per_group(_Name, Config) ->
    Config.
%% ------------------------------------------------------------------
%% Definition
%% ------------------------------------------------------------------
%% Each case below drives assert_expected_stacktraces_for/2 with the
%% capture scenario (defined in test_module) it is named after.
naked_capture_test(_Config) ->
    assert_expected_stacktraces_for(raise, naked_capture).

throw_capture_pattern_test(_Config) ->
    assert_expected_stacktraces_for(raise, throw_capture_pattern).

capture_after_variable_export_test(_Config) ->
    assert_expected_stacktraces_for(raise, capture_after_variable_export).

no_capture_test(_Config) ->
    assert_expected_stacktraces_for(raise, no_capture).

function_capture_test(_Config) ->
    assert_expected_stacktraces_for(raise, function_capture).

nested_function_capture_test(_Config) ->
    assert_expected_stacktraces_for(raise, nested_function_capture).

nested_function_capture_with_both_test(_Config) ->
    assert_expected_stacktraces_for(raise, nested_function_capture_with_both).

function_capture_in_expression_test(_Config) ->
    assert_expected_stacktraces_for(raise, function_capture_in_expression).

function_capture_in_result_handler_test(_Config) ->
    assert_expected_stacktraces_for(raise, function_capture_in_result_handler).

helper_capture_test(_Config) ->
    assert_expected_stacktraces_for(raise, helper_capture).

%% These cases exercise the OTP 21+ `catch Class:Error:Stack' syntax in
%% test_module and are only compiled when POST_OTP_20 is defined.
-ifdef(POST_OTP_20).
unused_var_with_function_capture_test(_Config) ->
    assert_expected_stacktraces_for(raise21, unused_var_with_function_capture).

var_capture_test(_Config) ->
    assert_expected_stacktraces_for(raise21, var_capture).

nested_var_capture_with_both_test(_Config) ->
    assert_expected_stacktraces_for(raise21, nested_var_capture_with_both).
-endif.
%% ------------------------------------------------------------------
%% Internal
%% ------------------------------------------------------------------
%% Core driver: run Function(CaseName) in test_module compiled without
%% the parse transform, then again compiled with it, and check that the
%% two captured stacktraces relate as expected for this OTP release.
assert_expected_stacktraces_for(Function, CaseName) ->
    {ok, Cwd} = file:get_cwd(),
    %% Logged for debugging only; the beam is loaded from memory below.
    TestModuleBeamPath = filename:join(Cwd, "test_module"),
    ct:pal("TestModuleBeamPath: ~p", [TestModuleBeamPath]),
    compile_and_load_test_module([]),
    assert_test_module_has_no_transform(),
    {CaseName, WithoutTransformST} = test_module:Function(CaseName),
    compile_and_load_test_module([{parse_transform, stacktrace_transform}]),
    assert_test_module_has_transform(),
    {CaseName, WithTransformST} = test_module:Function(CaseName),
    assert_expected_stacktraces(Function, CaseName, WithoutTransformST, WithTransformST).
%% Purge any old code, compile test_module with the given extra options
%% and load the resulting beam directly from memory.
compile_and_load_test_module(ExtraOptions) ->
    _ = code:purge(test_module),
    {ok, test_module, Beam} = compile_test_module(ExtraOptions),
    {module, test_module} = code:load_binary(test_module, "test_module.erl", Beam),
    ok.
%% Compile test_module to a binary. The POST_OTP_* macros are derived
%% from the emulator's release string so the module can use
%% version-gated syntax: releases "21"/"22" define POST_OTP_20;
%% "23"+ (and any future "3x".."9x" release) additionally POST_OTP_22.
compile_test_module(ExtraOptions) ->
    Options =
        [binary,
         report_errors,
         report_warnings,
         debug_info
         | case erlang:system_info(otp_release) of
               [V|_] when V >= $3, V =< $9 ->
                   %% Release 30 or later.
                   [{d, 'POST_OTP_20'},
                    {d, 'POST_OTP_22'}];
               [$2,V|_] when V >= $3 ->
                   %% Release 23..29.
                   [{d, 'POST_OTP_20'},
                    {d, 'POST_OTP_22'}];
               [$2,V|_] when V >= $1 ->
                   %% Release 21..22.
                   [{d, 'POST_OTP_20'}];
               _ ->
                   %% Release 20 or older: no macros defined.
                   []
           end
        ]
        ++ ExtraOptions,
    %% Path is relative to the common_test run directory.
    TestModulePath = "../../lib/stacktrace_compat/test/test_module.erl",
    compile:file(TestModulePath, Options).
%% Assert that the currently loaded test_module was compiled without
%% the stacktrace_transform parse transform.
assert_test_module_has_no_transform() ->
    ?assertNot(test_module_compiled_with_transform()).

%% Assert that the currently loaded test_module was compiled with
%% the stacktrace_transform parse transform.
assert_test_module_has_transform() ->
    ?assert(test_module_compiled_with_transform()).

%% Inspect test_module's compile info for the parse-transform option.
test_module_compiled_with_transform() ->
    CompileInfo = test_module:module_info(compile),
    Options = proplists:get_value(options, CompileInfo, []),
    lists:member({parse_transform, stacktrace_transform}, Options).
-ifdef(POST_OTP_22).
%% From OTP 23 on, erlang:get_stacktrace/0 always returns [] (and is
%% removed in OTP 24), so for most `raise' cases the untransformed
%% build yields [] while the transformed build yields real frames.
assert_expected_stacktraces(Function, CaseName, WithoutTransformST, WithTransformST)
  when Function =:= raise, (CaseName =/= naked_capture andalso
                            CaseName =/= no_capture andalso
                            CaseName =/= helper_capture);
       Function =:= raise21, (CaseName =:= unused_var_with_function_capture) ->
    % OTP 23 made `:get_stacktrace()` always return an empty list;
    % OTP 24 will remove it entirely.
    ?assertEqual([], WithoutTransformST),
    ?assertMatch(
       [{_Module, _Function, _ArtityOrArgs, _Info} | _],
       WithTransformST);
%% Cases using the native OTP 21+ capture syntax get a real stacktrace
%% in both builds; they must be equivalent.
assert_expected_stacktraces(Function, CaseName, WithoutTransformST, WithTransformST)
  when Function =:= raise21, (CaseName =:= nested_var_capture_with_both orelse
                              CaseName =:= var_capture) ->
    assert_stacktrace_equivalence(WithoutTransformST, WithTransformST);
%% Everything else captures nothing in either build.
assert_expected_stacktraces(_Function, _CaseName, WithoutTransformST, WithTransformST) ->
    ?assertEqual([], WithoutTransformST),
    ?assertEqual([], WithTransformST).
-else.
%% Before OTP 23, erlang:get_stacktrace/0 still works, so both builds
%% must capture equivalent stacktraces.
assert_expected_stacktraces(_, _CaseName, WithoutTransformST, WithTransformST) ->
    assert_stacktrace_equivalence(WithoutTransformST, WithTransformST).
-endif. % -ifdef(POST_OTP_22)
%% Assert both stacktraces have the same frames (module, function and
%% arity-or-args), ignoring per-frame location Info which may differ
%% between the two builds.
assert_stacktrace_equivalence([{ModuleA, FunctionA, ArityOrArgsA, _InfoA} | NextA],
                              [{ModuleB, FunctionB, ArityOrArgsB, _InfoB} | NextB]) ->
    ?assertEqual(ModuleA, ModuleB),
    ?assertEqual(FunctionA, FunctionB),
    ?assertEqual(ArityOrArgsA, ArityOrArgsB),
    assert_stacktrace_equivalence(NextA, NextB);
assert_stacktrace_equivalence([], []) ->
    ok. | test/stacktrace_compat_SUITE.erl | 0.689096 | 0.460228 | stacktrace_compat_SUITE.erl | starcoder |
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%% Purpose: Prepare for code generation, including register allocation.
%%
%% The output of this compiler pass is still in the SSA format, but
%% it has been annotated and transformed to help the code generator.
%%
%% * Some instructions are translated to other instructions closer to
%% the BEAM instructions. For example, the put_tuple instruction is
%% broken apart into the put_tuple_arity and put_tuple_elements
%% instructions. Similary, the binary matching instructions are
%% transformed from the optimization-friendly internal format to
%% instruction more similar to the actual BEAM instructions.
%%
%% * Blocks that will need an instruction for allocating a stack frame
%% are annotated with a {frame_size,Size} annotation.
%%
%% * 'copy' instructions are added for all variables that need
%% to be saved to the stack frame. Additional 'copy' instructions
%% can be added as an optimization to reuse y registers (see
%% the copy_retval sub pass).
%%
%% * Each function is annotated with a {register,RegisterMap}
%% annotation that maps each variable to a BEAM register. The linear
%% scan algorithm is used to allocate registers.
%%
%% There are four kind of registers. x, y, fr (floating point register),
%% and z. A variable will be allocated to a z register if it is only
%% used by the instruction following the instruction that defines the
%% the variable. The code generator will typically combine those
%% instructions to a test instruction. z registers are also used for
%% some instructions that don't have a return value.
%%
%% References:
%%
%% [1] <NAME> and <NAME>. Linear scan register allocation
%% in the context of SSA form and register constraints. In Proceedings
%% of the International Conference on Compiler Construction, pages
%% 229–246. LNCS 2304, Springer-Verlag, 2002.
%%
%% [2] <NAME> and <NAME>. Optimized interval splitting in a
%% linear scan register allocator. In Proceedings of the ACM/USENIX
%% International Conference on Virtual Execution Environments, pages
%% 132–141. ACM Press, 2005.
%%
%% [3] <NAME> and <NAME>. Linear Scan Register Allocation on SSA
%% Form. In Proceedings of the International Symposium on Code
%% Generation and Optimization, pages 170-179. ACM Press, 2010.
%%
-module(beam_ssa_pre_codegen).
-export([module/2]).
-include("beam_ssa.hrl").
-import(lists, [all/2,any/2,append/1,duplicate/2,
foldl/3,last/1,map/2,member/2,partition/2,
reverse/1,reverse/2,sort/1,zip/2]).
-spec module(beam_ssa:b_module(), [compile:option()]) ->
                    {'ok',beam_ssa:b_module()}.

%% Run the pre-codegen sub passes over every function in the module.
%% The dprecg option additionally records live intervals as extra
%% annotations for .precg listing files.
module(#b_module{body=Body0}=Module, Opts) ->
    Passes = passes(proplists:get_bool(dprecg, Opts)),
    Body = functions(Body0, Passes),
    {ok,Module#b_module{body=Body}}.
%% Apply the pass list to each function, preserving order.
functions(Fs, Ps) ->
    [function(F, Ps) || F <- Fs].
%% Shorthands for types defined by other compiler modules, plus the
%% register kinds used during reservation and allocation.
-type b_var() :: beam_ssa:b_var().
-type var_name() :: beam_ssa:var_name().
-type instr_number() :: pos_integer().
-type range() :: {instr_number(),instr_number()}.
-type reg_num() :: beam_asm:reg_num().
-type xreg() :: {'x',reg_num()}.
-type yreg() :: {'y',reg_num()}.
-type ypool() :: {'y',beam_ssa:label()}.
%% What kind of register a variable has been reserved (not yet
%% allocated) to receive.
-type reservation() :: 'fr' | {'prefer',xreg()} | 'x' | {'x',xreg()} |
                       ypool() | {yreg(),ypool()} | 'z'.
-type ssa_register() :: beam_ssa_codegen:ssa_register().

%% Development helper: wrap an expression to time it.
-define(TC(Body), tc(fun() -> Body end, ?FILE, ?LINE)).

%% Compiler state threaded through all sub passes of this module.
-record(st, {ssa :: beam_ssa:block_map(),
             args :: [b_var()],
             cnt :: beam_ssa:label(),
             frames=[] :: [beam_ssa:label()],
             intervals=[] :: [{b_var(),[range()]}],
             aliases=[] :: [{b_var(),b_var()}],
             res=[] :: [{b_var(),reservation()}] | #{b_var():=reservation()},
             regs=#{} :: #{b_var():=ssa_register()},
             extra_annos=[] :: [{atom(),term()}]
            }).

%% A sub pass entry: the pass name paired with its fun.
-define(PASS(N), {N,fun N/1}).
%% Build the ordered list of sub passes to run. When ExtraAnnos is
%% true (the dprecg option was given), also save the live intervals
%% for inclusion in the .precg listing file.
passes(ExtraAnnos) ->
    Ps = [?PASS(assert_no_critical_edges),

          %% Preliminaries.
          ?PASS(fix_bs),
          ?PASS(sanitize),
          ?PASS(fix_tuples),
          ?PASS(place_frames),
          ?PASS(fix_receives),

          %% Find and reserve Y registers.
          ?PASS(find_yregs),
          ?PASS(reserve_yregs),

          %% Improve reuse of Y registers to potentially
          %% reduce the size of the stack frame.
          ?PASS(copy_retval),
          ?PASS(opt_get_list),

          %% Calculate live intervals.
          ?PASS(number_instructions),
          ?PASS(live_intervals),
          ?PASS(remove_unsuitable_aliases),
          ?PASS(reserve_regs),
          ?PASS(merge_intervals),

          %% If needed for a .precg file, save the live intervals
          %% so they can be included in an annotation.
          case ExtraAnnos of
              false -> ignore;
              true -> ?PASS(save_live_intervals)
          end,

          %% Allocate registers.
          ?PASS(linear_scan),
          ?PASS(fix_aliased_regs),
          ?PASS(frame_size),
          ?PASS(turn_yregs)],
    case ExtraAnnos of
        false -> [P || P <- Ps, P =/= ignore];
        true -> Ps
    end.

%% Run all sub passes over one function, collecting the results into
%% the function's annotations. On a crash, print which function was
%% being compiled before re-raising.
function(#b_function{anno=Anno,args=Args,bs=Blocks0,cnt=Count0}=F0, Ps) ->
    try
        St0 = #st{ssa=Blocks0,args=Args,cnt=Count0},
        St = compile:run_sub_passes(Ps, St0),
        #st{ssa=Blocks,cnt=Count,regs=Regs,extra_annos=ExtraAnnos} = St,
        F1 = add_extra_annos(F0, ExtraAnnos),
        F = beam_ssa:add_anno(registers, Regs, F1),
        F#b_function{bs=Blocks,cnt=Count}
    catch
        Class:Error:Stack ->
            #{func_info:={_,Name,Arity}} = Anno,
            io:fwrite("Function: ~w/~w\n", [Name,Arity]),
            erlang:raise(Class, Error, Stack)
    end.
%% Stash the computed live intervals as an extra annotation (only run
%% when producing a .precg listing).
save_live_intervals(#st{intervals=Intervals}=St) ->
    St#st{extra_annos=[{live_intervals,Intervals}]}.

%% Give every alias the same register as the variable it aliases.
fix_aliased_regs(#st{aliases=Aliases,regs=Regs}=St) ->
    St#st{regs=fix_aliased_regs(Aliases, Regs)}.
%% Extend the register map so that each alias maps to the register of
%% the variable it aliases. Lookups are done in the evolving map, so
%% chained aliases resolve, and a missing register still crashes with
%% badmatch as before.
fix_aliased_regs(Aliases, Regs) ->
    lists:foldl(fun({Alias, Var}, Acc) ->
                        #{Var := Reg} = Acc,
                        Acc#{Alias => Reg}
                end, Regs, Aliases).
%% Add extra annotations when a .precg listing file is being produced.
add_extra_annos(F, Annos) ->
    foldl(fun({Name,Value}, Acc) ->
                  beam_ssa:add_anno(Name, Value, Acc)
          end, F, Annos).

%% assert_no_critical_edges(St0) -> St.
%%  The code generator will not work if there are critical edges.
%%  Abort if any critical edges are found.

assert_no_critical_edges(#st{ssa=Blocks}=St) ->
    F = fun assert_no_ces/3,
    beam_ssa:fold_rpo(F, Blocks, Blocks),
    St.

%% A block starting with a multi-source phi node has several
%% predecessors; for its incoming edges not to be critical, every
%% predecessor must have exactly one successor.
assert_no_ces(_, #b_blk{is=[#b_set{op=phi,args=[_,_]=Phis}|_]}, Blocks) ->
    %% This block has multiple predecessors. Make sure that none
    %% of the precessors have more than one successor.
    true = all(fun({_,P}) ->
                       length(beam_ssa:successors(P, Blocks)) =:= 1
               end, Phis),                      %Assertion.
    Blocks;
assert_no_ces(_, _, Blocks) -> Blocks.
%% fix_bs(St0) -> St.
%%  Fix up the binary matching instructions:
%%
%%    * Insert bs_save and bs_restore instructions where needed.
%%
%%    * Combine bs_match and bs_extract instructions to bs_get
%%      instructions.

fix_bs(#st{ssa=Blocks,cnt=Count0}=St) ->
    %% Map each match context variable to its parent context, or to
    %% {context,Var} for roots created by bs_start_match.
    F = fun(#b_set{op=bs_start_match,dst=Dst}, A) ->
                %% Mark the root of the match context list.
                [{Dst,{context,Dst}}|A];
           (#b_set{op=bs_match,dst=Dst,args=[_,ParentCtx|_]}, A) ->
                %% Link this match context the previous match context.
                [{Dst,ParentCtx}|A];
           (_, A) ->
                A
        end,
    case beam_ssa:fold_instrs_rpo(F, [0], [],Blocks) of
        [] ->
            %% No binary matching in this function.
            St;
        [_|_]=M ->
            CtxChain = maps:from_list(M),
            Linear0 = beam_ssa:linearize(Blocks),

            %% Insert bs_save / bs_restore instructions where needed.
            {Linear1,Count} = bs_save_restore(Linear0, CtxChain, Count0),

            %% Rename instructions.
            Linear = bs_instrs(Linear1, CtxChain, []),

            St#st{ssa=maps:from_list(Linear),cnt=Count}
    end.

%% Insert bs_save and bs_restore instructions as needed.
bs_save_restore(Linear0, CtxChain, Count0) ->
    Rs0 = bs_restores(Linear0, CtxChain, #{}, #{}),
    Rs = maps:values(Rs0),
    %% Group the needed save points per match context using sofs.
    S0 = sofs:relation(Rs, [{context,save_point}]),
    S1 = sofs:relation_to_family(S0),
    S = sofs:to_external(S1),
    Slots = make_save_point_dict(S, []),
    {Saves,Count1} = make_save_map(Rs, Slots, Count0, []),
    {Restores,Count} = make_restore_map(maps:to_list(Rs0), Slots, Count1, []),

    %% Now insert all saves and restores.
    {bs_insert(Linear0, Saves, Restores, Slots),Count}.

%% Create one bs_save instruction per needed save point, keyed by the
%% instruction it must follow. The special "start" position needs no
%% save instruction.
make_save_map([{Ctx,Save}=Ps|T], Slots, Count, Acc) ->
    Ignored = #b_var{name={'@ssa_ignored',Count}},
    case make_slot(Ps, Slots) of
        #b_literal{val=start} ->
            make_save_map(T, Slots, Count, Acc);
        Slot ->
            I = #b_set{op=bs_save,dst=Ignored,args=[Ctx,Slot]},
            make_save_map(T, Slots, Count+1, [{Save,I}|Acc])
    end;
make_save_map([], _, Count, Acc) ->
    {maps:from_list(Acc),Count}.

%% Create one bs_restore instruction per restore site, keyed by the
%% instruction it must precede.
make_restore_map([{Bef,{Ctx,_}=Ps}|T], Slots, Count, Acc) ->
    Ignored = #b_var{name={'@ssa_ignored',Count}},
    I = #b_set{op=bs_restore,dst=Ignored,args=[Ctx,make_slot(Ps, Slots)]},
    make_restore_map(T, Slots, Count+1, [{Bef,I}|Acc]);
make_restore_map([], _, Count, Acc) ->
    {maps:from_list(Acc),Count}.

%% The slot for a save point: the literal 'start' for the context's
%% initial position, otherwise the allocated slot number.
make_slot({Same,Same}, _Slots) ->
    #b_literal{val=start};
make_slot({_,_}=Ps, Slots) ->
    #b_literal{val=maps:get(Ps, Slots)}.

%% Allocate consecutive slot numbers for each context's save points
%% and record the total slot count per context.
make_save_point_dict([{Ctx,Pts}|T], Acc0) ->
    Acc = make_save_point_dict_1(Pts, Ctx, 0, Acc0),
    make_save_point_dict(T, Acc);
make_save_point_dict([], Acc) ->
    maps:from_list(Acc).
%% Number each save point of Ctx with consecutive slot numbers,
%% skipping the context itself (the special {atom,start} save point
%% needs no bs_save instruction). When the list is exhausted, also
%% record the total number of slots used as {Ctx,Count}.
make_save_point_dict_1(Points, Ctx, Slot, Acc) ->
    case Points of
        [Ctx|Rest] ->
            %% The "start" position: no slot is allocated.
            make_save_point_dict_1(Rest, Ctx, Slot, Acc);
        [Point|Rest] ->
            Entry = {{Ctx,Point},Slot},
            make_save_point_dict_1(Rest, Ctx, Slot+1, [Entry|Acc]);
        [] ->
            [{Ctx,Slot}|Acc]
    end.
%% Compute the needed restores: a map from the instruction (by dst)
%% that needs a restore before it to {Ctx,SavePoint}. D tracks, per
%% block label, the known position of each match context on entry.
bs_restores([{L,#b_blk{is=Is,last=Last}}|Bs], CtxChain, D0, Rs0) ->
    FPos = case D0 of
               #{L:=Pos0} -> Pos0;
               #{} -> #{}
           end,
    {SPos,Rs} = bs_restores_is(Is, CtxChain, FPos, Rs0),
    D = bs_update_successors(Last, SPos, FPos, D0),
    bs_restores(Bs, CtxChain, D, Rs);
bs_restores([], _, _, Rs) -> Rs.

%% Propagate the position maps to the terminator's successors: the
%% success path gets SPos, the failure path the unmodified FPos.
bs_update_successors(#b_br{succ=Succ,fail=Fail}, SPos, FPos, D) ->
    join_positions([{Succ,SPos},{Fail,FPos}], D);
bs_update_successors(#b_switch{fail=Fail,list=List}, SPos, FPos, D) ->
    SPos = FPos,                                %Assertion.
    Update = [{L,SPos} || {_,L} <- List] ++ [{Fail,SPos}],
    join_positions(Update, D);
bs_update_successors(#b_ret{}, _, _, D) -> D.
%% Merge an incoming position map for each label into D. A label not
%% seen before gets the map as-is; a label seen with a conflicting map
%% gets the key-wise join computed by join_positions_1/2.
join_positions(Updates, D) ->
    lists:foldl(fun join_position/2, D, Updates).

%% Join a single {Label,PositionMap} update into the accumulator.
join_position({L, NewPos}, D) ->
    case D of
        #{L := NewPos} ->
            %% Identical map: nothing to do.
            D;
        #{L := OldPos} ->
            %% Conflicting maps: join them key by key.
            D#{L := join_positions_1(NewPos, OldPos)};
        #{} ->
            %% First time this label is seen.
            D#{L => NewPos}
    end.

%% Key-wise join of two position maps: keys present in both keep
%% their value when the values agree and become 'unknown' otherwise;
%% keys present in only one map keep their value.
join_positions_1(MapPos0, MapPos1) ->
    Joined = maps:map(fun(Start, Pos) ->
                              case MapPos0 of
                                  #{Start := Pos} -> Pos;
                                  #{Start := _} -> unknown;
                                  #{} -> Pos
                              end
                      end, MapPos1),
    maps:merge(MapPos0, Joined).
%% Walk the instructions of one block, threading the per-context
%% position map (PosMap) and accumulating the needed restores (Rs).
bs_restores_is([#b_set{op=bs_start_match,dst=Start}|Is],
               CtxChain, PosMap0, Rs) ->
    PosMap = PosMap0#{Start=>Start},
    bs_restores_is(Is, CtxChain, PosMap, Rs);
bs_restores_is([#b_set{op=bs_match,dst=NewPos,args=Args}=I|Is],
               CtxChain, PosMap0, Rs0) ->
    Start = bs_subst_ctx(NewPos, CtxChain),
    [_,FromPos|_] = Args,
    case PosMap0 of
        #{Start:=FromPos} ->
            %% Same position, no restore needed.
            PosMap = case bs_match_type(I) of
                         plain ->
                             %% Update position to new position.
                             PosMap0#{Start:=NewPos};
                         _ ->
                             %% Position will not change (test_unit
                             %% instruction or no instruction at
                             %% all).
                             PosMap0#{Start:=FromPos}
                     end,
            bs_restores_is(Is, CtxChain, PosMap, Rs0);
        #{Start:=_} ->
            %% Different positions, might need a restore instruction.
            case bs_match_type(I) of
                none ->
                    %% The tail test will be optimized away.
                    %% No need to do a restore.
                    PosMap = PosMap0#{Start:=FromPos},
                    bs_restores_is(Is, CtxChain, PosMap, Rs0);
                test_unit ->
                    %% This match instruction will be replaced by
                    %% a test_unit instruction. We will need a
                    %% restore. The new position will be the position
                    %% restored to (NOT NewPos).
                    PosMap = PosMap0#{Start:=FromPos},
                    Rs = Rs0#{NewPos=>{Start,FromPos}},
                    bs_restores_is(Is, CtxChain, PosMap, Rs);
                plain ->
                    %% Match or skip. Position will be changed.
                    PosMap = PosMap0#{Start:=NewPos},
                    Rs = Rs0#{NewPos=>{Start,FromPos}},
                    bs_restores_is(Is, CtxChain, PosMap, Rs)
            end
    end;
bs_restores_is([#b_set{op=bs_extract,args=[FromPos|_]}|Is],
               CtxChain, PosMap, Rs) ->
    Start = bs_subst_ctx(FromPos, CtxChain),
    #{Start:=FromPos} = PosMap,                 %Assertion.
    bs_restores_is(Is, CtxChain, PosMap, Rs);
bs_restores_is([#b_set{op=Op,dst=Dst,args=Args}|Is],
               CtxChain, PosMap0, Rs0)
  when Op =:= bs_test_tail;
       Op =:= call ->
    {Rs,PosMap} = bs_restore_args(Args, PosMap0, CtxChain, Dst, Rs0),
    bs_restores_is(Is, CtxChain, PosMap, Rs);
bs_restores_is([_|Is], CtxChain, PosMap, Rs) ->
    bs_restores_is(Is, CtxChain, PosMap, Rs);
bs_restores_is([], _CtxChain, PosMap, Rs) ->
    {PosMap,Rs}.

%% Classify a bs_match instruction: 'none' if it will be optimized
%% away entirely (skip-all with unit 1), 'test_unit' if it only tests
%% the remaining size, 'plain' otherwise (the position advances).
bs_match_type(#b_set{args=[#b_literal{val=skip},_Ctx,
                           #b_literal{val=binary},_Flags,
                           #b_literal{val=all},#b_literal{val=U}]}) ->
    case U of
        1 -> none;
        _ -> test_unit
    end;
bs_match_type(_) ->
    plain.

%% Record restores needed for match-context arguments of an
%% instruction (bs_test_tail or call) whose current position differs
%% from the last saved one.
bs_restore_args([#b_var{}=Arg|Args], PosMap0, CtxChain, Dst, Rs0) ->
    Start = bs_subst_ctx(Arg, CtxChain),
    case PosMap0 of
        #{Start:=Arg} ->
            %% Same position, no restore needed.
            bs_restore_args(Args, PosMap0, CtxChain, Dst, Rs0);
        #{Start:=_} ->
            %% Different positions, need a restore instruction.
            PosMap = PosMap0#{Start:=Arg},
            Rs = Rs0#{Dst=>{Start,Arg}},
            bs_restore_args(Args, PosMap, CtxChain, Dst, Rs);
        #{} ->
            %% Not a match context.
            bs_restore_args(Args, PosMap0, CtxChain, Dst, Rs0)
    end;
bs_restore_args([_|Args], PosMap, CtxChain, Dst, Rs) ->
    bs_restore_args(Args, PosMap, CtxChain, Dst, Rs);
bs_restore_args([], PosMap, _CtxChain, _Dst, Rs) ->
    {Rs,PosMap}.
%% Insert all bs_save and bs_restore instructions.
bs_insert([{L,#b_blk{is=Is0}=Blk}|Bs0], Saves, Restores, Slots) ->
    Is = bs_insert_is_1(Is0, Restores, Slots),
    Bs = bs_insert_saves(Is, Bs0, Saves),
    [{L,Blk#b_blk{is=Is}}|bs_insert(Bs, Saves, Restores, Slots)];
bs_insert([], _, _, _) -> [].

%% Insert restores before the instructions that need them, and
%% annotate each bs_start_match with the number of save slots its
%% context requires.
bs_insert_is_1([#b_set{op=Op,dst=Dst}=I0|Is], Restores, Slots) ->
    if
        Op =:= bs_test_tail;
        Op =:= bs_match;
        Op =:= call ->
            Rs = case Restores of
                     #{Dst:=R} -> [R];
                     #{} -> []
                 end,
            Rs ++ [I0|bs_insert_is_1(Is, Restores, Slots)];
        Op =:= bs_start_match ->
            NumSlots = case Slots of
                           #{Dst:=NumSlots0} -> NumSlots0;
                           #{} -> 0
                       end,
            I = beam_ssa:add_anno(num_slots, NumSlots, I0),
            [I|bs_insert_is_1(Is, Restores, Slots)];
        true ->
            [I0|bs_insert_is_1(Is, Restores, Slots)]
    end;
bs_insert_is_1([], _, _) -> [].

%% If any instruction in the block requires a save after it, insert
%% the save at the start of the following block.
bs_insert_saves([#b_set{dst=Dst}|Is], Bs, Saves) ->
    case Saves of
        #{Dst:=S} ->
            bs_insert_save(S, Bs);
        #{} ->
            bs_insert_saves(Is, Bs, Saves)
    end;
bs_insert_saves([], Bs, _) -> Bs.

%% Insert the save instruction into the next block, keeping a leading
%% bs_extract first (it must stay adjacent to its bs_match).
bs_insert_save(Save, [{L,#b_blk{is=Is0}=Blk}|Bs]) ->
    Is = case Is0 of
             [#b_set{op=bs_extract}=Ex|Is1] ->
                 [Ex,Save|Is1];
             _ ->
                 [Save|Is0]
         end,
    [{L,Blk#b_blk{is=Is}}|Bs].

%% Translate bs_match instructions to bs_get, bs_match_string,
%% or bs_skip. Also rename match context variables to use the
%% variable assigned to by the start_match instruction.

bs_instrs([{L,#b_blk{is=Is0}=Blk}|Bs], CtxChain, Acc0) ->
    case bs_instrs_is(Is0, CtxChain, []) of
        [#b_set{op=bs_extract,dst=Dst,args=[Ctx]}|Is] ->
            %% Drop this instruction. Rewrite the corresponding
            %% bs_match instruction in the previous block to
            %% a bs_get instruction.
            Acc = bs_combine(Dst, Ctx, Acc0),
            bs_instrs(Bs, CtxChain, [{L,Blk#b_blk{is=Is}}|Acc]);
        Is ->
            bs_instrs(Bs, CtxChain, [{L,Blk#b_blk{is=Is}}|Acc0])
    end;
bs_instrs([], _, Acc) ->
    reverse(Acc).

%% Rewrite the instructions of one block, substituting match context
%% variables with their root context.
bs_instrs_is([#b_set{op=Op,args=Args0}=I0|Is], CtxChain, Acc) ->
    Args = [bs_subst_ctx(A, CtxChain) || A <- Args0],
    I1 = I0#b_set{args=Args},
    I = case {Op,Args} of
            {bs_match,[#b_literal{val=skip},Ctx,Type|As]} ->
                I1#b_set{op=bs_skip,args=[Type,Ctx|As]};
            {bs_match,[#b_literal{val=string},Ctx|As]} ->
                I1#b_set{op=bs_match_string,args=[Ctx|As]};
            {_,_} ->
                I1
        end,
    bs_instrs_is(Is, CtxChain, [I|Acc]);
bs_instrs_is([], _, Acc) ->
    reverse(Acc).

%% Combine a bs_match instruction with the destination register
%% taken from a bs_extract instruction.

bs_combine(Dst, Ctx, [{L,#b_blk{is=Is0}=Blk}|Acc]) ->
    [#b_set{}=Succeeded,
     #b_set{op=bs_match,args=[Type,_|As]}=BsMatch|Is1] = reverse(Is0),
    Is = reverse(Is1, [BsMatch#b_set{op=bs_get,dst=Dst,args=[Type,Ctx|As]},
                       Succeeded#b_set{args=[Dst]}]),
    [{L,Blk#b_blk{is=Is}}|Acc].

%% Resolve a match context variable to the root context it belongs
%% to; any other term is returned unchanged.
bs_subst_ctx(#b_var{}=Var, CtxChain) ->
    case CtxChain of
        #{Var:={context,Ctx}} ->
            Ctx;
        #{Var:=ParentCtx} ->
            bs_subst_ctx(ParentCtx, CtxChain);
        #{} ->
            %% Not a match context variable.
            Var
    end;
bs_subst_ctx(Other, _CtxChain) ->
    Other.
%% sanitize(St0) -> St.
%%  Remove constructs that can cause problems later:
%%
%%  * Unreachable blocks may cause problems for determination of
%%    dominators.
%%
%%  * Some instructions (such as get_hd) don't accept literal
%%    arguments. Evaluate the instructions and remove them.

sanitize(#st{ssa=Blocks0,cnt=Count0}=St) ->
    Ls = beam_ssa:rpo(Blocks0),
    {Blocks,Count} = sanitize(Ls, Count0, Blocks0, #{}),
    St#st{ssa=Blocks,cnt=Count}.

%% Walk the blocks in reverse post order, constant-folding
%% instructions with literal arguments (recorded in Values) and
%% rewriting the rest where necessary.
sanitize([L|Ls], Count0, Blocks0, Values0) ->
    #b_blk{is=Is0} = Blk0 = maps:get(L, Blocks0),
    case sanitize_is(Is0, Count0, Values0, false, []) of
        no_change ->
            sanitize(Ls, Count0, Blocks0, Values0);
        {Is,Count,Values} ->
            Blk = Blk0#b_blk{is=Is},
            Blocks = Blocks0#{L:=Blk},
            sanitize(Ls, Count, Blocks, Values)
    end;
sanitize([], Count, Blocks0, Values) ->
    Blocks = if
                 map_size(Values) =:= 0 ->
                     Blocks0;
                 true ->
                     beam_ssa:rename_vars(Values, [0], Blocks0)
             end,

    %% Unreachable blocks can cause problems for the dominator calculations.
    Ls = beam_ssa:rpo(Blocks),
    Reachable = gb_sets:from_list(Ls),
    {case map_size(Blocks) =:= gb_sets:size(Reachable) of
         true -> Blocks;
         false -> remove_unreachable(Ls, Blocks, Reachable, [])
     end,Count}.

%% Sanitize the instructions of one block; returns no_change when
%% nothing was rewritten to avoid rebuilding the block.
sanitize_is([#b_set{op=get_map_element,
                    args=[#b_literal{}=Map,Key]}=I0|Is],
            Count0, Values, _Changed, Acc) ->
    %% get_map_element does not accept a literal map argument;
    %% copy the literal into a fresh variable first.
    {MapVarName,Count} = new_var_name('@ssa_map', Count0),
    MapVar = #b_var{name=MapVarName},
    I = I0#b_set{args=[MapVar,Key]},
    Copy = #b_set{op=copy,dst=MapVar,args=[Map]},
    sanitize_is(Is, Count, Values, true, [I,Copy|Acc]);
sanitize_is([#b_set{op=Op,dst=#b_var{name=Dst},args=Args0}=I0|Is0],
            Count, Values, Changed, Acc) ->
    Args = map(fun(#b_var{name=V}=Var) ->
                       case Values of
                           #{V:=New} -> New;
                           #{} -> Var
                       end;
                  (Lit) -> Lit
               end, Args0),
    case sanitize_instr(Op, Args, I0) of
        {value,Value0} ->
            Value = #b_literal{val=Value0},
            sanitize_is(Is0, Count, Values#{Dst=>Value}, true, Acc);
        {ok,I} ->
            sanitize_is(Is0, Count, Values, true, [I|Acc]);
        ok ->
            sanitize_is(Is0, Count, Values, Changed, [I0|Acc])
    end;
sanitize_is([], Count, Values, Changed, Acc) ->
    case Changed of
        true ->
            {reverse(Acc),Count,Values};
        false ->
            no_change
    end.
%% Evaluate or rewrite a single instruction that has literal
%% arguments. Returns {value,V} when the instruction can be constant
%% folded, {ok,I} when it was rewritten, and ok to keep it unchanged.
sanitize_instr({bif,Bif}, [#b_literal{val=Lit}], _I) ->
    case erl_bifs:is_pure(erlang, Bif, 1) of
        false ->
            ok;
        true ->
            try
                {value,erlang:Bif(Lit)}
            catch
                error:_ ->
                    ok
            end
    end;
sanitize_instr({bif,Bif}, [#b_literal{val=Lit1},#b_literal{val=Lit2}], _I) ->
    true = erl_bifs:is_pure(erlang, Bif, 2),    %Assertion.
    try
        {value,erlang:Bif(Lit1, Lit2)}
    catch
        error:_ ->
            ok
    end;
sanitize_instr(get_hd, [#b_literal{val=[Hd|_]}], _I) ->
    {value,Hd};
sanitize_instr(get_tl, [#b_literal{val=[_|Tl]}], _I) ->
    {value,Tl};
sanitize_instr(get_tuple_element, [#b_literal{val=T},
                                   #b_literal{val=I}], _I)
  when I < tuple_size(T) ->
    {value,element(I+1, T)};
sanitize_instr(is_nonempty_list, [#b_literal{val=Lit}], _I) ->
    {value,case Lit of
               [_|_] -> true;
               _ -> false
           end};
sanitize_instr(is_tagged_tuple, [#b_literal{val=Tuple},
                                 #b_literal{val=Arity},
                                 #b_literal{val=Tag}], _I)
  when is_integer(Arity), is_atom(Tag) ->
    if
        tuple_size(Tuple) =:= Arity, element(1, Tuple) =:= Tag ->
            {value,true};
        true ->
            {value,false}
    end;
sanitize_instr(bs_init, [#b_literal{val=new},#b_literal{val=Sz}|_], I0) ->
    %% A negative or non-integer literal size is a guaranteed badarg
    %% at runtime; replace the instruction with the error call.
    if
        is_integer(Sz), Sz >= 0 -> ok;
        true -> {ok,sanitize_badarg(I0)}
    end;
sanitize_instr(bs_init, [#b_literal{val=append},_,#b_literal{val=Sz}|_], I0) ->
    if
        is_integer(Sz), Sz >= 0 -> ok;
        true -> {ok,sanitize_badarg(I0)}
    end;
sanitize_instr(succeeded, [#b_literal{}], _I) ->
    {value,true};
sanitize_instr(_, _, _) -> ok.

%% Replace an instruction that is certain to fail with a call to
%% erlang:error(badarg).
sanitize_badarg(I) ->
    Func = #b_remote{mod=#b_literal{val=erlang},
                     name=#b_literal{val=error},arity=1},
    I#b_set{op=call,args=[Func,#b_literal{val=badarg}]}.
%% Keep only the reachable blocks, pruning phi arguments that refer
%% to removed predecessors.
remove_unreachable([L|Ls], Blocks, Reachable, Acc) ->
    #b_blk{is=Is0} = Blk0 = maps:get(L, Blocks),
    case split_phis(Is0) of
        {[_|_]=Phis,Rest} ->
            Is = [prune_phi(Phi, Reachable) || Phi <- Phis] ++ Rest,
            Blk = Blk0#b_blk{is=Is},
            remove_unreachable(Ls, Blocks, Reachable, [{L,Blk}|Acc]);
        {[],_} ->
            remove_unreachable(Ls, Blocks, Reachable, [{L,Blk0}|Acc])
    end;
remove_unreachable([], _Blocks, _, Acc) ->
    maps:from_list(Acc).

%% Keep only phi arguments whose predecessor block is still reachable.
prune_phi(#b_set{args=Args0}=Phi, Reachable) ->
    Args = [A || {_,Pred}=A <- Args0,
                 gb_sets:is_element(Pred, Reachable)],
    Phi#b_set{args=Args}.
%%%
%%% Fix tuples.
%%%

%% fix_tuples(St0) -> St.
%%  We must split tuple creation into two instruction to mirror the
%%  the way tuples are created in BEAM. Each put_tuple instruction is
%%  split into put_tuple_arity followed by put_tuple_elements.

fix_tuples(#st{ssa=Blocks0,cnt=Count0}=St) ->
    F = fun (#b_set{op=put_tuple,args=Args}=Put, C0) ->
                Arity = #b_literal{val=length(Args)},
                %% put_tuple_elements has no meaningful result; give
                %% it a fresh throw-away destination variable.
                {VarName,C} = new_var_name('@ssa_ignore', C0),
                Ignore = #b_var{name=VarName},
                {[Put#b_set{op=put_tuple_arity,args=[Arity]},
                  #b_set{dst=Ignore,op=put_tuple_elements,args=Args}],C};
           (I, C) -> {[I],C}
        end,
    {Blocks,Count} = beam_ssa:flatmapfold_instrs_rpo(F, [0], Count0, Blocks0),
    St#st{ssa=Blocks,cnt=Count}.

%%%
%%% Find out where frames should be placed.
%%%

%% place_frames(St0) -> St.
%%   Return a list of the labels for the blocks that need stack frame
%%   allocation instructions.
%%
%%   This function attempts to place stack frames as tight as possible
%%   around the code, to avoid building stack frames for code paths
%%   that don't need one.
%%
%%   Stack frames are placed in blocks that dominate all of their
%%   descendants. That guarantees that the deallocation instructions
%%   cannot be reached from other execution paths that didn't set up
%%   a stack frame or set up a stack frame with a different size.

place_frames(#st{ssa=Blocks}=St) ->
    Doms = beam_ssa:dominators(Blocks),
    Ls = beam_ssa:rpo(Blocks),
    Tried = gb_sets:empty(),
    Frames0 = [],
    {Frames,_} = place_frames_1(Ls, Blocks, Doms, Tried, Frames0),
    St#st{frames=Frames}.
%% Visit the blocks in reverse post order, attempting to place a
%% frame in every block that needs one. A {need_frame,...} throw
%% bubbles the request up to a dominating ancestor.
place_frames_1([L|Ls], Blocks, Doms, Tried0, Frames0) ->
    Blk = maps:get(L, Blocks),
    case need_frame(Blk) of
        true ->
            %% This block needs a frame. Try to place it here.
            {Frames,Tried} = do_place_frame(L, Blocks, Doms, Tried0, Frames0),

            %% Successfully placed. Try to place more frames in descendants
            %% that are not dominated by this block.
            place_frames_1(Ls, Blocks, Doms, Tried, Frames);
        false ->
            try
                place_frames_1(Ls, Blocks, Doms, Tried0, Frames0)
            catch
                throw:{need_frame,For,Tried1}=Reason ->
                    %% A descendant block needs a stack frame. Try to
                    %% place it here.
                    case is_dominated_by(For, L, Doms) of
                        true ->
                            %% Try to place a frame here.
                            {Frames,Tried} = do_place_frame(L, Blocks, Doms,
                                                            Tried1, Frames0),
                            place_frames_1(Ls, Blocks, Doms, Tried, Frames);
                        false ->
                            %% Wrong place. This block does not dominate
                            %% the block that needs the frame. Pass it on
                            %% to our ancestors.
                            throw(Reason)
                    end
            end
    end;
place_frames_1([], _, _, Tried, Frames) ->
    {Frames,Tried}.

%% do_place_frame(Label, Blocks, Dominators, Tried0, Frames0) -> {Frames,Tried}.
%%  Try to place a frame in this block. This function returns
%%  successfully if it either succeds at placing a frame in this
%%  block, if an ancestor that dominates this block has already placed
%%  a frame, or if we have already tried to put a frame in this block.
%%
%%  An {need_frame,Label,Tried} exception will be thrown if this block
%%  block is not suitable for having a stack frame (i.e. it does not dominate
%%  all of its descendants). The exception means that an ancestor will have to
%%  place the frame needed by this block.

do_place_frame(L, Blocks, Doms, Tried0, Frames) ->
    case gb_sets:is_element(L, Tried0) of
        true ->
            %% We have already tried to put a frame in this block.
            {Frames,Tried0};
        false ->
            %% Try to place a frame in this block.
            Tried = gb_sets:insert(L, Tried0),
            case place_frame_here(L, Blocks, Doms, Frames) of
                yes ->
                    %% We need a frame and it is safe to place it here.
                    {[L|Frames],Tried};
                no ->
                    %% An ancestor has a frame. Not needed.
                    {Frames,Tried};
                ancestor ->
                    %% This block does not dominate all of its
                    %% descendants. We must place the frame in
                    %% an ancestor.
                    throw({need_frame,L,Tried})
            end
    end.
%% place_frame_here(Label, Blocks, Doms, Frames) -> no|yes|ancestor.
%%  Determine whether a frame should be placed in block Label.

place_frame_here(L, Blocks, Doms, Frames) ->
    B0 = any(fun(DomBy) ->
                     is_dominated_by(L, DomBy, Doms)
             end, Frames),
    case B0 of
        true ->
            %% This block is dominated by an ancestor block that
            %% defines a frame. Not needed/allowed to put a frame
            %% here.
            no;
        false ->
            %% No frame in any ancestor. We need a frame.
            %% Now check whether the frame can be placed here.
            %% If this block dominates all of its descendants
            %% and the predecessors of any phi nodes it can be
            %% placed here.
            Descendants = beam_ssa:rpo([L], Blocks),
            PhiPredecessors = phi_predecessors(L, Blocks),
            MustDominate = ordsets:from_list(PhiPredecessors ++ Descendants),
            Dominates = all(fun(?BADARG_BLOCK) ->
                                    %% This block defines no variables and calls
                                    %% erlang:error(badarg). It does not matter
                                    %% whether L dominates ?BADARG_BLOCK or not;
                                    %% it is still safe to put the frame in L.
                                    true;
                               (Bl) ->
                                    is_dominated_by(Bl, L, Doms)
                            end, MustDominate),

            %% Also, this block must not be a loop header.
            IsLoopHeader = is_loop_header(L, Blocks),
            case Dominates andalso not IsLoopHeader of
                true -> yes;
                false -> ancestor
            end
    end.

%% phi_predecessors(Label, Blocks) ->
%%  Return all predecessors referenced in phi nodes.

phi_predecessors(L, Blocks) ->
    #b_blk{is=Is} = maps:get(L, Blocks),
    [P || #b_set{op=phi,args=Args} <- Is, {_,P} <- Args].
%% is_dominated_by(Label, DominatedBy, Dominators) -> true|false.
%% Test whether block Label is dominated by block DominatedBy.
%% Test whether block L is dominated by block DomBy, according to the
%% dominator map Doms (label -> ordset of dominating labels).
is_dominated_by(L, DomBy, Doms) ->
    ordsets:is_element(DomBy, maps:get(L, Doms)).
%% need_frame(#b_blk{}) -> true|false.
%% Test whether any of the instructions in the block requires a stack frame.
%% Test whether any instruction in the block requires a stack frame.
%% A block ending in a return is examined in the {return,Arg} context
%% so that tail calls can be recognized.
need_frame(#b_blk{is=Is,last=Last}) ->
    case Last of
        #b_ret{arg=Ret} -> need_frame_1(Is, {return,Ret});
        _ -> need_frame_1(Is, body)
    end.
%% need_frame_1(Instructions, body | tail | {return,Arg}) -> true|false.
%%  Decide whether the instruction sequence needs a stack frame in the
%%  given context.
need_frame_1([#b_set{op=make_fun,dst=#b_var{name=Fun}}|Is], {return,_}=Context) ->
    %% Since make_fun clobbers X registers, a stack frame is needed if
    %% any of the following instructions use any other variable than
    %% the one holding the reference to the created fun.
    need_frame_1(Is, Context) orelse
        case beam_ssa:used(#b_blk{is=Is,last=#b_ret{arg=#b_var{name=Fun}}}) of
            [Fun] -> false;
            [_|_] -> true
        end;
need_frame_1([#b_set{op=new_try_tag}|_], _) ->
    %% A try/catch tag must live in a Y register; a frame is mandatory.
    true;
need_frame_1([#b_set{op=call,dst=Val}]=Is, {return,Ret}) ->
    %% A call as the last instruction before returning its own result
    %% is a tail call; re-examine it in the appropriate context.
    if
        Val =:= Ret -> need_frame_1(Is, tail);
        true -> need_frame_1(Is, body)
    end;
need_frame_1([#b_set{op=call,args=[Func|_]}|Is], Context) ->
    case Func of
        #b_remote{mod=#b_literal{val=Mod},
                  name=#b_literal{val=Name},
                  arity=Arity} ->
            case erl_bifs:is_exit_bif(Mod, Name, Arity) of
                true ->
                    %% Calls that never return need no frame.
                    false;
                false ->
                    %% A frame is needed unless this is a tail call
                    %% (Context is 'tail' and no instructions follow)
                    %% to a BIF that cannot trap.
                    Context =:= body orelse
                        Is =/= [] orelse
                        is_trap_bif(Mod, Name, Arity)
            end;
        #b_remote{} ->
            %% This is an apply(), which always needs a frame.
            true;
        #b_var{} ->
            %% A fun call always needs a frame.
            true;
        _ ->
            %% Local call: needs a frame unless it is a tail call.
            Context =:= body orelse Is =/= []
    end;
need_frame_1([I|Is], Context) ->
    %% Any instruction that clobbers X registers forces a frame.
    beam_ssa:clobbers_xregs(I) orelse need_frame_1(Is, Context);
need_frame_1([], _) -> false.
%% is_trap_bif(Mod, Name, Arity) -> true|false.
%% Test whether we need a stack frame for this BIF.
%% Test whether the BIF may trap (suspend the process), in which case a
%% stack frame is needed even for a tail call to it. Only a fixed set
%% of BIFs in the erlang module can trap.
is_trap_bif(erlang, Name, Arity) ->
    case {Name,Arity} of
        {'!',2} -> true;
        {link,1} -> true;
        {unlink,1} -> true;
        {monitor_node,2} -> true;
        {group_leader,2} -> true;
        {exit,2} -> true;
        {_,_} -> false
    end;
is_trap_bif(_, _, _) -> false.
%%%
%%% Fix variables used in matching in receive.
%%%
%%% The loop_rec/2 instruction may return a reference to a
%%% message outside of any heap or heap fragment. If the message
%%% does not match, it is not allowed to store any reference to
%%% the message (or part of the message) on the stack. If we do,
%%% the message will be corrupted if there happens to be a GC.
%%%
%%% Here we make sure to introduce copies of variables that are
%%% matched out and subsequently used after the remove_message/0
%%% instructions. That will make sure that only X registers are
%%% used during matching.
%%%
%%% Depending on where variables are defined and used, they must
%%% be handled in two different ways.
%%%
%%% Variables that are always defined in the receive (before branching
%%% out into the different clauses of the receive) and used after the
%%% receive, must be handled in the following way: Before each
%%% remove_message instruction, each such variable must be copied, and
%%% all variables must be consolidated using a phi node in the
%%% common exit block for the receive.
%%%
%%% Variables that are matched out and used in the same clause
%%% need copy instructions before the remove_message instruction
%%% in that clause.
%%%
%% Insert the copies needed to keep message references out of the
%% stack during matching in receive.
fix_receives(#st{ssa=Blocks0,cnt=Count0}=St) ->
    Linear = maps:to_list(Blocks0),
    {Blocks,Count} = fix_receives_1(Linear, Blocks0, Count0),
    St#st{ssa=Blocks,cnt=Count}.
%% fix_receives_1([{Label,Block}], Blocks0, Count0) -> {Blocks,Count}.
%%  Find each receive loop (identified by a block starting with
%%  peek_message) and fix up its variables.
fix_receives_1([{L,Blk}|Ls], Blocks0, Count0) ->
    case Blk of
        #b_blk{is=[#b_set{op=peek_message}|_]} ->
            %% This block starts a receive loop. Find the blocks that
            %% do remove_message and the common exit block (if any).
            Rm = find_rm_blocks(L, Blocks0),
            LoopExit = find_loop_exit(Rm, Blocks0),
            Defs0 = beam_ssa:def([L], Blocks0),
            %% First handle variables defined in all clauses and used
            %% after the receive; then the remaining per-clause ones.
            CommonUsed = recv_common(Defs0, LoopExit, Blocks0),
            {Blocks1,Count1} = recv_fix_common(CommonUsed, LoopExit, Rm,
                                               Blocks0, Count0),
            Defs = ordsets:subtract(Defs0, CommonUsed),
            {Blocks,Count} = fix_receive(Rm, Defs, Blocks1, Count1),
            fix_receives_1(Ls, Blocks, Count);
        #b_blk{} ->
            fix_receives_1(Ls, Blocks0, Count0)
    end;
fix_receives_1([], Blocks, Count) ->
    {Blocks,Count}.
%% Return the variables defined inside the receive (Defs) that are
%% used, but not redefined, in the common exit block.
recv_common(_Defs, none, _Blocks) ->
    %% There is no common exit block because receive is used
    %% in the tail position of a function.
    [];
recv_common(Defs, Exit, Blocks) ->
    {ExitDefs,ExitUsed} = beam_ssa:def_used([Exit], Blocks),
    NotDefinedInExit = ordsets:subtract(Defs, ExitDefs),
    ordsets:intersection(NotDefinedInExit, ExitUsed).
%% recv_fix_common([CommonVar], LoopExit, [RemoveMessageLabel],
%% Blocks0, Count0) -> {Blocks,Count}.
%% Handle variables always defined in a receive and used
%% in the exit block following the receive.
%% For each common variable Msg0: give it a fresh name in the exit
%% block, add one fresh copy per remove_message block, and join the
%% copies with a phi node at the head of the exit block.
recv_fix_common([Msg0|T], Exit, Rm, Blocks0, Count0) ->
    {Msg1,Count1} = new_var_name('@recv', Count0),
    Msg = #b_var{name=Msg1},
    %% All uses in (and after) the exit block now refer to the phi value.
    Blocks1 = beam_ssa:rename_vars(#{Msg0=>Msg}, [Exit], Blocks0),
    N = length(Rm),
    {MsgVars0,Count} = new_var_names(duplicate(N, '@recv'), Count1),
    MsgVars = [#b_var{name=V} || V <- MsgVars0],
    PhiArgs = fix_exit_phi_args(MsgVars, Rm, Exit, Blocks1),
    Phi = #b_set{op=phi,dst=Msg,args=PhiArgs},
    ExitBlk0 = maps:get(Exit, Blocks1),
    ExitBlk = ExitBlk0#b_blk{is=[Phi|ExitBlk0#b_blk.is]},
    Blocks2 = Blocks1#{Exit:=ExitBlk},
    Blocks = recv_fix_common_1(MsgVars, Rm, Msg0, Blocks2),
    recv_fix_common(T, Exit, Rm, Blocks, Count);
recv_fix_common([], _, _, Blocks, Count) ->
    {Blocks,Count}.
%% In each remove_message block Rm: rename uses of Msg to the fresh
%% variable V for that clause, and insert "V = copy Msg" directly
%% after any phi instructions so the copy defines V before use.
recv_fix_common_1([V|Vs], [Rm|Rms], Msg, Blocks0) ->
    Ren = #{Msg=>V},
    Blocks1 = beam_ssa:rename_vars(Ren, [Rm], Blocks0),
    #b_blk{is=Is0} = Blk0 = maps:get(Rm, Blocks1),
    Copy = #b_set{op=copy,dst=V,args=[#b_var{name=Msg}]},
    Is = insert_after_phis(Is0, [Copy]),
    Blk = Blk0#b_blk{is=Is},
    Blocks = Blocks1#{Rm:=Blk},
    recv_fix_common_1(Vs, Rms, Msg, Blocks);
recv_fix_common_1([], [], _Msg, Blocks) -> Blocks.
%% Build the argument list for the phi node in the common exit block:
%% pair each fresh per-clause variable with the predecessor of the
%% exit block on the path from its remove_message block.
fix_exit_phi_args(Vs, Rms, Exit, Blocks) ->
    [{V,exit_predecessor(beam_ssa:rpo([Rm], Blocks), Exit)} ||
        {V,Rm} <- lists:zip(Vs, Rms)].
%% Return the element immediately preceding Exit in the path.
%% Crashes (function_clause) if Exit is not present after the head.
exit_predecessor([Pred|[Next|_]=Path], Exit) ->
    case Next of
        Exit -> Pred;
        _ -> exit_predecessor(Path, Exit)
    end.
%% fix_receive([Label], Defs, Blocks0, Count0) -> {Blocks,Count}.
%% Add a copy instruction for all variables that are matched out and
%% later used within a clause of the receive.
%% fix_receive([Label], Defs, Blocks0, Count0) -> {Blocks,Count}.
%%  Add a copy instruction for all variables that are matched out and
%%  later used within a clause of the receive.
fix_receive([L|Ls], Defs, Blocks0, Count0) ->
    {RmDefs,Used0} = beam_ssa:def_used([L], Blocks0),
    %% Variables defined in the receive, not redefined after
    %% remove_message, but used after it, must be copied.
    Def = ordsets:subtract(Defs, RmDefs),
    Used = ordsets:intersection(Def, Used0),
    {NewVs,Count} = new_var_names(Used, Count0),
    NewVars = [#b_var{name=V} || V <- NewVs],
    Ren = zip(Used, NewVars),
    Blocks1 = beam_ssa:rename_vars(Ren, [L], Blocks0),
    #b_blk{is=Is0} = Blk1 = maps:get(L, Blocks1),
    %% The copies must come after any phi instructions.
    CopyIs = [#b_set{op=copy,dst=New,args=[#b_var{name=Old}]} ||
                 {Old,New} <- Ren],
    Is = insert_after_phis(Is0, CopyIs),
    Blk = Blk1#b_blk{is=Is},
    Blocks = maps:put(L, Blk, Blocks1),
    fix_receive(Ls, Defs, Blocks, Count);
fix_receive([], _Defs, Blocks, Count) ->
    {Blocks,Count}.
%% find_loop_exit([Label], Blocks) -> Label | none.
%% Find the block to which control is transferred when the
%% receive loop is exited.
%% Find the common exit block by intersecting the paths from two of
%% the remove_message blocks; with fewer than two such blocks there
%% is no common exit.
find_loop_exit([L1,L2|_], Blocks) ->
    RevPath1 = lists:reverse(beam_ssa:rpo([L1], Blocks)),
    RevPath2 = lists:reverse(beam_ssa:rpo([L2], Blocks)),
    find_loop_exit_1(RevPath1, RevPath2, none);
find_loop_exit(_, _) -> none.
%% Walk two reversed paths in parallel; the last element of the common
%% suffix (i.e. the earliest common block) is the loop exit.
find_loop_exit_1([H1|T1], [H2|T2], _Exit) when H1 =:= H2 ->
    find_loop_exit_1(T1, T2, H1);
find_loop_exit_1(_, _, Exit) -> Exit.
%% find_rm_blocks(StartLabel, Blocks) -> [Label].
%% Find all blocks that start with remove_message within the receive
%% loop whose peek_message label is StartLabel.
%% Find all blocks that start with remove_message within the receive
%% loop whose peek_message block is L.
find_rm_blocks(L, Blocks) ->
    Succ = beam_ssa:successors(maps:get(L, Blocks)),
    find_rm_blocks_1(Succ, gb_sets:singleton(L), Blocks).
%% Depth-first search from the successors of the peek_message block,
%% collecting blocks containing remove_message and pruning paths that
%% loop back into the receive.
find_rm_blocks_1([L|Ls], Seen0, Blocks) ->
    case gb_sets:is_member(L, Seen0) of
        true ->
            find_rm_blocks_1(Ls, Seen0, Blocks);
        false ->
            Seen = gb_sets:insert(L, Seen0),
            Blk = maps:get(L, Blocks),
            case find_rm_act(Blk#b_blk.is) of
                prune ->
                    %% Looping back. Don't look at any successors.
                    find_rm_blocks_1(Ls, Seen, Blocks);
                continue ->
                    %% Neutral block. Do nothing here, but look at
                    %% all successors.
                    Succ = beam_ssa:successors(Blk),
                    find_rm_blocks_1(Succ++Ls, Seen, Blocks);
                found ->
                    %% Found remove_message instruction.
                    [L|find_rm_blocks_1(Ls, Seen, Blocks)]
            end
    end;
find_rm_blocks_1([], _, _) -> [].
%% Classify a block's instructions for the remove_message search:
%% 'found' if the block removes the message, 'prune' if it loops back
%% into the receive machinery, 'continue' otherwise.
find_rm_act([#b_set{op=remove_message}|_]) ->
    found;
find_rm_act([#b_set{op=Op}|Is]) ->
    case lists:member(Op, [peek_message,recv_next,wait_timeout,wait]) of
        true -> prune;
        false -> find_rm_act(Is)
    end;
find_rm_act([]) ->
    continue.
%%%
%%% Find out which variables need to be stored in Y registers.
%%%
%% State for the Y-register analysis of a single path:
%% d = variables currently defined (since the last clobbering
%%     instruction); k = variables killed by clobbering instructions.
-record(dk, {d :: ordsets:ordset(var_name()),
             k :: ordsets:ordset(var_name())
            }).
%% find_yregs(St0) -> St.
%% Find all variables that must be stored in Y registers. Annotate
%% the blocks that allocate frames with the set of Y registers
%% used within that stack frame.
%%
%% Basically, we follow all execution paths starting from a block
%% that allocates a frame, keeping track of all defined registers
%% and all registers killed by an instruction that clobbers X
%% registers. For every use of a variable, we check if it is in
%% the set of killed variables; if it is, it must be stored in an Y
%% register.
%% Find all variables that must be stored in Y registers and annotate
%% each frame-allocating block with that set.
find_yregs(#st{frames=[]}=St) ->
    %% No stack frames; no Y registers are needed.
    St;
find_yregs(#st{frames=[_|_]=Frames,args=Args,ssa=Blocks0}=St) ->
    ArgNames = [V || #b_var{name=V} <- Args],
    FrameDefs = find_defs(Frames, Blocks0, ArgNames),
    St#st{ssa=find_yregs_1(FrameDefs, Blocks0)}.
%% For each frame-allocating block F (with the variables Defs defined
%% on entry), compute the set of Y registers used within the frame and
%% annotate the block with it.
find_yregs_1([{F,Defs}|Fs], Blocks0) ->
    DK = #dk{d=Defs,k=[]},
    D0 = #{F=>DK},
    Ls = beam_ssa:rpo([F], Blocks0),
    Yregs0 = [],
    Yregs = find_yregs_2(Ls, Blocks0, D0, Yregs0),
    Blk0 = maps:get(F, Blocks0),
    Blk = beam_ssa:add_anno(yregs, Yregs, Blk0),
    Blocks = Blocks0#{F:=Blk},
    find_yregs_1(Fs, Blocks);
find_yregs_1([], Blocks) -> Blocks.
%% Process the blocks reachable from the frame in reverse post-order,
%% threading the defined/killed sets (D) and accumulating variables
%% that are used after having been killed (Yregs).
find_yregs_2([L|Ls], Blocks0, D0, Yregs0) ->
    Blk0 = maps:get(L, Blocks0),
    #b_blk{is=Is,last=Last} = Blk0,
    Ys0 = maps:get(L, D0),
    {Yregs1,Ys} = find_yregs_is(Is, Ys0, Yregs0),
    Yregs = find_yregs_terminator(Last, Ys, Yregs1),
    Successors = beam_ssa:successors(Blk0),
    D = find_update_succ(Successors, Ys, D0),
    find_yregs_2(Ls, Blocks0, D, Yregs);
find_yregs_2([], _Blocks, _D, Yregs) -> Yregs.
%% For each frame-allocating block, determine the set of variables
%% defined on entry (the function arguments plus everything defined
%% on the path from block 0).
find_defs(Frames, Blocks, Defs) ->
    FramesSet = gb_sets:from_list(Frames),
    {FrameDefs,_Seen} =
        find_defs_1([0], Blocks, FramesSet, gb_sets:empty(), Defs, []),
    FrameDefs.
%% Depth-first walk from block 0, accumulating defined variables along
%% each path. When a frame-allocating block is reached, record the
%% definitions valid on entry to it and do not descend further.
find_defs_1([L|Ls], Blocks, Frames, Seen0, Defs0, Acc0) ->
    case gb_sets:is_member(L, Frames) of
        true ->
            %% Reached a frame block: record the definitions on entry.
            OrderedDefs = ordsets:from_list(Defs0),
            find_defs_1(Ls, Blocks, Frames, Seen0, Defs0,
                        [{L,OrderedDefs}|Acc0]);
        false ->
            case gb_sets:is_member(L, Seen0) of
                true ->
                    find_defs_1(Ls, Blocks, Frames, Seen0, Defs0, Acc0);
                false ->
                    Seen1 = gb_sets:insert(L, Seen0),
                    %% Process the remaining work list first (with the
                    %% current Defs0), then descend into L's successors
                    %% with the definitions extended by this block.
                    {Acc,Seen} = find_defs_1(Ls, Blocks, Frames, Seen1, Defs0, Acc0),
                    #b_blk{is=Is} = Blk = maps:get(L, Blocks),
                    Defs = find_defs_is(Is, Defs0),
                    Successors = beam_ssa:successors(Blk),
                    find_defs_1(Successors, Blocks, Frames, Seen, Defs, Acc)
            end
    end;
find_defs_1([], _, _, Seen, _, Acc) ->
    {Acc,Seen}.
%% Prepend the destination variable of every instruction to Acc.
find_defs_is(Is, Acc0) ->
    lists:foldl(fun(#b_set{dst=#b_var{name=Dst}}, Acc) ->
                        [Dst|Acc]
                end, Acc0, Is).
%% Propagate the defined/killed state to each successor. A successor
%% already seen keeps only definitions valid on ALL incoming paths
%% (intersection) and accumulates kills from ANY path (union).
find_update_succ([S|Ss], #dk{d=Defs0,k=Killed0}=DK0, D0) ->
    case D0 of
        #{S:=#dk{d=Defs1,k=Killed1}} ->
            Defs = ordsets:intersection(Defs0, Defs1),
            Killed = ordsets:union(Killed0, Killed1),
            DK = #dk{d=Defs,k=Killed},
            D = maps:put(S, DK, D0),
            find_update_succ(Ss, DK0, D);
        #{} ->
            D = maps:put(S, DK0, D0),
            find_update_succ(Ss, DK0, D)
    end;
find_update_succ([], _, D) -> D.
%% Scan the instructions of one block. Any variable used after being
%% killed by an X-clobbering instruction must live in a Y register.
find_yregs_is([#b_set{dst=#b_var{name=Dst}}=I|Is], #dk{d=Defs0,k=Killed0}=Ys, Yregs0) ->
    Used = beam_ssa:used(I),
    %% Used-but-killed variables must be in Y registers.
    Yregs1 = ordsets:intersection(Used, Killed0),
    Yregs = ordsets:union(Yregs0, Yregs1),
    case beam_ssa:clobbers_xregs(I) of
        false ->
            Defs = ordsets:add_element(Dst, Defs0),
            find_yregs_is(Is, Ys#dk{d=Defs}, Yregs);
        true ->
            %% Everything defined so far is killed; only the result of
            %% this instruction is defined afterwards.
            Killed = ordsets:union(Defs0, Killed0),
            Defs = [Dst],
            find_yregs_is(Is, Ys#dk{d=Defs,k=Killed}, Yregs)
    end;
find_yregs_is([], Ys, Yregs) -> {Yregs,Ys}.
%% Variables used by the terminator that were killed earlier in the
%% block must also be stored in Y registers.
find_yregs_terminator(Terminator, #dk{k=Killed}, Yregs) ->
    UsedHere = beam_ssa:used(Terminator),
    ordsets:union(Yregs, ordsets:intersection(UsedHere, Killed)).
%%%
%%% Try to reduce the size of the stack frame, by adding an explicit
%%% 'copy' instructions for return values from 'call' and 'make_fun' that
%%% need to be saved in Y registers. Here is an example to show
%%% how that's useful. First, here is the Erlang code:
%%%
%%% f(Pid) ->
%%% Res = foo(42),
%%% _ = node(Pid),
%%% bar(),
%%% Res.
%%%
%%% Compiled to SSA format, the main part of the code looks like this:
%%%
%%% 0:
%%% Res = call local literal foo/1, literal 42
%%% _1 = bif:node Pid
%%% @ssa_bool = succeeded _1
%%% br @ssa_bool, label 3, label 1
%%% 3:
%%% @ssa_ignored = call local literal bar/0
%%% ret Res
%%%
%%% It can be seen that the variables Pid and Res must be saved in Y
%%% registers in order to survive the function calls. A previous sub
%%% pass has inserted a 'copy' instruction to save the value of the
%%% variable Pid:
%%%
%%% 0:
%%% Pid:4 = copy Pid
%%% Res = call local literal foo/1, literal 42
%%% _1 = bif:node Pid:4
%%% @ssa_bool = succeeded _1
%%% br @ssa_bool, label 3, label 1
%%%
%%% 3:
%%% @ssa_ignored = call local literal bar/0
%%% ret Res
%%%
%%% The Res and Pid:4 variables must be assigned to different Y registers
%%% because they are live at the same time. copy_retval() inserts a
%%% 'copy' instruction to copy Res to a new variable:
%%%
%%% 0:
%%% Pid:4 = copy Pid
%%% Res:6 = call local literal foo/1, literal 42
%%% _1 = bif:node Pid:4
%%% @ssa_bool = succeeded _1
%%% br @ssa_bool, label 3, label 1
%%%
%%% 3:
%%% Res = copy Res:6
%%% @ssa_ignored = call local literal bar/0
%%% ret Res
%%%
%%% The new variable Res:6 is used to capture the return value from the call.
%%% The variables Pid:4 and Res are no longer live at the same time, so they
%%% can be assigned to the same Y register.
%%%
%% Insert 'copy' instructions for call/make_fun return values that
%% must be saved in Y registers, to shorten their live ranges.
copy_retval(#st{frames=Frames,ssa=Blocks0,cnt=Count0}=St) ->
    {NewBlocks,NewCount} = copy_retval_1(Frames, Blocks0, Count0),
    St#st{ssa=NewBlocks,cnt=NewCount}.
%% Process each frame: gather the Y-register variables (following any
%% copies already inserted) and rewrite all blocks within the frame.
copy_retval_1([F|Fs], Blocks0, Count0) ->
    #b_blk{anno=#{yregs:=Yregs0},is=Is} = maps:get(F, Blocks0),
    Yregs1 = gb_sets:from_list(Yregs0),
    Yregs = collect_yregs(Is, Yregs1),
    Ls = beam_ssa:rpo([F], Blocks0),
    {Blocks,Count} = copy_retval_2(Ls, Yregs, none, Blocks0, Count0),
    copy_retval_1(Fs, Blocks, Count);
copy_retval_1([], Blocks, Count) ->
    {Blocks,Count}.
%% Follow 'copy' instructions: when a Y-register variable X has been
%% copied to Y, the Y register is henceforth associated with Y, not X.
collect_yregs([#b_set{op=copy,dst=#b_var{name=Y},args=[#b_var{name=X}]}|Is],
              Yregs0) ->
    true = gb_sets:is_member(X, Yregs0),    %Assertion.
    Yregs = gb_sets:insert(Y, gb_sets:delete(X, Yregs0)),
    collect_yregs(Is, Yregs);
collect_yregs([#b_set{}|Is], Yregs) ->
    collect_yregs(Is, Yregs);
collect_yregs([], Yregs) -> Yregs.
%% Rewrite the blocks in the frame. Copy0 is a pending 'copy'
%% instruction carried over from the previous block (when the previous
%% block falls through to this one via a badarg-guarded branch).
copy_retval_2([L|Ls], Yregs, Copy0, Blocks0, Count0) ->
    #b_blk{is=Is0,last=Last} = Blk = maps:get(L, Blocks0),
    %% RC is true when the next block in RPO order is this block's
    %% success branch and the failure branch is the badarg block, so a
    %% pending copy may be carried over into the next block.
    RC = case {Last,Ls} of
             {#b_br{succ=Succ,fail=?BADARG_BLOCK},[Succ|_]} ->
                 true;
             {_,_} ->
                 false
         end,
    case copy_retval_is(Is0, RC, Yregs, Copy0, Count0, []) of
        {Is,Count} ->
            %% No pending copy leaves this block.
            case Copy0 =:= none andalso Count0 =:= Count of
                true ->
                    %% Nothing changed; keep the original block.
                    copy_retval_2(Ls, Yregs, none, Blocks0, Count0);
                false ->
                    Blocks = Blocks0#{L=>Blk#b_blk{is=Is}},
                    copy_retval_2(Ls, Yregs, none, Blocks, Count)
            end;
        {Is,Count,Copy} ->
            %% A copy is carried over into the next block.
            Blocks = Blocks0#{L=>Blk#b_blk{is=Is}},
            copy_retval_2(Ls, Yregs, Copy, Blocks, Count)
    end;
copy_retval_2([], _Yregs, none, Blocks, Count) ->
    {Blocks,Count}.
%% copy_retval_is(Is, RC, Yregs, PendingCopy, Count0, Acc) ->
%%     {Is,Count} | {Is,Count,CarriedCopy}
%%  Rewrite the instructions of one block, delaying the 'copy' of a
%%  call/make_fun result for as long as possible (possibly into the
%%  next block when RC is true) to shorten Y-register live ranges.
copy_retval_is([#b_set{op=put_tuple_elements,args=Args0}=I0], false, _Yregs,
               Copy, Count, Acc) ->
    %% The pending copy must be placed before put_tuple_elements.
    I = I0#b_set{args=copy_sub_args(Args0, Copy)},
    {reverse(Acc, [I|acc_copy([], Copy)]),Count};
copy_retval_is([#b_set{}]=Is, false, _Yregs, Copy, Count, Acc) ->
    %% Single trailing instruction; flush the pending copy before it.
    {reverse(Acc, acc_copy(Is, Copy)),Count};
copy_retval_is([#b_set{},#b_set{op=succeeded}]=Is, false, _Yregs, Copy, Count, Acc) ->
    %% An instruction paired with its 'succeeded' test must stay
    %% adjacent; flush the pending copy before both.
    {reverse(Acc, acc_copy(Is, Copy)),Count};
copy_retval_is([#b_set{op=Op,dst=#b_var{name=RetVal}=Dst}=I0|Is], RC, Yregs,
               Copy0, Count0, Acc0) when Op =:= call; Op =:= make_fun ->
    {I1,Count1,Acc} = place_retval_copy(I0, Yregs, Copy0, Count0, Acc0),
    case gb_sets:is_member(RetVal, Yregs) of
        true ->
            %% The result needs a Y register: capture it in a fresh
            %% variable now and delay the copy into the Y register.
            {NewVarName,Count} = new_var_name(RetVal, Count1),
            NewVar = #b_var{name=NewVarName},
            Copy = #b_set{op=copy,dst=Dst,args=[NewVar]},
            I = I1#b_set{dst=NewVar},
            copy_retval_is(Is, RC, Yregs, Copy, Count, [I|Acc]);
        false ->
            copy_retval_is(Is, RC, Yregs, none, Count1, [I1|Acc])
    end;
copy_retval_is([#b_set{args=Args0}=I0|Is], RC, Yregs, Copy, Count, Acc) ->
    I = I0#b_set{args=copy_sub_args(Args0, Copy)},
    case beam_ssa:clobbers_xregs(I) of
        true ->
            %% The pending copy must be placed before any instruction
            %% that clobbers X registers.
            copy_retval_is(Is, RC, Yregs, none, Count, [I|acc_copy(Acc, Copy)]);
        false ->
            copy_retval_is(Is, RC, Yregs, Copy, Count, [I|Acc])
    end;
copy_retval_is([], RC, _, Copy, Count, Acc) ->
    case {Copy,RC} of
        {none,_} ->
            {reverse(Acc),Count};
        {#b_set{},true} ->
            %% Carry the pending copy over into the next block.
            {reverse(Acc),Count,Copy};
        {#b_set{},false} ->
            {reverse(Acc, [Copy]),Count}
    end.
%%
%% Consider this code:
%%
%% Var = ...
%% ...
%% A1 = call foo/0
%% A = copy A1
%% B = call bar/1, Var
%%
%% If the Var variable is no longer used after this code, its Y register
%% can't be reused for A. To allow the Y register to be reused
%% we will need to insert 'copy' instructions for arguments that are
%% in Y registers:
%%
%% Var = ...
%% ...
%% A1 = call foo/0
%% Var1 = copy Var
%% A = copy A1
%% B = call bar/1, Var1
%%
%% place_retval_copy(Call, Yregs, PendingCopy, Count0, Acc0) ->
%%     {Call,Count,Acc}
%%  Flush the pending copy just before a call, first copying any
%%  Y-register call arguments (except the copy's own destination) to
%%  fresh variables so their Y registers can be reused.
place_retval_copy(I, _Yregs, none, Count, Acc) ->
    {I,Count,Acc};
place_retval_copy(#b_set{args=[F|Args0]}=I, Yregs, Copy, Count0, Acc0) ->
    #b_set{dst=#b_var{name=Avoid}} = Copy,
    {Args,Acc1,Count} = copy_func_args(Args0, Yregs, Avoid, Acc0, [], Count0),
    Acc = [Copy|Acc1],
    {I#b_set{args=[F|Args]},Count,Acc}.
%% Copy each Y-register argument (other than Avoid) to a fresh
%% variable so that its Y register is no longer live at the call.
%% Returns the rewritten argument list, the copy instructions
%% (prepended to CopyAcc in reverse code order), and the new counter.
copy_func_args([#b_var{name=V}=A|As], Yregs, Avoid, CopyAcc, Acc, Count0) ->
    case gb_sets:is_member(V, Yregs) of
        true when V =/= Avoid ->
            {NewVarName,Count} = new_var_name(V, Count0),
            NewVar = #b_var{name=NewVarName},
            Copy = #b_set{op=copy,dst=NewVar,args=[A]},
            copy_func_args(As, Yregs, Avoid, [Copy|CopyAcc], [NewVar|Acc], Count);
        _ ->
            copy_func_args(As, Yregs, Avoid, CopyAcc, [A|Acc], Count0)
    end;
copy_func_args([A|As], Yregs, Avoid, CopyAcc, Acc, Count) ->
    %% Literals and other non-variable arguments are left as they are.
    copy_func_args(As, Yregs, Avoid, CopyAcc, [A|Acc], Count);
copy_func_args([], _Yregs, _Avoid, CopyAcc, Acc, Count) ->
    {reverse(Acc),CopyAcc,Count}.
%% Prepend the pending 'copy' instruction (if any) to Acc.
acc_copy(Acc, Copy) ->
    case Copy of
        none -> Acc;
        #b_set{} -> [Copy|Acc]
    end.
%% Substitute the source of the pending copy for any use of its
%% destination in the argument list.
copy_sub_args(Args, none) ->
    Args;
copy_sub_args(Args, #b_set{dst=Dst,args=[Src]}) ->
    lists:map(fun(A) -> sub_arg(A, Dst, Src) end, Args).
%% Replace Arg with New when it is exactly the term Dst; keep it otherwise.
sub_arg(Arg, Dst, New) when Arg =:= Dst -> New;
sub_arg(Arg, _Dst, _New) -> Arg.
%%%
%%% Consider:
%%%
%%% x1/Hd = get_hd x0/Cons
%%% y0/Tl = get_tl x0/Cons
%%%
%%% Register x0 can't be reused for Hd. If Hd needs to be in x0,
%%% a 'move' instruction must be inserted.
%%%
%%% If we swap get_hd and get_tl when Tl is in a Y register,
%%% x0 can be used for Hd if Cons is not used again:
%%%
%%% y0/Tl = get_tl x0/Cons
%%% x0/Hd = get_hd x0/Cons
%%%
%% Swap get_hd/get_tl pairs when the tail goes to a Y register, so
%% that the source X register can be reused for the head.
opt_get_list(#st{ssa=Blocks,res=Res}=St) ->
    ResMap = maps:from_list(Res),
    Labels = beam_ssa:rpo(Blocks),
    St#st{ssa=opt_get_list_1(Labels, ResMap, Blocks)}.
%% Rewrite each block whose instruction sequence changed; keep the
%% original block otherwise.
opt_get_list_1([L|Ls], Res, Blocks0) ->
    #b_blk{is=Is0} = Blk = maps:get(L, Blocks0),
    case opt_get_list_is(Is0, Res, [], false) of
        no ->
            opt_get_list_1(Ls, Res, Blocks0);
        {yes,Is} ->
            Blocks = Blocks0#{L:=Blk#b_blk{is=Is}},
            opt_get_list_1(Ls, Res, Blocks)
    end;
opt_get_list_1([], _, Blocks) -> Blocks.
%% opt_get_list_is(Is, Reservations, Acc, Changed) -> no | {yes,Is}.
%%  Look for adjacent get_hd/get_tl pairs on the same cons cell and
%%  swap them when the tail variable is reserved to a Y register.
opt_get_list_is([#b_set{op=get_hd,dst=#b_var{name=Hd},
                        args=[Cons]}=GetHd,
                 #b_set{op=get_tl,dst=#b_var{name=Tl},
                        args=[Cons]}=GetTl|Is],
                Res, Acc, Changed) ->
    %% Note that when this pass is run, only Y registers have
    %% reservations. The absence of an entry for a variable therefore
    %% means that the variable will be in an X register.
    case Res of
        #{Hd:={y,_}} ->
            %% Hd will be in a Y register. Don't swap.
            opt_get_list_is([GetTl|Is], Res, [GetHd|Acc], Changed);
        #{Tl:={y,_}} ->
            %% Tl will be in a Y register. Swap.
            opt_get_list_is([GetHd|Is], Res, [GetTl|Acc], true);
        #{} ->
            %% Both are in X registers. Nothing to do.
            opt_get_list_is([GetTl|Is], Res, [GetHd|Acc], Changed)
    end;
opt_get_list_is([I|Is], Res, Acc, Changed) ->
    opt_get_list_is(Is, Res, [I|Acc], Changed);
opt_get_list_is([], _Res, Acc, Changed) ->
    case Changed of
        true ->
            {yes,reverse(Acc)};
        false ->
            no
    end.
%%%
%%% Number instructions in the order they are executed.
%%%
%% number_instructions(St0) -> St.
%% Number instructions in the order they are executed. Use a step
%% size of 2. Don't number phi instructions. All phi variables in
%% a block will be live one unit before the first non-phi instruction
%% in the block.
%% Number instructions in execution order (step 2), skipping phi
%% instructions; used later for live-interval construction.
number_instructions(#st{ssa=Blocks0}=St) ->
    Blocks = number_is_1(beam_ssa:rpo(Blocks0), 1, Blocks0),
    St#st{ssa=Blocks}.
%% Number the instructions of each block; the terminator gets the
%% number following the last numbered instruction.
number_is_1([L|Ls], N0, Blocks0) ->
    #b_blk{is=Is0,last=Last0} = Bl0 = maps:get(L, Blocks0),
    {Is,N1} = number_is_2(Is0, N0, []),
    Last = beam_ssa:add_anno(n, N1, Last0),
    N = N1 + 2,
    Bl = Bl0#b_blk{is=Is,last=Last},
    Blocks = maps:put(L, Bl, Blocks0),
    number_is_1(Ls, N, Blocks);
number_is_1([], _, Blocks) -> Blocks.
%% Annotate each non-phi instruction with its number; phi instructions
%% are deliberately left unnumbered (see the comment above
%% number_instructions/1).
number_is_2([#b_set{op=phi}=I|Is], N, Acc) ->
    number_is_2(Is, N, [I|Acc]);
number_is_2([I0|Is], N, Acc) ->
    I = beam_ssa:add_anno(n, N, I0),
    number_is_2(Is, N+2, [I|Acc]);
number_is_2([], N, Acc) ->
    {reverse(Acc),N}.
%%%
%%% Calculate live intervals.
%%%
%% Calculate the live interval (list of {Start,End} ranges in
%% instruction numbers) for every variable. Function arguments are
%% live from the very start of the function.
live_intervals(#st{args=Args,ssa=Blocks}=St) ->
    Vars0 = [{V,{0,1}} || #b_var{name=V} <- Args],
    F = fun(L, _, A) -> live_interval_blk(L, Blocks, A) end,
    LiveMap0 = #{},
    Acc0 = {[],[],LiveMap0},
    {Vars,Aliases,_} = beam_ssa:fold_po(F, Acc0, Blocks),
    Intervals = merge_ranges(rel2fam(Vars0++Vars)),
    St#st{intervals=Intervals,aliases=Aliases}.
%% For each variable, coalesce adjacent ranges: a range that ends
%% exactly where the next one begins is merged into a single range.
merge_ranges(VarRanges) ->
    [{V,merge_ranges_1(Rs)} || {V,Rs} <- VarRanges].
merge_ranges_1([{Start,Mid},{Mid,End}|Rs]) ->
    %% Adjacent ranges; merge them and retry, since the merged range
    %% may also be adjacent to the following one.
    merge_ranges_1([{Start,End}|Rs]);
merge_ranges_1([Range|Rs]) ->
    [Range|merge_ranges_1(Rs)];
merge_ranges_1([]) -> [].
%% Process one block (in post-order, so all successors have already
%% been processed): compute the live ranges contributed by this block
%% and the set of variables live at its beginning.
live_interval_blk(L, Blocks, {Vars0,Aliases0,LiveMap0}) ->
    Live0 = [],
    Successors = beam_ssa:successors(L, Blocks),
    Live1 = update_successors(Successors, L, Blocks, LiveMap0, Live0),
    %% Add ranges for all variables that are live in the successors.
    #b_blk{is=Is,last=Last} = maps:get(L, Blocks),
    End = beam_ssa:get_anno(n, Last),
    Use = [{V,{use,End+1}} || V <- Live1],
    %% Determine used and defined variables in this block.
    FirstNumber = first_number(Is, Last),
    {UseDef0,Aliases} = live_interval_blk_1([Last|reverse(Is)],
                                            FirstNumber, Aliases0, Use),
    UseDef = rel2fam(UseDef0),
    %% Update what is live at the beginning of this block and
    %% store it.
    Used = [V || {V,[{use,_}|_]} <- UseDef],
    Live2 = ordsets:union(Live1, Used),
    Killed = [V || {V,[{def,_}|_]} <- UseDef],
    Live = ordsets:subtract(Live2, Killed),
    LiveMap = LiveMap0#{L=>Live},
    %% Construct the ranges for this block.
    Vars = make_block_ranges(UseDef, FirstNumber, Vars0),
    {Vars,Aliases,LiveMap}.
%% Turn the per-variable def/use events of one block into ranges.
make_block_ranges([{V,[{def,Def}]}|Vs], First, Acc) ->
    %% Defined but never used within the block.
    make_block_ranges(Vs, First, [{V,{Def,Def}}|Acc]);
make_block_ranges([{V,[{def,Def}|Uses]}|Vs], First, Acc) ->
    %% Defined and used; live from the definition to the last use.
    {use,LastUse} = lists:last(Uses),
    make_block_ranges(Vs, First, [{V,{Def,LastUse}}|Acc]);
make_block_ranges([{V,[{use,_}|_]=Uses}|Vs], First, Acc) ->
    %% Live-in variable; live from block entry to the last use.
    {use,LastUse} = lists:last(Uses),
    make_block_ranges(Vs, First, [{V,{First,LastUse}}|Acc]);
make_block_ranges([], _First, Acc) -> Acc.
%% Scan the instructions of a block backwards (terminator first),
%% emitting {Var,{def,N}} and {Var,{use,N}} events. Also record
%% aliases for match contexts that reuse their source register.
live_interval_blk_1([#b_set{op=phi,dst=#b_var{name=Dst}}|Is],
                    FirstNumber, Aliases, Acc0) ->
    %% Phi instructions are unnumbered; the phi value is defined at
    %% the start of the block.
    Acc = [{Dst,{def,FirstNumber}}|Acc0],
    live_interval_blk_1(Is, FirstNumber, Aliases, Acc);
live_interval_blk_1([#b_set{op=bs_start_match}=I|Is], FirstNumber,
                    Aliases0, Acc0) ->
    N = beam_ssa:get_anno(n, I),
    #b_set{dst=#b_var{name=Dst}} = I,
    Acc1 = [{Dst,{def,N}}|Acc0],
    Aliases = case beam_ssa:get_anno(reuse_for_context, I) of
                  true ->
                      %% The match context reuses the register of its
                      %% source; record them as aliases.
                      #b_set{args=[#b_var{name=Src}]} = I,
                      [{Dst,Src}|Aliases0];
                  false ->
                      Aliases0
              end,
    Acc = [{V,{use,N}} || V <- beam_ssa:used(I)] ++ Acc1,
    live_interval_blk_1(Is, FirstNumber, Aliases, Acc);
live_interval_blk_1([I|Is], FirstNumber, Aliases, Acc0) ->
    N = beam_ssa:get_anno(n, I),
    Acc1 = case I of
               #b_set{dst=#b_var{name=Dst}} ->
                   [{Dst,{def,N}}|Acc0];
               _ ->
                   %% Terminators have no destination.
                   Acc0
           end,
    Used = beam_ssa:used(I),
    Acc = [{V,{use,N}} || V <- Used] ++ Acc1,
    live_interval_blk_1(Is, FirstNumber, Aliases, Acc);
live_interval_blk_1([], _FirstNumber, Aliases, Acc) ->
    {Acc,Aliases}.
%% first_number([#b_set{}], Last) -> InstructionNumber.
%% Return the number for the first instruction for the block.
%% Note that this number is one less than the first
%% non-phi instruction in the block.
%% Skip the (unnumbered) phi instructions; the block's first number is
%% one less than the first numbered instruction, falling back to the
%% terminator's number when the block has no non-phi instructions.
first_number([#b_set{op=phi}|Is], Last) ->
    first_number(Is, Last);
first_number([I|_], _) ->
    beam_ssa:get_anno(n, I) - 1;
first_number([], Last) ->
    beam_ssa:get_anno(n, Last) - 1.
%% Collect the variables live on entry to all successors, taking
%% successor phi nodes into account: a phi argument coming from Pred
%% is a use, while the phi destination is not live-in from Pred.
update_successors([L|Ls], Pred, Blocks, LiveMap, Live0) ->
    Live1 = ordsets:union(Live0, get_live(L, LiveMap)),
    #b_blk{is=Is} = maps:get(L, Blocks),
    Live = update_live_phis(Is, Pred, Live1),
    update_successors(Ls, Pred, Blocks, LiveMap, Live);
update_successors([], _, _, _, Live) -> Live.
%% Return the live-in set recorded for block L, or [] if the block
%% has not been processed yet.
get_live(L, LiveMap) ->
    maps:get(L, LiveMap, []).
%% Adjust the live set for the phi instructions at the head of a
%% successor block: the phi arguments selected by Pred become live,
%% while the phi destinations are killed.
update_live_phis([#b_set{op=phi,dst=#b_var{name=Killed},args=Args}|Is],
                 Pred, Live0) ->
    Used = [V || {#b_var{name=V},L} <- Args, L =:= Pred],
    Live1 = ordsets:union(ordsets:from_list(Used), Live0),
    Live = ordsets:del_element(Killed, Live1),
    update_live_phis(Is, Pred, Live);
update_live_phis(_, _, Live) -> Live.
%%%
%%% Reserve Y registers.
%%%
%% reserve_yregs(St0) -> St.
%% In each block that allocates a stack frame, insert instructions
%% that copy variables that must be in Y registers (given by
%% YRegisters) to new variables.
%%
%% Also allocate specific Y registers for try and catch tags.
%% The outermost try/catch tag is placed in y0, any directly
%% nested tag in y1, and so on. Note that this is the reversed
%% order as required by BEAM; it will be corrected later by
%% turn_yregs().
%% Reserve Y registers for every frame-allocating block in turn.
reserve_yregs(#st{frames=Frames}=St0) ->
    lists:foldl(fun reserve_yregs_1/2, St0, Frames).
%% Reserve Y registers for one frame. Variables defined before the
%% frame block are copied to fresh names inside it; all Y-register
%% variables and try/catch tags are added to the reservation list,
%% keyed on the counter value so each frame's reservations are unique.
reserve_yregs_1(L, #st{ssa=Blocks0,cnt=Count0,res=Res0}=St) ->
    Blk = maps:get(L, Blocks0),
    Yregs = beam_ssa:get_anno(yregs, Blk),
    {Def,Used} = beam_ssa:def_used([L], Blocks0),
    UsedYregs = ordsets:intersection(Yregs, Used),
    DefBefore = ordsets:subtract(UsedYregs, Def),
    {BeforeVars,Blocks,Count} = rename_vars(DefBefore, L, Blocks0, Count0),
    InsideVars = ordsets:subtract(UsedYregs, DefBefore),
    ResTryTags0 = reserve_try_tags(L, Blocks),
    ResTryTags = [{V,{Reg,Count}} || {V,Reg} <- ResTryTags0],
    Vars = BeforeVars ++ InsideVars,
    Res = [{V,{y,Count}} || V <- Vars] ++ ResTryTags ++ Res0,
    St#st{res=Res,ssa=Blocks,cnt=Count+1}.
%% Assign a specific Y register to each try/catch tag reachable from
%% the frame block L, based on its nesting depth (outermost in y0).
reserve_try_tags(L, Blocks) ->
    {ActMap,_Seen} = reserve_try_tags_1([L], Blocks, gb_sets:empty(), #{}),
    Pairs = lists:append([maps:to_list(M) || {_,M} <- maps:to_list(ActMap)]),
    ordsets:from_list([{V,{y,Y}} || {V,Y} <- Pairs]).
%% Traverse the blocks, tracking the map of currently active try tags
%% (tag -> nesting depth) along every path.
reserve_try_tags_1([L|Ls], Blocks, Seen0, ActMap0) ->
    case gb_sets:is_element(L, Seen0) of
        true ->
            reserve_try_tags_1(Ls, Blocks, Seen0, ActMap0);
        false ->
            Seen1 = gb_sets:insert(L, Seen0),
            #b_blk{is=Is} = Blk = maps:get(L, Blocks),
            Active0 = get_active(L, ActMap0),
            Active = reserve_try_tags_is(Is, Active0),
            Successors = beam_ssa:successors(Blk),
            %% Propagate the active tags to the successors, finish the
            %% pending work list, then descend into the successors.
            ActMap1 = update_act_map(Successors, Active, ActMap0),
            {ActMap,Seen} = reserve_try_tags_1(Ls, Blocks, Seen1, ActMap1),
            reserve_try_tags_1(Successors, Blocks, Seen,ActMap)
    end;
reserve_try_tags_1([], _Blocks, Seen, ActMap) ->
    {ActMap,Seen}.
%% Return the map of active try tags on entry to block L, or an empty
%% map if none has been recorded yet.
get_active(L, ActMap) ->
    maps:get(L, ActMap, #{}).
%% Update the active-tag map across the instructions of a block: a new
%% tag gets the current nesting depth as its Y register number; a
%% killed tag is removed.
reserve_try_tags_is([#b_set{op=new_try_tag,dst=#b_var{name=V}}|Is], Active) ->
    N = map_size(Active),
    reserve_try_tags_is(Is, Active#{V=>N});
reserve_try_tags_is([#b_set{op=kill_try_tag,args=[#b_var{name=Tag}]}|Is], Active) ->
    reserve_try_tags_is(Is, maps:remove(Tag, Active));
reserve_try_tags_is([_|Is], Active) ->
    reserve_try_tags_is(Is, Active);
reserve_try_tags_is([], Active) -> Active.
%% Record the active tags for each successor, merging with any tags
%% already recorded (previously recorded tags take precedence on
%% conflicting keys).
update_act_map(Ls, Active, ActMap0) ->
    lists:foldl(fun(L, ActMap) ->
                        case ActMap of
                            #{L:=Other} ->
                                ActMap#{L=>maps:merge(Active, Other)};
                            #{} ->
                                ActMap#{L=>Active}
                        end
                end, ActMap0, Ls).
%% rename_vars(Vars, Label, Blocks0, Count0) -> {NewNames,Blocks,Count}.
%%  Give each variable in Vars a fresh name within block Label (and
%%  its descendants) and insert "New = copy Old" instructions after
%%  the phi instructions of the block.
rename_vars([], _, Blocks, Count) ->
    {[],Blocks,Count};
rename_vars(Vs, L, Blocks0, Count0) ->
    {NewVs,Count} = new_var_names(Vs, Count0),
    NewVars = [#b_var{name=V} || V <- NewVs],
    Ren = zip(Vs, NewVars),
    Blocks1 = beam_ssa:rename_vars(Ren, [L], Blocks0),
    #b_blk{is=Is0} = Blk0 = maps:get(L, Blocks1),
    CopyIs = [#b_set{op=copy,dst=New,args=[#b_var{name=Old}]} ||
                 {Old,New} <- Ren],
    Is = insert_after_phis(Is0, CopyIs),
    Blk = Blk0#b_blk{is=Is},
    Blocks = maps:put(L, Blk, Blocks1),
    {NewVs,Blocks,Count}.
%% Insert the instructions InsertIs into Is, directly after any
%% leading phi instructions (phis must stay first in a block).
insert_after_phis(Is, InsertIs) ->
    IsPhi = fun(#b_set{op=phi}) -> true;
               (_) -> false
            end,
    {Phis,Rest} = lists:splitwith(IsPhi, Is),
    Phis ++ InsertIs ++ Rest.
%% frame_size(St0) -> St.
%% Calculate the frame size for each block that allocates a frame.
%% Annotate the block with the frame size. Also annotate all
%% return instructions with {deallocate,FrameSize} to simplify
%% code generation.
%% Calculate and annotate the frame size of every frame-allocating
%% block; also annotate reachable return instructions for deallocation.
frame_size(#st{frames=Frames,regs=Regs,ssa=Blocks0}=St) ->
    Blocks = lists:foldl(fun(L, Blks) ->
                                 frame_size_1(L, Regs, Blks)
                         end, Blocks0, Frames),
    St#st{ssa=Blocks}.
%% Compute the frame size for the frame allocated in block L, annotate
%% the block with it, and annotate every reachable return instruction
%% with {deallocate,FrameSize}.
frame_size_1(L, Regs, Blocks0) ->
    Def = beam_ssa:def([L], Blocks0),
    Yregs0 = [maps:get(V, Regs) || V <- Def, is_yreg(maps:get(V, Regs))],
    Yregs = ordsets:from_list(Yregs0),
    %% Yregs is already an ordset (sorted and duplicate-free), so its
    %% length is the number of distinct Y registers, i.e. the frame size.
    %% (The original wrapped it in a redundant ordsets:from_list/1.)
    FrameSize = length(Yregs),
    if
        FrameSize =/= 0 ->
            %% Assert that the Y registers are contiguous, starting at {y,0}.
            [{y,0}|_] = Yregs,                  %Assertion.
            {y,Last} = lists:last(Yregs),
            Last = FrameSize - 1,               %Assertion.
            ok;
        true ->
            ok
    end,
    Blk0 = maps:get(L, Blocks0),
    Blk = beam_ssa:add_anno(frame_size, FrameSize, Blk0),
    %% Insert an annotation for frame deallocation on
    %% each #b_ret{}.
    Blocks = maps:put(L, Blk, Blocks0),
    Reachable = beam_ssa:rpo([L], Blocks),
    frame_deallocate(Reachable, FrameSize, Blocks).
%% Annotate the #b_ret{} terminator of every reachable block with the
%% frame size to deallocate; other terminators are left untouched.
frame_deallocate([L|Ls], Size, Blocks0) ->
    #b_blk{last=Last0} = Blk0 = maps:get(L, Blocks0),
    Blocks = case Last0 of
                 #b_ret{} ->
                     Last = beam_ssa:add_anno(deallocate, Size, Last0),
                     maps:put(L, Blk0#b_blk{last=Last}, Blocks0);
                 _ ->
                     Blocks0
             end,
    frame_deallocate(Ls, Size, Blocks);
frame_deallocate([], _Size, Blocks) -> Blocks.
%% turn_yregs(St0) -> St.
%% Renumber y registers so that {y,0} becomes {y,FrameSize-1},
%% {y,FrameSize-1} becomes {y,0} and so on. This is to make nested
%% catches work. The register allocator (linear_scan()) has given
%% a lower number to the outermost catch.
%% Renumber the Y registers of every frame so that {y,0} becomes
%% {y,FrameSize-1} and vice versa (nested catches require the
%% outermost tag to end up in the highest-numbered slot; see the
%% comment above).
turn_yregs(#st{frames=Frames,regs=Regs0,ssa=Blocks}=St) ->
    Regs1 = foldl(fun(L, A) ->
                          Blk = maps:get(L, Blocks),
                          FrameSize = beam_ssa:get_anno(frame_size, Blk),
                          Def = beam_ssa:def([L], Blocks),
                          [turn_yregs_1(Def, FrameSize, Regs0)|A]
                  end, [], Frames),
    Regs = maps:merge(Regs0, maps:from_list(append(Regs1))),
    St#st{regs=Regs}.
%% Mirror the Y register numbers of one frame: {y,Y} becomes
%% {y,FrameSize-Y-1}. Returns a [{Var,Reg}] list via sofs (the family
%% maps each mirrored register to its variables; converting the
%% converse relation back gives variable->register pairs).
turn_yregs_1(Def, FrameSize, Regs) ->
    Yregs0 = [{maps:get(V, Regs),V} || V <- Def, is_yreg(maps:get(V, Regs))],
    Yregs1 = rel2fam(Yregs0),
    FrameSize = length(Yregs1),                 %Assertion.
    Yregs2 = [{{y,FrameSize-Y-1},Vs} || {{y,Y},Vs} <- Yregs1],
    R0 = sofs:family(Yregs2),
    R1 = sofs:family_to_relation(R0),
    R = sofs:converse(R1),
    sofs:to_external(R).
%%%
%%% Reserving registers before register allocation.
%%%
%% reserve_regs(St0) -> St.
%% Reserve registers prior to register allocation. Y registers
%% have already been reserved. This function will reserve z,
%% fr, and specific x registers.
%% Reserve registers prior to register allocation. Y registers have
%% already been reserved; reserve z, fr, and specific x registers.
reserve_regs(#st{args=Args,ssa=Blocks,intervals=Intervals,res=Res0}=St) ->
    %% Reserve x0, x1, and so on for the function arguments.
    ArgRes = reserve_arg_regs(Args, 0, Res0),
    %% Reserve Z registers (dummy registers) for instructions with no
    %% return values (e.g. remove_message) or pseudo-return values
    %% (e.g. landingpad).
    ZRes = reserve_zregs(Blocks, Intervals, ArgRes),
    %% Reserve float registers.
    FrRes = reserve_fregs(Blocks, ZRes),
    %% Reserve all remaining unreserved variables as X registers.
    St#st{res=reserve_xregs(Blocks, maps:from_list(FrRes))}.
%% The Nth function argument arrives in (and is reserved to) {x,N}.
reserve_arg_regs([#b_var{name=Arg}|As], N, Acc) ->
    reserve_arg_regs(As, N+1, [{Arg,{x,N}}|Acc]);
reserve_arg_regs([], _N, Acc) -> Acc.
%% Reserve Z (dummy) registers. Only variables whose live interval is
%% a single range of length two (i.e. used only by the immediately
%% following instruction) are candidates.
reserve_zregs(Blocks, Intervals, Res) ->
    ShortLived0 = [V || {V,[{Start,End}]} <- Intervals, Start+2 =:= End],
    ShortLived = cerl_sets:from_list(ShortLived0),
    F = fun(_, #b_blk{is=Is,last=Last}, A) ->
                reserve_zreg(Is, Last, ShortLived, A)
        end,
    beam_ssa:fold_rpo(F, [0], Res, Blocks).
%% Reserve Z registers within one block: combinable tuple_size tests,
%% instructions whose results are never real values, and short-lived
%% branch conditions.
reserve_zreg([#b_set{op={bif,tuple_size},dst=Dst},
              #b_set{op={bif,'=:='},args=[Dst,Val]}], _Last, ShortLived, A0) ->
    case Val of
        #b_literal{val=Arity} when Arity bsr 32 =:= 0 ->
            %% These two instructions can be combined to a test_arity
            %% instruction provided that the arity variable is short-lived.
            reserve_zreg_1(Dst, ShortLived, A0);
        _ ->
            %% Do not combine with a variable or a huge literal.
            A0
    end;
reserve_zreg([#b_set{op={bif,tuple_size},dst=Dst}],
             #b_switch{}, ShortLived, A) ->
    %% tuple_size feeding a switch can become a select_tuple_arity.
    reserve_zreg_1(Dst, ShortLived, A);
reserve_zreg([#b_set{op=Op,dst=#b_var{name=Dst}}|Is], Last, ShortLived, A0) ->
    %% These instructions produce no (or only pseudo) return values;
    %% their destinations are unconditionally reserved as Z registers.
    IsZReg = case Op of
                 context_to_binary -> true;
                 bs_match_string -> true;
                 bs_restore -> true;
                 bs_save -> true;
                 {float,clearerror} -> true;
                 kill_try_tag -> true;
                 landingpad -> true;
                 put_tuple_elements -> true;
                 remove_message -> true;
                 set_tuple_element -> true;
                 succeeded -> true;
                 timeout -> true;
                 wait_timeout -> true;
                 _ -> false
             end,
    A = case IsZReg of
            true -> [{Dst,z}|A0];
            false -> A0
        end,
    reserve_zreg(Is, Last, ShortLived, A);
reserve_zreg([], #b_br{bool=Bool}, ShortLived, A) ->
    %% A short-lived branch condition can live in a Z register.
    reserve_zreg_1(Bool, ShortLived, A);
reserve_zreg([], _, _, A) -> A.
reserve_zreg_1(#b_var{name=V}, ShortLived, A) ->
case cerl_sets:is_element(V, ShortLived) of
true -> [{V,z}|A];
false -> A
end;
reserve_zreg_1(#b_literal{}, _, A) -> A.
%% Reserve float (fr) registers for the destinations of all float
%% operations except {float,get} (whose result is an ordinary term).
reserve_fregs(Blocks, Res) ->
    F = fun(_, #b_blk{is=Is}, A) ->
                reserve_freg(Is, A)
        end,
    beam_ssa:fold_rpo(F, [0], Res, Blocks).

reserve_freg([#b_set{op={float,Op},dst=#b_var{name=V}}|Is], Res) ->
    case Op of
        get ->
            %% {float,get} moves a float out of an fr register;
            %% its destination is not an fr register.
            reserve_freg(Is, Res);
        _ ->
            reserve_freg(Is, [{V,fr}|Res])
    end;
reserve_freg([_|Is], Res) ->
    reserve_freg(Is, Res);
reserve_freg([], Res) -> Res.
%% reserve_xregs(St0) -> St.
%% Reserve all remaining variables as X registers.
%%
%% If a variable will need to be in a specific X register for a
%% 'call' or 'make_fun' (and there is nothing that will kill it
%% between the definition and use), reserve the register using a
%% {prefer,{x,X}} annotation. That annotation means that the linear
%% scan algorithm will place the variable in the preferred register,
%% unless that register is already occupied.
%%
%% All remaining variables are reserved as X registers. Linear scan
%% will allocate the lowest free X register for the variable.
%% Walk the blocks in postorder, reserving all remaining variables as
%% X registers and recording {prefer,{x,N}} hints for variables that
%% flow into calls or phi nodes.
reserve_xregs(Blocks, Res) ->
    F = fun(L, #b_blk{is=Is,last=Last}, R) ->
                {Xs0,Used0} = reserve_terminator(L, Last, Blocks, R),
                %% The instructions are scanned backwards so that the
                %% preferences from the terminator/calls are known.
                reserve_xregs_is(reverse(Is), R, Xs0, Used0)
        end,
    beam_ssa:fold_po(F, Res, Blocks).

reserve_xregs_is([#b_set{op=Op,dst=#b_var{name=Dst},args=Args}=I|Is], Res0, Xs0, Used0) ->
    Xs1 = case is_gc_safe(I) of
              true ->
                  Xs0;
              false ->
                  %% There may be a garbage collection after executing this
                  %% instruction. We will need to prune the list of preferred
                  %% X registers.
                  res_xregs_prune(Xs0, Used0, Res0)
          end,
    Res = reserve_xreg(Dst, Xs1, Res0),
    Used1 = ordsets:union(Used0, beam_ssa:used(I)),
    Used = ordsets:del_element(Dst, Used1),
    case Op of
        call ->
            %% The callee (the first argument) is skipped; the
            %% remaining arguments get positional X preferences.
            Xs = reserve_call_args(tl(Args)),
            reserve_xregs_is(Is, Res, Xs, Used);
        make_fun ->
            Xs = reserve_call_args(tl(Args)),
            reserve_xregs_is(Is, Res, Xs, Used);
        _ ->
            reserve_xregs_is(Is, Res, Xs1, Used)
    end;
reserve_xregs_is([], Res, _Xs, _Used) -> Res.

%% Determine the X register preferences implied by the terminator of
%% block L, following an unconditional branch into its successor.
%% Returns {PreferredRegMap, UsedVars}.
reserve_terminator(L, #b_br{bool=#b_literal{val=true},succ=Succ}, Blocks, Res) ->
    case maps:get(Succ, Blocks) of
        #b_blk{is=[],last=Last} ->
            %% Empty block; look through it at its terminator.
            reserve_terminator(Succ, Last, Blocks, Res);
        #b_blk{is=[_|_]=Is} ->
            {res_xregs_from_phi(Is, L, Res, #{}),[]}
    end;
reserve_terminator(_, Last, _, _) ->
    {#{},beam_ssa:used(Last)}.
%% Collect preferred X registers for variables flowing from the given
%% predecessor block into phi nodes: a source variable inherits the
%% preferred register (if any) of the phi destination.
res_xregs_from_phi([#b_set{op=phi,dst=#b_var{name=Dst},args=Args}|Is],
                   Pred, Res, Acc) ->
    case [V || {#b_var{name=V},L} <- Args, L =:= Pred] of
        [] ->
            %% No variable flows in from this predecessor.
            res_xregs_from_phi(Is, Pred, Res, Acc);
        [V] ->
            case Res of
                #{Dst:={prefer,Reg}} ->
                    %% Pass the preferred register on to the source.
                    res_xregs_from_phi(Is, Pred, Res, Acc#{V=>Reg});
                #{Dst:=_} ->
                    %% No preferred register for the phi destination.
                    res_xregs_from_phi(Is, Pred, Res, Acc)
            end
    end;
res_xregs_from_phi(_, _, _, Acc) -> Acc.

%% Map each variable argument of a call to the X register dictated by
%% its argument position.
reserve_call_args(Args) ->
    reserve_call_args(Args, 0, #{}).

reserve_call_args([#b_var{name=Name}|As], X, Xs) ->
    reserve_call_args(As, X+1, Xs#{Name=>{x,X}});
reserve_call_args([#b_literal{}|As], X, Xs) ->
    %% A literal argument still occupies its position.
    reserve_call_args(As, X+1, Xs);
reserve_call_args([], _, Xs) -> Xs.
%% Reserve the variable V as an X register unless it is already
%% reserved. If the preference map Xs names a specific X register for
%% V, record a {prefer,Reg} hint; otherwise reserve a generic 'x'.
reserve_xreg(V, Xs, Res) ->
    case maps:is_key(V, Res) of
        true ->
            %% Already reserved; keep the existing reservation.
            Res;
        false ->
            case maps:find(V, Xs) of
                {ok, PreferredReg} ->
                    %% Hint that this X register is preferred, unless
                    %% it turns out to be occupied.
                    Res#{V => {prefer, PreferredReg}};
                error ->
                    %% Reserve as an X register in general.
                    Res#{V => x}
            end
    end.
%% Check whether the instruction is guaranteed not to cause a garbage
%% collection. Phi nodes are conservatively treated as unsafe.
is_gc_safe(#b_set{op=phi}) ->
    false;
is_gc_safe(#b_set{op=Op,args=Args}) ->
    case beam_ssa_codegen:classify_heap_need(Op, Args) of
        neutral -> true;
        {put,_} -> true;
        _ -> false
    end.
%% res_xregs_prune(PreferredRegs, Used, Res) -> PreferredRegs.
%%  Prune the list of preferred registers to only include X registers
%%  that are guaranteed to survive a garbage collection.

res_xregs_prune(Xs, Used, Res) ->
    %% The number of safe registers is the number of the X registers
    %% used after this point. The actual number of safe registers may
    %% be higher than this number, but this is a conservative safe
    %% estimate.
    NumSafe = foldl(fun(V, N) ->
                            case Res of
                                #{V:={x,_}} -> N + 1;
                                #{V:=_} -> N;
                                #{} -> N + 1
                            end
                    end, 0, Used),

    %% Remove unsafe registers from the list of potential
    %% preferred registers.
    maps:filter(fun(_, {x,X}) -> X < NumSafe end, Xs).
%%%
%%% Remove unsuitable aliases.
%%%
%%% If a binary is matched more than once, we must not put the
%%% match context in the same register as the binary to
%%% avoid the following situation:
%%%
%%% {test,bs_start_match2,{f,3},1,[{x,0},0],{x,0}}.
%%% .
%%% .
%%% .
%%% {test,bs_start_match2,{f,6},1,[{x,0},0],{x,1}}. %% ILLEGAL!
%%%
%%% The second instruction is illegal because a match context source
%%% is only allowed if source and destination registers are identical.
%%%
%% Drop aliases whose binary is the source of more than one
%% bs_start_match instruction (see the comment above).
remove_unsuitable_aliases(#st{aliases=[_|_]=Aliases0,ssa=Blocks}=St) ->
    R = rem_unsuitable(maps:values(Blocks)),
    %% A binary matched by two or more match contexts is unsuitable.
    Unsuitable0 = [V || {V,[_,_|_]} <- rel2fam(R)],
    Unsuitable = gb_sets:from_list(Unsuitable0),
    Aliases =[P || {_,V}=P <- Aliases0,
                   not gb_sets:is_member(V, Unsuitable)],
    St#st{aliases=Aliases};
remove_unsuitable_aliases(#st{aliases=[]}=St) -> St.

%% Collect {Binary,MatchContext} pairs for every bs_start_match
%% instruction in the given blocks.
rem_unsuitable([#b_blk{is=Is}|Bs]) ->
    Vs = [{V,Dst} ||
             #b_set{op=bs_start_match,dst=#b_var{name=Dst},
                    args=[#b_var{name=V}]} <- Is],
    Vs ++ rem_unsuitable(Bs);
rem_unsuitable([]) -> [].
%%%
%%% Merge intervals.
%%%
%% Merge the live intervals of aliased variables so that each alias
%% pair shares one (combined) interval and thus one register.
merge_intervals(#st{aliases=Aliases0,intervals=Intervals0,
                    res=Reserved}=St) ->
    Aliases1 = [A || A <- Aliases0,
                     is_suitable_alias(A, Reserved)],
    case Aliases1 of
        [] ->
            St#st{aliases=Aliases1};
        [_|_] ->
            Intervals1 = maps:from_list(Intervals0),
            {Intervals,Aliases} =
                merge_intervals_1(Aliases1, Intervals1, []),
            St#st{aliases=Aliases,intervals=Intervals}
    end.

%% For each {Alias,V} pair, fold the alias's ranges into V's interval
%% and drop the alias's own entry. Returns the updated intervals (as
%% a list) and the pairs actually merged.
merge_intervals_1([{Alias,V}|Vs], Intervals0, Acc) ->
    #{Alias:=Int1,V:=Int2} = Intervals0,
    Int3 = lists:merge(Int1, Int2),
    Int = merge_intervals_2(Int3),
    Intervals1 = maps:remove(Alias, Intervals0),
    Intervals = Intervals1#{V:=Int},
    merge_intervals_1(Vs, Intervals, [{Alias,V}|Acc]);
merge_intervals_1([], Intervals, Acc) ->
    {maps:to_list(Intervals),Acc}.
%% Coalesce a sorted, non-empty list of {Start,End} ranges, joining
%% every pair of adjacent ranges that touch or overlap.
merge_intervals_2([{Start1,End1},{Start2,End2}|Rest]) when Start2 =< End1 ->
    %% The second range starts inside (or right at the end of) the
    %% first one; fuse them and retry.
    Fused = {min(Start1, Start2), max(End1, End2)},
    merge_intervals_2([Fused|Rest]);
merge_intervals_2([{_,End1}=Range|[{Start2,_}|_]=Rest]) when End1 < Start2 ->
    [Range|merge_intervals_2(Rest)];
merge_intervals_2([_]=Single) -> Single.
%% An alias pair may share a register only when at least one side is
%% reserved as a generic X register and the other side is either a
%% generic X register or a specific {x,_} register.
is_suitable_alias({V1, V2}, Reserved) ->
    #{V1 := Kind1, V2 := Kind2} = Reserved,
    (Kind1 =:= x andalso is_x_kind(Kind2))
        orelse (Kind2 =:= x andalso is_x_kind(Kind1)).

%% Is this reservation an X register (generic or specific)?
is_x_kind(x) -> true;
is_x_kind({x, _}) -> true;
is_x_kind(_) -> false.
%%%
%%% Register allocation using linear scan.
%%%
%% A live interval for one variable.
-record(i,
        {sort=1 :: instr_number(),      %Start position (used as sort key).
         reg=none :: i_reg(),           %Reserved register or preference, if any.
         pool=x :: pool_id(),           %Pool to allocate the register from.
         var=#b_var{} :: b_var(),       %The variable itself.
         rs=[] :: [range()]             %The live ranges for the variable.
        }).

%% State for the linear scan main loop.
-record(l,
        {cur=#i{} :: interval(),        %The interval being handled.
         unhandled_res=[] :: [interval()],  %Unhandled; register already reserved.
         unhandled_any=[] :: [interval()],  %Unhandled; any register will do.
         active=[] :: [interval()],     %Overlapping the current position.
         inactive=[] :: [interval()],   %Started earlier but not overlapping now.
         free=#{} :: #{var_name()=>pool(),
                       {'next',pool_id()}:=reg_num()},  %Free regs per pool.
         regs=[] :: [{b_var(),ssa_register()}]  %Accumulated allocations.
        }).

-type interval() :: #i{}.
-type i_reg() :: ssa_register() | {'prefer',xreg()} | 'none'.
%% Integer pool ids are used for Y registers; atoms for fr/x/z.
-type pool_id() :: 'fr' | 'x' | 'z' | instr_number().
-type pool() :: ordsets:ordset(ssa_register()).
%% Run the linear scan register allocation algorithm over the live
%% intervals and record the chosen register for each variable.
linear_scan(#st{intervals=Intervals0,res=Res}=St0) ->
    St = St0#st{intervals=[],res=[]},
    Free = init_free(maps:to_list(Res)),
    Intervals1 = [init_interval(Int, Res) || Int <- Intervals0],
    %% #i{} records sort on their first field, the start position.
    Intervals = sort(Intervals1),
    IsReserved = fun (#i{reg=Reg}) -> Reg =/= none end,
    {UnhandledRes,Unhandled} = partition(IsReserved, Intervals),
    L = #l{unhandled_res=UnhandledRes,
           unhandled_any=Unhandled,free=Free},
    #l{regs=Regs} = do_linear(L),
    St#st{regs=maps:from_list(Regs)}.

%% Create an #i{} interval record for one variable, picking its
%% allocation pool (and register or preference, when one was
%% reserved) from the reservation map.
init_interval({V,[{Start,_}|_]=Rs}, Res) ->
    Info = maps:get(V, Res),
    Pool = case Info of
               {prefer,{x,_}} -> x;
               x -> x;
               {x,_} -> x;
               {y,Uniq} -> Uniq;
               {{y,_},Uniq} -> Uniq;
               z -> z;
               fr -> fr
           end,
    Reg = case Info of
              {prefer,{x,_}} -> Info;
              {x,_} -> Info;
              {{y,_}=Y,_} -> Y;
              _ -> none
          end,
    #i{sort=Start,var=V,reg=Reg,pool=Pool,rs=Rs}.

%% Build the initial map of free registers for every pool, seeded
%% from the reservations. The {next,Pool} keys record the next fresh
%% register number to hand out when a pool has no free register left.
init_free(Res) ->
    Free0 = rel2fam([{x,{x,0}}|init_free_1(Res)]),
    #{x:=Xs0} = Free1 = maps:from_list(Free0),
    Xs = init_xregs(Xs0),
    Free = Free1#{x:=Xs},
    Next = maps:fold(fun(K, V, A) -> [{{next,K},length(V)}|A] end, [], Free),
    maps:merge(Free, maps:from_list(Next)).
%% Translate each reservation into the seed entry for its register
%% pool. Generic 'x' reservations contribute nothing (the x pool is
%% seeded with {x,0} by the caller).
init_free_1(Res) ->
    lists:filtermap(fun pool_seed/1, Res).

%% Map one {Var,Reservation} pair to {Pool,Register}, or drop it.
pool_seed({_, {prefer, {x, _} = Reg}}) -> {true, {x, Reg}};
pool_seed({_, {x, _} = Reg})           -> {true, {x, Reg}};
pool_seed({_, {y, Uniq}})              -> {true, {Uniq, {y, 0}}};
pool_seed({_, {{y, _} = Reg, Uniq}})   -> {true, {Uniq, Reg}};
pool_seed({_, z})                      -> {true, {z, {z, 0}}};
pool_seed({_, fr})                     -> {true, {fr, {fr, 0}}};
pool_seed({_, x})                      -> false.
%% Make sure that the pool of X registers is contiguous by filling in
%% every missing register number between consecutive entries.
init_xregs([{x, _}] = Done) ->
    Done;
init_xregs([{x, N} | [{x, Next} | _] = Rest]) when N + 1 =:= Next ->
    [{x, N} | init_xregs(Rest)];
init_xregs([{x, N} | [{x, _} | _] = Rest]) ->
    %% There is a gap after N; insert the next register number.
    [{x, N} | init_xregs([{x, N + 1} | Rest])].
%% The linear scan main loop: pick the next interval, retire or
%% suspend intervals that no longer overlap it, then give it a
%% register.
do_linear(L0) ->
    case set_next_current(L0) of
        done ->
            L0;
        L1 ->
            L2 = expire_active(L1),
            L3 = check_inactive(L2),
            Available = collect_available(L3),
            L4 = select_register(Available, L3),
            L = make_cur_active(L4),
            do_linear(L)
    end.

%% Pick the unhandled interval with the lowest start position as the
%% new current interval, or return 'done' when both queues are empty.
%% A reserved interval wins only when it starts strictly earlier.
set_next_current(#l{unhandled_res=[Cur1|T1],
                    unhandled_any=[Cur2|T2]}=L) ->
    case {Cur1,Cur2} of
        {#i{sort=N1},#i{sort=N2}} when N1 < N2 ->
            L#l{cur=Cur1,unhandled_res=T1};
        {_,_} ->
            L#l{cur=Cur2,unhandled_any=T2}
    end;
set_next_current(#l{unhandled_res=[],
                    unhandled_any=[Cur|T]}=L) ->
    L#l{cur=Cur,unhandled_any=T};
set_next_current(#l{unhandled_res=[Cur|T],
                    unhandled_any=[]}=L) ->
    L#l{cur=Cur,unhandled_res=T};
set_next_current(#l{unhandled_res=[],unhandled_any=[]}) ->
    done.
%% Remove from the active list all intervals that have ended before
%% the start of the current interval, freeing their registers.
%% Intervals with a hole at the current position move to inactive.
expire_active(#l{cur=#i{sort=CurBegin},active=Act0}=L0) ->
    {Act,L} = expire_active(Act0, CurBegin, L0, []),
    L#l{active=Act}.

expire_active([#i{reg=Reg,rs=Rs0}=I|Is], CurBegin, L0, Acc) ->
    {_,_} = Reg,                                %Assertion.
    case overlap_status(Rs0, CurBegin) of
        ends_before_cur ->
            %% The interval is over; release its register for good.
            L = free_reg(I, L0),
            expire_active(Is, CurBegin, L0, Acc),
            expire_active(Is, CurBegin, L, Acc);
        overlapping ->
            %% Still live at the current position; keep it active.
            expire_active(Is, CurBegin, L0, [I|Acc]);
        not_overlapping ->
            %% In a lifetime hole; release the register temporarily
            %% and move the interval to the inactive list.
            Rs = strip_before_current(Rs0, CurBegin),
            L1 = free_reg(I, L0),
            L = L1#l{inactive=[I#i{rs=Rs}|L1#l.inactive]},
            expire_active(Is, CurBegin, L, Acc)
    end;
expire_active([], _CurBegin, L, Acc) ->
    {Acc,L}.

%% Move inactive intervals that are live again at the current
%% position back to the active list, re-reserving their registers.
%% Finished inactive intervals are dropped.
check_inactive(#l{cur=#i{sort=CurBegin},inactive=InAct0}=L0) ->
    {InAct,L} = check_inactive(InAct0, CurBegin, L0, []),
    L#l{inactive=InAct}.

check_inactive([#i{rs=Rs0}=I|Is], CurBegin, L0, Acc) ->
    case overlap_status(Rs0, CurBegin) of
        ends_before_cur ->
            check_inactive(Is, CurBegin, L0, Acc);
        not_overlapping ->
            check_inactive(Is, CurBegin, L0, [I|Acc]);
        overlapping ->
            Rs = strip_before_current(Rs0, CurBegin),
            L1 = L0#l{active=[I#i{rs=Rs}|L0#l.active]},
            L = reserve_reg(I, L1),
            check_inactive(Is, CurBegin, L, Acc)
    end;
check_inactive([], _CurBegin, L, Acc) ->
    {Acc,L}.
%% Drop all leading ranges that end at or before the start of the
%% current interval.
strip_before_current(Rs, CurBegin) ->
    lists:dropwhile(fun({_Start, End}) -> End =< CurBegin end, Rs).
%% Collect the registers available for the current interval.
collect_available(#l{cur=#i{reg={prefer,{_,_}=Prefer}}=I}=L) ->
    %% Use the preferred register if it is available.
    Avail = collect_available(L#l{cur=I#i{reg=none}}),
    case member(Prefer, Avail) of
        true -> [Prefer];
        false -> Avail
    end;
collect_available(#l{cur=#i{reg={_,_}=ReservedReg}}) ->
    %% Return the already reserved register.
    [ReservedReg];
collect_available(#l{unhandled_res=Unhandled,cur=Cur}=L) ->
    Free = get_pool(Cur, L),

    %% Note that since the live intervals are constructed from
    %% SSA form, there cannot be any overlap of the current interval
    %% with any inactive interval. See [3], page 175. Therefore we
    %% only have check the unhandled intervals for overlap with
    %% the current interval. As a further optimization, we only need
    %% to check the intervals that have reserved registers.
    collect_available(Unhandled, Cur, Free).

%% Remove from the free set every reserved register whose interval
%% (in the same pool) overlaps the current interval.
collect_available([#i{pool=Pool1}|Is], #i{pool=Pool2}=Cur, Free)
  when Pool1 =/= Pool2 ->
    %% Wrong pool. Ignore this interval.
    collect_available(Is, Cur, Free);
collect_available([#i{reg={_,_}=Reg}=I|Is], Cur, Free0) ->
    case overlaps(I, Cur) of
        true ->
            Free = ordsets:del_element(Reg, Free0),
            collect_available(Is, Cur, Free);
        false ->
            collect_available(Is, Cur, Free0)
    end;
collect_available([], _, Free) -> Free.
%% Give the current interval the first available register, or a brand
%% new register from its pool if none is free.
select_register([{_,_}=Reg|_], #l{cur=Cur0,regs=Regs}=L) ->
    Cur = Cur0#i{reg=Reg},
    reserve_reg(Cur, L#l{cur=Cur,regs=[{Cur#i.var,Reg}|Regs]});
select_register([], #l{cur=Cur0,regs=Regs}=L0) ->
    %% Allocate a new register in the pool.
    {Reg,L1} = get_next_free(Cur0, L0),
    Cur = Cur0#i{reg=Reg},
    L = L1#l{cur=Cur,regs=[{Cur#i.var,Reg}|Regs]},
    reserve_reg(Cur, L).

%% Move the current interval onto the active list.
make_cur_active(#l{cur=Cur,active=Act}=L) ->
    L#l{active=[Cur|Act]}.

%% Check whether the live ranges of two intervals overlap.
overlaps(#i{rs=Rs1}, #i{rs=Rs2}) ->
    are_overlapping(Rs1, Rs2).
%% Classify the interval's live ranges relative to the start of the
%% current interval: the interval has ended (ends_before_cur), is
%% live at that position (overlapping), or will only become live
%% later (not_overlapping). The range list is sorted and non-empty.
overlap_status([{_Start, End}], CurBegin) when End =< CurBegin ->
    ends_before_cur;
overlap_status([{Start, _End}], CurBegin) when CurBegin < Start ->
    not_overlapping;
overlap_status([{_Start, _End}], _CurBegin) ->
    overlapping;
overlap_status([{_Start, End} | Rest], CurBegin) when End =< CurBegin ->
    %% This range is already over; look at the remaining ranges.
    overlap_status(Rest, CurBegin);
overlap_status([{Start, _End} | _], CurBegin) when Start =< CurBegin ->
    overlapping;
overlap_status([_ | _], _CurBegin) ->
    not_overlapping.
%% Mark the register of this interval as taken in its pool.
reserve_reg(#i{reg={_,_}=Reg}=I, L) ->
    FreeRegs0 = get_pool(I, L),
    FreeRegs = ordsets:del_element(Reg, FreeRegs0),
    update_pool(I, FreeRegs, L).

%% Return the register of this interval to its pool.
free_reg(#i{reg={_,_}=Reg}=I, L) ->
    FreeRegs0 = get_pool(I, L),
    FreeRegs = ordsets:add_element(Reg, FreeRegs0),
    update_pool(I, FreeRegs, L).

%% Fetch the set of free registers in this interval's pool.
get_pool(#i{pool=Pool}, #l{free=Free}) ->
    maps:get(Pool, Free).

%% Store an updated set of free registers for this interval's pool.
update_pool(#i{pool=Pool}, New, #l{free=Free0}=L) ->
    Free = maps:put(Pool, New, Free0),
    L#l{free=Free}.

%% Hand out a brand new register from this interval's pool,
%% advancing the pool's {next,Pool} counter. Integer pool ids
%% produce Y registers; atom pools produce registers with the
%% pool name as tag (x, z or fr).
get_next_free(#i{pool=Pool}, #l{free=Free0}=L0) ->
    K = {next,Pool},
    N = maps:get(K, Free0),
    Free = maps:put(K, N+1, Free0),
    L = L0#l{free=Free},
    if
        is_integer(Pool) -> {{y,N},L};
        is_atom(Pool) -> {{Pool,N},L}
    end.
%%%
%%% Interval utilities.
%%%
%% Check whether any range in the first list overlaps any range in
%% the second list.
are_overlapping(Rs1, Rs2) ->
    lists:any(fun(Range) -> range_overlaps_any(Range, Rs2) end, Rs1).

%% Check whether one range overlaps any range in a sorted list.
range_overlaps_any({_S1, E1}, [{S2, _E2} | _]) when E1 < S2 ->
    %% The list is sorted; no later range can start earlier.
    false;
range_overlaps_any({S1, E1} = Range, [{S2, E2} | Rest]) ->
    (S2 < E1 andalso E2 > S1) orelse range_overlaps_any(Range, Rest);
range_overlaps_any({_, _}, []) ->
    false.
%%%
%%% Utilities.
%%%
%% is_loop_header(L, Blocks) -> false|true.
%% Check whether the block is a loop header.
%% Check whether the block is a loop header.
is_loop_header(L, Blocks) ->
    %% We KNOW that a loop header must start with a peek_message
    %% instruction.
    case maps:get(L, Blocks) of
        #b_blk{is=[#b_set{op=peek_message}|_]} -> true;
        _ -> false
    end.
%% Convert a list of {Key,Value} pairs to a sorted family list
%% [{Key,[Value,...]}] with duplicate values removed.
rel2fam(Pairs) ->
    sofs:to_external(sofs:rel2fam(sofs:relation(Pairs))).
%% Split an instruction list into {PhiInstructions, Others}.
split_phis(Is) ->
    partition(fun(#b_set{op=Op}) -> Op =:= phi end, Is).
%% Is this a Y (stack) register? Only the four register kinds used
%% by this pass are accepted; anything else is a bug in the caller.
is_yreg({y, _}) ->
    true;
is_yreg({Kind, _}) when Kind =:= x; Kind =:= z; Kind =:= fr ->
    false.
%% Give each variable in the list a fresh name based on the running
%% counter. Returns {NewNames, NewCount}.
new_var_names([V0|Vs0], Count0) ->
    {V,Count1} = new_var_name(V0, Count0),
    {Vs,Count} = new_var_names(Vs0, Count1),
    {[V|Vs],Count};
new_var_names([], Count) -> {[],Count}.

%% Create a fresh variable name {Base,Count} from an existing name.
new_var_name({Base,Int}, Count) ->
    true = is_integer(Int), %Assertion.
    {{Base,Count},Count+1};
new_var_name(Base, Count) ->
    {{Base,Count},Count+1}. | lib/compiler/src/beam_ssa_pre_codegen.erl | 0.719384 | 0.528168 | beam_ssa_pre_codegen.erl | starcoder
%% @author Couchbase <<EMAIL>>
%% @copyright 2015 Couchbase, Inc.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(single_bucket_kv_sup).
-behaviour(supervisor).
-include("ns_common.hrl").
-export([start_link/1, init/1]).
%% Start the per-bucket KV supervisor, registered locally under a
%% name built from the module name and the bucket name.
%% NOTE(review): list_to_atom/1 creates a new atom per bucket name;
%% assumed safe because bucket names come from configuration.
start_link(BucketName) ->
    SupName = list_to_atom(atom_to_list(?MODULE) ++ "-" ++ BucketName),
    supervisor:start_link({local, SupName}, ?MODULE, [BucketName]).
%% Child specifications for all per-bucket KV processes. Supervisors
%% use an 'infinity' shutdown; workers get bounded shutdown timeouts.
%% NOTE(review): some worker specs list [] as their modules while
%% others name the callback module -- confirm whether this is
%% intentional (the modules list only matters for release handling).
child_specs(BucketName) ->
    [{{docs_kv_sup, BucketName}, {docs_kv_sup, start_link, [BucketName]},
      permanent, infinity, supervisor, [docs_kv_sup]},
     {{ns_memcached_sup, BucketName}, {ns_memcached_sup, start_link, [BucketName]},
      permanent, infinity, supervisor, [ns_memcached_sup]},
     {{dcp_sup, BucketName}, {dcp_sup, start_link, [BucketName]},
      permanent, infinity, supervisor, [dcp_sup]},
     {{dcp_replication_manager, BucketName}, {dcp_replication_manager, start_link, [BucketName]},
      permanent, 1000, worker, []},
     {{replication_manager, BucketName}, {replication_manager, start_link, [BucketName]},
      permanent, 1000, worker, []},
     {{dcp_notifier, BucketName}, {dcp_notifier, start_link, [BucketName]},
      permanent, 1000, worker, []},
     {{janitor_agent_sup, BucketName}, {janitor_agent_sup, start_link, [BucketName]},
      permanent, 10000, worker, [janitor_agent_sup]},
     {{stats_collector, BucketName}, {stats_collector, start_link, [BucketName]},
      permanent, 1000, worker, [stats_collector]},
     {{stats_archiver, BucketName}, {stats_archiver, start_link, [BucketName]},
      permanent, 1000, worker, [stats_archiver]},
     {{stats_reader, BucketName}, {stats_reader, start_link, [BucketName]},
      permanent, 1000, worker, [stats_reader]},
     {{goxdcr_stats_collector, BucketName}, {goxdcr_stats_collector, start_link, [BucketName]},
      permanent, 1000, worker, [goxdcr_stats_collector]},
     %% The XDCR stats processes reuse the generic stats modules with
     %% a "@xdcr-"-prefixed bucket name.
     {{goxdcr_stats_archiver, BucketName}, {stats_archiver, start_link, ["@xdcr-" ++ BucketName]},
      permanent, 1000, worker, [stats_archiver]},
     {{goxdcr_stats_reader, BucketName}, {stats_reader, start_link, ["@xdcr-" ++ BucketName]},
      permanent, 1000, worker, [stats_reader]},
     {{failover_safeness_level, BucketName},
      {failover_safeness_level, start_link, [BucketName]},
      permanent, 1000, worker, [failover_safeness_level]}].
%% Supervisor callback: one_for_one strategy with the restart
%% intensity taken from the application environment (defaults:
%% max_r = 3 restarts within max_t = 10 seconds).
init([BucketName]) ->
    {ok, {{one_for_one,
           misc:get_env_default(max_r, 3),
           misc:get_env_default(max_t, 10)},
          child_specs(BucketName)}}. | src/single_bucket_kv_sup.erl | 0.59408 | 0.41401 | single_bucket_kv_sup.erl | starcoder
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved.
%%
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at http://mozilla.org/MPL/2.0/.
%%
%% -------------------------------------------------------------------
%% @doc
%%
%% A custom reporter plugin, executing in its own process, can receive
%% updated metric values by having its module referenced in an
%% `exometer_report:subscribe()' call.
%%
%% The reporter, once it is setup as a subscription destination, will
%% receive periodic calls with updated metrics and data points to be
%% reported.
%%
%% Each custom plugin implements the exometer_report behavior.
%%
%% The life cycle of a custom reporter consists of the following steps.
%%
%% + Reporter creation <br/>`exometer_init/1' is invoked by exometer when
%% the reporter is configured in the reporter application
%% environment. See {@section Configuring reporter plugins} for
%% details.
%%
%% + Setup subscription<br/>When `exometer_report:subscribe()' is called, targeting the
%% custom report plugin, the gen_server's `exometer_subscribe()' function
%% will be invoked to notify the plugin of the new metrics subscription.
%%
%% + Report Metrics<br/>Updated metrics are sent by exometer to the
%% `exometer_report/5' function. All reported metrics will have been
%% notified to the recipient through a previous `exometer_subscribe()' call.
%%
%% + Tear down subscription<br/>When `exometer_report:unsubscribe()' is called, addressing the
%% custom report plugin, the recipient's `exometer_unsubscribe()' function
%% will be invoked to notify the plugin of the deleted subscription.
%%
%%
%% The following chapters details each of the callbacks to be implemented
%% in the exometer_report behavior.
%%
%% === exometer_init/1 ===
%%
%% The `exometer_init()' function is invoked as follows:
%%
%% <pre lang="erlang">
%% exometer_init(Options)</pre>
%%
%% The custom reporter plugin should create the necessary state for the
%% new plugin and return a state to be used in future plugin calls.
%%
%% + `Options'<br/>Provides the prop list with attributes from the application environment
%% for the custom recipient. See {@section Configuring reporter plugins} for
%% details.
%% The `exometer_init()' function should return `{ok, State}' where
%% State is a tuple that will be provided as a reference argument to
%% future calls made into the plugin. Any other return formats will
%% cancel the creation of the custom reporting plugin.
%%
%%
%% === exometer_subscribe/5 ===
%%
%% The `exometer_subscribe()' function is invoked as follows:
%%
%% <pre lang="erlang">
%% exometer_subscribe(Metric, DataPoint, Interval, Extra, State)</pre>
%%
%% The custom plugin can use this notification to modify and return its
%% state in order to prepare for future calls to `exometer_report()' with
%% the given metric and data point.
%%
%% + `Metric'<br/>Specifies the metric that is now subscribed to by the plugin
%% as a list of atoms.
%%
%% + `DataPoint'<br/>Specifies the data point within the subscribed-to metric
%% as an atom, or a list of atoms.
%%
%% + `Interval'<br/>Specifies the interval, in milliseconds, that the
%% subscribed-to value will be reported at, or an atom, referring to a named
%% interval configured in the reporter.
%%
%% + `Extra'<br/>Specifies the extra data, which can be anything the reporter
%% can understand.
%%
%% + `State'<br/>Contains the state returned by the last called plugin function.
%%
%% The `exometer_subscribe()' function should return `{ok, State}' where
%% State is a tuple that will be provided as a reference argument to
%% future calls made into the plugin. Any other return formats will
%% generate an error log message by exometer.
%%
%%
%% === exometer_report/5 ===
%%
%% The `exometer_report()' function is invoked as follows:
%%
%% <pre lang="erlang">
%% exometer_report(Metric, DataPoint, Extra, Value, State)</pre>
%%
%% The custom plugin will receive this call when a periodic subscription
%% triggers and wants to report its current value through the plugin.
%% The plugin should export the value to the external system it interfaces and
%% return its possibly modified state.
%%
%% + `Metric'<br/>Specifies the metric that is to be reported.
%%
%% + `DataPoint'<br/>Specifies the data point or data points within the metric
%% to be reported.
%%
%% + `Extra'<br/>Specifies the extra data, which can be anything the reporter
%% can understand.
%%
%% + `State'<br/>Contains the state returned by the last called plugin function.
%%
%% The `exometer_report()' function should return `{ok, State}' where
%% State is a tuple that will be provided as a reference argument to
%% future calls made into the plugin. Any other return formats will
%% generate an error log message by exometer.
%%
%%
%% === exometer_unsubscribe/4 ===
%%
%% The `exometer_unsubscribe()' function is invoked as follows:
%%
%% <pre lang="erlang">
%% exometer_unsubscribe(Metric, DataPoint, Extra, State)</pre>
%%
%% The custom plugin can use this notification to modify and return its
%% state in order to free resources used to maintain the now de-activated
%% subscription. When this call returns, the given metric / data point
%% will not be present in future calls to `exometer_report()'.
%%
%% + `Metric'<br/>Specifies the metric that is now subscribed to by the plugin
%% as a list of atoms.
%%
%% + `DataPoint'<br/>Specifies the data point or data points within the
%% subscribed-to metric as an atom or a list of atoms.
%%
%% + `Extra'<br/>Specifies the extra data, which can be anything the reporter
%% can understand.
%%
%% + `Value'<br/>Specifies the value for the datapoint, which is reported.
%%
%% + `State'<br/>Contains the state returned by the last called plugin function.
%%
%% The `exometer_unsubscribe()' function should return `{ok, State}' where
%% State is a tuple that will be provided as a reference argument to
%% future calls made into the plugin. Any other return formats will
%% generate an error log message by exometer.
%%
%% === exometer_report_bulk/3 (Optional) ===
%%
%% If the option `{report_bulk, true}' has been given when starting the
%% reporter, <em>and</em> this function is exported, it will be called as:
%%
%% <pre lang="erlang">
%% exometer_report_bulk(Found, Extra, State)
%% </pre>
%%
%% where `Found' has the format `[{Metric, [{DataPoint, Value}|_]}|_]'
%%
%% That is, e.g. when a `select' pattern is used, all found values are passed
%% to the reporter in one message. If bulk reporting is not enabled, each
%% datapoint/value pair will be passed separately to the
%% <a href="#exometer_report/5"><code>exometer_report/5</code></a> function. If `report_bulk' was enabled, the
%% reporter callback will get all values at once. Note that this happens
%% also for single values, which are then passed as a list of one metric,
%% with a list of one datapoint/value pair.
%%
%% @end
-module(exometer_report).
-behaviour(gen_server).
%% API
-export(
[
start_link/0,
subscribe/4, subscribe/5, subscribe/6,
unsubscribe/3, unsubscribe/4,
unsubscribe_all/2,
list_metrics/0, list_metrics/1,
list_reporters/0,
list_subscriptions/1,
add_reporter/2,
set_interval/3,
delete_interval/2,
restart_intervals/1,
trigger_interval/2,
get_intervals/1,
remove_reporter/1, remove_reporter/2,
terminate_reporter/1,
enable_reporter/1,
disable_reporter/1,
call_reporter/2,
cast_reporter/2,
setopts/3,
new_entry/1
]).
%% Start phase function
-export([start_reporters/0]).
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3]).
-export([disable_me/2]).
-export_type([metric/0, datapoint/0, interval/0, extra/0]).
-include_lib("hut/include/hut.hrl").
-include("exometer.hrl").
-define(SERVER, ?MODULE).
-type error() :: {error, any()}.
-type metric() :: exometer:name()
| {find, exometer:name()}
| {select, ets:match_spec()}.
-type datapoint() :: exometer:datapoint().
-type datapoints() :: datapoint() | [datapoint()].
-type options() :: [{atom(), any()}].
-type mod_state() :: any().
-type value() :: any().
-type interval() :: pos_integer() | atom().
-type time_ms() :: pos_integer().
-type delay() :: time_ms().
-type named_interval() :: {atom(), time_ms()}
| {atom(), time_ms(), delay()}.
-type callback_result() :: {ok, mod_state()} | any().
-type extra() :: any().
-type retry() :: boolean().
-type reporter_name() :: atom().
%% Restart specification
-type maxR() :: pos_integer().
-type maxT() :: pos_integer().
-type action() :: {atom(), atom()}.
-type restart() :: [{maxR(), maxT()} | action()].
%% Callback for function, not cast-based, reports that
%% are invoked in-process.
-callback exometer_init(options()) -> callback_result().
-callback exometer_report(metric(), datapoint(),
extra(), value(), mod_state()) ->
callback_result().
-callback exometer_subscribe(metric(), datapoint(),
interval(), extra(), mod_state()) ->
callback_result().
-callback exometer_unsubscribe(metric(), datapoint(),
extra(), mod_state()) ->
callback_result().
-callback exometer_info(any(),mod_state()) ->
callback_result().
-callback exometer_call(any(), pid(), mod_state()) ->
{reply, any(), mod_state()} | {noreply, mod_state()} | any().
-callback exometer_cast(any(), mod_state()) ->
{noreply, mod_state()} | any().
-callback exometer_terminate(any(), mod_state()) ->
any().
-callback exometer_setopts(exometer:entry(), options(),
exometer:status(), mod_state()) ->
callback_result().
-callback exometer_newentry(exometer:entry(), mod_state()) ->
callback_result().
%% Key identifying one subscription: which reporter receives which
%% metric/datapoint, together with the retry policy and extra data.
-record(key, {
          reporter             :: module() | '_',
          metric               :: metric() | '_',
          datapoint            :: datapoints() | '_',
          retry_failed_metrics :: boolean() | undefined | '_',
          extra                :: extra() | '_'
         }).

%% One active subscription with its reporting interval and the
%% reference of the currently pending timer (if any).
-record(subscriber, {
          key      :: #key{} | '_',
          interval :: interval() | '_',
          t_ref    :: reference() | undefined | '_'
         }).

%% Restart bookkeeping for a crashed reporter: the restart spec,
%% the timestamps of recent restarts, and how many to remember.
-record(restart, {
          spec = default_restart() :: restart(),
          history = []             :: [pos_integer()],
          save_n = 10              :: pos_integer()}
        ).

%% A named reporting interval; time may be 'manual' for intervals
%% that are only triggered explicitly.
-record(interval, {
          name       :: atom(),
          time = 0   :: non_neg_integer() | 'manual',
          delay = 0  :: non_neg_integer(),
          t_ref      :: reference() | undefined
         }).

%% One registered reporter process and its metadata.
-record(reporter, {
          name             :: atom() | '_',
          pid              :: pid() | atom(),    % in select()
          mref             :: reference() | undefined | '_',
          module           :: module() | '_',
          opts = []        :: [{atom(), any()}] | '_',
          intervals = []   :: [#interval{}] | '_',
          restart = #restart{} :: #restart{} | '_',
          status = enabled :: enabled | disabled | '_'
         }).

%% State of the exometer_report gen_server.
-record(st, {
          subscribers = [] :: [#subscriber{}],
          reporters = []   :: [#reporter{}]
         }).

%% Per-reporter process state wrapper; 'bulk' enables bulk reporting.
-record(rst, {st, bulk = false}).
%%%===================================================================
%%% API
%%%===================================================================
%%--------------------------------------------------------------------
%% @doc Starts the server
%%--------------------------------------------------------------------
-spec start_link() -> {ok, pid()} | ignore | {error, any()}.
start_link() ->
    %% Launch the main server. Registered locally under the module name,
    %% so this is a singleton process.
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-spec subscribe(reporter_name(), metric(), datapoints(), interval()) ->
    ok | not_found | unknown_reporter | error.
%% @equiv subscribe(Reporter, Metric, DataPoint, Interval, [], true)
subscribe(Reporter, Metric, DataPoint, Interval) ->
    subscribe(Reporter, Metric, DataPoint, Interval, []).
-spec subscribe(reporter_name(), metric(), datapoints(), interval(), extra()) ->
    ok | not_found | unknown_reporter | error.
%% @equiv subscribe(Reporter, Metric, DataPoint, Interval, Extra, true)
subscribe(Reporter, Metric, DataPoint, Interval, Extra) ->
    %% Note: this arity always retries failed metrics.
    call({subscribe, #key{reporter = Reporter,
                          metric = Metric,
                          datapoint = DataPoint,
                          retry_failed_metrics = true,
                          extra = Extra}, Interval}).
-spec subscribe(reporter_name(), metric(), datapoints(), interval(),
                extra(), retry()) ->
    ok | not_found | unknown_reporter | error.
%% @doc Add a subscription to an existing reporter.
%%
%% The reporter must first be started using {@link add_reporter/2}, or through
%% a static configuration. `Metric' is the name of an exometer entry. `DataPoint'
%% is either a single data point (an atom) or a list of data points (a list).
%%
%% `Interval' is the sampling/reporting interval in milliseconds, or an atom,
%% referring to a named interval configured in the reporter. The named
%% interval need not be defined yet in the reporter (the subscription will
%% not trigger until it <em>is</em> defined.)
%%
%% `Extra' can be anything that the chosen reporter understands (default: `[]').
%% If the reporter uses {@link exometer_util:report_type/3}, `Extra' should be
%% a proplist, and the option `{report_type, T}' can control which type (e.g.
%% for collectd or statsd) that the value corresponds to.
%%
%% `Retry': boolean(). If true, retry the subscription at the next interval,
%% even if the metric cannot be read.
%% @end
subscribe(Reporter, Metric, DataPoint, Interval, Extra, Retry)
  when is_boolean(Retry) ->
    call({subscribe, #key{reporter = Reporter,
                          metric = Metric,
                          datapoint = DataPoint,
                          retry_failed_metrics = Retry,
                          extra = Extra}, Interval}).
-dialyzer({no_return, unsubscribe/3}).
-spec unsubscribe(module(), metric(), datapoint()) ->
    ok | not_found.
%% @equiv unsubscribe(Reporter, Metric, DataPoint, [])
unsubscribe(Reporter, Metric, DataPoint) ->
    unsubscribe(Reporter, Metric, DataPoint, []).
-dialyzer({no_return, unsubscribe/4}).
-spec unsubscribe(module(), metric(), datapoint() | [datapoint()], extra()) ->
    ok | not_found.
%% @doc Removes a subscription.
%%
%% Note that the subscription is identified by the combination
%% `{Reporter, Metric, DataPoint, Extra}'. The exact information can be
%% extracted using {@link list_subscriptions/1}.
%% @end
unsubscribe(Reporter, Metric, DataPoint, Extra) ->
    %% retry_failed_metrics is deliberately left unset here; the server
    %% matches subscriptions on the remaining key fields.
    call({unsubscribe, #key{reporter = Reporter,
                            metric = Metric,
                            datapoint = DataPoint,
                            extra = Extra}}).
-spec unsubscribe_all(module(), metric()) -> ok.
%% @doc Removes all subscriptions related to Metric in Reporter.
%% @end
unsubscribe_all(Reporter, Metric) ->
    call({unsubscribe_all, Reporter, Metric}).
-spec list_metrics() -> {ok, [{ exometer:name(),
                                [datapoint()],
                                [{reporter_name(), datapoint()}],
                                exometer:status() }]} | {error, any()}.
%% @equiv list_metrics([])
list_metrics()  ->
    list_metrics([]).
-spec list_metrics(Path :: metric()) ->
    {ok, [{ exometer:name(),
            [datapoint()],
            [{reporter_name(), datapoint()}],
            exometer:status() }]} | {error, any()}.
%% @doc List all metrics matching `Path', together with subscription status.
%%
%% This function performs a metrics search using `exometer:find_entries/1',
%% then matches the result against known subscriptions. It reports, for each
%% metric, the available data points, as well as which reporters subscribe to
%% which data points.
%% @end
list_metrics(Path)  ->
    call({list_metrics, Path}).
-spec list_reporters() -> [{reporter_name(), pid()}].
%% @doc List the name and pid of each known reporter.
list_reporters() ->
    call(list_reporters).
-spec list_subscriptions(reporter_name()) ->
    [{metric(), datapoint(), interval(), extra()}].
%% @doc List all subscriptions for `Reporter'.
list_subscriptions(Reporter) ->
    call({list_subscriptions, Reporter}).
-spec add_reporter(reporter_name(), options()) -> ok | {error, any()}.
%% @doc Add a reporter.
%%
%% The reporter can be configured using the following options. Note that all
%% options are also passed to the reporter callback module, which may support
%% additional options.
%%
%% `{module, atom()}' - The name of the reporter callback module. If no module
%% is given, the module name defaults to the given reporter name.
%%
%% `{status, enabled | disabled}' - The operational status of the reporter
%% if enabled, the reporter will report values to its target. If disabled, the
%% reporter process will be terminated and subscription timers canceled, but
%% the subscriptions will remain, and it will also be possible to add new
%% subscriptions to the reporter.
%%
%% `{intervals, [named_interval()]}'
%% named_interval() :: {Name::atom(), Interval::pos_integer()}
%%                   | {Name::atom(), Interval::time_ms(), delay()::time_ms()}
%%                   | {Name::atom(), 'manual'}
%% Define named intervals. The name can be used by subscribers, so that all
%% subscriptions for a given named interval will be reported when the interval
%% triggers. An optional delay (in ms) can be given: this will cause the first
%% interval to start in `Delay' milliseconds. When all intervals are named
%% at the same time, the delay parameter can be used to achieve staggered
%% reporting. If the interval is specified as ```'manual'''', it will have
%% to be triggered manually using {@link trigger_interval/2}.
%%
%% `{report_bulk, true | false}'
%% Pass all found datapoint/value pairs for a given subscription at once to
%% the `exometer_report_bulk/3' function, if it is exported, otherwise use
%% `exometer_report/4' as usual.
%%
%% @end
add_reporter(Reporter, Options) ->
    call({add_reporter, Reporter, Options}).
-spec remove_reporter(reporter_name()) -> ok | {error, any()}.
%% @doc Remove reporter and all its subscriptions.
remove_reporter(Reporter) ->
    call({remove_reporter, Reporter}).
-spec set_interval(reporter_name(), atom(),
                   time_ms() | {time_ms(), delay()} | manual) -> ok |error().
%% @doc Specify a named interval.
%%
%% See {@link add_reporter/2} for a description of named intervals.
%% The named interval is here specified as either `Time' (milliseconds) or
%% `{Time, Delay}', where a delay in milliseconds is provided. It is also
%% possible to specify an interval as ```'manual'''', indicating that the
%% interval can only be triggered manually via {@link trigger_interval/2}.
%%
%% If the named interval exists, it will be replaced with the new definition.
%% Otherwise, it will be added. Use {@link restart_intervals/1} if you want
%% all intervals to be restarted/resynched with corresponding relative delays.
%% @end
set_interval(Reporter, Name, Time) when is_atom(Name),
                                        is_integer(Time), Time >= 0 ->
    call({set_interval, Reporter, Name, Time});
set_interval(Reporter, Name, manual) when is_atom(Name) ->
    call({set_interval, Reporter, Name, manual});
set_interval(Reporter, Name, {Time, Delay}) when is_atom(Name),
                                                 is_integer(Time), Time >= 0,
                                                 is_integer(Delay),
                                                 Delay >= 0 ->
    call({set_interval, Reporter, Name, {Time, Delay}}).
-spec delete_interval(reporter_name(), atom()) -> ok | error().
%% @doc Delete a named interval.
%%
delete_interval(Reporter, Name) ->
    call({delete_interval, Reporter, Name}).
-spec restart_intervals(reporter_name()) -> ok.
%% @doc Restart all named intervals, respecting specified delays.
%%
%% This function can be used if named intervals are added incrementally, and
%% it is important that all intervals trigger separated by the given delays.
%% @end
restart_intervals(Reporter) ->
    call({restart_intervals, Reporter}).
-spec trigger_interval(reporter_name(), atom()) -> ok.
%% @doc Trigger a named interval.
%%
%% This function is mainly used to trigger intervals defined as ```'manual'''',
%% but can be used to trigger any named interval. If a named interval with
%% a specified time in milliseconds is triggered this way, it will effectively
%% be restarted, and will repeat as usual from that point on.
%% @end
trigger_interval(Reporter, Name) ->
    %% Asynchronous: the actual reporting happens in the server process.
    cast({trigger_interval, Reporter, Name}).
-spec get_intervals(reporter_name()) ->
    [{atom(), [{time, pos_integer()}
               | {delay, pos_integer()}
               | {timer_ref, reference()}]}].
%% @doc List the named intervals for `Reporter'.
get_intervals(Reporter) ->
    call({get_intervals, Reporter}).
-spec enable_reporter(reporter_name()) -> ok | {error, any()}.
%% @doc Enable `Reporter'.
%%
%% The reporter will be 'restarted' in the same way as if it had crashed
%% and was restarted by the supervision logic, but without counting it as
%% a restart.
%%
%% If the reporter was already enabled, nothing is changed.
%% @end
enable_reporter(Reporter) ->
    call({change_reporter_status, Reporter, enabled}).
-spec disable_reporter(reporter_name()) -> ok | {error, any()}.
%% @doc Disable `Reporter'.
%%
%% The reporter will be terminated, and all subscription timers will be
%% canceled, but the subscriptions themselves and reporter metadata are kept.
%% @end
disable_reporter(Reporter) ->
    call({change_reporter_status, Reporter, disabled}).
-spec disable_me(module(), any()) -> no_return().
%% @doc Used by a reporter to disable itself.
%%
%% This function can be called from a reporter instance if it wants to be
%% disabled, e.g. after exhausting a configured number of connection attempts.
%% The arguments passed are the name of the reporter callback module and the
%% module state, and are used to call the `Mod:terminate/2' function.
%% @end
disable_me(Mod, St) ->
    %% Ask the server to mark us disabled, then block until it tells us to
    %% shut down; never returns (exits with 'shutdown').
    cast({disable, self()}),
    receive
        {exometer_terminate, shutdown} ->
            Mod:exometer_terminate(shutdown, St),
            exit(shutdown)
    end.
-spec call_reporter(reporter_name(), any()) -> any() | {error, any()}.
%% @doc Send a custom (synchronous) call to `Reporter'.
%%
%% This function is used to make a client-server call to a given reporter
%% instance. Note that the reporter type must recognize the request.
%% @end
call_reporter(Reporter, Msg) ->
    %% Bypasses the main server: looks up the reporter pid directly in ets.
    case ets:lookup(?EXOMETER_REPORTERS, Reporter) of
        [#reporter{pid = Pid}] when is_pid(Pid) ->
            exometer_proc:call(Pid, Msg);
        [#reporter{status = disabled}] ->
            {error, disabled};
        [] ->
            {error, {no_such_reporter, Reporter}}
    end.
-spec cast_reporter(reporter_name(), any()) -> ok | {error, any()}.
%% @doc Send a custom (asynchronous) cast to `Reporter'.
%%
%% This function is used to make an asynchronous cast to a given reporter
%% instance. Note that the reporter type must recognize the message.
%% @end
cast_reporter(Reporter, Msg) ->
    case ets:lookup(?EXOMETER_REPORTERS, Reporter) of
        [#reporter{pid = Pid}] when is_pid(Pid) ->
            exometer_proc:cast(Pid, Msg);
        [#reporter{status = disabled}] ->
            {error, disabled};
        [] ->
            {error, {no_such_reporter, Reporter}}
    end.
-spec remove_reporter(reporter_name(), _Reason::any()) -> ok | {error, any()}.
%% @doc Remove `Reporter' (non-blocking call).
%%
%% This function can be used to order removal of a reporter with a custom
%% reason. Note that the function is asynchronous, making it suitable e.g.
%% for calling from within the reporter itself.
%% @end
remove_reporter(Reporter, Reason) ->
    cast({remove_reporter, Reporter, Reason}).
-spec setopts(exometer:entry(), options(), exometer:status()) -> ok.
%% @doc Called by exometer when options of a metric entry are changed.
%%
%% Reporters subscribing to the metric get a chance to process the options
%% change in the function `Mod:exometer_setopts(Metric,Options,Status,St)'.
%% @end
setopts(Metric, Options, Status) ->
    call({setopts, Metric, Options, Status}).
-spec new_entry(exometer:entry()) -> ok.
%% @doc Called by exometer whenever a new entry is created.
%%
%% This function is called whenever a new metric is created, giving each
%% reporter the chance to enable a subscription for it. Note that each
%% reporter is free to call the subscription management functions, as there
%% is no risk of deadlock. The callback function triggered by this call is
%% `Mod:exometer_newentry(Entry, St)'.
%% @end
new_entry(Entry) ->
    %% Cast (not call) so entry creation never blocks on the reporters.
    cast({new_entry, Entry}).
%%%===================================================================
%%% gen_server callbacks
%%%===================================================================
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Initializes the server
%%
%% @spec init(Args) -> {ok, State} |
%%                     {ok, State, Timeout} |
%%                     ignore |
%%                     {stop, Reason}
%% @end
%%--------------------------------------------------------------------
init([]) ->
    process_flag(trap_exit, true),
    %% The reporter table may still hold entries from a previous incarnation
    %% of this server (it survives our crash). Terminate each stale reporter,
    %% then apply its restart policy: either restart it, or - if the restart
    %% budget is exhausted - run the optional {M,F} removal action and collect
    %% its name for deletion from the table below.
    D = ets:foldl(
          fun (#reporter{name = Name, module = Module, restart = Restart} = R, Acc) ->
                  terminate_reporter(R),
                  case add_restart(Restart) of
                      {remove, How} ->
                          case How of
                              {M, F} when is_atom(M), is_atom(F) ->
                                  %% NOTE(review): errors from the removal
                                  %% action are deliberately ignored.
                                  try M:F(Module, {?MODULE, parent_restart}) catch _:_ -> ok end;
                              _ ->
                                  ok
                          end,
                          [Name | Acc];
                      {restart, Restart1} ->
                          restart_reporter(R#reporter{restart = Restart1}),
                          Acc
                  end
          end,
          [],
          ?EXOMETER_REPORTERS),
    [ets:delete(?EXOMETER_REPORTERS, R) || R <- D],
    {ok, #st{}}.
%% Synchronously ask the server to start all statically configured reporters
%% (see do_start_reporters/1).
start_reporters() ->
    call(start_reporters).
%% Read the reporter/subscriber configuration from the application
%% environment and bring all configured reporters and static subscriptions
%% up. Runs inside the gen_server.
do_start_reporters(S) ->
    Opts = get_report_env(),
    ?log(info, "Starting reporters with ~p~n", [ Opts ]),
    %% Dig out the mod opts.
    %% { reporters, [ {reporter1, [{opt1, val}, ...]}, {reporter2, [...]}]}
    %% Traverse list of reporter and launch reporter gen servers as dynamic
    %% supervisor children.
    case lists:keyfind(reporters, 1, Opts) of
        {reporters, ReporterList} ->
            ReporterRecs = make_reporter_recs(ReporterList),
            assert_no_duplicates(ReporterRecs),
            lists:foreach(
              fun(#reporter{name = Reporter,
                            status = Status,
                            opts = ROpts,
                            intervals = Ints0} = R) ->
                      Restart = get_restart(ROpts),
                      %% Only spawn the process and start interval timers
                      %% for reporters configured as 'enabled'.
                      {Pid, MRef, Ints} =
                          if Status =:= enabled ->
                                  {P1,R1} = spawn_reporter(Reporter, ROpts),
                                  I1 = start_interval_timers(R),
                                  {P1,R1,I1};
                             true -> {undefined, undefined, Ints0}
                          end,
                      ets:insert(?EXOMETER_REPORTERS,
                                 R#reporter{pid = Pid,
                                            mref = MRef,
                                            intervals = Ints,
                                            restart = Restart})
              end, ReporterRecs);
        false ->
            []
    end,
    %% Dig out configured 'static' subscribers
    case lists:keyfind(subscribers, 1, Opts) of
        {subscribers, Subscribers} ->
            lists:foreach(fun init_subscriber/1, Subscribers);
        false -> []
    end,
    S#st{}.
-spec make_reporter_recs([{atom(), list()}]) -> [#reporter{}].
%% Convert {Name, Opts} configuration pairs into #reporter{} records,
%% resolving each reporter's callback module, status and named intervals
%% from its option list. Crashes (function_clause) on malformed entries.
make_reporter_recs(Reporters) ->
    lists:map(
      fun({R, Opts}) when is_atom(R), is_list(Opts) ->
              #reporter{name      = R,
                        module    = get_module(R, Opts),
                        status    = proplists:get_value(status, Opts, enabled),
                        opts      = Opts,
                        intervals = get_interval_opts(Opts)}
      end, Reporters).
%% Callback module for reporter R: the 'module' option if present,
%% otherwise the reporter name itself (proplists semantics: a bare
%% 'module' atom in Opts would yield 'true').
get_module(R, Opts) ->
    proplists:get_value(module, Opts, R).
-spec get_interval_opts([named_interval() | any()]) -> [#interval{}].
%% Collect and validate interval definitions from a reporter's options:
%% both the list under 'intervals' and any singleton {interval, I} options.
%% Invalid definitions raise {invalid_interval, Term}.
get_interval_opts(Opts) ->
    Is1 = [singelton_interval(I) || {interval, I} <- Opts],
    Is = proplists:get_value(intervals, Opts, []),
    lists:map(
      fun({Name, Time}) when is_atom(Name),
                             is_integer(Time), Time >= 0 ->
              #interval{name = Name, time = Time};
         ({Name, Time, Delay}) when is_atom(Name),
                                    is_integer(Time), Time >= 0,
                                    is_integer(Delay), Delay >= 0 ->
              #interval{name = Name, time = Time, delay = Delay};
         ({Name, manual}) when is_atom(Name) ->
              #interval{name = Name, time = manual};
         (Other) ->
              error({invalid_interval, Other})
      end, Is ++ Is1).
%% Validate a singleton {interval, I} option ("singelton" [sic] - the
%% misspelled name is kept since it may be referenced elsewhere).
%% Note: 'manual' singletons are not accepted here, only numeric ones.
singelton_interval({N,T}=I) when is_atom(N), is_integer(T) -> I;
singelton_interval({N,T,D}=I) when is_atom(N),
                                   is_integer(T),
                                   is_integer(D) -> I.
%% Start (or restart) the timers for all named intervals of a reporter,
%% returning the updated #interval{} list.
start_interval_timers(#reporter{name = R, intervals = Ints}) ->
    lists:map(fun(I) -> start_interval_timer(I, R) end, Ints).
%% Manual intervals have no timer.
start_interval_timer(#interval{time = manual} = I, _) ->
    I;
start_interval_timer(#interval{name = Name, delay = Delay,
                               t_ref = Ref} = I, R) ->
    %% Cancel any previous timer first; with a configured delay, schedule a
    %% {start_interval, ...} message instead of starting the cycle directly
    %% (enables staggered reporting).
    cancel_timer(Ref),
    case Delay of
        0 ->
            do_start_interval_timer(I, R);
        D ->
            TRef = erlang:send_after(D, self(), {start_interval, R, Name}),
            I#interval{t_ref = TRef}
    end.
%% Arm the actual repeating batch timer for one interval cycle.
do_start_interval_timer(#interval{name = Name, time = Time} = I, R) ->
    TRef = erlang:send_after(Time, self(), batch_timer_msg(R, Name, Time)),
    I#interval{t_ref = TRef}.
%% Timer message for a named-interval (batch) report; the timestamp records
%% when the timer was armed, for drift compensation (see adjust_interval/2).
batch_timer_msg(R, Name, Time) ->
    batch_timer_msg(R, Name, Time, os:timestamp()).
batch_timer_msg(R, Name, Time, TS) ->
    {report_batch, R, Name, Time, TS}.
%% Timer message for a single-subscription report, same timestamp scheme.
subscr_timer_msg(Key, Interval) ->
    subscr_timer_msg(Key, Interval, os:timestamp()).
subscr_timer_msg(Key, Interval, TS) ->
    {report, Key, Interval, TS}.
%% Assemble the full reporting configuration: the 'report' application
%% environment, with its 'reporters' and 'subscribers' entries merged with
%% the standalone env variables and setup-discovered values.
get_report_env() ->
    Opts0 = exometer_util:get_env(report, []),
    {Rs1, Opts1} = split_env(reporters, Opts0),
    {Ss2, Opts2} = split_env(subscribers, Opts1),
    get_reporters(Rs1) ++ get_subscribers(Ss2) ++ Opts2.
%% Pull the value tagged Tag out of an options list.
%% Returns {Value, RemainingOpts}; when Tag is absent, Value is [] and the
%% list is returned untouched.
split_env(Tag, Opts) ->
    split_env_result(lists:keytake(Tag, 1, Opts), Opts).

%% Helper: unpack the keytake/3 result.
split_env_result({value, {_Tag, Value}, Rest}, _Opts) -> {Value, Rest};
split_env_result(false, Opts) -> {[], Opts}.
%% Combine reporters from the 'reporters' env variable, the inherited list
%% L0, and any setup-discovered 'exometer_reporters' env vars.
get_reporters(L0) ->
    Rs = exometer_util:get_env(reporters, []),
    Ext = setup:find_env_vars(exometer_reporters),
    merge_env(reporters, Rs ++ L0, Ext).
%% Same merging scheme for static subscribers.
get_subscribers(L0) ->
    Ss = exometer_util:get_env(subscribers, []),
    Ext = setup:find_env_vars(exometer_subscribers),
    merge_env(subscribers, Ss ++ L0, Ext).
%% Merge locally configured entries L with externally discovered env
%% entries E into a list of {Tag, List} tuples; empty inputs contribute
%% nothing to the result.
merge_env(_Tag, [], []) ->
    [];
merge_env(Tag, L, E) ->
    Local = case L of
                [] -> [];
                _  -> [{Tag, L}]
            end,
    Local ++ [{Tag, X} || {_, X} <- E].
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling call messages
%%
%% @spec handle_call(Request, From, State) ->
%%                                   {reply, Reply, State} |
%%                                   {reply, Reply, State, Timeout} |
%%                                   {noreply, State} |
%%                                   {noreply, State, Timeout} |
%%                                   {stop, Reason, Reply, State} |
%%                                   {stop, Reason, State}
%% @end
%%--------------------------------------------------------------------
%% All persistent state is kept in the ?EXOMETER_REPORTERS and
%% ?EXOMETER_SUBS ets tables; #st{} is essentially empty at runtime.
handle_call(start_reporters, _From, S) ->
    {reply, ok, do_start_reporters(S)};
%% Add a subscription. The metric/datapoint pair is validated first; an
%% unknown metric is still accepted when retry_failed_metrics is true.
handle_call({subscribe,
             #key{ reporter = Reporter,
                   metric = Metric,
                   datapoint = DataPoint,
                   retry_failed_metrics = RetryFailedMetrics,
                   extra = Extra} , Interval },
            _From, #st{} = St) ->
    %% Verify that the given metric/data point actually exist.
    case ets:lookup(?EXOMETER_REPORTERS, Reporter) of
        [#reporter{status = Status, pid=ReporterPid}] ->
            case is_valid_metric(Metric, DataPoint) of
                true ->
                    %% Notify the reporter process only when it is running.
                    if Status =:= enabled ->
                            ReporterPid ! {exometer_subscribe, Metric,
                                           DataPoint, Interval, Extra};
                       true -> ignore
                    end,
                    subscribe_(Reporter, Metric, DataPoint,
                               Interval, RetryFailedMetrics,
                               Extra, Status),
                    {reply, ok, St};
                %% Nope - Not found.
                false ->
                    case RetryFailedMetrics of
                        true ->
                            subscribe_(Reporter, Metric, DataPoint,
                                       Interval, RetryFailedMetrics,
                                       Extra, Status),
                            {reply, ok, St};
                        false ->
                            {reply, not_found, St}
                    end;
                error -> {reply, error, St}
            end;
        [] ->
            {reply, unknown_reporter, St}
    end;
%% Remove a single subscription, identified by the full key.
handle_call({unsubscribe,
             #key{reporter = Reporter,
                  metric = Metric,
                  datapoint = DataPoint,
                  extra = Extra}}, _, #st{} = St) ->
    Res = unsubscribe_(Reporter, Metric, DataPoint, Extra),
    {reply, Res, St};
%% Remove every subscription a reporter holds on a given metric.
handle_call({unsubscribe_all, Reporter, Metric}, _,
            #st{}=St) ->
    Subs = ets:select(?EXOMETER_SUBS,
                      [{#subscriber{key = #key{reporter = Reporter,
                                               metric = Metric,
                                               _ = '_'},
                                    _ = '_'}, [], ['$_']}]),
    lists:foreach(fun unsubscribe_/1, Subs),
    {reply, ok, St};
%% List metrics under Path with their subscription status.
handle_call({list_metrics, Path}, _, St) ->
    if is_list(Path) ->
            DP = lists:foldr(fun(Metric, Acc) ->
                                     retrieve_metric(Metric, Acc)
                             end, [], exometer:find_entries(Path)),
            {reply, {ok, DP}, St};
       true ->
            {reply, {error, badarg}, St}
    end;
%% List all subscriptions belonging to one reporter.
handle_call({list_subscriptions, Reporter}, _, #st{} = St) ->
    Subs1 = lists:foldl(
              fun
                  (#subscriber{key=#key{reporter=Rep}}=Sub, Acc) when Reporter == Rep ->
                      #subscriber{
                         key=#key{
                                metric=Metric,
                                datapoint=Dp,
                                extra=Extra},
                         interval=Interval} = Sub,
                      [{Metric, Dp, Interval, Extra} | Acc];
                  (_, Acc) ->
                      Acc
              end, [], ets:select(?EXOMETER_SUBS, [{'_',[],['$_']}])),
    {reply, Subs1, St};
handle_call(list_reporters, _, #st{} = St) ->
    Info = ets:select(?EXOMETER_REPORTERS,
                      [{#reporter{name = '$1', pid = '$2', _ = '_'},
                        [], [{{'$1', '$2'}}]}]),
    {reply, Info, St};
%% Add and spawn a new reporter; errors during setup are caught and
%% returned as {error, Reason}.
handle_call({add_reporter, Reporter, Opts}, _, #st{} = St) ->
    case ets:member(?EXOMETER_REPORTERS, Reporter) of
        true ->
            {reply, {error, already_running}, St};
        false ->
            try
                [R] = make_reporter_recs([{Reporter, Opts}]),
                {Pid, MRef} = spawn_reporter(Reporter, Opts),
                Ints = start_interval_timers(R),
                R1 = R#reporter{intervals = Ints,
                                pid = Pid,
                                mref = MRef},
                ets:insert(?EXOMETER_REPORTERS, R1),
                {reply, ok, St}
            catch
                error:Reason ->
                    {reply, {error, Reason}, St}
            end
    end;
handle_call({remove_reporter, Reporter}, _, St) ->
    case do_remove_reporter(Reporter) of
        ok ->
            {reply, ok, St};
        E ->
            {reply, E, St}
    end;
handle_call({change_reporter_status, Reporter, Status}, _, St) ->
    case change_reporter_status(Reporter, Status) of
        ok ->
            {reply, ok, St};
        E ->
            {reply, E, St}
    end;
%% Create or replace a named interval and (re)arm its timer.
handle_call({set_interval, Reporter, Name, Int}, _, #st{}=St) ->
    case ets:lookup(?EXOMETER_REPORTERS, Reporter) of
        [#reporter{intervals = Ints}] ->
            try
                I0 = case lists:keyfind(Name, #interval.name, Ints) of
                         false -> #interval{name = Name};
                         Interval -> Interval
                     end,
                I1 = case Int of
                         {Time, Delay} when is_integer(Time), Time >= 0,
                                            is_integer(Delay), Delay >= 0 ->
                             I0#interval{time = Time, delay = Delay};
                         Time when is_integer(Time), Time >= 0 ->
                             I0#interval{time = Time};
                         manual ->
                             %% Manual intervals must not keep a live timer.
                             cancel_timer(I0#interval.t_ref),
                             I0#interval{time = manual}
                     end,
                ets:update_element(?EXOMETER_REPORTERS, Reporter,
                                   [{#reporter.intervals,
                                     lists:keystore(
                                       Name, #interval.name, Ints,
                                       start_interval_timer(I1, Reporter))}]),
                {reply, ok, St}
            catch
                error:Reason ->
                    {reply, {error, Reason}, St}
            end;
        [] ->
            {reply, {error, not_found}, St}
    end;
%% Delete a named interval, cancelling its timer if armed.
handle_call({delete_interval, Reporter, Name}, _, #st{} = St) ->
    case ets:lookup(?EXOMETER_REPORTERS, Reporter) of
        [#reporter{intervals = Ints}] ->
            case lists:keyfind(Name, #interval.name, Ints) of
                #interval{t_ref = TRef} ->
                    cancel_timer(TRef),
                    ets:update_element(?EXOMETER_REPORTERS, Reporter,
                                       [{#reporter.intervals,
                                         lists:keydelete(
                                           Name, #interval.name, Ints)}]),
                    {reply, ok, St};
                false ->
                    {reply, {error, not_found}, St}
            end;
        [] ->
            {reply, {error, not_found}, St}
    end;
%% Restart all interval timers for a reporter (resync staggered delays).
handle_call({restart_intervals, Reporter}, _, #st{} = St) ->
    case ets:lookup(?EXOMETER_REPORTERS, Reporter) of
        [#reporter{} = R] ->
            Ints = start_interval_timers(R),
            ets:update_element(?EXOMETER_REPORTERS, Reporter,
                               [{#reporter.intervals, Ints}]),
            {reply, ok, St};
        [] ->
            {reply, {error, not_found}, St}
    end;
handle_call({get_intervals, Reporter}, _, #st{} = St) ->
    case ets:lookup(?EXOMETER_REPORTERS, Reporter) of
        [#reporter{intervals = Ints}] ->
            Info =
                [{Name, [{time, T},
                         {delay, D},
                         {timer_ref, TR}]} || #interval{name = Name,
                                                        time = T,
                                                        delay = D,
                                                        t_ref = TR} <- Ints],
            {reply, Info, St};
        [] ->
            {reply, {error, not_found}, St}
    end;
%% Broadcast a metric-options change to every running reporter process.
handle_call({setopts, Metric, Options, Status}, _, #st{}=St) ->
    [erlang:send(Pid, {exometer_setopts, Metric, Options, Status})
     || Pid <- reporter_pids()],
    {reply, ok, St};
handle_call(_Request, _From, State) ->
    {reply, {error, unknown_call}, State}.
%%--------------------------------------------------------------------
%% @private
%% @doc Handling cast messages.
%%
%% @spec handle_cast(Msg, State) -> {noreply, State} |
%%                                  {noreply, State, Timeout} |
%%                                  {stop, Reason, State}
%% @end
%%--------------------------------------------------------------------
%% Broadcast a new metric entry to all reporters (best-effort: a dead pid
%% is ignored) and enable any matching pre-existing subscriptions.
handle_cast({new_entry, Entry}, #st{} = St) ->
    [try erlang:send(Pid, {exometer_newentry, Entry})
     catch error:_ -> ok end
     || Pid <- reporter_pids()],
    maybe_enable_subscriptions(Entry),
    {noreply, St};
%% Remove a reporter; only a user-initiated removal terminates the
%% reporter process (other reasons imply it is already gone).
handle_cast({remove_reporter, Reporter, Reason}, St) ->
    Terminate = case Reason of
                    user ->
                        true;
                    _ ->
                        false
                end,
    do_remove_reporter(Reporter, Terminate),
    {noreply, St};
%% A reporter asked to disable itself (see disable_me/2).
handle_cast({disable, Pid}, #st{} = St) ->
    case reporter_by_pid(Pid) of
        [#reporter{} = Reporter] ->
            do_change_reporter_status(Reporter, disabled);
        [] -> ok
    end,
    {noreply, St};
%% Manually fire a named interval (see trigger_interval/2).
handle_cast({trigger_interval, Reporter, Name}, #st{} = St) ->
    report_batch(Reporter, Name, os:timestamp()),
    {noreply, St};
handle_cast(_Msg, State) ->
    {noreply, State}.
%%--------------------------------------------------------------------
%% @private
%% @doc Handling all non call/cast messages.
%%
%% @spec handle_info(Info, State) -> {noreply, State} |
%%                                   {noreply, State, Timeout} |
%%                                   {stop, Reason, State}
%% @end
%%--------------------------------------------------------------------
%% A delayed named interval is due to start its repeating cycle
%% (scheduled by start_interval_timer/2 when a delay was configured).
handle_info({start_interval, Reporter, Name}, #st{} = St) ->
    case ets:lookup(?EXOMETER_REPORTERS, Reporter) of
        [#reporter{intervals = Ints, status = enabled}] ->
            case lists:keyfind(Name, #interval.name, Ints) of
                #interval{time = Time} = I when is_integer(Time) ->
                    I1 = do_start_interval_timer(I, Reporter),
                    ets:update_element(?EXOMETER_REPORTERS, Reporter,
                                       [{#reporter.intervals,
                                         lists:keyreplace(
                                           Name, #interval.name, Ints, I1)}]);
                #interval{time = manual} ->
                    ok;
                false ->
                    ok
            end;
        _ ->
            ok
    end,
    {noreply, St};
handle_info({report_batch, Reporter, Name}, #st{} = St) ->
    %% Find all entries where reporter is Reporter and interval is Name,
    %% and report them.
    report_batch(Reporter, Name, os:timestamp()),
    {noreply, St};
handle_info({report_batch, Reporter, Name, Int, TS}, #st{} = St) ->
    %% Find all entries where reporter is Reporter and interval is Name,
    %% and report them.
    TS1 = calc_fire_time(TS, Int),
    report_batch(Reporter, Name, TS1),
    {noreply, St};
handle_info({report, #key{} = Key, Interval}, #st{} = St) ->
    %% BW Compat. Old-style timeout msg, which doesn't include timestamp
    {noreply, handle_report(Key, Interval, os:timestamp(), St)};
handle_info({report, #key{} = Key, Interval, TS}, #st{} = St) ->
    TS1 = calc_fire_time(TS, Interval),
    {noreply, handle_report(Key, Interval, TS1, St)};
%% A monitored reporter process died: apply its restart policy. Note that
%% the case expression's value is discarded - all effects go through ets
%% and restart_reporter/1.
handle_info({'DOWN', Ref, process, _Pid, Reason}, #st{} = S) ->
    case reporter_by_mref(Ref) of
        [#reporter{module = Module, restart = Restart} = R] ->
            case add_restart(Restart) of
                {remove, How} ->
                    case How of
                        {M, F} when is_atom(M), is_atom(F) ->
                            %% NOTE(review): removal-action errors ignored.
                            try M:F(Module, Reason) catch _:_ -> ok end;
                        _ ->
                            ok
                    end,
                    S;
                {restart, Restart1} ->
                    restart_reporter(R#reporter{restart = Restart1})
            end;
        _ -> S
    end,
    {noreply, S};
handle_info(_Info, State) ->
    ?log(warning, "exometer_report:info(??): ~p~n", [ _Info ]),
    {noreply, State}.
%% Respawn a reporter process, re-establish all of its timer-based
%% subscriptions, and mark it enabled in the reporter table.
restart_reporter(#reporter{name = Name, opts = Opts, restart = Restart}) ->
    {Pid, MRef} = spawn_reporter(Name, Opts),
    [resubscribe(S) ||
        S <- ets:select(?EXOMETER_SUBS,
                        [{#subscriber{key = #key{reporter = Name,
                                                 _ = '_'},
                                      _ = '_'}, [], ['$_']}])],
    ets:update_element(?EXOMETER_REPORTERS, Name,
                       [{#reporter.pid, Pid},
                        {#reporter.mref, MRef},
                        {#reporter.restart, Restart},
                        {#reporter.status, enabled}]),
    ok.
%% If there are already subscriptions, enable them.
%% Called when a new metric entry appears: re-arm any existing
%% subscriptions on that metric (for enabled reporters) and re-evaluate
%% static select/apply subscriber specs against the new entry.
maybe_enable_subscriptions(#exometer_entry{name = Metric}) ->
    lists:foreach(
      fun(#subscriber{key = #key{reporter = RName}} = S) ->
              case get_reporter_status(RName) of
                  enabled ->
                      resubscribe(S);
                  _ ->
                      ok
              end
      end, ets:select(?EXOMETER_SUBS,
                      [{#subscriber{key = #key{metric = Metric,
                                               _ = '_'},
                                    _ = '_'}, [], ['$_']}])),
    %% Also re-check the static subscribers for select and apply
    case lists:keyfind(subscribers, 1, get_report_env()) of
        {subscribers, Subscribers} ->
            lists:foreach(
              fun(Sub) ->
                      case Sub of
                          {select, _} -> init_subscriber(Sub);
                          {apply, _} -> init_subscriber(Sub);
                          _ -> ok
                      end
              end, Subscribers);
        false -> []
    end.
%% Re-establish one subscription after a reporter restart: notify the
%% reporter, replace the old timer, and store the new timer reference.
%% Only numeric intervals get a timer; anything else is a no-op.
resubscribe(#subscriber{key = #key{reporter = RName,
                                   metric = Metric,
                                   datapoint = DataPoint,
                                   extra = Extra} = Key,
                        t_ref = OldTRef,
                        interval = Interval}) when is_integer(Interval) ->
    try_send(RName, {exometer_subscribe, Metric, DataPoint, Interval, Extra}),
    cancel_timer(OldTRef),
    TRef = erlang:send_after(Interval, self(),
                             subscr_timer_msg(Key, Interval)),
    ets:update_element(?EXOMETER_SUBS, Key, [{#subscriber.t_ref, TRef}]);
resubscribe(_) -> undefined.
%% Process one subscription-timer firing: if the subscription still exists
%% and its reporter is enabled, report and re-arm the timer; a successful
%% retry-eligible report restarts the timer, otherwise it lapses.
handle_report(#key{reporter = Reporter} = Key, Interval, TS, #st{} = St) ->
    _ = case ets:member(?EXOMETER_SUBS, Key) andalso
            get_reporter_status(Reporter) == enabled of
            true ->
                case do_report(Key, Interval) of
                    true -> restart_subscr_timer(Key, Interval, TS);
                    false -> ok
                end;
            false ->
                %% Possibly an unsubscribe removed the subscriber
                ?log(error, "No such subscriber (Key=~p)~n", [Key])
        end,
    St.
%% Fetch the subscribed values and dispatch them to the reporter.
%% Returns true when the subscription should stay alive (values were found,
%% or retry_failed_metrics is set); false when it should be dropped.
do_report(#key{metric = Metric,
               datapoint = DataPoint,
               retry_failed_metrics = RetryFailedMetrics} = Key, Interval) ->
    case {RetryFailedMetrics, get_values(Metric, DataPoint)} of
        %% We found a value, or values.
        {_, [_|_] = Found} ->
            %% Distribute metric value to the correct process
            report_values(Found, Key),
            true;
        %% We did not find a value, but we should try again.
        {true, _ } ->
            ?log(debug, "Metric(~p) Datapoint(~p) not found."
                 " Will try again in ~p msec~n",
                 [Metric, DataPoint, Interval]),
            true;
        %% We did not find a value, and we should not retry.
        _ ->
            %% Entry removed while timer in progress.
            ?log(warning, "Metric(~p) Datapoint(~p) not found. Will not try again~n",
                 [Metric, DataPoint]),
            false
    end.
%% Report every subscription tied to the named interval Name of Reporter,
%% then re-arm the batch timer. Returns false when the reporter is
%% disabled or unknown.
report_batch(Reporter, Name, T0) when is_atom(Name) ->
    case ets:lookup(?EXOMETER_REPORTERS, Reporter) of
        [#reporter{status = disabled}] ->
            false;
        [R] ->
            Entries = ets:select(?EXOMETER_SUBS,
                                 [{#subscriber{key = #key{reporter = Reporter,
                                                          _ = '_'},
                                               interval = Name,
                                               _ = '_'}, [], ['$_']}]),
            lists:foreach(
              fun(#subscriber{key = Key}) ->
                      do_report(Key, Name)
              end, Entries),
            restart_batch_timer(Name, R, T0);
        [] ->
            false
    end.
%% Cancel the timers of all subscriptions belonging to Reporter (used when
%% a reporter is disabled); the subscriptions themselves are kept.
cancel_subscr_timers(Reporter) ->
    lists:foreach(
      fun(#subscriber{key = Key, t_ref = TRef}) ->
              cancel_timer(TRef),
              ets:update_element(?EXOMETER_SUBS, Key,
                                 [{#subscriber.t_ref, undefined}])
      end, ets:select(?EXOMETER_SUBS,
                      [{#subscriber{key = #key{reporter = Reporter,
                                               _ = '_'},
                                    _ = '_'}, [], ['$_']}])).
%% Re-arm a subscription timer, compensating for processing drift via
%% adjust_interval/2. Named (atom) intervals are handled by the batch
%% timer instead, hence the no-op fallback clause.
restart_subscr_timer(Key, Interval, T0) when is_integer(Interval) ->
    {AdjInt, RptTime} = adjust_interval(Interval, T0),
    TRef = erlang:send_after(AdjInt, self(),
                             subscr_timer_msg(Key, Interval, RptTime)),
    ets:update_element(?EXOMETER_SUBS, Key,
                       [{#subscriber.t_ref, TRef}]);
restart_subscr_timer(_, _, _) ->
    true.
%% Re-arm the timer of the named interval Name after a batch report,
%% compensating for drift. Manual or unknown intervals return false.
restart_batch_timer(Name, #reporter{name = Reporter,
                                    intervals = Ints}, T0) when is_list(Ints) ->
    case lists:keyfind(Name, #interval.name, Ints) of
        #interval{time = Time, t_ref = OldTRef} = I when is_integer(Time) ->
            cancel_timer(OldTRef),
            {Int, RptTime} = adjust_interval(Time, T0),
            TRef = erlang:send_after(Int, self(),
                                     batch_timer_msg(
                                       Reporter, Name, Time, RptTime)),
            ets:update_element(?EXOMETER_REPORTERS, Reporter,
                               [{#reporter.intervals,
                                 lists:keyreplace(Name, #interval.name, Ints,
                                                  I#interval{t_ref = TRef})}]);
        #interval{time = manual} ->
            false;
        false ->
            false
    end.
%% Compensate the next timer interval for the time already spent since T0.
%% Returns {IntervalToUse, ReportTimeBase}. A negative elapsed time, or one
%% exceeding the full interval (most likely due to a clock adjustment),
%% falls back to the full interval anchored at the current time.
adjust_interval(Time, T0) ->
    Now = os:timestamp(),
    Elapsed = tdiff(Now, T0),
    if
        Elapsed > Time; Elapsed < 0 ->
            %% Most likely due to clock adjustment
            {Time, Now};
        true ->
            {Time - Elapsed, T0}
    end.
%% Difference between two erlang timestamps in whole milliseconds.
tdiff(T1, T0) ->
    timer:now_diff(T1, T0) div 1000.
%% Calculate time when timer should have fired, based on timestamp logged
%% at send_after/3 and the intended interval (in ms).
%%
%% A manually triggered interval carries its own timestamp; pass it through.
calc_fire_time({manual, TS}, _) ->
    TS;
calc_fire_time({M, S, U}, Int) ->
    %% Add the interval (ms -> us) and carry any overflow so the result is
    %% a normalized erlang timestamp (previously U could exceed 999999).
    %% timer:now_diff/2 tolerates non-normalized values, so this is
    %% backward compatible for existing consumers (tdiff/2).
    U1 = U + Int * 1000,
    S1 = S + U1 div 1000000,
    {M + S1 div 1000000, S1 rem 1000000, U1 rem 1000000}.
%% Cancel a timer reference, if one is set. Returns false for 'undefined',
%% otherwise the result of erlang:cancel_timer/1 (remaining milliseconds,
%% or false if the timer already fired).
cancel_timer(TRef) when TRef =/= undefined ->
    erlang:cancel_timer(TRef);
cancel_timer(undefined) ->
    false.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% This function is called by a gen_server when it is about to
%% terminate. It should be the opposite of Reporter:init/1 and do any
%% necessary cleaning up. When it returns, the gen_server terminates
%% with Reason. The return value is ignored.
%%
%% @spec terminate(Reason, State) -> void()
%% @end
%%--------------------------------------------------------------------
%% Stop every reporter process; table entries are kept so init/1 can
%% apply the restart policy after this server comes back up.
terminate(_Reason, _) ->
    [terminate_reporter(R) || R <- ets:tab2list(?EXOMETER_REPORTERS)],
    ok.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Convert process state when code is changed
%%
%% @spec code_change(OldVsn, State, Extra) -> {ok, NewState}
%% @end
%%--------------------------------------------------------------------
%% Historic #reporter{} layout, kept for reference; the clauses below
%% upgrade tuples of this and later shapes to the current record:
%% -record(reporter, {
%%             name      :: atom(),
%%             pid       :: pid(),
%%             mref      :: reference(),
%%             module    :: module(),
%%             opts = [] :: [{atom(), any()}],
%%             restart = #restart{}
%%            }).
%% Migrate any reporters/subscribers carried in the old state into the
%% ets tables, converting old record shapes (distinguished by tuple
%% arity) along the way, then empty the state lists.
code_change(_OldVan, #st{reporters = Rs, subscribers = Ss} = S, _Extra) ->
    Rs1 = lists:map(
            fun({reporter,Pid,MRef,Module,Opts,Restart}) ->
                    %% Oldest shape: no name field; the module doubled as
                    %% the reporter name.
                    #reporter{name = Module, pid = Pid, mref = MRef,
                              module = Module, opts = Opts,
                              restart = Restart};
               ({reporter,Name,Pid,MRef,Module,Opts,Restart}) ->
                    #reporter{name = Name, pid = Pid, mref = MRef,
                              module = Module, opts = Opts,
                              restart = Restart};
               ({reporter,Name,Pid,Mref,Module,Opts,Restart,Status}) ->
                    #reporter{name = Name, pid = Pid, mref = Mref,
                              module = Module, opts = Opts,
                              restart = Restart, status = Status};
               (#reporter{} = R) -> R
            end, Rs),
    [ets:insert(?EXOMETER_REPORTERS, R) || R <- Rs1],
    [ets:insert(?EXOMETER_SUBS, Sub) || Sub <- Ss],
    {ok, S#st{reporters = [], subscribers = []}};
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%%%===================================================================
%%% Internal functions
%%%===================================================================

%% Pids of all live reporters (rows whose pid field is an actual pid).
reporter_pids() ->
    ets:select(?EXOMETER_REPORTERS,
               [{#reporter{pid = '$1', _ = '_'},
                 [{is_pid,'$1'}], ['$1']}]).

%% Full #reporter{} rows matching the given pid.
reporter_by_pid(Pid) ->
    ets:select(?EXOMETER_REPORTERS,
               [{#reporter{pid = Pid, _='_'}, [], ['$_']}]).

%% Full #reporter{} rows matching the given monitor reference.
reporter_by_mref(Ref) ->
    ets:select(?EXOMETER_REPORTERS,
               [{#reporter{mref = Ref, _='_'}, [], ['$_']}]).
%% Best-effort send: deliver Msg to To (pid or registered name),
%% swallowing the badarg raised when a registered name does not exist.
%% Always returns Msg, whether or not delivery succeeded.
try_send(Dest, Msg) ->
    try
        Dest ! Msg
    catch
        error:_Reason -> Msg
    end.
%% Validate a subscription's metric spec.
%% NOTE: three-valued return — true | false | error. 'error' signals a
%% malformed spec (uncompilable match spec / unknown datapoint list),
%% while 'false' means the metric simply is not registered (yet).
is_valid_metric({find, Name}, _DataPoint) when is_list(Name) ->
    true;
is_valid_metric({select, Name}, _DataPoint) when is_list(Name) ->
    %% A select pattern is valid iff it compiles as an ets match spec.
    try ets:match_spec_compile(Name), true
    catch
        error:_ -> error
    end;
is_valid_metric(Name, default) when is_list(Name) ->
    case exometer:info(Name, type) of
        undefined -> false;
        _ -> true
    end;
is_valid_metric(Name, DataPoint) when is_list(Name) ->
    case dp_list(DataPoint) of
        [] -> error;
        [_|_] = DataPoints ->
            case exometer:info(Name, datapoints) of
                undefined -> false;
                DPs ->
                    %% Every requested datapoint must exist on the metric.
                    case DataPoints -- DPs of
                        [] -> true;
                        _ -> false
                    end
            end
    end;
is_valid_metric(_, _) ->
    false.
%% Normalize a datapoint spec to a list of datapoints: a list is
%% returned as-is, a single atom or integer is wrapped in a list
%% (integer datapoints denote histogram percentiles, e.g. 50/75/90/95/
%% 99/999). Any other term raises function_clause, as before.
%%
%% Generalized from an explicit whitelist of percentile integers: any
%% integer datapoint is now wrapped; validity against the metric's
%% actual datapoints is still checked by the caller (is_valid_metric/2).
dp_list(DP) when is_list(DP) -> DP;
dp_list(DP) when is_atom(DP) -> [DP];
dp_list(DP) when is_integer(DP) -> [DP].
%% Fetch [{MetricName, Values}] for a subscription. A plain name yields
%% at most one entry; a {find, Path} / {select, Pattern} spec expands to
%% every matching *enabled* metric. Metrics whose value cannot be read
%% are silently skipped.
get_values(Name, DataPoint) when is_list(Name) ->
    case exometer:get_value(Name, DataPoint) of
        {ok, Values} when is_list(Values) ->
            [{Name, Values}];
        _ ->
            []
    end;
get_values({How, Path}, DataPoint) ->
    Entries = case How of
                  find -> exometer:find_entries(Path);
                  select -> exometer:select(Path)
              end,
    lists:foldr(
      fun({Name, _, enabled}, Acc) ->
              case exometer:get_value(Name, DataPoint) of
                  {ok, Values} when is_list(Values) ->
                      [{Name, Values}|Acc];
                  _ ->
                      Acc
              end;
         (_, Acc) -> Acc
      end, [], Entries).
%% Crash with {duplicate_reporter, Name} if two reporters share a name.
assert_no_duplicates([#reporter{name = R}|T]) ->
    case lists:keymember(R, #reporter.name, T) of
        true -> error({duplicate_reporter, R});
        false -> assert_no_duplicates(T)
    end;
assert_no_duplicates([]) ->
    ok.

%% Spawn a reporter process running reporter_init/2 + reporter_loop/2,
%% optionally register it (see maybe_register/3), and monitor it.
-spec spawn_reporter(reporter_name(), options()) -> {pid(), reference()}.
spawn_reporter(Reporter, Opt) when is_atom(Reporter), is_list(Opt) ->
    Fun = fun() ->
                  {ok, Mod, St} = reporter_init(Reporter, Opt),
                  reporter_loop(Mod, St)
          end,
    Pid = proc_lib:spawn(Fun),
    maybe_register(Reporter, Pid, Opt),
    MRef = erlang:monitor(process, Pid),
    {Pid, MRef}.
%% Register the reporter process, honoring the registered_name option:
%% {registered_name, none} => do not register; {registered_name, Name}
%% => register under Name; option absent => register under the reporter
%% id itself.
maybe_register(Reporter, Pid, Opts) ->
    case lists:keyfind(registered_name, 1, Opts) of
        false -> register(Reporter, Pid);
        {registered_name, none} -> ok;
        {registered_name, Name} -> register(Name, Pid)
    end.
%% Ask a live reporter process to shut down gracefully; if it has not
%% exited within 1 s, kill it outright and flush the stale monitor.
terminate_reporter(#reporter{pid = Pid, mref = MRef}) when is_pid(Pid) ->
    Pid ! {exometer_terminate, shutdown},
    receive
        {'DOWN', MRef, _, _, _} ->
            ok
    after 1000 ->
            exit(Pid, kill),
            erlang:demonitor(MRef, [flush])
    end;
terminate_reporter(#reporter{pid = undefined}) ->
    %% No process to stop (e.g. reporter is disabled).
    ok.
%% Insert a subscription row (keyed by reporter/metric/datapoint/extra)
%% unless an identical one already exists; arms the report timer only
%% for enabled reporters (see maybe_send_after/3).
subscribe_(Reporter, Metric, DataPoint, Interval, RetryFailedMetrics,
           Extra, Status) ->
    ?log(debug, "subscribe_(~p, ~p, ~p, ~p, ~p, ~p, ~p)~n", [Reporter, Metric, DataPoint, Interval, RetryFailedMetrics, Extra, Status]),
    Key = #key{reporter = Reporter,
               metric = Metric,
               datapoint = DataPoint,
               extra = Extra,
               retry_failed_metrics = RetryFailedMetrics
              },
    case ets:lookup(?EXOMETER_SUBS, Key) of
        [] -> ets:insert(?EXOMETER_SUBS,
                         #subscriber{key = Key,
                                     interval = Interval,
                                     t_ref = maybe_send_after(Status, Key, Interval)});
        _ ->
            ?log(debug, "subscribe_(): not adding duplicate subscription")
    end.

%% Arm the subscription timer only when the reporter is enabled and the
%% interval is a number of milliseconds (not 'manual').
maybe_send_after(enabled, Key, Interval) when is_integer(Interval) ->
    erlang:send_after(
      Interval, self(), subscr_timer_msg(Key, Interval));
maybe_send_after(_, _, _) ->
    undefined.
-dialyzer({no_return, unsubscribe_/4}).
%% Remove a subscription identified by reporter/metric/datapoint/extra.
%% Returns ok, or not_found when no such subscription exists.
unsubscribe_(Reporter, Metric, DataPoint, Extra) ->
    ?log(info, "unsubscribe_(~p, ~p, ~p, ~p)~n",
         [ Reporter, Metric, DataPoint, Extra]),
    case ets:lookup(?EXOMETER_SUBS, #key{reporter = Reporter,
                                         metric = Metric,
                                         datapoint = DataPoint,
                                         extra = Extra}) of
        [#subscriber{} = Sub] ->
            unsubscribe_(Sub);
        [] ->
            not_found
    end.

%% Tear down one subscription row: notify the reporter (best effort),
%% cancel its timer, and delete the ETS entry.
unsubscribe_(#subscriber{key = #key{reporter = Reporter,
                                    metric = Metric,
                                    datapoint = DataPoint,
                                    extra = Extra} = Key, t_ref = TRef}) ->
    try_send(Reporter, {exometer_unsubscribe, Metric, DataPoint, Extra}),
    cancel_timer(TRef),
    ets:delete(?EXOMETER_SUBS, Key),
    ok.
%% Forward collected values to the reporter process; log (rather than
%% crash) if the send itself raises.
report_values(Found, #key{reporter = Reporter, extra = Extra} = Key) ->
    try Reporter ! {exometer_report, Found, Extra}
    catch
        ?EXCEPTION(error, Reason, Stacktrace) ->
            ?log(error, "~p~nKey = ~p~nTrace: ~p",
                 [Reason, Key, ?GET_STACK(Stacktrace)])
    end.

%% Fold helper: prepend an info tuple for Metric, including which
%% subscribers (reporter/datapoint pairs) reference it.
retrieve_metric({Metric, Type, Enabled}, Acc) ->
    Cands = ets:select(
              ?EXOMETER_SUBS,
              [{#subscriber{key = #key{metric = Metric, _='_'},
                            _ = '_'}, [], ['$_']}]),
    [ { Metric, exometer:info(Metric, datapoints),
        get_subscribers(Metric, Type, Enabled, Cands), Enabled } | Acc ].
%% find_entries_in_list(find, Path, List) ->
%% Pat = Path ++ '_',
%% Spec = ets:match_spec_compile([{ {Pat, '_', '_'}, [], ['$_'] }]),
%% ets:match_spec_run(List, Spec);
%% find_entries_in_list(select, Pat, List) ->
%% Spec = ets:match_spec_compile(Pat),
%% ets:match_spec_run(List, Spec).
%% Filter a candidate subscriber list down to {Reporter, DataPoint}
%% pairs whose subscription key matches Metric exactly. (Pattern-based
%% subscriptions were previously handled by the commented-out clause
%% below.)
get_subscribers(_Metric, _Type, _Status, []) ->
    [];

%% This subscription matches Metric
get_subscribers(Metric, Type, Status,
                [ #subscriber {
                     key = #key {
                       reporter = SReporter,
                       metric = Metric,
                       datapoint = SDataPoint
                      }} | T ]) ->
    ?log(debug,"get_subscribers(~p, ~p, ~p): match~n", [ Metric, SDataPoint, SReporter]),
    [ { SReporter, SDataPoint } | get_subscribers(Metric, Type, Status, T) ];

%% get_subscribers(Metric, Type, Status,
%%                 [ #subscriber {
%%                      key = #key {
%%                        metric = {How, Path},
%%                        reporter = SReporter,
%%                        datapoint = SDataPoint
%%                       }} | T ]) ->
%%     case find_entries_in_list(How, Path, [{Metric, Type, Status}]) of
%%         [] ->
%%             get_subscribers(Metric, Type, Status, T);
%%         [_] ->
%%             [ { SReporter, SDataPoint }
%%               | get_subscribers(Metric, Type, Status, T) ]
%%     end;

%% This subscription does not match Metric.
get_subscribers(Metric, Type, Status,
                [ #subscriber {
                     key = #key {
                       reporter = SReporter,
                       metric = SMetric,
                       datapoint = SDataPoint
                      }} | T]) ->
    ?log(debug, "get_subscribers(~p, ~p, ~p) nomatch(~p) ~n",
         [ SMetric, SDataPoint, SReporter, Metric]),
    get_subscribers(Metric, Type, Status, T).
%% Purge all subscriptions associated with a specific reporter
%% (that just went down): cancel each subscription's timer and delete
%% its ETS row.
purge_subscriptions(R) ->
    Subs = ets:select(?EXOMETER_SUBS,
                      [{#subscriber{key = #key{reporter = R, _='_'},
                                    _ = '_'}, [], ['$_']}]),
    lists:foreach(fun(#subscriber {key = Key, t_ref = TRef}) ->
                          cancel_timer(TRef),
                          ets:delete(?EXOMETER_SUBS, Key)
                  end, Subs).

%% Entry point of a spawned reporter process (see spawn_reporter/2).
%% Resolves the callback module (defaults to the reporter name), runs
%% its exometer_init/1, and wraps its state in #rst{}. The 'report_bulk'
%% option selects bulk delivery in r_exometer_report/5. Exits with the
%% callback's error reason on init failure.
reporter_init(Reporter, Opts) ->
    Module = proplists:get_value(module, Opts, Reporter),
    Bulk = proplists:get_value(report_bulk, Opts, false),
    case Module:exometer_init(Opts) of
        {ok, St} ->
            {ok, Module, #rst{st = St, bulk = Bulk}};
        {error, Reason} ->
            ?log(error, "Failed to start reporter ~p: ~p~n", [Module, Reason]),
            exit(Reason)
    end.
%% Main receive loop of a reporter process (plain proc_lib process, not
%% a gen_server). Each message is dispatched to the matching callback of
%% Module; any callback result other than {ok, NewState} keeps the
%% previous state. {exometer_terminate, _} ends the loop.
reporter_loop(Module, #rst{st = St, bulk = Bulk} = RSt) ->
    NSt = receive
              {exometer_report, Found, Extra} ->
                  {ok, r_exometer_report(
                         Bulk, Module, Found, Extra, St)};
              {exometer_unsubscribe, Metric, DataPoint, Extra } ->
                  case Module:exometer_unsubscribe(Metric, DataPoint, Extra, St) of
                      {ok, St1} -> {ok, St1};
                      _ -> {ok, St}
                  end;
              {exometer_subscribe, Metric, DataPoint, Interval, Extra } ->
                  case Module:exometer_subscribe(Metric, DataPoint, Interval, Extra, St) of
                      {ok, St1} -> {ok, St1};
                      _ -> {ok, St}
                  end;
              {exometer_newentry, Entry} ->
                  case Module:exometer_newentry(Entry, St) of
                      {ok, St1} -> {ok, St1};
                      _ -> {ok, St}
                  end;
              {exometer_setopts, Metric, Options, Status} ->
                  case Module:exometer_setopts(Metric, Options, Status, St) of
                      {ok, St1} -> {ok, St1};
                      _ -> {ok, St}
                  end;
              {exometer_terminate, Reason} ->
                  Module:exometer_terminate(Reason, St),
                  terminate;
              {exometer_proc, {From, Ref}, Req} ->
                  %% Synchronous request; reply is tagged with caller's Ref.
                  case Module:exometer_call(Req, From, St) of
                      {reply, Reply, St1} ->
                          From ! {Ref, Reply},
                          {ok, St1};
                      {noreply, St1} ->
                          {ok, St1};
                      _ ->
                          {ok, St}
                  end;
              {exometer_proc, Req} ->
                  case Module:exometer_cast(Req, St) of
                      {noreply, St1} ->
                          {ok, St1};
                      _ ->
                          {ok, St}
                  end;
              %% Allow reporters to generate their own callbacks.
              Other ->
                  ?log(debug, "Custom invocation: ~p(~p)~n", [ Module, Other]),
                  case Module:exometer_info(Other, St) of
                      {ok, St1} -> {ok, St1};
                      _ -> {ok, St}
                  end
          end,
    %% 'terminate' (or any non-{ok, _}) ends the loop.
    case NSt of
        {ok, St2} ->
            reporter_loop(Module, RSt#rst{st = St2});
        _ ->
            ok
    end.
%% Deliver a report to the callback module: one exometer_report/5 call
%% per datapoint (Bulk == false), or a single exometer_report_bulk/3
%% call when Bulk == true and the module exports it.
r_exometer_report(false, Module, Found, Extra, St) ->
    lists:foldl(
      fun({Name, Values}, Acc) ->
              lists:foldl(
                fun({DP, Val}, Acc1) ->
                        case Module:exometer_report(
                               Name, DP, Extra, Val, Acc1) of
                            {ok, St1} -> St1;
                            %% NOTE(review): the fallback is the ORIGINAL
                            %% St, not the accumulated Acc1 — looks like it
                            %% discards state accrued so far; confirm this
                            %% is intended before changing.
                            _ -> St
                        end
                end, Acc, Values)
      end, St, Found);
r_exometer_report(true, Module, Found, Extra, St) ->
    case erlang:function_exported(Module, exometer_report_bulk, 3) of
        true ->
            case Module:exometer_report_bulk(Found, Extra, St) of
                {ok, St1} ->
                    St1;
                _ ->
                    St
            end;
        false ->
            %% Module lacks the bulk callback; fall back to per-datapoint.
            r_exometer_report(false, Module, Found, Extra, St)
    end.
%% Synchronous request to the exometer_report gen_server.
call(Req) ->
    gen_server:call(?MODULE, Req).

%% Asynchronous request to the exometer_report gen_server.
cast(Req) ->
    gen_server:cast(?MODULE, Req).
%% Install one static subscription spec (from app config). Supported
%% shapes: {Reporter, Metric, DataPoint, Interval [, Retry [, Extra]]},
%% {apply, {M, F, A}} where the call returns a list of specs, and
%% {select, Expr} which subscribes every metric matching an ets select
%% pattern. Anything else is logged and ignored.
init_subscriber({Reporter, Metric, DataPoint, Interval, RetryFailedMetrics}) ->
    Status = get_reporter_status(Reporter),
    subscribe_(Reporter, Metric, DataPoint, Interval,
               RetryFailedMetrics, undefined, Status);
init_subscriber({Reporter, Metric, DataPoint, Interval,
                 RetryFailedMetrics, Extra}) ->
    Status = get_reporter_status(Reporter),
    subscribe_(Reporter, Metric, DataPoint, Interval,
               RetryFailedMetrics, Extra, Status);
init_subscriber({Reporter, Metric, DataPoint, Interval}) ->
    Status = get_reporter_status(Reporter),
    subscribe_(Reporter, Metric, DataPoint, Interval,
               true, undefined, Status);
init_subscriber({apply, {M, F, A}}) ->
    lists:foreach(fun(Sub) ->
                          init_subscriber(Sub)
                  end, apply(M, F, A));
%% BUGFIX: the guard used to accept tuple sizes 3..5 while the case
%% below handles sizes 4..6 — so the 6-tuple {P,R,D,I,Rf,X} form could
%% never be used (it fell through to the warning clause) and a 3-tuple
%% crashed with case_clause. The guard now matches the handled shapes.
init_subscriber({select, Expr}) when tuple_size(Expr)==4;
                                     tuple_size(Expr)==5;
                                     tuple_size(Expr)==6 ->
    {Pattern, Reporter, DataPoint, Interval, Retry, Extra} =
        case Expr of
            {P, R, D, I} -> {P, R, D, I, true, undefined};
            {P, R, D, I, Rf} -> {P, R, D, I, Rf, undefined};
            {P, R, D, I, Rf, X} -> {P, R, D, I, Rf, X}
        end,
    Status = get_reporter_status(Reporter),
    Entries = exometer:select(Pattern),
    lists:foreach(
      fun({Entry, _, _}) ->
              subscribe_(Reporter, Entry, DataPoint, Interval,
                         Retry, Extra, Status)
      end, Entries);
init_subscriber(Other) ->
    ?log(warning, "Incorrect static subscriber spec ~p. "
         "Use { Reporter, Metric, DataPoint, Interval [, Extra ]}~n",
         [ Other ]).

%% Current status of a reporter; 'disabled' when the reporter is
%% unknown (lookup_element raises on a missing key).
get_reporter_status(R) ->
    try ets:lookup_element(?EXOMETER_REPORTERS, R, #reporter.status)
    catch
        error:_ -> disabled
    end.
%% Record a restart event (timestamped 'now') in the reporter's restart
%% history and decide the outcome: {restart, R'} with updated history,
%% or {remove, Action} when a frequency rule in the spec is exceeded.
add_restart(#restart{spec = Spec,
                     history = H,
                     save_n = N} = R) ->
    T = exometer_util:timestamp(),
    %% Keep only as much history as the spec can ever need (save_n).
    H1 = lists:sublist([T|H], 1, N),
    case match_frequency(H1, Spec) of
        {remove, Action} ->
            {remove, Action};
        restart ->
            {restart, R#restart{history = H1}}
    end.
%% Decide whether the restart history (newest timestamp first) violates
%% any {MaxRestarts, WithinSeconds} rule in Spec. Returns
%% {remove, Action} on a violation, otherwise 'restart'.
match_frequency([Newest | Older], Spec) ->
    match_frequency(Older, 1, Newest, Spec).

%% Walk backwards through successively older timestamps, counting
%% restarts and measuring the span (in seconds — timestamps are in
%% milliseconds) back from the newest event.
match_frequency([Ts | Older], Count0, Newest, Spec) ->
    Count = Count0 + 1,
    SpanSecs = (Newest - Ts) div 1000,
    case find_match(Spec, Count, SpanSecs) of
        {true, Action} ->
            {remove, Action};
        false ->
            match_frequency(Older, Count, Newest, Spec)
    end;
match_frequency([], _Count, _Newest, _Spec) ->
    restart.
%% Scan the restart spec for a {MaxRestarts, WithinSecs} rule that the
%% observed (Count, SpanSecs) pair violates. On a hit, the associated
%% action is whatever {M, F} follows the rule (see find_action/1).
find_match(Spec, Count, SpanSecs) ->
    case Spec of
        [{MaxR, MaxT} | Rest] when MaxR =< Count, MaxT >= SpanSecs ->
            {true, find_action(Rest)};
        [_ | Rest] ->
            find_match(Rest, Count, SpanSecs);
        [] ->
            false
    end.
%% First {Module, Function} action found in the (remaining) restart
%% spec; 'no_action' when there is none.
find_action([Head | Rest]) ->
    case Head of
        {M, F} when is_atom(M), is_atom(F) -> Head;
        _ -> find_action(Rest)
    end;
find_action([]) ->
    no_action.
%% Default restart spec: allow at most 3 restarts within 1 second and
%% 10 within 30 seconds; beyond that, remove the reporter entirely.
default_restart() ->
    Limits = [{3, 1}, {10, 30}],
    Limits ++ [{?MODULE, remove_reporter}].
%% Build the #restart{} record from the 'restart' option in Opts,
%% validating a user-supplied spec, or falling back to the default.
get_restart(Opts) ->
    Spec = case lists:keyfind(restart, 1, Opts) of
               {restart, R} -> valid_restart(R);
               false -> default_restart()
           end,
    restart_rec(Spec).
%% Wrap a restart spec in a #restart{} record. save_n is the largest
%% restart count mentioned in any {Count, Seconds} rule, i.e. how many
%% history entries must be retained to evaluate the spec.
restart_rec(Spec) ->
    MaxCount = lists:foldl(
                 fun({Count, _}, Acc) when is_integer(Count) ->
                         erlang:max(Count, Acc);
                    (_, Acc) ->
                         Acc
                 end, 0, Spec),
    #restart{spec = Spec, save_n = MaxCount}.
%% Validate a user-supplied restart spec: each element must be either a
%% {Restarts, Seconds} pair of positive integers or an {M, F} action.
%% Returns the spec unchanged, or raises {invalid_restart_spec, Spec}.
valid_restart(Spec) when is_list(Spec) ->
    Check = fun({R, T}) when is_integer(R), is_integer(T), R > 0, T > 0 ->
                    ok;
               ({M, F}) when is_atom(M), is_atom(F) ->
                    ok;
               (_) ->
                    erlang:error({invalid_restart_spec, Spec})
            end,
    lists:foreach(Check, Spec),
    Spec.
%% Remove a reporter, terminating its process first.
do_remove_reporter(Reporter) ->
    do_remove_reporter(Reporter, true).

%% Delete a reporter's ETS row and purge its subscriptions. When
%% Terminate is true the reporter process is shut down as well; false
%% is used when the process is already dead (e.g. after 'DOWN').
do_remove_reporter(Reporter, Terminate) ->
    case ets:lookup(?EXOMETER_REPORTERS, Reporter) of
        [#reporter{} = R] ->
            case Terminate of
                true ->
                    terminate_reporter(R);
                false ->
                    ok
            end,
            ets:delete(?EXOMETER_REPORTERS, Reporter),
            purge_subscriptions(Reporter),
            ok;
        [] ->
            {error, not_found}
    end.

%% Look up a reporter and apply an enable/disable status transition.
change_reporter_status(Reporter, New) ->
    case ets:lookup(?EXOMETER_REPORTERS, Reporter) of
        [R] -> do_change_reporter_status(R, New);
        [] -> {error, not_found}
    end.
%% Apply a status transition to a reporter. Always returns ok.
%% disabled -> enabled : restart the reporter process.
%% enabled -> disabled : cancel its subscription timers, terminate the
%%                       process, and record the new status in ETS.
%% unchanged status    : no-op.
%% (Fix: the final line of this function had dataset-extraction junk
%% appended after 'ok.', which broke compilation; removed.)
do_change_reporter_status(#reporter{name = Reporter,
                                    status = Old} = R, New) ->
    case {Old, New} of
        {disabled, enabled} ->
            restart_reporter(R);
        {enabled, disabled} ->
            cancel_subscr_timers(Reporter),
            terminate_reporter(R),
            ets:update_element(?EXOMETER_REPORTERS,
                               Reporter, [{#reporter.status, disabled}]);
        {Old, Old} ->
            %% Requested status equals the current one; nothing to do.
            ok
    end,
    ok.
%%% @doc A consistent hashing library. The output range of the ring is the same
%%% as what SHA-256 produces. Nodes and keys are mapped to the ring using SHA-256 as well.
%%% @end
-module(concha).
%% API exports
-export([add/2,
contains/2,
lookup/2,
members/1,
new/1,
new/2,
remove/2,
size/1]).
-export_type([ring/0]).
-define(HASH, sha256).
-type num_vnodes() :: pos_integer().
-type node_entry() :: term().
-type key() :: term().
-type position() :: binary().
-type positions() :: [{position(), node_entry()}].
-type nodes() :: [node_entry()].
-type inner_ring() :: gb_trees:tree(position(), node_entry()).
-opaque ring() :: {num_vnodes(), inner_ring()}.
%%====================================================================
%% API functions
%%====================================================================
%% @doc Adds a node (and its virtual nodes) to the ring. Returns the new ring.
-spec add(node_entry(), Ring :: ring()) -> ring().
add(Node, {NumVNodes, InnerRing}) ->
    Positions = position_node(NumVNodes, Node),
    {NumVNodes, build_ring(Positions, InnerRing)}.
%% @doc Returns true if the given node is present in the ring, otherwise false.
-spec contains(node_entry(), Ring :: ring()) -> boolean().
contains(Node, {_NumVNodes, InnerRing}) ->
    %% A node is present iff its primary hash position exists in the tree.
    gb_trees:is_defined(chash(Node), InnerRing).
%% @doc Returns the node responsible for the given key: the first node
%% positioned at or clockwise after the key's hash, wrapping around to
%% the smallest position when needed. Returns {error, empty_ring} when
%% the ring holds no nodes.
-spec lookup(key(), Ring :: ring()) -> node_entry() | {error, empty_ring}.
lookup(Key, {_NumVNodes, InnerRing}) ->
    case gb_trees:is_empty(InnerRing) of
        true ->
            {error, empty_ring};
        false ->
            Position = chash(Key),
            case gb_trees:next(gb_trees:iterator_from(Position, InnerRing)) of
                {_Pos, Node, _Iter} ->
                    Node;
                none ->
                    %% Walked past the end of the ring: wrap to the start.
                    {_SmallestPos, First} = gb_trees:smallest(InnerRing),
                    First
            end
    end.
%% @doc Returns the sorted list of distinct nodes in the ring
%% (virtual-node duplicates are collapsed).
-spec members(Ring :: ring()) -> nodes().
members({_NumVNodes, InnerRing}) ->
    AllEntries = gb_trees:values(InnerRing),
    lists:usort(AllEntries).
%% @doc Creates a new ring without virtual nodes.
-spec new(nodes()) -> ring().
new(Nodes) ->
    new(1, Nodes).

%% @doc Creates a new ring with `NumVNodes' positions per node.
-spec new(num_vnodes(), nodes()) -> ring().
new(NumVNodes, Nodes) ->
    Positioned = lists:flatmap(fun(Node) -> position_node(NumVNodes, Node) end,
                               Nodes),
    {NumVNodes, build_ring(Positioned)}.
%% @doc Removes the given node (and its virtual nodes) from the ring if
%% present; a no-op otherwise. Returns the new ring.
-spec remove(node_entry(), Ring :: ring()) -> ring().
remove(Node, {NumVNodes, InnerRing}) ->
    Delete = fun({Position, _}, Tree) -> gb_trees:delete_any(Position, Tree) end,
    Pruned = lists:foldl(Delete, InnerRing, position_node(NumVNodes, Node)),
    {NumVNodes, Pruned}.
%% @doc Returns the number of ring positions, i.e. nodes including all
%% of their virtual-node replicas. (Call fully qualified as concha:size/1
%% to avoid the auto-imported size/1 BIF.)
-spec size(Ring :: ring()) -> non_neg_integer().
size({_NumVNodes, InnerRing}) ->
    gb_trees:size(InnerRing).
%%====================================================================
%% Internal functions
%%====================================================================
-spec build_ring(positions()) -> inner_ring().
%% Build a fresh position tree from an unsorted position list.
build_ring(Positions) ->
    %% gb_trees:from_orddict/1 requires keys in ascending order.
    Sorted = lists:keysort(1, Positions),
    gb_trees:from_orddict(Sorted).

-spec build_ring(positions(), inner_ring()) -> inner_ring().
%% Insert additional positions into an existing tree.
build_ring(Positions, Ring) ->
    Insert = fun({Position, Node}, Tree) -> gb_trees:insert(Position, Node, Tree) end,
    lists:foldl(Insert, Ring, Positions).
%% Hash a single term onto the ring's key space (?HASH = sha256, so the
%% result is a 32-byte binary).
chash(X) -> crypto:hash(?HASH, term_to_binary(X)).

%% Hash the concatenation of two terms' external formats; used to place
%% virtual-node replicas at positions distinct from the primary.
chash(X, Y) ->
    XBin = term_to_binary(X),
    YBin = term_to_binary(Y),
    crypto:hash(?HASH, <<XBin/binary, YBin/binary>>).
%% Map a node to its primary ring position: {Position, Node}.
%% (Fix: the final line of position_node/2 had dataset-extraction junk
%% appended after the closing period, which broke compilation; removed.)
position_node(Node) ->
    {chash(Node), Node}.

%% Map a node to all of its ring positions: the primary position plus
%% NumVNodes - 1 replicas, each derived by hashing the (Node, Index)
%% pair so replicas land at distinct positions.
-spec position_node(num_vnodes(), node_entry()) -> positions().
position_node(1, Node) ->
    [position_node(Node)];
position_node(NumVNodes, Node) ->
    Replicas = [{chash(Node, Idx), Node} || Idx <- lists:seq(1, NumVNodes - 1)],
    [position_node(Node) | Replicas].
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2007-2015 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc Perform "chain repair", i.e., resynchronization of Machi file
%% contents and metadata as servers are (re-)added to the chain.
%%
%% The implementation here is a very basic one, and is probably a bit
%% slower than the original "demo day" implementation at
%% [https://github.com/basho/machi/blob/master/prototype/demo-day-hack/file0_repair_server.escript]
%%
%% It's so easy to bikeshed this into a 1 year programming exercise.
%%
%% General TODO note: There are a lot of areas for exploiting parallelism here.
%% I've set the bikeshed aside for now, but "make repair faster" has a
%% lot of room for exploiting concurrency, overlapping reads & writes,
%% etc etc. There are also lots of different trade-offs to make with
%% regard to RAM use vs. disk use.
%%
%% There's no reason why repair can't be done:
%%
%% <ol>
%% <li> Repair in parallel across multiple repairees ... Optimization.
%% </li>
%% <li> Repair multiple byte ranges concurrently ... Optimization.
%% </li>
%% <li> Use bigger chunks than the client originally used to write the file
%% ... Optimization ... but it would be the easiest to implement, e.g. use
%% constant-sized 4MB chunks. Unfortuntely, it would also destroy
%% the ability to verify here that the chunk checksums are correct
%% *and* also propagate the correct checksum metadata to the
%% destination FLU.
%%
%% As an additional optimization, add a bit of #2 to start the next
%% read while the current write is still in progress.
%% </li>
%% <li> The current method centralizes the "smarts" required to compare
%% checksum differences ... move some computation to each FLU, then use
%% a Merkle- or other-compression-style scheme to reduce the amount of
%% data sent across a network.
%% </li>
%% </ol>
%%
%% Most/all of this could be executed in parallel on each FLU relative to
%% its own files. Then, in another TODO option, perhaps build a Merkle tree
%% or other summary of the local files and send that data structure to the
%% repair coordinator.
%%
%% Also, as another TODO note, repair_both_present() in the
%% prototype/demo-day code uses an optimization of calculating the MD5
%% checksum of the chunk checksum data as it arrives, and if the two MD5s
%% match, then we consider the two files in sync. If there isn't a match,
%% then we sort the lines and try another MD5, and if they match, then we're
%% in sync. In theory, that's lower overhead than the procedure used here.
%%
%% NOTE that one reason I chose the "directives list" method is to have an
%% option, later, of choosing to repair a subset of repairee FLUs if there
%% is a big discrepency between out of sync files: e.g., if FLU x has N
%% bytes out of sync but FLU y has 50N bytes out of sync, then it's likely
%% better to repair x only so that x can return to the UPI list quickly.
%% Also, in the event that all repairees are roughly comparably out of sync,
%% then the repair network traffic can be minimized by reading each chunk
%% only once.
-module(machi_chain_repair).
-include("machi_projection.hrl").
-define(SHORT_TIMEOUT, 5*1000).
-define(LONG_TIMEOUT, 60*1000).
-define(MAX_OFFSET, 999*1024*1024*1024*1024*1024*1024*1024).
%% These macros assume there's a bound variable called Verb.
-define(VERB(Fmt), if Verb -> io:format(Fmt ); true -> ok end).
-define(VERB(Fmt, Args), if Verb -> io:format(Fmt, Args); true -> ok end).
-ifdef(TEST).
-compile(export_all).
-endif. % TEST
-export([repair/7]).
%% @doc Top-level chain-repair entry point (AP mode). Starts a proxy per
%% relevant FLU, summarizes which files are missing where, builds repair
%% directives from the perspective of Src, and executes them. Returns ok
%% or {error, {Class, Reason, Stacktrace}}. The 'after' clause always
%% quits every proxy started, even when repair throws.
repair(ap_mode=ConsistencyMode, Src, Repairing, UPI, MembersDict, ETS, Opts) ->
    %% Use process dict so that 'after' clause can always quit all
    %% proxy pids.
    put(proxies_dict, orddict:new()),
    Add = fun(Name, Pid) -> put(proxies_dict, orddict:store(Name, Pid, get(proxies_dict))) end,
    OurFLUs = lists:usort([Src] ++ Repairing ++ UPI), % AP assumption!
    RepairMode = proplists:get_value(repair_mode, Opts, repair),
    Verb = proplists:get_value(verbose, Opts, false),
    RepairId = proplists:get_value(repair_id, Opts, id1),
    Res = try
              _ = [begin
                       {ok, Proxy} = machi_proxy_flu1_client:start_link(P),
                       Add(FLU, Proxy)
                   end || {FLU,P} <- MembersDict, lists:member(FLU, OurFLUs)],
              ProxiesDict = get(proxies_dict),

              D = dict:new(),
              D2 = lists:foldl(fun({FLU, Proxy}, Dict) ->
                                       get_file_lists(Proxy, FLU, Dict)
                               end, D, ProxiesDict),
              MissingFileSummary = make_missing_file_summary(D2, OurFLUs),
              %% ?VERB("~w MissingFileSummary ~p\n",[RepairId,MissingFileSummary]),
              lager:info("Repair ~w MissingFileSummary ~p\n",
                         [RepairId, MissingFileSummary]),

              [ets:insert(ETS, {{directive_bytes, FLU}, 0}) || FLU <- OurFLUs],
              %% Repair files from perspective of Src, i.e. tail(UPI).
              SrcProxy = orddict:fetch(Src, ProxiesDict),
              {ok, EpochID} = machi_proxy_flu1_client:get_epoch_id(
                                SrcProxy, ?SHORT_TIMEOUT),
              %% ?VERB("Make repair directives: "),
              Ds =
                  [{File, make_repair_directives(
                            ConsistencyMode, RepairMode, File, Size, EpochID,
                            Verb,
                            Src, OurFLUs, ProxiesDict, ETS)} ||
                      {File, {Size, _MissingList}} <- MissingFileSummary],
              %% ?VERB(" done\n"),
              lager:info("Repair ~w repair directives finished\n", [RepairId]),
              [begin
                   [{_, Bytes}] = ets:lookup(ETS, {directive_bytes, FLU}),
                   %% ?VERB("Out-of-sync data for FLU ~p: ~s MBytes\n",
                   %%       [FLU, mbytes(Bytes)]),
                   lager:info("Repair ~w "
                              "Out-of-sync data for FLU ~p: ~s MBytes\n",
                              [RepairId, FLU, mbytes(Bytes)]),
                   ok
               end || FLU <- OurFLUs],
              %% ?VERB("Execute repair directives: "),
              ok = execute_repair_directives(ConsistencyMode, Ds, Src, EpochID,
                                             Verb, OurFLUs, ProxiesDict, ETS),
              %% ?VERB(" done\n"),
              lager:info("Repair ~w repair directives finished\n", [RepairId]),
              ok
          catch
              What:Why ->
                  Stack = erlang:get_stacktrace(),
                  {error, {What, Why, Stack}}
          after
              %% BUGFIX: orddict:to_list/1 yields {Name, Pid} pairs; the
              %% old code bound the whole pair as "Pid" and passed it to
              %% quit/1, which always failed (silently, via catch) and
              %% leaked every proxy process. Destructure the pair.
              [(catch machi_proxy_flu1_client:quit(Pid)) ||
                  {_FLU, Pid} <- orddict:to_list(get(proxies_dict))]
          end,
    Res;
repair(cp_mode=_ConsistencyMode, Src, Repairing, UPI, MembersDict, ETS, Opts) ->
    io:format(user, "\n\nTODO! cp_mode repair is not fully implemented!\n\n", []),
    repair(ap_mode, Src, Repairing, UPI, MembersDict, ETS, Opts).
%% Summarize, per file, which servers lack it entirely. Input is a dict
%% of File => [{FLU, SizeOnThatFLU}]; output is a sorted list of
%% {File, {MaxSize, ServersMissingFrom}} where MaxSize is the largest
%% size any server reports. (A "demo day" era helper; TODO: still used?)
make_missing_file_summary(Dict, AllFLUs) ->
    Summarize =
        fun({File, GotSizes}) ->
                FLUsWithFile = [FLU || {FLU, _Sz} <- GotSizes],
                MaxSize = lists:max([Sz || {_FLU, Sz} <- GotSizes]),
                {File, {MaxSize, AllFLUs -- FLUsWithFile}}
        end,
    [Summarize(Entry) || Entry <- lists:sort(dict:to_list(Dict))].
%% Fold one FLU's file listing into the accumulator dict, appending a
%% {FLU, Size} entry under each file name.
get_file_lists(Proxy, FLU_name, D) ->
    {ok, Res} = machi_proxy_flu1_client:list_files(Proxy, ?DUMMY_PV1_EPOCH,
                                                   ?SHORT_TIMEOUT),
    lists:foldl(fun({Size, File}, Dict) ->
                        dict:append(File, {FLU_name, Size}, Dict)
                end, D, Res).
%% Build a sort predicate over {{Offset, Size, CSum, FLU}, N} tuples.
%% Ordering rules: smallest offset first; at equal offsets the repair
%% source FLU sorts before everything else, then FLUs by name; otherwise
%% plain term order applies (which also puts smaller sizes first).
make_repair_compare_fun(SrcFLU) ->
    fun({{SameOffset, _SzA, _CsA, FLU_A}, _NA},
        {{SameOffset, _SzB, _CsB, FLU_B}, _NB}) ->
            %% Both chunks start at the same offset.
            case {FLU_A, FLU_B} of
                {SrcFLU, _} -> true;
                {_, SrcFLU} -> false;
                _ -> FLU_A < FLU_B
            end;
       (TupleA, TupleB) ->
            %% Different offsets (or shapes): ordinary term comparison.
            TupleA =< TupleB
    end.
%% Build the per-file repair directive list: gather each FLU's checksum
%% list for File, tag each chunk with its owning FLU, append a
%% ?MAX_OFFSET end-of-file sentinel per FLU, and sort with the repair
%% source first (see make_repair_compare_fun/1) before deriving
%% directives.
make_repair_directives(ConsistencyMode, RepairMode, File, Size, EpochID,
                       Verb, Src, FLUs0, ProxiesDict, ETS) ->
    true = (Size < ?MAX_OFFSET),
    FLUs = lists:usort(FLUs0),
    C0 = [begin
              %% erlang:garbage_collect(),
              Proxy = orddict:fetch(FLU, ProxiesDict),
              OffSzCs =
                  case machi_proxy_flu1_client:checksum_list(
                         Proxy, EpochID, File, ?LONG_TIMEOUT) of
                      {ok, InfoBin} ->
                          machi_csum_table:split_checksum_list_blob_decode(InfoBin);
                      {error, no_such_file} ->
                          []
                  end,
              [{?MAX_OFFSET, 0, <<>>, FLU}] % our end-of-file marker
                  ++
              [{Off, Sz, Cs, FLU} || {Off, Sz, Cs} <- OffSzCs]
          end || FLU <- FLUs],
    C1 = lists:append(C0),
    %% erlang:garbage_collect(),
    C2 = lists:sort(make_repair_compare_fun(Src), C1),
    %% erlang:garbage_collect(),
    Ds = make_repair_directives2(C2, ConsistencyMode, RepairMode,
                                 File, Verb, Src, FLUs, ProxiesDict, ETS),
    Ds.

%% Thin progress-printing shim in front of make_repair_directives3/11.
make_repair_directives2(C2, ConsistencyMode, RepairMode,
                        File, Verb, Src, FLUs, ProxiesDict, ETS) ->
    ?VERB("."),
    make_repair_directives3(C2, ConsistencyMode, RepairMode,
                            File, Verb, Src, FLUs, ProxiesDict, ETS, []).
%% Walk the sorted chunk list and emit {copy, Chunk, MissingFLUs}
%% directives for every byte range some FLU lacks. Recursion stops at
%% the first ?MAX_OFFSET end-of-file sentinel; exits on checksum
%% disagreement or overlapping ranges (see TODOs below). Also tallies
%% per-FLU out-of-sync byte counts into ETS under {directive_bytes, FLU}.
make_repair_directives3([{?MAX_OFFSET, 0, <<>>, _FLU}|_Rest],
                        _ConsistencyMode, _RepairMode,
                        _File, _Verb, _Src, _FLUs, _ProxiesDict, _ETS, Acc) ->
    lists:reverse(Acc);
make_repair_directives3([{Offset, Size, CSum, _FLU}=A|Rest0],
                        ConsistencyMode, RepairMode,
                        File, Verb, Src, FLUs, ProxiesDict, ETS, Acc) ->
    {As0, Rest1} = take_same_offset_size(Rest0, Offset, Size),
    As = [A|As0],
    %% Sanity checking time: all copies of this range must agree on CSum.
    case lists:all(fun({_, _, Cs, _}) when Cs == CSum -> true;
                      (_)                             -> false
                   end, As) of
        true ->
            ok;
        false ->
            %% TODO: Pathology: someone has the wrong checksum.
            %% 1. Fetch Src's chunk.  If checksum is valid, use this chunk
            %%    to repair any invalid value.
            %% 2. If Src's chunk is invalid, then check for other copies
            %%    in the UPI.  If there is a valid chunk there, use it to
            %%    repair any invalid value.
            %% 3a. If there is no valid UPI chunk, then delete this
            %%     byte range from all FLUs
            %% 3b. Log big warning about data loss.
            %% 4. Log any other checksum discrepencies as they are found.
            exit({todo_repair_sanity_check, ?LINE, File, Offset, As})
    end,
    %% List construction guarantees us that there's at least one ?MAX_OFFSET
    %% item remains.  Sort order + our "taking" of all exact Offset+Size
    %% tuples guarantees that if there's a disagreement about chunk size at
    %% this offset, we can look ahead exactly one to see if there is sanity
    %% or not.
    [{Offset_next, _Size_next, _, _}=A_next|_] = Rest1,
    if Offset + Size =< Offset_next ->
            ok;
       true ->
            exit({todo_repair_sanity_check, ?LINE, File, Offset, Size,
                  next_is, A_next})
    end,
    Do = if ConsistencyMode == ap_mode ->
                 %% FLUs that have this range vs. those that must receive it.
                 Gots = [FLU || {_Off, _Sz, _Cs, FLU} <- As],
                 Missing = FLUs -- Gots,
                 _ThisSrc = case lists:member(Src, Gots) of
                                true  -> Src;
                                false -> hd(Gots)
                            end,
                 _ = [ets:update_counter(ETS, {directive_bytes, FLU_m}, Size) ||
                         FLU_m <- Missing],
                 if Missing == [] ->
                         noop;
                    true ->
                         {copy, A, Missing}
                 end
            %% end;
            %% ConsistencyMode == cp_mode ->
            %%      exit({todo_cp_mode, ?MODULE, ?LINE})
         end,
    Acc2 = if Do == noop -> Acc;
              true       -> [Do|Acc]
           end,
    make_repair_directives3(Rest1,
                            ConsistencyMode, RepairMode,
                            File, Verb, Src, FLUs, ProxiesDict, ETS, Acc2).
%% Collect the prefix of chunk tuples whose {Offset, Size, _, _} matches
%% the given Offset and Size exactly. Returns {Taken, Rest}; Taken comes
%% back in reverse input order (callers only inspect membership, so the
%% order is irrelevant).
take_same_offset_size(L, Offset, Size) ->
    take_same_offset_size(L, Offset, Size, []).

take_same_offset_size([Head | Tail] = L, Offset, Size, Taken) ->
    case Head of
        {Offset, Size, _CSum, _FLU} ->
            take_same_offset_size(Tail, Offset, Size, [Head | Taken]);
        _ ->
            {Taken, L}
    end;
take_same_offset_size([], _Offset, _Size, Taken) ->
    {Taken, []}.
%% Execute all per-file directive lists (AP mode only). The fold's
%% accumulator is just the constant context tuple threaded through
%% execute_repair_directive/2.
execute_repair_directives(ap_mode=_ConsistencyMode, Ds, _Src, EpochID, Verb,
                          _OurFLUs, ProxiesDict, ETS) ->
    {_,_,_,_} = lists:foldl(fun execute_repair_directive/2,
                            {ProxiesDict, EpochID, Verb, ETS}, Ds),
    ok.

%% Execute the {copy, ...} directives for one file: read each chunk from
%% the source FLU, verify its checksum, and write it to every FLU that
%% is missing it. Per-file counters (in_*/out_*) are zeroed first and
%% rolled up into the t_* totals at the end.
execute_repair_directive({File, Cmds}, {ProxiesDict, EpochID, Verb, ETS}=Acc) ->
    EtsKeys = [{in_files, t_in_files}, {in_chunks, t_in_chunks},
               {in_bytes, t_in_bytes}, {out_files, t_out_files},
               {out_chunks, t_out_chunks}, {out_bytes, t_out_bytes}],
    [ets:insert(ETS, {L_K, 0}) || {L_K, _T_K} <- EtsKeys],
    F = fun({copy, {Offset, Size, TaggedCSum, MySrc}, MyDsts}, Acc2) ->
                SrcP = orddict:fetch(MySrc, ProxiesDict),
                %% Progress dot every 100 chunks (verbose mode only).
                case ets:lookup_element(ETS, in_chunks, 2) rem 100 of
                    0 -> ?VERB(".", []);
                    _ -> ok
                end,
                _T1 = os:timestamp(),
                %% TODO: support case multiple written or trimmed chunks returned
                {ok, {[{_, Offset, Chunk, _}], _}} =
                    machi_proxy_flu1_client:read_chunk(
                      SrcP, EpochID, File, Offset, Size, [],
                      ?SHORT_TIMEOUT),
                _T2 = os:timestamp(),
                <<_Tag:1/binary, CSum/binary>> = TaggedCSum,
                case machi_util:checksum_chunk(Chunk) of
                    CSum_now when CSum_now == CSum ->
                        %% Checksum verified; fan the chunk out to every
                        %% destination FLU and bump the counters.
                        _ = [begin
                                 DstP = orddict:fetch(DstFLU, ProxiesDict),
                                 _T3 = os:timestamp(),
                                 ok = machi_proxy_flu1_client:write_chunk(
                                        DstP, EpochID, File, Offset, Chunk,
                                        ?SHORT_TIMEOUT),
                                 _T4 = os:timestamp()
                             end || DstFLU <- MyDsts],
                        _ = ets:update_counter(ETS, in_chunks, 1),
                        _ = ets:update_counter(ETS, in_bytes, Size),
                        N = length(MyDsts),
                        _ = ets:update_counter(ETS, out_chunks, N),
                        _ = ets:update_counter(ETS, out_bytes, N*Size),
                        Acc2;
                    CSum_now ->
                        %% Source chunk fails its own checksum: log and
                        %% count, bail out after 100 such errors.
                        error_logger:error_msg(
                          "TODO: Checksum failure: "
                          "file ~p offset ~p size ~p: "
                          "expected ~p got ~p\n",
                          [File, Offset, Size, CSum, CSum_now]),
                        case ets:update_counter(ETS, t_bad_chunks, 1) of
                            N when N > 100 ->
                                throw(todo_wow_so_many_errors_so_verbose);
                            _ ->
                                ok
                        end,
                        Acc2
                end
        end,
    ok = lists:foldl(F, ok, Cmds),
    %% Copy this file's stats to the total counts.
    _ = [ets:update_counter(ETS, T_K, ets:lookup_element(ETS, L_K, 2)) ||
            {L_K, T_K} <- EtsKeys],
    Acc.
%% Format a byte count as megabytes (delegates to machi_util:mbytes/1).
mbytes(N) ->
    machi_util:mbytes(N).
-ifdef(TEST).

%% Sanity test for make_repair_compare_fun/1's ordering rules.
%% (Fix: the closing -endif. line had dataset-extraction junk appended
%% after it, which broke compilation; removed.)
repair_compare_fun_test() ->
    F = make_repair_compare_fun(b),
    List = [{{1,10,x,b},y},{{50,10,x,a},y},{{50,10,x,b},y},{{50,10,x,c},y},{{90,10,x,d},y}],
    Input = lists:reverse(lists:sort(List)),
    %% Although the merge func should never have two of the same FLU
    %% represented, it doesn't matter for the purposes of this test.
    %% 1. Smaller offset (element #1) wins, else...
    %% 2. The FLU (element #2) that's the repair source always wins, else...
    %% 3. The FLU with smallest name wins.
    Expect = [{{1,10,x,b},y},{{50,10,x,b},y},{{50,10,x,a},y},{{50,10,x,c},y},{{90,10,x,d},y}],
    Expect = lists:sort(F, Input).

-endif. % TEST
%%======================================================================
%%
%% LeoFS Doctor
%%
%% Copyright (c) 2012-2016 Rakuten, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%%======================================================================
-module(ets_view).
-include("leofs_doctor.hrl").
%% Module API
-export([draw/1]).
%% =============================================================================
%% Module API
%% =============================================================================
%% @doc Entry point: pushes the ets_collector module to the remote node,
%% then fetches and renders its ETS table data.
draw(State) ->
    Node = State#state.node,
    remote_load_code(ets_collector, Node),
    fetch_and_update(State).
%% =============================================================================
%% Internal Functions
%% =============================================================================
%% @doc Loads the locally-available object code for Module into Node's code
%% server via rpc. Crashes with badmatch if code:get_object_code/1 returns
%% `error' (object code unavailable) — let-it-crash.
remote_load_code(Module, Node) ->
    {_Mod, Binary, Filename} = code:get_object_code(Module),
    Args = [Module, Filename, Binary],
    rpc:call(Node, code, load_binary, Args).
%% @doc Calls ets_collector:get_data/0 on the remote node and renders the
%% result. Returns {error, badrpc} if the rpc itself fails.
fetch_and_update(State) ->
    Node = State#state.node,
    case rpc:call(Node, ets_collector, get_data, []) of
        {ok, TableProplist} ->
            update_screen(TableProplist);
        {badrpc, _Reason} ->
            {error, badrpc}
    end.
%% @doc Renders the fetched list of per-table info proplists to the screen.
update_screen(TableProplist) ->
    print_tableinfo(TableProplist).
%% @doc Column layout for the table-info view: {Title, Width, Options}.
%% Only the name column is right-aligned; the rest default to left.
cols() ->
    [
     {"Table Name", 25, [{align, right}]},
     {"Access Mode", 12, []},
     {"Memory", 10, []},
     {"Size", 10, []}
    ].
%% @doc Extracts one display row {Name, Access, Memory, Size} from an ETS
%% table-info proplist. Missing protection/memory/size fall back to
%% public/0/0; a missing name yields the atom 'undefined'.
row(TableInfo) ->
    Row = {proplists:get_value(name, TableInfo),
           proplists:get_value(protection, TableInfo, public),
           proplists:get_value(memory, TableInfo, 0),
           proplists:get_value(size, TableInfo, 0)},
    {ok, Row}.
%% @doc Builds display rows (prepended, so in reverse input order) from a
%% list of table-info proplists, skipping any entry for which row/1 does
%% not return {ok, Row}.
prd(TableInfos, Acc0) ->
    lists:foldl(
      fun(TableInfo, Acc) ->
              case row(TableInfo) of
                  {ok, Row} -> [Row | Acc];
                  _ -> Acc
              end
      end,
      Acc0, TableInfos).
%% @doc Prints the header row: each column title padded to its configured
%% width, separated by a single space.
%% The previous version used a dynamic call `string:Align(Title, Width)'
%% with the alignment atom as the function name, which defeats xref and
%% Dialyzer; this version dispatches statically (covering the three string
%% padding functions) and matches the alignment handling in update_row/3.
draw_title_bar([], Acc) ->
    ?PRINT(Acc),
    ?PRINT("~n"),
    ok;
draw_title_bar([{Title, Width, Options}|Rest], Acc) ->
    Padded = case proplists:get_value(align, Options, left) of
                 right -> string:right(Title, Width);
                 centre -> string:centre(Title, Width);
                 _ -> string:left(Title, Width)
             end,
    draw_title_bar(Rest, Acc ++ Padded ++ " ").
%% @doc Prints the "Table Info" section: title, column header bar, then one
%% line per ETS table.
print_tableinfo(TableProplist) ->
    ?PRINT("~n- Table Info~n~n"),
    Cols = cols(),
    draw_title_bar(Cols, ""),
    %% prd/2 prepends, so rows print in reverse of the input order.
    RowList = prd(TableProplist, []),
    update_rows(RowList, Cols).
%% @doc Prints each row tuple as one formatted line, using the column
%% widths/options in Columns. Returns ok.
update_rows(Rows, Columns) ->
    lists:foreach(
      fun(RowValues) ->
              update_row(tuple_to_list(RowValues), Columns, "")
      end,
      Rows).
%% @doc Formats one row: pads each value to its column width (right-aligned
%% only when the column says so), then prints the accumulated line once
%% either the values or the column specs run out.
update_row([], _Columns, Acc) ->
    ?PRINT(Acc),
    ?PRINT("~n"),
    ok;
update_row(_Values, [], Acc) ->
    ?PRINT(Acc),
    ?PRINT("~n"),
    ok;
update_row([Value | RestValues], [{_, Width, Options} | RestColumns], Acc) ->
    %% Strings pass through unchanged; any other term is pretty-printed.
    Str = case is_list(Value) of
              true -> Value;
              false -> lists:flatten(io_lib:format("~1000p", [Value]))
          end,
    Aligned = case proplists:get_value(align, Options) of
                  right -> string:right(Str, Width);
                  _ -> string:left(Str, Width)
              end,
    update_row(RestValues, RestColumns, Acc ++ Aligned ++ " ").
%%
%% @doc direct test check (dtchk) provider for rebar3
%%
%% This provider will determine the length of the call path between a test
%% function and any function called in the target module.
%%
%% This code based in part on the xref provider from the rebar3 project
%% https://github.com/rebar/rebar3/blob/master/src/rebar_prv_xref.erl
%%
%% Copyright <NAME> 2016
%%
%% The code is licensed under the Apache 2.0 License.
-module(dtchk_prv).
-behaviour(provider).
-export([init/1,
do/1,
format_error/1]).
-include("rebar.hrl").
-include_lib("providers/include/providers.hrl").
-define(PROVIDER, dtchk).
-define(DEPS, [compile]).
%% ===================================================================
%% Public API
%% ===================================================================
-spec init(rebar_state:t()) -> {ok, rebar_state:t()}.
init(State) ->
Provider = providers:create([
{name, ?PROVIDER}, % The 'user friendly' name of the task
{module, ?MODULE}, % The module implementation of the task
{bare, true}, % The task can be run by the user, always true
{deps, ?DEPS}, % The list of dependencies
{example, "rebar dtchk"}, % How to use the plugin
{opts, []}, % list of options understood by the plugin
{short_desc, short_desc()},
{desc, desc()}
]),
{ok, rebar_state:add_provider(State, Provider)}.
%% @doc Runs the analysis: sets up code paths, runs the xref checks and any
%% user-configured xref queries, then reports.
%% NOTE(review): prepare/1, xref_checks/1 and check_query/2 are defined
%% elsewhere in this file (not visible here) — presumably they start and
%% configure the named `xref' server, since xref:stop(xref) is called below;
%% confirm against their definitions.
-spec do(rebar_state:t()) -> {ok, rebar_state:t()} | {error, string()}.
do(State) ->
    code:add_pathsa(rebar_state:code_paths(State, all_deps)),
    XrefChecks = prepare(State),
    %% Run xref checks
    ?INFO("Running cross reference analysis...", []),
    XrefResults = xref_checks(XrefChecks),
    %% Run custom queries
    QueryChecks = rebar_state:get(State, xref_queries, []),
    QueryResults = lists:foldl(fun check_query/2, [], QueryChecks),
    stopped = xref:stop(xref),
    rebar_utils:cleanup_code_path(rebar_state:code_paths(State, default)),
    %% Succeed only when both the checks and the queries found nothing.
    case XrefResults =:= [] andalso QueryResults =:= [] of
        true ->
            {ok, State};
        false ->
            ?PRV_ERROR({xref_issues, XrefResults, QueryResults})
    end.
%% @doc Pretty-prints an error reason for rebar3; returns an iolist.
-spec format_error(any()) -> iolist().
format_error(Reason) ->
    Formatted = io_lib:format("~p", [Reason]),
    Formatted.
%% ===================================================================
%% Internal functions
%% ===================================================================
%% @doc One-line summary shown by `rebar3 help'.
short_desc() ->
    "List the minimal call path distance between tests and target module".

%% @doc Long description shown by `rebar3 help dtchk'.
%% BUGFIX: the previous format string contained five control sequences
%% ("~s" plus four "~p") but the argument list held only one element, so
%% io_lib:format/2 crashed with badarg whenever the help text was rendered.
%% The format string now matches its argument list.
desc() ->
    io_lib:format("~s~n", [short_desc()]).
-module(cauder_eval).
%% API
-export([seq/3, abstract/1, concrete/1, is_value/1, is_reducible/2]).
-export([match_rec_pid/6, match_rec_uid/4]).
-export([clause_line/3]).
-include("cauder.hrl").
%%%=============================================================================
%%% API
%%%=============================================================================
%%------------------------------------------------------------------------------
%% @doc Evaluates the first reducible expression from the given list.
%%
%% Scans the expression list for the first expression that is still
%% reducible (see is_reducible/2), reduces it one step with expr/3, and
%% splices the resulting expressions back into place, leaving every other
%% expression untouched. Returns the result record from that single step.
%%
%% @see is_reducible/2
-spec eval_list(Bindings, Expressions, Stack) -> Result when
    Bindings :: cauder_types:environment(),
    Expressions :: [cauder_types:abstract_expr()],
    Stack :: cauder_types:stack(),
    Result :: cauder_types:result().

eval_list(Bs, [Expr | Rest], Stk) ->
    case is_reducible(Expr, Bs) of
        true ->
            Result = #result{exprs = NewExprs} = expr(Bs, Expr, Stk),
            Result#result{exprs = NewExprs ++ Rest};
        false ->
            Result = #result{exprs = NewRest} = eval_list(Bs, Rest, Stk),
            Result#result{exprs = [Expr | NewRest]}
    end.
%%------------------------------------------------------------------------------
%% @doc Evaluates the given sequence of expression.
%%
%% If the first expression in the sequence is reducible, then it is evaluated,
%% otherwise it is consumed, given an environment and a call stack.
%% Also if the first expression is not reducible, and there are no more
%% expressions in the sequence, then consumes the first element in the call
%% stack and retrieves the stored information.
%% Returns a record with the updated information and a label indicating the type
%% of step performed.
%%
%% @see is_reducible/2
-spec seq(Bindings, Expressions, Stack) -> Result when
    Bindings :: cauder_types:environment(),
    Expressions :: [cauder_types:abstract_expr()],
    Stack :: cauder_types:stack(),
    Result :: cauder_types:result().

seq(Bs, [E | Es], Stk) ->
    case is_reducible(E, Bs) of
        false ->
            case Es of
                [] ->
                    %% Last expression of the body is already a value: pop the
                    %% stack frame and substitute the value for the frame's
                    %% temporary variable in the continuation expressions.
                    Line = element(2, E),
                    case Stk of
                        % Call entry
                        [{{_M, _F, _A}, Bs1, Es1, Var} | Stk1] ->
                            %% Function call frame: restore the caller's bindings.
                            Es2 = cauder_syntax:replace_variable(Es1, setelement(2, Var, Line), concrete(E)),
                            #result{env = Bs1, exprs = Es2, stack = Stk1};
                        % Block entry
                        [{_Type, Es1, Var} | Stk1] ->
                            %% Block frame (if/case/receive): bindings flow through.
                            Es2 = cauder_syntax:replace_variable(Es1, setelement(2, Var, Line), concrete(E)),
                            #result{env = Bs, exprs = Es2, stack = Stk1}
                    end;
                _ ->
                    %% Value in non-final position: just consume it.
                    #result{env = Bs, exprs = Es, stack = Stk}
            end;
        true ->
            %% Reduce the first expression one step; if that step pushed a new
            %% frame, attach the remaining expressions Es to the PREVIOUS frame
            %% (or to the top level) so they run after the new frame returns.
            #result{env = Bs1, exprs = Es1, stack = Stk1, label = L} = expr(Bs, E, Stk),
            case Stk1 of
                [{{M, F, A}, Bs2, Es2, Var} | Stk] ->
                    #result{env = Bs2, exprs = Es2, stack = [{{M, F, A}, Bs1, Es1 ++ Es, Var} | Stk], label = L};
                [{Type, Es2, Var} | Stk] ->
                    #result{env = Bs1, exprs = Es2, stack = [{Type, Es1 ++ Es, Var} | Stk], label = L};
                _ ->
                    #result{env = Bs1, exprs = Es1 ++ Es, stack = Stk1, label = L}
            end
    end.
%%------------------------------------------------------------------------------
%% @doc Evaluates the given `Expression' and returns a tuple with an updated
%% environment, the expression that resulted from the evaluation, and a label.
-spec expr(Bindings, Expression, Stack) -> Result when
    Bindings :: cauder_types:environment(),
    Expression :: cauder_types:abstract_expr(),
    Stack :: cauder_types:stack(),
    Result :: cauder_types:result().

%% Variable: replace with its bound value.
expr(Bs, {var, Line, Name}, Stk) ->
    Value = maps:get(Name, Bs),
    #result{env = Bs, exprs = [{value, Line, Value}], stack = Stk};
%% Cons cell: reduce head first, then tail; collapse to a value when both are.
expr(Bs, E = {cons, Line, H0, T0}, Stk) ->
    case is_reducible(H0, Bs) of
        true ->
            R = #result{exprs = [H]} = expr(Bs, H0, Stk),
            case is_value(H) andalso is_value(T0) of
                true -> R#result{exprs = [{value, Line, [concrete(H) | concrete(T0)]}]};
                false -> R#result{exprs = [setelement(3, E, H)]}
            end;
        false ->
            case is_reducible(T0, Bs) of
                true ->
                    R = #result{exprs = [T]} = expr(Bs, T0, Stk),
                    case is_value(H0) andalso is_value(T) of
                        true -> R#result{exprs = [{value, Line, [concrete(H0) | concrete(T)]}]};
                        false -> R#result{exprs = [setelement(4, E, T)]}
                    end;
                false -> #result{env = Bs, exprs = [{value, Line, [concrete(H0) | concrete(T0)]}], stack = Stk}
            end
    end;
%% Tuple: reduce elements left-to-right; collapse when all are values.
expr(Bs, E = {tuple, Line, Es0}, Stk) ->
    R = #result{exprs = Es} = eval_list(Bs, Es0, Stk),
    case is_value(Es) of
        true ->
            Tuple = list_to_tuple(lists:map(fun concrete/1, Es)),
            #result{env = Bs, exprs = [{value, Line, Tuple}], stack = Stk};
        false -> R#result{exprs = [setelement(3, E, Es)]}
    end;
%% 'if': pick the first clause whose guard holds and push its body as a frame.
expr(Bs, {'if', Line, Cs}, Stk0) ->
    case match_if(Bs, Cs) of
        {match, Body} ->
            Var = cauder_utils:temp_variable(Line),
            Stk = [{'if', Body, Var} | Stk0],
            #result{env = Bs, exprs = [Var], stack = Stk};
        nomatch -> error(if_clause)
    end;
%% 'case': reduce the argument first, then select a clause and push its body.
expr(Bs0, E = {'case', Line, A, Cs}, Stk0) ->
    case is_reducible(A, Bs0) of
        true -> eval_and_update({Bs0, A, Stk0}, {3, E});
        false ->
            case match_case(Bs0, Cs, A) of
                {match, Bs, Body} ->
                    Var = cauder_utils:temp_variable(Line),
                    Stk = [{'case', Body, Var} | Stk0],
                    #result{env = Bs, exprs = [Var], stack = Stk};
                nomatch -> error({case_clause, concrete(A)})
            end
    end;
%% TODO Support receive with timeout
%% 'receive': emit a {rec, ...} label; the actual message match happens later.
expr(Bs, {'receive', Line, Cs}, Stk0) ->
    % TODO One of these variables is not necessary
    Var = cauder_utils:temp_variable(Line),
    VarBody = cauder_utils:temp_variable(Line),
    Stk = [{'receive', [VarBody], Var} | Stk0],
    #result{env = Bs, exprs = [Var], stack = Stk, label = {rec, VarBody, Cs}};
% TODO Support fun() as entry point argument?
% TODO Handle calls to interpreted fun() from uninterpreted module
%% fun literal: wrap the clauses plus the closure environment into a real fun
%% whose arity matches the first clause's parameter count.
expr(Bs, {'make_fun', Line, Name, Cs}, Stk0) ->
    {ok, M} = current_module(Stk0),
    Arity = length(element(3, hd(Cs))),
    Info = {{M, Name}, Bs, Cs},
    Fun =
        case Arity of
            0 -> fun() -> {[], Info} end;
            1 -> fun(A) -> {[A], Info} end;
            2 -> fun(A, B) -> {[A, B], Info} end;
            3 -> fun(A, B, C) -> {[A, B, C], Info} end;
            4 -> fun(A, B, C, D) -> {[A, B, C, D], Info} end;
            5 -> fun(A, B, C, D, E) -> {[A, B, C, D, E], Info} end;
            _ -> error({argument_limit, Arity}) % TODO Support more arities
        end,
    #result{env = Bs, exprs = [{value, Line, Fun}], stack = Stk0};
%% Built-in function call: reduce arguments, then apply directly.
expr(Bs, E = {bif, Line, M, F, As}, Stk) ->
    case is_reducible(As, Bs) of
        true -> eval_and_update({Bs, As, Stk}, {5, E});
        false ->
            Value = apply(M, F, lists:map(fun concrete/1, As)),
            #result{env = Bs, exprs = [Var], stack = Stk, label = {self, Var}} = never, % placeholder
            #result{env = Bs, exprs = [{value, Line, Value}], stack = Stk}
    end;
%% self()/node()/nodes(): handled via labels so the scheduler supplies values.
expr(Bs, {self, Line}, Stk) ->
    Var = cauder_utils:temp_variable(Line),
    #result{env = Bs, exprs = [Var], stack = Stk, label = {self, Var}};
expr(Bs, {node, Line}, Stk) ->
    Var = cauder_utils:temp_variable(Line),
    #result{env = Bs, exprs = [Var], stack = Stk, label = {node, Var}};
expr(Bs, {nodes, Line}, Stk) ->
    Var = cauder_utils:temp_variable(Line),
    #result{env = Bs, exprs = [Var], stack = Stk, label = {nodes, Var}};
%% spawn variants: reduce each argument in turn, then emit a spawn label.
expr(Bs, E = {spawn, Line, Fun}, Stk) ->
    case is_reducible(Fun, Bs) of
        true -> eval_and_update({Bs, Fun, Stk}, {3, E});
        false ->
            Var = cauder_utils:temp_variable(Line),
            Label = {spawn, Var, Fun},
            #result{env = Bs, exprs = [Var], stack = Stk, label = Label}
    end;
expr(Bs, E = {spawn, Line, N, Fun}, Stk) ->
    case is_reducible(N, Bs) of
        true -> eval_and_update({Bs, N, Stk}, {3, E});
        false ->
            case is_reducible(Fun, Bs) of
                true -> eval_and_update({Bs, Fun, Stk}, {4, E});
                false ->
                    Var = cauder_utils:temp_variable(Line),
                    Label = {spawn, Var, concrete(N), Fun},
                    #result{env = Bs, exprs = [Var], stack = Stk, label = Label}
            end
    end;
expr(Bs, E = {spawn, Line, M, F, As}, Stk) ->
    case is_reducible(M, Bs) of
        true -> eval_and_update({Bs, M, Stk}, {3, E});
        false ->
            case is_reducible(F, Bs) of
                true -> eval_and_update({Bs, F, Stk}, {4, E});
                false ->
                    case is_reducible(As, Bs) of
                        true -> eval_and_update({Bs, As, Stk}, {5, E});
                        false ->
                            Var = cauder_utils:temp_variable(Line),
                            Label = {spawn, Var, concrete(M), concrete(F), concrete(As)},
                            #result{env = Bs, exprs = [Var], stack = Stk, label = Label}
                    end
            end
    end;
expr(Bs, E = {spawn, Line, N, M, F, As}, Stk) ->
    case is_reducible(N, Bs) of
        true -> eval_and_update({Bs, N, Stk}, {3, E});
        false ->
            case is_reducible(M, Bs) of
                true -> eval_and_update({Bs, M, Stk}, {4, E});
                false ->
                    case is_reducible(F, Bs) of
                        true -> eval_and_update({Bs, F, Stk}, {5, E});
                        false ->
                            case is_reducible(As, Bs) of
                                true -> eval_and_update({Bs, As, Stk}, {6, E});
                                false ->
                                    Var = cauder_utils:temp_variable(Line),
                                    Label = {spawn, Var, concrete(N), concrete(M), concrete(F), concrete(As)},
                                    #result{env = Bs, exprs = [Var], stack = Stk, label = Label}
                            end
                    end
            end
    end;
%% start/1,2 (node start): reduce arguments, then emit a start label.
expr(Bs, E = {start, Line, N}, Stk) ->
    case is_reducible(N, Bs) of
        true -> eval_and_update({Bs, N, Stk}, {3,E});
        false ->
            Var = cauder_utils:temp_variable(Line),
            Label = {start, Var, concrete(N)},
            #result{env = Bs, exprs = [Var], stack = Stk, label = Label}
    end;
expr(Bs, E = {start, Line, H, N}, Stk) ->
    case is_reducible(H, Bs) of
        true -> eval_and_update({Bs, H, Stk}, {3,E});
        false ->
            case is_reducible(N, Bs) of
                true -> eval_and_update({Bs, N, Stk}, {4,E});
                false ->
                    Var = cauder_utils:temp_variable(Line),
                    Label = {start, Var, concrete(H), concrete(N)},
                    #result{env = Bs, exprs = [Var], stack = Stk, label = Label}
            end
    end;
%% Send: reduce destination then message; the send expression evaluates to
%% the message itself (Erlang `!' semantics).
expr(Bs, E = {Send, _, L, R}, Stk) when Send =:= 'send' orelse Send =:= 'send_op' ->
    case is_reducible(L, Bs) of
        true -> eval_and_update({Bs, L, Stk}, {3, E});
        false ->
            case is_reducible(R, Bs) of
                true -> eval_and_update({Bs, R, Stk}, {4, E});
                false ->
                    Label = {send, concrete(L), concrete(R)},
                    #result{env = Bs, exprs = [R], stack = Stk, label = Label}
            end
    end;
%% Local call: resolve against the current module and push a call frame.
expr(Bs0, E = {local_call, Line, F, As}, Stk0) ->
    case is_reducible(As, Bs0) of
        true -> eval_and_update({Bs0, As, Stk0}, {4, E});
        false ->
            {ok, M} = current_module(Stk0),
            A = length(As),
            {_, Cs} = cauder_utils:fundef_lookup({M, F, A}),
            {match, Bs, Body} = match_fun(Cs, As),
            Var = cauder_utils:temp_variable(Line),
            Stk = [{{M, F, A}, Bs, Body, Var} | Stk0],
            #result{env = Bs0, exprs = [Var], stack = Stk}
    end;
%% Remote call: interpret if the fundef is known, otherwise apply natively.
expr(Bs0, E = {remote_call, Line, M, F, As}, Stk0) ->
    case is_reducible(As, Bs0) of
        true -> eval_and_update({Bs0, As, Stk0}, {5, E});
        false ->
            A = length(As),
            case cauder_utils:fundef_lookup({M, F, A}) of
                {Exported, Cs} ->
                    % Check if function is accessible
                    case current_module(Stk0) of
                        {ok, M} -> ok;
                        {ok, _} -> true = Exported;
                        error when Stk0 =:= [] -> ok
                    end,
                    {match, Bs, Body} = match_fun(Cs, As),
                    Var = cauder_utils:temp_variable(Line),
                    Stk = [{{M, F, A}, Bs, Body, Var} | Stk0],
                    #result{env = Bs0, exprs = [Var], stack = Stk};
                error ->
                    Value = apply(M, F, lists:map(fun concrete/1, As)),
                    #result{env = Bs0, exprs = [{value, Line, Value}], stack = Stk0}
            end
    end;
% TODO Handle calls to self/0, spawn/1, spawn/3
%% apply/3 with dynamic module/function: reduce M, F, As in order.
expr(Bs0, E = {apply, Line, M0, F0, As}, Stk0) ->
    case is_reducible(M0, Bs0) of
        true -> eval_and_update({Bs0, M0, Stk0}, {3, E});
        false ->
            case is_reducible(F0, Bs0) of
                true -> eval_and_update({Bs0, F0, Stk0}, {4, E});
                false ->
                    case is_reducible(As, Bs0) of
                        true -> eval_and_update({Bs0, As, Stk0}, {5, E});
                        false ->
                            M = concrete(M0),
                            F = concrete(F0),
                            A = length(As),
                            case cauder_utils:fundef_lookup({M, F, A}) of
                                {Exported, Cs} ->
                                    % Check if function is accessible
                                    case current_module(Stk0) of
                                        {ok, M} -> ok;
                                        {ok, _} -> true = Exported;
                                        error when Stk0 =:= [] -> ok
                                    end,
                                    {match, Bs, Body} = match_fun(Cs, As),
                                    Var = cauder_utils:temp_variable(Line),
                                    Stk = [{{M, F, A}, Bs, Body, Var} | Stk0],
                                    #result{env = Bs0, exprs = [Var], stack = Stk};
                                error ->
                                    Value = apply(M, F, lists:map(fun concrete/1, As)),
                                    #result{env = Bs0, exprs = [{value, Line, Value}], stack = Stk0}
                            end
                    end
            end
    end;
%% Applying a fun value: recover {M, F}, closure bindings and clauses from
%% the fun's env (see 'make_fun' above), then push a call frame.
expr(Bs0, E = {apply_fun, Line, Fun, As}, Stk0) ->
    case is_reducible(Fun, Bs0) of
        true -> eval_and_update({Bs0, Fun, Stk0}, {3, E});
        false ->
            case is_reducible(As, Bs0) of
                true -> eval_and_update({Bs0, As, Stk0}, {4, E});
                false ->
                    A = length(As),
                    {env, [{{M, F}, Bs1, Cs}]} = erlang:fun_info(concrete(Fun), env),
                    {match, Bs2, Body} = match_fun(Cs, As),
                    Var = cauder_utils:temp_variable(Line),
                    Stk = [{{M, F, A}, cauder_utils:merge_bindings(Bs1, Bs2), Body, Var} | Stk0],
                    #result{env = Bs0, exprs = [Var], stack = Stk}
            end
    end;
%% Pattern match (=): reduce both sides, then bind; badmatch on failure.
expr(Bs0, E = {match, _, Lhs, Rhs}, Stk) ->
    case is_reducible(Lhs, Bs0) of
        true -> eval_and_update({Bs0, Lhs, Stk}, {3, E});
        false ->
            case is_reducible(Rhs, Bs0) of
                true -> eval_and_update({Bs0, Rhs, Stk}, {4, E});
                false ->
                    case match(Bs0, [Lhs], [Rhs]) of
                        {match, Bs} -> #result{env = Bs, exprs = [Rhs], stack = Stk};
                        nomatch -> error({badmatch, concrete(Rhs)})
                    end
            end
    end;
%% Operator: reduce operands, then delegate to the erlang BIF of that name.
expr(Bs, E = {op, Line, Op, As}, Stk) ->
    case is_reducible(As, Bs) of
        true -> eval_and_update({Bs, As, Stk}, {4, E});
        false ->
            Value = apply(erlang, Op, lists:map(fun concrete/1, As)),
            #result{env = Bs, exprs = [{value, Line, Value}], stack = Stk}
    end;
%% Short-circuit andalso: the right side is only reduced when Lhs is true.
expr(Bs, E = {'andalso', Line, Lhs, Rhs}, Stk) ->
    case is_reducible(Lhs, Bs) of
        true -> eval_and_update({Bs, Lhs, Stk}, {3, E});
        false ->
            case Lhs of
                {value, _, false} -> #result{env = Bs, exprs = [Lhs], stack = Stk};
                {value, _, true} ->
                    case is_reducible(Rhs, Bs) of
                        true -> eval_and_update({Bs, Rhs, Stk}, {4, E});
                        false ->
                            Value = apply(erlang, 'and', [concrete(Lhs), concrete(Rhs)]),
                            #result{env = Bs, exprs = [{value, Line, Value}], stack = Stk}
                    end
            end
    end;
%% Short-circuit orelse: the right side is only reduced when Lhs is false.
expr(Bs, E = {'orelse', Line, Lhs, Rhs}, Stk) ->
    case is_reducible(Lhs, Bs) of
        true -> eval_and_update({Bs, Lhs, Stk}, {3, E});
        false ->
            case Lhs of
                {value, _, true} -> #result{env = Bs, exprs = [Lhs], stack = Stk};
                {value, _, false} ->
                    case is_reducible(Rhs, Bs) of
                        true -> eval_and_update({Bs, Rhs, Stk}, {4, E});
                        false ->
                            Value = apply(erlang, 'or', [concrete(Lhs), concrete(Rhs)]),
                            #result{env = Bs, exprs = [{value, Line, Value}], stack = Stk}
                    end
            end
    end.
%%%=============================================================================
%% @doc Selects the first 'if' clause whose guard sequence evaluates to true.
-spec match_if(Bindings, Clauses) -> {match, Body} | nomatch when
    Bindings :: cauder_types:environment(),
    Clauses :: cauder_types:af_clause_seq(),
    Body :: cauder_types:af_body().

match_if(_Bs, []) ->
    nomatch;
match_if(Bs, [{'clause', _, [], Guard, Body} | Rest]) ->
    case concrete(eval_guard_seq(Bs, Guard)) of
        true -> {match, Body};
        false -> match_if(Bs, Rest)
    end.
%% Selects the first 'case' clause matching the (single) argument V whose
%% guard holds; thin wrapper over match_clause/3.
-spec match_case(Bindings, Clauses, Argument) -> {match, ScopeBindings, Body} | nomatch when
    Bindings :: cauder_types:environment(),
    Clauses :: cauder_types:af_clause_seq(),
    Argument :: cauder_types:af_literal(),
    ScopeBindings :: cauder_types:environment(),
    Body :: cauder_types:af_body().

match_case(Bs, Cs, V) -> match_clause(Bs, Cs, [V]).
%% Selects the first function clause matching the argument list, starting
%% from an empty environment (function bodies do not inherit caller bindings).
-spec match_fun(Clauses, Arguments) -> {match, ScopeBindings, Body} | nomatch when
    Clauses :: cauder_types:af_clause_seq(),
    Arguments :: [cauder_types:af_literal()],
    ScopeBindings :: cauder_types:environment(),
    Body :: cauder_types:af_body().

match_fun(Cs, Vs) -> match_clause(#{}, Cs, Vs).
%% Tries to match the receive clauses Cs against the messages queued for
%% process Pid, picking one message according to the scheduler:
%%  - manual: collect every matching message across all queues, suspend and
%%    let the user pick one (or cancel, which throws 'cancel');
%%  - random: consider only the head of each sender queue and pick one of
%%    the matching heads uniformly at random.
%% BUGFIX: in the manual scheduler's fold function, the 'nomatch' branch
%% previously returned the atom 'skip' instead of the accumulator map,
%% which replaced the accumulator and crashed the next maps:put/3 as soon
%% as one queued message did not match.
-spec match_rec_pid(Clauses, Bindings, RecipientPid, Mail, Sched, Sys) -> {NewBindings, Body, {Message, QueuePosition}, NewMail} | nomatch when
    Clauses :: cauder_types:af_clause_seq(),
    Bindings :: cauder_types:environment(),
    RecipientPid :: cauder_types:proc_id(),
    Mail :: cauder_mailbox:mailbox(),
    Sched :: cauder_types:message_scheduler(),
    Sys :: cauder_types:system(),
    NewBindings :: cauder_types:environment(),
    Body :: cauder_types:af_body(),
    Message :: cauder_types:message(),
    QueuePosition :: pos_integer(),
    NewMail :: cauder_mailbox:mailbox().

match_rec_pid(Cs, Bs, Pid, Mail, Sched, Sys) ->
    case cauder_mailbox:pid_get(Pid, Mail) of
        [] -> nomatch;
        QueueList ->
            case Sched of
                ?SCHEDULER_Manual ->
                    FoldQueue =
                        fun(Msg, Map1) ->
                            case match_rec(Cs, Bs, #message{uid = Uid} = Msg) of
                                {match, Bs1, Body} -> maps:put(Uid, {Bs1, Body, Msg}, Map1);
                                %% Keep the accumulator unchanged on nomatch
                                %% (previously returned 'skip', breaking the fold).
                                nomatch -> Map1
                            end
                        end,
                    FoldQueueList = fun(Queue, Map0) -> lists:foldl(FoldQueue, Map0, queue:to_list(Queue)) end,
                    MatchingBranchesMap = lists:foldl(FoldQueueList, maps:new(), QueueList),
                    case maps:size(MatchingBranchesMap) of
                        0 -> nomatch;
                        _ ->
                            MatchingMessages = lists:map(fun({_, _, Msg}) -> Msg end, maps:values(MatchingBranchesMap)),
                            case cauder:suspend_task(Pid, MatchingMessages, Sys) of
                                {_SuspendTime, {resume, Uid}} -> % TODO Use suspend time
                                    cauder:resume_task(),
                                    {Bs1, Body, Msg} = maps:get(Uid, MatchingBranchesMap),
                                    {QPos, NewMail} = cauder_mailbox:delete(Msg, Mail),
                                    {Bs1, Body, {Msg, QPos}, NewMail};
                                {_SuspendTime, cancel} -> % TODO Use suspend time
                                    throw(cancel)
                            end
                    end;
                ?SCHEDULER_Random ->
                    MatchingBranches = lists:filtermap(
                        fun(Queue) ->
                            {value, Msg} = queue:peek(Queue),
                            case match_rec(Cs, Bs, Msg) of
                                {match, Bs1, Body} -> {true, {Bs1, Body, Msg}};
                                nomatch -> false
                            end
                        end,
                        QueueList
                    ),
                    case length(MatchingBranches) of
                        0 -> nomatch;
                        Length ->
                            {Bs1, Body, Msg} = lists:nth(rand:uniform(Length), MatchingBranches),
                            {QPos, NewMail} = cauder_mailbox:delete(Msg, Mail),
                            {Bs1, Body, {Msg, QPos}, NewMail}
                    end
            end
    end.
%% Tries the receive clauses against a single message's payload (wrapped
%% back into abstract form).
-spec match_rec(Clauses, Bindings, Message) -> {match, NewBindings, Body} | nomatch when
    Clauses :: cauder_types:af_clause_seq(),
    Bindings :: cauder_types:environment(),
    Message :: cauder_mailbox:message(),
    NewBindings :: cauder_types:environment(),
    Body :: cauder_types:af_body().

match_rec(Cs, Bs0, #message{value = Value}) -> match_clause(Bs0, Cs, [abstract(Value)]).
%% @doc Takes the message with the given Uid out of the mailbox and tries to
%% match it against the receive clauses. Returns nomatch either when the Uid
%% is absent or when no clause matches (the mailbox is left unchanged by the
%% caller in that case, since the original Mail0 is still in scope).
-spec match_rec_uid(Clauses, Bindings, Uid, Mail) -> {NewBindings, Body, {Message, QueuePosition}, NewMail} | nomatch when
    Clauses :: cauder_types:af_clause_seq(),
    Bindings :: cauder_types:environment(),
    Uid :: cauder_mailbox:uid(),
    Mail :: cauder_mailbox:mailbox(),
    NewBindings :: cauder_types:environment(),
    Body :: cauder_types:af_body(),
    Message :: cauder_types:message(),
    QueuePosition :: pos_integer(),
    NewMail :: cauder_mailbox:mailbox().

match_rec_uid(Cs, Bs0, Uid, Mail0) ->
    case cauder_mailbox:uid_take(Uid, Mail0) of
        {value, {Msg, QPos}, Mail1} ->
            Arg = abstract(Msg#message.value),
            case match_clause(Bs0, Cs, [Arg]) of
                {match, Bs, Body} -> {Bs, Body, {Msg, QPos}, Mail1};
                nomatch -> nomatch
            end;
        false ->
            nomatch
    end.
%% @doc Selects the first clause whose patterns match the argument list AND
%% whose guard sequence evaluates to true; guard evaluation uses the
%% bindings extended by the pattern match.
-spec match_clause(Bindings, Clauses, Arguments) -> {match, ScopeBindings, Body} | nomatch when
    Bindings :: cauder_types:environment(),
    Clauses :: cauder_types:af_clause_seq(),
    Arguments :: [cauder_types:af_literal()],
    ScopeBindings :: cauder_types:environment(),
    Body :: cauder_types:af_body().

match_clause(_, [], _) ->
    nomatch;
match_clause(Bs0, [{'clause', _, Patterns, Guard, Body} | Rest], Vs) ->
    case match(Bs0, Patterns, Vs) of
        {match, Bs} ->
            case concrete(eval_guard_seq(Bs, Guard)) of
                true -> {match, Bs, Body};
                false -> match_clause(Bs0, Rest, Vs)
            end;
        nomatch ->
            match_clause(Bs0, Rest, Vs)
    end.
%% @doc Like match_clause/3 but returns only the line number of the first
%% matching clause, or -1 when none matches.
-spec clause_line(Bindings, Clauses, Arguments) -> Line when
    Bindings :: cauder_types:environment(),
    Clauses :: cauder_types:af_clause_seq(),
    Arguments :: [cauder_types:af_literal()],
    Line :: non_neg_integer().

clause_line(_, [], _) ->
    -1;
clause_line(Bs0, [{'clause', Line, Patterns, Guard, _} | Rest], Vs) ->
    case match(Bs0, Patterns, Vs) of
        {match, Bs} ->
            case concrete(eval_guard_seq(Bs, Guard)) of
                true -> Line;
                false -> clause_line(Bs0, Rest, Vs)
            end;
        nomatch ->
            clause_line(Bs0, Rest, Vs)
    end.
%% Tries to match a list of values against a list of patterns using the given
%% environment. The list of values should have no variables.
%% Returns {match, NewBindings} with any new variable bindings added, or
%% nomatch (including when the two lists have different lengths, which ends
%% in the catch-all clause).
%% PERF: the previous version re-checked length(Ps) == length(Vs) as a guard
%% on EVERY recursive step, making matching O(n^2); the guard is redundant
%% because a length mismatch always reaches the catch-all clause anyway.
%% The old-style `catch' (which conflated errors with the nomatch throw) is
%% replaced by try/catch scoped to throw:nomatch only.
-spec match(Bindings, Patterns, Arguments) -> {match, NewBindings} | nomatch when
    Bindings :: cauder_types:environment(),
    Patterns :: [cauder_types:af_pattern()],
    Arguments :: [cauder_types:af_literal()],
    NewBindings :: cauder_types:environment().

match(Bs, [], []) -> {match, Bs};
match(Bs0, [Pat | Ps0], [{value, _, Val} | Vs0]) ->
    try match1(Pat, Val, Bs0) of
        {match, Bs} -> match(Bs, Ps0, Vs0)
    catch
        throw:nomatch -> nomatch
    end;
match(_Bs, _Ps, _Vs) -> nomatch.

% TODO Organize arguments to be consistent
%% Matches a single pattern against a concrete term; binds new variables,
%% checks existing bindings for equality, and throws 'nomatch' on failure.
-spec match1(Pattern, Term, Bindings) -> {match, NewBindings} | no_return() when
    Pattern :: cauder_types:af_pattern(),
    Term :: term(),
    Bindings :: cauder_types:environment(),
    NewBindings :: cauder_types:environment().

match1({value, _, V}, V, Bs) ->
    {match, Bs};
match1({var, _, '_'}, _, Bs) ->
    {match, Bs};
match1({var, _, Name}, Term, Bs) ->
    case Bs of
        #{Name := Term} -> {match, Bs};   % already bound to the same term
        #{Name := _} -> throw(nomatch);   % bound to a different term
        _ -> {match, Bs#{Name => Term}}   % add the new binding
    end;
match1({match, _, Pat1, Pat2}, Term, Bs0) ->
    {match, Bs1} = match1(Pat1, Term, Bs0),
    match1(Pat2, Term, Bs1);
match1({cons, _, H, T}, [H1 | T1], Bs0) ->
    {match, Bs} = match1(H, H1, Bs0),
    match1(T, T1, Bs);
match1({tuple, _, Es}, Tuple, Bs) when length(Es) =:= tuple_size(Tuple) ->
    match_tuple(Es, Tuple, 1, Bs);
match1(_, _, _) ->
    throw(nomatch).

%% Matches a list of element patterns against a tuple, element by element
%% (1-based index); throws 'nomatch' via match1/3 on the first mismatch.
-spec match_tuple(Values, Tuple, Index, Bindings) -> {match, NewBindings} | no_return() when
    Values :: [cauder_types:af_literal()],
    Tuple :: tuple(),
    Index :: pos_integer(),
    Bindings :: cauder_types:environment(),
    NewBindings :: cauder_types:environment().

match_tuple([], _, _, Bs) -> {match, Bs};
match_tuple([E | Es], Tuple, I, Bs0) ->
    {match, Bs} = match1(E, element(I, Tuple), Bs0),
    match_tuple(Es, Tuple, I + 1, Bs).
%% @doc Evaluates a guard sequence; true when ANY guard in the sequence is
%% true (empty sequence is true). Result is wrapped back in abstract form.
-spec eval_guard_seq(Bindings, Guards) -> Boolean when
    Bindings :: cauder_types:environment(),
    Guards :: cauder_types:af_guard_seq(),
    Boolean :: cauder_types:af_boolean().

eval_guard_seq(_, []) -> abstract(true);
eval_guard_seq(Bs, Guards) when is_list(Guards) ->
    % In a guard sequence, guards are evaluated until one is true. The remaining guards, if any, are not evaluated.
    % See: https://erlang.org/doc/reference_manual/expressions.html#guard-sequences
    AnyTrue = lists:any(fun(Guard) -> concrete(eval_guard(Bs, Guard)) end, Guards),
    abstract(AnyTrue).
%% @doc Evaluates a single guard (a conjunction): true only when ALL guard
%% tests are true. Result is wrapped back in abstract form.
-spec eval_guard(Bindings, Guard) -> Boolean when
    Bindings :: cauder_types:environment(),
    Guard :: cauder_types:af_guard(),
    Boolean :: cauder_types:af_boolean().

eval_guard(Bs, Guard) when is_list(Guard) ->
    AllTrue = lists:all(fun(Test) -> concrete(eval_guard_test(Bs, Test)) end, Guard),
    abstract(AllTrue).
%% @doc Fully reduces a single guard test by repeatedly stepping it with
%% expr/3 (using an empty call stack) until it is no longer reducible.
-spec eval_guard_test(Bindings, GuardTest) -> GuardTest | Boolean when
    Bindings :: cauder_types:environment(),
    GuardTest :: cauder_types:af_guard_test(),
    Boolean :: cauder_types:af_boolean().

eval_guard_test(Bs, Gt0) ->
    case is_reducible(Gt0, Bs) of
        false ->
            Gt0;
        true ->
            #result{exprs = [Gt1]} = expr(Bs, Gt0, []),
            eval_guard_test(Bs, Gt1)
    end.
%%%=============================================================================
%%------------------------------------------------------------------------------
%% @doc Converts the given Erlang term into its abstract form.
-spec abstract(Term) -> Literal when
Term :: term(),
Literal :: cauder_types:af_literal().
%% Line number 0 marks the literal as synthetic (no real source location).
abstract(Value) -> {value, 0, Value}.
%%------------------------------------------------------------------------------
%% @doc Converts the given abstract literal element into the Erlang term that it
%% represents.
-spec concrete(Literal) -> Term when
Literal :: cauder_types:af_literal(),
Term :: term().
concrete({value, _, Value}) -> Value;
%% Only a cons cell of two already-evaluated literals is handled here; deeper
%% or partially-evaluated structures are presumably reduced to {value, _, _}
%% before reaching this function -- TODO confirm with callers.
concrete({cons, _, {value, _, H}, {value, _, T}}) -> [H | T].
%%------------------------------------------------------------------------------
%% @doc Tells whether the given abstract expression (or list of expressions)
%% can still be reduced under the given environment. Values, the anonymous
%% variable, and unbound non-temporary variables are irreducible; any other
%% abstract form (a tuple not matched below) is considered reducible.
-spec is_reducible(Expression | [Expression], Bindings) -> IsReducible when
    Expression :: cauder_types:abstract_expr(),
    Bindings :: cauder_types:environment(),
    IsReducible :: boolean().
is_reducible([], _Bs) ->
    false;
%% A list is reducible when any element is (orelse short-circuits).
is_reducible([Expr | Rest], Bs) ->
    is_reducible(Expr, Bs) orelse is_reducible(Rest, Bs);
is_reducible({value, _, _}, _Bs) ->
    false;
%% '_' must stay before the named-variable clause: it binds nothing.
is_reducible({var, _, '_'}, _Bs) ->
    false;
is_reducible({var, _, VarName}, Bs) ->
    not cauder_utils:is_temp_variable_name(VarName) andalso maps:is_key(VarName, Bs);
is_reducible({cons, _, Head, Tail}, Bs) ->
    is_reducible(Head, Bs) orelse is_reducible(Tail, Bs);
is_reducible({tuple, _, Elements}, Bs) ->
    is_reducible(Elements, Bs);
%% Catch-all: any other abstract form still needs evaluation.
is_reducible(Expr, _Bs) when is_tuple(Expr) ->
    true.
%%------------------------------------------------------------------------------
%% @doc Tells whether the given abstract expression (or list of expressions)
%% is fully evaluated, i.e. consists only of literal values, possibly nested
%% inside cons cells and tuples.
-spec is_value(Expression | [Expression]) -> IsValue when
    Expression :: cauder_types:abstract_expr(),
    IsValue :: boolean().
is_value({value, _, _}) ->
    true;
is_value({cons, _, Head, Tail}) ->
    is_value(Head) andalso is_value(Tail);
is_value({tuple, _, Elements}) ->
    is_value(Elements);
%% Any other abstract form is not yet a value.
is_value(Expr) when is_tuple(Expr) ->
    false;
is_value([]) ->
    true;
%% A list of expressions is a value when every element is.
is_value([Expr | Rest]) ->
    is_value(Expr) andalso is_value(Rest).
%%------------------------------------------------------------------------------
%% @doc Returns the module of the innermost stack entry whose call identifier
%% has the {{Module, _, _}, _, _, _} shape; 'error' for an empty stack.
-spec current_module(Stack) -> {ok, Module} | error when
    Stack :: cauder_types:stack(),
    Module :: module().
current_module([]) ->
    error;
current_module([{{Module, _, _}, _, _, _} | _]) ->
    {ok, Module};
%% Skip entries of any other shape and keep scanning downwards.
current_module([_Other | Rest]) ->
    current_module(Rest).
-spec eval_and_update({Bindings, Expression | [Expression], Stack}, {Index, Tuple}) -> Result when
Bindings :: cauder_types:environment(),
Expression :: cauder_types:abstract_expr(),
Stack :: cauder_types:stack(),
Index :: pos_integer(),
Tuple :: tuple(),
Result :: cauder_types:result().
%% Evaluates one step of the given expression (or expression list) and writes
%% the partially-reduced result back into position Index of Tuple, rebuilding
%% the enclosing abstract term around the reduced subexpression.
eval_and_update({Bs, Es, Stk}, {Index, Tuple}) when is_list(Es) ->
%% The whole reduced list is stored as a single tuple element.
R = #result{exprs = Es1} = eval_list(Bs, Es, Stk),
R#result{exprs = [setelement(Index, Tuple, Es1)]};
eval_and_update({Bs, E, Stk}, {Index, Tuple}) ->
%% expr/3 yields exactly one resulting expression for a single input.
R = #result{exprs = [E1]} = expr(Bs, E, Stk),
R#result{exprs = [setelement(Index, Tuple, E1)]}. | src/cauder_eval.erl | 0.557725 | 0.562267 | cauder_eval.erl | starcoder |
%%%-------------------------------------------------------------------
%%% @author <NAME>
%%% @copyright (C) 2021 ACK CYFRONET AGH
%%% This software is released under the MIT license
%%% cited in 'LICENSE.txt'.
%%% @end
%%%-------------------------------------------------------------------
%%% @doc
%%% This module implements a datastore internal structure that stores an
%%% infinite log - an append-only list of logs (with arbitrary text content)
%%% with monotonic timestamps. Apart from timestamp, each entry is assigned a
%%% consecutive index, starting from zero (first, oldest log). The API offers
%%% listing the log entries, starting from requested offsets, entry indices or
%%% timestamps.
%%%
%%% Internally, the log is divided into nodes, stored in separate documents,
%%% according to the requested max_entries_per_node parameter. The nodes do not
%%% need any relations between each other, as they are addressed using offsets
%%% in the log and deterministically generated documents ids.
%%% Consider a log with Id "ab", its documents would look like the following:
%%%
%%% Id: ab x 0 Id: ab x 1 Id: ab x n Id: ab
%%% +--------+ +--------+ +--------+ +----------+
%%% | | | | | | | sentinel |
%%% | node | | node | ... | node | |+--------+|
%%% | 0 | | 1 | | n | || ||
%%% +--------+ +--------+ +--------+ || node ||
%%% || n + 1 ||
%%% |+--------+|
%%% +----------+
%%%
%%% The newest node is stored in the sentinel for performance reasons. When it
%%% gets full, it is saved as a new archival node with number (n + 1) and no
%%% longer modified - only read when listing is performed.
%%%
%%% Entries are stored in lists and new ones are always prepended, which means
%%% that the order of entries in a node is descending index-wise.
%%%
%%% The infinite log supports three ways of automatic cleaning:
%%%
%%% * TTL (Time To Live) - a TTL can be explicitly set, making ALL the log
%%% data expire after a certain time. During that time, the log can still
%%% be read and new entries can still be appended.
%%%
%%% * size based pruning - oldest nodes are pruned when the total log size
%%% exceeds a certain threshold. The threshold is soft - the pruning happens
%%% when the log size is equal to threshold + max_elements_per_node, so that
%%% after the pruning, the number of entries left is equal to the threshold.
%%%
%%% * age based pruning - oldest nodes are pruned when all entries in given
%%% node are older than the threshold. If this option is chosen, the nodes
%%% are assigned a TTL so that they expire on the database level, even if
%%% the pruning is not applied.
%%%
%%% In case of size/age based pruning, only whole nodes are deleted (when all
%%% entries in the node satisfy the pruning condition). The newest node
%%% (buffered inside sentinel) is never pruned, which means that the log can
%%% still contain some entries that satisfy the pruning threshold, but will not
%%% be pruned unless the log grows.
%%%
%%% Setting a TTL causes the whole log to be completely deleted after given
%%% time. In the specific case when the age-based pruning is also set, the TTL
%%% overrides the document's expiration time, even if pruning threshold is longer.
%%% @end
%%%-------------------------------------------------------------------
-module(infinite_log).
-author("<NAME>").
-include("modules/datastore/infinite_log.hrl").
-include_lib("ctool/include/logging.hrl").
%% API
-export([create/4, destroy/3]).
-export([append/4]).
-export([list/4, list/5]).
-export([set_ttl/4]).
% unit of timestamps used across the module for stamping entries and searching
-type timestamp() :: time:millis().
% id of an infinite log instance as stored in database
-type log_id() :: binary().
%% @formatter:off
-type log_opts() :: #{
max_entries_per_node => pos_integer(),
size_pruning_threshold => undefined | non_neg_integer(),
age_pruning_threshold => undefined | time:seconds()
}.
%% @formatter:on
% content of a log entry, must be a text (suitable for JSON),
% if needed may encode some arbitrary structures as a JSON or base64
-type content() :: binary().
-type entry() :: {timestamp(), content()}.
% single entry in the log, numbered from 0 (oldest entry)
-type entry_index() :: non_neg_integer().
-export_type([timestamp/0, log_id/0, log_opts/0, content/0, entry/0, entry_index/0]).
% Indicates if the calling process is suitable for updating the log data, or may
% only cause read only access to the documents. In case of 'readonly' access
% mode, when a log document update is required during the requested operation,
% the operation will fail with '{error, update_required}'.
-type access_mode() :: readonly | allow_updates.
-export_type([access_mode/0]).
-type ctx() :: datastore_doc:ctx().
-type batch() :: datastore_doc:batch() | undefined.
-export_type([ctx/0, batch/0]).
% macros used to determine safe log content size
% couchbase sets the document size limit at 20MB, assume 19MB as safe
-define(SAFE_NODE_DB_SIZE, 19000000).
-define(APPROXIMATE_EMPTY_ENTRY_DB_SIZE, 500).
%%=====================================================================
%% API
%%=====================================================================
-spec create(ctx(), log_id(), log_opts(), batch()) -> {ok | {error, term()}, batch()}.
%% Creates a new log by saving a fresh sentinel document under LogId.
%% Unspecified options fall back to defaults: ?DEFAULT_MAX_ENTRIES_PER_NODE
%% for node capacity, and 'undefined' (disabled) for both pruning thresholds.
create(Ctx, LogId, Opts, InitialBatch) ->
infinite_log_sentinel:save(Ctx, LogId, #infinite_log_sentinel{
log_id = LogId,
max_entries_per_node = maps:get(max_entries_per_node, Opts, ?DEFAULT_MAX_ENTRIES_PER_NODE),
size_pruning_threshold = maps:get(size_pruning_threshold, Opts, undefined),
age_pruning_threshold = maps:get(age_pruning_threshold, Opts, undefined)
}, InitialBatch).
-spec destroy(ctx(), log_id(), batch()) -> {ok | {error, term()}, batch()}.
%% @doc Deletes the whole log: every archival node document and finally the
%% sentinel itself. Destroying a log that does not exist succeeds (idempotent).
destroy(Ctx, LogId, InitialBatch) ->
    case infinite_log_sentinel:acquire(Ctx, LogId, skip_pruning, allow_updates, InitialBatch) of
        {{ok, Sentinel}, AcquireBatch} ->
            DeleteNode = fun(_LogId, NodeNumber, AccBatch) ->
                infinite_log_node:delete(Ctx, LogId, NodeNumber, AccBatch)
            end,
            case apply_for_archival_log_nodes(Sentinel, DeleteNode, AcquireBatch) of
                {{error, _}, _} = ErrorResponse ->
                    ErrorResponse;
                {ok, ReturnedBatch} ->
                    infinite_log_sentinel:delete(Ctx, LogId, ReturnedBatch)
            end;
        {{error, not_found}, Batch2} ->
            %% already gone - treat as success
            {ok, Batch2};
        %% Consistency fix: other acquire errors used to crash with
        %% case_clause here; propagate them like append/list/set_ttl do.
        {{error, _}, _} = AcquireError ->
            AcquireError
    end.
-spec append(ctx(), log_id(), content(), batch()) -> {ok | {error, term()}, batch()}.
%% Appends Content to the newest (sentinel-buffered) node, after checking
%% that the payload fits within the safe per-entry size limit.
append(Ctx, LogId, Content, InitialBatch) when is_binary(LogId) ->
    case infinite_log_sentinel:acquire(Ctx, LogId, skip_pruning, allow_updates, InitialBatch) of
        {{ok, Sentinel}, BatchAfterAcquire} ->
            case sanitize_append_request(Sentinel, Content) of
                ok ->
                    infinite_log_sentinel:append(Ctx, Sentinel, Content, BatchAfterAcquire);
                {error, _} = SanitizeError ->
                    {SanitizeError, BatchAfterAcquire}
            end;
        {{error, _}, _} = AcquireError ->
            AcquireError
    end.
-spec list(ctx(), log_id(), infinite_log_browser:listing_opts(), batch()) ->
{{ok, infinite_log_browser:listing_result()} | {error, term()}, batch()}.
%% Convenience wrapper for list/5 that permits document updates (pruning).
list(Ctx, LogId, Opts, InitialBatch) ->
list(Ctx, LogId, Opts, allow_updates, InitialBatch).
-spec list(ctx(), log_id(), infinite_log_browser:listing_opts(), access_mode(), batch()) ->
{{ok, infinite_log_browser:listing_result()} | {error, term()}, batch()}.
%% Lists log entries. In 'readonly' access mode, any operation that would
%% require a document update fails with {error, update_required}.
list(Ctx, LogId, Opts, AccessMode, InitialBatch) ->
% age based pruning must be attempted at every listing as some of
% the log nodes may have expired
case infinite_log_sentinel:acquire(Ctx, LogId, apply_pruning, AccessMode, InitialBatch) of
{{error, _}, _} = AcquireError ->
AcquireError;
{{ok, Sentinel}, AcquireBatch} ->
try
{Res, FinalDatastoreBatch} = infinite_log_browser:list(Ctx, Sentinel, Opts, AcquireBatch),
{{ok, Res}, FinalDatastoreBatch}
catch Class:Reason:Stacktrace ->
%% Browsing errors are logged and masked as internal_server_error;
%% the batch from before the failed listing is returned.
?error_stacktrace("Unexpected error during infinite log listing (id: ~s) - ~w:~p", [
LogId, Class, Reason
], Stacktrace),
{{error, internal_server_error}, AcquireBatch}
end
end.
%%--------------------------------------------------------------------
%% @doc
%% Makes the log expire (be deleted from database) after specified Time To Live.
%% The procedure iterates through all documents used up by the log: the TTL is
%% first applied to every archival node, then to the sentinel itself.
%% @end
%%--------------------------------------------------------------------
-spec set_ttl(ctx(), log_id(), time:seconds(), batch()) -> {ok | {error, term()}, batch()}.
set_ttl(Ctx, LogId, Ttl, InitialBatch) ->
    case infinite_log_sentinel:acquire(Ctx, LogId, skip_pruning, allow_updates, InitialBatch) of
        {{error, _}, _} = AcquireError ->
            AcquireError;
        {{ok, Sentinel}, AcquireBatch} ->
            SetNodeTtl = fun(_LogId, NodeNumber, InternalBatch) ->
                infinite_log_node:set_ttl(Ctx, LogId, NodeNumber, Ttl, InternalBatch)
            end,
            case apply_for_archival_log_nodes(Sentinel, SetNodeTtl, AcquireBatch) of
                {{error, _}, _} = SetTtlError ->
                    SetTtlError;
                {ok, UpdatedBatch} ->
                    %% BUG FIX: was 'sve_with_ttl' - a typo that would crash
                    %% with 'undef' at runtime; the sentinel module's save
                    %% function family is named save* (cf. save/4 above).
                    infinite_log_sentinel:save_with_ttl(Ctx, LogId, Ttl, UpdatedBatch)
            end
    end.
%%=====================================================================
%% Internal functions
%%=====================================================================
%% @private
%% Rejects an append whose payload would not fit safely within a log node.
-spec sanitize_append_request(infinite_log_sentinel:record(), content()) -> ok | {error, term()}.
sanitize_append_request(Sentinel, Content) ->
    MaxContentSize = safe_log_content_size(Sentinel),
    case byte_size(Content) of
        Size when Size > MaxContentSize -> {error, log_content_too_large};
        _ -> ok
    end.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% The limit is calculated not to exceed max couchbase document size, assuming
%% all logs are of maximum length and adding some safe margins for json encoding.
%% This averaged maximum value is used as the limit for all entries in the log.
%% @end
%%--------------------------------------------------------------------
-spec safe_log_content_size(infinite_log_sentinel:record()) -> integer().
%% 'div' binds tighter than '-', so this computes
%% (?SAFE_NODE_DB_SIZE div MaxEntriesPerNode) - ?APPROXIMATE_EMPTY_ENTRY_DB_SIZE.
safe_log_content_size(#infinite_log_sentinel{max_entries_per_node = MaxEntriesPerNode}) ->
?SAFE_NODE_DB_SIZE div MaxEntriesPerNode - ?APPROXIMATE_EMPTY_ENTRY_DB_SIZE.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Applies given function on all log nodes that are archival (will never be modified),
%% i.e. all nodes apart from the newest one (buffer) included in the sentinel.
%% Stops with an error if the function returns one.
%% @end
%%--------------------------------------------------------------------
-spec apply_for_archival_log_nodes(
infinite_log_sentinel:record(),
fun((log_id(), infinite_log_node:node_number(), batch()) -> {ok | {error, term()}, batch()}),
batch()
) ->
{ok | {error, term()}, batch()}.
apply_for_archival_log_nodes(Sentinel = #infinite_log_sentinel{log_id = LogId}, Callback, InitialBatch) ->
%% Archival nodes are numbered 0 .. BufferNodeNumber - 1; the buffer node
%% itself lives inside the sentinel and is intentionally skipped. When the
%% buffer is node 0, lists:seq(0, -1) yields [] and the fold is a no-op.
BufferNodeNumber = infinite_log_node:newest_node_number(Sentinel),
ArchivalNodeNumbers = lists:seq(0, BufferNodeNumber - 1),
%% Threads the batch through the callback, halting on the first error.
lists_utils:foldl_while(fun(NodeNumber, {ok, AccBatch}) ->
case Callback(LogId, NodeNumber, AccBatch) of
{ok, _} = OkResponse->
{cont, OkResponse};
{{error, _}, _} = ErrorResponse ->
{halt, ErrorResponse}
end
end, {ok, InitialBatch}, ArchivalNodeNumbers). | src/modules/datastore/infinite_log/infinite_log.erl | 0.520253 | 0.53777 | infinite_log.erl | starcoder |
-module(slacker_conversation).
-include("spec.hrl").
-export([archive/2, create/2, history/3, info/2, invite/3,
join/2, kick/3, leave/2, list/2, mark/3, rename/3,
set_purpose/3, set_topic/3, unarchive/2]).
%% @doc Archives a conversation.
-spec archive(Token :: string(), Channel :: string()) -> http_response().
archive(Token, Channel) ->
    Query = [{"token", Token}, {"channel", Channel}],
    slacker_request:send("conversations.archive", Query).
%% @doc Creates a conversation with the given name.
-spec create(Token :: string(), Name :: string()) -> http_response().
create(Token, Name) ->
    Query = [{"token", Token}, {"name", Name}],
    slacker_request:send("conversations.create", Query).
%% @doc Fetches history of messages and events from a conversation.
%%
%% Supported options:
%% latest: end of time range of messages to include in results
%% oldest: start of time range of messages to include in results
%% inclusive: include messages with latest or oldest timestamp in results (default: 0)
%% count: number of messages to return, between 1 and 1000 (default: 100)
%% unreads: include unread_count_display in the output (default: 0)
%%
-spec history(Token :: string(), Channel :: string(), Options :: list()) -> http_response().
history(Token, Channel, Options) ->
    Query = [{"token", Token}, {"channel", Channel}],
    slacker_request:send("conversations.history", Query, Options).
%% @doc Returns information about a team conversation.
-spec info(Token :: string(), Channel :: string()) -> http_response().
info(Token, Channel) ->
    Query = [{"token", Token}, {"channel", Channel}],
    slacker_request:send("conversations.info", Query).
%% @doc Invites a user to a conversation.
-spec invite(Token :: string(), Channel :: string(), User :: string()) -> http_response().
invite(Token, Channel, User) ->
    Query = [{"token", Token}, {"channel", Channel}, {"user", User}],
    slacker_request:send("conversations.invite", Query).
%% @doc Join a conversation. If the conversation does not exist, it is created.
-spec join(Token :: string(), Channel :: string()) -> http_response().
join(Token, Channel) ->
    Query = [{"token", Token}, {"channel", Channel}],
    slacker_request:send("conversations.join", Query).
%% @doc Removes a user from a conversation.
-spec kick(Token :: string(), Channel :: string(), User :: string()) -> http_response().
kick(Token, Channel, User) ->
    Query = [{"token", Token}, {"channel", Channel}, {"user", User}],
    slacker_request:send("conversations.kick", Query).
%% @doc Leave a conversation.
-spec leave(Token :: string(), Channel :: string()) -> http_response().
leave(Token, Channel) ->
    Query = [{"token", Token}, {"channel", Channel}],
    slacker_request:send("conversations.leave", Query).
%% @doc List of all conversations in the team.
%%
%% Supported options:
%% exclude_archived: do not return archived conversations (default: 0)
%%
-spec list(Token :: string(), Options :: list()) -> http_response().
list(Token, Options) ->
    Query = [{"token", Token}],
    slacker_request:send("conversations.list", Query, Options).
%% @doc Set read cursor in a conversation.
-spec mark(Token :: string(), Channel :: string(), Timestamp :: string()) -> http_response().
mark(Token, Channel, Timestamp) ->
    Query = [{"token", Token}, {"channel", Channel}, {"ts", Timestamp}],
    slacker_request:send("conversations.mark", Query).
%% @doc Rename a conversation.
-spec rename(Token :: string(), Channel :: string(), Name :: string()) -> http_response().
rename(Token, Channel, Name) ->
    Query = [{"token", Token}, {"channel", Channel}, {"name", Name}],
    slacker_request:send("conversations.rename", Query).
%% @doc Sets the purpose for a conversation.
-spec set_purpose(Token :: string(), Channel :: string(), Purpose :: string()) -> http_response().
set_purpose(Token, Channel, Purpose) ->
    Query = [{"token", Token}, {"channel", Channel}, {"purpose", Purpose}],
    slacker_request:send("conversations.setPurpose", Query).
%% @doc Sets the topic for a conversation.
-spec set_topic(Token :: string(), Channel :: string(), Topic :: string()) -> http_response().
set_topic(Token, Channel, Topic) ->
    Query = [{"token", Token}, {"channel", Channel}, {"topic", Topic}],
    slacker_request:send("conversations.setTopic", Query).
%% @doc Unarchives a conversation.
-spec unarchive(Token :: string(), Channel :: string()) -> http_response().
%% Reverses a previous conversations.archive call via the Slack Web API.
unarchive(Token, Channel) ->
slacker_request:send("conversations.unarchive", [{"token", Token},{"channel", Channel}]). | src/slacker_conversation.erl | 0.564098 | 0.439266 | slacker_conversation.erl | starcoder |
-module(crdt_set_or_SUITE).
%% Callbacks
-export(
[ all/0
, groups/0
]).
%% Test cases
-export(
[ t_replicate_crud_merge/1
, t_serialization/1
]).
-define(GROUP , crdt_set_or).
%% ============================================================================
%% Common Test callbacks
%% ============================================================================
%% All test cases run through the single ?GROUP test group.
all() ->
[{group, ?GROUP}].
%% Defines one test group (named by ?GROUP) that contains every case in this
%% suite, with no extra group properties.
groups() ->
    TestCases =
        [ t_replicate_crud_merge
        , t_serialization
        ],
    [{?GROUP, [], TestCases}].
%% =============================================================================
%% Test cases
%% =============================================================================
%% Exercises OR-set semantics across three replicas: observed removes win,
%% concurrent adds survive removes that did not observe them, and removes of
%% never-observed values are no-ops.
t_replicate_crud_merge(_Cfg) ->
    ValA = <<"foo">>, % Replicated from A into B and C
    ValB = <<"bar">>, % Concurrently added in B and C
    ValC = <<"qux">>, % Unique to C, unobserved from B
    % Initialize original
    SetA1 = crdt_set_or:empty(),
    false = crdt_set_or:is_member(SetA1, ValA),
    SetA2 = crdt_set_or:add(SetA1, ValA),
    SetA2 = crdt_set_or:add(SetA2, ValA), % Re-adding must have no effect
    true = crdt_set_or:is_member(SetA2, ValA),
    % Replicate
    SetB1 = SetA2,
    SetC1 = SetA2,
    % Assert replicated value in both replicas
    true = crdt_set_or:is_member(SetB1, ValA),
    true = crdt_set_or:is_member(SetC1, ValA),
    % Concurrently add the same value in both replicas
    SetB2 = crdt_set_or:add(SetB1, ValB),
    SetC2 = crdt_set_or:add(SetC1, ValB),
    % Remove a value B has never observed - must be a no-op after merge
    SetB3 = crdt_set_or:remove(SetB2, ValC),
    % Add unique value in C
    SetC3 = crdt_set_or:add(SetC2, ValC),
    % Remove replicated value in B (observed remove - wins in all replicas)
    SetB4 = crdt_set_or:remove(SetB3, ValA),
    false = crdt_set_or:is_member(SetB4, ValA),
    % Remove the concurrently added value in B; only B's own add was
    % observed, so C's concurrent add must survive the merge
    SetB5 = crdt_set_or:remove(SetB4, ValB),
    false = crdt_set_or:is_member(SetB5, ValB),
    % Merge.
    % BUG FIX: previously folded over [SetB4, SetC3], leaving SetB5 unused,
    % so the "remove concurrently added" step was never actually verified.
    SetD = lists:foldl(fun crdt_set_or:merge/2, SetA2, [SetB5, SetC3]),
    % Observed-removed replicated value is gone everywhere
    false = crdt_set_or:is_member(SetD, ValA),
    % One of the concurrently added instances (C's) is still there
    true = crdt_set_or:is_member(SetD, ValB),
    % BUG FIX: previously asserted ValB twice; the unobserved-remove check
    % needs ValC, which B removed without ever observing it
    true = crdt_set_or:is_member(SetD, ValC).
%% Round-trips a set through its binary form and checks that a malformed
%% binary is rejected with a parsing error.
t_serialization(_Cfg) ->
%% Identity value codecs: values are already binaries.
ValToBin = fun (Val) -> Val end,
ValOfBin = fun (Bin) -> {ok, Bin} end,
ValA = <<"foo">>,
ValB = <<"bar">>,
Set0 = crdt_set_or:empty(),
Set1 = crdt_set_or:add (Set0 , ValA),
Set2 = crdt_set_or:add (Set1 , ValB),
Set3 = crdt_set_or:remove (Set2 , ValB),
%% Serialize and deserialize; the result must equal the original set.
SetBin = crdt_set_or:to_bin (Set3 , ValToBin),
{ok, Set3} = crdt_set_or:of_bin (SetBin , ValOfBin),
true = crdt_set_or:is_member(Set3 , ValA),
false = crdt_set_or:is_member(Set3 , ValB),
%% A syntactically valid JSON object with the wrong schema must not parse.
BadBin = <<"{\"foo\": []}">>,
{error, {parsing_error, _}} = crdt_set_or:of_bin(BadBin , ValOfBin). | test/crdt_set_or_SUITE.erl | 0.503418 | 0.507324 | crdt_set_or_SUITE.erl | starcoder |
%% @author Couchbase <<EMAIL>>
%% @copyright 2016-2019 Couchbase, Inc.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% 1. Permission is defined as a pair {object, operation}
%% 2. Objects are organized in the tree structure with common root []
%% 3. One vertex of this tree can be parametrized: {bucket, bucket_name},
%% wildcard all can be used in place of bucket_name
%% 4. Permission pattern is a pair {Object pattern, Allowed operations}
%% 5. Allowed operations can be list of operations, all or none
%% 6. Object pattern is a list of vertices that define a certain subtree of the objects tree
%% 7. Object pattern vertex {bucket, bucket_name} always matches object vertex {bucket, any},
%% object pattern vertex {bucket, any} matches {bucket, bucket_name} with any bucket_name
%% otherwise vertices match if they are equal
%% 8. Object matches the object pattern if all the vertices of object pattern match
%% corresponding vertices of the object.
%% 9. Each role is defined as a list of permission patterns.
%% 10.To find which operations are allowed for certain object in certain role we look for the
%% first permission pattern with matching object pattern in the permission pattern list of
%% the role.
%% 11.The permission is allowed by the role if its operation is among the allowed operations
%% for its object.
%% 12.Each user can have multiple roles assigned
%% 13.Certain permission is allowed to the user if it is allowed at least by one of the roles
%% assigned to user.
%% @doc roles and permissions implementation
-module(menelaus_roles).
-include("ns_common.hrl").
-include("ns_config.hrl").
-include("rbac.hrl").
-include("pipes.hrl").
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-define(DEFAULT_EXTERNAL_ROLES_POLLING_INTERVAL, 10*60*1000).
-export([get_definitions/0,
get_definitions/1,
roles_45/0,
is_allowed/2,
get_roles/1,
get_compiled_roles/1,
compile_roles/3,
validate_roles/2,
calculate_possible_param_values/1,
filter_out_invalid_roles/3,
produce_roles_by_permission/3,
get_security_roles/0,
external_auth_polling_interval/0]).
-export([start_compiled_roles_cache/0]).
%% for RPC from ns_couchdb node
-export([build_compiled_roles/1]).
%% Role definitions as of the 4.5 release. Each entry is
%% {RoleName, Params, UiMetadata, PermissionPatterns}; patterns are matched
%% top-down, so more specific object patterns must come first (see the
%% module header for the matching rules).
-spec roles_45() -> [rbac_role_def(), ...].
roles_45() ->
[{admin, [],
[{name, <<"Admin">>},
{desc, <<"Can manage ALL cluster features including security.">>}],
[{[], all}]},
{ro_admin, [],
[{name, <<"Read Only Admin">>},
{desc, <<"Can view ALL cluster features.">>}],
[{[{bucket, any}, password], none},
{[{bucket, any}, data], none},
{[admin, security], [read]},
{[admin], none},
{[], [read]}]},
{cluster_admin, [],
[{name, <<"Cluster Admin">>},
{desc, <<"Can manage all cluster features EXCEPT security.">>}],
[{[admin], none},
{[n1ql, curl], none},
{[], all}]},
{bucket_admin, [bucket_name],
[{name, <<"Bucket Admin">>},
{desc, <<"Can manage ALL bucket features for specified buckets (incl. start/stop XDCR)">>}],
[{[{bucket, bucket_name}, xdcr], [read, execute]},
{[{bucket, bucket_name}], all},
{[{bucket, any}, settings], [read]},
{[{bucket, any}], none},
{[xdcr], none},
{[admin], none},
{[], [read]}]},
%% No UI metadata: internal role, not shown to users in the UI
%% (presumably - TODO confirm against the UI role listing code).
{bucket_full_access, [bucket_name],
[],
[{[{bucket, bucket_name}, data], all},
{[{bucket, bucket_name}, views], all},
{[{bucket, bucket_name}, n1ql, index], all},
{[{bucket, bucket_name}, n1ql], [execute]},
{[{bucket, bucket_name}], [read, flush]},
{[{bucket, bucket_name}, settings], [read]},
{[pools], [read]}]},
{views_admin, [bucket_name],
[{name, <<"Views Admin">>},
{desc, <<"Can manage views for specified buckets">>}],
[{[{bucket, bucket_name}, views], all},
{[{bucket, bucket_name}, data], [read]},
{[{bucket, any}, settings], [read]},
{[{bucket, any}], none},
{[{bucket, bucket_name}, n1ql], [execute]},
{[xdcr], none},
{[admin], none},
{[], [read]}]},
{replication_admin, [],
[{name, <<"Replication Admin">>},
{desc, <<"Can manage ONLY XDCR features (cluster AND bucket level)">>}],
[{[{bucket, any}, xdcr], all},
{[{bucket, any}, data], [read]},
{[{bucket, any}, settings], [read]},
{[{bucket, any}], none},
{[xdcr], all},
{[admin], none},
{[], [read]}]}].
%% Role definitions as of the 5.0 release. Same structure as roles_45/0;
%% the {ce, true} metadata flag marks roles available in Community Edition.
-spec roles_50() -> [rbac_role_def(), ...].
roles_50() ->
[{admin, [],
[{name, <<"Admin">>},
{desc, <<"Can manage ALL cluster features including security.">>},
{ce, true}],
[{[], all}]},
{ro_admin, [],
[{name, <<"Read Only Admin">>},
{desc, <<"Can view ALL cluster features.">>},
{ce, true}],
[{[{bucket, any}, password], none},
{[{bucket, any}, data], none},
{[admin, security], [read]},
{[admin], none},
{[], [read, list]}]},
{cluster_admin, [],
[{name, <<"Cluster Admin">>},
{desc, <<"Can manage all cluster features EXCEPT security.">>}],
[{[admin, internal], none},
{[admin, security], none},
%% NOTE(review): bare atom 'read' here, unlike the [read] lists used
%% elsewhere - verify the permission matcher accepts both forms.
{[admin, diag], read},
{[n1ql, curl], none},
{[], all}]},
{bucket_admin, [bucket_name],
[{name, <<"Bucket Admin">>},
{desc, <<"Can manage ALL bucket features for specified buckets (incl. start/stop XDCR)">>}],
[{[{bucket, bucket_name}, xdcr], [read, execute]},
{[{bucket, bucket_name}], all},
{[{bucket, any}, settings], [read]},
{[{bucket, any}], none},
{[xdcr], none},
{[admin], none},
{[], [read]}]},
{bucket_full_access, [bucket_name],
[{name, <<"Bucket Full Access">>},
{desc, <<"Full access to bucket data">>},
{ce, true}],
[{[{bucket, bucket_name}, data], all},
{[{bucket, bucket_name}, views], all},
{[{bucket, bucket_name}, n1ql, index], all},
{[{bucket, bucket_name}, n1ql], [execute]},
{[{bucket, bucket_name}], [read, flush]},
{[{bucket, bucket_name}, settings], [read]},
{[pools], [read]}]},
{views_admin, [bucket_name],
[{name, <<"Views Admin">>},
{desc, <<"Can manage views for specified buckets">>}],
[{[{bucket, bucket_name}, views], all},
{[{bucket, bucket_name}, data], [read]},
{[{bucket, any}, settings], [read]},
{[{bucket, any}], none},
{[{bucket, bucket_name}, n1ql], [execute]},
{[xdcr], none},
{[admin], none},
{[], [read]}]},
{views_reader, [bucket_name],
[{name, <<"Views Reader">>},
{desc, <<"Can read data from the views of specified bucket">>}],
[{[{bucket, bucket_name}, views], [read]},
{[{bucket, bucket_name}, data, docs], [read]},
{[pools], [read]}]},
%% NOTE(review): <<"<NAME>">> is anonymization residue in this dataset
%% copy (upstream presumably reads "Replication Admin"); it is a runtime
%% string, so it is deliberately left untouched here.
{replication_admin, [],
[{name, <<"<NAME>">>},
{desc, <<"Can manage ONLY XDCR features (cluster AND bucket level)">>}],
[{[{bucket, any}, xdcr], all},
{[{bucket, any}, data], [read]},
{[{bucket, any}, settings], [read]},
{[{bucket, any}], none},
{[xdcr], all},
{[admin], none},
{[], [read]}]},
{data_reader, [bucket_name],
[{name, <<"Data Reader">>},
{desc, <<"Can read information from specified bucket">>}],
[{[{bucket, bucket_name}, data, docs], [read]},
{[{bucket, bucket_name}, data, meta], [read]},
{[{bucket, bucket_name}, data, xattr], [read]},
{[{bucket, bucket_name}, settings], [read]},
{[pools], [read]}]},
{data_writer, [bucket_name],
[{name, <<"Data Writer">>},
{desc, <<"Can write information from/to specified bucket">>}],
[{[{bucket, bucket_name}, data, docs], [insert, upsert, delete]},
{[{bucket, bucket_name}, data, xattr], [write]},
{[{bucket, bucket_name}, settings], [read]},
{[pools], [read]}]},
{data_dcp_reader, [bucket_name],
[{name, <<"Data DCP Reader">>},
{desc, <<"Can read DCP data streams">>}],
[{[{bucket, bucket_name}, data, docs], [read]},
{[{bucket, bucket_name}, data, meta], [read]},
{[{bucket, bucket_name}, data, dcp], [read]},
{[{bucket, bucket_name}, data, sxattr], [read]},
{[{bucket, bucket_name}, data, xattr], [read]},
{[{bucket, bucket_name}, settings], [read]},
{[admin, memcached, idle], [write]},
{[pools], [read]}]},
{data_backup, [bucket_name],
[{name, <<"Data Backup">>},
{desc, <<"Can backup and restore bucket data">>}],
[{[{bucket, bucket_name}, data], all},
{[{bucket, bucket_name}, views], [read, write]},
{[{bucket, bucket_name}, fts], [read, write, manage]},
{[{bucket, bucket_name}, stats], [read]},
{[{bucket, bucket_name}, settings], [read]},
{[{bucket, bucket_name}, n1ql, index], [create, list, build]},
{[pools], [read]}]},
{data_monitoring, [bucket_name],
[{name, <<"Data Monitoring">>},
{desc, <<"Can read full bucket stats">>}],
[{[{bucket, bucket_name}, stats], [read]},
{[{bucket, bucket_name}, settings], [read]},
{[pools], [read]}]},
{fts_admin, [bucket_name],
[{name, <<"FTS Admin">>},
{desc, <<"Can administer all FTS features">>}],
[{[{bucket, bucket_name}, fts], [read, write, manage]},
{[settings, fts], [read, write, manage]},
{[ui], [read]},
{[pools], [read]},
{[{bucket, bucket_name}, settings], [read]}]},
{fts_searcher, [bucket_name],
[{name, <<"FTS Searcher">>},
{desc, <<"Can query FTS indexes if they have bucket permissions">>}],
[{[{bucket, bucket_name}, fts], [read]},
{[settings, fts], [read]},
{[ui], [read]},
{[pools], [read]},
{[{bucket, bucket_name}, settings], [read]}]},
{query_select, [bucket_name],
[{name, <<"Query Select">>},
{desc, <<"Can execute SELECT statement on bucket to retrieve data">>}],
[{[{bucket, bucket_name}, n1ql, select], [execute]},
{[{bucket, bucket_name}, data, docs], [read]},
{[{bucket, bucket_name}, settings], [read]},
{[ui], [read]},
{[pools], [read]}]},
{query_update, [bucket_name],
[{name, <<"Query Update">>},
{desc, <<"Can execute UPDATE statement on bucket to update data">>}],
[{[{bucket, bucket_name}, n1ql, update], [execute]},
{[{bucket, bucket_name}, data, docs], [upsert]},
{[{bucket, bucket_name}, settings], [read]},
{[ui], [read]},
{[pools], [read]}]},
{query_insert, [bucket_name],
[{name, <<"Query Insert">>},
{desc, <<"Can execute INSERT statement on bucket to add data">>}],
[{[{bucket, bucket_name}, n1ql, insert], [execute]},
{[{bucket, bucket_name}, data, docs], [insert]},
{[{bucket, bucket_name}, settings], [read]},
{[ui], [read]},
{[pools], [read]}]},
{query_delete, [bucket_name],
[{name, <<"Query Delete">>},
{desc, <<"Can execute DELETE statement on bucket to delete data">>}],
[{[{bucket, bucket_name}, n1ql, delete], [execute]},
{[{bucket, bucket_name}, data, docs], [delete]},
{[{bucket, bucket_name}, settings], [read]},
{[ui], [read]},
{[pools], [read]}]},
{query_manage_index, [bucket_name],
[{name, <<"Query Manage Index">>},
{desc, <<"Can manage indexes for the bucket">>}],
[{[{bucket, bucket_name}, n1ql, index], all},
{[{bucket, bucket_name}, settings], [read]},
{[ui], [read]},
{[pools], [read]}]},
{query_system_catalog, [],
[{name, <<"Query System Catalog">>},
{desc, <<"Can lookup system catalog information">>}],
[{[{bucket, any}, n1ql, index], [list]},
{[{bucket, any}, settings], [read]},
{[n1ql, meta], [read]},
{[ui], [read]},
{[pools], [read]}]},
{query_external_access, [],
[{name, <<"Query External Access">>},
{desc, <<"Can execute CURL statement">>}],
[{[n1ql, curl], [execute]},
{[{bucket, any}, settings], [read]},
{[ui], [read]},
{[pools], [read]}]},
{replication_target, [bucket_name],
[{name, <<"Replication Target">>},
{desc, <<"XDC replication target for bucket">>}],
[{[{bucket, bucket_name}, settings], [read]},
{[{bucket, bucket_name}, data, meta], [read, write]},
{[{bucket, bucket_name}, stats], [read]},
{[pools], [read]}]}].
%% Role definitions active once the whole cluster runs at compat level 5.5.
%% Each entry is {RoleName, ParamDefs, UIProps, PermissionPatterns}:
%%   - ParamDefs: [] or [bucket_name] (role is parameterized by bucket);
%%   - UIProps: name/desc shown in the UI; {ce, true} marks roles that are
%%     also available in Community Edition (roles with empty props are
%%     hidden, see visible_roles_filter/0);
%%   - PermissionPatterns: checked first-match-wins by
%%     get_allowed_operations/2, so 'none' entries must precede broader ones.
-spec roles_55() -> [rbac_role_def(), ...].
roles_55() ->
    [{admin, [],
      [{name, <<"Full Admin">>},
       {desc, <<"Can manage all cluster features (including security). "
                "This user can access the web console. This user can read and "
                "write all data.">>},
       {ce, true}],
      [{[], all}]},
     {ro_admin, [],
      [{name, <<"Read-Only Admin">>},
       {desc, <<"Can view all cluster statistics. This user can access the "
                "web console. This user can read some data.">>},
       {ce, true}],
      [{[{bucket, any}, password], none},
       {[{bucket, any}, data], none},
       {[{bucket, any}, fts], none},
       {[admin, security], [read]},
       {[admin], none},
       {[eventing], none},
       {[], [read, list]}]},
     {security_admin, [],
      [{name, <<"Security Admin">>},
       {desc, <<"Can view all cluster statistics and manage user roles, but "
                "not grant Full Admin or Security Admin roles to other users "
                "or alter their own role. This user can access the web "
                "console. This user cannot read data.">>}],
      [{[admin, security, admin], none},
       {[admin, security], all},
       {[admin, logs], none},
       {[{bucket, any}, data], none},
       {[{bucket, any}, views], none},
       {[{bucket, any}, n1ql], none},
       {[{bucket, any}, fts], none},
       {[{bucket, any}, password], none},
       {[{bucket, any}], [read]},
       {[], [read, list]}]},
     {cluster_admin, [],
      [{name, <<"Cluster Admin">>},
       {desc, <<"Can manage all cluster features except security. This user "
                "can access the web console. This user cannot read data.">>}],
      [{[admin, internal], none},
       {[admin, security], none},
       {[admin, diag], read},
       {[{bucket, any}, data], none},
       {[{bucket, any}, views], none},
       {[{bucket, any}, n1ql], none},
       {[{bucket, any}, fts], none},
       {[{bucket, any}, password], none},
       {[n1ql, curl], none},
       {[eventing], none},
       {[], all}]},
     {bucket_admin, [bucket_name],
      [{name, <<"Bucket Admin">>},
       {desc, <<"Can manage ALL bucket features for a given bucket (including "
                "start/stop XDCR). This user can access the web console. This "
                "user cannot read data.">>}],
      [{[{bucket, bucket_name}, xdcr], [read, execute]},
       {[{bucket, bucket_name}, data], none},
       {[{bucket, bucket_name}, views], none},
       {[{bucket, bucket_name}, n1ql], none},
       {[{bucket, bucket_name}, password], none},
       {[{bucket, bucket_name}, fts], none},
       {[{bucket, bucket_name}], all},
       {[{bucket, any}, settings], [read]},
       {[{bucket, any}], none},
       {[xdcr], none},
       {[admin], none},
       {[eventing], none},
       {[], [read]}]},
     {bucket_full_access, [bucket_name],
      [{name, <<"Application Access">>},
       {desc, <<"Full access to bucket data. This user cannot access the web "
                "console and is intended only for application access. This "
                "user can read and write data.">>},
       {ce, true}],
      [{[{bucket, bucket_name}, data], all},
       {[{bucket, bucket_name}, views], all},
       {[{bucket, bucket_name}, n1ql, index], all},
       {[{bucket, bucket_name}, n1ql], [execute]},
       {[{bucket, bucket_name}], [read, flush]},
       {[{bucket, bucket_name}, settings], [read]},
       {[pools], [read]}]},
     {views_admin, [bucket_name],
      [{name, <<"Views Admin">>},
       {desc, <<"Can create and manage views of a given bucket. This user can "
                "access the web console. This user can read some data.">>}],
      [{[{bucket, bucket_name}, views], all},
       {[{bucket, bucket_name}, data], [read]},
       {[{bucket, any}, settings], [read]},
       {[{bucket, any}], none},
       {[{bucket, bucket_name}, n1ql], [execute]},
       {[xdcr], none},
       {[admin], none},
       {[eventing], none},
       {[], [read]}]},
     {views_reader, [bucket_name],
      [{name, <<"Views Reader">>},
       {desc, <<"Can read data from the views of a given bucket. This user "
                "cannot access the web console and is intended only for "
                "application access. This user can read some data.">>}],
      [{[{bucket, bucket_name}, views], [read]},
       {[{bucket, bucket_name}, data, docs], [read]},
       {[pools], [read]}]},
     {replication_admin, [],
      [{name, <<"XDCR Admin">>},
       {desc, <<"Can administer XDCR features to create cluster references and "
                "replication streams out of this cluster. This user can "
                "access the web console. This user can read some data.">>}],
      [{[{bucket, any}, xdcr], all},
       {[{bucket, any}, data], [read]},
       {[{bucket, any}, settings], [read]},
       {[{bucket, any}], none},
       {[xdcr], all},
       {[admin], none},
       {[eventing], none},
       {[], [read]}]},
     {data_reader, [bucket_name],
      [{name, <<"Data Reader">>},
       {desc, <<"Can read data from a given bucket. This user cannot access "
                "the web console and is intended only for application access. "
                "This user can read data, but cannot write it.">>}],
      [{[{bucket, bucket_name}, data, docs], [read]},
       {[{bucket, bucket_name}, data, meta], [read]},
       {[{bucket, bucket_name}, data, xattr], [read]},
       {[{bucket, bucket_name}, settings], [read]},
       {[pools], [read]}]},
     {data_writer, [bucket_name],
      [{name, <<"Data Writer">>},
       {desc, <<"Can write data to a given bucket. This user cannot access the "
                "web console and is intended only for application access. This "
                "user can write data, but cannot read it.">>}],
      [{[{bucket, bucket_name}, data, docs], [insert, upsert, delete]},
       {[{bucket, bucket_name}, data, xattr], [write]},
       {[{bucket, bucket_name}, settings], [read]},
       {[pools], [read]}]},
     {data_dcp_reader, [bucket_name],
      [{name, <<"Data DCP Reader">>},
       {desc, <<"Can initiate DCP streams for a given bucket. This user cannot "
                "access the web console and is intended only for application "
                "access. This user can read data.">>}],
      [{[{bucket, bucket_name}, data, docs], [read]},
       {[{bucket, bucket_name}, data, meta], [read]},
       {[{bucket, bucket_name}, data, dcp], [read]},
       {[{bucket, bucket_name}, data, sxattr], [read]},
       {[{bucket, bucket_name}, data, xattr], [read]},
       {[{bucket, bucket_name}, settings], [read]},
       {[admin, memcached, idle], [write]},
       {[pools], [read]}]},
     {data_backup, [bucket_name],
      [{name, <<"Data Backup & Restore">>},
       {desc, <<"Can backup and restore a given bucket’s data. This user "
                "cannot access the web console and is intended only for "
                "application access. This user can read data.">>}],
      [{[{bucket, bucket_name}, data], all},
       {[{bucket, bucket_name}, views], [read, write]},
       {[{bucket, bucket_name}, fts], [read, write, manage]},
       {[{bucket, bucket_name}, stats], [read]},
       {[{bucket, bucket_name}, settings], [read]},
       {[{bucket, bucket_name}, n1ql, index], [create, list, build]},
       {[{bucket, bucket_name}, analytics], [manage]},
       {[analytics], [select, backup]},
       {[pools], [read]}]},
     {data_monitoring, [bucket_name],
      [{name, <<"Data Monitor">>},
       {desc, <<"Can read statistics for a given bucket. This user cannot "
                "access the web console and is intended only for application "
                "access. This user cannot read data.">>}],
      [{[{bucket, bucket_name}, stats], [read]},
       {[{bucket, bucket_name}, settings], [read]},
       {[tasks], [read]},
       {[pools], [read]}]},
     {fts_admin, [bucket_name],
      [{name, <<"Search Admin">>},
       {desc, <<"Can administer all Full Text Search features. This user can "
                "access the web console. This user can read some data.">>}],
      [{[{bucket, bucket_name}, fts], [read, write, manage]},
       {[settings, fts], [read, write, manage]},
       {[ui], [read]},
       {[pools], [read]},
       {[{bucket, bucket_name}, settings], [read]}]},
     {fts_searcher, [bucket_name],
      [{name, <<"Search Reader">>},
       {desc, <<"Can query Full Text Search indexes for a given bucket. This "
                "user can access the web console. This user can read some "
                "data.">>}],
      [{[{bucket, bucket_name}, fts], [read]},
       {[settings, fts], [read]},
       {[ui], [read]},
       {[pools], [read]},
       {[{bucket, bucket_name}, settings], [read]}]},
     {query_select, [bucket_name],
      [{name, <<"Query Select">>},
       {desc, <<"Can execute a SELECT statement on a given bucket to retrieve "
                "data. This user can access the web console and can read data, "
                "but not write it.">>}],
      [{[{bucket, bucket_name}, n1ql, select], [execute]},
       {[{bucket, bucket_name}, data, docs], [read]},
       {[{bucket, bucket_name}, settings], [read]},
       {[ui], [read]},
       {[pools], [read]}]},
     {query_update, [bucket_name],
      [{name, <<"Query Update">>},
       {desc, <<"Can execute an UPDATE statement on a given bucket to update "
                "data. This user can access the web console and write data, "
                "but cannot read it.">>}],
      [{[{bucket, bucket_name}, n1ql, update], [execute]},
       {[{bucket, bucket_name}, data, docs], [upsert]},
       {[{bucket, bucket_name}, settings], [read]},
       {[ui], [read]},
       {[pools], [read]}]},
     {query_insert, [bucket_name],
      [{name, <<"Query Insert">>},
       {desc, <<"Can execute an INSERT statement on a given bucket to add "
                "data. This user can access the web console and insert data, "
                "but cannot read it.">>}],
      [{[{bucket, bucket_name}, n1ql, insert], [execute]},
       {[{bucket, bucket_name}, data, docs], [insert]},
       {[{bucket, bucket_name}, settings], [read]},
       {[ui], [read]},
       {[pools], [read]}]},
     {query_delete, [bucket_name],
      [{name, <<"Query Delete">>},
       {desc, <<"Can execute a DELETE statement on a given bucket to delete "
                "data. This user can access the web console, but cannot read "
                "data. This user can delete data.">>}],
      [{[{bucket, bucket_name}, n1ql, delete], [execute]},
       {[{bucket, bucket_name}, data, docs], [delete]},
       {[{bucket, bucket_name}, settings], [read]},
       {[ui], [read]},
       {[pools], [read]}]},
     {query_manage_index, [bucket_name],
      [{name, <<"Query Manage Index">>},
       {desc, <<"Can manage indexes for a given bucket. This user can access "
                "the web console, but cannot read data.">>}],
      [{[{bucket, bucket_name}, n1ql, index], all},
       {[{bucket, bucket_name}, settings], [read]},
       {[ui], [read]},
       {[pools], [read]}]},
     {query_system_catalog, [],
      [{name, <<"Query System Catalog">>},
       {desc, <<"Can look up system catalog information via N1QL. This user "
                "can access the web console, but cannot read user data.">>}],
      [{[{bucket, any}, n1ql, index], [list]},
       {[{bucket, any}, settings], [read]},
       {[n1ql, meta], [read]},
       {[ui], [read]},
       {[pools], [read]}]},
     {query_external_access, [],
      [{name, <<"Query CURL Access">>},
       {desc, <<"Can execute the CURL statement from within N1QL. This user "
                "can access the web console, but cannot read data (within "
                "Couchbase).">>}],
      [{[n1ql, curl], [execute]},
       {[{bucket, any}, settings], [read]},
       {[ui], [read]},
       {[pools], [read]}]},
     {replication_target, [bucket_name],
      [{name, <<"XDCR Inbound">>},
       {desc, <<"Can create XDCR streams into a given bucket. This user cannot "
                "access the web console or read any data.">>}],
      [{[{bucket, bucket_name}, settings], [read]},
       {[{bucket, bucket_name}, data, meta], [read, write]},
       {[{bucket, bucket_name}, stats], [read]},
       {[pools], [read]}]},
     {analytics_manager, [bucket_name],
      [{name, <<"Analytics Manager">>},
       {desc, <<"Can manage Analytics links. Can manage datasets on a given bucket. "
                "Can query datasets. This user can access the web console and read "
                "some data.">>}],
      [{[{bucket, bucket_name}, analytics], [manage]},
       {[analytics], [select]},
       {[ui], [read]},
       {[pools], [read]}]},
     {analytics_reader, [],
      [{name, <<"Analytics Reader">>},
       {desc, <<"Can query datasets. This is a global role as datasets may "
                "be created on different buckets. This user can access the "
                "web console and read some data.">>}],
      [{[analytics], [select]},
       {[ui], [read]},
       {[pools], [read]}]}].
-spec get_definitions() -> [rbac_role_def(), ...].
%% Role definitions for the latest known cluster configuration.
get_definitions() ->
    get_definitions(ns_config:latest()).
-spec get_definitions(ns_config()) -> [rbac_role_def(), ...].
%% Pick the role set matching the cluster's compatibility level. Older
%% sets stay reachable while mixed-version clusters exist.
get_definitions(Config) ->
    case cluster_compat_mode:is_cluster_50(Config) of
        false ->
            roles_45();
        true ->
            case cluster_compat_mode:is_cluster_55(Config) of
                true ->
                    roles_55();
                false ->
                    roles_50()
            end
    end.
-spec object_match(rbac_permission_object(), rbac_permission_pattern_object()) ->
          boolean().
%% An object matches a pattern when every vertex of the (possibly shorter)
%% pattern matches the corresponding object vertex. In a {Key, Value}
%% vertex, 'any' acts as a wildcard on either side; repeated '_Key'
%% variables in the heads force the keys themselves to be equal.
object_match(_Object, []) ->
    true;
object_match([], [_ | _]) ->
    false;
object_match([{_Key, any} | ObjRest], [{_Key, _} | PatRest]) ->
    object_match(ObjRest, PatRest);
object_match([{_Key, _} | ObjRest], [{_Key, any} | PatRest]) ->
    object_match(ObjRest, PatRest);
object_match([_Vertex | ObjRest], [_Vertex | PatRest]) ->
    object_match(ObjRest, PatRest);
object_match(_Object, _Pattern) ->
    false.

-spec get_allowed_operations(rbac_permission_object(), [rbac_permission_pattern()]) ->
          rbac_permission_pattern_operations().
%% First pattern that matches the object wins; no matching pattern means
%% no access at all.
get_allowed_operations(_Object, []) ->
    none;
get_allowed_operations(Object, [{Pattern, Operations} | Remaining]) ->
    case object_match(Object, Pattern) of
        true ->
            Operations;
        false ->
            get_allowed_operations(Object, Remaining)
    end.
-spec operation_allowed(rbac_operation(), rbac_permission_pattern_operations()) ->
          boolean().
%% Clause order matters: the all/none verdicts take precedence over the
%% 'any' operation wildcard, which in turn beats the explicit list lookup.
operation_allowed(_Operation, all) ->
    true;
operation_allowed(_Operation, none) ->
    false;
operation_allowed(any, _Allowed) ->
    true;
operation_allowed(Operation, Allowed) ->
    lists:member(Operation, Allowed).
-spec is_allowed(rbac_permission(), rbac_identity() | [rbac_compiled_role()]) -> boolean().
%% A permission is granted when at least one compiled role allows the
%% requested operation on the object. An identity (2-tuple) is first
%% resolved to its compiled roles.
is_allowed(Permission, {_, _} = Identity) ->
    is_allowed(Permission, get_compiled_roles(Identity));
is_allowed({Object, Operation}, Roles) ->
    lists:any(fun (Role) ->
                      operation_allowed(Operation,
                                        get_allowed_operations(Object, Role))
              end, Roles).
-spec substitute_params([string()], [atom()], [rbac_permission_pattern_raw()]) ->
          [rbac_permission_pattern()].
%% Replace every parameter placeholder (e.g. bucket_name) in the raw
%% permission patterns with the concrete parameter value. 'any' values in
%% a vertex and plain atom vertices are passed through untouched.
substitute_params([], [], Permissions) ->
    Permissions;
substitute_params(Params, ParamDefinitions, Permissions) ->
    Bindings = lists:zip(ParamDefinitions, Params),
    SubstVertex =
        fun ({Name, any}) ->
                {Name, any};
            ({Name, ParamName}) ->
                %% Assertive match: the placeholder must be a known param.
                {ParamName, Value} = lists:keyfind(ParamName, 1, Bindings),
                {Name, Value};
            (Vertex) ->
                Vertex
        end,
    [{lists:map(SubstVertex, ObjectPattern), Operations}
     || {ObjectPattern, Operations} <- Permissions].
-spec compile_params([atom()], [rbac_role_param()], rbac_all_param_values()) ->
          false | [[rbac_role_param()]].
%% Resolve the role parameters against the currently possible values,
%% returning the matching (id-annotated) value list, or false when the
%% parameters match nothing (e.g. the referenced bucket no longer exists).
compile_params(ParamDefs, Params, AllParamValues) ->
    PossibleValues = get_possible_param_values(ParamDefs, AllParamValues),
    %% find_matching_value/3 already yields false on no match, so the
    %% former case expression re-dispatching on false/Values was a no-op.
    find_matching_value(ParamDefs, Params, PossibleValues).
%% Core role-compilation loop shared by compile_roles/3 and
%% filter_out_invalid_roles/3. For each role, looks up its definition and
%% invokes CompileRole(Name, Params, ParamDefs, Permissions); roles without
%% a definition, or whose parameters match no currently possible value,
%% are filtered out.
%%
%% NOTE(review): a parameterless role (bare atom) whose definition does
%% declare parameters matches neither branch of the first inner case and
%% crashes with case_clause — presumably intentional assertive style;
%% confirm before relying on it.
compile_roles(CompileRole, Roles, Definitions, AllParamValues) ->
    lists:filtermap(fun (Name) when is_atom(Name) ->
                            case lists:keyfind(Name, 1, Definitions) of
                                {Name, [], _Props, Permissions} ->
                                    {true, CompileRole(Name, [], [], Permissions)};
                                false ->
                                    false
                            end;
                        ({Name, Params}) ->
                            case lists:keyfind(Name, 1, Definitions) of
                                {Name, ParamDefs, _Props, Permissions} ->
                                    case compile_params(ParamDefs, Params, AllParamValues) of
                                        false ->
                                            false;
                                        NewParams ->
                                            {true, CompileRole(Name, NewParams, ParamDefs, Permissions)}
                                    end;
                                false ->
                                    false
                            end
                    end, Roles).
-spec compile_roles([rbac_role()], [rbac_role_def()] | undefined, rbac_all_param_values()) ->
          [rbac_compiled_role()].
%% Turn configured roles into compiled permission-pattern lists by
%% substituting concrete parameter values into each definition.
compile_roles(_Roles, undefined, _AllParamValues) ->
    %% Definitions can be undefined briefly after a node joins a pre-5.0
    %% cluster; treat that window as "no roles".
    [];
compile_roles(Roles, Definitions, AllParamValues) ->
    SubstituteFun =
        fun (_Name, Params, ParamDefs, Permissions) ->
                substitute_params(strip_ids(ParamDefs, Params), ParamDefs,
                                  Permissions)
        end,
    compile_roles(SubstituteFun, Roles, Definitions, AllParamValues).
-spec get_roles(rbac_identity()) -> [rbac_role()].
%% Map an authenticated (or anonymous) identity to its configured,
%% uncompiled roles. Clause heads dispatch on the identity's domain tag.
get_roles({"", wrong_token}) ->
    %% Bad auth token: full admin before the system is provisioned
    %% (initial setup window), nothing afterwards.
    case ns_config_auth:is_system_provisioned() of
        false ->
            [admin];
        true ->
            []
    end;
get_roles({"", anonymous}) ->
    %% Anonymous access: full admin before provisioning; afterwards only
    %% full access to buckets explicitly configured without authentication.
    case ns_config_auth:is_system_provisioned() of
        false ->
            [admin];
        true ->
            [{bucket_full_access, [BucketName]} ||
                BucketName <- ns_config_auth:get_no_auth_buckets(ns_config:latest())]
    end;
get_roles({_, admin}) ->
    [admin];
get_roles({_, ro_admin}) ->
    [ro_admin];
get_roles({BucketName, bucket}) ->
    %% Legacy per-bucket credentials map to full access on that bucket.
    [{BucketName, bucket_full_access, [BucketName]}];
get_roles({User, external} = Identity) ->
    %% Pre-4.5 clusters have no stored external users; fall back to the
    %% saslauthd role lookup in that case.
    case cluster_compat_mode:is_cluster_45() of
        true ->
            menelaus_users:get_roles(Identity);
        false ->
            case saslauthd_auth:get_role_pre_45(User) of
                admin ->
                    [admin];
                ro_admin ->
                    [ro_admin];
                false ->
                    []
            end
    end;
get_roles({_User, local} = Identity) ->
    menelaus_users:get_roles(Identity).
%% Name under which the versioned compiled-roles cache is registered.
compiled_roles_cache_name() ->
    compiled_roles_cache.
%% Start the versioned cache that holds compiled roles per identity. The
%% cache version covers everything role compilation depends on (compat
%% version, users/groups versions, provisioning state, bucket name/uuid
%% list); the subscribed events invalidate it when any of those change.
start_compiled_roles_cache() ->
    %% Relevant user-storage events: user or group version bumps.
    UsersFilter =
        fun ({user_version, _V}) ->
                true;
            ({group_version, _V}) ->
                true;
            (_) ->
                false
        end,
    %% Relevant ns_config keys: buckets, compat version, admin creds.
    ConfigFilter =
        fun ({buckets, _}) ->
                true;
            ({cluster_compat_version, _}) ->
                true;
            ({rest_creds, _}) ->
                true;
            (_) ->
                false
        end,
    GetVersion =
        fun () ->
                {cluster_compat_mode:get_compat_version(ns_config:latest()),
                 menelaus_users:get_users_version(),
                 menelaus_users:get_groups_version(),
                 ns_config_auth:is_system_provisioned(),
                 [{Name, proplists:get_value(uuid, BucketConfig)} ||
                     {Name, BucketConfig} <- ns_bucket:get_buckets(ns_config:latest())]}
        end,
    GetEvents =
        case ns_node_disco:couchdb_node() == node() of
            true ->
                %% On the couchdb node the user-storage events live on the
                %% ns_server node; wait for it before subscribing.
                fun () ->
                        dist_manager:wait_for_node(fun ns_node_disco:ns_server_node/0),
                        [{{user_storage_events, ns_node_disco:ns_server_node()}, UsersFilter},
                         {ns_config_events, ConfigFilter}]
                end;
            false ->
                fun () ->
                        [{user_storage_events, UsersFilter},
                         {ns_config_events, ConfigFilter}]
                end
        end,
    versioned_cache:start_link(
      compiled_roles_cache_name(), 200, fun build_compiled_roles/1,
      GetEvents, GetVersion).
-spec get_compiled_roles(rbac_identity()) -> [rbac_compiled_role()].
%% External identities bypass the local versioned cache and are compiled
%% via roles_cache; all other identities go through the versioned cache.
get_compiled_roles({_, external} = Identity) ->
    roles_cache:build_compiled_roles(Identity);
get_compiled_roles(Identity) ->
    versioned_cache:get(compiled_roles_cache_name(), Identity).
%% Compile roles for one identity. The couchdb node lacks local
%% users/buckets state, so it delegates to the ns_server node via rpc.
build_compiled_roles(Identity) ->
    case ns_node_disco:couchdb_node() == node() of
        true ->
            ?log_debug("Retrieve compiled roles for user ~p from ns_server "
                       "node", [ns_config_log:tag_user_data(Identity)]),
            rpc:call(ns_node_disco:ns_server_node(), ?MODULE,
                     build_compiled_roles, [Identity]);
        false ->
            ?log_debug("Compile roles for user ~p",
                       [ns_config_log:tag_user_data(Identity)]),
            Definitions = get_definitions(),
            ParamValues =
                calculate_possible_param_values(ns_bucket:get_buckets()),
            compile_roles(get_roles(Identity), Definitions, ParamValues)
    end.
%% Drop roles that no longer resolve (unknown definition, or parameters
%% that match no currently possible value), keeping valid ones in their
%% original {Name, Params} / atom shape.
filter_out_invalid_roles(Roles, Definitions, AllPossibleValues) ->
    ToRole = fun (Name, [], _ParamDefs, _Permissions) ->
                     Name;
                 (Name, Params, _ParamDefs, _Permissions) ->
                     {Name, Params}
             end,
    compile_roles(ToRole, Roles, Definitions, AllPossibleValues).
%% Possible values for one parameter combination, given the bucket list.
%% A bucket is represented as a {Name, UUID} pair; the 'any' wildcard is
%% always an acceptable bucket value.
calculate_possible_param_values(_Buckets, []) ->
    [[]];
calculate_possible_param_values(Buckets, [bucket_name]) ->
    BucketValues = [[{Name, proplists:get_value(uuid, Props)}]
                    || {Name, Props} <- Buckets],
    [[any] | BucketValues].

%% Parameter signatures a role definition may declare.
all_params_combinations() ->
    [[], [bucket_name]].

-spec calculate_possible_param_values(list()) -> rbac_all_param_values().
%% Possible values for every supported parameter combination.
calculate_possible_param_values(Buckets) ->
    lists:map(fun (Combination) ->
                      {Combination,
                       calculate_possible_param_values(Buckets, Combination)}
              end, all_params_combinations()).
-spec get_possible_param_values([atom()], rbac_all_param_values()) -> [[rbac_role_param()]].
%% Look up the precomputed possible values for a parameter signature.
%% Assertive match: crashes with badmatch if the signature is unknown.
get_possible_param_values(ParamDefs, AllValues) ->
    {_, PossibleValues} = lists:keyfind(ParamDefs, 1, AllValues),
    PossibleValues.
%% Pipes filter selecting role definitions that should be shown: in
%% enterprise, every role carrying UI props; in community edition, only
%% roles explicitly flagged {ce, true}.
visible_roles_filter() ->
    Pred =
        case cluster_compat_mode:is_enterprise() of
            true ->
                fun ({_, _, Props, _}) -> Props =/= [] end;
            false ->
                fun ({_, _, Props, _}) ->
                        proplists:get_value(ce, Props, false)
                end
        end,
    pipes:filter(Pred).
%% Transducer expanding each visible role definition into concrete
%% entries: {Role, Props} for parameterless roles, and one
%% {{Role, Values}, Props} per possible parameter value combination for
%% parameterized roles.
expand_params(AllPossibleValues) ->
    ?make_transducer(
       pipes:foreach(
         ?producer(),
         fun ({Role, [], Props, _}) ->
                 ?yield({Role, Props});
             ({Role, ParamDefs, Props, _}) ->
                 lists:foreach(
                   fun (Values) ->
                           ?yield({{Role, Values}, Props})
                   end, get_possible_param_values(ParamDefs, AllPossibleValues))
         end)).
%% Pipes filter keeping only roles that grant the given permission;
%% 'undefined' means no constraint (everything passes).
filter_by_permission(undefined, _ParamValues, _Definitions) ->
    pipes:filter(fun (_) -> true end);
filter_by_permission(Permission, ParamValues, Definitions) ->
    Grants =
        fun ({Role, _Props}) ->
                Compiled = compile_roles([Role], Definitions, ParamValues),
                menelaus_roles:is_allowed(Permission, Compiled)
        end,
    pipes:filter(Grants).
-spec produce_roles_by_permission(rbac_permission(), ns_config(), list()) ->
          pipes:producer(rbac_role()).
%% Producer streaming every visible, parameter-expanded role that grants
%% the given permission under the supplied config and bucket list.
produce_roles_by_permission(Permission, Config, Buckets) ->
    ParamValues = calculate_possible_param_values(Buckets),
    Defs = get_definitions(Config),
    Stages = [pipes:stream_list(Defs),
              visible_roles_filter(),
              expand_params(ParamValues),
              filter_by_permission(Permission, ParamValues, Defs)],
    pipes:compose(Stages).
%% Reduce a parameter value to its bare form: a {Name, Id} pair loses the
%% id annotation, anything else (including 'any') is returned unchanged.
strip_id(bucket_name, {BucketName, _Id}) ->
    BucketName;
strip_id(bucket_name, Param) ->
    Param.

%% Strip id annotations from a whole parameter list, pairwise with the
%% parameter definitions (lists must be the same length).
strip_ids(ParamDefs, Params) ->
    lists:zipwith(fun strip_id/2, ParamDefs, Params).
%% A parameter matches a possible value when it equals either the bare
%% value or the value's {Name, Id} annotated form.
match_param(bucket_name, P, P) ->
    true;
match_param(bucket_name, P, {P, _Id}) ->
    true;
match_param(bucket_name, _, _) ->
    false.

%% True when every (def, param, value) triple matches pairwise.
match_params([], [], []) ->
    true;
match_params(ParamDefs, Params, Values) ->
    %% The former dropwhile + empty-check was a hand-rolled lists:all;
    %% lists:all likewise stops at the first non-matching triple.
    lists:all(fun ({ParamDef, Param, Value}) ->
                      match_param(ParamDef, Param, Value)
              end, lists:zip3(ParamDefs, Params, Values)).

-spec find_matching_value([atom()], [rbac_role_param()], [[rbac_role_param()]]) ->
          false | [rbac_role_param()].
%% Return the first possible value list matched by Params, or false.
%% Direct recursion replaces the former dropwhile + head inspection,
%% which was a hand-rolled first-match search.
find_matching_value(_ParamDefs, _Params, []) ->
    false;
find_matching_value(ParamDefs, Params, [Values | Rest]) ->
    case match_params(ParamDefs, Params, Values) of
        true ->
            Values;
        false ->
            find_matching_value(ParamDefs, Params, Rest)
    end.
-spec validate_role(rbac_role(), [rbac_role_def()], [[rbac_role_param()]]) ->
          false | {ok, rbac_role()}.
%% Check a role (bare atom or {Name, Params}) against the definitions and
%% currently possible parameter values. On success the returned role has
%% its parameters expanded to their id-annotated form.
validate_role(Role, Definitions, AllValues) when is_atom(Role) ->
    validate_role(Role, [], Definitions, AllValues);
validate_role({Role, Params}, Definitions, AllValues) ->
    validate_role(Role, Params, Definitions, AllValues).

validate_role(Role, Params, Definitions, AllValues) ->
    case lists:keyfind(Role, 1, Definitions) of
        {Role, ParamsDef, _, _} when length(Params) =:= length(ParamsDef) ->
            PossibleValues = get_possible_param_values(ParamsDef, AllValues),
            Matched = find_matching_value(ParamsDef, Params, PossibleValues),
            wrap_validated(Role, Matched);
        _ ->
            %% Unknown role, or parameter count does not fit the definition.
            false
    end.

%% Shape the find_matching_value/3 result into the validate_role return.
wrap_validated(_Role, false) ->
    false;
wrap_validated(Role, []) ->
    {ok, Role};
wrap_validated(Role, Expanded) ->
    {ok, {Role, Expanded}}.
%% Split a role list into {Validated, Unknown}: validated roles get their
%% parameters expanded, the rest are returned as-is. Both result lists
%% come out in reverse input order (foldl accumulation).
validate_roles(Roles, Config) ->
    Definitions = pipes:run(pipes:stream_list(get_definitions(Config)),
                            visible_roles_filter(),
                            pipes:collect()),
    AllValues = calculate_possible_param_values(ns_bucket:get_buckets(Config)),
    Classify =
        fun (Role, {Valid, Unknown}) ->
                case validate_role(Role, Definitions, AllValues) of
                    {ok, Validated} ->
                        {[Validated | Valid], Unknown};
                    false ->
                        {Valid, [Role | Unknown]}
                end
        end,
    lists:foldl(Classify, {[], []}, Roles).
%% Roles granted any operation on the [admin, security] permission object.
get_security_roles() ->
    get_security_roles(ns_config:latest()).

get_security_roles(Config) ->
    pipes:run(produce_roles_by_permission({[admin, security], any}, Config, []),
              pipes:collect()).
%% Configured polling interval for external authentication roles; falls
%% back to ?DEFAULT_EXTERNAL_ROLES_POLLING_INTERVAL when unset.
external_auth_polling_interval() ->
    ns_config:read_key_fast(external_auth_polling_interval,
                            ?DEFAULT_EXTERNAL_ROLES_POLLING_INTERVAL).
-ifdef(TEST).
%% Roles referencing a no-longer-existing bucket must be filtered out.
filter_out_invalid_roles_test() ->
    Roles = [{role1, [{"bucket1", <<"id1">>}]},
             {role2, [{"bucket2", <<"id2">>}]}],
    Definitions = [{role1, [bucket_name],
                    [{name,<<"">>},{desc, <<"">>}],
                    [{[{bucket,bucket_name},settings],[read]}]},
                   {role2, [bucket_name],
                    [{name,<<"">>},{desc, <<"">>}],
                    [{[{bucket,bucket_name},n1ql,update],[execute]}]}],
    PossibleValues = [{[],[[]]},
                      {[bucket_name],
                       [[any],
                        [{"bucket1",<<"id1">>}]]}],
    ?assertEqual([{role1, [{"bucket1", <<"id1">>}]}],
                 filter_out_invalid_roles(Roles, Definitions, PossibleValues)).

%% assertEqual is used instead of assert and assertNot to avoid
%% dialyzer warnings
object_match_test() ->
    ?assertEqual(true, object_match([o1, o2], [o1, o2])),
    ?assertEqual(false, object_match([o1], [o1, o2])),
    ?assertEqual(true, object_match([o1, o2], [o1])),
    ?assertEqual(true, object_match([{b, "a"}], [{b, "a"}])),
    ?assertEqual(false, object_match([{b, "a"}], [{b, "b"}])),
    ?assertEqual(true, object_match([{b, any}], [{b, "b"}])),
    ?assertEqual(true, object_match([{b, "a"}], [{b, any}])),
    ?assertEqual(true, object_match([{b, any}], [{b, any}])).

%% Minimal ns_config fixture with two buckets ("test" and "default").
toy_config() ->
    [[{buckets,
       [{configs,
         [{"test", [{uuid, <<"test_id">>}]},
          {"default", [{uuid, <<"default_id">>}]}]}]}]].

%% Test-only convenience wrapper compiling roles against toy_config().
compile_roles(Roles, Definitions) ->
    AllPossibleValues = calculate_possible_param_values(ns_bucket:get_buckets(toy_config())),
    compile_roles(Roles, Definitions, AllPossibleValues).

compile_roles_test() ->
    ?assertEqual([[{[{bucket, "test"}], none}]],
                 compile_roles([{test_role, ["test"]}],
                               [{test_role, [bucket_name], [], [{[{bucket, bucket_name}], none}]}])).

admin_test() ->
    Roles = compile_roles([admin], roles_45()),
    ?assertEqual(true, is_allowed({[buckets], create}, Roles)),
    ?assertEqual(true, is_allowed({[something, something], anything}, Roles)).

ro_admin_test() ->
    Roles = compile_roles([ro_admin], roles_45()),
    ?assertEqual(false, is_allowed({[{bucket, "test"}, password], read}, Roles)),
    ?assertEqual(false, is_allowed({[{bucket, "test"}, data], read}, Roles)),
    ?assertEqual(true, is_allowed({[{bucket, "test"}, something], read}, Roles)),
    ?assertEqual(false, is_allowed({[{bucket, "test"}, something], write}, Roles)),
    ?assertEqual(false, is_allowed({[admin, security], write}, Roles)),
    ?assertEqual(true, is_allowed({[admin, security], read}, Roles)),
    ?assertEqual(false, is_allowed({[admin, other], write}, Roles)),
    ?assertEqual(true, is_allowed({[anything], read}, Roles)),
    ?assertEqual(false, is_allowed({[anything], write}, Roles)).

%% Shared expectations for bucket/views admins on cluster-wide objects.
bucket_views_admin_check_global(Roles) ->
    ?assertEqual(false, is_allowed({[xdcr], read}, Roles)),
    ?assertEqual(false, is_allowed({[admin], read}, Roles)),
    ?assertEqual(true, is_allowed({[something], read}, Roles)),
    ?assertEqual(false, is_allowed({[something], write}, Roles)),
    ?assertEqual(false, is_allowed({[buckets], create}, Roles)).

%% Shared expectations for bucket/views admins on a foreign bucket.
bucket_views_admin_check_another(Roles) ->
    ?assertEqual(false, is_allowed({[{bucket, "another"}, xdcr], read}, Roles)),
    ?assertEqual(false, is_allowed({[{bucket, "another"}, views], read}, Roles)),
    ?assertEqual(false, is_allowed({[{bucket, "another"}, data], read}, Roles)),
    ?assertEqual(true, is_allowed({[{bucket, "another"}, settings], read}, Roles)),
    ?assertEqual(false, is_allowed({[{bucket, "another"}, settings], write}, Roles)),
    ?assertEqual(false, is_allowed({[{bucket, "another"}], read}, Roles)),
    ?assertEqual(false, is_allowed({[buckets], create}, Roles)).

bucket_admin_check_default(Roles) ->
    ?assertEqual(true, is_allowed({[{bucket, "default"}, xdcr], read}, Roles)),
    ?assertEqual(true, is_allowed({[{bucket, "default"}, xdcr], execute}, Roles)),
    ?assertEqual(true, is_allowed({[{bucket, "default"}, anything], anything}, Roles)),
    ?assertEqual(true, is_allowed({[{bucket, "default"}, anything], anything}, Roles)).

bucket_admin_test() ->
    Roles = compile_roles([{bucket_admin, ["default"]}], roles_45()),
    bucket_admin_check_default(Roles),
    bucket_views_admin_check_another(Roles),
    bucket_views_admin_check_global(Roles).

bucket_admin_wildcard_test() ->
    Roles = compile_roles([{bucket_admin, [any]}], roles_45()),
    bucket_admin_check_default(Roles),
    bucket_views_admin_check_global(Roles).

views_admin_check_default(Roles) ->
    ?assertEqual(true, is_allowed({[{bucket, "default"}, views], anything}, Roles)),
    ?assertEqual(true, is_allowed({[{bucket, "default"}, data], read}, Roles)),
    ?assertEqual(false, is_allowed({[{bucket, "default"}, data], write}, Roles)),
    ?assertEqual(true, is_allowed({[{bucket, "default"}, settings], read}, Roles)),
    ?assertEqual(false, is_allowed({[{bucket, "default"}, settings], write}, Roles)),
    ?assertEqual(false, is_allowed({[{bucket, "default"}], read}, Roles)).

views_admin_test() ->
    Roles = compile_roles([{views_admin, ["default"]}], roles_45()),
    views_admin_check_default(Roles),
    bucket_views_admin_check_another(Roles),
    bucket_views_admin_check_global(Roles).

views_admin_wildcard_test() ->
    Roles = compile_roles([{views_admin, [any]}], roles_45()),
    views_admin_check_default(Roles),
    bucket_views_admin_check_global(Roles).

bucket_full_access_check(Roles, Bucket, Allowed) ->
    ?assertEqual(Allowed, is_allowed({[{bucket, Bucket}, data], anything}, Roles)),
    ?assertEqual(Allowed, is_allowed({[{bucket, Bucket}], flush}, Roles)),
    ?assertEqual(Allowed, is_allowed({[{bucket, Bucket}], flush}, Roles)),
    ?assertEqual(false, is_allowed({[{bucket, Bucket}], write}, Roles)).

bucket_full_access_test() ->
    Roles = compile_roles([{bucket_full_access, ["default"]}], roles_45()),
    bucket_full_access_check(Roles, "default", true),
    bucket_full_access_check(Roles, "another", false),
    ?assertEqual(true, is_allowed({[pools], read}, Roles)),
    ?assertEqual(false, is_allowed({[another], read}, Roles)).

replication_admin_test() ->
    Roles = compile_roles([replication_admin], roles_45()),
    ?assertEqual(true, is_allowed({[{bucket, "default"}, xdcr], anything}, Roles)),
    ?assertEqual(false, is_allowed({[{bucket, "default"}, password], read}, Roles)),
    ?assertEqual(false, is_allowed({[{bucket, "default"}, views], read}, Roles)),
    ?assertEqual(true, is_allowed({[{bucket, "default"}, settings], read}, Roles)),
    ?assertEqual(false, is_allowed({[{bucket, "default"}, settings], write}, Roles)),
    ?assertEqual(true, is_allowed({[{bucket, "default"}, data], read}, Roles)),
    ?assertEqual(false, is_allowed({[{bucket, "default"}, data], write}, Roles)),
    ?assertEqual(true, is_allowed({[xdcr], anything}, Roles)),
    ?assertEqual(false, is_allowed({[admin], read}, Roles)),
    ?assertEqual(true, is_allowed({[other], read}, Roles)).

validate_role_test() ->
    Config = toy_config(),
    Definitions = roles_45(),
    AllParamValues = calculate_possible_param_values(ns_bucket:get_buckets(Config)),
    ?assertEqual({ok, admin}, validate_role(admin, Definitions, AllParamValues)),
    ?assertEqual({ok, {bucket_admin, [{"test", <<"test_id">>}]}},
                 validate_role({bucket_admin, ["test"]}, Definitions, AllParamValues)),
    ?assertEqual({ok, {views_admin, [any]}},
                 validate_role({views_admin, [any]}, Definitions, AllParamValues)),
    ?assertEqual(false, validate_role(something, Definitions, AllParamValues)),
    ?assertEqual(false, validate_role({bucket_admin, ["something"]}, Definitions, AllParamValues)),
    ?assertEqual(false, validate_role({something, ["test"]}, Definitions, AllParamValues)),
    ?assertEqual(false, validate_role({admin, ["test"]}, Definitions, AllParamValues)),
    ?assertEqual(false, validate_role(bucket_admin, Definitions, AllParamValues)),
    ?assertEqual(false, validate_role({bucket_admin, ["test", "test"]}, Definitions, AllParamValues)).
-endif. | src/menelaus_roles.erl | 0.648578 | 0.425009 | menelaus_roles.erl | starcoder |
%%%
%
% https://www.futurelearn.com/courses/functional-programming-erlang/1/assignments/161825/
%
-module(ex1_24).
-export([perimiter/1,area/1,enclose/1,bits/1,bits_tail/1]).
-include_lib("eunit/include/eunit.hrl").
%%% First part: the shapes
% I have assumed that triangles are right-angled triangles for simplicity of the math.
% I'm using three shapes for this; the shapes only have basic properties and no position in any way.
%
% A triangle, i.e. a right-angled triangle with the legs A and B.
% A rectangle, with the width and height, W and H
% A circle, with the radius R
% Using Pythagoras' theorem to figure out the hypotenuse of a right-angled triangle.
%% Length of the hypotenuse of a right-angled triangle with legs A and B.
hypotenuse(A, B) ->
    math:sqrt(A * A + B * B).

%% Perimeter of a shape (the exported name keeps the original spelling).
perimiter({triangle, A, B}) ->
    A + B + hypotenuse(A, B);
perimiter({rectangle, W, H}) ->
    2.0 * (W + H);
perimiter({circle, R}) ->
    2.0 * math:pi() * R.
%% Area of a shape. The rectangle result is multiplied by 1.0 so every
%% clause returns a float, like the other shapes. Clauses are disjoint,
%% so their order is free.
area({circle, R}) ->
    R * R * math:pi();
area({rectangle, W, H}) ->
    W * H * 1.0;
area({triangle, A, B}) ->
    A * B / 2.0.
%% Smallest axis-aligned rectangle enclosing a shape. A right-angled
%% triangle with legs A and B fits exactly in an A-by-B rectangle; a
%% rectangle encloses itself; a circle needs a square of side 2R.
enclose({circle, R}) ->
    Side = 2 * R,
    {rectangle, Side, Side};
enclose({triangle, A, B}) ->
    {rectangle, A, B};
enclose({rectangle, W, H}) ->
    {rectangle, W, H}.
%%% Second part
%
% Bit calculations
% Simple recursion.
%% Number of 1-bits in the binary representation of a non-negative
%% integer, using direct (body) recursion. The guard rejects negative
%% input, which previously produced meaningless negative results because
%% Erlang's div/rem truncate toward zero.
bits(0) ->
    0;
bits(N) when is_integer(N), N > 0 ->
    bits(N div 2) + N rem 2.
% With tail recursion.
%% Tail-recursive bit count over a non-negative integer. The entry-point
%% guard mirrors the one on bits/1, rejecting negative input instead of
%% silently accumulating garbage via truncating div/rem.
bits_tail(N) when is_integer(N), N >= 0 ->
    bits_tail(N, 0).

%% Accumulator carries the count of 1-bits seen so far.
bits_tail(0, Accumulator) ->
    Accumulator;
bits_tail(N, Accumulator) ->
    bits_tail(N div 2, Accumulator + N rem 2).
% In this case I prefer the tail recursion. For me simple
% recursion vs. tail recursion is about readability versus
% efficiency.
%
% When we compare the tail recursive call in this case:
% bits_tail(N div 2, Accumulator + N rem 2).
% it's very similar to the simple recursion:
% bits(N div 2) + N rem 2.
% so the readability of both versions will be similar. That
% makes the tail-recursive version win by being a bit more efficient,
% even if I guess that in most practical terms the gain will be
% minimal in examples like these.
%%% Tests to see the solutions are sane.
%
%% EUnit checks for the shape functions. ?_assertEqual replaces the
%% former ?_assert(X =:= Y): on failure it reports expected and actual
%% values instead of an opaque "assertion failed". Comparison is still
%% exact (=:=), so the expected float/tuple values are unchanged.
perimiter_test_() ->
    [?_assertEqual(12.0, perimiter({triangle, 3, 4})),
     ?_assertEqual(36.0, perimiter({triangle, 9, 12})),
     ?_assertEqual(14.0, perimiter({rectangle, 3, 4})),
     ?_assertEqual(42.0, perimiter({rectangle, 9, 12})),
     ?_assertEqual(2 * math:pi(), perimiter({circle, 1})),
     ?_assertEqual(6 * math:pi(), perimiter({circle, 3}))].

area_test_() ->
    [?_assertEqual(6.0, area({triangle, 3, 4})),
     ?_assertEqual(54.0, area({triangle, 9, 12})),
     ?_assertEqual(12.0, area({rectangle, 3, 4})),
     ?_assertEqual(108.0, area({rectangle, 9, 12})),
     ?_assertEqual(math:pi(), area({circle, 1})),
     ?_assertEqual(9 * math:pi(), area({circle, 3}))].

enclose_test_() ->
    [?_assertEqual({rectangle, 3, 4}, enclose({triangle, 3, 4})),
     ?_assertEqual({rectangle, 9, 12}, enclose({triangle, 9, 12})),
     ?_assertEqual({rectangle, 3, 4}, enclose({rectangle, 3, 4})),
     ?_assertEqual({rectangle, 9, 12}, enclose({rectangle, 9, 12})),
     ?_assertEqual({rectangle, 2, 2}, enclose({circle, 1})),
     ?_assertEqual({rectangle, 6, 6}, enclose({circle, 3}))].
bits_test_() ->
[?_assert(bits(7) =:= 3),
?_assert(bits(8) =:= 1),
?_assert(bits_tail(7) =:= 3),
?_assert(bits_tail(8) =:= 1)
]. | week1/ex1_24.erl | 0.665084 | 0.820829 | ex1_24.erl | starcoder |
%% Copyright 2019, JobTeaser
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(hotp_validator).
-export([init/1, init/2, authenticate/2,
otpauth_uri/3]).
-export_type([validator_options/0, validator_option/0]).
-record(validator, {key :: binary(),
counter :: hotp:counter(),
nb_digits :: pos_integer(),
look_ahead :: non_neg_integer()}).
-type validator() :: #validator{}.
-type validator_options() :: [validator_option()].
-type validator_option() :: {initial_counter, hotp:counter()}
| {nb_digits, pos_integer()}
| {look_ahead, non_neg_integer()}.
%% @doc Initialize and return a new HOTP validator using default settings.
%%
%% @see init/2
-spec init(Key :: binary()) -> validator().
init(Key) ->
    init(Key, []).

%% @doc Initialize and return a new HOTP validator. Unspecified options
%% default to counter 0, 6-digit passwords and a look-ahead of 5.
-spec init(Key :: binary(), Options :: validator_options()) -> validator().
init(Key, Options) ->
    GetOpt = fun (Name, Default) ->
                     proplists:get_value(Name, Options, Default)
             end,
    #validator{key = Key,
               counter = GetOpt(initial_counter, 0),
               nb_digits = GetOpt(nb_digits, 6),
               look_ahead = GetOpt(look_ahead, 5)}.
%% @doc Authenticate a client password.
%%
%% Tries every counter in the look-ahead window (current counter + 1 up
%% to current counter + 1 + look_ahead); on a match the validator is
%% resynchronized to the matching counter.
%%
%% See <a href="https://tools.ietf.org/html/rfc4226#section-7.2">RFC 4226
%% 7.2</a>.
-spec authenticate(validator(), Password) ->
        {validator(), valid | invalid} when
    Password :: pos_integer().
authenticate(Validator = #validator{counter = Current, look_ahead = LookAhead},
             Password) ->
    Candidates = lists:seq(Current + 1, Current + 1 + LookAhead),
    Match = lists:search(fun (C) ->
                                 is_password_valid(Validator, Password, C)
                         end,
                         Candidates),
    case Match of
        {value, MatchingCounter} ->
            {Validator#validator{counter = MatchingCounter}, valid};
        false ->
            {Validator, invalid}
    end.
%% @doc Return whether a password is valid for a specific counter or not.
-spec is_password_valid(validator(), Password, Counter) ->
        boolean() when
    Password :: pos_integer(),
    Counter :: hotp:counter().
is_password_valid(#validator{key = Key, nb_digits = NbDigits},
                  Password, Counter) ->
    %% Compare the client password against the server-side HOTP value
    %% computed for this counter.
    Password == hotp:generate(Key, Counter, NbDigits).
%% @doc Return an URI representing a validator that can be used to
%% automatically configure a client (or at least a Google authenticator). See
%% <a
%% href="https://github.com/google/google-authenticator/wiki/Key-Uri-Format">the
%% Google authenticator documentation</a>.
-spec otpauth_uri(validator(), Issuer, AccountName) -> URI when
Issuer :: binary(),
AccountName :: binary(),
URI :: binary().
otpauth_uri(Validator, Issuer, AccountName) ->
Key = Validator#validator.key,
NbDigits = Validator#validator.nb_digits,
Counter = Validator#validator.counter,
Parameters = [{<<"counter">>, integer_to_list(Counter)}],
otpauth_uri:generate(hotp, Key, NbDigits, Issuer, AccountName, Parameters). | src/hotp_validator.erl | 0.726426 | 0.448547 | hotp_validator.erl | starcoder |
%%%----------------------------------------------------------------------
%%% File : cord.erl
%%% Author : <NAME> <<EMAIL>>
%%% Purpose : Data structure for large strings of text
%%% Created : 21 Oct 2000 by <NAME> <<EMAIL>>
%%%----------------------------------------------------------------------
%%
%% Cords - a scalable data structure for strings of text.
%%
%% Cords are binary trees with erlang binaries as leaves. The trees
%% are kept fairly balanced, and the sizes of the binary objects are
%% kept within acceptable bounds. By using binaries, it should only
%% cost about one byte per character.
%%
%% The main idea is to have a fast insert/replace operation which
%% doesn't change the binaries too much. By keeping the binaries on
%% the leaves fairly small, we generally get to move them about in one
%% piece while we're balancing things after large updates or pulling
%% out large regions. An added advantage of not disturbing the
%% binaries is that it's not too expensive to keep old copies of cords
%% around, since most of the binaries get shared on the heap.
%%
%% API:
%% new():
%% Creates a new, empty cord.
%% new(ListOrBinary):
%% Creates a new cord from some input characters.
%% replace(Cord, NewText, Start, Length):
%% Replaces the `Length' long portion of `Cord' starting at `Start'
%% with `NewText' (a cord, binary, or iolist).
%% join(Left, Right)
%% Join two cords together into a new one.
%% to_binary(Cord): Convert a cord into a binary
%% to_list(Cord): Convert a cord into a list
%% walk(Cord, Pos, Direction, Fun):
%% "Walk" character by character along `Cord' in `Direction'. For each
%% character, we call Fun(Char) which returns either {result, R}, or
%% {more, NextFun}. If we run out of characters before the fun returns
%% a result, we call Fun(finish), which is required to return
%% {result, R}.
-module(cord).
-author('<EMAIL>').
-compile(export_all).
-export([new/0, new/1, cord_size/1,
replace/4, region/3, region_binary/3, region_list/3,
to_binary/1, to_list/1, to_iolist/1,
walk/4]).
%% It seems that static values won't do for scaling to gigantic (tens
%% of megabytes) sizes.
-define(MIN_SIZE, 512).
-define(MAX_SIZE, 2048).
%% A cord is either a #cord record or a binary.
-record(cord, {size, % Combined size
left, % cord()
right, % cord()
%% The dirty flag indicates that a cord (or one of its
%% children) has been changed since the last time the
%% cord was 'fixed'
dirty=false
}).
-define(assert(X),
(case X of
true ->
true;
false ->
exit(lists:flatten(io_lib:format("Assertion failed at ~p:~p",
[?MODULE, ?LINE])))
end)).
%% Create a new, empty cord.
new() -> <<>>.

%% Create a cord from a binary or a byte list.  Modern is_binary/1 and
%% is_list/1 guards replace the obsolete binary/1 and list/1 guard tests.
new(B) when is_binary(B) -> fix_cord(B);
new(L) when is_list(L) -> new(list_to_binary(L)).
%% More efficient way to create a cord from a file. This
%% implementation is not terribly clever (read small chunks, balance
%% at the end), but at least it isn't a memory hog.
%%
%% Returns: {ok, cord()} | {error, Reason}
new_from_file(Filename) ->
    case file:open(Filename, [raw, binary]) of
        {error, _Reason} = Error ->
            Error;
        {ok, F} ->
            %% Always close the descriptor, even if a read fails
            %% (the original version leaked it).
            try
                read_chunks(F)
            after
                file:close(F)
            end
    end.

%% Read the file in ?MAX_SIZE chunks, accumulating a dirty,
%% left-leaning cord, then balance it once at the end.
read_chunks(F) ->
    read_chunks(F, new()).

read_chunks(F, Acc) ->
    case file:read(F, ?MAX_SIZE) of
        eof ->
            {ok, fix_cord(Acc)};
        {ok, Bin} ->
            read_chunks(F, make_cord(Acc, Bin));
        {error, _Reason} = Error ->
            Error
    end.
%% Make a cord.
%% This is a "dirty" operation that doesn't rebalance the tree;
%% callers run fix_cord/1 when balance matters.
make_cord(Left, Right) ->
    #cord{size = cord_size(Left) + cord_size(Right),
          left = Left,
          right = Right,
          dirty = true}.

%% Size in bytes of a cord; O(1) for branches thanks to the cached
%% size field.
cord_size(Cord) when is_binary(Cord) ->
    byte_size(Cord);
cord_size(#cord{size = Size}) ->
    Size.
%% Maximum depth of the tree (a lone leaf has depth 1).
max_depth(Cord) when is_binary(Cord) ->
    1;
max_depth(Cord) ->
    1 + max(max_depth(Cord#cord.left),
            max_depth(Cord#cord.right)).

%% Total number of nodes (branches and leaves).
nr_nodes(Cord) when is_binary(Cord) ->
    1;
nr_nodes(Cord) ->
    1 + nr_nodes(Cord#cord.left) + nr_nodes(Cord#cord.right).

%% Mean size of the leaf binaries, rounded to the nearest integer.
mean_leaf_size(Cord) ->
    Sizes = leaf_sizes(Cord),
    round(lists:sum(Sizes) / length(Sizes)).

%% Sizes of all leaf binaries, left to right.
leaf_sizes(Cord) when is_binary(Cord) ->
    [byte_size(Cord)];
leaf_sizes(Cord) ->
    leaf_sizes(Cord#cord.left) ++ leaf_sizes(Cord#cord.right).

%% Local max/2 helper, kept for compatibility with the rest of the
%% module (shadows the auto-imported BIF with identical semantics).
max(X, Y) when X > Y -> X;
max(_X, Y) -> Y.
%% Insert `New' at position `Point' without removing anything.
insert(Cord, New, Point) ->
    replace(Cord, New, Point, 0).

%% Delete `Length' characters starting at `Point'.
delete(Cord, Point, Length) ->
    replace(Cord, [], Point, Length).

%% replace/4: Replace a region of the cord. `New' is the text to
%% replace the region with, and can be either a list, binary, or cord.
replace(Cord, New, Start, Length) when is_list(New) ->
    replace(Cord, list_to_binary(New), Start, Length);
replace(Cord, New, Start, Length) ->
    %% Replace is done by copying the areas on the left and right of
    %% the region, and joining them together with the new cord in the
    %% middle.
    {Before, Rest} = split(Cord, Start - 1),
    {_Removed, After} = split(Rest, Length),
    fix_cord(make_cord(make_cord(Before, New), After)).
%% Split a cord into {Left, Right} at byte offset `Pos' (0 yields an
%% empty left side; `Pos' must not exceed the cord's size).  The halves
%% are built with dirty make_cord/2 nodes; callers run fix_cord/1 when
%% rebalancing matters.
split(Cord, 0) when is_binary(Cord) ->
    {<<>>, Cord};
split(Cord, Pos) when is_binary(Cord) ->
    ?assert(Pos =< cord_size(Cord)),
    <<Left:Pos/binary, Right/binary>> = Cord,
    {Left, Right};
split(Cord, Pos) when is_record(Cord, cord) ->
    ?assert(Pos =< cord_size(Cord)),
    LeftSz = cord_size(Cord#cord.left),
    % error_logger:info_msg("Split - left:~p~n", [LeftSz]),
    if LeftSz == Pos ->
           %% Clean split exactly between the two children.
           {Cord#cord.left, Cord#cord.right};
       LeftSz > Pos ->
           %% Split point is inside the left child.
           {SplitLeft, SplitRight} = split(Cord#cord.left, Pos),
           {SplitLeft, make_cord(SplitRight, Cord#cord.right)};
       LeftSz < Pos ->
           %% Split point is inside the right child.
           {SplitLeft, SplitRight} = split(Cord#cord.right, Pos - LeftSz),
           {make_cord(Cord#cord.left, SplitLeft), SplitRight}
    end.
%% Join two cords together into a new, rebalanced cord.  Both
%% arguments may be either a leaf binary or a #cord{} branch:
%% make_cord/2 and fix_cord/1 accept both shapes, so no guard is
%% needed.  (The previous `when binary(Left)' guard wrongly rejected
%% a branch cord on the left-hand side with a function_clause error.)
join(Left, Right) ->
    fix_cord(make_cord(Left, Right)).
%% fix_cord/1
%%
%% "Fix" a cord so that it's reasonably balanced, and its leaves are
%% reasonable sizes.  Recurses until the whole tree is clean
%% (dirty == false on every branch).
%% Leaf (binary) - break it up if it's too big
fix_cord(Bin) when binary(Bin) ->
    if size(Bin) > ?MAX_SIZE ->
           %% Oversized leaf: split roughly in half and recurse so both
           %% halves end up within [?MIN_SIZE, ?MAX_SIZE].
           {Left, Right} = split(Bin, round(size(Bin) / 2)),
           fix_cord(make_cord(Left, Right));
       true ->
           Bin
    end;
%% Clean branch: nothing below it changed since the last fix, so it
%% can be returned as-is (this is what makes incremental updates cheap).
fix_cord(Cord) when Cord#cord.dirty == false ->
    Cord;
%% Branch (cord) - merge its children if they're too small, balance it
%% if it's too unbalanced.
fix_cord(Cord) when record(Cord, cord) ->
    Sz = cord_size(Cord),
    Left = Cord#cord.left,
    Right = Cord#cord.right,
    LeftSz = cord_size(Left),
    RightSz = cord_size(Right),
    SzDiff = abs(LeftSz - RightSz),
    if Sz < ?MIN_SIZE ->
           %% Too small - make it into a binary
           to_binary(Cord);
       SzDiff > (Sz/3) ->
           %% needs rebalancing: rotate weight away from the heavier
           %% side; a leaf on the heavy side can't be rotated, so the
           %% subtree is flattened to a binary and re-fixed instead.
           if LeftSz > RightSz ->
                  if binary(Left) ->
                         fix_cord(to_binary(Cord));
                     true ->
                         balance_from_left(Cord)
                  end;
              LeftSz =< RightSz ->
                  if binary(Right) ->
                         fix_cord(to_binary(Cord));
                     true ->
                         balance_from_right(Cord)
                  end
           end;
       true ->
           %% this cord is ok, fix the children and mark it clean
           Cord#cord{left=fix_cord(Left),
                     right=fix_cord(Right),
                     dirty=false}
    end.
%% Balance by taking from the left side.
%% Left must be a #cord, right can be a binary.
balance_from_left(#cord{left=Left, right=Right}) when record(Left, cord) ->
LLSz = cord_size(Left#cord.left),
LRSz = cord_size(Left#cord.right),
if
LRSz < LLSz ->
%% single rotate
fix_cord(make_cord(Left#cord.left,
make_cord(Left#cord.right,
Right)));
record(Left#cord.right, cord) ->
%% double rotate
LeftRight = Left#cord.right,
fix_cord(make_cord(make_cord(Left#cord.left,
LeftRight#cord.left),
make_cord(LeftRight#cord.right,
Right)));
true ->
fix_cord(to_binary(make_cord(Left, Right)))
end.
%% oh, pain, duplication. never have been good at taking redundancy
%% out of symmetric functions. -luke
balance_from_right(#cord{left=Left, right=Right}) when record(Right, cord) ->
RLSz = cord_size(Right#cord.left),
RRSz = cord_size(Right#cord.right),
if
RLSz < RRSz ->
%% single rotate
fix_cord(make_cord(make_cord(Left,
Right#cord.left),
Right#cord.right));
record(Right#cord.left, cord) ->
%% double rotate
RightLeft = Right#cord.left,
fix_cord(make_cord(make_cord(Left,
RightLeft#cord.left),
make_cord(RightLeft#cord.right,
Right#cord.right)));
true ->
fix_cord(to_binary(make_cord(Left, Right)))
end.
%% Return the `Length'-byte region of `Cord' starting at 1-based
%% position `Start', as a cord.
region(Cord, Start, Length) ->
    {_Before, Rest} = split(Cord, Start - 1),
    {Region, _After} = split(Rest, Length),
    Region.

%% Same as region/3, returned as a binary.
region_binary(Cord, Start, Length) ->
    to_binary(region(Cord, Start, Length)).

%% Same as region/3, returned as a list of bytes.
region_list(Cord, Start, Length) ->
    binary_to_list(region_binary(Cord, Start, Length)).
%% Flatten a cord into a single binary.
to_binary(Cord) when is_binary(Cord) ->
    Cord;
to_binary(Cord) ->
    list_to_binary(to_binary1(Cord)).

%% Internal: collect the leaves into a nested iolist; nothing is
%% copied until the final list_to_binary/1 above.
to_binary1(Cord) when is_binary(Cord) ->
    Cord;
to_binary1(#cord{left = Left, right = Right}) ->
    [to_binary1(Left), to_binary1(Right)].

%% Flatten a cord into a list of bytes.
to_list(Cord) ->
    binary_to_list(to_binary(Cord)).

%% Return the cord's contents as an iolist (cheap: no flattening).
to_iolist(Cord) when is_binary(Cord) ->
    [Cord];
to_iolist(#cord{left = Left, right = Right}) ->
    [to_iolist(Left) | to_iolist(Right)].
%% Walk along a cord, character by character, in `Direction'.
%% F = fun(X) -> {more, F2} | {result, R}
%% X = char() | finish
%% If the characters run out before a result is produced, F(finish)
%% is called and must return {result, R}.
walk(Cord, Pos, Direction, F) ->
    %% Make this simple: extract the region we want to walk along.
    %% Only the wanted half of the split is bound; the other side is
    %% ignored (the original bound both halves in each branch, causing
    %% unused-variable warnings).
    Region = case Direction of
                 backward ->
                     {Before, _} = split(Cord, Pos),
                     Before;
                 forward ->
                     {_, After} = split(Cord, Pos - 1),
                     After
             end,
    case walk1(Region, Direction, F) of
        {result, R} ->
            R;
        {more, FNext} ->
            {result, R} = FNext(finish),
            R
    end.
walk1(<<>>, Direction, F) ->
{more, F};
walk1(Bin, Direction, F) when binary(Bin) ->
{Chunk, Char} = case Direction of
backward ->
Sz = size(Bin) - 1,
<<Front:Sz/binary, Back>> = Bin,
{Front, Back};
forward ->
<<Front, Back/binary>> = Bin,
{Back, Front}
end,
case F(Char) of
{more, F2} ->
walk1(Chunk, Direction, F2);
{result, R} ->
{result, R}
end;
walk1(Cord, Direction, F) when record(Cord, cord) ->
{First, Second} = case Direction of
backward ->
{Cord#cord.right, Cord#cord.left};
forward ->
{Cord#cord.left, Cord#cord.right}
end,
case walk1(First, Direction, F) of
{more, F2} ->
walk1(Second, Direction, F2);
{result, R} ->
{result, R}
end.
walker(Cord) ->
walker(Cord, forward).
walker(Cord, Direction) ->
walker(Cord, Direction, <<>>).
walker(Cord, Direction, More) when binary(Cord) ->
{Cord, Direction, More};
walker(Cord, forward, More) ->
walker(Cord#cord.left, forward, make_cord(Cord#cord.right, More));
walker(Cord, backward, More) ->
walker(Cord#cord.right, backward, make_cord(More, Cord#cord.left)).
walker_at_end({walked_to_end, _}) ->
true;
walker_at_end({C,_,Rest}) ->
cord_size(C) + cord_size(Rest) == 0.
walker_direction({walked_to_end, Direction}) -> Direction;
walker_direction({_, Direction, _}) -> Direction.
walker_next({walked_to_end, Direction}) ->
{done, {walked_to_end, Direction}};
walker_next({<<>>, Direction, More}) ->
case cord_size(More) of
0 ->
{done, {walked_to_end, Direction}};
_ ->
walker_next(walker(More, Direction))
end;
walker_next({<<A, Chunk/binary>>, forward, More}) ->
{A, {Chunk, forward, More}};
walker_next({Bin, backward, More}) ->
ChunkSz = size(Bin) - 1,
<<Chunk:ChunkSz/binary, A>> = Bin,
{A, {Chunk, backward, More}}.
walker_push(done, Walker) ->
Walker;
walker_push(X, {walked_to_end, Dir}) ->
{<<X>>, Dir, <<>>};
walker_push(X, {Bin, forward, More}) ->
{<<X>>, forward, make_cord(Bin, More)};
walker_push(X, {Bin, backward, More}) ->
{<<X>>, backward, make_cord(Bin, More)}.
walker_test() ->
Cord = make_cord(make_cord(<<1,2>>, <<3>>),
make_cord(<<4,5,6>>, <<7,8,9>>)),
W = walker(Cord, forward),
walker_test_loop(W).
walker_test_loop(W) ->
case walker_next(W) of
{done, _} ->
[];
{Ch, W2} ->
[Ch|walker_test_loop(W2)]
end.
test() ->
%% Test of binary cords
BinCord = <<1, 2, 3, 4, 5>> ,
{BinCord, <<>>} = split(BinCord, size(BinCord)),
{<<>>, BinCord} = split(BinCord, 0),
{<<1, 2>>, <<3, 4, 5>>} = split(BinCord, 2),
%% Test of a simple cord
Cord1 = make_cord(<<1, 2>>, <<3, 4, 5>>),
{<<>>, <<1, 2, 3, 4, 5>>} = binsplit(Cord1, 0),
{<<1, 2>>, <<3, 4, 5>>} = binsplit(Cord1, 2),
{<<1, 2, 3>>, <<4, 5>>} = binsplit(Cord1, 3),
%% A less trivial cord
%% (spaces before commas are to workaround an erlang-mode indent problem)
A = <<1, 2>> ,
B = <<3, 4>> ,
C = <<5, 6>> ,
D = <<7, 8>> ,
E = <<9>> ,
Cord2 = make_cord(make_cord(A, B), make_cord(C, make_cord(D, E))),
{<<>>, <<1, 2, 3, 4, 5, 6, 7, 8, 9>>} = binsplit(Cord2, 0),
{<<1, 2, 3, 4, 5>>, <<6, 7, 8, 9>>} = binsplit(Cord2, 5),
%% Joining
BinCord2 = to_binary(Cord2),
%% Why does = fail but == work?
true = <<BinCord/binary, BinCord2/binary>> == binjoin(BinCord, Cord2),
true = <<BinCord2/binary, BinCord/binary>> == binjoin(Cord2, Cord1),
true = <<BinCord2/binary, BinCord2/binary>> == binjoin(Cord2, Cord2),
%% Test "walking"
[1,2,3,4,5] = walk(Cord2, 5, backward, walk_test([])),
[9, 8, 7, 6, 5] = walk(Cord2, 5, forward, walk_test([])),
%% Test some operations
<<1, 2, 3, 4>> = delete(Cord2, 5, 5),
<<1, 2, 3,
1, 2, 3, 4, 5, 6, 7, 8, 9,
4, 5, 6, 7, 8, 9>> = cord:insert(Cord2, Cord2, 4),
ok.
walk_test(Acc) ->
fun(finish) ->
{result, Acc};
(X) ->
{more, walk_test([X|Acc])}
end.
test2() ->
A = <<1, 2, 3, 4>> ,
B = <<5, 6>> ,
C = <<>> ,
D = <<7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17>> ,
join(A, join(B, join(C, D))).
binsplit(Cord, Pos) ->
{Left, Right} = split(Cord, Pos),
{to_binary(Left), to_binary(Right)}.
binjoin(Left, Right) ->
to_binary(make_cord(Left, Right)).
benchmark(File) ->
{ok, Cord} = cord:new_from_file(File),
Sz = cord_size(Cord),
random:seed(),
Randoms = [random:uniform(Sz) || _ <- lists:seq(1, 100)],
timer:tc(?MODULE, split_with_each, [Cord, Randoms, 10]).
split_with_each(Cord, L, N) ->
split_with_each(Cord, L, L, N).
split_with_each(Cord, _, _, 0) ->
ok;
split_with_each(Cord, [H|T], L, N) ->
split(Cord, H),
split_with_each(Cord, T, L, N);
split_with_each(Cord, [], L, N) ->
split_with_each(Cord, L, L, N-1). | apps/pie/src/cord.erl | 0.538983 | 0.491822 | cord.erl | starcoder |
%%
%% Copyright (c) 2016-2018 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(ldb_util).
-author("<NAME> <<EMAIL>").
-include("ldb.hrl").
%% ldb_util callbacks
-export([new_crdt/2,
get_backend/0,
atom_to_binary/1,
binary_to_atom/1,
integer_to_atom/1,
unix_timestamp/0,
size/2,
plus/1,
plus/2,
connection_name/1,
connection_name/2]).
%% debug
-export([qs/1,
show/2]).
-define(STATE_PREFIX, "state_").
-define(OP_PREFIX, "op_").
%% @doc Creates a bottom CRDT from a type
%% or from an existing state-based CRDT.
new_crdt(type, CType) ->
{Type, Args} = extract_args(CType),
case Args of
[] ->
Type:new();
_ ->
Type:new(Args)
end;
new_crdt(state, CRDT) ->
%% defined in lasp-lang/types.
state_type:new(CRDT).
%% @doc Returns the backend module matching the configured ldb mode.
-spec get_backend() -> atom().
get_backend() ->
    Mode = ldb_config:get(ldb_mode, ?DEFAULT_MODE),
    case Mode of
        state_based -> ldb_state_based_backend;
        delta_based -> ldb_delta_based_backend;
        scuttlebutt -> ldb_scuttlebutt_backend;
        op_based -> ldb_op_based_backend
    end.
%% @doc Convert an atom to a UTF-8 encoded binary.
-spec atom_to_binary(atom()) -> binary().
atom_to_binary(Atom) ->
    erlang:atom_to_binary(Atom, utf8).

%% @doc Convert a UTF-8 encoded binary to an atom.
%% NOTE(review): creates a new atom for unseen input and atoms are
%% never garbage collected - confirm callers only feed this
%% trusted/bounded data.
-spec binary_to_atom(binary()) -> atom().
binary_to_atom(Binary) ->
    erlang:binary_to_atom(Binary, utf8).

%% @doc Convert an integer to an atom, e.g. 3 -> '3'.
%% NOTE(review): same atom-table caveat as binary_to_atom/1.
-spec integer_to_atom(integer()) -> atom().
integer_to_atom(Integer) ->
    list_to_atom(integer_to_list(Integer)).

%% @doc Current system time in seconds (UNIX epoch semantics per
%% erlang:system_time/1).
-spec unix_timestamp() -> timestamp().
unix_timestamp() ->
    erlang:system_time(second).
%% @doc
-spec size(crdt | ack_map | vector | matrix | op, term()) -> non_neg_integer().
size(crdt, CRDT) ->
state_type:crdt_size(CRDT);
size(ack_map, AckMap) ->
maps:size(AckMap);
%% scuttlebutt + op based
size(vector, VV) ->
vclock:size(VV);
size(matrix, Matrix) ->
%% matrix size is the sum of all vector sizes
%% plus the number of entries in the matrix
maps:fold(
fun(_, VV, Acc) -> Acc + 1 + vclock:size(VV) end,
0,
Matrix
);
size(op, Op) ->
state_type:op_size(Op).
%% @doc Sum a list of size metrics, starting from {0, 0}.
-spec plus([size_metric()]) -> size_metric().
plus(Metrics) ->
    lists:foldl(fun plus/2, {0, 0}, Metrics).

%% @doc Component-wise sum of two size metrics.
-spec plus(size_metric(), size_metric()) -> size_metric().
plus({XA, YA}, {XB, YB}) ->
    {XA + XB, YA + YB}.
%% @doc
-spec connection_name(ldb_node_id()) -> atom().
connection_name(Id) ->
RandomIndex = rand:uniform(?CONNECTIONS),
connection_name(Id, RandomIndex).
%% @doc
-spec connection_name(ldb_node_id(), non_neg_integer()) -> atom().
connection_name(Id, Index) ->
list_to_atom(atom_to_list(Id) ++ "_" ++ integer_to_list(Index)).
%% @private
extract_args({Type, Args}) ->
{get_type(Type), get_type(Args)};
extract_args(Type) ->
{get_type(Type), []}.
%% @private
get_type({A, B}) ->
{get_type(A), get_type(B)};
get_type([]) ->
[];
get_type([H|T]) ->
[get_type(H) | get_type(T)];
get_type(Type) ->
list_to_atom(prefix() ++ atom_to_list(Type)).
%% @doc Log Process queue length.
qs(ID) ->
{message_queue_len, MessageQueueLen} = process_info(self(), message_queue_len),
lager:info("MAILBOX ~p REMAINING: ~p", [ID, MessageQueueLen]).
%% @doc Pretty-print.
-spec show(id | dot | dots | vector | ops, term()) -> term().
show(id, Id) ->
lists:nth(1, string:split(atom_to_list(Id), "@"));
show(dot, {Id, Seq}) ->
{show(id, Id), Seq};
show(dots, Dots) ->
lists:map(fun(Dot) -> show(dot, Dot) end, Dots);
show(vector, VV) ->
%% only show seqs for VVs
Dots = show(dots, maps:to_list(VV)),
{_, Seqs} = lists:unzip(lists:sort(Dots)),
erlang:list_to_tuple(Seqs);
show(ops, Ops) ->
lists:map(
fun({_, _, Dot, VV, From}) ->
{show(dot, Dot), show(vector, VV), show(id, From)}
end,
Ops
).
%% @private Compute CRDT type prefix.
-spec prefix() -> string().
prefix() ->
case get_backend() of
ldb_op_based_backend -> ?OP_PREFIX;
_ -> ?STATE_PREFIX
end. | src/ldb_util.erl | 0.553143 | 0.444625 | ldb_util.erl | starcoder |
%%%-------------------------------------------------------------------
%%% @author <NAME>
%%% @copyright (C) 2019, <COMPANY>
%%% @doc
%%%
%%% @end
%%% Created : 29. Oct 2019 6:55 AM
%%%-------------------------------------------------------------------
-module(round_robin).
-author("<NAME>").
-export([init/0, add/1, next/1, test/0, test2/0]).
-include_lib("../macros.hrl").
%% creates a new `ets` table
%% The table is named after this module and holds two kinds of rows:
%%   {{Name, count}, N} - number of workers added under Name
%%   {{Name, next},  N} - rotation position last handed out for Name
-spec init() -> atom().
init() ->
    ets:new(?MODULE, [set, named_table]).
%% takes a module name and returns an atom of the
%% new worker name to use for this module ex: "area_server2".
%% Increments the stored worker count for Name as a side effect.
-spec add(Name :: atom()) -> atom().
add(Name) ->
    %% Reuse worker_count/1 instead of duplicating its ETS lookup
    %% (an unregistered name has an implicit count of 0).
    NewCount = worker_count(Name) + 1,
    ets:insert(?MODULE, {{Name, count}, NewCount}),
    % return the new worker name
    worker_name(Name, NewCount).
%% returns the current worker count based on the Name.
%% A pool that was never added to has an implicit count of 0.
-spec worker_count(Name :: atom()) -> non_neg_integer().
worker_count(Name) ->
    case ets:lookup(?MODULE, {Name, count}) of
        [] ->
            %% No workers registered under this name yet.
            %% (A stray `-' here previously made this branch return the
            %% unary-minus expression `-0', which only worked by accident.)
            0;
        [{{Name, count}, N}] ->
            N
    end.
%% returns the next worker in the round robin to receive work.
%% On success the bare worker atom is returned (not {ok, Atom}); see
%% test/0 below, e.g. area_server1 = next(area_server).  The old -spec
%% claimed {ok, NextName} which contradicted the implementation.
-spec next(Name :: atom()) -> NextName :: atom() | {error, no_workers}.
next(Name) ->
    % first check if there are any workers
    case worker_count(Name) of
        0 ->
            {error, no_workers};
        Count ->
            next_worker(Name, Count)
    end.
%% returns the next worker in the rotation, advancing the stored
%% rotation position.  Returns the bare worker atom (the old -spec
%% claimed {ok, NextName}, contradicting the implementation and test/0).
-spec next_worker(Name :: atom(), Count :: pos_integer()) -> NextName :: atom().
next_worker(Name, Count) ->
    % get the current worker from rotation (0 when never advanced)
    Next = case ets:lookup(?MODULE, {Name, next}) of
               [] ->
                   0;
               [{{Name, next}, N}] ->
                   N
           end,
    % advance, wrapping back to worker 1 past the last worker
    RealNext = if
                   Next =:= Count ->
                       1;
                   true ->
                       Next + 1
               end,
    % save current worker to rotation
    ets:insert(?MODULE, {{Name, next}, RealNext}),
    worker_name(Name, RealNext).
%% Builds the worker atom by appending N to Name,
%% e.g. worker_name(area_server, 2) -> area_server2.
-spec worker_name(Name :: atom(), N :: integer()) -> atom().
worker_name(Name, N) ->
    list_to_atom(lists:concat([Name, N])).
%% tests
%% run all tests
-spec test() -> ok.
test() ->
% initialize ETS table
init(),
% worker name helper
bob42 = worker_name(bob, 42),
% add some workers
area_server1 = add(area_server),
area_server2 = add(area_server),
foo1 = add(foo),
% worker counts
2 = worker_count(area_server),
0 = worker_count(bizbazz),
% worker doesn't exist
{error, no_workers} = next(bizbazz),
% get next worker from rotation
area_server1 = next(area_server),
area_server2 = next(area_server),
% rotation resets because no workers after worker 2
area_server1 = next(area_server),
% other worker type has it's own rotation
foo1 = next(foo),
% only 1 worker in this rotation
foo1 = next(foo),
ok.
test2() ->
init(),
tag1 = add(tag). | src/otp_system/round_robin.erl | 0.605566 | 0.494263 | round_robin.erl | starcoder |
%% @author <NAME> <<EMAIL>>
%% @copyright 2018 <NAME> <<EMAIL>>
%%
%% @doc 'Species3' rules implementation module for 'cgolam' app.
%%
%% This is based on 'Species1', following the failure of 'Species2'.
%% Here the strategy is simply to make colours/species more compatible
%% by changing the colour matching algorithm.
%%
%% Multiple colour match algorithms are implemented, and may be
%% changed by way of the colmatch_algorithm config, which may be set
%% to one of:
%%
%% <pre>
%% common_duo - the default, will match colours based on
%% there being only two common colour components
%% instead of three, though three is matched
%% first if possible.
%%
%% common_highbit - two colours are considered the same if
%% any of the top four MSBs are common
%% for R, G and B colour components.
%% </pre>
%%
%% This does at last yield a result that is interesting, due to the
%% inter species interaction being sufficiently favourable whilst
%% still fundamentally maintaining the integrity of Conway's Game
%% of Life rules.
%%
%% A single colour will still operate exactly according to the original
%% CGoL rules, but the colours, when interacting are not purely
%% cooperative... though I'm not sure if you would call them
%% competitive as such, a bit like CGoL generally, it's a bit of
%% a curiosity and a fascinating demonstration of emergent behaviour.
-module(cgolam_rules_species3).
-behaviour(cgolam_rules).
-export([new/1, calc/4, init/4]).
-record(cgolam_rules_species3, {
field_mod :: module(),
colmatch_alg :: atom()
}).
-type cgolam_rules_species3() :: cgolam_rules:rules() .
-export_type([cgolam_rules_species3/0]).
%% @private
-spec new
(RulesModCfg :: list()) ->
cgolam_rules_species3() .
new(RulesModCfg) ->
FieldMod = case lists:keysearch(field, 1, RulesModCfg) of
{value, {field, M}} -> M;
{value, {field, M, _C}} -> M
end,
ColMatchAlg = case lists:keysearch(colmatch_algorithm, 1, RulesModCfg) of
{value, {colmatch_algorithm, CMA}} -> CMA;
false -> common_highbit
end,
#cgolam_rules_species3{
field_mod = FieldMod,
colmatch_alg = ColMatchAlg
}
.
%% @private
-spec calc
(Rules :: cgolam_rules_species3(), Field :: cgolam_field:field(), X :: integer(), Y :: integer()) ->
CellState :: term() .
calc(#cgolam_rules_species3{
field_mod = FieldMod,
colmatch_alg = ColMatchAlg
}, Field, X, Y) ->
SurroundingRGBs = lists:foldl(
fun ({Xdiff, Ydiff}, SurroundingRGBsAcc) ->
case FieldMod:get(Field, X + Xdiff, Y + Ydiff) of
false -> SurroundingRGBsAcc;
{col, RGB} -> [RGB | SurroundingRGBsAcc]
end
end,
[],
[{-1, -1}, {0, -1}, {1, -1},
{-1, 0}, {1, 0},
{-1, 1}, {0, 1}, {1, 1}]
),
MidCol = FieldMod:get(Field, X, Y),
MidRGB = case MidCol of
false -> false;
{col, RGB} -> RGB
end,
UniqueRGBs = lists:foldl(
fun (RGB, UniqueRGBsAcc) ->
Repeat = lists:any(
fun (UniqueRGB) -> tolerated_same(RGB, UniqueRGB, ColMatchAlg) /= false end,
UniqueRGBsAcc
),
if Repeat -> UniqueRGBsAcc; true -> [RGB | UniqueRGBsAcc] end
end,
[],
case MidCol of
{col, MidRGB} -> [MidRGB | SurroundingRGBs];
false -> SurroundingRGBs
end
),
{ContendingRGBs, MidRGBContending} = lists:foldl(
fun (RGB, {ContendingRGBsAcc, MidCellRGBContending}) ->
SurroundingSameRGBs = [
SurroundingRGB ||
SurroundingRGB <- SurroundingRGBs,
tolerated_same(SurroundingRGB, RGB, ColMatchAlg) /= false
],
case cgol_rule(length(SurroundingSameRGBs)) of
true ->
{[{RGB, SurroundingSameRGBs} | ContendingRGBsAcc], MidCellRGBContending};
false ->
{ContendingRGBsAcc, MidCellRGBContending};
unchanged when MidRGB == false ->
{ContendingRGBsAcc, MidCellRGBContending};
unchanged ->
case tolerated_same(MidRGB, RGB, ColMatchAlg) of
false -> {ContendingRGBsAcc, MidCellRGBContending};
_Alive -> {[{RGB, SurroundingSameRGBs} | ContendingRGBsAcc], true}
end
end
end,
{[], false},
UniqueRGBs
),
if ContendingRGBs == [] ->
false;
MidRGBContending ->
MidCol;
true ->
{col, adjust_brightness(
merge_cellstates(
lists:flatten(
[Contribs || {_RGB, Contribs} <- ContendingRGBs]
)
)
)}
end
.
%% @private
tolerated_same({Ra, Ga, Ba}, {Rb, Gb, Bb}, common_duo)
when ((Ra band 16#80) == (Rb band 16#80))
and ((Ga band 16#80) == (Gb band 16#80))
and ((Ba band 16#80) == (Bb band 16#80)) ->
{Ra band 16#80, Ga band 16#80, Ba band 16#80}
;
tolerated_same({Ra, Ga, Ba}, {Rb, Gb, Bb}, common_duo)
when ((Ra band 16#80) == (Rb band 16#80))
and ((Ga band 16#80) == (Gb band 16#80)) ->
{Ra band 16#80, Ga band 16#80, Ba band Bb}
;
tolerated_same({Ra, Ga, Ba}, {Rb, Gb, Bb}, common_duo)
when ((Ra band 16#80) == (Rb band 16#80))
and ((Ba band 16#80) == (Bb band 16#80)) ->
{Ra band 16#80, Ga band Gb, Ba band 16#80}
;
tolerated_same({Ra, Ga, Ba}, {Rb, Gb, Bb}, common_duo)
when ((Ga band 16#80) == (Gb band 16#80))
and ((Ba band 16#80) == (Bb band 16#80)) ->
{Ra band Rb, Ga band 16#80, Ba band 16#80}
;
tolerated_same(_A, _B, common_duo) ->
false
;
tolerated_same(A, B, intolerant_duo) when (A == B) ->
A
;
tolerated_same({Ra, Ga, Ba}, {Rb, Gb, Bb}, intolerant_duo) when (Ra == Rb) and (Ga == Gb) ->
{Ra band 16#80, Ga band 16#80, Ba band Bb}
;
tolerated_same({Ra, Ga, Ba}, {Rb, Gb, Bb}, intolerant_duo) when (Ra == Rb) and (Ba == Bb) ->
{Ra band 16#80, Ga band Gb, Ba band 16#80}
;
tolerated_same({Ra, Ga, Ba}, {Rb, Gb, Bb}, intolerant_duo) when (Ga == Gb) and (Ba == Bb) ->
{Ra band Rb, Ga band 16#80, Ba band 16#80}
;
tolerated_same(_A, _B, intolerant_duo) ->
false
;
tolerated_same({Ra, Ga, Ba}, {Rb, Gb, Bb}, common_highbit)
when ((Ra band 16#80) == (Rb band 16#80))
and ((Ga band 16#80) == (Gb band 16#80))
and ((Ba band 16#80) == (Bb band 16#80)) ->
{Ra band 16#80, Ga band 16#80, Ba band 16#80}
;
tolerated_same({Ra, Ga, Ba}, {Rb, Gb, Bb}, common_highbit)
when ((Ra band 16#40) == (Rb band 16#40))
and ((Ga band 16#40) == (Gb band 16#40))
and ((Ba band 16#40) == (Bb band 16#40)) ->
{Ra band 16#40, Ga band 16#40, Ba band 16#40}
;
tolerated_same({Ra, Ga, Ba}, {Rb, Gb, Bb}, common_highbit)
when ((Ra band 16#20) == (Rb band 16#20))
and ((Ga band 16#20) == (Gb band 16#20))
and ((Ba band 16#20) == (Bb band 16#20)) ->
{Ra band 16#20, Ga band 16#20, Ba band 16#20}
;
tolerated_same({Ra, Ga, Ba}, {Rb, Gb, Bb}, common_highbit)
when ((Ra band 16#10) == (Rb band 16#10))
and ((Ga band 16#10) == (Gb band 16#10))
and ((Ba band 16#10) == (Bb band 16#10)) ->
{Ra band 16#10, Ga band 16#10, Ba band 16#10}
;
tolerated_same(_A, _B, common_highbit) ->
false
.
%% @private
%% Conway's Game of Life rule on the live-neighbour count:
%% 3 neighbours -> the cell lives (true), 2 -> the cell keeps its
%% current state (unchanged), anything else -> it dies (false).
cgol_rule(3) -> true;
cgol_rule(2) -> unchanged;
cgol_rule(N) when N > 3; N < 2 -> false.
%% @private
%% Average a non-empty list of {R, G, B} colours component-wise,
%% truncating each mean to an integer.
merge_cellstates([_ | _] = Colours) ->
    {SumR, SumG, SumB} =
        lists:foldl(fun({R, G, B}, {AccR, AccG, AccB}) ->
                            {AccR + R, AccG + G, AccB + B}
                    end,
                    {0, 0, 0},
                    Colours),
    N = length(Colours),
    {trunc(SumR / N), trunc(SumG / N), trunc(SumB / N)}.
adjust_brightness({R, G, B}) when (R >= G) and (R >= B) and (R /= 0) ->
adjust_brightness({R, G, B}, 255 / R)
;
adjust_brightness({R, G, B}) when (G >= R) and (G >= B) and (G /= 0) ->
adjust_brightness({R, G, B}, 255 / G)
;
adjust_brightness({R, G, B}) when (B >= R) and (B >= G) and (B /= 0) ->
adjust_brightness({R, G, B}, 255 / B)
;
adjust_brightness({0, 0, 0}) ->
{0, 0, 0}
.
adjust_brightness({R, G, B}, F) ->
{trunc(R*F), trunc(G*F), trunc(B*F)}
.
%% @private
-spec init
(Rules :: cgolam_rules_species3(), Field0 :: cgolam_field:field(), Type :: atom(), InitCfg :: list()) ->
Field1 :: cgolam_field:field() .
init(#cgolam_rules_species3{field_mod=FieldMod}, Field0, default, InitCfg) ->
Width = FieldMod:width(Field0),
Height = FieldMod:height(Field0),
Clusters = case lists:keysearch(clusters, 1, InitCfg) of
{value, {clusters, I}} -> I;
false -> 3
end,
ClusterSizeCfg = case lists:keysearch(cluster_size, 1, InitCfg) of
{value, {cluster_size, CSC}} -> CSC / 100;
false -> 1
end,
ClusterDensityCfg = case lists:keysearch(cluster_density, 1, InitCfg) of
{value, {cluster_density, CDC}} -> CDC / 100;
false -> 1
end,
ClusterCols = [{255,0,0}, {0,255,0}, {0,0,255}, {255,255,0}, {255,0,255}, {0,255,255}],
ClusterSize = trunc(math:sqrt(Width * Height) / 2 * ClusterSizeCfg),
ClusterDensity = trunc(ClusterSize * ClusterSize / 20 * ClusterDensityCfg),
{Field1, _RemainingCols} = lists:foldl(
fun (_, {Field01Acc, [Col | ClusterCols01AccT]}) ->
% per cluster, accumulator is field and depleting colour selection
ClusterX = trunc(rand:uniform(Width)),
ClusterY = trunc(rand:uniform(Height)),
Field01Acc2 = lists:foldl(
fun (_, Field02Acc) ->
% per cell in cluster,
CellX = trunc((rand:uniform()-0.5)*(rand:uniform()-0.5) * ClusterSize) + ClusterX,
CellY = trunc((rand:uniform()-0.5)*(rand:uniform()-0.5) * ClusterSize) + ClusterY,
FieldMod:set(Field02Acc, CellX, CellY, {col, Col})
end,
Field01Acc,
lists:seq(1, ClusterDensity)
),
{Field01Acc2, ClusterCols01AccT};
(_, {Field01Acc, []}) ->
% cycle round colours if run out
{Field01Acc, ClusterCols}
end,
{Field0, ClusterCols},
lists:seq(1, Clusters)
),
Field1
;
init(#cgolam_rules_species3{field_mod=FieldMod}, Field0, term, InitCfg) ->
{value, {set, InitTerm}} = lists:keysearch(set, 1, InitCfg),
lists:foldl(
fun ({{X, Y}, Col = {col, _RGB}}, Field0Acc) ->
FieldMod:set(Field0Acc, X, Y, Col)
end,
Field0,
InitTerm
)
. | src/cgolam_rules_species3.erl | 0.531696 | 0.443299 | cgolam_rules_species3.erl | starcoder |
%%%------------------------------------------------------------------------
%% Copyright 2019, OpenTelemetry Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%% Span behaviour.
%% @end
%%%-------------------------------------------------------------------------
-module(ot_span).
-export([start_span/3,
finish_span/2,
get_ctx/2,
is_recording_events/2,
set_attribute/4,
set_attributes/3,
add_events/3,
add_links/3,
set_status/3,
update_name/3]).
-type start_opts() :: #{parent => undefined | opentelemetry:span() | opentelemetry:span_ctx(),
sampler => ot_sampler:sampler(),
links => opentelemetry:links(),
is_recorded => boolean(),
kind => opentelemetry:span_kind()}.
-export_type([start_opts/0]).
-callback start_span(opentelemetry:span_name(), start_opts()) -> opentelemetry:span_ctx().
-callback finish_span(opentelemetry:span_ctx()) -> boolean() | {error, term()}.
-callback get_ctx(opentelemetry:span()) -> opentelemetry:span_ctx().
-callback is_recording_events(opentelemetry:span_ctx()) -> boolean().
-callback set_attribute(opentelemetry:span_ctx(),
opentelemetry:attribute_key(),
opentelemetry:attribute_value()) -> boolean().
-callback set_attributes(opentelemetry:span_ctx(), opentelemetry:attributes()) -> boolean().
-callback add_events(opentelemetry:span_ctx(), opentelemetry:time_events()) -> boolean().
-callback set_status(opentelemetry:span_ctx(), opentelemetry:status()) -> boolean().
-callback update_name(opentelemetry:span_ctx(), opentelemetry:span_name()) -> boolean().
%% Delegation helpers: each function forwards to the span
%% implementation module chosen by the caller (`Module'), passing the
%% remaining arguments through unchanged.
start_span(Module, Name, Opts) ->
    Module:start_span(Name, Opts).

finish_span(Module, Ctx) ->
    Module:finish_span(Ctx).

get_ctx(Module, Span) ->
    Module:get_ctx(Span).

is_recording_events(Module, SpanCtx) ->
    Module:is_recording_events(SpanCtx).

set_attribute(Module, SpanCtx, Key, Value) ->
    Module:set_attribute(SpanCtx, Key, Value).

set_attributes(Module, SpanCtx, Attributes) ->
    Module:set_attributes(SpanCtx, Attributes).

add_events(Module, SpanCtx, Events) ->
    Module:add_events(SpanCtx, Events).

%% NOTE(review): add_links/3 has no matching -callback declaration
%% above, unlike every other delegated function - confirm whether a
%% callback is missing.
add_links(Module, SpanCtx, Links) ->
    Module:add_links(SpanCtx, Links).

set_status(Module, SpanCtx, Status) ->
    Module:set_status(SpanCtx, Status).

update_name(Module, SpanCtx, Name) ->
    Module:update_name(SpanCtx, Name). | src/ot_span.erl | 0.691185 | 0.429041 | ot_span.erl | starcoder
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(vtreeviz).
-export([visualize/2]).
-record(node, {
% type = inner | leaf
type = inner
}).
%% @doc Emit a Graphviz "digraph" description of the vtree rooted at
%% ParentPos to standard output. Fd is the couch_file handle the tree
%% was written to. Always returns ok.
visualize(Fd, ParentPos) ->
    io:format("digraph G~n{~n node [shape = record];~n", []),
    print_children(Fd, ParentPos),
    io:format("}~n", []),
    ok.
%% @doc Read the node stored at Pos and return its child list: integer
%% file positions for inner nodes, or full {Mbr, Meta, {Id, Val}}
%% entries for leaf nodes. Crashes (badmatch) if the read fails.
get_children(Fd, Pos) ->
    {ok, {_RootMbr, _RootMeta, Children}} = couch_file:pread_term(Fd, Pos),
    Children.
%% @doc Print the Graphviz record node describing the children of the
%% node at ParentPos, then emit edges (and recurse) via print_edges/3.
%% Inner nodes store child *positions* (integers) while leaves store
%% the entries themselves — that is how the two cases are told apart.
print_children(Fd, ParentPos) ->
    ChildrenPos = get_children(Fd, ParentPos),
    ChildrenLabels = if is_integer(hd(ChildrenPos)) ->
        %% Inner node: load each child's MBR/meta to build its label.
        ChildrenMbrMeta = lists:map(fun(ChildPos) ->
            {ok, {Mbr, Meta, _Children}} = couch_file:pread_term(Fd, ChildPos),
            {Mbr, Meta}
        end, ChildrenPos),
        node_labels(ChildrenPos, ChildrenMbrMeta);
    true ->
        %% Leaf node: the entries carry their own MBR/meta/id.
        node_labels(ChildrenPos)
    end,
    io:format("node~w [label=\"{~s}\"];~n", [ParentPos, ChildrenLabels]),
    print_edges(Fd, ParentPos, ChildrenPos).
% leaf nodes
%% @doc Build a "|"-separated Graphviz record label for leaf entries,
%% showing each entry's id, MBR and node type.
node_labels(Children) ->
    string_join("|", Children, fun({Mbr, Meta, {Id, _Val}}) ->
        io_lib:format("~s ~w ~w", [Id, tuple_to_list(Mbr), Meta#node.type])
    end).
% inner nodes
%% @doc Build the record label for an inner node: each child gets a
%% "<fPos>" port so that edges can attach to the matching field (see
%% print_edges/3, which targets node:fPos).
node_labels(ChildrenPos, ChildrenMbrMeta) ->
    Children = lists:zip(ChildrenPos, ChildrenMbrMeta),
    ChildrenLabels = lists:map(fun({ChildPos, {ChildMbr, ChildMeta}}) ->
        io_lib:format("<f~w>~w ~w ~w", [ChildPos, ChildPos,
            tuple_to_list(ChildMbr), ChildMeta#node.type])
    end, Children),
    string_join("|", ChildrenLabels).
%% @doc Emit one edge per integer child position and recurse into that
%% child. Leaf entries are tuples (not integers), so the 'true' branch
%% terminates the recursion at the leaf level.
print_edges(Fd, ParentPos, Children) ->
    lists:foreach(fun(ChildPos) ->
        if is_integer(ChildPos) ->
            io:format("node~w:f~w -> node~w~n", [ParentPos, ChildPos, ChildPos]),
            print_children(Fd, ChildPos);
        true ->
            ok
        end
    end, Children).
% From http://www.trapexit.org/String_join_with (2010-03-12)
%% @doc Join the elements of L with the separator Join, using the
%% identity conversion. (Final line repaired: the original carried a
%% dataset extraction artifact after the closing period.)
string_join(Join, L) ->
    string_join(Join, L, fun(E) -> E end).

%% @doc Join the elements of a list with the separator Join, converting
%% each element with Conv first. Returns a flat string; the empty list
%% is returned unchanged.
string_join(_Join, L = [], _Conv) ->
    L;
string_join(Join, [H | Q], Conv) ->
    lists:flatten(lists:concat(
        [Conv(H) | lists:map(fun(E) -> [Join, Conv(E)] end, Q)]
    )).
%% @doc
%% The buffer stores temporary, which is implicitly discarded,
%% and permanent data, which are explicitly discarded.
%%
%% Data is always delivered in the order they are buffered.
%% The temporary data is stored in a queue. Permanent data
%% is stored in a wheel for performance and to avoid discards.
%% @end
-module(gen_stage_buffer).
-export(
[
new/1,
estimate_size/1,
take_count_or_until_permanent/2,
store_temporary/3,
store_permanent_unless_empty/2
]).
%% @doc Builds a new buffer as {TempQueue, TempCount, Wheel}.
%% Note: 'infinity' (an atom) compares greater than any integer in
%% Erlang term order, so new(infinity) also passes the guard and gets a
%% unique-ref wheel from init_wheel/1.
new(Size) when Size > 0 ->
    {queue:new(), 0, init_wheel(Size)}.
%% @doc
%% Returns the estimated size of the buffered data: the number of
%% temporary entries only. Permanent entries held on the wheel are not
%% counted.
%% @end
estimate_size({_Queue, TempCount, _Wheel}) ->
    TempCount.
%% @doc
%% Stores the temporary entries.
%%
%% Keep controls which side to keep — first or last — when the buffer
%% exceeds its capacity.
%%
%% It returns a new buffer, the amount of discarded messages and
%% any permanent entry that had to be emitted while discarding.
%% @end
store_temporary({Queue, Counter, Infos}, Temps, Keep) when is_list(Temps) ->
    {{Excess, Queue1, Counter1}, Perms, Infos1} =
        store_temporary(Keep, Temps, Queue, Counter, capacity_wheel(Infos), Infos),
    {{Queue1, Counter1, Infos1}, Excess, Perms}.

%% Fast path: an empty unbounded buffer can be built straight from the
%% input list without queueing one element at a time.
store_temporary(_Keep, Temps, _Queue, 0, infinity, Infos) ->
    {{0, queue:from_list(Temps), length(Temps)}, [], Infos};
store_temporary(_Keep, Temps, Queue, Counter, infinity, Infos) ->
    {queue_infinity(Temps, Queue, Counter), [], Infos};
store_temporary(first, Temps, Queue, Counter, Max, Infos) ->
    {queue_first(Temps, Queue, Counter, Max), [], Infos};
store_temporary(last, Temps, Queue, Counter, Max, Infos) ->
    queue_last(Temps, Queue, 0, Counter, Max, [], Infos).
%% Infinity
%% Unbounded capacity: enqueue every entry; nothing is ever discarded,
%% so the excess count is always 0.
queue_infinity(Temps, Queue0, Counter0) ->
    lists:foldl(
        fun(Temp, {Excess, Queue, Counter}) ->
            {Excess, queue:in(Temp, Queue), Counter + 1}
        end,
        {0, Queue0, Counter0},
        Temps).
%% First
%% Keep the oldest entries: once the buffer holds Max items, all
%% remaining input elements are discarded and reported as excess.
queue_first(Temps, Queue, Counter, Max) ->
    case {Temps, Counter} of
        {[], _} ->
            {0, Queue, Counter};
        {_, Max} ->
            {length(Temps), Queue, Max};
        {[Temp | Rest], _} ->
            queue_first(Rest, queue:in(Temp, Queue), Counter + 1, Max)
    end.
%% Last
%% Keep the newest entries: once Counter reaches Max, each new entry
%% drops the oldest queued one and advances the wheel, possibly
%% emitting the permanents stored at the discarded slot.
queue_last([], Queue, Excess, Counter, _Max, Perms, Wheel) ->
    {{Excess, Queue, Counter}, Perms, Wheel};
queue_last([Temp | R], Queue, Excess, Max, Max, Perms, Wheel) ->
    %% At capacity: drop the head, append the new entry at the rear.
    NewQueue = queue:in(Temp, queue:drop(Queue)),
    case pop_and_increment_wheel(Wheel) of
        {ok, NewPerms, NewWheel} ->
            queue_last(R, NewQueue, Excess + 1, Max, Max, Perms ++ NewPerms, NewWheel);
        {error, NewWheel} ->
            queue_last(R, NewQueue, Excess + 1, Max, Max, Perms, NewWheel)
    end;
queue_last([Temp | R], Queue, Excess, Counter, Max, Perms, Wheel) ->
    queue_last(R, queue:in(Temp, Queue), Excess, Counter + 1, Max, Perms, Wheel).
%% @doc
%% Puts the permanent entry in the buffer unless the buffer is empty.
%% @end
store_permanent_unless_empty(Buffer, Perm) ->
    case Buffer of
        {_Queue, 0, _Infos} ->
            empty;
        {Queue, Count, Infos} when is_reference(Infos) ->
            %% Unbounded buffer: store the permanent in-line, tagged with
            %% the buffer's unique ref so the take path can distinguish
            %% it from temporary entries.
            {ok, {queue:in({Infos, Perm}, Queue), Count + 1, Infos}};
        {Queue, Count, Infos} ->
            %% Bounded buffer: record the permanent on the wheel at the
            %% slot of the last buffered temporary.
            {ok, {Queue, Count, put_wheel(Infos, Count, Perm)}}
    end.
%% @doc
%% Take count temporary from the buffer or until we find a permanent.
%%
%% Return empty if nothing was taken.
%% @end
take_count_or_until_permanent({_Queue, Buffer, _Infos}, Counter) when Buffer =:= 0 orelse Counter =:= 0 ->
    empty;
take_count_or_until_permanent({Queue, Buffer, Infos}, Counter) ->
    take_count_or_until_permanent(Counter, [], Queue, Buffer, Infos).

%% Demand met: return the remaining buffer, no permanents emitted.
take_count_or_until_permanent(0, Temps, Queue, Buffer, Infos) ->
    {ok, {Queue, Buffer, Infos}, 0, lists:reverse(Temps), []};
%% Buffer drained: report the unmet demand left in Counter.
take_count_or_until_permanent(Counter, Temps, Queue, 0, Infos) ->
    {ok, {Queue, 0, Infos}, Counter, lists:reverse(Temps), []};
%% Unbounded buffer (Infos is a ref): permanents are stored in-line as
%% {Ref, Perm} markers; dequeuing one ends the take immediately.
take_count_or_until_permanent(Counter, Temps, Queue, Buffer, Infos) when is_reference(Infos) ->
    {{value, Value}, NewQueue} = queue:out(Queue),
    case Value of
        {Infos, Perm} ->
            {ok, {NewQueue, Buffer - 1, Infos}, Counter, lists:reverse(Temps), [Perm]};
        Temp ->
            take_count_or_until_permanent(Counter - 1, [Temp | Temps], NewQueue, Buffer - 1, Infos)
    end;
%% Bounded buffer: permanents live on the wheel; advancing it may emit
%% the permanents stored at the current slot, which ends the take.
take_count_or_until_permanent(Counter, Temps, Queue, Buffer, Infos) ->
    {{value, Temp}, NewQueue} = queue:out(Queue),
    case pop_and_increment_wheel(Infos) of
        {ok, Perms, NewInfos} ->
            {ok, {NewQueue, Buffer - 1, NewInfos}, Counter - 1, lists:reverse([Temp | Temps]), Perms};
        {error, NewInfos} ->
            take_count_or_until_permanent(Counter - 1, [Temp | Temps], NewQueue, Buffer - 1, NewInfos)
    end.
%% Wheel helpers
%% The wheel is either a bare capacity (an integer, or a unique ref for
%% 'infinity') while no permanents are stored, or {Pos, Max, Slots}
%% once put_wheel/3 has been used.
init_wheel(Capacity) ->
    case Capacity of
        infinity -> make_ref();
        _ -> Capacity
    end.
%% Capacity of a wheel in any of its three representations: a unique
%% ref means unbounded, otherwise the Max slot count is returned.
capacity_wheel(Wheel) when is_reference(Wheel) ->
    infinity;
capacity_wheel({_Pos, Max, _Slots}) ->
    Max;
capacity_wheel(Max) ->
    Max.
%% Record Perm on the wheel slot that corresponds to the Count-th
%% buffered temporary, creating the {Pos, Max, Slots} wheel structure
%% on first use. Newer permanents are prepended to a slot's list.
put_wheel({Pos, Max, Slots}, Count, Perm) ->
    Slot = (Pos + Count - 1) rem Max,
    Updated = case Slots of
                  #{Slot := Existing} -> Slots#{Slot := [Perm | Existing]};
                  _ -> Slots#{Slot => [Perm]}
              end,
    {Pos, Max, Updated};
put_wheel(Max, Count, Perm) ->
    {0, Max, #{(Count - 1) rem Max => [Perm]}}.
%% Advance the wheel by one slot. Returns {ok, Perms, Wheel1} when the
%% departing slot held permanents, {error, Wheel1} otherwise. When the
%% last occupied slot is consumed the wheel collapses back to its bare
%% capacity. (Final line repaired: the original carried a dataset
%% extraction artifact; the `if` was also replaced by a guard, avoiding
%% map construction inside a guard expression.)
pop_and_increment_wheel({Pos, Max, Wheel}) ->
    NextPos = (Pos + 1) rem Max,
    case maps:take(Pos, Wheel) of
        {Perms, Remaining} when map_size(Remaining) =:= 0 ->
            %% Last permanent consumed: collapse to bare capacity.
            {ok, Perms, Max};
        {Perms, Remaining} ->
            {ok, Perms, {NextPos, Max, Remaining}};
        error ->
            {error, {NextPos, Max, Wheel}}
    end;
pop_and_increment_wheel(Max) ->
    %% Bare wheel (no permanents stored): nothing to emit.
    {error, Max}.
%%%-------------------------------------------------------------------
%%% @doc
%%% Based on SumoDB `sumo_type'.
%%%
%%% @reference See
%%% <a href="https://github.com/inaka/sumo_db/blob/master/src/utils/sumo_type.erl">SumoDB</a>
%%% @end
%%% @end
%%%-------------------------------------------------------------------
-module(xdb_schema_type).
-export([
cast/2,
cast_field_name/1
]).
%%%===================================================================
%%% API
%%%===================================================================
-spec cast(atom(), term()) -> {ok, term()} | {error, {invalid, term()}}.
%% Coerce Data to the given schema type. 'undefined' passes through for
%% every type; anything that cannot be coerced yields
%% {error, {invalid, Data}} via the primitives() predicate fallback.
cast(_, undefined) ->
    {ok, undefined};
%% Numeric binaries: reparse via the char-list clauses below.
cast(Type, Data) when is_binary(Data) andalso (Type == integer orelse Type == float) ->
    cast(Type, binary_to_list(Data));
%% Integer -> float promotion.
cast(float, Data) when is_integer(Data) ->
    {ok, Data + 0.0};
cast(float, Data) when is_list(Data) ->
    cast_float(Data);
%% Float -> integer truncates toward zero.
cast(integer, Data) when is_float(Data) ->
    {ok, trunc(Data)};
cast(integer, Data) when is_list(Data) ->
    cast_integer(Data);
%% Printable char lists are treated as text and retried as binaries;
%% non-printable lists are invalid (binary/custom are exempt and fall
%% through to the predicate check at the bottom).
cast(Type, Data) when is_list(Data), Type /= binary, Type /= custom ->
    case io_lib:printable_list(Data) of
        true -> cast(Type, list_to_binary(Data));
        false -> {error, {invalid, Data}}
    end;
cast(string, Data) when is_binary(Data); is_atom(Data); is_number(Data) ->
    {ok, xdb_lib:to_bin(Data)};
%% Booleans accept the textual forms "true"/"1" and "false"/"0".
cast(boolean, Data) when is_binary(Data) ->
    case lists:member(Data, [<<"true">>, <<"1">>]) of
        true ->
            {ok, true};
        false ->
            case lists:member(Data, [<<"false">>, <<"0">>]) of
                true -> {ok, false};
                false -> {error, {invalid, Data}}
            end
    end;
%% Dates/datetimes given as binaries are parsed as ISO 8601.
cast(Type, Data) when is_binary(Data) andalso (Type == date orelse Type == datetime) ->
    try
        {ok, iso8601:parse(Data)}
    catch
        _:_ -> {error, {invalid, Data}}
    end;
%% Fallback: accept Data as-is iff the type's predicate approves it.
cast(Type, Data) ->
    Fun = maps:get(Type, primitives(), fun(_) -> false end),
    case Fun(Data) of
        true -> {ok, Data};
        false -> {error, {invalid, Data}}
    end.
-spec cast_field_name(atom() | binary()) -> atom().
%% Normalize a schema field name to an atom.
%% NOTE(review): binary_to_atom/2 mints a new atom for every distinct
%% binary and atoms are never garbage collected, so feeding untrusted
%% field names here can exhaust the atom table. Consider
%% binary_to_existing_atom/2 if callers only ever pass known schema
%% fields — confirm call sites before changing.
cast_field_name(Data) when is_atom(Data) ->
    Data;
cast_field_name(Data) when is_binary(Data) ->
    binary_to_atom(Data, utf8).
%%%===================================================================
%%% Internal functions
%%%===================================================================
%% @private
%% Type name -> validation predicate, used by cast/2 as the final
%% membership check. Both date and datetime delegate to
%% xdb_lib:is_datetime/1, and 'custom' accepts any term.
primitives() ->
    #{
        string => fun erlang:is_binary/1,
        integer => fun erlang:is_integer/1,
        float => fun erlang:is_float/1,
        boolean => fun erlang:is_boolean/1,
        date => fun xdb_lib:is_datetime/1,
        datetime => fun xdb_lib:is_datetime/1,
        binary => fun erlang:is_binary/1,
        custom => fun(_) -> true end
    }.
%% @private
%% Parse a float out of a string; falls back to integer parsing (and
%% promotes the result to float) when there is no decimal point.
%% Note: trailing garbage after a valid number is ignored, matching
%% string:to_float/1 semantics (e.g. "1.5abc" -> {ok, 1.5}).
cast_float(Data) ->
    case string:to_float(Data) of
        {error, no_float} ->
            case cast_integer(Data) of
                {ok, Integer} -> {ok, Integer + 0.0};
                Error -> Error
            end;
        {Float, _Rest} ->
            {ok, Float}
    end.

%% @private
%% Parse an integer out of a string; returns {error, {invalid, Data}}
%% when the string does not start with an integer. (Final line
%% repaired: the original carried a dataset extraction artifact.)
cast_integer(Data) ->
    case string:to_integer(Data) of
        {error, no_integer} -> {error, {invalid, Data}};
        {Integer, _Rest} -> {ok, Integer}
    end.
%%% vi:ts=4 sw=4 et
%%%-------------------------------------------------------------------
%%% @author <NAME> <<EMAIL>>
%%% @copyright 2011 Erlware, LLC.
%%% @doc
%%% A module that supports association of keys to values. A map cannot
%%% contain duplicate keys; each key can map to at most one value.
%%%
%%% This interface is a member of the Erlware Commons Library.
%%% @end
%%%-------------------------------------------------------------------
-module(ec_dictionary).
%% API
-export([new/1,
has_key/2,
get/2,
get/3,
add/3,
remove/2,
has_value/2,
size/1,
to_list/1,
from_list/2,
keys/1]).
-export_type([dictionary/2,
key/1,
value/1]).
%%%===================================================================
%%% Types
%%%===================================================================
-record(dict_t,
{callback,
data}).
%% This should be opaque, but that kills dialyzer so for now we export it
%% however you should not rely on the internal representation here
-type dictionary(_K, _V) :: #dict_t{}.
-type key(T) :: T.
-type value(T) :: T.
-ifdef(have_callback_support).
-callback new() -> any().
-callback has_key(key(any()), any()) -> boolean().
-callback get(key(any()), any()) -> any().
-callback add(key(any()), value(any()), T) -> T.
-callback remove(key(any()), T) -> T.
-callback has_value(value(any()), any()) -> boolean().
-callback size(any()) -> non_neg_integer().
-callback to_list(any()) -> [{key(any()), value(any())}].
-callback from_list([{key(any()), value(any())}]) -> any().
-callback keys(any()) -> [key(any())].
-else.
%% In the case where R14 or lower is being used to compile the system
%% we need to export a behaviour info
-export([behaviour_info/1]).
-spec behaviour_info(atom()) -> [{atom(), arity()}] | undefined.
%% Pre-R15 fallback: advertise the callbacks that a dictionary
%% implementation module must export.
behaviour_info(Kind) ->
    case Kind of
        callbacks ->
            [{new, 0}, {has_key, 2}, {get, 2}, {add, 3}, {remove, 2},
             {has_value, 2}, {size, 1}, {to_list, 1}, {from_list, 1},
             {keys, 1}];
        _Other ->
            undefined
    end.
-endif.
%%%===================================================================
%%% API
%%%===================================================================
%% @doc create a new dictionary object from the specified module. The
%% module should implement the dictionary behaviour.
%%
%% @param ModuleName The module name.
-spec new(module()) -> dictionary(_K, _V).
new(ModuleName) when is_atom(ModuleName) ->
    #dict_t{callback = ModuleName, data = ModuleName:new()}.

%% @doc check to see if the dictionary provided has the specified key.
%%
%% @param Dict The dictionary object to check
%% @param Key The key to check the dictionary for
-spec has_key(key(K), dictionary(K, _V)) -> boolean().
has_key(Key, #dict_t{callback = Mod, data = Data}) ->
    Mod:has_key(Key, Data).

%% @doc given a key return the value stored under it. If the key is
%% not found throw a 'not_found' exception.
%%
%% @param Dict The dictionary object to return the value from
%% @param Key The key requested
%% @throws not_found when the key does not exist
-spec get(key(K), dictionary(K, V)) -> value(V).
get(Key, #dict_t{callback = Mod, data = Data}) ->
    Mod:get(Key, Data).

%% @doc given a key return the value stored under it. If the key is
%% not found then the default value is returned.
%%
%% @param Dict The dictionary object to return the value from
%% @param Key The key requested
%% @param Default The value that will be returned if no value is found
%% in the dictionary.
-spec get(key(K), value(V), dictionary(K, V)) -> value(V).
get(Key, Default, #dict_t{callback = Mod, data = Data}) ->
    Mod:get(Key, Default, Data).

%% @doc add a new value to the existing dictionary. Return a new
%% dictionary containing the value.
%%
%% @param Dict the dictionary object to add to
%% @param Key the key to add
%% @param Value the value to add
-spec add(key(K), value(V), dictionary(K, V)) -> dictionary(K, V).
add(Key, Value, #dict_t{callback = Mod, data = Data} = Dict) ->
    Dict#dict_t{data = Mod:add(Key, Value, Data)}.

%% @doc Remove a value from the dictionary returning a new dictionary
%% with the value removed.
%%
%% @param Dict the dictionary object to remove the value from
%% @param Key the key of the key/value pair to remove
-spec remove(key(K), dictionary(K, V)) -> dictionary(K, V).
remove(Key, #dict_t{callback = Mod, data = Data} = Dict) ->
    Dict#dict_t{data = Mod:remove(Key, Data)}.
%% @doc Check to see if the value exists in the dictionary.
%%
%% @param Dict the dictionary object to check
%% @param Value The value whose existence is being checked
-spec has_value(value(V), dictionary(_K, V)) -> boolean().
has_value(Value, #dict_t{callback = Mod, data = Data}) ->
    Mod:has_value(Value, Data).

%% @doc return the current number of key value pairs in the dictionary.
%%
%% @param Dict the object to return the size for.
-spec size(dictionary(_K, _V)) -> integer().
size(#dict_t{callback = Mod, data = Data}) ->
    Mod:size(Data).

%% @doc Return the contents of this dictionary as a list of key value
%% pairs.
%%
%% @param Dict the base dictionary to make use of.
-spec to_list(Dict::dictionary(K, V)) -> [{key(K), value(V)}].
to_list(#dict_t{callback = Mod, data = Data}) ->
    Mod:to_list(Data).

%% @doc Create a new dictionary, of the specified implementation, using
%% the list provided as the starting contents.
%%
%% @param ModuleName the implementation module to create the dictionary from
%% @param List The list of key value pairs to start with
-spec from_list(module(), [{key(K), value(V)}]) -> dictionary(K, V).
from_list(ModuleName, List) when is_list(List) ->
    #dict_t{callback = ModuleName, data = ModuleName:from_list(List)}.
%% @doc Return the keys of this dictionary as a list.
%% (Final line repaired: the original carried a dataset extraction
%% artifact after the closing period.)
%%
%% @param Dict the base dictionary to make use of.
-spec keys(Dict::dictionary(K, _V)) -> [key(K)].
keys(#dict_t{callback = Mod, data = Data}) ->
    Mod:keys(Data).
-module(icu_string).
-export([from_utf8/1, to_utf8/1,
transform_utf8/2, transform/2,
to_lower/1, to_lower/2, to_upper/1, to_upper/2,
normalize/2,
transliterate/2, transliterate/3]).
%% @doc Convert an UTF-8 encoded binary string to an ICU string
%% (delegates to the NIF layer).
-spec from_utf8(binary()) -> icu:ustring().
from_utf8(BinaryString) ->
    icu_nif:str_from_utf8(BinaryString).

%% @doc Convert an ICU string to an UTF-8 encoded binary string
%% (delegates to the NIF layer).
-spec to_utf8(icu:ustring()) -> binary().
to_utf8(UString) ->
    icu_nif:str_to_utf8(UString).
%% @doc Apply a list of transformation functions to an UTF-8 binary string.
%%
%% The function simply wraps `transform/2', adding the necessary input and
%% output format conversions (UTF-8 -> ICU string -> UTF-8).
%%
%% @see transform/2
-spec transform_utf8(binary(), [Fun]) -> binary() when
      Fun :: fun((icu:ustring()) -> icu:ustring()).
transform_utf8(BinaryString, Funs) ->
    UString = from_utf8(BinaryString),
    UString2 = transform(UString, Funs),
    to_utf8(UString2).
%% @doc Apply a list of transformation functions to an ICU string.
%%
%% Each transformation receives the output of the previous one; the
%% result of the final transformation is returned (or the input string
%% unchanged when the list is empty).
-spec transform(icu:ustring(), [Fun]) -> icu:ustring() when
      Fun :: fun((icu:ustring()) -> icu:ustring()).
transform(UString, Funs) ->
    lists:foldl(fun(Fun, Acc) -> Fun(Acc) end, UString, Funs).
%% @doc Convert an ICU string to lower case using the default locale.
-spec to_lower(icu:ustring()) -> icu:ustring().
to_lower(UString) ->
    icu_nif:str_to_lower(UString).

%% @doc Convert an ICU string to lower case using the given locale.
-spec to_lower(icu:ustring(), string()) -> icu:ustring().
to_lower(UString, Locale) ->
    icu_nif:str_to_lower(UString, Locale).

%% @doc Convert an ICU string to upper case using the default locale.
-spec to_upper(icu:ustring()) -> icu:ustring().
to_upper(UString) ->
    icu_nif:str_to_upper(UString).

%% @doc Convert an ICU string to upper case using the given locale.
-spec to_upper(icu:ustring(), string()) -> icu:ustring().
to_upper(UString, Locale) ->
    icu_nif:str_to_upper(UString, Locale).
%% @doc Normalize an ICU string with the given normalization mode.
-spec normalize(icu:ustring(), icu:normalization_mode()) -> icu:ustring().
normalize(UString, Mode) ->
    Normalizer = normalizer(Mode),
    icu_nif:unorm2_normalize(Normalizer, UString).

%% @doc Return the normalizer associated with a normalization mode.
%% nfd/nfkd reuse the "nfc"/"nfkc" data names with the decompose mode;
%% the data name selects the table and the second argument the
%% direction, mirroring ICU's unorm2_getInstance API.
normalizer(nfc) ->
    icu_nif:unorm2_get_instance("nfc", compose);
normalizer(nfkc) ->
    icu_nif:unorm2_get_instance("nfkc", compose);
normalizer(nfkc_cf) ->
    icu_nif:unorm2_get_instance("nfkc_cf", compose);
normalizer(nfd) ->
    icu_nif:unorm2_get_instance("nfc", decompose);
normalizer(nfkd) ->
    icu_nif:unorm2_get_instance("nfkc", decompose).
%% @doc Perform unicode transliteration on an ICU string, using the
%% forward direction.
%%
%% @see transliterate/3
-spec transliterate(icu:ustring(), binary()) -> icu:ustring().
transliterate(UString, TransliteratorId) ->
    transliterate(UString, TransliteratorId, forward).
%% @doc Perform unicode transliteration on an ICU string with a specific
%% direction. (Final line repaired: the original carried a dataset
%% extraction artifact after the closing period.)
-spec transliterate(icu:ustring(), binary(),
                    icu:transliteration_direction()) -> icu:ustring().
transliterate(UString, TransliteratorId, Direction) ->
    Transliterator = icu_nif:utrans_open_u(from_utf8(TransliteratorId),
                                           Direction),
    icu_nif:utrans_uchars(Transliterator, UString).
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2007-2012 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc riak_core_stat_q is an interface to query folsom stats
%% To use, call `get_stats/1' with a query `Path'.
%% A `Path' is a list of atoms | binaries. The module creates a set
%% of `ets:select/1' guards, one for each element in `Path'
%% For each stat that has a key that matches `Path' we calculate the
%% current value and return it.
-module(riak_core_stat_q).
-compile(nowarn_export_all).
-compile(export_all).
-export_type([path/0,
stat_name/0]).
-include_lib("kernel/include/logger.hrl").
-type path() :: [] | [atom()|binary()].
-type stats() :: [stat()].
-type stat() :: {stat_name(), stat_value()}.
-type stat_name() :: list().
-type stat_value() :: integer() | [tuple()].
%% @doc To allow for namespacing, and adding richer dimensions, stats
%% are named with a tuple key. The key (like `{riak_kv, node, gets}' or
%% `{riak_kv, vnode, puts, time}') can
%% be seen as an hierarchical path. With `riak_kv' at the root and
%% the other elements as branches / leaves.
%% This module allows us to get only the stats at and below a particular key.
%% `Path' is a list of atoms or the empty list.
%% an example path might be `[riak_kv]' which will return every
%% stat that has `riak_kv' in the first element of its key tuple.
%% You may use the atom '_' at any point
%% in `Path' as a wild card.
-spec get_stats(path()) -> stats().
get_stats(Path) ->
    %% Delegates the path match directly to exometer.
    exometer:get_values(Path).
%% Previous implementation, kept for reference:
%% %% get all the stats that are at Path
%% calculate_stats(exometer:select(
%%                   [{ {Path ++ '_','_',enabled}, [], ['$_'] }])).
%% Pair each stat name with its currently calculated value.
calculate_stats(NamesAndTypes) ->
    [{Name, get_stat(Name)} || {Name, _, _} <- NamesAndTypes].
%% Create/lookup a cache/calculation process
%% (currently just a synchronous exometer read).
get_stat(Stat) ->
    exometer:get_value(Stat).
%% Encapsulate getting a stat value from exometer.
%%
%% If for any reason we can't get a stats value
%% return 'unavailable'.
%% @TODO experience shows that once a stat is
%% broken it stays that way. Should we delete
%% stats that are broken?
%% Accepts either a tuple-shaped legacy name or a list name; both are
%% prefixed with the node-wide stat prefix before the exometer lookup.
calc_stat({Name, _Type}) when is_tuple(Name) ->
    stat_return(exometer:get_value([riak_core_stat:prefix()|tuple_to_list(Name)]));
calc_stat({[_|_] = Name, _Type}) ->
    stat_return(exometer:get_value([riak_core_stat:prefix()|Name])).
%% Unwrap an exometer read: a missing stat becomes 'unavailable',
%% anything found is returned bare.
stat_return(Result) ->
    case Result of
        {error, not_found} -> unavailable;
        {ok, Value} -> Value
    end.
%% Log a warning when a stat value cannot be calculated.
%% NOTE(review): nothing in this module appears to call log_error/3;
%% with export_all it may still be called externally — confirm before
%% removing.
log_error(StatName, ErrClass, ErrReason) ->
    ?LOG_WARNING("Failed to calculate stat ~p with ~p:~p", [StatName, ErrClass, ErrReason]).
%% some crazy people put funs in gauges (exometer has a 'function' metric)
%% so that they can have a consistent interface
%% to access stats from disperate sources
%% Evaluate {function, Mod, Fun} gauges lazily; any other value passes
%% straight through. (Final line repaired: the original carried a
%% dataset extraction artifact after the closing period.)
calc_gauge({function, Mod, Fun}) ->
    Mod:Fun();
calc_gauge(Val) ->
    Val.
%% @author <NAME> <<EMAIL>>
%% @copyright 2009 <NAME>
%% @doc Validator for checking if an input value matches a regular expression
%% Copyright 2009 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(validator_base_format).
-include("zotonic.hrl").
-export([render_validator/5, validate/5]).
%% Emit the javascript that attaches the client-side "format" validator
%% to the trigger element, and return the server-side args
%% [Negate, Pattern] that validate/5 will later receive.
render_validator(format, TriggerId, _TargetId, Args, Context) ->
    Pattern = proplists:get_value(pattern, Args),
    Negate = proplists:get_value(negate, Args, false),
    JsObject = z_utils:js_object(z_validation:rename_args(Args)),
    Script = [<<"z_add_validator(\"">>,TriggerId,<<"\", \"format\", ">>, JsObject, <<");\n">>],
    {[z_utils:is_true(Negate),Pattern], Script, Context}.
%% @spec validate(Type, TriggerId, Value, Args, Context) -> {ok,AcceptedValue} | {error,Id,Error}
%%          Error = invalid | novalue | {script, Script}
%% Empty input is accepted without running the regex; pair this
%% validator with a presence validator when the field is required.
validate(format, _Id, [], _, Context) ->
    {{ok, []}, Context};
validate(format, Id, Value, [Negate,Pattern], Context) ->
    PcrePattern = javascript_to_pcre_pattern(Pattern),
    {Re,Options} = pattern_to_re(PcrePattern),
    %% If a unicode pattern is given, convert the utf8 list into a unicode char list
    {Value1, Options1} = case z_string:contains("\\u", Pattern) of
        true ->
            {unicode:characters_to_list(erlang:list_to_binary(Value)), [unicode|Options]};
        false ->
            {Value, Options}
    end,
    Ok = not Negate,
    Match = case re:run(Value1, Re, Options1) of
        {match, _} -> true;
        nomatch -> false
    end,
    %% `Ok' is already bound, so the first clause matches only when
    %% Match =:= Ok: a match passes normally, and a non-match passes
    %% when the validator is negated.
    case Match of
        Ok -> {{ok, Value}, Context};
        _ -> {{error, Id, invalid}, Context}
    end.
%% @doc Translate a regular expression in javascript format (/re/flags)
%% to a {Regex, Options} pair for the Erlang re module. A pattern that
%% does not start with '/' (or has no closing '/') is returned as-is
%% with no options.
%% NOTE(review): string:rchr/2 belongs to the legacy string API
%% (deprecated but still functional).
pattern_to_re([$/|Rest]=Pattern) ->
    case string:rchr(Rest, $/) of
        0 ->
            %% No closing slash: treat the whole thing as a plain pattern.
            {Pattern,[]};
        N ->
            {Re, [$/|Options]} = lists:split(N-1,Rest),
            %% anycrlf mirrors the line-ending behaviour of JS regexes.
            ReOptions = [anycrlf|trans_options(Options, [])],
            {Re, ReOptions}
    end;
pattern_to_re(Pattern) ->
    {Pattern, []}.
%% Map javascript regex flag characters onto options for the re module.
%% Unrecognised flags are silently ignored; recognised options are
%% prepended to the accumulator in the order the flags appear.
trans_options(Options, Acc0) ->
    Mapping = [{$i, caseless}, {$m, multiline}, {$s, dotall},
               {$x, extended}, {$A, anchored}, {$D, dollar_endonly},
               {$U, ungreedy}],
    lists:foldl(
        fun(Flag, Acc) ->
            case lists:keyfind(Flag, 1, Mapping) of
                {Flag, Opt} -> [Opt | Acc];
                false -> Acc
            end
        end, Acc0, Options).
%% @doc Make a javascript regular expression pcre compatible by
%% rewriting \uXXXX escapes (4-6 hex digits) into PCRE's \x{XXXX} form.
%% The temporary "@@--@@" marker ensures only the escapes matched in
%% the first pass are rewritten by the second pass. (Final line
%% repaired: the original carried a dataset extraction artifact.)
javascript_to_pcre_pattern(Pattern) ->
    %% Convert \uXXXX to \x{XXXX}: first append "}" after each escape...
    R1 = re:replace(Pattern, "([\\\\]{1}[u]{1}[0-9a-fA-F]{4,6})", "@@--@@&}", [global]),
    %% ...then swap the marked "\u" prefix for "\x{".
    re:replace(R1, "@@--@@[\\\\]u", "\\\\x{", [{return, list}, global]).
%% @doc Abuse module constant pools as a "read-only shared heap" (since erts 5.6) for non-binary
%% Erlang terms.
%% <a href="http://www.erlang.org/pipermail/erlang-questions/2009-March/042503.html">[1]</a>.
%% Based on <a href="https://mochiweb.googlecode.com/svn/trunk/src/mochiglobal.erl">[2]</a>.
%%
%% <B>Note:</B> We are explicitly using tuples here because we expect to
%% use this to speed up ETS lookups and ETS stores tuples.
-module(fling_mochiglobal).
-export([create/3,
create/4,
get/2,
get/3,
mode/2,
to_list/1,
gen_module_name/0,
purge/1]).
-define(ALL, all).
-define(GETTER, term).
-define(GETTER(K), term(K)).
-type get_expr_fun() :: fun((tuple()) -> any()).
-spec create( L :: [ tuple() ],
              GetKey :: get_expr_fun(),
              GetValue :: get_expr_fun() ) -> ModName :: atom().
%% @doc create a module using the list of tuples given. The functions
%% passed in should return the key and the value respectively when
%% provided an element from the list of input tuples. Each function will be given
%% the same element of L as input.
%%
%% A simple example might be:
%% <pre>
%% GetKey = fun({K, _V}) -> K end.
%% GetValue = fun({_K, V}) -> V end.
%% </pre>
%%
%% A more complex example might be:
%% <pre>
%% -record(person, { name, phone }).
%% % use phone as the key for this record lookup
%% get_key(#person{ phone = Phone }) -> Phone.
%% % use entire record tuple as the value
%% get_value(E) -> E.
%% </pre>
%%
%% The module name is random (see gen_module_name/0) and is returned;
%% a load failure crashes with badmatch on the `ok =' assertion.
create(L, GetKey, GetValue) when is_list(L) andalso L /= []
        andalso is_function(GetKey) andalso is_function(GetValue)->
    ModName = gen_module_name(),
    ok = create(ModName, L, GetKey, GetValue),
    ModName.
-spec create( ModName :: atom(),
              L :: [ tuple() ],
              GetKey :: get_expr_fun(),
              GetValue :: get_expr_fun()
            ) -> ok | {error, Reason :: term()}.
%% @doc create and load a module using the given module name, and constructed
%% using the list of tuples given. Any previously loaded version of the
%% module is purged before the new binary is loaded.
%% {@link create/3}
create(ModName, L, GetKey, GetValue) when is_atom(ModName)
        andalso is_list(L) andalso L /= []
        andalso is_function(GetKey) andalso is_function(GetValue) ->
    Bin = compile(ModName, L, GetKey, GetValue),
    code:purge(ModName),
    case code:load_binary(ModName, atom_to_list(ModName) ++ ".erl", Bin) of
        {module, ModName} -> ok;
        Error -> Error
    end.
-spec get(ModName :: atom(), Key :: term()) -> any() | undefined.
%% @equiv get(ModName, K, undefined)
get(ModName, K) ->
    get(ModName, K, undefined).

-spec get(ModName :: atom(), Key :: term(), Default :: term()) -> term().
%% @doc Get the term for K or return Default if K is not found.
%% A missing key surfaces as a function_clause error from the generated
%% term/1 function, which is caught here; any other error propagates.
get(ModName, K, Default) ->
    try
        ModName:?GETTER(K)
    catch
        error:function_clause ->
            Default
    end.
-spec mode(ModName :: atom(), Tid :: ets:tid()) -> {ets, Tid :: ets:tid()}
                                                 | {mg, ModName :: atom()}.
%% @doc Get the current mode. Falls back to {ets, Tid} when the
%% generated module is not loaded (its mode/0 is undef).
mode(ModName, Tid) ->
    try
        ModName:mode()
    catch
        error:undef ->
            {ets, Tid}
    end.
-spec to_list(ModName :: atom()) -> [ tuple() ].
%% @doc Return all input tuples from the constructed module as a list
%% (reads the generated all/0 function).
to_list(ModName) when is_atom(ModName) ->
    ModName:all().
-spec purge( ModName :: atom() ) -> boolean().
%% @doc Purges and removes the given module from the code server.
%% Returns the result of code:delete/1 (false when the module was not
%% current or not loaded).
purge(ModName) when is_atom(ModName) ->
    code:purge(ModName),
    code:delete(ModName).
-spec gen_module_name() -> atom().
%% @doc Generate a unique random module name: "fling$" plus the MD5 hex
%% of a fresh reference.
%% NOTE(review): each call mints a new atom and atoms are never garbage
%% collected, so unbounded create/purge cycles grow the atom table.
gen_module_name() ->
    list_to_atom("fling$" ++ md5hex(term_to_binary(erlang:make_ref()))).
%% internal functions

%% @private Lowercase hexadecimal MD5 digest of Data, as a string.
-spec md5hex( binary() ) -> string().
md5hex(Data) ->
    binary_to_list(hexlify(erlang:md5(Data))).

%% @private Hex-encode a binary, two lowercase digits per byte.
-spec hexlify( binary() ) -> binary().
hexlify(Bin) when is_binary(Bin) ->
    list_to_binary([[hex(Byte div 16), hex(Byte rem 16)] || <<Byte>> <= Bin]).

%% @private Character for a single hex digit (nibble value 0..15).
hex(N) when N >= 10 -> $a + N - 10;
hex(N) -> $0 + N.
-spec compile( ModName :: atom(),
               L :: [ tuple() ],
               GetKey :: get_expr_fun(),
               GetValue :: get_expr_fun() ) -> binary().
%% Compile the generated forms to a BEAM binary. Crashes with badmatch
%% if compilation fails (errors are printed via report_errors).
compile(Module, L, GetKey, GetValue) ->
    {ok, Module, Bin} = compile:forms(forms(Module, L, GetKey, GetValue),
                                      [verbose, report_errors]),
    Bin.
-spec forms( ModName :: atom(),
             L :: [ tuple() ],
             GetKey :: get_expr_fun(),
             GetValue :: get_expr_fun() ) -> [erl_syntax:syntaxTree()].
%% Build the abstract forms of the generated module — header, exports,
%% all/0, mode/0 and the term/1 lookup clauses — reverted to standard
%% erl_parse trees for compile:forms/2.
forms(Module, L, GetKey, GetValue) ->
    [erl_syntax:revert(X) || X <- [ module_header(Module),
                                    handle_exports(?GETTER),
                                    make_all(L),
                                    make_mode(Module),
                                    make_lookup_terms(?GETTER, L, GetKey, GetValue) ] ].
-spec module_header( ModName :: atom() ) -> erl_syntax:syntaxTree().
%% -module(Module).
%% Abstract form of the -module attribute.
module_header(Module) ->
    erl_syntax:attribute(
        erl_syntax:atom(module),
        [erl_syntax:atom(Module)]).
%% -export([ term/1, all/0 ]).
%% Abstract form of the export attribute: the Getter/1 lookup function
%% plus mode/0 and all/0.
-spec handle_exports( Getter :: atom() ) -> erl_syntax:syntaxTree().
handle_exports(Getter) ->
    erl_syntax:attribute(
        erl_syntax:atom(export),
        [erl_syntax:list(
            [erl_syntax:arity_qualifier(
                erl_syntax:atom(Getter),
                erl_syntax:integer(1)),
             erl_syntax:arity_qualifier(
                erl_syntax:atom(mode),
                erl_syntax:integer(0)),
             erl_syntax:arity_qualifier(
                erl_syntax:atom(?ALL),
                erl_syntax:integer(0))
            ])]).
%% all() -> L.
%% Abstract form of all/0: returns the original input list verbatim.
-spec make_all([ tuple() ]) -> erl_syntax:syntaxTree().
make_all(L) ->
    erl_syntax:function(
        erl_syntax:atom(?ALL),
        [erl_syntax:clause([], none, [erl_syntax:abstract(L)])]).

%% mode() -> {mg, ModName}.
%% Abstract form of mode/0: marks the data as served from the module.
make_mode(ModName) ->
    erl_syntax:function(
        erl_syntax:atom(mode),
        [erl_syntax:clause([], none, [erl_syntax:abstract({mg, ModName})])]).
%% term(K) -> V;
%% Abstract form of the Getter/1 lookup function: one clause per input
%% tuple, mapping GetKey(Tuple) to GetValue(Tuple).
-spec make_lookup_terms( Getter :: atom(),
                         L :: [ tuple() ],
                         GetKey :: get_expr_fun(),
                         GetValue :: get_expr_fun() ) -> [erl_syntax:syntaxTree()].
make_lookup_terms(Getter, L, GetKey, GetValue) ->
    erl_syntax:function(
        erl_syntax:atom(Getter),
        make_terms(L, GetKey, GetValue, [])).

-spec make_terms( L :: [ tuple() ],
                  GetKey :: get_expr_fun(),
                  GetValue :: get_expr_fun(),
                  Acc :: list() ) -> [erl_syntax:syntaxTree()].
%% Accumulate one 'Key -> Value' clause per element.
%% NOTE(review): prepending to Acc emits the clauses in reverse input
%% order, so with duplicate keys the *last* occurrence in L wins the
%% pattern match — confirm that precedence is intended.
make_terms([], _GetKey, _GetValue, Acc) ->
    Acc;
make_terms([ H | T ], GetKey, GetValue, Acc) ->
    make_terms(T, GetKey, GetValue,
               %% Pattern (Key), Guards (none), Function Body (Value)
               [ erl_syntax:clause([erl_syntax:abstract(GetKey(H))], none, [erl_syntax:abstract(GetValue(H))]) | Acc ]).
%%
%% Tests
%%
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").

%% Round-trips a small keyword list through a generated module and checks
%% every key resolves, with 'undefined' for a missing key.
%% NOTE(review): create/3, to_list/1 and get/2 are defined elsewhere in
%% this module (outside this excerpt).
basic_test() ->
    L = [{a,1}, {b,2}, {c,3}],
    GetKey = fun({K, _V}) -> K end,
    GetValue = fun({_K, V}) -> V end,
    Mod = create(L, GetKey, GetValue),
    ?assertEqual(L, to_list(Mod)),
    ?assertEqual(1, ?MODULE:get(Mod, a)),
    ?assertEqual(2, ?MODULE:get(Mod, b)),
    ?assertEqual(3, ?MODULE:get(Mod, c)),
    ?assertEqual(undefined, ?MODULE:get(Mod, d)).

-record(person, {name, phone}).

%% Same round-trip using record-based key/value extractor funs.
record_test() ->
    L = [ #person{name="mike", phone=1}, #person{name="joe", phone=2}, #person{name="robert", phone=3} ],
    GetKey = fun(#person{ phone = P }) -> P end,
    GetValue = fun(#person{ name = N }) -> N end,
    Mod = create(L, GetKey, GetValue),
    ?assertEqual(L, to_list(Mod)),
    ?assertEqual("mike", ?MODULE:get(Mod, 1)),
    ?assertEqual("joe", ?MODULE:get(Mod, 2)),
    ?assertEqual("robert", ?MODULE:get(Mod, 3)).
-endif.
%% A priority queue based on gb_trees
-module(tiny_pq).
-export([
delete_value/3,
insert_value/3,
move_value/4,
foldr_new/4,
prune_old/2,
prune_collect_old/4
]).
%% @spec delete_value(Priority, Value, Tree) -> Tree1
%% @doc Delete a `Value' associated with `Priority' from `Tree'.
%% Crashes with case_clause if `Priority' is not present in `Tree'; a
%% `Value' missing from an existing priority's list is silently ignored.
delete_value(Priority, Value, Tree) ->
    case gb_trees:lookup(Priority, Tree) of
        {value, [Value]} ->
            %% Last value at this priority: drop the tree node entirely.
            gb_trees:delete(Priority, Tree);
        {value, Values} ->
            gb_trees:enter(Priority, lists:delete(Value, Values), Tree)
    end.
%% @spec insert_value(Priority, Value, Tree) -> Tree1
%% @doc Insert a `Value' with associated `Priority' into `Tree'; values
%% sharing a priority are kept in a list, newest first.
insert_value(Priority, Value, Tree) ->
    case gb_trees:lookup(Priority, Tree) of
        none ->
            gb_trees:insert(Priority, [Value], Tree);
        {value, Existing} ->
            gb_trees:update(Priority, [Value | Existing], Tree)
    end.
%% @spec move_value(OldPriority, NewPriority, Value, Tree) -> Tree1
%% @doc Change the priority of `Value' from `OldPriority' to `NewPriority'.
move_value(OldPriority, NewPriority, Value, Tree) ->
    Deleted = delete_value(OldPriority, Value, Tree),
    insert_value(NewPriority, Value, Deleted).
%% @spec foldr_new(Function, Acc0, Tree, Priority) -> Acc1
%% @doc Fold over values with priority greater than `Priority'. Matches on
%% gb_trees' internal {Size, RootNode} representation.
foldr_new(Function, Acc0, {_Size, RootNode}, Priority) ->
    iterate_nonexpired_nodes(Function, Acc0, RootNode, Priority).
%% @spec prune_old(Tree, Priority) -> Tree1
%% @doc Remove nodes with priority less than or equal to `Priority'.
%% Matches on gb_trees' internal {Size, RootNode} representation.
prune_old({Size, TreeNode}, Priority) ->
    {Tree1, NumDeleted} = prune_expired_nodes(TreeNode, Priority),
    {Size - NumDeleted, Tree1}.

%% @spec prune_collect_old(Function, Acc0, Tree, Priority) -> {Acc1, Tree1}
%% @doc Remove nodes with priority less than or equal to `Priority', and
%% fold over their values using `Function'.
prune_collect_old(Function, Acc0, {Size, TreeNode}, Priority) ->
    {Acc1, Tree1, NumDeleted} = prune_collect_expired_nodes(Function, Acc0, TreeNode, Priority),
    {Acc1, {Size - NumDeleted, Tree1}}.
%% Folds Function over the values of every node whose key (priority) is
%% greater than Now, visiting the larger subtree first (foldr order).
%% NOTE(review): operates on raw gb_trees nodes; S/L appear to be the
%% smaller/larger subtrees of the internal node layout -- confirm against
%% the gb_trees version in use.
iterate_nonexpired_nodes(Function, State, {K, V, S, L}, Now) when K > Now ->
    Acc1 = iterate_nonexpired_nodes(Function, State, L, Now),
    Acc2 = lists:foldr(Function, Acc1, V),
    iterate_nonexpired_nodes(Function, Acc2, S, Now);
iterate_nonexpired_nodes(Function, State, {K, _V, _S, L}, Now) when K =< Now ->
    %% This key is expired; only the larger subtree can hold keys > Now.
    iterate_nonexpired_nodes(Function, State, L, Now);
iterate_nonexpired_nodes(_Function, State, nil, _Now) ->
    State.
%% Drops every node with key =< Now from a raw gb_trees node, returning
%% {PrunedNode, NumDeleted}.
prune_expired_nodes({K, V, S, L}, Now) when K > Now ->
    %% This node survives; only its smaller subtree can contain expired keys.
    {Tree1, NumDeleted} = prune_expired_nodes(S, Now),
    {{K, V, Tree1, L}, NumDeleted};
prune_expired_nodes({K, _V, S, L}, Now) when K =< Now ->
    %% The whole smaller subtree is expired too: count it, keep only the
    %% pruned larger subtree.
    {_, NumDeleted_S} = prune_expired_nodes(S, Now),
    {Tree1, NumDeleted_L} = prune_expired_nodes(L, Now),
    {Tree1, NumDeleted_S + NumDeleted_L + 1};
prune_expired_nodes(nil, _Now) ->
    {nil, 0}.
%% Like prune_expired_nodes/2, but additionally folds Function over the
%% values of every pruned (expired) node. Returns {Acc, PrunedNode,
%% NumDeleted}. Operates on raw gb_trees nodes; nil is the empty node.
%% (Fixed: removed a trailing dataset artifact that broke the final line.)
prune_collect_expired_nodes(Function, Acc, {K, V, S, L}, Now) when K > Now ->
    %% This node survives; only its smaller subtree can contain expired keys.
    {Acc1, Tree1, NumDeleted} = prune_collect_expired_nodes(Function, Acc, S, Now),
    {Acc1, {K, V, Tree1, L}, NumDeleted};
prune_collect_expired_nodes(Function, Acc, {K, V, S, L}, Now) when K =< Now ->
    %% This node and its entire smaller subtree are expired: collect their
    %% values, then keep only the pruned larger subtree.
    Acc1 = lists:foldr(Function, Acc, V),
    {Acc2, _, NumDeleted_S} = prune_collect_expired_nodes(Function, Acc1, S, Now),
    {Acc3, Tree3, NumDeleted_L} = prune_collect_expired_nodes(Function, Acc2, L, Now),
    {Acc3, Tree3, NumDeleted_S + NumDeleted_L + 1};
prune_collect_expired_nodes(_Function, Acc, nil, _Now) ->
    {Acc, nil, 0}.
%Copyright [2012] [<NAME>]
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
% http://www.apache.org/licenses/LICENSE-2.0
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
-module(coello_basic).
-include_lib("amqp_client/include/amqp_client.hrl").
-export([publish/4, publish/5, consume/3, consume/4, cancel/2, ack/3, reject/3]).
%% Publishes `Data' on `Exchange' with `RoutingKey', setting the AMQP
%% reply_to property to `ReplyTo'. List data is converted to a binary first.
-spec publish(Channel::pid(), Data::binary() | list(), Exchange::bitstring(), RoutingKey::bitstring(), ReplyTo::bitstring()) -> ok.
publish(Channel, Data, Exchange, RoutingKey, ReplyTo) when is_binary(Data)->
    Msg = build_amqp_msg([{payload, Data}, {reply_to, ReplyTo}]),
    publish_msg(Channel, Msg, Exchange, RoutingKey);
publish(Channel, Data, Exchange, RoutingKey, ReplyTo) when is_list(Data)->
    publish(Channel, list_to_binary(Data), Exchange, RoutingKey, ReplyTo).

%% Same as publish/5 but without a reply_to property.
-spec publish(Channel::pid(), Data::binary() | list(), Exchange::bitstring(), RoutingKey::bitstring()) ->ok.
publish(Channel, Data, Exchange, RoutingKey) when is_binary(Data) ->
    Msg = build_amqp_msg([{payload, Data}]),
    publish_msg(Channel, Msg, Exchange, RoutingKey);
publish(Channel, Data, Exchange, RoutingKey) when is_list(Data) ->
    publish(Channel, list_to_binary(Data), Exchange, RoutingKey).
%% Starts a consumer process running `Callback' for each delivery and
%% subscribes it to `QueueName'. Returns {ConsumerPid, ConsumerTag}.
-spec consume(Channel::pid(), QueueName::bitstring(), Callback::fun()) -> {pid(), term()}.
consume(Channel, QueueName, Callback) ->
    consume(Channel, QueueName, Callback, []).

%% Supported option: {no_ack, boolean()} (defaults to false, i.e. explicit acks).
-spec consume(Channel::pid(), QueueName::bitstring(), Callback::fun() , Options ::list({atom(), term()})) -> {pid(), term()}.
consume(Channel, QueueName, Callback, Options) ->
    Consumer = coello_consumer:start(Callback),
    Method = #'basic.consume'{
        queue = QueueName,
        no_ack = proplists:get_value(no_ack, Options, false)
    },
    Response = amqp_channel:subscribe(Channel, Method, Consumer),
    {Consumer, Response#'basic.consume_ok'.consumer_tag}.
%% Cancels the subscription identified by `ConsumerTag' and stops the
%% consumer process.
-spec cancel(Channel::pid(), {Consumer::pid(), ConsumerTag::bitstring()}) -> ok.
cancel(Channel, {Consumer, ConsumerTag}) ->
    Method = #'basic.cancel'{consumer_tag = ConsumerTag},
    amqp_channel:call(Channel, Method),
    coello_consumer:stop(Consumer).

%% Acknowledges `DeliveryTag'; `Multiple' acks all messages up to the tag.
-spec ack(Channel :: pid(), DeliveryTag :: term(), Multiple :: byte()) -> ok.
ack(Channel, DeliveryTag, Multiple) ->
    Method = #'basic.ack'{delivery_tag = DeliveryTag, multiple = Multiple},
    amqp_channel:cast(Channel, Method).

%% Rejects `DeliveryTag'; `Requeue' controls whether the broker redelivers.
-spec reject(Channel :: pid(), DeliveryTag :: term(), Requeue :: boolean()) -> ok.
reject(Channel, DeliveryTag, Requeue) ->
    Method = #'basic.reject'{delivery_tag = DeliveryTag, requeue = Requeue},
    amqp_channel:cast(Channel, Method).
%==================
%
% Internal
%
%==================
%% Builds an #amqp_msg{} from an option list. Unknown options crash with
%% function_clause (callers only ever pass payload/reply_to).
-spec build_amqp_msg(list({atom(), term()})) -> #amqp_msg{}.
build_amqp_msg(Options) ->
    build_amqp_msg(#amqp_msg{}, Options).

-spec build_amqp_msg(Msg::#amqp_msg{}, list({atom(), term()})) -> #amqp_msg{}.
build_amqp_msg(Msg, [{payload, Payload} | Tail ]) ->
    build_amqp_msg(Msg#amqp_msg{ payload = Payload}, Tail);
build_amqp_msg(Msg, [{reply_to, ReplyTo} | Tail]) ->
    %% reply_to lives in the nested 'P_basic' properties record.
    Props = Msg#amqp_msg.props#'P_basic'{ reply_to = ReplyTo},
    build_amqp_msg(Msg#amqp_msg{ props = Props}, Tail);
build_amqp_msg(Msg, []) ->
    Msg.
%% Fire-and-forget publish of a fully-built AMQP message.
%% Fixed the -spec: `pid' (the literal atom type) was intended to be the
%% `pid()' built-in type; also removed a trailing dataset artifact.
-spec publish_msg(Channel::pid(), Msg::#amqp_msg{}, Exchange::bitstring(), RoutingKey::bitstring()) ->
    ok.
publish_msg(Channel, Msg, Exchange, RoutingKey) ->
    Method = #'basic.publish'{ exchange = Exchange, routing_key = RoutingKey},
    amqp_channel:cast(Channel, Method, Msg).
%% Copyright 2016-2017 TensorHub, Inc.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(guild_data_reader).
-export([flags/1, attrs/1, series/3, output/1, series_keys/1,
compare/2]).
%% ===================================================================
%% Flags
%% ===================================================================
%% Returns the run's flags as a map; crashes with {db_flags, Err, Run} on
%% db errors. Fixed typo: the error tag was 'db_dlags', inconsistent with
%% the db_attrs/db_series/db_output/db_series_keys convention below.
flags(Run) ->
    Db = run_db(Run),
    case guild_run_db:flags(Db) of
        {ok, Flags} -> format_flags(Flags);
        {error, Err} -> error({db_flags, Err, Run})
    end.

%% Converts the [{Key, Value}] flag list into a map.
format_flags(Flags) -> maps:from_list(Flags).
%% ===================================================================
%% Attrs
%% ===================================================================
%% Returns the run's attributes as a map; crashes with {db_attrs, ...} on
%% db errors.
attrs(Run) ->
    case guild_run_db:attrs(run_db(Run)) of
        {ok, AttrList} -> format_attrs(AttrList);
        {error, Reason} -> error({db_attrs, Reason, Run})
    end.

%% Converts the [{Key, Value}] attribute list into a map.
format_attrs(AttrList) ->
    maps:from_list(AttrList).
%% ===================================================================
%% Series
%% ===================================================================
%% Fetches the series matching `Pattern' from the run's db, down-sampling
%% each series to at most `Max' points ('all' disables reduction).
series(Run, Pattern, Max) ->
    Db = run_db(Run),
    case guild_run_db:series(Db, Pattern) of
        {ok, Series} -> format_series(reduce_series(Series, Max));
        {error, Err} -> error({db_series, Err, Run, Pattern})
    end.

reduce_series(Series, all) ->
    Series;
reduce_series(Series, Max) ->
    [{Key, guild_util:reduce_to(Vals, Max)} || {Key, Vals} <- Series].

format_series(Series) ->
    maps:from_list(Series).
%% ===================================================================
%% Output
%% ===================================================================
%% Returns the run's captured output as [[Time, StreamId, Line]].
output(Run) ->
    Db = run_db(Run),
    case guild_run_db:output(Db) of
        {ok, Output} -> format_output(Output);
        {error, Err} -> error({db_output, Err, Run})
    end.

%% Time div 1000 -- presumably microseconds to milliseconds; confirm the
%% stored units against guild_run_db before relying on this.
format_output(Output) ->
    [[Time div 1000, stream_id(Stream), Val]
     || {Time, Stream, Val} <- Output].

%% Encodes the stream as a compact numeric id (0 = stdout, 1 = stderr).
stream_id(stdout) -> 0;
stream_id(stderr) -> 1;
stream_id(_) -> null.
%% ===================================================================
%% Series keys
%% ===================================================================
%% Returns the union of series keys across `Runs' (list order unspecified,
%% as produced by sets:to_list/1).
series_keys(Runs) ->
    Keys = lists:foldl(fun series_keys_acc/2, sets:new(), Runs),
    sets:to_list(Keys).

series_keys_acc(Run, Acc) ->
    Keys = run_series_keys(Run),
    sets:union(Acc, sets:from_list(Keys)).

run_series_keys(Run) ->
    Db = run_db(Run),
    case guild_run_db:series_keys(Db) of
        {ok, Keys} -> Keys;
        {error, Err} -> error({db_series_keys, Err, Run})
    end.
%% ===================================================================
%% Compare
%% ===================================================================
%% Builds one comparison map per run, containing the formatted run plus one
%% entry per requested source ("flags", "attrs", "output", "series/<Pattern>").
compare(Runs, Sources) ->
    [run_compare(Run, Sources) || Run <- Runs].

run_compare(Run, Sources) ->
    maps:from_list(
      [{run, guild_run_util:format_run(Run)}
       |[{source_key(Source), run_source(Source, Run)}
         || Source <- Sources]]).

source_key(Name) -> list_to_binary(Name).

%% Dispatches a source name to its reader; unknown names crash.
run_source("flags", Run) -> flags(Run);
run_source("attrs", Run) -> attrs(Run);
run_source("output", Run) -> output(Run);
run_source("series/" ++ Path, Run) -> series(Run, Path, all);
run_source(Other, _Run) -> error({run_source, Other}).
%% ===================================================================
%% Utils / support
%% ===================================================================
%% Opens the run's database and returns the run directory, which serves as
%% the db handle for subsequent guild_run_db calls. Crashes with
%% {db_missing, Run} when the run has no database.
%% (Fixed: removed a trailing dataset artifact that broke the final line.)
run_db(Run) ->
    RunDir = guild_run:dir(Run),
    case guild_run_db:open(RunDir) of
        ok -> RunDir;
        {error, missing} -> error({db_missing, Run})
    end.
%% Copyright 2018 <NAME> (http://dairon.org)
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(sentry).
-include("sentry.hrl").
-export([
capture_exception/2
]).
-spec capture_exception(string() | binary(), [parameter()]) -> ok.
-type parameter() ::
{stacktrace, [stackframe()]} |
{exception, {exit | error | throw, term()}} |
{atom(), binary() | integer()}.
-type stackframe() ::
{module(), atom(), non_neg_integer() | [term()]} |
{module(), atom(), non_neg_integer() | [term()], [{atom(), term()}]}.
capture_exception(Message, Params) when is_list(Message) ->
    %% Normalize list (string) messages to UTF-8 binaries.
    capture_exception(unicode:characters_to_binary(Message), Params);
capture_exception(Message, _Params0) ->
    %% NOTE(review): Params (stacktrace/exception details per parameter())
    %% are currently ignored -- only the bare message is reported to Sentry.
    %% Confirm whether this truncation is intentional.
    Event = #{
        event_id => utils:event_id(),
        platform => erlang,
        server_name => node(),
        timestamp => utils:unix_timestamp(),
        message => term_to_json(Message)
    },
    sentry_client:send_event(Event).
%% Converts an Erlang stackframe tuple into a Sentry-style frame proplist
%% (wrapped in a 1-tuple).
%% NOTE(review): not referenced from the visible code paths in this module;
%% presumably intended for the ignored stacktrace parameter above -- verify.
frame_to_json({Module, Function, Arguments}) ->
    frame_to_json({Module, Function, Arguments, []});
frame_to_json({Module, Function, Arguments, Location}) ->
    %% The third MFA element is either a concrete argument list or an arity.
    Arity = case is_list(Arguments) of
        true -> length(Arguments);
        false -> Arguments
    end,
    Line = case lists:keyfind(line, 1, Location) of
        false -> -1;
        {line, L} -> L
    end,
    {
        %% Argument values are only available when a real argument list was
        %% captured.
        case is_list(Arguments) of
            true -> [{vars, [iolist_to_binary(io_lib:format("~w", [Argument])) || Argument <- Arguments]}];
            false -> []
        end ++ [
            {module, Module},
            {function, <<(atom_to_binary(Function, utf8))/binary, "/", (list_to_binary(integer_to_list(Arity)))/binary>>},
            {lineno, Line},
            %% Fall back to "<module>.erl" when no file info is present.
            {filename, case lists:keyfind(file, 1, Location) of
                false -> <<(atom_to_binary(Module, utf8))/binary, ".erl">>;
                {file, File} -> list_to_binary(File)
            end}
        ]
    }.
%% Renders any term as a binary for the Sentry payload: binaries and atoms
%% pass through unchanged, everything else is pretty-printed (line width
%% 120) via ~p. (Fixed: removed a trailing dataset artifact on the final
%% line that broke the syntax.)
term_to_json(Term) when is_binary(Term); is_atom(Term) ->
    Term;
term_to_json(Term) ->
    iolist_to_binary(io_lib:format("~120p", [Term])).
%%%------------------------------------------------------------------------
%% Copyright 2019, OpenTelemetry Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%% @end
%%%-------------------------------------------------------------------------
-module(otel).
-export([start_span/1,
start_span/2,
with_span/1,
with_span/2,
current_span_ctx/0,
end_span/0]).
-export([get_ctx/1,
is_recording_events/0,
set_attribute/2,
set_attributes/1,
add_event/2,
add_events/1,
add_links/1,
set_status/1,
update_name/1]).
-include("opentelemetry.hrl").
%% handy macros so we don't have function name typos
-define(DO(Args), do_span_function(?FUNCTION_NAME, Args)).
%% Starts a span named `Name' with default options on the current tracer.
-spec start_span(opentelemetry:span_name()) -> opentelemetry:span_ctx().
start_span(Name) ->
    start_span(Name, #{}).

-spec start_span(opentelemetry:span_name(), ot_span:start_opts()) -> opentelemetry:span_ctx().
start_span(SpanName, Opts) ->
    Tracer = opentelemetry:get_tracer(),
    ot_tracer:start_span(Tracer, SpanName, Opts).

%% Makes `Span' the current span on the current tracer.
-spec with_span(opentelemetry:span_ctx()) -> ok.
with_span(Span) ->
    Tracer = opentelemetry:get_tracer(),
    ot_tracer:with_span(Tracer, Span).

%% Runs `Fun' with `Span' as the current span.
-spec with_span(opentelemetry:span_ctx(), opentelemetry:span_ctx() | fun()) -> ok.
with_span(Span=#span_ctx{}, Fun) ->
    Tracer = opentelemetry:get_tracer(),
    ot_tracer:with_span(Tracer, Span, Fun).

%% Ends the current span on the current tracer.
-spec end_span() -> ok.
end_span() ->
    Tracer = opentelemetry:get_tracer(),
    ot_tracer:end_span(Tracer).

%% Returns the calling process's current span context.
current_span_ctx() ->
    Tracer = opentelemetry:get_tracer(),
    ot_tracer:current_span_ctx(Tracer).
%% access span functions
%% Each wrapper below delegates to the current span implementation module
%% via do_span_function/2 (the ?DO macro passes ?FUNCTION_NAME along).
get_ctx(Span) ->
    ?DO([Span]).

is_recording_events() ->
    ?DO([]).

%% manipulate span functions
set_attribute(Key, Value) ->
    ?DO([Key, Value]).

set_attributes(Attributes) ->
    ?DO([Attributes]).

add_event(Name, Attributes) ->
    ?DO([Name, Attributes]).

add_events(Events) ->
    ?DO([Events]).

add_links(Links) ->
    ?DO([Links]).

set_status(Status) ->
    ?DO([Status]).

update_name(Name) ->
    ?DO([Name]).
%% internal functions

%% Looks up the current span context and its span implementation module,
%% then invokes Function on [SpanCtx | Args].
%% (Fixed: removed a trailing dataset artifact that broke the final line.)
do_span_function(Function, Args) ->
    Tracer = opentelemetry:get_tracer(),
    SpanCtx = ot_tracer:current_span_ctx(Tracer),
    SpanModule = ot_tracer:span_module(Tracer),
    apply_span_function(SpanModule, Function, [SpanCtx | Args]).

%% The noop span module is short-circuited to avoid any per-call overhead.
apply_span_function(ot_span_noop, _Function, _Args) ->
    ok;
apply_span_function(SpanModule, Function, Args) ->
    erlang:apply(SpanModule, Function, Args).
%%% @doc Tells server to switch to "COPY-in" mode
%%%
%%% See [https://www.postgresql.org/docs/current/sql-copy.html].
%%% See [https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-COPY].
%%%
%%% When `Format' is `text', copy data should then be delivered using Erlang
%%% <a href="https://erlang.org/doc/apps/stdlib/io_protocol.html">io protocol</a>.
%%% See {@link file:write/2}, {@link io:put_chars/2}.
%%% "End-of-data" marker `\.' at the end of TEXT or CSV data stream is not needed.
%%%
%%% When `Format' is `{binary, [epgsql_type()]}', recommended way to deliver data is
%%% {@link epgsql:copy_send_rows/3}. IO-protocol can be used as well, as long as you can
%%% do proper binary encoding of data tuples (header and trailer are sent automatically),
%%% see [https://www.postgresql.org/docs/current/sql-copy.html#id-1.9.3.55.9.4.6].
%%% When you don't know what are the correct type names for your columns, you could try to
%%% construct equivalent `INSERT' or `SELECT' statement and call {@link epgsql:parse/2} command.
%%% It will return `#statement{columns = [#column{type = TypeName}]}' with correct type names.
%%%
%%% {@link epgsql_cmd_copy_done} should be called in the end.
%%%
%%% This command should not be used with command pipelining!
%%%
%%% ```
%%% > SQuery COPY ... FROM STDIN ...
%%% < CopyInResponse
%%% > CopyData* -- implemented in io protocol, not here
%%% > CopyDone | CopyFail -- implemented in epgsql_cmd_copy_done
%%% < CommandComplete -- implemented in epgsql_cmd_copy_done
%%% '''
-module(epgsql_cmd_copy_from_stdin).
-behaviour(epgsql_command).
-export([init/1, execute/2, handle_message/4]).
-export_type([response/0]).
-type response() :: {ok, [text | binary]} | {error, epgsql:query_error()}.
-include("epgsql.hrl").
-include("protocol.hrl").
-include("../epgsql_copy.hrl").
-record(copy_stdin,
{query :: iodata(),
initiator :: pid(),
format :: {binary, [epgsql:epgsql_type()]} | text}).
%% Builds the initial command state from {SQL, InitiatorPid, Format}.
init({SQL, Initiator, Format}) ->
    #copy_stdin{query = SQL, initiator = Initiator, format = Format}.

%% Sends the COPY ... FROM STDIN query. For the binary format the fixed
%% COPY header is sent immediately after the query.
execute(Sock, #copy_stdin{query = SQL, format = Format} = St) ->
    undefined = epgsql_sock:get_subproto_state(Sock), % assert we are not in copy-mode already
    {PktType, PktData} = epgsql_wire:encode_query(SQL),
    case Format of
        text ->
            {send, PktType, PktData, Sock, St};
        {binary, _} ->
            Header = epgsql_wire:encode_copy_header(),
            {send_multi, [{PktType, PktData},
                          {?COPY_DATA, Header}], Sock, St}
    end.
%% CopyBothResponses
%% CopyInResponse: the server acknowledges copy-in mode, reporting the
%% overall format byte plus one 16-bit format code per column.
handle_message(?COPY_IN_RESPONSE, <<BinOrText, NumColumns:?int16, Formats/binary>>, Sock,
               #copy_stdin{initiator = Initiator, format = RequestedFormat}) ->
    ColumnFormats = [format_to_atom(Format) || <<Format:?int16>> <= Formats],
    length(ColumnFormats) =:= NumColumns orelse error(invalid_copy_in_response),
    CopyState = init_copy_state(format_to_atom(BinOrText), RequestedFormat, ColumnFormats, Initiator),
    Sock1 = epgsql_sock:set_attr(subproto_state, CopyState, Sock),
    Res = {ok, ColumnFormats},
    %% Switch the socket's packet handler into copy mode; subsequent data is
    %% delivered via the io protocol and finished by epgsql_cmd_copy_done.
    {finish, Res, Res, epgsql_sock:set_packet_handler(on_copy_from_stdin, Sock1)};
handle_message(?ERROR, Error, _Sock, _State) ->
    Result = {error, Error},
    {sync_required, Result};
handle_message(_, _, _, _) ->
    unknown.
%% Validates that the server-reported formats are consistent with what the
%% caller requested and builds the #copy{} subprotocol state.
init_copy_state(text, text, ColumnFormats, Initiator) ->
    %% When BinOrText is `text', all "columns" should be `text' format as well.
    %% See https://www.postgresql.org/docs/current/protocol-message-formats.html
    %% CopyInResponse
    (lists:member(binary, ColumnFormats) == false)
        orelse error(invalid_copy_in_response),
    #copy{initiator = Initiator, format = text};
init_copy_state(binary, {binary, ColumnTypes}, ColumnFormats, Initiator) ->
    %% https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-COPY
    %% "As of the present implementation, all columns in a given COPY operation will use the same
    %% format, but the message design does not assume this."
    (lists:member(text, ColumnFormats) == false)
        orelse error(invalid_copy_in_response),
    NumColumns = length(ColumnFormats),
    %% Eg, `epgsql:copy_from_stdin(C, "COPY tab (a, b, c) WITH (FORMAT binary)", {binary, [int2, int4]})'
    %% so number of columns in SQL is not same as number of types in `binary'
    (NumColumns == length(ColumnTypes))
        orelse error({column_count_mismatch, ColumnTypes, NumColumns}),
    #copy{initiator = Initiator, format = binary, binary_types = ColumnTypes};
init_copy_state(ServerExpectedFormat, RequestedFormat, _, _Initiator) ->
    %% Eg, `epgsql:copy_from_stdin(C, "COPY ... WITH (FORMAT text)", {binary, ...})' or
    %% `epgsql:copy_from_stdin(C, "COPY ... WITH (FORMAT binary)", text)' or maybe PostgreSQL
    %% got some new format epgsql is not aware of
    error({format_mismatch, RequestedFormat, ServerExpectedFormat}).
%% Maps the wire-protocol format code to an atom (0 = text, 1 = binary).
%% (Fixed: removed a trailing dataset artifact that broke the final line.)
format_to_atom(0) -> text;
format_to_atom(1) -> binary.
%%%===================================================================
%%% @copyright 2019 Klarna Bank AB (publ)
%%%
%%% @doc This behavior can be seen as the opposite of {@link
%%% kflow_gen_aggregate}; it implements a stream processing node that
%%% applies a pure function to each incoming message. This function
%%% produces a list of messages that should be sent downstream.
%%%
%%% This behavior can be used in two modes: full and simplified. In
%%% simplified mode stream processing node is defined like following:
%%%
%%% ```{unfold, fun(Offset, Message) -> [Message, Message] end}'''
%%%
%%% In full mode one has to create a callback module with
%%% `kflow_gen_unfold' behavior.
%%%
%%% `unfold' callback takes 3 arguments: first is offset of a message,
%%% second is the message itself and the third one is state of the
%%% callback module. This state is created in `init' callback and
%%% remains the same through the lifetime of the pipe. Return value of
%%% `unfold' callback is a list of messages, each one is passed
%%% downstream.
%%%
%%% `init' and `terminate' callbacks can be used e.g. when some
%%% resource should be obtained to process messages. Both callbacks
%%% are optional; configuration will be passed as is to
%%% `map' callback when `init' is omitted.
%%%
%%% == Example ==
%%% ```
%%% -module(extract_writes).
%%%
%%% -behavior(kflow_gen_unfold).
%%%
%%% -export([init/1, unfold/3, terminate/1]).
%%%
%%% init(Config) ->
%%% State = do_init(Config),
%%% State.
%%%
%%% unfold(Offset, #tx{writes = Writes}, State) ->
%%% Writes.
%%%
%%% terminate(State) ->
%%% do_cleanup(State).
%%%
%%% '''
%%%
%%% NOTE: Since state is immutable, it's actually shared between the
%%% routes.
%%%
%%% @end
-module(kflow_gen_unfold).
-behavior(kflow_gen).
-include("kflow.hrl").
-include_lib("hut/include/hut.hrl").
-export([init/2, handle_message/3, handle_flush/2, terminate/2]).
-export_type([callback_fun/0]).
-callback init(_Config) -> _State.
-callback unfold(kflow:offset(), _DataIn, _State) -> [_DataOut].
-callback terminate(_State) -> _.
-optional_callbacks([init/1, terminate/1]).
-type callback_fun() :: fun((kflow:offset(), _InputMessage) -> [_OutputMessage]).
-record(s1,
{ cb_module :: module()
, cb_state :: term()
}).
-record(s2,
{ function :: callback_fun()
}).
-type state() :: #s1{} | #s2{}.
%% @private
%% Accepts either {?MODULE, Fun} with a bare arity-2 fun (simplified mode)
%% or a {CallbackModule, Config} pair (full mode). The callback's init/1 is
%% optional; when absent, Config is passed through unchanged as the state.
init(_NodeId, {?MODULE, Fun}) when is_function(Fun) ->
    is_function(Fun, 2) orelse error({badarity, Fun}),
    {ok, #s2{ function = Fun
            }};
init(_NodeId, {CbModule, CbConfig}) ->
    CbState = kflow_lib:optional_callback(CbModule, init, [CbConfig], CbConfig),
    {ok, #s1{ cb_module = CbModule
            , cb_state = CbState
            }}.
%% @private
%% Hidden messages (offset placeholders) bypass the callback entirely.
handle_message(Msg = #kflow_msg{hidden = true}, State, _) ->
    %% Don't execute callback for a hidden message, simply pass it downstream:
    {ok, [Msg], State};
handle_message(Msg0, State, _) ->
    #kflow_msg{ payload = Payload0
              , offset = Offset
              , fully_processed_offset = FPO
              , route = Route
              } = Msg0,
    %% Run the unfold callback (module or bare fun) to expand one payload
    %% into zero or more downstream payloads.
    Payloads = case State of
                   #s1{cb_module = CbModule, cb_state = CbState} ->
                       CbModule:unfold(Offset, Payload0, CbState);
                   #s2{function = Fun} ->
                       Fun(Offset, Payload0)
               end,
    Msgs = create_downstream_messages(Offset, FPO, Route, Payloads),
    {ok, Msgs, State}.
%% @private
%% Nothing is buffered by this behavior, so a flush emits no messages.
handle_flush(State, _) ->
    {ok, [], State}.

%% @private
%% Invokes the optional terminate/1 callback in full (module) mode.
terminate(#s1{cb_state = CbState, cb_module = CbModule}, _) ->
    kflow_lib:optional_callback(CbModule, terminate, [CbState]);
terminate(#s2{}, _) ->
    ok.
%% @private
%% Wraps the callback's output payloads into #kflow_msg{} records. An empty
%% output list becomes a single hidden message so the offset still advances;
%% with multiple outputs only the last message may advance the fully
%% processed offset, otherwise replays could lose data.
%% (Fixed: removed a trailing dataset artifact that broke the final line.)
-spec create_downstream_messages( kflow:offset()
                                , kflow:offset() | undefined
                                , kflow:route()
                                , term()
                                ) -> [#kflow_msg{}].
create_downstream_messages(Offset, FPO, Route, []) ->
    %% Callback didn't produce anything; create a hidden message to
    %% advance offset:
    [#kflow_msg{ offset = Offset
               , fully_processed_offset = FPO
               , hidden = true
               , route = Route
               }];
create_downstream_messages(Offset, FPO, Route, [Payload]) ->
    %% This is the last message; it can advance the offset as usual:
    [#kflow_msg{ offset = Offset
               , fully_processed_offset = FPO
               , hidden = false
               , route = Route
               , payload = Payload
               }];
create_downstream_messages(Offset, FPO, Route, [Payload|Rest]) ->
    %% We cannot advance fully processed offset until the last message
    %% is produced; otherwise replays will lose data!
    [#kflow_msg{ offset = Offset
               , fully_processed_offset = min(FPO, Offset - 1) % works for `FPO=undefined'
               , hidden = false
               , route = Route
               , payload = Payload
               } | create_downstream_messages(Offset, FPO, Route, Rest)].
%%
%% @doc A problem of finding celebrities.
%%
%% Imaging a set `P' of people at a party.
%% Say a subset `C' of `P' forms a <em>celebrity clique</em> if
%% `C' is nonempty, everybody at the party knows every member
%% of `C', but members of `C' know only each other.
%% Find this celebrity clique, assuming it exists.
%% Data for the problem:
%% 1) binary predicate `knows' and
%% 2) the set `P' as a list without duplicates.
%%
%% Note: the problem is not to determine whether or not such a clique exists.
%%
%% What is interesting about this problem is that it is asymptotically more
%% efficient to find a solution assuming one exists than to check that
%% it actually is a solution.
%%
%% @reference [B1] Chapter 9, pp. 56–63
%%
-module(celebrities).
-author("<NAME> <<EMAIL>>").
-export([cclique/2]).
-import(lists, [foldr/3]).
%%
%% @doc Finds the celebrity clique in linear time, <em>assuming one
%% exists</em>. When no clique exists the returned list is meaningless.
%%
-spec cclique(Knows, [A]) -> [A] when
      Knows :: fun((A, A) -> boolean()),
      A :: term().
cclique(Knows, People) ->
    lists:foldr(fun(Person, Clique) -> op(Knows, Person, Clique) end, [], People).

%% Fold step: update the candidate clique when considering Person.
op(_Knows, Person, []) ->
    [Person];
op(Knows, Person, [Candidate | _] = Clique) ->
    case {Knows(Person, Candidate), Knows(Candidate, Person)} of
        {false, _} -> [Person];   % Person doesn't know Candidate: restart
        {_, false} -> Clique;     % Candidate doesn't know Person: drop Person
        _ -> [Person | Clique]    % mutual acquaintance: Person joins
    end.
%% =============================================================================
%% Unit tests
%% =============================================================================
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% Fixture predicate: everyone knows any {c, _} (celebrity); celebrities
%% know no {p, _}; plain person {p, M} knows {p, N} only when M < N.
knows(_, {c, _}) -> true;
knows({c, _}, {p, _}) -> false;
knows({p, M}, {p, N}) when M < N -> true;
knows({p, _}, {p, _}) -> false.

clique_exists_test_() ->
    [
     ?_assertEqual(
        [{c, 1}, {c, 4}, {c, 3}],
        cclique(fun knows/2, [{c, 1}, {p, 6}, {c, 4}, {p, 2}, {p, 5}, {c, 3}]))
    ].

%% When no clique exists the algorithm's result is meaningless by design.
clique_doesnt_exist_test_() ->
    [
     ?_assertEqual(
        [{p, 1}], % this is not a celebrity clique
        cclique(fun knows/2, [{p, 1}, {p, 1}]))
    ].
-endif.
%% Copyright (c) 2016, <NAME> <<EMAIL>>
%%
%% This software is released under the MIT License.
%% See the LICENSE file in the project root for full license information.
%%
%% @doc A priority queue implementation of using the pairing heap algorithm
%% @private
-module(ppg_pq).
%%----------------------------------------------------------------------------------------------------------------------
%% Exported API
%%----------------------------------------------------------------------------------------------------------------------
-export([new/0, is_empty/1, in/2, out/1, peek/1, merge/2]).
-export_type([heap/0, heap/1, item/0]).
%%----------------------------------------------------------------------------------------------------------------------
%% Types
%%----------------------------------------------------------------------------------------------------------------------
-opaque heap(Item) :: empty | {Item, [heap(Item)]}.
-type heap() :: heap(item()).
-type item() :: term().
%%----------------------------------------------------------------------------------------------------------------------
%% Exported Functions
%%----------------------------------------------------------------------------------------------------------------------
%% @doc Returns an empty heap
-spec new() -> heap().
new() -> empty.

%% @doc Tests if `Heap' is empty and returns `true' if so and `false' otherwise
-spec is_empty(Heap :: heap()) -> boolean().
is_empty(empty) -> true;
is_empty(_) -> false.

%% @doc Inserts `Item' into the heap `Heap' by merging a singleton heap
%%
%% Returns the resulting heap
-spec in(Item, heap(Item)) -> heap(Item).
in(Item, Heap) -> merge({Item, []}, Heap).

%% @doc Removes the smallest item from the heap `Heap'
%%
%% Returns the tuple `{Item, Heap2}', where `Item' is the item removed and `Heap2' is the resulting heap
%% (the root's children merged pairwise). If `Heap' is empty, the atom `empty' is returned.
-spec out(Heap :: heap(Item)) -> {Item, Heap2 :: heap(Item)} | empty.
out(empty) -> empty;
out({Item, Heap}) -> {Item, merge_pairs(Heap)}.

%% @doc Returns the `Item' where `Item' is the front (smallest) item of `Heap', or `empty' if `Heap' is empty
-spec peek(Heap :: heap(Item)) -> Item | empty.
peek(empty) -> empty;
peek({Item, _}) -> Item.
%% @doc Returns the merged heap of `Heap1' and `Heap2': the heap with the
%% smaller root adopts the other as a new child.
-spec merge(Heap1 :: heap(Item1), Heap2 :: heap(Item2)) -> heap(Item1|Item2).
merge(Heap, empty) ->
    Heap;
merge(empty, Heap) ->
    Heap;
merge({X, SubHeaps1}, {Y, _} = Heap2) when X < Y ->
    {X, [Heap2 | SubHeaps1]};
merge(Heap1, {Y, SubHeaps2}) ->
    {Y, [Heap1 | SubHeaps2]}.
%%----------------------------------------------------------------------------------------------------------------------
%% Internal Functions
%%----------------------------------------------------------------------------------------------------------------------
%% Merges a list of heaps by pairing adjacent heaps and folding the pair
%% results together (used by out/1 on the removed root's children).
%% (Fixed: removed a trailing dataset artifact that broke the final line.)
-spec merge_pairs([heap(Item)]) -> heap(Item).
merge_pairs([]) -> empty;
merge_pairs([H]) -> H;
merge_pairs([H1, H2 | Hs]) -> merge(merge(H1, H2), merge_pairs(Hs)).
%%%-------------------------------------------------------------------
%%% @copyright (C) 2016, AdRoll
%%% @doc
%%%
%%% Kinesis record aggregator.
%%%
%%% Follows the KPL aggregated record format:
%%% https://github.com/awslabs/amazon-kinesis-producer/blob/master/aggregation-format.md
%%%
%%% This is an Erlang port of the aggregation functionality from:
%%% https://pypi.python.org/pypi/aws_kinesis_agg/1.0.0
%%%
%%% Creating a new aggregator:
%%%
%%% Agg = kpl_agg:new()
%%%
%%% Adding user records to an aggregator (the aggregator will emit an
%%% aggregated record when it is full):
%%%
%%% case kpl_agg:add(Agg, Record) of
%%% {undefined, NewAgg} -> ...
%%% {FullAggRecord, NewAgg} -> ...
%%% end
%%%
%%% You can also use kpl:add_all to add multiple records at once. A
%%% `Record` is a {PartitionKey, Data} tuple or a {PartitionKey, Data,
%%% ExplicitHashKey} tuple.
%%%
%%% Getting the current aggregated record (e.g. to get the last aggregated
%%% record when you have no more user records to add):
%%%
%%% case kpl_agg:finish(Agg) of
%%% {undefined, Agg} -> ...
%%% {AggRecord, NewAgg} -> ...
%%% end
%%%
%%% The result currently uses a non-standard magic prefix to prevent the KCL from
%%% deaggregating the record automatically. To use compression, use
%%% `kpl_agg:finish/2` with `true` as the second argument, which uses another
%%% non-standard magic prefix.
%%%
%%% @end
%%% Created: 12 Dec 2016 by <NAME> <<EMAIL>>
%%%-------------------------------------------------------------------
-module(kpl_agg).
%% API
-export([new/0, count/1, size_bytes/1, finish/1, finish/2, add/2, add_all/2]).
-define(MD5_DIGEST_BYTES, 16).
%% From http://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecord.html:
-define(KINESIS_MAX_BYTES_PER_RECORD, 1 bsl 20).
-include("erlmld.hrl").
-include("kpl_agg_pb.hrl").
%% A set of keys, mapping each key to a unique index.
-record(keyset, {
rev_keys = [] :: list(binary()), %% list of known keys in reverse order
key_to_index = maps:new() :: map() %% maps each known key to a 0-based index
}).
%% Internal state of a record aggregator. It stores an aggregated record that
%% is "in progress", i.e. it is possible to add more user records to it.
-record(state, {
num_user_records = 0 :: non_neg_integer(),
agg_size_bytes = 0 :: non_neg_integer(),
%% The aggregated record's PartitionKey and ExplicitHashKey are the
%% PartitionKey and ExplicitHashKey of the first user record added.
agg_partition_key = undefined :: undefined | binary(),
agg_explicit_hash_key = undefined :: undefined | binary(),
%% Keys seen in the user records added so far.
partition_keyset = #keyset{} :: #keyset{},
explicit_hash_keyset = #keyset{} :: #keyset{},
%% List if user records added so far, in reverse order.
rev_records = [] :: [#'Record'{}]
}).
%%%===================================================================
%%% API
%%%===================================================================
%% @doc Create a new, empty aggregator state.
new() ->
    #state{}.
%% @doc Number of user records added to the in-progress aggregate.
count(#state{num_user_records = Num} = _State) ->
    Num.
%% @doc Estimated serialized size of the current aggregate: magic prefix,
%% the accumulated per-record bytes, the trailing MD5 digest, the partition
%% key (once one is known) and the fixed overhead of an empty protobuf
%% 'AggregatedRecord' message.
size_bytes(#state{agg_size_bytes = Size, agg_partition_key = PK} = _State) ->
    PKBytes =
        case PK of
            undefined -> 0;
            _ -> byte_size(PK)
        end,
    EmptyMsgBytes = byte_size(kpl_agg_pb:encode_msg(#'AggregatedRecord'{})),
    byte_size(?KPL_AGG_MAGIC) + Size + ?MD5_DIGEST_BYTES + PKBytes + EmptyMsgBytes.
%% @doc Close out the current aggregate.  With nothing added it returns
%% {undefined, State}; otherwise {{PartitionKey, Payload, ExplicitHashKey},
%% FreshState}, optionally deflate-compressing the payload.
finish(#state{num_user_records = 0} = State, _ShouldDeflate) ->
    {undefined, State};
finish(#state{agg_partition_key = PK, agg_explicit_hash_key = EHK} = State, ShouldDeflate) ->
    Payload = serialize_data(State, ShouldDeflate),
    {{PK, Payload, EHK}, new()}.
%% @doc Like finish/2 with deflate compression disabled.
finish(State) ->
    finish(State, false).
%% @doc Add one user record ({PK, Data} or {PK, Data, EHK}) to the aggregate.
%% Returns {undefined, NewState} when the record fit into the current
%% aggregate, or {FullAggRecord, NewState} when the current aggregate was
%% emitted and a fresh one started with this record.  Errors out if the
%% record alone cannot fit in a single Kinesis record.
add(State, {PartitionKey, Data} = _Record) ->
    add(State, {PartitionKey, Data, create_explicit_hash_key(PartitionKey)});
add(State, {PartitionKey, Data, ExplicitHashKey} = _Record) ->
    case {calc_record_size(State, PartitionKey, Data, ExplicitHashKey), size_bytes(State)} of
        {RecSize, _} when RecSize > ?KINESIS_MAX_BYTES_PER_RECORD ->
            error("input record too large to fit in a single Kinesis record");
        {RecSize, CurSize} when RecSize + CurSize > ?KINESIS_MAX_BYTES_PER_RECORD ->
            %% Record won't fit: emit the current aggregate and start fresh.
            %% Recompute the record's size against the fresh (empty) state:
            %% the original estimate assumed key-table sharing with the old
            %% state's keysets, which no longer applies and would
            %% under-count the size of previously-shared keys.
            {FullRecord, State1} = finish(State),
            RecSize1 = calc_record_size(State1, PartitionKey, Data, ExplicitHashKey),
            State2 = add_record(State1, PartitionKey, Data, ExplicitHashKey, RecSize1),
            {FullRecord, State2};
        {RecSize, _} ->
            State1 = add_record(State, PartitionKey, Data, ExplicitHashKey, RecSize),
            %% fixme; make size calculations more accurate
            case size_bytes(State1) > ?KINESIS_MAX_BYTES_PER_RECORD - 64 of
                true ->
                    %% size estimate is almost the limit, finish & retry:
                    %% (State1, which already contains the record, is discarded;
                    %% the record is re-added to the fresh aggregate with a
                    %% size recomputed against that fresh state.)
                    {FullRecord, State2} = finish(State),
                    RecSize2 = calc_record_size(State2, PartitionKey, Data, ExplicitHashKey),
                    State3 = add_record(State2, PartitionKey, Data, ExplicitHashKey, RecSize2),
                    {FullRecord, State3};
                false ->
                    {undefined, State1}
            end
    end.
%% @doc Add a list of user records.  Returns {EmittedAggRecords, NewState}
%% where EmittedAggRecords are the aggregates that filled up along the way,
%% in the order they were emitted.
add_all(State, Records) ->
    Step =
        fun(Record, {Emitted, Agg}) ->
            case add(Agg, Record) of
                {undefined, NextAgg} -> {Emitted, NextAgg};
                {Full, NextAgg} -> {[Full | Emitted], NextAgg}
            end
        end,
    {RevEmitted, FinalState} = lists:foldl(Step, {[], State}, Records),
    {lists:reverse(RevEmitted), FinalState}.
%%%===================================================================
%%% Internal functions
%%%===================================================================
%% Calculate a new explicit hash key based on the input partition key
%% (following the algorithm from the original KPL).
%% @doc Derive an explicit hash key from the partition key.  Currently a
%% stub that always returns undefined (no EHK is attached to records).
create_explicit_hash_key(_PartitionKey) ->
    %% Their python implementation [1] is broken compared to the C++
    %% implementation [2]. But we don't care about EHKs anyway.
    %% [1] https://github.com/awslabs/kinesis-aggregation/blob/db92620e435ad9924356cda7d096e3c888f0f72f/python/aws_kinesis_agg/aggregator.py#L447-L458
    %% [2] https://github.com/awslabs/amazon-kinesis-producer/blob/ea1e49218e1a11f1b462662a1db4cc06ddad39bb/aws/kinesis/core/user_record.cc#L36-L45
    %% FIXME: Implement the actual algorithm from KPL.
    undefined.
%% Calculate how many extra bytes the given user record would take, when added
%% to the current aggregated record. This calculation has to know about KPL and
%% Protobuf internals.
%% @doc Extra bytes the given user record would add to the current aggregate.
%% Mirrors the KPL protobuf wire format: each field costs 1 tag byte plus a
%% varint length/value; keys already present in a key table cost nothing
%% extra, since records reference them by index.
calc_record_size(#state{partition_keyset = PartitionKeySet,
                        explicit_hash_keyset = ExplicitHashKeySet} = _State,
                 PartitionKey, Data, ExplicitHashKey) ->
    %% How much space we need for the PK:
    PKLength = byte_size(PartitionKey),
    PKSize = case is_key(PartitionKey, PartitionKeySet) of
        %% Already in the key table: referenced by index only.
        true -> 0;
        false -> 1 + varint_size(PKLength) + PKLength
    end,
    %% How much space we need for the EHK:
    EHKSize = case ExplicitHashKey of
        undefined -> 0;
        _ ->
            EHKLength = byte_size(ExplicitHashKey),
            case is_key(ExplicitHashKey, ExplicitHashKeySet) of
                true -> 0;
                false -> 1 + varint_size(EHKLength) + EHKLength
            end
    end,
    %% How much space we need for the inner record:
    %% (index the key would have after insertion — see potential_index/2)
    PKIndexSize = 1 + varint_size(potential_index(PartitionKey, PartitionKeySet)),
    EHKIndexSize = case ExplicitHashKey of
        undefined -> 0;
        _ -> 1 + varint_size(potential_index(ExplicitHashKey, ExplicitHashKeySet))
    end,
    DataLength = byte_size(Data),
    DataSize = 1 + varint_size(DataLength) + DataLength,
    InnerSize = PKIndexSize + EHKIndexSize + DataSize,
    %% How much space we need for the entire record:
    PKSize + EHKSize + 1 + varint_size(InnerSize) + InnerSize.
%% Calculate how many bytes are needed to represent the given integer in a
%% Protobuf message.
%% @doc Number of bytes needed to encode a non-negative integer as a
%% Protobuf base-128 varint (7 payload bits per byte; zero takes one byte).
varint_size(Integer) when Integer >= 0 ->
    Bits = max(num_bits(Integer, 0), 1),
    (Bits + 6) div 7.

%% Bit width of a non-negative integer, accumulated recursively (0 -> Acc).
num_bits(0, Acc) ->
    Acc;
num_bits(Integer, Acc) when Integer >= 0 ->
    num_bits(Integer bsr 1, Acc + 1).
%% Helper for add; do not use directly.
%% Helper for add; do not use directly.
%% Interns both keys in their keysets, prepends the new protobuf 'Record'
%% (keys referenced by table index) and bumps the counters.  The aggregate's
%% PK/EHK stay whatever the first added record supplied (first_defined/2).
add_record(#state{partition_keyset = PKSet,
                  explicit_hash_keyset = EHKSet,
                  rev_records = RevRecords,
                  num_user_records = NumUserRecords,
                  agg_size_bytes = AggSize,
                  agg_partition_key = AggPK,
                  agg_explicit_hash_key = AggEHK} = State,
           PartitionKey, Data, ExplicitHashKey, NewRecordSize) ->
    {PKIndex, NewPKSet} = get_or_add_key(PartitionKey, PKSet),
    {EHKIndex, NewEHKSet} = get_or_add_key(ExplicitHashKey, EHKSet),
    NewRecord = #'Record'{
        partition_key_index = PKIndex,
        explicit_hash_key_index = EHKIndex,
        data = Data
    },
    State#state{
        partition_keyset = NewPKSet,
        explicit_hash_keyset = NewEHKSet,
        %% Records kept in reverse order; serialize_data reverses them.
        rev_records = [NewRecord | RevRecords],
        num_user_records = 1 + NumUserRecords,
        %% NewRecordSize was estimated by the caller (calc_record_size).
        agg_size_bytes = NewRecordSize + AggSize,
        agg_partition_key = first_defined(AggPK, PartitionKey),
        agg_explicit_hash_key = first_defined(AggEHK, ExplicitHashKey)
    }.
%% @doc Return First unless it is undefined, in which case return Second.
first_defined(First, Second) ->
    case First of
        undefined -> Second;
        _ -> First
    end.
%% @doc Serialize the aggregate: protobuf-encode the 'AggregatedRecord',
%% append an MD5 checksum, and prepend the (non-standard) magic prefix.
%% When ShouldDeflate is true, the message+checksum pair is zlib-compressed
%% and a different magic prefix is used.
serialize_data(State, false) ->
    {Encoded, Checksum} = encode_agg(State),
    <<?KPL_AGG_MAGIC/binary, Encoded/binary, Checksum/binary>>;
serialize_data(State, true) ->
    {Encoded, Checksum} = encode_agg(State),
    Compressed = zlib:compress(<<Encoded/binary, Checksum/binary>>),
    <<?KPL_AGG_MAGIC_DEFLATED/binary, Compressed/binary>>.

%% Encode the protobuf message for the current state plus its MD5 digest.
encode_agg(#state{partition_keyset = PKSet,
                  explicit_hash_keyset = EHKSet,
                  rev_records = RevRecords}) ->
    Message = #'AggregatedRecord'{
        partition_key_table = key_list(PKSet),
        explicit_hash_key_table = key_list(EHKSet),
        records = lists:reverse(RevRecords)
    },
    Encoded = kpl_agg_pb:encode_msg(Message),
    {Encoded, crypto:hash(md5, Encoded)}.
%%%===================================================================
%%% Internal functions for keysets
%%%===================================================================
%% @doc True iff Key has already been assigned an index in the keyset.
is_key(Key, #keyset{key_to_index = KeyToIndex} = _KeySet) ->
    maps:is_key(Key, KeyToIndex).
%% @doc Look up Key's index, interning it with the next free (0-based) index
%% if it is new.  Returns {Index, NewKeySet}; undefined passes through as
%% {undefined, KeySet} unchanged.
get_or_add_key(undefined, KeySet) ->
    {undefined, KeySet};
get_or_add_key(Key, #keyset{rev_keys = RevKeys, key_to_index = KeyToIndex} = KeySet) ->
    case maps:find(Key, KeyToIndex) of
        {ok, Existing} ->
            {Existing, KeySet};
        error ->
            %% Next index == number of keys interned so far.
            NewIndex = length(RevKeys),
            Updated = KeySet#keyset{
                rev_keys = [Key | RevKeys],
                key_to_index = maps:put(Key, NewIndex, KeyToIndex)
            },
            {NewIndex, Updated}
    end.
%% @doc Index that Key has, or would receive if added now (i.e. the current
%% number of known keys).  Does not modify the keyset.
potential_index(Key, #keyset{rev_keys = RevKeys, key_to_index = KeyToIndex} = _KeySet) ->
    maps:get(Key, KeyToIndex, length(RevKeys)).
%% @doc All known keys in index (insertion) order.
key_list(#keyset{rev_keys = RevKeys} = _KeySet) ->
    lists:reverse(RevKeys).
%%%===================================================================
%%% TESTS
%%%===================================================================
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% varint_size/1 against reference values from the Python KPL aggregator.
varint_size_test() ->
    %% Reference values obtained using
    %% aws_kinesis_agg.aggregator._calculate_varint_size().
    ?assertEqual(1, varint_size(0)),
    ?assertEqual(1, varint_size(1)),
    ?assertEqual(1, varint_size(127)),
    ?assertEqual(2, varint_size(128)),
    ?assertEqual(4, varint_size(9999999)),
    ?assertEqual(6, varint_size(999999999999)),
    ok.
%% Keyset interning: indexes are assigned in insertion order, lookups are
%% idempotent, and potential_index/2 predicts the next index without mutating.
keyset_test() ->
    KeySet0 = #keyset{},
    ?assertEqual([], key_list(KeySet0)),
    ?assertEqual(false, is_key(<<"foo">>, KeySet0)),
    ?assertEqual(0, potential_index(<<"foo">>, KeySet0)),
    {0, KeySet1} = get_or_add_key(<<"foo">>, KeySet0),
    ?assertEqual([<<"foo">>], key_list(KeySet1)),
    ?assertEqual(true, is_key(<<"foo">>, KeySet1)),
    %% Re-adding an existing key returns the same index and keyset.
    {0, KeySet1} = get_or_add_key(<<"foo">>, KeySet1),
    ?assertEqual(1, potential_index(<<"bar">>, KeySet1)),
    {1, KeySet2} = get_or_add_key(<<"bar">>, KeySet1),
    ?assertEqual([<<"foo">>, <<"bar">>], key_list(KeySet2)),
    ?assertEqual(true, is_key(<<"foo">>, KeySet2)),
    ?assertEqual(true, is_key(<<"bar">>, KeySet2)),
    {0, KeySet2} = get_or_add_key(<<"foo">>, KeySet2),
    {1, KeySet2} = get_or_add_key(<<"bar">>, KeySet2),
    ?assertEqual(2, potential_index(<<"boom">>, KeySet2)),
    ok.
%% A fresh aggregator is empty, has only fixed overhead, and finish/1 is a no-op.
empty_aggregator_test() ->
    Agg = new(),
    ?assertEqual(0, count(Agg)),
    ?assertEqual(4 + 16, size_bytes(Agg)), % magic and md5
    {undefined, Agg} = finish(Agg),
    ok.
%% Two records aggregate into one payload whose bytes (and PK/EHK from the
%% first record) match the reference implementation output exactly.
simple_aggregation_test() ->
    Agg0 = new(),
    {undefined, Agg1} = add(Agg0, {<<"pk1">>, <<"data1">>, <<"ehk1">>}),
    {undefined, Agg2} = add(Agg1, {<<"pk2">>, <<"data2">>, <<"ehk2">>}),
    {AggRecord, Agg3} = finish(Agg2),
    ?assertEqual(0, count(Agg3)),
    %% Reference values obtained using priv/kpl_agg_tests_helper.py.
    RefPK = <<"pk1">>,
    RefEHK = <<"ehk1">>,
    RefData = <<(?KPL_AGG_MAGIC)/binary,10,3,112,107,49,10,3,112,107,50,18,4,101,104,
                107,49,18,4,101,104,107,50,26,11,8,0,16,0,26,5,100,97,116,97,
                49,26,11,8,1,16,1,26,5,100,97,116,97,50,244,41,93,155,173,190,
                58,30,240,223,216,8,26,205,86,4>>,
    ?assertEqual({RefPK, RefData, RefEHK}, AggRecord),
    ok.
%% Test helper: aggregate Records and return every emitted aggregate,
%% including the trailing partial one if any records were still pending.
aggregate_many(Records) ->
    {Emitted, Agg} = add_all(new(), Records),
    case finish(Agg) of
        {undefined, _} -> Emitted;
        {Last, _} -> Emitted ++ [Last]
    end.
%% Repeated partition/hash keys are stored once in the key tables and
%% referenced by index; bytes match the reference implementation.
shared_keys_test() ->
    [AggRecord] = aggregate_many([
        {<<"alpha">>, <<"data1">>, <<"zulu">>},
        {<<"beta">>, <<"data2">>, <<"yankee">>},
        {<<"alpha">>, <<"data3">>, <<"xray">>},
        {<<"charlie">>, <<"data4">>, <<"yankee">>},
        {<<"beta">>, <<"data5">>, <<"zulu">>}
    ]),
    %% Reference values obtained using priv/kpl_agg_tests_helper.py.
    RefPK = <<"alpha">>,
    RefEHK = <<"zulu">>,
    RefData = <<(?KPL_AGG_MAGIC)/binary,10,5,97,108,112,104,97,10,4,98,101,116,97,10,
                7,99,104,97,114,108,105,101,18,4,122,117,108,117,18,6,121,97,
                110,107,101,101,18,4,120,114,97,121,26,11,8,0,16,0,26,5,100,
                97,116,97,49,26,11,8,1,16,1,26,5,100,97,116,97,50,26,11,8,0,
                16,2,26,5,100,97,116,97,51,26,11,8,2,16,1,26,5,100,97,116,97,
                52,26,11,8,1,16,0,26,5,100,97,116,97,53,78,67,160,206,22,1,
                33,154,3,6,110,235,9,229,53,100>>,
    ?assertEqual({RefPK, RefData, RefEHK}, AggRecord),
    ok.
%% Adding a record that would overflow the 1 MiB limit emits the current
%% aggregate (with the first record's PK/EHK) and starts a fresh one.
record_fullness_test() ->
    Data1 = list_to_binary(["X" || _ <- lists:seq(1, 500000)]),
    Data2 = list_to_binary(["Y" || _ <- lists:seq(1, 600000)]),
    Data3 = list_to_binary(["Z" || _ <- lists:seq(1, 200000)]),
    Agg0 = new(),
    {undefined, Agg1} = add(Agg0, {<<"pk1">>, Data1, <<"ehk1">>}),
    %% 500k + 600k > 1 MiB: adding record 2 flushes the record-1 aggregate.
    {{AggPK1, _AggData1, AggEHK1}, Agg2} = add(Agg1, {<<"pk2">>, Data2, <<"ehk2">>}),
    {undefined, Agg3} = add(Agg2, {<<"pk3">>, Data3, <<"ehk3">>}),
    {{AggPK2, _AggData2, AggEHK2}, _} = finish(Agg3),
    %% Reference values obtained using priv/kpl_agg_tests_helper.py.
    % fixme; these comparisons will fail as long as we're using the wrong kpl magic.
    %RefChecksum1 = <<198,6,88,216,8,244,159,59,223,14,247,208,138,137,64,118>>,
    %RefChecksum2 = <<89,148,130,126,150,23,148,18,38,230,176,182,93,186,150,69>>,
    ?assertEqual(<<"pk1">>, AggPK1),
    ?assertEqual(<<"ehk1">>, AggEHK1),
    %?assertEqual(RefChecksum1, crypto:hash(md5, AggData1)),
    ?assertEqual(<<"pk2">>, AggPK2),
    ?assertEqual(<<"ehk2">>, AggEHK2),
    %?assertEqual(RefChecksum2, crypto:hash(md5, AggData2)),
    ok.
%% Keep adding random records until an aggregate is emitted; the emitted
%% record must fit within the Kinesis limit and come reasonably close to it.
full_record_test() ->
    Fill = fun F (Acc) ->
        PK = integer_to_binary(rand:uniform(1000)),
        Data = << <<(integer_to_binary(rand:uniform(128)))/binary>>
                  || _ <- lists:seq(1, 1 + rand:uniform(1000)) >>,
        case add(Acc, {PK, Data}) of
            {undefined, NAcc} ->
                F(NAcc);
            {Full, _} ->
                Full
        end
    end,
    {PK, Data, _} = Fill(new()),
    Total = byte_size(PK) + byte_size(Data),
    ?assert(Total =< ?KINESIS_MAX_BYTES_PER_RECORD),
    %% size estimation slack: the aggregate should be within 2 KiB of full.
    ?assert(Total >= ?KINESIS_MAX_BYTES_PER_RECORD - 2048).
%% Round-trip a deflated aggregate: verify the deflate magic, that the
%% trailing MD5 matches the protobuf message, and that the user data decodes.
deflate_test() ->
    Agg0 = new(),
    {undefined, Agg1} = add(Agg0, {<<"pk1">>, <<"data1">>, <<"ehk1">>}),
    {{_, Data, _}, _} = finish(Agg1, true),
    <<Magic:4/binary, Deflated/binary>> = Data,
    ?assertEqual(?KPL_AGG_MAGIC_DEFLATED, Magic),
    Inflated = zlib:uncompress(Deflated),
    %% byte_size/1 instead of the deprecated-style size/1; the final
    %% ?MD5_DIGEST_BYTES bytes are the checksum of everything before them.
    ProtoMsg = binary:part(Inflated, 0, byte_size(Inflated) - ?MD5_DIGEST_BYTES),
    Checksum = binary:part(Inflated, byte_size(Inflated), -?MD5_DIGEST_BYTES),
    ?assertEqual(Checksum, crypto:hash(md5, ProtoMsg)),
    #'AggregatedRecord'{records = [R1]} = kpl_agg_pb:decode_msg(ProtoMsg, 'AggregatedRecord'),
    #'Record'{data = RecordData} = R1,
    ?assertEqual(<<"data1">>, RecordData).
-endif. | src/kpl_agg.erl | 0.672117 | 0.526525 | kpl_agg.erl | starcoder |
% ==============================================================================
% Exponential distribution
% ==============================================================================
-module(exponential).
-export([pdf/2, cdf/2, invcdf/2, rnd/2]).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
% ------------------------------------------------------------------------------
% pdf - Exponential probability density function
% ------------------------------------------------------------------------------
% Density f(x) = Lambda * exp(-Lambda * x) for x >= 0 and zero for x < 0.
% A non-positive rate parameter yields an error tuple.
pdf(_X, Lambda) when Lambda =< 0 ->
    {error, "Lambda is smaller than zero."};
pdf(X, _Lambda) when X < 0 ->
    0.0;
pdf(X, Lambda) ->
    Lambda * math:exp(-(Lambda * X)).
% ------------------------------------------------------------------------------
% cdf - Exponential cumulative distribution function
% ------------------------------------------------------------------------------
% CDF F(x) = 1 - exp(-Lambda * x) for x >= 0 and zero for x < 0.
% A non-positive rate parameter yields an error tuple.
cdf(_X, Lambda) when Lambda =< 0 ->
    {error, "Lambda is smaller than zero."};
cdf(X, _Lambda) when X < 0 ->
    0.0;
cdf(X, Lambda) ->
    1 - math:exp(-(Lambda * X)).
% ------------------------------------------------------------------------------
% invcdf - Inverse exponential distribution function
% ------------------------------------------------------------------------------
% Quantile function F^-1(p) = -ln(1 - p) / Lambda for p in [0, 1].
% Rejects a non-positive rate and probabilities outside [0, 1].
invcdf(_P, Lambda) when Lambda =< 0 ->
    {error, "Lambda is smaller than zero."};
invcdf(P, _Lambda) when P < 0 orelse P > 1 ->
    {error, "Invalid probability"};
invcdf(P, Lambda) ->
    -(math:log(1 - P) / Lambda).
% ------------------------------------------------------------------------------
% rnd - RNG function (inverse-transform sampling)
% ------------------------------------------------------------------------------
% Draw N exponential samples via inverse-transform sampling of uniform draws.
rnd(_N, Lambda) when Lambda =< 0 ->
    {error, "Lambda is smaller than zero."};
rnd(N, Lambda) ->
    [invcdf(rand:uniform(), Lambda) || _ <- lists:seq(1, N)].
% ==============================================================================
% EUnit tests
% ------------------------------------------------------------------------------
-ifdef(TEST).
% ------------------------------------------------------------------------------
% pdf tests
% ------------------------------------------------------------------------------
% pdf/2 against reference values at rate Lambda = 10.
pdf_test() ->
    ?assertEqual(0.0, pdf(-1.0,10)),
    ?assertEqual(10.0, pdf(0.0,10)),
    ?assertEqual(1.353352832366127, pdf(0.2,10)),
    ?assertEqual(0.06737946999085467, pdf(0.5,10)),
    ?assertEqual(4.5399929762484856e-04, pdf(1,10)),
    ?assertEqual(3.720075976020836e-43, pdf(10,10)).
% pdf/2 rejects a non-positive rate parameter.
pdf_error_test() ->
    ?assertEqual({error,"Lambda is smaller than zero."}, pdf(1.0,-1)).
% ------------------------------------------------------------------------------
% cdf tests
% ------------------------------------------------------------------------------
% cdf/2 against reference values at rate Lambda = 10.
cdf_test() ->
    ?assertEqual(0.0, cdf(-1.0,10)),
    ?assertEqual(0.0, cdf(0.0,10)),
    ?assertEqual(0.8646647167633873, cdf(0.2,10)),
    ?assertEqual(0.9932620530009145, cdf(0.5,10)),
    ?assertEqual(0.9990881180344455, cdf(0.7,10)),
    ?assertEqual(0.9999546000702375, cdf(1.0,10)),
    ?assertEqual(1.0, cdf(10.0,10)).
% cdf/2 rejects a non-positive rate parameter.
cdf_error_test() ->
    ?assertEqual({error,"Lambda is smaller than zero."}, cdf(1.0,-1)).
% ------------------------------------------------------------------------------
% inv tests
% ------------------------------------------------------------------------------
% invcdf/2 against reference values at rate Lambda = 10.
invcdf_test() ->
    ?assertEqual(0.0, invcdf(0.0,10)),
    ?assertEqual(0.010536051565782628, invcdf(0.1,10)),
    ?assertEqual(0.06931471805599453, invcdf(0.5,10)),
    ?assertEqual(0.1203972804325936, invcdf(0.7,10)).
% invcdf/2 rejects a non-positive rate and out-of-range probabilities.
invcdf_error_test() ->
    ?assertEqual({error,"Lambda is smaller than zero."}, invcdf(1.0,-1)),
    ?assertEqual({error,"Invalid probability"}, invcdf(-0.1,10)),
    ?assertEqual({error,"Invalid probability"}, invcdf(1.1,10)).
% ------------------------------------------------------------------------------
% rng tests
% ------------------------------------------------------------------------------
% Samples from rnd/2 are non-negative (exponential support).
rnd_positive_test() ->
    [X] = rnd(1,1),
    ?assert(X >= 0.0).
% rnd/2 produces exactly N samples.
rnd_length_test() ->
    Xs = rnd(23,1),
    ?assert(length(Xs) =:= 23).
% rnd/2 rejects a non-positive rate parameter.
rnd_error_test() ->
    ?assertEqual({error,"Lambda is smaller than zero."}, rnd(1,-1)).
-endif. | src/exponential.erl | 0.579043 | 0.566258 | exponential.erl | starcoder |
% License: Apache License, Version 2.0
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%
%% @author <NAME> <<EMAIL>>
%% @copyright Copyright 2015 <NAME>
%%
%% @doc Generic utilities used for costmaps and endpoint costmaps
%%
%% @end
-module(costmap_utils).
-define(FILTEREXT, "filterinfo").
-export([
validate_Xcostmap/4,
is_valid_filter/4,
is_registered/3,
filter_Xcostmap/6,
generate_path/2,
generate_path/3
]).
-include("e_alto.hrl").
%%
%% @doc Determines if BasePath/CostMode/CostMetric is registered as a path
%%
%% Delegates to the registry with the "BasePath/CostMode/CostMetric" path.
is_registered(BasePath, CostMode, CostMetric) ->
    registry:is_registered(generate_path(BasePath, CostMode, CostMetric)).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% @doc Determines if the filter is valid.
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% @doc Validate a filter request.  On success returns {true, Body,
%% Constraints}; on failure {false, Errors}.  ValidationFunction checks the
%% srcs/dsts lists found under PathPrefix; ErrFormatFunction turns its
%% result into an error term (or the atom 'nothing' for "no error").
is_valid_filter(Filter, PathPrefix, ValidationFunction, ErrFormatFunction) ->
    case utils:weak_validate_syntax(Filter) of
        {ok, Body} ->
            %% Collect missing-field and srcs/dsts validation errors.
            _Errors = lists:flatten([utils:check_fields([
                    {{"cost-type"}, Body, <<"Missing cost-type field in request">>}]),
                ErrFormatFunction(ValidationFunction(ej:get({PathPrefix, "srcs"}, Body))),
                ErrFormatFunction(ValidationFunction(ej:get({PathPrefix, "dsts"}, Body)))
            ]),
            %% Drop the 'nothing' placeholders, keeping only real errors.
            _ErrorList = lists:foldl(fun(E, AccIn) ->
                    case (E == nothing) of
                        false -> [E] ++ AccIn;
                        true -> AccIn
                    end
                end, [], _Errors),
            case (ej:get({"cost-type"}, Body) =/= undefined) of
                true ->
                    %% Parse/validate optional constraints against the cost mode.
                    case valid_constraints(ej:get({"constraints"}, Body), ej:get({"cost-type", "cost-mode"}, Body), [], []) of
                        {true, Constraints} ->
                            case length(_ErrorList) of
                                0 -> {true, Body, Constraints};
                                _ -> {false, _ErrorList}
                            end;
                        {false, ConstraintErrors} ->
                            {false, [ErrFormatFunction(ConstraintErrors)] ++ _ErrorList}
                    end;
                false ->
                    {false, _ErrorList}
            end;
        SomethingElse ->
            lager:info("Filter did not pass weak validation check", []),
            {false, SomethingElse}
    end.
%%
%% @doc Validates a list of Constraints
%%
%% @doc Validate a list of constraint strings against the cost-mode Units.
%% Returns {true, ParsedConstraints} ({Operator, Value} tuples) or
%% {false, Errors}.  An undefined constraint list is trivially valid.
valid_constraints(undefined, _, _, _) ->
    {true, []};
valid_constraints(Conditions, Units, AccIn, Errors) when is_binary(Units) ->
    valid_constraints(Conditions, units_to_atom(binary_to_list(Units)), AccIn, Errors);
valid_constraints(Conditions, Units, AccIn, Errors) when is_list(Units) ->
    valid_constraints(Conditions, units_to_atom(Units), AccIn, Errors);
valid_constraints([], _, AccIn, Errors) ->
    case length(Errors) of
        0 -> {true, AccIn};
        _ -> {false, Errors}
    end;
valid_constraints([H | T], Units, AccIn, Errors) ->
    case valid_constraint(H, Units) of
        {false, SomeValue} -> valid_constraints(T, Units, AccIn, [SomeValue] ++ Errors);
        {true, Constraint} -> valid_constraints(T, Units, [Constraint] ++ AccIn, Errors)
    end.

%% The cost mode arrives in untrusted request bodies, so never feed it to
%% list_to_atom/1 (atoms are never GC'd; unbounded creation is a DoS vector).
%% The valid modes ('numerical', 'ordinal') already exist as atoms in this
%% module; anything else maps to a sentinel that matches no known mode and
%% therefore fails validation exactly as an unknown mode did before.
units_to_atom(S) ->
    try
        list_to_existing_atom(S)
    catch
        error:badarg -> unknown_units
    end.
%%
%% @doc Validates an individual Constraint.
%%
%% @doc Validate one constraint string of the form "<operator> <value>" (or
%% "finegrain <anything>") against the cost-mode Units atom.  Returns
%% {true, {Operator, Value}} on success, or {false, Reason(s)}.
valid_constraint(Condition, Units) when is_binary(Condition) ->
    valid_constraint(binary_to_list(Condition), Units);
valid_constraint(Condition, Units) when is_atom(Units) ->
    [_Operator, _Value] = string:tokens(Condition, " "),
    case (_Operator == "finegrain") of
        true ->
            %% Bug fix: wrap the result like every other success path.  The
            %% caller (valid_constraints/4) only matches {true, _} / {false, _},
            %% so the previous bare {?FG, 1.0} return hit a case_clause; it
            %% also has to land in the constraint list as {?FG, _} so that
            %% getSearchType/2 (keyfind on ?FG) and meets_criteria/2 see it.
            {true, {?FG, 1.0}};
        false ->
            {_ValType, _NumValue} = to_unit(_Value),
            %% Operator must be one of the supported comparison operators.
            _ErrorC1 = case lists:member(_Operator, ["gt", "lt", "ge", "le", "eq"]) of
                false -> [unknown_operator];
                true -> []
            end,
            _Result = case ((_ValType =/= undefined)) of
                false -> {false, [unknown_type] ++ _ErrorC1};
                true ->
                    %% Value type must agree with the cost mode: floats for
                    %% numerical costs, integers for ordinal ranks.
                    case ((Units == numerical) and (_ValType == floattype)) or ((Units == ordinal) and (_ValType == inttype)) of
                        true ->
                            case (length(_ErrorC1) > 0) of
                                %% _Operator is whitelisted here, so
                                %% list_to_atom is bounded and safe.
                                false -> {true, {list_to_atom(_Operator), _NumValue}};
                                true -> {false, _ErrorC1}
                            end;
                        false -> {false, value_type_mismatch}
                    end
            end
    end.
%%
%% @doc Converts the string value to the appropriate units value. This
%% function only supports numeric and ordinal units at this time.
%%
%% @doc Parse a numeric string: {floattype, F} when it parses as a float,
%% {inttype, I} when it parses as an integer, {undefined, undefined}
%% otherwise.  Uses try/catch scoped to error:badarg instead of the legacy
%% `catch Expr`, which silently swallowed every exception class.
to_unit(L) when is_list(L) ->
    try
        {floattype, erlang:list_to_float(L)}
    catch
        error:badarg ->
            try
                {inttype, erlang:list_to_integer(L)}
            catch
                error:badarg ->
                    {undefined, undefined}
            end
    end.
%%
%% Generates a URI Path based upon the CostMode and CostMetric.
%%
%% @doc Build the "CostMode/CostMetric" URI path segment; binary arguments
%% are converted to lists first.
generate_path(CostMode, CostMetric) when is_binary(CostMode) ->
    generate_path(binary_to_list(CostMode), CostMetric);
generate_path(CostMode, CostMetric) when is_binary(CostMetric) ->
    generate_path(CostMode, binary_to_list(CostMetric));
generate_path(CostMode, CostMetric) when is_list(CostMode) andalso is_list(CostMetric) ->
    lists:append([CostMode, "/", CostMetric]).
%%
%% Generates a Path given the base path and metric information
%%
%% @doc Prepend BasePath to the "CostMode/CostMetric" segment.
generate_path(BasePath, CostMode, CostMetric) ->
    BasePath ++ "/" ++ generate_path(CostMode, CostMetric).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Validation
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% @doc Validates a Costmap or Endpoint Costmap
%%
%% @doc Validate a costmap / endpoint costmap document: the cost mode must
%% be numerical or ordinal, and every row under MapAccessor must pass
%% VerficationFunction (with VFArgs) and value-type checks.  Returns
%% {ok, Costmap, nostate} or {error, 422, Message}.
validate_Xcostmap(Costmap,MapAccessor,VerficationFunction,VFArgs) ->
    %Get the Cost-Mode
    _CostMode = case ej:get({<<"meta">>,<<"cost-type">>,<<"cost-mode">>},Costmap) of
        <<"numerical">> -> numerical;
        <<"ordinal">> -> ordinal;
        _ -> unknown
    end,
    case _CostMode of
        unknown ->
            lager:info("422-5 An unknown Cost Mode of type ~p was referenced in the document", [_CostMode]),
            {error, 422, "422-5 An unknown Cost Mode was referenced in the document"};
        _ ->
            % Check to ensure the PIDs are in the network map AND the value type is consistent
            Errors = validate_Xcostmap_rows(ej:get(MapAccessor,Costmap),_CostMode,VerficationFunction,VFArgs,[]),
            case length(Errors) of
                0 ->
                    {ok, Costmap, nostate};
                _ ->
                    lager:info("Semantic Errors found - ~p", Errors),
                    {error, 422, "422-6 Semantic Errors are present in the document"}
            end
    end.
%%
%% Validates individual row validation for an Endpoint Cost Map or Cost
%% map.
%%
%% Validate each row of the (endpoint) costmap: the row's source id must
%% pass the verification function VF, and the row's values are then checked
%% by validate_Xcost_values/6.  Accumulates error terms into ErrorList.
validate_Xcostmap_rows({struct,L},CostMode,VF,VFArgs,ErrorList) ->
    validate_Xcostmap_rows(L,CostMode,VF,VFArgs,ErrorList);
validate_Xcostmap_rows([],_,_,_,ErrorList) ->
    ErrorList;
validate_Xcostmap_rows([{SrcId,L}|T],CostMode,VF,VFArgs,ErrorList) ->
    NewErrorList = case VF(SrcId,VFArgs) of
        false ->
            validate_Xcost_values(L,SrcId,CostMode,VF,VFArgs,[{src_id_notfound, SrcId}] ++ ErrorList);
        true ->
            validate_Xcost_values(L,SrcId,CostMode,VF,VFArgs,ErrorList)
    end,
    validate_Xcostmap_rows(T,CostMode,VF,VFArgs,NewErrorList).
%%
%% Validates the cost values of a Endpoint Cost Map or Cost Map's
%% individual row entry by
%% For Costmaps
%% a. Determining that the Source and Destination PIDs exist in the
%% Network Map referenced in the dependent-vtag
%% b. Determine that all cost values conform to the Cost Mode type
%% specified
%% or for Endpoint Costmaps
%% a. Determining that the Source and Destination Endpoints are valid
%% b. Determine that all cost values conform to the Cost Mode type
%% specified
%% Validate one row's destination entries: each destination id must pass VF,
%% and each cost value must conform to the cost mode (checked by
%% metrics:validate_cost_metric/3).  Errors accumulate into ErrorList.
validate_Xcost_values({struct,L},SrcId,CostMode,VF,VFArgs,ErrorList) ->
    validate_Xcost_values(L,SrcId,CostMode,VF,VFArgs,ErrorList);
validate_Xcost_values([],_,_,_,_,ErrorList) ->
    ErrorList;
validate_Xcost_values([{DstId,_}=Attribute|T],SrcId,CostMode,VF,VFArgs,ErrorList) ->
    %Check the DstId
    NewErrorList = case VF(DstId,VFArgs) of
        false ->
            [{dst_id_notfound, DstId}] ++ ErrorList;
        true ->
            ErrorList
    end,
    case metrics:validate_cost_metric(Attribute,SrcId,CostMode) of
        [] -> %% no error found
            validate_Xcost_values(T,SrcId,CostMode,VF,VFArgs,NewErrorList);
        MetricErrorList ->
            validate_Xcost_values(T,SrcId,CostMode,VF,VFArgs,MetricErrorList ++ NewErrorList)
    end.
%%
%% @doc Retrieves Information based upon the JSON provided request.
%%
%% @doc Serve a filter request: validate InputParameters, locate the costmap
%% for the requested cost metric/mode via the filter info registered at
%% Path, search the matching maps, and assemble the response document.
%% Returns the result EJSON, or {error | not_found | internal_error, Msg}.
filter_Xcostmap(Path, InputParameters, PathPrefix, MapPrefix, ValidationFunction, ErrorFormatFunction) ->
    case is_valid_filter(InputParameters, PathPrefix, ValidationFunction, ErrorFormatFunction) of
        {true, Body, Constraints} ->
            CostMetric = ej:get({"cost-type","cost-metric"},Body),
            CostMode = ej:get({"cost-type","cost-mode"},Body),
            case get_filterinfo(Path) of
                not_found ->
                    lager:info("Error - No filter information found for Path ~p",[Path]),
                    { internal_error, <<"Internal Error - No filter information found for Path">>};
                _FilterInfo ->
                    case contains_filterspec(_FilterInfo, CostMetric, CostMode) of
                        {false, nothing} ->
                            {not_found, <<"The Costmap could not be located for the Cost Metric and Mode requested">>};
                        {true, not_found} ->
                            {not_found, <<"Although the Filter request is valid the Costmap could not be located">>};
                        {true, _CostMapId} ->
                            %% NOTE(review): computed but never used — the inline
                            %% getSearchType call in the searchMaps call below is
                            %% what takes effect.
                            _SeachType = getSearchType(MapPrefix,Constraints),
                            {_, _Indices } = metrics:indexOf(CostMetric,CostMode),
                            %%Move the map we want to search to the head of the line in the indices
                            _Indices1 = case lists:keytake(_CostMapId,3,_Indices) of
                                false -> _Indices;
                                {value, Key, TupleList2 } -> lists:append([Key],TupleList2)
                            end,
                            SearchMapType = case MapPrefix of
                                "cost-map" -> costmap;
                                "endpoint-cost-map" -> epcostmap
                            end,
                            %% Search the candidate maps for the requested src/dst pairs.
                            { _FinalResult, _ } = searchMaps(_Indices1,getSearchType(MapPrefix,Constraints),SearchMapType,{struct,[ { list_to_binary(MapPrefix), {struct, []} } ]},
                                [ {ej:get({PathPrefix,"srcs"},Body), ej:get({PathPrefix,"dsts"},Body) } ], Constraints),
                            _CostMap = registry:get_resource(_CostMapId),
                            _PrelimResult = { struct, [{<<"meta">>, {struct,[ {<<"cost-type">>, ej:get({"meta","cost-type"},_CostMap)} ] } },
                                { list_to_binary(MapPrefix), _FinalResult } ] },
                            %% Cost maps also carry the dependent network-map vtags.
                            case SearchMapType of
                                costmap ->
                                    ej:set({<<"meta">>,<<"dependent-vtags">>}, _PrelimResult, ej:get({"meta","dependent-vtags"},_CostMap));
                                epcostmap ->
                                    _PrelimResult
                            end
                    end
            end;
        {false, SomeErrors } ->
            {error, SomeErrors}
    end.
%% @doc Search granularity for a filter request: cost maps are always
%% coarse-grained; an endpoint cost map is fine-grained only when a
%% finegrain ({?FG, _}) constraint is present.
getSearchType("cost-map", _Constraints) ->
    ?CG;
getSearchType("endpoint-cost-map", Constraints) ->
    case lists:keymember(?FG, 1, Constraints) of
        true -> ?FG;
        false -> unknown
    end.
%% Debug helper: log every element of List, one line per element.
full_report(List) when is_list(List) ->
    lager:info("Reporting~n~n", []),
    lists:foreach(fun(E) -> lager:info("~p~n", [E]) end, List),
    lager:info("~n", []).
%% Iterates over entries while misses is not empty and we can search more maps
%% Walk the candidate maps, threading the accumulated Hits/Misses through
%% searchMaps1/6.  Stops early once every requested entry has been found
%% (Misses empty) or when there are no more maps to search.
searchMaps(_,_,_,Hits,[],_) ->
    { Hits, [] };
searchMaps([],_,_,Hits,Misses,_) ->
    { Hits, Misses };
searchMaps([H|T],SearchType,SearchMapType,Hits,Misses,Constraints) ->
    { _Hits, _Misses } = searchMaps1(H,SearchType,SearchMapType,Hits,Misses,Constraints),
    searchMaps(T,SearchType,SearchMapType,_Hits,_Misses,Constraints).
%% Search one candidate map (identified by its index entry) for the current
%% misses.  Skips maps whose type/granularity is incompatible with the
%% requested search; a costmap can answer an endpoint-costmap query by
%% translating endpoints to PIDs via the map's dependent network map.
searchMaps1({MapType,Granularity,ResourceId,Version,_},SearchType,SearchMapType,Hits,Misses,Constraints) ->
    %%Valid Search Conditions
    %%1. Both are fine grained and epcs
    %%2. or non-finegrain
    lager:info("Searching ~p / ~p / ~p",[atom_to_list(MapType),atom_to_list(Granularity),ResourceId]),
    case (((MapType == epcostmap) andalso (SearchMapType == epcostmap) andalso (SearchType == ?FG) and (Granularity == ?FG)) or (SearchType =/= ?FG)) of
        false -> { Hits, Misses };
        true -> case registry:get_resource(ResourceId,Version) of
                not_found -> { Hits, Misses };
                _Map ->
                    %NOTE - This is where criteria unmet is dropped from filter lists
                    {_Filters,_} = misses_to_filter(Misses,[],[]),
                    case { MapType, SearchMapType } of
                        {costmap, costmap} -> applyFilters( _Filters, Constraints, _Map, {Hits,[]}, <<"cost-map">>,undefined);
                        {epcostmap, epcostmap} -> applyFilters( _Filters, Constraints, _Map, {Hits,[]}, <<"endpoint-cost-map">>,undefined);
                        {epcostmap, costmap} -> %Not a valid search option
                            {Hits,Misses};
                        {costmap, epcostmap} ->
                            lager:info("Executing costmap search for an epcostmap",[]),
                            %% Resolve endpoints to PIDs through the network map
                            %% this costmap depends on (vtag resource + tag).
                            _NetworkMap = ej:get({"meta","dependent-vtags",1,"resource-id"},_Map),
                            _NetworkMapVersion = ej:get({"meta","dependent-vtags",1,"tag"},_Map),
                            { _, Tries } = registry:get_resource(_NetworkMap,_NetworkMapVersion,["appstate"]),
                            applyFilters( _Filters, Constraints, _Map, {Hits,[]}, <<"cost-map">>,[{topid,Tries},{returnbase,<<"endpoint-cost-map">>}])
                    end
            end
    end.
%% Apply each {SrcFilter, DstFilter} pair against Map via filter2/7,
%% threading the {Hits, Misses} accumulator.  Normalizes undefined Options
%% to [] and a binary Prefix to a single-segment path list first.
applyFilters(A,B,C,D,E,undefined) ->
    applyFilters(A,B,C,D,E,[]);
applyFilters([],_,_,{Hits,Misses},_,_) ->
    {Hits,Misses};
applyFilters(List,Constraints,Map,{Hits,Misses},Prefix,Options) when is_binary(Prefix)->
    applyFilters(List,Constraints,Map,{Hits,Misses},[Prefix],Options);
applyFilters([{SrcFilter,DstFilter}|T],Constraints,Map,{Hits,Misses},Prefix,Options) when is_list(Prefix)->
    { _Hits, _Misses } = filter2(SrcFilter,DstFilter,Constraints,Map,{Hits,Misses},Prefix,Options),
    applyFilters(T,Constraints,Map,{_Hits,_Misses},Prefix,Options).
%% @doc Partition List into {Present, Missing}: members found under BasePath
%% in Structure versus those absent.  List = undefined means "everything
%% under BasePath is present" (nothing missing).  Options:
%%   keepvalue      - return {Key, Value} pairs instead of bare keys;
%%   {topid, Tries} - translate each member to its PID via the network map
%%                    first; hits come back wrapped as {mapped, Orig, Pid}.
sort_list(Structure, BasePath, undefined, Options) ->
    {struct, _X} = ej:get(BasePath, Structure),
    %% Bug fix: previously this clause returned the fold's {Members, Options}
    %% accumulator pair nested inside the result ({{Members, Options}, []}),
    %% which does not match the {Present, Missing} list-pair shape produced
    %% by the final clause and expected by filter2/7.
    Members = case proplists:get_value(keepvalue, Options, false) of
        false -> [element(1, E) || E <- _X];
        true -> _X
    end,
    {Members, []};
sort_list(Structure, BasePath, List, Options) when is_tuple(BasePath) ->
    sort_list(Structure, tuple_to_list(BasePath), List, Options);
sort_list(Structure, BasePath, List, undefined) ->
    sort_list(Structure, BasePath, List, []);
sort_list(Structure, BasePath, List, Options) when is_list(BasePath), is_list(Options) ->
    {_, _, _Ins, _Outs, _} = lists:foldl(
        fun(E, {Path, Struct, Ins, Outs, _Options}) ->
            %% Look the member up directly, or via its PID when topid is set.
            _X = case proplists:get_value(topid, _Options, undefined) of
                undefined -> ej:get(Path ++ [E], Struct);
                Tries -> case mapservices:getPidForAddress(E, Tries) of
                        undefined -> undefined;
                        _MappedValue -> {mapped, _MappedValue, ej:get(Path ++ [_MappedValue], Struct)}
                    end
            end,
            case _X of
                undefined -> {Path, Struct, Ins, Outs ++ [E], _Options};
                {mapped, _MV, _MVal} -> case proplists:get_value(keepvalue, _Options, false) of
                        false -> {Path, Struct, Ins ++ [{mapped, E, _MV}], Outs, _Options};
                        true -> {Path, Struct, Ins ++ [{{mapped, E, _MV}, _MVal}], Outs, _Options}
                    end;
                Val -> case proplists:get_value(keepvalue, _Options, false) of
                        false -> {Path, Struct, Ins ++ [E], Outs, _Options};
                        true -> {Path, Struct, Ins ++ [{E, Val}], Outs, _Options}
                    end
            end
        end,
        {BasePath, Structure, [], [], Options},
        List),
    {_Ins, _Outs}.
%% Apply one src-filter/dst-filter pair to Map: for every requested row that
%% exists, look up the requested columns, test each value against the
%% Constraints, and accumulate passing values into the Hits EJSON structure.
%% Rows/columns that are absent or fail constraints are recorded as tagged
%% miss entries ({missing_rows, ...}, {missing_columns, ...},
%% {criteria_unmet, ...}) for a later map to retry (see misses_to_filter/3).
filter2(RowFilter,ColumnFilter,Constraints,Map,{Hits,Misses},Prefix,Options) when is_tuple(Prefix) ->
    filter2(RowFilter,ColumnFilter,Constraints,Map,{Hits,Misses},tuple_to_list(Prefix),Options);
filter2(RowFilter,ColumnFilter,Constraints,Map,{Hits,Misses},Prefix,Options) ->
    {RowsPresent,RowsMissing} = sort_list(Map, Prefix, RowFilter, Options),
    % Process the rows that are present and get their values
    {_,_,_,FinalHits,FinalMisses,_,_} = lists:foldl(
        fun(Row, {_ColumnFilter,_Constraints,_Map,_Hits,_Misses,_Prefix,_Options}) ->
            % Get all of the values present in the map
            %% topid-translated rows carry both the original key (used in the
            %% result) and the mapped PID (used for the lookup).
            { _RowVal, _RowReturnBase } = case Row of
                {mapped, SrcRow, MappedRow} -> { MappedRow, SrcRow };
                NonMappedRow -> { NonMappedRow, NonMappedRow }
            end,
            { ValuesPresent, ColumnsNotPresent } = sort_list(_Map, _Prefix ++ [_RowVal], _ColumnFilter, Options ++ [keepvalue]),
            % Test those values against the Criteria, if they pass then
            % add them to the return structure
            %% NOTE(review): the undefined branch wraps _Prefix (already a
            %% path list) in another list while the returnbase branch yields
            %% a single-segment path — confirm ej:set_p accepts both shapes.
            _ReturnBase = case proplists:get_value(returnbase, Options, undefined) of
                undefined -> [_Prefix];
                Val -> [Val]
            end,
            { _, _QueryHits, ConstraintMisses, _ } = lists:foldl(
                fun(E,{_Constraints_, _Hits_, _Misses_, _EJPath}) ->
                    case E of
                        {{mapped, _X, _Y}, _Val1} -> case meets_criteria(_Constraints_,_Val1) of
                                true -> {_Constraints_, ej:set_p(_EJPath ++ [_X],_Hits_, _Val1), _Misses_, _EJPath };
                                false -> {_Constraints_, _Hits_, [E] ++ _Misses_, _EJPath }
                            end;
                        {_Col,_Val2} -> case meets_criteria(_Constraints_,_Val2) of
                                true -> {_Constraints_, ej:set_p(_EJPath ++ [_Col],_Hits_, _Val2), _Misses_, _EJPath };
                                false -> {_Constraints_, _Hits_, [E] ++ _Misses_ ,_EJPath}
                            end
                    end
                end,
                { _Constraints, _Hits, [], _ReturnBase ++ [_RowReturnBase] },
                ValuesPresent),
            % Post process other return values
            _Var2 = case length(ColumnsNotPresent) of
                0 -> [];
                _ -> [{missing_columns, Row, ColumnsNotPresent}]
            end,
            _Var3 = case length(ConstraintMisses) of
                0 -> [];
                _ -> [{criteria_unmet, Row, ConstraintMisses}]
            end,
            {_ColumnFilter,_Constraints,_Map,_QueryHits, _Var2 ++ _Var3 ++ _Misses, _Prefix, _Options}
        end,
        {ColumnFilter,Constraints,Map,Hits,Misses,Prefix,Options},
        RowsPresent
    ),
    _FinalMisses = case length(RowsMissing) of
        0 -> FinalMisses;
        _ -> [{ missing_rows, RowsMissing, ColumnFilter }] ++ FinalMisses
    end,
    { FinalHits, _FinalMisses }.
% Convert a list of miss records (as produced by filter2/7) back into
% filter specifications, splitting them into filters that can be
% re-applied (StuffICanFilter) and entries that cannot (StuffICant --
% currently only {criteria_unmet, ...}, which carries value constraints
% rather than row/column selections).
%
% Returns {StuffICanFilter, StuffICant} once the input is exhausted.
% (Fix: build the accumulators with cons instead of `[X] ++ List`.)
misses_to_filter([], StuffICanFilter, StuffICant) ->
    {StuffICanFilter, StuffICant};
misses_to_filter([H|T], StuffICanFilter, StuffICant) ->
    { _NewValue1, _NewValue2 } = case H of
        % What should I do here in the spirit of ALTO?
        { criteria_unmet, _ , _ } ->
            { StuffICanFilter, [H | StuffICant] };
        { missing_columns, Row, Columns } ->
            { [{ [valuesToFilter(Row)], valuesToFilter(Columns) } | StuffICanFilter], StuffICant };
        { missing_rows, Rows, ColumnFilter } ->
            { [{ valuesToFilter(Rows), ColumnFilter } | StuffICanFilter], StuffICant };
        { SrcFilter, DstFilter } ->
            { [{ SrcFilter, DstFilter } | StuffICanFilter], StuffICant }
    end,
    misses_to_filter(T,_NewValue1,_NewValue2).
% Map valueToFilter/1 over a list of values; the result is in reverse
% order relative to the input (matching the original foldl-with-prepend
% behaviour). A non-list argument is converted directly.
valuesToFilter(Values) when is_list(Values) ->
    lists:reverse([valueToFilter(V) || V <- Values]);
valuesToFilter(Value) ->
    valueToFilter(Value).
% Extract the filterable key from a single (possibly mapped) entry:
%   undefined              -> []
%   {{mapped, Key, _}, _}  -> Key   (mapped column with kept value)
%   {Key, _}               -> Key   (plain column with kept value)
%   {mapped, Key, _}       -> Key   (mapped row)
%   anything else          -> the value itself
valueToFilter(undefined) ->
    [];
valueToFilter({{mapped, Key, _}, _}) ->
    Key;
valueToFilter({mapped, Key, _}) ->
    Key;
valueToFilter({Key, _}) ->
    Key;
valueToFilter(Value) ->
    Value.
% Check a value against a list of {Operator, Discriminator} constraints.
% Returns true only when every constraint is satisfied (an empty list
% trivially passes); an undefined value never matches anything.
% Comparison operators use ==/=< etc., so numeric coercion applies.
meets_criteria(_, undefined) ->
    false;
meets_criteria([], _) ->
    true;
meets_criteria([{Operator,Discriminator}|T], Value) ->
    _TestResult = case Operator of
        eq -> Value == Discriminator;
        le -> Value =< Discriminator;
        ge -> Value >= Discriminator;
        ne -> Value =/= Discriminator;
        lt -> Value < Discriminator;
        gt -> Value > Discriminator;
        % NOTE(review): ?FG is defined elsewhere; it appears to act as an
        % always-true wildcard operator -- confirm its intended meaning.
        ?FG -> true
    end,
    case _TestResult of
        false -> false;
        true -> meets_criteria(T,Value)
    end.
% Look up the stored filter specification for a resource Path. The
% backend constant is keyed by the path with the ?FILTEREXT suffix
% appended; both binary and string (list) paths are accepted.
get_filterinfo(Path) when is_binary(Path) ->
    {_,Spec}=e_alto_backend:get_constant(<< Path/bitstring, << ?FILTEREXT >>/ bitstring >>),
    lager:info("Spec is ~p",[Spec]),
    Spec;
get_filterinfo(Path) when is_list(Path) ->
    {_,Spec}=e_alto_backend:get_constant(list_to_binary(Path ++ ?FILTEREXT)),
    lager:info("Spec is ~p",[Spec]),
    Spec.
%%
%% Filter Info - This is a list of {MetricInformation, ResourceId} entries where
%% - MetricInformation is the parsed meta information of the metric
%% - ResourceId is the internal ResourceId associated with the cost map
%%
% Scan the filter-info list (see above) for an entry whose meta
% information matches both CostMetric and CostMode. Returns
% {true, ResourceId} for the first match, or {false, nothing} when the
% list is undefined/empty or no entry matches.
contains_filterspec(undefined, _, _) ->
    {false, nothing};
contains_filterspec([], _, _) ->
    {false, nothing};
contains_filterspec([{ {_,MetaInfo},_ResourceId}|T], CostMetric, CostMode) ->
    lager:info("~p is the value",[MetaInfo]),
    case ((ej:get({<<"cost-metric">>},MetaInfo) == CostMetric) and
        (ej:get({<<"cost-mode">>},MetaInfo) == CostMode)) of
        true -> {true, _ResourceId };
        false -> contains_filterspec(T, CostMetric, CostMode)
    end. | src/core/costmap_utils.erl | 0.514644 | 0.425247 | costmap_utils.erl | starcoder
-module(kdtree_h3_SUITE).
-include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl").
-export([all/0]).
-export([
basic_test/1,
add_test/1,
remove_test/1
]).
% Common Test callback: the list of test cases in this suite.
all() -> [
    basic_test,
    add_test,
    remove_test
].
% Build a kdtree from the bundled hotspot h3 indices, then check
% nearest/2 and nearby/3 against a randomly chosen input coordinate.
basic_test(_Config) ->
    % random list of h3 indices
    {ok, [List0]} = file:consult(filename:join(code:priv_dir(kdtree), "hotspots.txt")),
    List = [{H3, Name} || {H3, Name} <- List0],
    Coordinates = [{h3:to_geo(H3), Name} || {H3, Name} <- List],
    ct:pal("Coordinates: ~p", [Coordinates]),
    % build a kdtree
    Tree = kdtree:from_indices(List),
    ct:pal("Tree: ~p", [Tree]),
    {RandCoordinate, RandName} = lists:nth(rand:uniform(length(Coordinates)), Coordinates),
    ct:pal("RandCoordinate: ~p, RandName: ~p", [RandCoordinate, RandName]),
    % Verify nearest location from given location
    {NearestLoc, Distance} = kdtree:nearest(Tree, RandCoordinate),
    ct:pal("NearestLoc: ~p, Distance: ~p", [NearestLoc, Distance]),
    % Verify nearby: searching within (Distance + 1) must yield something
    Nearby = kdtree:nearby(Tree, RandCoordinate, round(Distance) + 1),
    ct:pal("Nearby: ~p", [Nearby]),
    ?assertNotEqual([], Nearby),
    ok.
% Build a tree from the bundled "missing" h3 indices, then verify that
% adding one new node grows the tree size by exactly one.
add_test(_Config) ->
    % random list of h3 indices
    {ok, [List0]} = file:consult(filename:join(code:priv_dir(kdtree), "missing.txt")),
    List = [{H3, Name} || {H3, Name} <- List0],
    Coordinates = [{h3:to_geo(H3), Name} || {H3, Name} <- List],
    ct:pal("Coordinates: ~p", [Coordinates]),
    % build a kdtree
    Tree = kdtree:from_indices(List),
    ct:pal("Tree: ~p", [Tree]),
    % NOTE(review): OldList is never used below -- the compiler will warn.
    OldList = kdtree:to_list(Tree),
    %% create a new node from h3 index
    NewNode = treenode:from_h3(631210968874529791, "mini-currant-lizard"),
    ct:pal("NewNode: ~p", [NewNode]),
    %% add node to tree
    NewTree = kdtree:add(Tree, NewNode),
    ct:pal("NewTree: ~p", [NewTree]),
    ?assertEqual(kdtree:size(Tree), kdtree:size(NewTree) - 1),
    ok.
% Build a tree from the bundled hotspot h3 indices, then verify that
% removing a randomly chosen node shrinks the tree size by exactly one.
remove_test(_Config) ->
    % random list of h3 indices
    {ok, [List0]} = file:consult(filename:join(code:priv_dir(kdtree), "hotspots.txt")),
    List = [{H3, Name} || {H3, Name} <- List0],
    Coordinates = [{h3:to_geo(H3), Name} || {H3, Name} <- List],
    ct:pal("Coordinates: ~p", [Coordinates]),
    % build a kdtree
    Tree = kdtree:from_indices(List),
    ct:pal("Tree: ~p", [Tree]),
    {RandCoordinate, RandName} = lists:nth(rand:uniform(length(Coordinates)), Coordinates),
    ct:pal("RandCoordinate: ~p, RandName: ~p", [RandCoordinate, RandName]),
    NodeToRemove = treenode:new(RandCoordinate, RandName),
    NewTree = kdtree:remove(Tree, NodeToRemove),
    ?assertEqual(kdtree:size(Tree), kdtree:size(NewTree) + 1),
    ok. | test/kdtree_h3_SUITE.erl | 0.583797 | 0.469581 | kdtree_h3_SUITE.erl | starcoder
-module( sqerl_validator ).
-author( "<NAME> <<EMAIL>>" ).
-export( [ validate/2 ] ).
%%
%% Specification Type
%%
-type spec() :: atom() | { atom(), integer() } | { atom(), integer(), integer() }.
%%
%% Match the given value against the provided list of specifications. If
%% any of the specifications fail, all failed steps will be returned in
%% a list along with the value. Otherwise, the failure list will be empty
%%
%% Specifications:
%%
%% required - Indicates that the value must be present. Fails if the value is matches the atom 'undefined'
%% integer - Value must be an integer
%% numeric - Value must be an integer or floating-point value
%% { length, Len } - String value must be Len characters long
%% { length, Min, Max } - String value must be between Min and Max characters long
%% { bounded, Min, Max } - Numeric value must be greater than or equal to Min and less than or equal to Max
%% { min, Min } - Numeric value must be greater than or equal to Min
%% { max, Max } - Numeric value must be less than or equal to Max
%%
%% Parameters:
%% Value - The value to be matched against the spec
%% Spec - List of validation specifications
%%
%% Returns:
%% { Value, Failures } - Where failures is a list of error messages indicating the failed
%% validation steps.
%%
-spec validate( term(), [ spec() ] ) -> { ok, term() } | { error, term(), [ binary() ] }.
validate( Value, Spec ) ->
    validate( Value, Spec, [] ).

%%
%% Check whether the value has been defined. If the check fails, return immediately since all other
%% validations would fail anyway.
%%
validate( undefined, [ required | _Rest ], Errors ) ->
    { error, undefined, [ <<"Value is not present">> | Errors ] };
validate( Value, [ required | Rest ], Errors ) ->
    validate( Value, Rest, Errors );

%%
%% Check whether the value is a string (an Erlang list or a binary)
%%
validate( Value, [ string | Rest ], Errors ) when is_list( Value ); is_binary( Value ) ->
    validate( Value, Rest, Errors );
validate( Value, [ string | Rest ], Errors ) ->
    validate( Value, Rest, [ <<"Value is not a string">> | Errors ] );

%%
%% Check whether the value is an integer
%%
validate( Value, [ integer | Rest ], Errors ) when is_integer( Value ) ->
    validate( Value, Rest, Errors );
validate( Value, [ integer | Rest ], Errors ) ->
    validate( Value, Rest, [ <<"Value is not an integer">> | Errors ] );

%%
%% Check whether the value is numeric ( integer or floating-point )
%%
validate( Value, [ numeric | Rest ], Errors ) when is_integer( Value ); is_float( Value ) ->
    validate( Value, Rest, Errors );
validate( Value, [ numeric | Rest ], Errors ) ->
    validate( Value, Rest, [ <<"Value is not numeric">> | Errors ] );

%%
%% Check whether the value's length is equal to Len
%% (binaries are measured in bytes, lists in elements)
%%
validate( Value, [ { length, Len } | Rest ], Errors ) when byte_size( Value ) == Len ->
    validate( Value, Rest, Errors );
validate( Value, [ { length, Len } | Rest ], Errors ) when length( Value ) == Len ->
    validate( Value, Rest, Errors );
validate( Value, [ { length, _Len } | Rest ], Errors ) ->
    validate( Value, Rest, [ <<"Value has incorrect length">> | Errors ] );

%%
%% Check whether the value's length is greater than or equal to Min and
%% less than or equal to Max
%%
validate( Value, [ { length, Min, Max } | Rest ], Errors ) when byte_size( Value ) >= Min, byte_size( Value ) =< Max ->
    validate( Value, Rest, Errors );
validate( Value, [ { length, Min, Max } | Rest ], Errors ) when length( Value ) >= Min, length( Value ) =< Max ->
    validate( Value, Rest, Errors );
validate( Value, [ { length, _Min, _Max } | Rest ], Errors ) ->
    validate( Value, Rest, [ <<"Value has incorrect length">> | Errors ] );

%%
%% Check whether the value is greater or equal to Min and less than or equal to Max
%%
validate( Value, [ { bounded, Min, Max } | Rest ], Errors ) when Value >= Min, Value =< Max ->
    validate( Value, Rest, Errors );
validate( Value, [ { bounded, _Min, _Max } | Rest ], Errors ) ->
    validate( Value, Rest, [ <<"Value has incorrect magnitude">> | Errors ] );

%%
%% Check whether the value is greater or equal to Min
%%
validate( Value, [ { min, Min } | Rest ], Errors ) when Value >= Min ->
    validate( Value, Rest, Errors );
validate( Value, [ { min, _Min } | Rest ], Errors ) ->
    validate( Value, Rest, [ <<"Value has incorrect magnitude">> | Errors ] );

%%
%% Check whether the value is less than or equal to Max
%%
validate( Value, [ { max, Max } | Rest ], Errors ) when Value =< Max ->
    validate( Value, Rest, Errors );
validate( Value, [ { max, _Max } | Rest ], Errors ) ->
    validate( Value, Rest, [ <<"Value has incorrect magnitude">> | Errors ] );

%%
%% Return the result: {ok, Value} when no step failed, otherwise
%% {error, Value, Errors} with the failure messages collected.
%%
validate( Value, [], [] ) ->
    { ok, Value };
validate( Value, [], Errors ) ->
    { error, Value, Errors }. | src/core/sqerl_validator.erl | 0.566738 | 0.485966 | sqerl_validator.erl | starcoder
% @doc Communicate with the
% <a href="https://datasheets.maximintegrated.com/en/ds/DS18B20.pdf">
% DS18B20 - Programmable Resolution 1-Wire Digital Thermometer
% </a>.
% @end
-module(onewire_ds18b20).
% API
-export([temp/1]).
-export([read_scratchpad/1]).
-export([convert/2]).
-define(READ_SCRATCHPAD, 16#BE).
-define(CONVERT_T, 16#44).
%--- API -----------------------------------------------------------------------
% @doc Read the temperature in °C from the scratchpad.
%
% === Example ===
% ```
% onewire_ds18b20:temp([40,255,190,25,96,23,3,203]).
% 22.375
% '''
-spec temp([byte()]) -> float().
temp(ID) ->
    grisp_onewire:transaction(fun() ->
        select_device(ID),
        {<<LSB>>, <<MSB>>, Config} = read_scratchpad(),
        Bits = bits(Config),
        %% The 16-bit register holds the temperature in 1/16 °C units.
        %% NOTE(review): for 9-11 bit resolutions, 4 + Bits is not
        %% byte-aligned, so the trailing `_/binary` segment cannot match
        %% and this raises badmatch -- confirm only the 12-bit (factory
        %% default) configuration is expected here.
        <<_:4, Temp:Bits/signed-big, _/binary>> = <<MSB, LSB>>,
        Temp / 16.0
    end).
% @doc Read the scratchpad of the device with the given ROM ID.
%
% Returns the two bytes of the temperature register (`LSB' and `MSB') and
% the one byte of the configuration register, each as a 1-byte binary.
-spec read_scratchpad([byte()]) -> {LSB::binary(), MSB::binary(),
                                    Config::binary()}.
read_scratchpad(ID) ->
    grisp_onewire:transaction(fun() ->
        select_device(ID),
        read_scratchpad()
    end).
% @doc Initiate a temperature measurement.
%
% Timeout is the maximum time (milliseconds) to wait for the device to
% signal completion of the conversion (see confirm/1).
%
% === Example ===
% ```
% 1> onewire_ds18b20:convert([40,255,190,25,96,23,3,203], 500).
% ok
% '''
-spec convert([byte()], any()) -> ok.
convert(ID, Timeout) ->
    grisp_onewire:transaction(fun() ->
        select_device(ID),
        grisp_onewire:write_byte(?CONVERT_T),
        confirm(Timeout)
    end).
%--- Internal ------------------------------------------------------------------
%% Reset the bus and address the device with the given 64-bit ROM ID
%% (16#55 is the 1-Wire "Match ROM" command). Raises {onewire, Reason}
%% unless a presence pulse is detected after the reset.
select_device(ID) ->
    case grisp_onewire:bus_reset() of
        presence_detected ->
            grisp_onewire:write_byte(16#55),
            [grisp_onewire:write_byte(B) || B <- ID];
        Other ->
            error({onewire, Other})
    end.
%% Issue the Read Scratchpad command and read all nine scratchpad bytes,
%% keeping only the temperature LSB/MSB and the configuration register.
read_scratchpad() ->
    grisp_onewire:write_byte(?READ_SCRATCHPAD),
    [LSB, MSB, _TH, _TL, Config, _, _, _, _CRC]
        = [grisp_onewire:read_byte() || _ <- lists:seq(0, 8)],
    {LSB, MSB, Config}.
%% Decode the resolution bits (R1 R0, bits 6-5) of the configuration
%% register into the number of significant temperature bits:
%% 00 -> 9, 01 -> 10, 10 -> 11, 11 -> 12.
bits(<<_:1, Resolution:2, _:5>>) ->
    9 + Resolution.
%% Poll the bus every 10 ms until the device signals that the conversion
%% is done (a read byte with the most significant bit set), or raise
%% {onewire_ds18b20, confirmation_timeout} once Timeout ms have elapsed.
%% NOTE(review): a byte that is neither 16#00 nor has the top bit set
%% matches no clause and raises function_clause -- confirm that is the
%% intended failure mode.
confirm(Timeout) ->
    confirm(grisp_onewire:read_byte(), ms(), Timeout).

confirm(<<16#00>>, Start, Timeout) ->
    case ms() - Start > Timeout of
        false ->
            timer:sleep(10),
            confirm(grisp_onewire:read_byte(), Start, Timeout);
        true ->
            error({onewire_ds18b20, confirmation_timeout})
    end;
confirm(<<1:1, _:7>>, _Start, _Timeout) ->
    ok.
%% Monotonic time in milliseconds (for measuring elapsed time only).
ms() -> erlang:monotonic_time(millisecond). | src/onewire_ds18b20.erl | 0.629319 | 0.673527 | onewire_ds18b20.erl | starcoder
%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License at
%% http://www.mozilla.org/MPL/
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
%% License for the specific language governing rights and limitations
%% under the License.
%%
%% The Original Code is RabbitMQ.
%%
%% The Initial Developers of the Original Code are LShift Ltd,
%% Cohesive Financial Technologies LLC, and Rabbit Technologies Ltd.
%%
%% Portions created before 22-Nov-2008 00:00:00 GMT by LShift Ltd,
%% Cohesive Financial Technologies LLC, or Rabbit Technologies Ltd
%% are Copyright (C) 2007-2008 LShift Ltd, Cohesive Financial
%% Technologies LLC, and Rabbit Technologies Ltd.
%%
%% Portions created by LShift Ltd are Copyright (C) 2007-2009 LShift
%% Ltd. Portions created by Cohesive Financial Technologies LLC are
%% Copyright (C) 2007-2009 Cohesive Financial Technologies
%% LLC. Portions created by Rabbit Technologies Ltd are Copyright
%% (C) 2007-2009 Rabbit Technologies Ltd.
%%
%% All Rights Reserved.
%%
%% Contributor(s): ______________________________________.
%%
%% Priority queues have essentially the same interface as ordinary
%% queues, except that a) there is an in/3 that takes a priority, and
%% b) we have only implemented the core API we need.
%%
%% Priorities should be integers - the higher the value the higher the
%% priority - but we don't actually check that.
%%
%% in/2 inserts items with priority 0.
%%
%% We optimise the case where a priority queue is being used just like
%% an ordinary queue. When that is the case we represent the priority
%% queue as an ordinary queue. We could just call into the 'queue'
%% module for that, but for efficiency we implement the relevant
%% functions directly in here, thus saving on inter-module calls and
%% eliminating a level of boxing.
%%
%% When the queue contains items with non-zero priorities, it is
%% represented as a sorted kv list with the inverted Priority as the
%% key and an ordinary queue as the value. Here again we use our own
%% ordinary queue implementation for efficiency, often making recursive
%% calls into the same function knowing that ordinary queues represent
%% a base case.
-module(riak_core_priority_queue).
-export([new/0,
is_queue/1,
is_empty/1,
len/1,
to_list/1,
in/2,
in/3,
out/1,
out/2,
pout/1,
join/2]).
%%----------------------------------------------------------------------------
-type priority() :: integer().
-type squeue() :: {queue, [any()], [any()]}.
-type pqueue() :: squeue() | {pqueue, [{priority(), squeue()}]}.
%%----------------------------------------------------------------------------
%% @doc Create an empty priority queue (plain-queue representation).
-spec new() -> pqueue().
new() -> {queue, [], []}.
-spec is_queue(any()) -> boolean().
%% A well-formed priority queue is either a plain two-list queue, or a
%% {pqueue, ...} list of {IntegerPriority, SubQueue} pairs.
is_queue({queue, Rear, Front}) ->
    is_list(Rear) andalso is_list(Front);
is_queue({pqueue, Queues}) when is_list(Queues) ->
    lists:all(fun({Prio, SubQ}) -> is_integer(Prio) andalso is_queue(SubQ) end,
              Queues);
is_queue(_) ->
    false.
-spec is_empty(pqueue()) -> boolean().
%% A priority queue is empty exactly when it is the canonical empty
%% plain-queue term ({pqueue, ...} always holds at least one element).
is_empty(Q) ->
    Q =:= {queue, [], []}.
-spec len(pqueue()) -> non_neg_integer().
%% Number of elements in the priority queue. O(n).
len({queue, Rear, Front}) when is_list(Rear), is_list(Front) ->
    length(Rear) + length(Front);
len({pqueue, Queues}) ->
    lists:foldl(fun({_Prio, SubQ}, Acc) -> Acc + len(SubQ) end, 0, Queues).
-spec to_list(pqueue()) -> [{priority(), any()}].
%% Convert to a list of {Priority, Value} pairs in dequeue order
%% (plain-queue elements carry priority 0; stored priorities are
%% negated, hence the -P when reading them back out).
to_list({queue, In, Out}) when is_list(In), is_list(Out) ->
    lists:map(fun(V) -> {0, V} end, Out ++ lists:reverse(In));
to_list({pqueue, Queues}) ->
    lists:append([[{-P, V} || {0, V} <- to_list(Q)] || {P, Q} <- Queues]).
%% @doc Insert Item at the default priority 0.
-spec in(any(), pqueue()) -> pqueue().
in(Item, Q) ->
    in(Item, 0, Q).

%% @doc Insert X with the given priority (higher value = dequeued first).
%% A plain queue is kept while all priorities are 0; the first non-zero
%% priority promotes the representation to a {pqueue, ...} list sorted
%% by negated priority, so the head holds the highest priority.
%% (Fix: use lists:keyfind/3 instead of the legacy lists:keysearch/3.)
-spec in(any(), priority(), pqueue()) -> pqueue().
in(X, 0, {queue, [_] = In, []}) ->
    {queue, [X], In};
in(X, 0, {queue, In, Out}) when is_list(In), is_list(Out) ->
    {queue, [X|In], Out};
in(X, Priority, _Q = {queue, [], []}) ->
    in(X, Priority, {pqueue, []});
in(X, Priority, Q = {queue, _, _}) ->
    in(X, Priority, {pqueue, [{0, Q}]});
in(X, Priority, {pqueue, Queues}) ->
    P = -Priority,
    {pqueue, case lists:keyfind(P, 1, Queues) of
                 {P, Q} ->
                     lists:keyreplace(P, 1, Queues, {P, in(X, Q)});
                 false ->
                     lists:keysort(1, [{P, {queue, [X], []}} | Queues])
             end}.
%% @doc Remove the highest-priority element. Returns {empty, Q} for an
%% empty queue, otherwise {{value, V}, Q1}. The plain-queue clauses
%% implement the classic two-list queue (rebuilding the front via r2f/1);
%% the pqueue clause pops from the first (highest-priority) sub-queue and
%% collapses the structure back to a plain queue when only the priority-0
%% sub-queue remains.
-spec out(pqueue()) -> {(empty | {value, any()}), pqueue()}.
out({queue, [], []} = Q) ->
    {empty, Q};
out({queue, [V], []}) ->
    {{value, V}, {queue, [], []}};
out({queue, [Y|In], []}) ->
    [V|Out] = lists:reverse(In, []),
    {{value, V}, {queue, [Y], Out}};
out({queue, In, [V]}) when is_list(In) ->
    {{value, V}, r2f(In)};
out({queue, In, [V|Out]}) when is_list(In) ->
    {{value, V}, {queue, In, Out}};
out({pqueue, [{P, Q} | Queues]}) ->
    {R, Q1} = out(Q),
    NewQ = case is_empty(Q1) of
               true -> case Queues of
                           [] -> {queue, [], []};
                           [{0, OnlyQ}] -> OnlyQ;
                           [_|_] -> {pqueue, Queues}
                       end;
               false -> {pqueue, [{P, Q1} | Queues]}
           end,
    {R, NewQ}.
%% @doc Remove the highest-priority element, but only when its priority
%% is at least Priority; otherwise return {empty, Q} unchanged.
%% (Priorities are stored negated, hence the (-P) comparison.)
-spec out(priority(), pqueue()) -> {(empty | {value, any()}), pqueue()}.
out(_Priority, {queue, [], []} = Q) ->
    {empty, Q};
out(Priority, {queue, _, _} = Q) when Priority =< 0 ->
    out(Q);
out(_Priority, {queue, _, _} = Q) ->
    {empty, Q};
out(Priority, {pqueue, [{P, _Q} | _Queues]} = Q) when Priority =< (-P) ->
    out(Q);
out(_Priority, {pqueue, [_|_]} = Q) ->
    {empty, Q}.
%% @doc Like out/1, but the value tuple also carries the element's
%% priority: {{value, V, Priority}, Q1}.
-spec pout(pqueue()) -> {(empty | {value, any(), priority()}), pqueue()}.
pout({queue, [], []} = Q) ->
    {empty, Q};
pout({queue, _, _} = Q) ->
    %% Plain queues hold only priority-0 elements.
    {{value, V}, Q1} = out(Q),
    {{value, V, 0}, Q1};
pout({pqueue, [{P, Q} | Queues]}) ->
    {{value, V}, Q1} = out(Q),
    NewQ = case is_empty(Q1) of
               true -> case Queues of
                           [] -> {queue, [], []};
                           [{0, OnlyQ}] -> OnlyQ;
                           [_|_] -> {pqueue, Queues}
                       end;
               false -> {pqueue, [{P, Q1} | Queues]}
           end,
    {{value, V, -P}, NewQ}.
%% @doc Concatenate two priority queues; at equal priority, A's elements
%% come before B's. Plain queues sit at priority 0, so joining one into
%% a pqueue splices it into the 0 slot (creating that slot after any
%% positive-priority entries if needed).
-spec join(pqueue(), pqueue()) -> pqueue().
join(A, {queue, [], []}) ->
    A;
join({queue, [], []}, B) ->
    B;
join({queue, AIn, AOut}, {queue, BIn, BOut}) ->
    {queue, BIn, AOut ++ lists:reverse(AIn, BOut)};
join(A = {queue, _, _}, {pqueue, BPQ}) ->
    %% Pre holds negative keys, i.e. positive priorities (> 0).
    {Pre, Post} = lists:splitwith(fun ({P, _}) -> P < 0 end, BPQ),
    Post1 = case Post of
                [] -> [ {0, A} ];
                [ {0, ZeroQueue} | Rest ] -> [ {0, join(A, ZeroQueue)} | Rest ];
                _ -> [ {0, A} | Post ]
            end,
    {pqueue, Pre ++ Post1};
join({pqueue, APQ}, B = {queue, _, _}) ->
    {Pre, Post} = lists:splitwith(fun ({P, _}) -> P < 0 end, APQ),
    Post1 = case Post of
                [] -> [ {0, B} ];
                [ {0, ZeroQueue} | Rest ] -> [ {0, join(ZeroQueue, B)} | Rest ];
                _ -> [ {0, B} | Post ]
            end,
    {pqueue, Pre ++ Post1};
join({pqueue, APQ}, {pqueue, BPQ}) ->
    {pqueue, merge(APQ, BPQ, [])}.
%% Merge two pqueue lists (both sorted by negated priority), joining
%% sub-queues that share a priority with A's elements first.
merge([], BPQ, Acc) ->
    lists:reverse(Acc, BPQ);
merge(APQ, [], Acc) ->
    lists:reverse(Acc, APQ);
merge([{P, A}|As], [{P, B}|Bs], Acc) ->
    merge(As, Bs, [ {P, join(A, B)} | Acc ]);
merge([{PA, A}|As], Bs = [{PB, _}|_], Acc) when PA < PB ->
    merge(As, Bs, [ {PA, A} | Acc ]);
merge(As = [{_, _}|_], [{PB, B}|Bs], Acc) ->
    merge(As, Bs, [ {PB, B} | Acc ]).
%% Rebuild a plain queue from the remaining rear list after a dequeue:
%% keep up to two elements in the rear and move the rest, reversed, to
%% the front.
r2f(List) ->
    case List of
        []          -> {queue, [], []};
        [X]         -> {queue, [], [X]};
        [X, Y]      -> {queue, [X], [Y]};
        [X, Y | Zs] -> {queue, [X, Y], lists:reverse(Zs, [])}
    end.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
% Insert low/mid/high priority items in the given Order and verify they
% always come back out highest-priority first, along with the basic
% predicates (is_queue/is_empty/len/to_list).
simple_case(Order) ->
    Queue = ?MODULE:new(),
    ?assertEqual(true, ?MODULE:is_queue(Queue)),
    ?assertEqual(true, ?MODULE:is_empty(Queue)),
    ?assertEqual(0, ?MODULE:len(Queue)),
    ?assertEqual([], ?MODULE:to_list(Queue)),
    case Order of
        forward ->
            Queue2 = ?MODULE:in(low, Queue),
            Queue3 = ?MODULE:in(mid, 500, Queue2),
            Queue4 = ?MODULE:in(high, 1000, Queue3);
        reverse ->
            Queue2 = ?MODULE:in(high, 1000, Queue),
            Queue3 = ?MODULE:in(mid, 500, Queue2),
            Queue4 = ?MODULE:in(low, Queue3);
        mixed ->
            Queue2 = ?MODULE:in(high, 1000, Queue),
            Queue3 = ?MODULE:in(low, Queue2),
            Queue4 = ?MODULE:in(mid, 500, Queue3)
    end,
    ?assertEqual(false, ?MODULE:is_empty(Queue4)),
    ?assertEqual(3, ?MODULE:len(Queue4)),
    ?assertMatch({{value, high}, _}, ?MODULE:out(Queue4)),
    {{value, high}, Queue5} = ?MODULE:out(Queue4),
    ?assertMatch({{value, mid}, _}, ?MODULE:out(Queue5)),
    {{value, mid}, Queue6} = ?MODULE:out(Queue5),
    ?assertMatch({{value, low}, _}, ?MODULE:out(Queue6)),
    {{value, low}, Queue7} = ?MODULE:out(Queue6),
    ?assertEqual(0, ?MODULE:len(Queue7)),
    ?assertEqual(true, ?MODULE:is_queue(Queue2)),
    ?assertEqual(true, ?MODULE:is_queue(Queue3)),
    ?assertEqual(true, ?MODULE:is_queue(Queue4)),
    ?assertEqual(false, ?MODULE:is_queue([])),
    ok.

% Verify join/2 for two same-priority queues (A's elements first) and
% for two queues with disjoint priorities (descending priority order).
merge_case() ->
    QueueA1 = ?MODULE:new(),
    QueueA2 = ?MODULE:in(1, QueueA1),
    QueueA3 = ?MODULE:in(3, QueueA2),
    QueueA4 = ?MODULE:in(5, QueueA3),
    QueueB1 = ?MODULE:new(),
    QueueB2 = ?MODULE:in(2, QueueB1),
    QueueB3 = ?MODULE:in(4, QueueB2),
    QueueB4 = ?MODULE:in(6, QueueB3),
    Merged1 = ?MODULE:join(QueueA4, QueueB4),
    ?assertEqual([{0, 1}, {0, 3}, {0, 5}, {0, 2}, {0, 4}, {0, 6}],
                 ?MODULE:to_list(Merged1)),
    QueueC1 = ?MODULE:new(),
    QueueC2 = ?MODULE:in(1, 10, QueueC1),
    QueueC3 = ?MODULE:in(3, 30, QueueC2),
    QueueC4 = ?MODULE:in(5, 50, QueueC3),
    QueueD1 = ?MODULE:new(),
    QueueD2 = ?MODULE:in(2, 20, QueueD1),
    QueueD3 = ?MODULE:in(4, 40, QueueD2),
    QueueD4 = ?MODULE:in(6, 60, QueueD3),
    Merged2 = ?MODULE:join(QueueC4, QueueD4),
    ?assertEqual([{60, 6}, {50, 5}, {40, 4}, {30, 3}, {20, 2}, {10, 1}],
                 ?MODULE:to_list(Merged2)),
    ok.

% EUnit entry point: run the scenarios above.
basic_test() ->
    simple_case(forward),
    simple_case(reverse),
    simple_case(mixed),
    merge_case(),
    ok.
-endif. | src/riak_core_priority_queue.erl | 0.572364 | 0.45417 | riak_core_priority_queue.erl | starcoder |
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved.
%%
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at http://mozilla.org/MPL/2.0/.
%%
%% -------------------------------------------------------------------
%% @doc Exometer histogram probe behavior
%% This module implements histogram metrics. Each histogram is a sliding
%% window, for which the following datapoints are calculated:
%%
%% * `max': the maximum value
%% * `min': the minimum value
%% * `mean': the arithmetic mean
%% * `median': the median
%% * `50|75|90|95|97|99': percentiles
%% * `999': the 99.9th percentile
%% * `n': the number of values used in the calculation (Note)
%%
%% Two histogram implementations are supported and can be selected using
%% the option `histogram_module':
%%
%% * `exometer_slide' implements a sliding window, which saves all elements
%% within the window. Updating the histogram is cheap, but calculating the
%% datapoints may be expensive depending on the size of the window.
%%
%% * `exometer_slot_slide' (default), aggregates mean, min and max values
%% within given time slots, thereby reducing the amount of data kept for
%% datapoint calculation. The update overhead should be insignificant.
%% However, some loss of precision must be expected. To achieve slightly
%% better accuracy of percentiles, 'extra values' are kept (every 4th
%% value). For the calculation, extra vaules are included in the set
%% until a suitable number has been reached (up to 600). Note that
%% `n' reflects the number of values used in the calculation - not the
%% number of updates made within the time window.
%%
%% Supported options:
%%
%% * `time_span' (default: `60000') size of the window in milliseconds.
%% * `slot_period' (default: `1000') size of the time slots in milliseconds.
%% * `histogram_module' (default: `exometer_slot_slide').
%% * `truncate' (default: `true') whether to truncate the datapoint values.
%% Supported values: `true | false | round', where `round' means to round
%% the value rather than truncating it.
%% * `keep_high' (default: `0') number of top values to actually keep.
%%
%% The `keep_high' option can be used to get better precision for the higher
%% percentiles. A bounded buffer (see {@link exometer_shallowtree}) is used
%% to store the highest values, and these values are used to calculate the
%% exact higher percentiles, as far as they go. For example, if the window
%% saw 10,000 values, and the 1000 highest values are kept, these can be used
%% to determine the percentiles `90' and up.
%%
%% @end
-module(exometer_histogram).
-behaviour(exometer_probe).
%% exometer_probe callbacks
-export([behaviour/0,
probe_init/3,
probe_terminate/1,
probe_setopts/3,
probe_update/2,
probe_get_value/2,
probe_get_datapoints/1,
probe_reset/1,
probe_code_change/3,
probe_sample/1,
probe_handle_msg/2]).
-compile(inline).
-compile(inline_list_funcs).
-export([datapoints/0]).
-export([average_sample/3,
average_transform/2]).
-export([test_run/1, test_run/2,
test_series/0]).
%% -compile({parse_transform, exometer_igor}).
%% -compile({igor, [{files, ["src/exometer_util.erl"
%% , "src/exometer_proc.erl"
%% , "src/exometer_slot_slide.erl"
%% , "src/exometer_slide.erl"
%% ]}]}).
-include("exometer.hrl").
-record(st, {name,
slide = undefined, %%
slot_period = 1000, %% msec
time_span = 60000, %% msec
truncate = true,
histogram_module = exometer_slot_slide,
heap,
opts = []}).
%% for auto-conversion
-define(OLDSTATE, {st,_,_,_,_,_,_,_}).
-define(DATAPOINTS,
[n, mean, min, max, median, 50, 75, 90, 95, 99, 999 ]).
%% Tell exometer that this entry type is implemented as a probe process.
-spec behaviour() -> exometer:behaviour().
behaviour() ->
    probe.
probe_init(Name, _Type, Options) ->
    {ok, init_state(Name, Options)}.

%% Build the initial probe state: apply option defaults, create the
%% sliding-window histogram, and (for exometer_slot_slide with a
%% positive keep_high option) a {New, Old} pair of bounded trees that
%% retain the highest observed values.
%% NOTE(review): the module doc advertises a 1000 ms default
%% slot_period, but 10 ms is supplied here -- confirm which is intended.
init_state(Name, Options) ->
    St = process_opts(#st{name = Name},
                      [{histogram_module, exometer_slot_slide},
                       {time_span, 60000},
                       {slot_period, 10}] ++ Options),
    Slide = (St#st.histogram_module):new(St#st.time_span,
                                         St#st.slot_period,
                                         fun average_sample/3,
                                         fun average_transform/2,
                                         Options),
    %% The keep_high heap is only supported by the slot-slide module.
    Heap = if St#st.histogram_module == exometer_slot_slide ->
                   case lists:keyfind(keep_high, 1, Options) of
                       false ->
                           undefined;
                       {_, N} when is_integer(N), N > 0 ->
                           T = exometer_shallowtree:new(N),
                           {T, T};
                       {_, 0} ->
                           undefined
                   end;
              true -> undefined
           end,
    St#st{slide = Slide, heap = Heap}.
probe_terminate(_St) ->
    ok.

%% Fetch datapoint values; old state tuples are upgraded on the fly.
probe_get_value(DPs, ?OLDSTATE = St) ->
    probe_get_value(DPs, convert(St));
probe_get_value(DataPoints, St) ->
    {ok, get_value_int(St, DataPoints)}.

probe_get_datapoints(_St) ->
    {ok, datapoints()}.

%% All datapoints supported by this probe.
datapoints() ->
    ?DATAPOINTS.
%% Resolve the requested datapoint list ('default' expands to every
%% supported datapoint, [] yields []) and compute the values; old state
%% tuples are converted first.
get_value_int(St, default) ->
    get_value_int_(St, ?DATAPOINTS);
get_value_int(_, []) ->
    [];
get_value_int(?OLDSTATE = St, DPs) ->
    get_value_int(convert(St), DPs);
get_value_int(St, DataPoints) ->
    get_value_int_(St, DataPoints).
%% Walk the slide once, accumulating element count, full count, sum, min,
%% max, the raw value list and (for the slot-slide module) per-slot extra
%% values; then derive every requested datapoint. Exact high percentiles
%% computed from the keep_high heap take precedence over the
%% approximations, because get_dp/3 picks the first matching key and
%% TopPercentiles is prepended to Results.
get_value_int_(#st{truncate = Trunc,
                   histogram_module = Module,
                   time_span = TimeSpan,
                   heap = Heap} = St, DataPoints) ->
    %% We need element count and sum of all elements to get mean value.
    Tot0 = case Trunc of true -> 0; round -> 0; false -> 0.0 end,
    TS = exometer_util:timestamp(),
    {Length, FullLength, Total, Min0, Max, Lst0, Xtra} =
        Module:foldl(
          TS,
          fun
              %% Slot-slide entry: aggregated {Mean, Count, Min, Max, Extras}.
              ({_TS1, {Val, Cnt, NMin, NMax, X}},
               {Length, FullLen, Total, OMin, OMax, List, Xs}) ->
                  {Length + 1, FullLen + Cnt, Total + Val,
                   min(OMin, NMin), max(OMax, NMax),
                   [Val|List], [X|Xs]};
              %% Plain slide entry: a single raw value.
              ({_TS1, Val}, {Length, _, Total, Min, Max, List, Xs}) ->
                  L1 = Length+1,
                  {L1, L1, Total + Val, min(Val, Min), max(Val, Max),
                   [Val|List], Xs}
          end,
          {0, 0, Tot0, infinity, 0, [], []}, St#st.slide),
    Min = if Min0 == infinity -> 0; true -> Min0 end,
    Mean = case Length of
               0 -> 0.0;
               N -> Total / N
           end,
    %% For the slot slide, pad with extra values and bracket the sorted
    %% list with the exact min and max.
    {Len, List} =
        if Module == exometer_slot_slide ->
               {Length1, Lst} = add_extra(Length, Lst0, Xtra),
               {Length1 + 2, [Min|lists:sort(Lst)] ++ [Max]};
           true ->
               {Length, lists:sort(Lst0)}
        end,
    TopPercentiles = get_from_heap(Heap, TS, TimeSpan, FullLength, DataPoints),
    Results = exometer_util:get_statistics2(Len, List, Total, Mean),
    CombinedResults = TopPercentiles ++ Results,
    [get_dp(K, CombinedResults, Trunc) || K <- DataPoints].
%% Compute high percentiles exactly from the keep_high heaps, when
%% enough of the top values were retained. MinPerc is the lowest
%% percentile that the Sz retained values (out of N total) can answer
%% exactly; only requested datapoints at or above it (MinPerc10 for
%% the 999 per-mille datapoint) are produced here.
get_from_heap({New,Old}, TS, TSpan, N, DPs) when N > 0 ->
    Sz = exometer_shallowtree:size(New)
        + exometer_shallowtree:size(Old),
    if Sz > 0 ->
           MinPerc = 100 - ((Sz*100) div N),
           MinPerc10 = MinPerc * 10,
           GetDPs = lists:foldl(
                      fun(D, Acc) when is_integer(D),
                                       D < 100, D >= MinPerc ->
                              [{D, p(D, N)}|Acc];
                         (D, Acc) when is_integer(D),
                                       D > 100, D >= MinPerc10 ->
                              [{D, p(D, N)}|Acc];
                         (_, Acc) ->
                              Acc
                      end, [], DPs),
           pick_heap_vals(GetDPs, New, Old, TS, TSpan);
       true ->
           []
    end;
get_from_heap(_, _, _, _, _) ->
    [].
%% Extract the retained top values that are still inside the time window
%% (all of New; entries in Old filtered by timestamp), sort them in
%% descending order and pick the value at each datapoint's rank.
pick_heap_vals([], _, _, _, _) ->
    [];
pick_heap_vals(DPs, New, Old, TS, TSpan) ->
    TS0 = TS - TSpan,
    NewVals = exometer_shallowtree:filter(fun(V,_) -> {true,V} end, New),
    OldVals = exometer_shallowtree:filter(
                fun(V,T) ->
                        if T >= TS0 ->
                               {true, V};
                           true ->
                               false
                        end
                end, Old),
    Vals = revsort(OldVals ++ NewVals),
    exometer_util:pick_items(Vals, DPs).
%% Sort a list of values in descending order.
revsort(L) ->
    lists:reverse(lists:sort(L)).
%% Map a percentile datapoint name to the (1-based) rank of that
%% percentile in a descending list of Len values.
p(50, Len)  -> perc(0.5, Len);
p(75, Len)  -> perc(0.25, Len);
p(90, Len)  -> perc(0.1, Len);
p(95, Len)  -> perc(0.05, Len);
p(99, Len)  -> perc(0.01, Len);
p(999, Len) -> perc(0.001, Len).

%% Rank of the given tail fraction; fractions above 1.0 are interpreted
%% as per-mille style values and scaled down by 10.
perc(Frac, Len) when Frac > 1.0 ->
    round((Frac / 10) * Len) + 1;
perc(Frac, Len) ->
    round(Frac * Len) + 1.
%% Pad the sample list L with 'extra' retained values (X is a list of
%% per-slot extra lists) to improve percentile accuracy. Returns the new
%% {Length, List}.
add_extra(Length, L, []) ->
    {Length, L};
add_extra(Length, L, X) when Length < 300 ->
    %% aim for 600 elements, since experiments indicate that this
    %% gives decent accuracy at decent speed (ca 300-400 us on a Core i7)
    Pick = max(2, ((600 - Length) div Length) + 1),
    pick_extra(X, Pick, Pick, L, Length);
add_extra(Length, L, X) ->
    %% Always take something from the Xtra, since this improves percentile
    %% accuracy
    pick_extra(X, 1, 1, L, Length).

%% Take up to Pick elements from each extras sublist, prepending them to
%% L while counting the resulting length (P is the remaining budget for
%% the current sublist).
pick_extra([[H|T]|T1], P, Pick, L, Length) when P > 0 ->
    pick_extra([T|T1], P-1, Pick, [H|L], Length+1);
pick_extra([_|T], 0, Pick, L, Length) ->
    pick_extra(T, Pick, Pick, L, Length);
pick_extra([[]|T], _, Pick, L, Length) ->
    pick_extra(T, Pick, Pick, L, Length);
pick_extra([], _, _, L, Length) ->
    {Length, L}.
%% Fetch one datapoint from the computed result list, applying the
%% truncation mode. A missing datapoint yields 0 (0.0 when not
%% truncating), and 'median' is always truncated to an integer.
get_dp(K, L, Trunc) ->
    case lists:keyfind(K, 1, L) of
        false ->
            {K, if Trunc -> 0; Trunc==round -> 0; true -> 0.0 end};
        {median, F} when is_float(F) ->
            %% always truncate median
            {median, trunc(F)};
        {_, V} = DP when is_integer(V) ->
            DP;
        {_,_} = DP ->
            opt_trunc(Trunc, DP)
    end.
probe_setopts(_Entry, _Opts, _St) ->
    ok.

%% Add a value to the histogram; old state tuples are upgraded first.
probe_update(Value, ?OLDSTATE = St) ->
    probe_update(Value, convert(St));
probe_update(Value, St) ->
    {ok, update_int(exometer_util:timestamp(), Value, St)}.

%% Push the value into the slide and mirror it into the keep_high heap
%% (if enabled). 'Wrapped' reports whether the slide started a new
%% window, which rotates the heap pair.
update_int(Timestamp, Value, #st{slide = Slide,
                                 histogram_module = Module,
                                 heap = Heap} = St) ->
    {Wrapped, Slide1} = Module:add_element(Timestamp, Value, Slide, true),
    St#st{slide = Slide1, heap = into_heap(Wrapped, Value, Timestamp, Heap)}.

%% Insert into the {New, Old} heap pair; on window wrap the current heap
%% becomes Old and a fresh, equally-bounded one is started.
into_heap(_, _Val, _TS, undefined) ->
    undefined;
into_heap(false, Val, TS, {New,Old}) ->
    {exometer_shallowtree:insert(Val, TS, New), Old};
into_heap(true, Val, TS, {New,_}) ->
    Limit = exometer_shallowtree:limit(New),
    {exometer_shallowtree:insert(
       Val, TS, exometer_shallowtree:new(Limit)), New}.

%% Reset the probe by emptying the slide.
probe_reset(?OLDSTATE = St) ->
    probe_reset(convert(St));
probe_reset(#st{slide = Slide,
                histogram_module = Module} = St) ->
    {ok, St#st{slide = Module:reset(Slide)}}.

%% This probe is update-driven; sampling is not supported.
probe_sample(_St) ->
    {error, unsupported}.

probe_handle_msg(_, S) ->
    {ok, S}.

%% Upgrade pre-'heap' state tuples during hot code upgrade.
probe_code_change(_, ?OLDSTATE = S, _) ->
    {ok, convert(S)};
probe_code_change(_, S, _) ->
    {ok, S}.

%% Convert an old (pre-'heap') state tuple into the current #st{} record;
%% the new 'heap' field takes its default (undefined).
convert({st, Name, Slide, Slot_period, Time_span,
         Truncate, Histogram_module, Opts}) ->
    #st{name = Name, slide = Slide, slot_period = Slot_period,
        time_span = Time_span, truncate = Truncate,
        histogram_module = Histogram_module, opts = Opts}.
%% Apply the user-supplied option list to the probe state.  Recognised
%% options update dedicated #st{} fields; anything else is stashed
%% (deduplicated by key) in #st.opts.  exometer_proc options are handed
%% off separately first.
process_opts(St, Options) ->
    exometer_proc:process_options(Options),
    lists:foldl(
      fun
          %% Sample interval.
          ( {time_span, Val}, St1) -> St1#st {time_span = Val};
          ( {slot_period, Val}, St1) -> St1#st {slot_period = Val};
          ( {histogram_module, Val}, St1) -> St1#st {histogram_module = Val};
          ( {truncate, Val}, St1) when is_boolean(Val); Val == round ->
              St1#st{truncate = Val};
          %% Unknown option, pass on to State options list, replacing
          %% any earlier versions of the same option.
          ({Opt, Val}, St1) ->
              St1#st{ opts = [ {Opt, Val}
                               | lists:keydelete(Opt, 1, St1#st.opts) ] }
      end, St, Options).
%% Per-slot accumulator: running count/total plus observed min/max and a
%% reservoir of every 4th raw value ('extra') kept to improve percentile
%% accuracy later (see add_extra/pick_extra).
-record(sample, {count, total, min, max, extra = []}).
%% Simple sample processor that maintains an average
%% of all sampled values
average_sample(_TS, Val, undefined) ->
    %% First value of the slot seeds the accumulator.
    #sample{count = 1,
            total = Val,
            min = Val,
            max = Val};
average_sample(_TS, Val, #sample{count = Count,
                                 total = Total,
                                 min = Min,
                                 max = Max, extra = X} = S) ->
    Count1 = Count + 1,
    %% Keep every 4th raw value for the percentile reservoir.
    X1 = if Count1 rem 4 == 0 -> [Val|X];
            true -> X
         end,
    S#sample{count = Count1,
             total = Total + Val,
             min = min(Min, Val),
             max = max(Max, Val),
             extra = X1}.
%% If average_sample() has not been called for the current time slot,
%% then the provided state will still be 'undefined'
average_transform(_TS, undefined) ->
    undefined;
%% Fold the per-slot accumulator into the element stored in the
%% histogram slide: {Average, Count, Min, Max, ExtraSamples}.
average_transform(_TS, #sample{count = Count,
                               total = Total,
                               min = Min,
                               max = Max, extra = X}) ->
    %% Average of all values received during this slot
    {Total / Count, Count, Min, Max, X}.
%% Apply the truncation mode to a {Key, Value} datapoint: float values
%% are truncated (mode 'true') or rounded (mode 'round'); everything
%% else -- including non-float values -- passes through untouched.
opt_trunc(Mode, {Key, Value}) when Mode =:= true, is_float(Value) ->
    {Key, trunc(Value)};
opt_trunc(Mode, {Key, Value}) when Mode =:= round, is_float(Value) ->
    {Key, round(Value)};
opt_trunc(_Mode, Datapoint) ->
    Datapoint.
%% Build a probe state for benchmarking, tagged 'test'.
test_new(Opts) ->
    init_state(test, Opts).
%% @equiv test_run(Module, 1)
test_run(Module) ->
    test_run(Module, 1).
%% @doc Test the performance and accuracy of a histogram callback module.
%%
%% This function uses a test set ({@link test_series/0}) and initializes
%% and updates a histogram using the callback module `Module'.
%%
%% The `Module' argument can either be the module name, or `{ModName, Opts}'
%% where `Opts' are options passed on to the histogram module.
%%
%% `Interval' is the gap in milliseconds between the inserts. The test run
%% will not actually wait, but instead manipulate the timestamp.
%%
%% Return value: `[Result1, Result2]', where the results are
%% `{Time1, Time2, Datapoints}'. `Time1' is the time (in microsecs) it took to
%% insert the values. `Time2' is the time it took to calculate all default
%% datapoints. The data set is shuffled between the two runs.
%%
%% To assess the accuracy of the reported percentiles, use e.g.
%% `bear:get_statistics(exometer_histogram:test_series())' as a reference.
%% @end
test_run(Module, Interval) ->
    Series = test_series(),
    %% Run once on the generated series and once on a shuffled copy to
    %% expose any order sensitivity in the histogram implementation.
    [test_run(Module, Interval, Series),
     test_run(Module, Interval, shuffle(Series))].
%% Time (in microseconds) the inserts and then the datapoint calculation
%% for one series; see test_run/2 for the result format.
test_run(Module, Int, Series) ->
    St = test_new(test_opts(Module)),
    {T1, St1} = tc(fun() ->
                           test_update(
                             Series, Int,
                             exometer_util:timestamp(), St)
                   end),
    {T2, Result} = tc(fun() ->
                              get_value_int(St1, default)
                      end),
    %% Level the playing field between consecutive runs.
    erlang:garbage_collect(), erlang:yield(),
    {T1, T2, Result}.
%% Normalise the test_run/2 Module argument into a probe option list:
%% either a bare module name or {Module, ExtraOpts}.
test_opts({Mod, Extra}) ->
    [{histogram_module, Mod} | Extra];
test_opts(Mod) when is_atom(Mod) ->
    [{histogram_module, Mod}].
%% Insert the series into the probe state, advancing the synthetic
%% timestamp by Int per value instead of actually waiting.
test_update([H|T], Int, TS, St) ->
    test_update(T, Int, TS+Int, update_int(TS, H, St));
test_update([], _, _, St) ->
    St.
%% Time the execution of F, returning {Microseconds, Result}.
%% Delegates to timer:tc/1, which measures with the monotonic clock, so
%% the reported duration cannot be skewed (or go negative) on wall-clock
%% adjustments the way the previous os:timestamp/0 + timer:now_diff/2
%% pair could.  The return shape is unchanged.
tc(F) ->
    timer:tc(F).
-spec test_series() -> [integer()].
%% @doc Create a series of values for histogram testing.
%%
%% These are the properties of the current test set:
%% <pre lang="erlang">
%% 1> rp(bear:get_statistics(exometer_histogram:test_series())).
%% [{min,3},
%% {max,100},
%% {arithmetic_mean,6.696},
%% {geometric_mean,5.546722009408586},
%% {harmonic_mean,5.033909932832006},
%% {median,5},
%% {variance,63.92468674297564},
%% {standard_deviation,7.995291535833802},
%% {skewness,7.22743137858698},
%% {kurtosis,59.15674033499604},
%% {percentile,[{50,5},{75,7},{90,8},{95,9},{99,50},{999,83}]},
%% {histogram,[{4,2700},
%% {5,1800},
%% {6,900},
%% {7,1800},
%% {8,900},
%% {9,720},
%% {53,135},
%% {83,36},
%% {103,9}]},
%% {n,9000}]
%% </pre>
%% @end
%% Generate the fixed 9000-element test distribution documented above:
%% a 1000-value pattern (mostly 3..9, a tail of 50s, a few outliers up
%% to 100) repeated nine times and shuffled.
test_series() ->
    S = lists:flatten(
          [dupl(200,3),
           dupl(100,4),
           dupl(200,5),
           dupl(100,6),
           dupl(200,7),
           dupl(100,8),
           dupl(80,9),
           dupl(15,50), 80,81,82,83,100]),
    shuffle(S ++ S ++ S ++ S ++ S ++ S ++ S ++ S ++ S).
%% N copies of V, e.g. dupl(3, a) -> [a, a, a].
dupl(N, V) ->
    [V || _ <- lists:seq(1, N)].
%% Return a deterministic pseudo-random permutation of List.  The
%% generator is re-seeded with a fixed seed on every call (as the
%% original did via random:seed0/0), so repeated calls shuffle
%% identically, keeping test runs reproducible.
%% Rewritten to use the 'rand' module: 'random' is deprecated and has
%% been removed from recent OTP releases.
shuffle(List) ->
    _ = rand:seed(exsplus, {1, 2, 3}),
    Decorated = lists:keysort(1, [{rand:uniform(), Item} || Item <- List]),
    [Item || {_, Item} <- Decorated].
%%%=============================================================================
%% Copyright 2012- Klarna AB
%% Copyright 2015- AUTHORS
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc Schema definitions cache handling.
%%
%% All the schema definitions are stored in an ETS table for quick access during
%% validation. This module provides an interface for: 1) updating of schema
%% definitions in runtime; 2) getting of a schema definition by a key. When
%% an update is ordered, the update function checks a schema definition file
%% timestamp and compares it to a timestamp for the same schema in the `cache',
%% so, it will never update a schema in the database if the definition file
%% was not updated.
%% @end
%%%=============================================================================
-module(jesse_database).
%% API
-export([ add/3
, delete/1
, read/1
, update/4
]).
-export_type([ update_result/0
]).
-type update_result() :: ok | [fail()].
-type fail() :: {file:filename(), file:date_time(), reason()}.
-type reason() :: term().
-define(JESSE_ETS, jesse_ets).
-include_lib("kernel/include/file.hrl").
%%% API
%% @doc Adds a schema definition `Schema' to in-memory storage associated with
%% a key `Key'. It will overwrite an existing schema with the same key if
%% there is any.
-spec add( Schema :: jesse:json_term()
         , ValidationFun :: fun((any()) -> boolean())
         , MakeKeyFun :: fun((jesse:json_term()) -> any())
         ) -> update_result().
add(Schema, ValidationFun, MakeKeyFun) ->
  %% Reuse the bulk store path with a dummy file name and timestamp.
  store_schema([{"", "", Schema}], ValidationFun, MakeKeyFun).
%% @doc Deletes a schema definition from in-memory storage associated with
%% the key `Key'.
-spec delete(Key :: any()) -> ok.
delete(Key) ->
  Table = table_name(),
  %% ets:delete/2 is a no-op for an absent key, so this never fails.
  ets:delete(Table, Key),
  ok.
%% @doc Loads schema definitions from filesystem to in-memory storage.
%% The function loads all the files from directory `Path', then each schema
%% entry will be checked for a validity by function `ValidationFun', and
%% will be stored in in-memory storage with a key returned by `MakeKeyFun'
%% function.
%%
%% In addition to a schema definition, a timestamp of the schema file will be
%% stored, so, during the next update timestamps will be compared to avoid
%% unnecessary updates.
%%
%% Schema definitions are stored in the format which json parsing function
%% `ParseFun' returns.
%%
%% NOTE: it's impossible to automatically update schema definitions added by
%% add_schema/2, the only way to update them is to use add_schema/2
%% again with the new definition.
-spec update( Path :: string()
            , ParseFun :: fun((binary()) -> jesse:json_term())
            , ValidationFun :: fun((any()) -> boolean())
            , MakeKeyFun :: fun((jesse:json_term()) -> any())
            ) -> update_result().
update(Path, ParseFun, ValidationFun, MakeKeyFun) ->
  %% Only files whose mtime is newer than the cached copy are re-read.
  Schemas = load_schema(Path, get_updated_files(Path), ParseFun),
  store_schema(Schemas, ValidationFun, MakeKeyFun).
%% @doc Reads a schema definition with the same key as `Key' from the internal
%% storage. If there is no such key in the storage, an exception will be thrown.
-spec read(Key :: any()) -> jesse:json_term() | no_return().
read(Key) ->
  %% Entries are {Key, SourceFile, TimeStamp, SchemaTerm} tuples.
  case ets:lookup(table_name(), Key) of
    [{Key, _SecondaryKey, _TimeStamp, Term}] ->
      Term;
    _ ->
      throw({database_error, Key, schema_not_found})
  end.
%%% Internal functions
%% @doc Stores schema definitions `Schemas' in in-memory storage.
%% Uses `ValidationFun' to validate each schema definition before it is stored.
%% Each schema definition is stored with a key returned by `MakeKeyFun' applied
%% to the schema entry. Returns `ok' in case if all the schemas passed
%% the validation and were stored, otherwise a list of invalid entries
%% is returned.
%% @private
store_schema(Schemas, ValidationFun, MakeKeyFun) ->
  Table = create_table(table_name()),
  %% Valid schemas are inserted; invalid ones are accumulated and
  %% reported back via store_result/1.
  StoreFun = fun({InFile, TimeStamp, Value} = Object, Acc) ->
                 case ValidationFun(Value) of
                   true ->
                     NewObject = { MakeKeyFun(Value)
                                 , InFile
                                 , TimeStamp
                                 , Value
                                 },
                     ets:insert(Table, NewObject),
                     Acc;
                   false ->
                     [Object | Acc]
                 end
             end,
  store_result(lists:foldl(StoreFun, [], Schemas)).
%% @private
%% Collapse the fold result: no failures -> ok, otherwise the failures.
store_result(Fails) ->
  case Fails of
    [] -> ok;
    _ -> Fails
  end.
%% @doc Creates ETS table for internal cache if it does not exist yet,
%% otherwise the name of the table is returned.
%% @private
create_table(TableName) ->
  %% Either branch yields a handle usable as the table name.
  case table_exists(TableName) of
    false -> ets:new(TableName, [set, public, named_table]);
    true -> TableName
  end.
%% @doc Checks if ETS table with name `TableName' exists.
%% @private
%% True iff an ETS table named TableName currently exists:
%% ets:info/1 returns 'undefined' for unknown tables.
table_exists(TableName) ->
  ets:info(TableName) =/= undefined.
%% @doc Returns a list of schema definitions files in `InDir' which need to be
%% updated in the cache.
%% @private
get_updated_files(InDir) ->
  case { get_file_list(InDir)
       , table_exists(table_name())
       } of
    %% Empty directory: nothing to update.
    {[] = Files, _TableExists} ->
      Files;
    %% No cache table yet: every file counts as updated.
    {Files, false} ->
      Files;
    %% Otherwise keep only files newer than their cached entry.
    {Files, _TableExists} ->
      Filter = fun(InFile) ->
                   is_outdated( get_full_path(InDir, InFile)
                              , InFile
                              )
               end,
      lists:filter(Filter, Files)
  end.
%% @doc Loads schema definitions from a list of files `Files' located in
%% directory `InDir', and parses each of entry by the given parse
%% function `ParseFun'. Silently ignores subdirectories.
%% @private
load_schema(InDir, Files, ParseFun) ->
  LoadFun = fun(InFile, Acc) ->
                InFilePath = get_full_path(InDir, InFile),
                case file:read_file(InFilePath) of
                  {ok, SchemaBin} ->
                    {ok, FileInfo} = file:read_file_info(InFilePath),
                    TimeStamp = FileInfo#file_info.mtime,
                    %% Parse failures are kept as {parse_error, _} so the
                    %% validation step in store_schema/3 can reject them.
                    Schema = try_parse(ParseFun, SchemaBin),
                    [{InFile, TimeStamp, Schema} | Acc];
                  {error, eisdir} ->
                    %% Silently ignore sub-directories.
                    Acc
                end
            end,
  lists:foldl(LoadFun, [], Files).
%% @doc Wraps up calls to a third party json parser.
%% @private
%% Run the third-party JSON parser, converting any raised exception into
%% a {parse_error, Reason} tuple instead of letting it propagate.
try_parse(ParseFun, SchemaBin) ->
  try ParseFun(SchemaBin)
  catch
    _Class:Reason -> {parse_error, Reason}
  end.
%% @private
%% List directory entries; crashes with {badmatch, {error, _}} if the
%% directory cannot be read.
get_file_list(InDir) ->
  {ok, Files} = file:list_dir(InDir),
  Files.
%% @private
%% Join a directory and a file name into one path.
get_full_path(Dir, File) ->
  filename:join(Dir, File).
%% @doc Checks if a cache entry for a schema definition from file `InFile'
%% is outdated. Returns `true' if the cache entry needs to be updated, or if
%% the entry does not exist in the cache, otherwise `false' is returned.
%% @private
is_outdated(InFile, SecondaryKey) ->
  %% The cache key is unknown here, so match on the secondary key
  %% (the source file name) instead.
  case ets:match_object(table_name(), {'_', SecondaryKey, '_', '_'}) of
    [] ->
      %% Not cached yet: treat as outdated so it gets loaded.
      true;
    [{_Key, SecondaryKey, TimeStamp, _Value}] ->
      {ok, #file_info{mtime = MtimeIn}} = file:read_file_info(InFile),
      MtimeIn > TimeStamp
  end.
%% @doc Returns a name of ETS table which is used for in-memory cache.
%% Could be rewritten to use a configuration parameter instead of a hardcoded
%% value.
%% @private
table_name() -> ?JESSE_ETS. | src/jesse_database.erl | 0.71413 | 0.475423 | jesse_database.erl | starcoder |
-module(recursiontail).
-export([fib/1,fib/3,perfect/1,isperfect/3]).
%% Step 01.21:
%% 1. Define a function fib/3 - to compute Fibonacci numbers - using tail recursion.
%% 2. "Define a function perfect/1 that takes a positive number N and returns a boolean which indicates whether or not the number is perfect."
%% Assumes the following sequence: 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, ...
%% fib(N): Nth number of the sequence 0, 1, 1, 2, 3, 5, 8, ...
%% Negative input matches no clause and crashes deliberately.
fib(0) -> 0;
fib(1) -> 1;
fib(N) when N >= 1 -> %% Let it fail for negative numbers!
    fib(0, 1, N).
%% Tail-recursive worker: Current/Next slide along the sequence while
%% Steps counts down to zero.
fib(Current, _Next, 0) ->
    Current;
fib(Current, Next, Steps) when Steps >= 1 -> %% Same guard as in fib(X).
    fib(Next, Current + Next, Steps - 1).
%% step-by-step evaluation of fib(4). :
%% fib(4)
%% = fib(0,1,4)
%% = fib(1,1,3)
%% = fib(1,2,2)
%% = fib(2,3,1)
%% = fib(3,5,0)
%% = 3
%% List of perfect numbers on Wikipedia: https://en.wikipedia.org/wiki/Perfect_number
%% Why calculate what you already know? :-P
%% Known perfect numbers are answered from a lookup table.  Every number
%% below 33550336 that is not in the table is therefore not perfect, so
%% only the gap above 33550336 is actually computed via isperfect/3.
perfect(6) -> true;
perfect(28) -> true;
perfect(496) -> true;
perfect(8128) -> true;
perfect(33550336) -> true;
perfect(8589869056) -> true;
perfect(X) when X < 6 -> false;
perfect(X) when X < 28 -> false;
perfect(X) when X < 496 -> false;
perfect(X) when X < 8128 -> false;
perfect(X) when X < 33550336 -> false;
%% NOTE: this clause returns io:format/2's result, not a boolean --
%% very large inputs are refused rather than spending hours dividing.
perfect(X) when X > 8589869056 -> %% Note: isperfect/3 was also tested WITHOUT the guards in the above lines.
    io:format("perfect(~p)? Are you joking? Get a supercomputer!~nbye~n", [X]);
perfect(X) when X > 33550336 ->
    isperfect(X, (X+1) div 2, 0).
%% Tail-recursive perfect-number check: sum every divisor of N from
%% Divisor down to 1, then compare the accumulated sum with N itself.
isperfect(N, 0, Sum) ->
    N == Sum;
isperfect(N, Divisor, Sum) ->
    NextSum = case N rem Divisor of
                  0 -> Sum + Divisor;
                  _ -> Sum
              end,
    isperfect(N, Divisor - 1, NextSum).
%% https://duckduckgo.com/?q=%22perfect+number%22+recursive&ia=qa
%% http://stackoverflow.com/questions/33507834/find-perfect-numbers-using-recursion#33508113
%% int perfectNumberRecurse(int num, int divisor, int sum)
%% {
%% return (divisor == 0) ? sum : perfectNumberRecurse(num, divisor - 1, (num % divisor == 0) ? sum + divisor : sum);
%% }
%%
%% bool isPerfect(int x)
%% {
%% return (x > 1) ? x == perfectNumberRecurse(x, (x + 1) / 2, 0) : false;
%% }
%%%-------------------------------------------------------------------
%%% @doc
%%% A set of optics specific to maps.
%%% @end
%%%-------------------------------------------------------------------
-module(optic_maps).
%% API
-export([all/0,
all/1,
keys/0,
keys/1,
values/0,
values/1,
associations/0,
associations/1,
key/1,
key/2,
association/1,
association/2]).
%%%===================================================================
%%% API
%%%===================================================================
%% @see values/1
%% 'all' is an alias for values/0: focuses on every value in a map.
-spec all() -> optic:optic().
all() ->
    values().
%% @see values/1
-spec all(Options) -> optic:optic() when
      Options :: optic:variations().
all(Options) ->
    values(Options).
%% @see keys/1
-spec keys() -> optic:optic().
keys() ->
    keys(#{}).
%% @doc
%% Focus on all keys of a map.
%%
%% Example:
%%
%% ```
%% > optic:get([optic_maps:keys()], #{first => 1, second => 2}).
%% {ok,[first,second]}
%% '''
%% @end
%% @param Options Common optic options.
%% @returns An opaque optic record.
-spec keys(Options) -> optic:optic() when
      Options :: optic:variations().
keys(Options) ->
    %% Read-only traversal: fold over the keys.
    Fold =
        fun (Fun, Acc, Map) when is_map(Map) ->
                {ok, maps:fold(fun (Key, _Value, InnerAcc) ->
                                       Fun(Key, InnerAcc)
                               end,
                               Acc,
                               Map)};
            (_Fun, _Acc, _Data) ->
                {error, undefined}
        end,
    %% Read/write traversal: rebuild the map with (possibly renamed)
    %% keys while threading the accumulator.
    MapFold =
        fun (Fun, Acc, Map) when is_map(Map) ->
                {ok, maps:fold(fun (Key, Value, {InnerMap, InnerAcc}) ->
                                       {NewKey, NewAcc} = Fun(Key, InnerAcc),
                                       {InnerMap#{NewKey=>Value}, NewAcc}
                               end,
                               {#{}, Acc},
                               Map)};
            (_Fun, _Acc, _Data) ->
                {error, undefined}
        end,
    %% 'create' variation: non-maps are replaced by an empty map.
    New =
        fun (_Data, _Template) ->
                #{}
        end,
    Optic = optic:new(MapFold, Fold),
    optic:variations(Optic, Options, New).
%% @see values/1
-spec values() -> optic:optic().
values() ->
    values(#{}).
%% @doc
%% Focus on all values of a map.
%%
%% Example:
%%
%% ```
%% > optic:get([optic_maps:values()], #{first => 1, second => 2}).
%% {ok,[1,2]}
%% '''
%% @end
%% @param Options Common optic options.
%% @returns An opaque optic record.
-spec values(Options) -> optic:optic() when
      Options :: optic:variations().
values(Options) ->
    %% Read-only traversal: fold over the values.
    Fold =
        fun (Fun, Acc, Map) when is_map(Map) ->
                {ok, maps:fold(fun (_Key, Value, InnerAcc) ->
                                       Fun(Value, InnerAcc)
                               end,
                               Acc,
                               Map)};
            (_Fun, _Acc, _Data) ->
                {error, undefined}
        end,
    %% Read/write traversal: rebuild the map with updated values while
    %% threading the accumulator.
    MapFold =
        fun (Fun, Acc, Map) when is_map(Map) ->
                {ok, maps:fold(fun (Key, Value, {InnerMap, InnerAcc}) ->
                                       {NewValue, NewAcc} = Fun(Value, InnerAcc),
                                       {InnerMap#{Key=>NewValue}, NewAcc}
                               end,
                               {#{}, Acc},
                               Map)};
            (_Fun, _Acc, _Data) ->
                {error, undefined}
        end,
    %% 'create' variation: non-maps are replaced by an empty map.
    New =
        fun (_Data, _Template) ->
                #{}
        end,
    Optic = optic:new(MapFold, Fold),
    optic:variations(Optic, Options, New).
%% @see associations/1
-spec associations() -> optic:optic().
associations() ->
    associations(#{}).
%% @doc
%% Focus on all associations of a map. An association is a tuple of
%% the key and value for each entry.
%%
%% Example:
%%
%% ```
%% > optic:get([optic_maps:associations()], #{first => 1, second => 2}).
%% {ok,[{first,1},{second,2}]}
%% '''
%% @end
%% @param Options Common optic options.
%% @returns An opaque optic record.
-spec associations(Options) -> optic:optic() when
      Options :: optic:variations().
associations(Options) ->
    %% Read-only traversal: fold over {Key, Value} pairs.
    Fold =
        fun (Fun, Acc, Map) when is_map(Map) ->
                {ok, maps:fold(fun (Key, Value, InnerAcc) ->
                                       Fun({Key, Value}, InnerAcc)
                               end,
                               Acc,
                               Map)};
            (_Fun, _Acc, _Data) ->
                {error, undefined}
        end,
    %% Read/write traversal: both key and value may be replaced.
    MapFold =
        fun (Fun, Acc, Map) when is_map(Map) ->
                {ok, maps:fold(fun (Key, Value, {InnerMap, InnerAcc}) ->
                                       {{NewKey, NewValue}, NewAcc} = Fun({Key, Value}, InnerAcc),
                                       {InnerMap#{NewKey=>NewValue}, NewAcc}
                               end,
                               {#{}, Acc},
                               Map)};
            (_Fun, _Acc, _Data) ->
                {error, undefined}
        end,
    %% 'create' variation: non-maps are replaced by an empty map.
    New =
        fun (_Data, _Template) ->
                #{}
        end,
    Optic = optic:new(MapFold, Fold),
    optic:variations(Optic, Options, New).
%% @see key/2
-spec key(Key) -> optic:optic() when
      Key :: term().
key(Key) ->
    key(Key, #{}).
%% @doc
%% Focus on the value of a map key.
%%
%% Example:
%%
%% ```
%% > optic:get([optic_maps:key(first)], #{first => 1, second => 2}).
%% {ok,[1]}
%% '''
%% @end
%% @param Key The key to focus on.
%% @param Options Common optic options.
%% @returns An opaque optic record.
-spec key(Key, Options) -> optic:optic() when
      Key :: term(),
      Options :: optic:variations().
key(Key, Options) ->
    %% Read-only: visit the value for Key, error if absent/not a map.
    Fold =
        fun (Fun, Acc, #{Key:=Value}) ->
                {ok, Fun(Value, Acc)};
            (_Fun, _Acc, _Data) ->
                {error, undefined}
        end,
    %% Read/write: replace the value for Key in place.
    MapFold =
        fun (Fun, Acc, #{Key:=Value} = Map) when is_map(Map) ->
                {NewValue, NewAcc} = Fun(Value, Acc),
                {ok, {Map#{Key:=NewValue}, NewAcc}};
            (_Fun, _Acc, _Data) ->
                {error, undefined}
        end,
    %% 'create' variation: add Key with the template value.
    New =
        fun (Map, Template) when is_map(Map) ->
                Map#{Key=>Template};
            (_Data, Template) ->
                #{Key=>Template}
        end,
    Optic = optic:new(MapFold, Fold),
    optic:variations(Optic, Options, New).
%% @see association/2
-spec association(Key) -> optic:optic() when
      Key :: term().
association(Key) ->
    association(Key, #{}).
%% @doc
%% Focus on the association for a map key. An association is the tuple
%% of a map key and value. If the key is modified, the optic is no
%% longer well behaved.
%%
%% Example:
%%
%% ```
%% > optic:get([optic_maps:association(first)], #{first => 1, second => 2}).
%% {ok,[{first,1}]}
%% '''
%% @end
%% @param Key The key to focus on.
%% @param Options Common optic options.
%% @returns An opaque optic record.
-spec association(Key, Options) -> optic:optic() when
      Key :: term(),
      Options :: optic:variations().
association(Key, Options) ->
    %% Read-only: visit the {Key, Value} pair, error if absent.
    Fold =
        fun (Fun, Acc, #{Key:=Value}) ->
                {ok, Fun({Key, Value}, Acc)};
            (_Fun, _Acc, _Data) ->
                {error, undefined}
        end,
    %% Read/write: the mapped fun may rename the key, so the old entry
    %% is removed before the (possibly new) entry is inserted.
    MapFold =
        fun (Fun, Acc, #{Key:=Value} = Map) when is_map(Map) ->
                {{NewKey, NewValue}, NewAcc} = Fun({Key, Value}, Acc),
                {ok, {(maps:remove(Key, Map))#{NewKey=>NewValue}, NewAcc}};
            (_Fun, _Acc, _Data) ->
                {error, undefined}
        end,
    %% 'create' variation: add Key with the template value.
    New =
        fun (Map, Template) when is_map(Map) ->
                Map#{Key=>Template};
            (_Data, Template) ->
                #{Key=>Template}
        end,
    Optic = optic:new(MapFold, Fold),
    optic:variations(Optic, Options, New).
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_event_listener_mfa).
-behavior(couch_event_listener).
-export([
start_link/4,
enter_loop/4,
stop/1
]).
-export([
init/1,
terminate/2,
handle_event/3,
handle_cast/2,
handle_info/2
]).
-record(st, {
mod,
func,
state,
parent
}).
%% Start a couch_event listener process that dispatches each event to
%% Mod:Func(DbName, Event, State).  The 'parent' option (defaulting to
%% the caller) is monitored so the listener stops when its owner dies.
start_link(Mod, Func, State, Options) ->
    Parent = case proplists:get_value(parent, Options) of
        P when is_pid(P) -> P;
        _ -> self()
    end,
    Arg = {Parent, Mod, Func, State},
    couch_event_listener:start_link(?MODULE, Arg, Options).
%% Turn the calling process into a listener (no new process is spawned).
%% Unlike start_link/4, an absent 'parent' option here means no parent
%% monitoring at all.
enter_loop(Mod, Func, State, Options) ->
    Parent = case proplists:get_value(parent, Options) of
        P when is_pid(P) ->
            erlang:monitor(process, P),
            P;
        _ ->
            undefined
    end,
    St = #st{
        mod = Mod,
        func = Func,
        state = State,
        parent = Parent
    },
    couch_event_listener:enter_loop(?MODULE, St, Options).
%% Ask a listener to shut down (asynchronous cast).
stop(Pid) ->
    couch_event_listener:cast(Pid, shutdown).
%% couch_event_listener callback: monitor the parent and build state.
init({Parent, Mod, Func, State}) ->
    erlang:monitor(process, Parent),
    {ok, #st{
        mod = Mod,
        func = Func,
        state = State,
        parent = Parent
    }}.
%% couch_event_listener callback: nothing to clean up.
terminate(_Reason, _MFA) ->
    ok.
%% couch_event_listener callback: forward the event to the configured
%% Mod:Func(DbName, Event, State).  {ok, NewState} continues, 'stop'
%% terminates normally; any other result -- including an exception
%% caught by the old-style 'catch' -- is raised as an error.
handle_event(DbName, Event, #st{mod=Mod, func=Func, state=State}=St) ->
    case (catch Mod:Func(DbName, Event, State)) of
        {ok, NewState} ->
            {ok, St#st{state=NewState}};
        stop ->
            {stop, normal, St};
        Else ->
            erlang:error(Else)
    end.
%% couch_event_listener callback: 'shutdown' (sent by stop/1) stops the
%% listener; every other cast is ignored.
handle_cast(Msg, St) ->
    case Msg of
        shutdown ->
            {stop, normal, St};
        _Other ->
            {ok, St}
    end.
%% couch_event_listener callback: a 'DOWN' message from the monitored
%% parent stops the listener; all other messages are ignored.
handle_info({'DOWN', _Ref, process, Parent, _Reason}, #st{parent=Parent}=St) ->
    {stop, normal, St};
handle_info(_Msg, St) ->
    {ok, St}.
%% -------- TIC-TAC ACTOR ---------
%%
%% The TicTac actor is responsible for tracking the state of the store and
%% signalling that state to other trusted actors
%%
%% https://en.wikipedia.org/wiki/Tic-tac
%%
%% This is achieved through the exchange of merkle trees, but *not* trees that
%% are secure to interference - there is no attempt to protect the tree from
%% byzantine faults or tampering. The tree is only suited for use between
%% trusted actors across secure channels.
%%
%% In dropping the cryptographic security requirement, a simpler tree is
%% possible, and also one that allows for trees of a partitioned database to
%% be quickly merged to represent a global view of state for the database
%% across the partition boundaries.
%%
%% -------- PERSPECTIVES OF STATE ---------
%%
%% The insecure Merkle trees (Tic-Tac Trees) are intended to be used in two
%% ways:
%% - To support the building of a merkle tree across a coverage plan to
%% represent global state across many stores (or vnodes) i.e. scanning over
%% the real data by bucket, by key range or by index.
%% - To track changes with "recent" modification dates.
%%
%% -------- TIC-TAC TREES ---------
%%
%% The Tic-Tac tree takes is split into 256 * 4096 different segments. Every
%% key is hashed to map it to one of those segment leaves using the
%% elrang:phash2 function.
%%
%% External to the leveled_tictac module, the value should also have been
%% hashed to a 4-byte integer (presumably based on a tag-specific hash
%% function). The combination of the Object Key and the Hash is then
%% hashed together to get a segment-change hash.
%%
%% To change a segment-leaf hash, the segment-leaf hash is XORd with the
%% segment-change hash associated with the changing key. This assumes that
%% only one version of the key is ever added to the segment-leaf hash if the
%% tree is to represent the state of store (or partition of the store. If
%% not, the segment-leaf hash can only represent a history of changes under
%% that leaf, not the current state (unless the previous segment-change hash
%% for the key is removed by XORing it once more from the segment-leaf hash
%% that already contains it).
%%
%% A Level 1 hash is then created by XORing the 4096 Level 2 segment-hashes
%% in the level below it (or XORing both the previous version and the new
%% version of the segment-leaf hash from the previous level 1 hash).
%%
-module(leveled_tictac).
-include("include/leveled.hrl").
-export([
new_tree/1,
new_tree/2,
add_kv/4,
add_kv/5,
alter_segment/3,
find_dirtyleaves/2,
find_dirtysegments/2,
fetch_root/1,
fetch_leaves/2,
merge_trees/2,
get_segment/2,
export_tree/1,
import_tree/1,
valid_size/1,
keyto_segment32/1,
keyto_doublesegment32/1,
keyto_segment48/1,
generate_segmentfilter_list/2,
adjust_segmentmatch_list/3,
merge_binaries/2,
join_segment/2,
match_segment/2,
tictac_hash/2 % called by kv_index_tictactree
]).
-include_lib("eunit/include/eunit.hrl").
-define(HASH_SIZE, 4).
-define(L2_CHUNKSIZE, 256).
-define(L2_BITSIZE, 8).
%% UNSUUPPORTED tree sizes for accelerated segment filtering
-define(XXSMALL, 16).
-define(XSMALL, 64).
%% SUPPORTED tree sizes for accelerated segment filtering
-define(SMALL, 256).
-define(MEDIUM, 1024).
-define(LARGE, 4096).
-define(XLARGE, 16384).
-define(EMPTY, <<0:8/integer>>).
-define(VALID_SIZES, [xxsmall, xsmall, small, medium, large, xlarge]).
-record(tictactree, {treeID :: any(),
size :: tree_size(),
width :: integer(),
segment_count :: integer(),
level1 :: binary(),
level2 :: any() % an array - but OTP compatibility
}).
-type tictactree() ::
#tictactree{}.
-type segment48() ::
{segment_hash, non_neg_integer(), non_neg_integer(), non_neg_integer()}.
-type tree_extract() ::
{binary(), integer(), integer(), integer(), binary()}.
-type tree_size() ::
xxsmall|xsmall|small|medium|large|xlarge.
-export_type([tictactree/0, segment48/0, tree_size/0]).
%%%============================================================================
%%% External functions
%%%============================================================================
-spec valid_size(any()) -> boolean().
%% @doc
%% For validation of input
valid_size(Size) ->
    lists:member(Size, ?VALID_SIZES).
-spec new_tree(any()) -> tictactree().
%% @doc
%% Create a new tree, zeroed out.  Defaults to the 'small' size.
new_tree(TreeID) ->
    new_tree(TreeID, small).
%% As new_tree/1 but with an explicit tree size (see tree_size()).
new_tree(TreeID, Size) ->
    Width = get_size(Size),
    %% Level 1 is a flat binary of Width zeroed ?HASH_SIZE-byte hashes.
    Lv1Width = Width * ?HASH_SIZE * 8,
    Lv1Init = <<0:Lv1Width/integer>>,
    %% Level 2 holds one binary segment-chunk per level-1 slot.
    Lv2Init = array:new([{size, Width}, {default, ?EMPTY}]),
    #tictactree{treeID = TreeID,
                size = Size,
                width = Width,
                segment_count = Width * ?L2_CHUNKSIZE,
                level1 = Lv1Init,
                level2 = Lv2Init}.
-spec export_tree(tictactree()) -> {struct, list()}.
%% @doc
%% Export the tree into a mochijson-style structure: the level1 binary
%% plus, for each level2 branch, {BranchID, CompressedBinary}, all
%% base64-encoded for safe transport.
export_tree(Tree) ->
    EncodeL2Fun =
        fun(X, L2Acc) ->
            L2Element = zlib:compress(array:get(X, Tree#tictactree.level2)),
            [{integer_to_binary(X), base64:encode_to_string(L2Element)}|L2Acc]
        end,
    L2 =
        lists:foldl(EncodeL2Fun, [], lists:seq(0, Tree#tictactree.width - 1)),
    {struct,
        [{<<"level1">>, base64:encode_to_string(Tree#tictactree.level1)},
            {<<"level2">>, {struct, lists:reverse(L2)}}
        ]}.
-spec import_tree({struct, list()}) -> tictactree().
%% @doc
%% Reverse the export process.  The tree size is recovered by matching
%% the decoded level1 byte length against the known size table; the
%% resulting tree is tagged with treeID 'import'.
import_tree(ExportedTree) ->
    {struct,
        [{<<"level1">>, L1Base64},
            {<<"level2">>, {struct, L2List}}]} = ExportedTree,
    L1Bin = base64:decode(L1Base64),
    Sizes = lists:map(fun(SizeTag) -> {SizeTag, get_size(SizeTag)} end,
                        ?VALID_SIZES),
    Width = byte_size(L1Bin) div ?HASH_SIZE,
    %% Deliberate assertion: the width must map to a known tree size.
    {Size, Width} = lists:keyfind(Width, 2, Sizes),
    Width = get_size(Size),
    Lv2Init = array:new([{size, Width}]),
    FoldFun =
        fun({X, EncodedL2SegBin}, L2Array) ->
            L2SegBin = zlib:uncompress(base64:decode(EncodedL2SegBin)),
            array:set(binary_to_integer(X), L2SegBin, L2Array)
        end,
    Lv2 = lists:foldl(FoldFun, Lv2Init, L2List),
    #tictactree{treeID = import,
                size = Size,
                width = Width,
                segment_count = Width * ?L2_CHUNKSIZE,
                level1 = L1Bin,
                level2 = Lv2}.
-spec add_kv(tictactree(), term(), term(), fun()) -> tictactree().
%% @doc
%% Add a Key and value to a tictactree using the BinExtractFun to extract a
%% binary from the Key and value from which to generate the hash. The
%% BinExtractFun will also need to do any canonicalisation necessary to make
%% the hash consistent (such as whitespace removal, or sorting)
add_kv(TicTacTree, Key, Value, BinExtractFun) ->
    add_kv(TicTacTree, Key, Value, BinExtractFun, false).

-spec add_kv(tictactree(), term(), term(), fun(), boolean())
                                -> tictactree()|{tictactree(), integer()}.
%% @doc
%% add_kv with ability to return segment ID of Key added
add_kv(TicTacTree, Key, Value, BinExtractFun, ReturnSegment) ->
    {BinK, BinV} = BinExtractFun(Key, Value),
    {SegHash, SegChangeHash} = tictac_hash(BinK, BinV),
    Segment = get_segment(SegHash, TicTacTree#tictactree.segment_count),
    {SegLeaf1, SegLeaf2, L1Extract, L2Extract} =
        extract_segment(Segment, TicTacTree),
    % bxor the change into both the leaf hash and the branch hash so the
    % two levels stay consistent
    SegLeaf2Upd = SegLeaf2 bxor SegChangeHash,
    SegLeaf1Upd = SegLeaf1 bxor SegChangeHash,
    UpdatedTree =
        replace_segment(SegLeaf1Upd, SegLeaf2Upd,
                        L1Extract, L2Extract, TicTacTree),
    case ReturnSegment of
        true -> {UpdatedTree, Segment};
        false -> UpdatedTree
    end.
-spec alter_segment(integer(), integer(), tictactree()) -> tictactree().
%% @doc
%% Replace the value of a segment in the tree with a new value - for example
%% to be used in partial rebuilds of trees
alter_segment(Segment, Hash, Tree) ->
    {SegLeaf1, SegLeaf2, L1Extract, L2Extract}
        = extract_segment(Segment, Tree),
    % bxor the old leaf hash out of the branch hash and the replacement in,
    % keeping level 1 consistent with the new leaf value
    SegLeaf1Upd = SegLeaf1 bxor SegLeaf2 bxor Hash,
    replace_segment(SegLeaf1Upd, Hash, L1Extract, L2Extract, Tree).
-spec find_dirtyleaves(tictactree(), tictactree()) -> list(integer()).
%% @doc
%% Returns a list of segment IDs which hold differences between the state
%% represented by the two trees.
find_dirtyleaves(SrcTree, SnkTree) ->
    % Assertive matches - both trees must be of the same size
    Size = SrcTree#tictactree.size,
    Size = SnkTree#tictactree.size,
    IdxList = find_dirtysegments(fetch_root(SrcTree), fetch_root(SnkTree)),
    SrcLeaves = fetch_leaves(SrcTree, IdxList),
    SnkLeaves = fetch_leaves(SnkTree, IdxList),
    FoldFun =
        fun(Idx, Acc) ->
            {Idx, SrcLeaf} = lists:keyfind(Idx, 1, SrcLeaves),
            {Idx, SnkLeaf} = lists:keyfind(Idx, 1, SnkLeaves),
            L2IdxList = segmentcompare(SrcLeaf, SnkLeaf),
            % Prepend to the accumulator rather than appending to it, which
            % would copy the growing Acc on every branch (O(n^2) overall).
            % Intermediate order is irrelevant as the result is sorted below.
            lists:map(fun(X) -> X + Idx * ?L2_CHUNKSIZE end, L2IdxList) ++ Acc
        end,
    lists:sort(lists:foldl(FoldFun, [], IdxList)).
-spec find_dirtysegments(binary(), binary()) -> list(integer()).
%% @doc
%% Returns a list of branch IDs that contain differences between the trees.
%% Pass in level 1 binaries to make the comparison.
find_dirtysegments(SrcBin, SinkBin) ->
    segmentcompare(SrcBin, SinkBin).

-spec fetch_root(tictactree()) -> binary().
%% @doc
%% Return the level1 binary for a tree.
fetch_root(TicTacTree) ->
    TicTacTree#tictactree.level1.
-spec fetch_leaves(tictactree(), list(integer())) -> list().
%% @doc
%% Return a keylist for the segment hashes for the leaves of the tree based on
%% the list of branch IDs provided
fetch_leaves(TicTacTree, BranchList) ->
    [{BranchID, get_level2(TicTacTree, BranchID)} || BranchID <- BranchList].
-spec merge_trees(tictactree(), tictactree()) -> tictactree().
%% Merge two trees providing a result that represents the combined state,
%% assuming that the two trees were correctly partitioned pre-merge. If a key
%% and value has been added to both trees, then the merge will not give the
%% expected outcome.
merge_trees(TreeA, TreeB) ->
    % Assertive matches - both trees must be of the same size
    Size = TreeA#tictactree.size,
    Size = TreeB#tictactree.size,
    MergedTree = new_tree(merge, Size),
    L1A = fetch_root(TreeA),
    L1B = fetch_root(TreeB),
    % Merging is a bxor of the two hash binaries at each level
    NewLevel1 = merge_binaries(L1A, L1B),
    MergeFun =
        fun(SQN, MergeL2) ->
            L2A = get_level2(TreeA, SQN),
            L2B = get_level2(TreeB, SQN),
            NewLevel2 = merge_binaries(L2A, L2B),
            array:set(SQN, NewLevel2, MergeL2)
        end,
    NewLevel2 = lists:foldl(MergeFun,
                            MergedTree#tictactree.level2,
                            lists:seq(0, MergedTree#tictactree.width - 1)),
    MergedTree#tictactree{level1 = NewLevel1, level2 = NewLevel2}.

-spec get_segment(integer(),
                    integer()|xxsmall|xsmall|small|medium|large|xlarge) ->
                        integer().
%% @doc
%% Return the segment ID for a Key. Can pass the tree size or the actual
%% segment count derived from the size
get_segment(Hash, SegmentCount) when is_integer(SegmentCount) ->
    % band as modulo - assumes SegmentCount is a power of two (TODO confirm
    % against the widths behind ?VALID_SIZES)
    Hash band (SegmentCount - 1);
get_segment(Hash, TreeSize) ->
    get_segment(Hash, ?L2_CHUNKSIZE * get_size(TreeSize)).
-spec tictac_hash(binary(), any()) -> {integer(), integer()}.
%% @doc
%% Hash the key and term.
%% The term can be of the form {is_hash, 32-bit integer)} to indicate the hash
%% has already been taken. If the value is not a pre-extracted hash just use
%% erlang:phash2. If an exportable hash of the value is required this should
%% be managed through the add_kv ExtractFun providing a pre-prepared Hash.
tictac_hash(BinKey, {is_hash, HashedVal}) when is_binary(BinKey) ->
    {HashKeyToSeg, AltHashKey} = keyto_doublesegment32(BinKey),
    {HashKeyToSeg, AltHashKey bxor HashedVal};
tictac_hash(BinKey, Val) when is_binary(BinKey) ->
    tictac_hash(BinKey, {is_hash, erlang:phash2(Val)}).
-spec keyto_doublesegment32(binary())
                                -> {non_neg_integer(), non_neg_integer()}.
%% @doc
%% Used in tictac_hash/2 to provide an alternative hash of the key to bxor with
%% the value, as well as the segment hash to locate the leaf of the tree to be
%% updated
keyto_doublesegment32(BinKey) when is_binary(BinKey) ->
    Segment48 = keyto_segment48(BinKey),
    % element 4 is the AltHash part of the segment_hash tuple
    {keyto_segment32(Segment48), element(4, Segment48)}.

-spec keyto_segment32(any()) -> integer().
%% @doc
%% The first 16 bits of the segment hash used in the tictac tree should be
%% made up of the segment ID part (which is used to accelerate queries)
keyto_segment32({segment_hash, SegmentID, ExtraHash, _AltHash})
                    when is_integer(SegmentID), is_integer(ExtraHash) ->
    % Low 16 bits of ExtraHash in the upper half, SegmentID in the lower half.
    % bsl and + share precedence and associate left, so this parses as
    % ((ExtraHash band 65535) bsl 16) + SegmentID
    (ExtraHash band 65535) bsl 16 + SegmentID;
keyto_segment32(BinKey) when is_binary(BinKey) ->
    keyto_segment32(keyto_segment48(BinKey));
keyto_segment32(Key) ->
    % Non-binary keys are first converted to external term format
    keyto_segment32(term_to_binary(Key)).

-spec keyto_segment48(binary()) -> segment48().
%% @doc
%% Produce a segment with an Extra Hash part - for tictac use most of the
%% ExtraHash will be discarded
keyto_segment48(BinKey) ->
    % md5 yields 128 bits; the leading 16 + 32 + 32 = 80 bits are split into
    % the three hash parts and the remaining 48 bits are discarded
    <<SegmentID:16/integer,
        ExtraHash:32/integer,
        AltHash:32/integer,
        _Rest/binary>> = crypto:hash(md5, BinKey),
    {segment_hash, SegmentID, ExtraHash, AltHash}.
-spec generate_segmentfilter_list(list(integer()), tree_size())
                                            -> false|list(integer()).
%% @doc
%% Cannot accelerate segment listing for trees below certain sizes, so check
%% the creation of segment filter lists with this function
generate_segmentfilter_list(_SegmentList, xxsmall) ->
    false;
generate_segmentfilter_list(SegmentList, xsmall) ->
    case length(SegmentList) =< 4 of
        true ->
            % Expand each segment into the four aliases produced by the two
            % top bits of the larger address space
            A0 = 1 bsl 15,
            A1 = 1 bsl 14,
            ExpandSegFun =
                fun(X, Acc) ->
                    Acc ++ [X, X + A0, X + A1, X + A0 + A1]
                end,
            lists:foldl(ExpandSegFun, [], SegmentList);
        false ->
            false
    end;
generate_segmentfilter_list(SegmentList, Size) ->
    % Assertive one-armed case: an invalid Size crashes with case_clause
    % rather than returning a misleading filter list
    case lists:member(Size, ?VALID_SIZES) of
        true ->
            SegmentList
    end.

-spec adjust_segmentmatch_list(list(integer()), tree_size(), tree_size())
                                                    -> list(integer()).
%% @doc
%% If we have dirty segments discovered by comparing trees of size CompareSize,
%% and we want to see if it matches a segment for a key which was created for a
%% tree of size Store Size, then we need to alter the segment list
%%
%% See timing_test/0 when considering using this or match_segment/2
%%
%% Check with KeyCount=10000 SegCount=4 TreeSizes small large:
%% adjust_segmentmatch_list check took 1.256 ms match_segment took 5.229 ms
%%
%% Check with KeyCount=10000 SegCount=8 TreeSizes small large:
%% adjust_segmentmatch_list check took 2.065 ms match_segment took 8.637 ms
%%
%% Check with KeyCount=10000 SegCount=4 TreeSizes medium large:
%% adjust_segmentmatch_list check took 0.453 ms match_segment took 4.843 ms
%%
%% Check with KeyCount=10000 SegCount=4 TreeSizes small medium:
%% adjust_segmentmatch_list check took 0.451 ms match_segment took 5.528 ms
%%
%% Check with KeyCount=100000 SegCount=4 TreeSizes small large:
%% adjust_segmentmatch_list check took 11.986 ms match_segment took 56.522 ms
%%
adjust_segmentmatch_list(SegmentList, CompareSize, StoreSize) ->
    CompareSizeI = get_size(CompareSize),
    StoreSizeI = get_size(StoreSize),
    % Assertive if with no true-branch fallback: CompareSize larger than
    % StoreSize crashes with if_clause.  When the sizes are equal ExpItems
    % is 0 and the input list is returned (usorted) unchanged.
    if CompareSizeI =< StoreSizeI ->
        ExpItems = StoreSizeI div CompareSizeI - 1,
        ShiftFactor = round(leveled_math:log2(CompareSizeI * ?L2_CHUNKSIZE)),
        ExpList =
            lists:map(fun(X) -> X bsl ShiftFactor end, lists:seq(1, ExpItems)),
        UpdSegmentList =
            lists:foldl(fun(S, Acc) ->
                            L = lists:map(fun(F) -> F + S end, ExpList),
                            L ++ Acc
                        end,
                        [],
                        SegmentList),
        lists:usort(UpdSegmentList ++ SegmentList)
    end.

-spec match_segment({integer(), tree_size()}, {integer(), tree_size()})
                                                            -> boolean().
%% @doc
%% Does segment A match segment B - given that segment A was generated using
%% Tree size A and segment B was generated using Tree Size B
match_segment({SegIDA, TreeSizeA}, {SegIDB, TreeSizeB}) ->
    % Compare both IDs down-shifted into the smaller segment address space
    SmallestTreeSize =
        min(get_size(TreeSizeA), get_size(TreeSizeB)) * ?L2_CHUNKSIZE,
    get_segment(SegIDA, SmallestTreeSize)
        == get_segment(SegIDB, SmallestTreeSize).

-spec join_segment(integer(), integer()) -> integer().
%% @doc
%% Generate a segment ID for the Branch and Leaf ID co-ordinates.
%% bsl and + share precedence and associate left, so this parses as
%% (BranchID bsl ?L2_BITSIZE) + LeafID as intended.
join_segment(BranchID, LeafID) ->
    BranchID bsl ?L2_BITSIZE + LeafID.
%%%============================================================================
%%% Internal functions
%%%============================================================================
-spec extract_segment(integer(), tictactree()) ->
                        {integer(), integer(), tree_extract(), tree_extract()}.
%% @doc
%% Extract the Level 1 and Level 2 slices from a tree to prepare an update
extract_segment(Segment, TicTacTree) ->
    % Low ?L2_BITSIZE bits address the leaf within its branch ...
    Level2Pos =
        Segment band (?L2_CHUNKSIZE - 1),
    % ... high bits address the branch within level 1
    Level1Pos =
        (Segment bsr ?L2_BITSIZE)
            band (TicTacTree#tictactree.width - 1),
    Level2BytePos = ?HASH_SIZE * Level2Pos,
    Level1BytePos = ?HASH_SIZE * Level1Pos,
    Level2 = get_level2(TicTacTree, Level1Pos),
    HashIntLength = ?HASH_SIZE * 8,
    % Split each level around the targeted hash so replace_segment/5 can
    % splice an updated hash between the Pre and Post parts
    <<PreL2:Level2BytePos/binary,
        SegLeaf2:HashIntLength/integer,
        PostL2/binary>> = Level2,
    <<PreL1:Level1BytePos/binary,
        SegLeaf1:HashIntLength/integer,
        PostL1/binary>> = TicTacTree#tictactree.level1,
    {SegLeaf1,
        SegLeaf2,
        {PreL1, Level1BytePos, Level1Pos, HashIntLength, PostL1},
        {PreL2, Level2BytePos, Level2Pos, HashIntLength, PostL2}}.

-spec replace_segment(integer(), integer(),
                        tree_extract(), tree_extract(),
                        tictactree()) -> tictactree().
%% @doc
%% Replace a slice of a tree, using extracts produced by extract_segment/2
replace_segment(L1Hash, L2Hash, L1Extract, L2Extract, TicTacTree) ->
    {PreL1, Level1BytePos, Level1Pos, HashIntLength, PostL1} = L1Extract,
    % HashIntLength is already bound - matching it here asserts that both
    % extracts agree on the hash width
    {PreL2, Level2BytePos, _Level2Pos, HashIntLength, PostL2} = L2Extract,
    Level1Upd = <<PreL1:Level1BytePos/binary,
                    L1Hash:HashIntLength/integer,
                    PostL1/binary>>,
    Level2Upd = <<PreL2:Level2BytePos/binary,
                    L2Hash:HashIntLength/integer,
                    PostL2/binary>>,
    TicTacTree#tictactree{level1 = Level1Upd,
                            level2 = array:set(Level1Pos,
                                                Level2Upd,
                                                TicTacTree#tictactree.level2)}.

%% Fetch the level-2 binary for a branch.  Branches are created lazily: an
%% unset array slot holds ?EMPTY and is materialised here as an all-zero
%% binary with one hash slot per leaf.
get_level2(TicTacTree, L1Pos) ->
    case array:get(L1Pos, TicTacTree#tictactree.level2) of
        ?EMPTY ->
            Lv2SegBinSize = ?L2_CHUNKSIZE * ?HASH_SIZE * 8,
            <<0:Lv2SegBinSize/integer>>;
        SrcL2 ->
            SrcL2
    end.
%% Map a tree-size atom to its width (number of level-2 branches).
%% Unknown atoms crash with function_clause, as the original case_clause did.
get_size(xxsmall) -> ?XXSMALL;
get_size(xsmall)  -> ?XSMALL;
get_size(small)   -> ?SMALL;
get_size(medium)  -> ?MEDIUM;
get_size(large)   -> ?LARGE;
get_size(xlarge)  -> ?XLARGE.
%% Compare two equal-length hash binaries, returning the indexes of the
%% hash slots that differ.  An empty binary on either side is treated as an
%% all-zero binary of the other side's length; two non-empty binaries of
%% differing length fail to match any clause.
segmentcompare(SrcBin, SinkBin) when byte_size(SrcBin) == byte_size(SinkBin) ->
    segmentcompare(SrcBin, SinkBin, [], 0);
segmentcompare(<<>>, SinkBin) ->
    ZeroBits = bit_size(SinkBin),
    segmentcompare(<<0:ZeroBits/integer>>, SinkBin);
segmentcompare(SrcBin, <<>>) ->
    ZeroBits = bit_size(SrcBin),
    segmentcompare(SrcBin, <<0:ZeroBits/integer>>).

segmentcompare(<<>>, <<>>, DirtyIdx, _Counter) ->
    DirtyIdx;
segmentcompare(<<SrcHash:?HASH_SIZE/binary, SrcRest/binary>>,
                <<SnkHash:?HASH_SIZE/binary, SnkRest/binary>>,
                DirtyIdx, Counter) ->
    case SrcHash =:= SnkHash of
        true ->
            segmentcompare(SrcRest, SnkRest, DirtyIdx, Counter + 1);
        false ->
            segmentcompare(SrcRest, SnkRest, [Counter|DirtyIdx], Counter + 1)
    end.
%% Sanity check: assert that every level-1 branch hash equals the bxor of
%% the leaf hashes in its level-2 binary.  Crashes (badmatch) on the first
%% inconsistent branch; returns true if the whole tree is consistent.
checktree(TicTacTree) ->
    checktree(TicTacTree#tictactree.level1, TicTacTree, 0).

checktree(<<>>, TicTacTree, Counter) ->
    % All branches consumed - the count must equal the tree width
    true = TicTacTree#tictactree.width == Counter;
checktree(Level1Bin, TicTacTree, Counter) ->
    BitSize = ?HASH_SIZE * 8,
    <<TopHash:BitSize/integer, Tail/binary>> = Level1Bin,
    L2Bin = get_level2(TicTacTree, Counter),
    true = TopHash == segmentsummarise(L2Bin, 0),
    checktree(Tail, TicTacTree, Counter + 1).

%% Fold over a level-2 binary, bxor-ing every leaf hash into the accumulator.
segmentsummarise(<<>>, L1Acc) ->
    L1Acc;
segmentsummarise(L2Bin, L1Acc) ->
    BitSize = ?HASH_SIZE * 8,
    <<TopHash:BitSize/integer, Tail/binary>> = L2Bin,
    segmentsummarise(Tail, L1Acc bxor TopHash).
%% bxor two binaries of identical bit length together; the repeated match
%% on Bits asserts that the lengths agree before merging.
merge_binaries(BinA, BinB) ->
    Bits = bit_size(BinA),
    Bits = bit_size(BinB),
    <<IntA:Bits/integer>> = BinA,
    <<IntB:Bits/integer>> = BinB,
    Merged = IntA bxor IntB,
    <<Merged:Bits/integer>>.
%%%============================================================================
%%% Test
%%%============================================================================
-ifdef(TEST).

%% Run the basic add/compare/export round-trip at every supported tree size.
%% Wrapped in a generator with a raised timeout as the larger sizes are slow.
simple_bysize_test_() ->
    {timeout, 60, fun simple_bysize_test_allsizes/0}.

simple_bysize_test_allsizes() ->
    simple_test_withsize(xxsmall),
    simple_test_withsize(xsmall),
    simple_test_withsize(small),
    simple_test_withsize(medium),
    simple_test_withsize(large),
    simple_test_withsize(xlarge).

simple_test_withsize(Size) ->
    ?assertMatch(true, valid_size(Size)),
    BinFun = fun(K, V) -> {term_to_binary(K), term_to_binary(V)} end,
    K1 = {o, "B1", "K1", null},
    K2 = {o, "B1", "K2", null},
    K3 = {o, "B1", "K3", null},
    Tree0 = new_tree(0, Size),
    Tree1 = add_kv(Tree0, K1, {caine, 1}, BinFun),
    % Check that we can get to the segment ID that has changed, and confirm it
    % is the segment ID expected
    Root1 = fetch_root(Tree1),
    Root0 = fetch_root(Tree0),
    [BranchID] = find_dirtysegments(Root0, Root1),
    [{BranchID, Branch1}] = fetch_leaves(Tree1, [BranchID]),
    [{BranchID, Branch0}] = fetch_leaves(Tree0, [BranchID]),
    [LeafID] = find_dirtysegments(Branch0, Branch1),
    SegK1 = keyto_segment32(K1) band (get_size(Size) * 256 - 1),
    ?assertMatch(SegK1, join_segment(BranchID, LeafID)),
    Tree2 = add_kv(Tree1, K2, {caine, 2}, BinFun),
    Tree3 = add_kv(Tree2, K3, {caine, 3}, BinFun),
    Tree3A = add_kv(Tree3, K3, {caine, 4}, BinFun),
    % Each add must change the root; trees built from the same adds in a
    % different order must converge to the same root
    ?assertMatch(true, Tree0#tictactree.level1 == Tree0#tictactree.level1),
    ?assertMatch(false, Tree0#tictactree.level1 == Tree1#tictactree.level1),
    ?assertMatch(false, Tree1#tictactree.level1 == Tree2#tictactree.level1),
    ?assertMatch(false, Tree2#tictactree.level1 == Tree3#tictactree.level1),
    ?assertMatch(false, Tree3#tictactree.level1 == Tree3A#tictactree.level1),
    Tree0X = new_tree(0, Size),
    Tree1X = add_kv(Tree0X, K3, {caine, 3}, BinFun),
    Tree2X = add_kv(Tree1X, K1, {caine, 1}, BinFun),
    Tree3X = add_kv(Tree2X, K2, {caine, 2}, BinFun),
    Tree3XA = add_kv(Tree3X, K3, {caine, 4}, BinFun),
    ?assertMatch(false, Tree1#tictactree.level1 == Tree1X#tictactree.level1),
    ?assertMatch(false, Tree2#tictactree.level1 == Tree2X#tictactree.level1),
    ?assertMatch(true, Tree3#tictactree.level1 == Tree3X#tictactree.level1),
    ?assertMatch(true, Tree3XA#tictactree.level1 == Tree3XA#tictactree.level1),
    SC = Tree0#tictactree.segment_count,
    GetSegFun =
        fun(TK) ->
            get_segment(keyto_segment32(term_to_binary(TK)), SC)
        end,
    DL0 = find_dirtyleaves(Tree1, Tree0),
    ?assertMatch(true, lists:member(GetSegFun(K1), DL0)),
    DL1 = find_dirtyleaves(Tree3, Tree1),
    ?assertMatch(true, lists:member(GetSegFun(K2), DL1)),
    ?assertMatch(true, lists:member(GetSegFun(K3), DL1)),
    ?assertMatch(false, lists:member(GetSegFun(K1), DL1)),
    % Export and import tree to confirm no difference
    ExpTree3 = export_tree(Tree3),
    ImpTree3 = import_tree(ExpTree3),
    ?assertMatch(DL1, find_dirtyleaves(ImpTree3, Tree1)).
%% Merge tests at each size; xlarge runs under a raised timeout generator.
merge_bysize_small_test() ->
    merge_test_withsize(small).

merge_bysize_medium_test() ->
    merge_test_withsize(medium).

merge_bysize_large_test() ->
    merge_test_withsize(large).

merge_bysize_xlarge_test_() ->
    {timeout, 60, fun merge_bysize_xlarge_test2/0}.

merge_bysize_xlarge_test2() ->
    merge_test_withsize(xlarge).

%% Merging two disjointly-populated trees (X keys and Y keys) must produce
%% the same root as a single tree (Z) with all keys added directly.
merge_test_withsize(Size) ->
    BinFun = fun(K, V) -> {term_to_binary(K), term_to_binary(V)} end,
    TreeX0 = new_tree(0, Size),
    TreeX1 = add_kv(TreeX0, {o, "B1", "X1", null}, {caine, 1}, BinFun),
    TreeX2 = add_kv(TreeX1, {o, "B1", "X2", null}, {caine, 2}, BinFun),
    TreeX3 = add_kv(TreeX2, {o, "B1", "X3", null}, {caine, 3}, BinFun),
    TreeX4 = add_kv(TreeX3, {o, "B1", "X3", null}, {caine, 4}, BinFun),
    TreeY0 = new_tree(0, Size),
    TreeY1 = add_kv(TreeY0, {o, "B1", "Y1", null}, {caine, 101}, BinFun),
    TreeY2 = add_kv(TreeY1, {o, "B1", "Y2", null}, {caine, 102}, BinFun),
    TreeY3 = add_kv(TreeY2, {o, "B1", "Y3", null}, {caine, 103}, BinFun),
    TreeY4 = add_kv(TreeY3, {o, "B1", "Y3", null}, {caine, 104}, BinFun),
    TreeZ1 = add_kv(TreeX4, {o, "B1", "Y1", null}, {caine, 101}, BinFun),
    TreeZ2 = add_kv(TreeZ1, {o, "B1", "Y2", null}, {caine, 102}, BinFun),
    TreeZ3 = add_kv(TreeZ2, {o, "B1", "Y3", null}, {caine, 103}, BinFun),
    TreeZ4 = add_kv(TreeZ3, {o, "B1", "Y3", null}, {caine, 104}, BinFun),
    TreeM0 = merge_trees(TreeX4, TreeY4),
    checktree(TreeM0),
    ?assertMatch(true, TreeM0#tictactree.level1 == TreeZ4#tictactree.level1),
    % Merging a tree missing one add must NOT equal the full tree
    TreeM1 = merge_trees(TreeX3, TreeY4),
    checktree(TreeM1),
    ?assertMatch(false, TreeM1#tictactree.level1 == TreeZ4#tictactree.level1).

%% tictac_hash must yield non-negative (exportable) integers.
exportable_test() ->
    {Int1, Int2} = tictac_hash(<<"key">>, <<"value">>),
    ?assertMatch({true, true}, {Int1 >= 0, Int2 >=0}).

%% Merging two empty trees must produce a tree with no dirty leaves.
merge_emptytree_test() ->
    TreeA = new_tree("A"),
    TreeB = new_tree("B"),
    TreeC = merge_trees(TreeA, TreeB),
    ?assertMatch([], find_dirtyleaves(TreeA, TreeC)).
%% Zeroing the one differing segment on both trees must remove the delta.
alter_segment_test() ->
    BinFun = fun(K, V) -> {term_to_binary(K), term_to_binary(V)} end,
    TreeX0 = new_tree(0, small),
    TreeX1 = add_kv(TreeX0, {o, "B1", "X1", null}, {caine, 1}, BinFun),
    TreeX2 = add_kv(TreeX1, {o, "B1", "X2", null}, {caine, 2}, BinFun),
    TreeX3 = add_kv(TreeX2, {o, "B1", "X3", null}, {caine, 3}, BinFun),
    TreeX4 = add_kv(TreeX3, {o, "B1", "X3", null}, {caine, 4}, BinFun),
    TreeY5 = add_kv(TreeX4, {o, "B1", "Y4", null}, {caine, 5}, BinFun),
    [{DeltaBranch, DeltaLeaf}] = compare_trees_maxonedelta(TreeX4, TreeY5),
    % NOTE(review): multiplies by ?SMALL rather than using join_segment/2 -
    % presumably ?SMALL equals the L2 chunk size here; confirm
    DeltaSegment = DeltaBranch * ?SMALL + DeltaLeaf,
    io:format("DeltaSegment ~w", [DeltaSegment]),
    TreeX4A = alter_segment(DeltaSegment, 0, TreeX4),
    TreeY5A = alter_segment(DeltaSegment, 0, TreeY5),
    CompareResult = compare_trees_maxonedelta(TreeX4A, TreeY5A),
    ?assertMatch([], CompareResult).

%% add_kv/5 with ReturnSegment=true must return a segment usable with
%% alter_segment/3 to re-introduce a difference.
return_segment_test() ->
    BinFun = fun(K, V) -> {term_to_binary(K), term_to_binary(V)} end,
    TreeX0 = new_tree(0, small),
    {TreeX1, SegID}
        = add_kv(TreeX0, {o, "B1", "X1", null}, {caine, 1}, BinFun, true),
    TreeX2 = alter_segment(SegID, 0, TreeX1),
    ?assertMatch(1, length(compare_trees_maxonedelta(TreeX1, TreeX0))),
    ?assertMatch(1, length(compare_trees_maxonedelta(TreeX1, TreeX2))).

%% Test helper: return the single {BranchID, LeafID} difference between two
%% trees, or [] when they are identical.  Asserts (via the single-element
%% matches) that there is at most one dirty branch and one dirty leaf.
compare_trees_maxonedelta(Tree0, Tree1) ->
    Root1 = fetch_root(Tree1),
    Root0 = fetch_root(Tree0),
    case find_dirtysegments(Root0, Root1) of
        [BranchID] ->
            [{BranchID, Branch1}] = fetch_leaves(Tree1, [BranchID]),
            [{BranchID, Branch0}] = fetch_leaves(Tree0, [BranchID]),
            [LeafID] = find_dirtysegments(Branch0, Branch1),
            [{BranchID, LeafID}];
        [] ->
            []
    end.
%% Cross-size segment matching: a key's segments taken at two different tree
%% sizes must be recognised as matching, and the expanded match list must
%% contain the larger-size segment.
segment_match_test() ->
    segment_match_tester(small, large, <<"K0">>),
    segment_match_tester(xlarge, medium, <<"K1">>),
    expand_membershiplist_tester(small, large, <<"K0">>),
    expand_membershiplist_tester(xsmall, large, <<"K1">>),
    expand_membershiplist_tester(large, xlarge, <<"K2">>).

segment_match_tester(Size1, Size2, Key) ->
    HashKey = keyto_segment32(Key),
    Segment1 = get_segment(HashKey, Size1),
    Segment2 = get_segment(HashKey, Size2),
    ?assertMatch(true, match_segment({Segment1, Size1}, {Segment2, Size2})).

expand_membershiplist_tester(SmallSize, LargeSize, Key) ->
    HashKey = keyto_segment32(Key),
    Segment1 = get_segment(HashKey, SmallSize),
    Segment2 = get_segment(HashKey, LargeSize),
    AdjList = adjust_segmentmatch_list([Segment1], SmallSize, LargeSize),
    ?assertMatch(true, lists:member(Segment2, AdjList)).

%% Concrete expansion small->medium: each segment gains three shifted
%% aliases; equal sizes leave the list unchanged.
segment_expandsimple_test() ->
    AdjList = adjust_segmentmatch_list([1, 100], small, medium),
    io:format("List adjusted to ~w~n", [AdjList]),
    ?assertMatch(true, lists:member(1, AdjList)),
    ?assertMatch(true, lists:member(100, AdjList)),
    ?assertMatch(true, lists:member(65537, AdjList)),
    ?assertMatch(true, lists:member(131073, AdjList)),
    ?assertMatch(true, lists:member(196609, AdjList)),
    ?assertMatch(true, lists:member(65636, AdjList)),
    ?assertMatch(true, lists:member(131172, AdjList)),
    ?assertMatch(true, lists:member(196708, AdjList)),
    ?assertMatch(8, length(AdjList)),
    OrigList = adjust_segmentmatch_list([1, 100], medium, medium),
    ?assertMatch([1, 100], OrigList).
%% Benchmark backing the timings quoted above adjust_segmentmatch_list/3:
%% both strategies must select the same keys, and their timings are printed.
timing_test() ->
    timing_tester(10000, 4, small, large),
    timing_tester(10000, 8, small, large),
    timing_tester(10000, 4, medium, large),
    timing_tester(10000, 4, small, medium),
    timing_tester(100000, 4, small, large).

timing_tester(KeyCount, SegCount, SmallSize, LargeSize) ->
    % NOTE(review): leveled_rand is a project module, presumed to wrap the
    % OTP rand module
    SegList =
        lists:map(fun(_C) ->
                        leveled_rand:uniform(get_size(SmallSize) * ?L2_CHUNKSIZE - 1)
                    end,
                    lists:seq(1, SegCount)),
    KeyToSegFun =
        fun(I) ->
            HK = keyto_segment32(integer_to_binary(I)),
            {I, get_segment(HK, LargeSize)}
        end,
    MatchList = lists:map(KeyToSegFun, lists:seq(1, KeyCount)),
    {T0, Out0} =
        adjustsegmentlist_check(SegList, MatchList, SmallSize, LargeSize),
    {T1, Out1} =
        matchbysegment_check(SegList, MatchList, SmallSize, LargeSize),
    % Both strategies must select exactly the same keys
    ?assertMatch(true, Out0 == Out1),
    io:format(user, "~nCheck with KeyCount=~w SegCount=~w TreeSizes ~w ~w:~n",
                [KeyCount, SegCount, SmallSize, LargeSize]),
    io:format(user,
                "adjust_segmentmatch_list check took ~w ms " ++
                    "match_segment took ~w ms~n",
                [T0, T1]).

%% Strategy 1: expand the segment list once, then filter by membership.
%% Returns {ElapsedMs, SelectedKeys}.
adjustsegmentlist_check(SegList, MatchList, SmallSize, LargeSize) ->
    SW = os:timestamp(),
    AdjList = adjust_segmentmatch_list(SegList, SmallSize, LargeSize),
    PredFun =
        fun({_I, S}) ->
            lists:member(S, AdjList)
        end,
    OL = lists:filter(PredFun, MatchList),
    {timer:now_diff(os:timestamp(), SW)/1000, OL}.

%% Strategy 2: call match_segment/2 per key per segment, short-circuiting
%% via the fold once a match is found.  Returns {ElapsedMs, SelectedKeys}.
matchbysegment_check(SegList, MatchList, SmallSize, LargeSize) ->
    SW = os:timestamp(),
    PredFun =
        fun({_I, S}) ->
            FoldFun =
                fun(_SM, true) ->
                    true;
                (SM, false) ->
                    match_segment({SM, SmallSize}, {S, LargeSize})
                end,
            lists:foldl(FoldFun, false, SegList)
        end,
    OL = lists:filter(PredFun, MatchList),
    {timer:now_diff(os:timestamp(), SW)/1000, OL}.

%% An empty binary passed to find_dirtysegments/2 is treated as an all-zero
%% root of the other side's length, in either argument position.
find_dirtysegments_withanemptytree_test() ->
    T1 = new_tree(t1),
    T2 = new_tree(t2),
    ?assertMatch([], find_dirtysegments(fetch_root(T1), fetch_root(T2))),
    {T3, DS1} =
        add_kv(T2, <<"TestKey">>, <<"V1">>, fun(B, K) -> {B, K} end, true),
    ExpectedAnswer = [DS1 div 256],
    ?assertMatch(ExpectedAnswer, find_dirtysegments(<<>>, fetch_root(T3))),
    ?assertMatch(ExpectedAnswer, find_dirtysegments(fetch_root(T3), <<>>)).
-endif.
%%% vi:ts=4 sw=4 et
%%%-------------------------------------------------------------------
%%% @author <NAME> <<EMAIL>>
%%% @copyright 2011 Erlware, LLC.
%%% @doc
%%% This provides an implementation of the type ec_dictionary using
%%% gb_trees as a backing
%%% see ec_dictionary
%%% see gb_trees
%%% @end
%%%-------------------------------------------------------------------
-module(ec_gb_trees).
-behaviour(ec_dictionary).
%% API
-export([new/0,
has_key/2,
get/2,
get/3,
add/3,
remove/2,
has_value/2,
size/1,
to_list/1,
from_list/1,
keys/1]).
%%%===================================================================
%%% API
%%%===================================================================
%% @doc create a new dictionary object from the specified module. The
%% module should implement the dictionary behaviour. In the clause
%% where an existing object is passed in new empty dictionary of the
%% same implementation is created and returned.
%%
%% NOTE(review): this arity-0 callback takes no arguments; the @param below
%% appears to be boilerplate copied from the ec_dictionary front-end.
%%
%% @param ModuleName|Object The module name or existing dictionary object.
-spec new() -> gb_trees:tree(_K, _V).
new() ->
    gb_trees:empty().
%% @doc check to see if the dictionary provided has the specified key.
%%
%% @param Key The key to check the dictionary for
%% @param Data The dictionary object to check
-spec has_key(ec_dictionary:key(K), Object :: gb_trees:tree(K, _V)) -> boolean().
has_key(Key, Data) ->
    %% gb_trees ships a membership predicate; use it rather than
    %% re-implementing it with lookup/2 plus a case expression.
    gb_trees:is_defined(Key, Data).
%% @doc given a key return that key from the dictionary. If the key is
%% not found throw a 'not_found' exception.
%%
%% @param Key The key requested
%% @param Data The dictionary object to return the value from
%% @throws not_found when the key does not exist
-spec get(ec_dictionary:key(K), Object :: gb_trees:tree(K, V)) ->
                 ec_dictionary:value(V).
get(Key, Data) ->
    case gb_trees:lookup(Key, Data) of
        none ->
            throw(not_found);
        {value, Found} ->
            Found
    end.
%% @doc given a key return its value from the dictionary, or the supplied
%% default when the key is absent.
%%
%% @param Key The key requested
%% @param Default Value returned when the key is not present
%% @param Data The dictionary object to return the value from
-spec get(ec_dictionary:key(K),
          ec_dictionary:value(V),
          Object :: gb_trees:tree(K, V)) ->
                 ec_dictionary:value(V).
get(Key, Default, Data) ->
    case gb_trees:lookup(Key, Data) of
        none ->
            Default;
        {value, Found} ->
            Found
    end.
%% @doc add a new value to the existing dictionary. Return a new
%% dictionary containing the value.
%%
%% @param Object the dictionary object to add to
%% @param Key the key to add
%% @param Value the value to add
-spec add(ec_dictionary:key(K), ec_dictionary:value(V),
          Object :: gb_trees:tree(K, V)) ->
                 gb_trees:tree(K, V).
add(Key, Value, Data) ->
    %% enter/3 inserts or updates, so an existing key is overwritten
    gb_trees:enter(Key, Value, Data).

%% @doc Remove a value from the dictionary returning a new dictionary
%% with the value removed.
%%
%% @param Object the dictionary object to remove the value from
%% @param Key the key of the key/value pair to remove
-spec remove(ec_dictionary:key(K), Object :: gb_trees:tree(K, V)) ->
                    gb_trees:tree(K, V).
remove(Key, Data) ->
    %% delete_any/2 is a no-op when the key is absent
    gb_trees:delete_any(Key, Data).

%% @doc Check to see if the value exists in the dictionary
%%
%% @param Object the dictionary object to check
%% @param Value The value to check if exists
-spec has_value(ec_dictionary:value(V), Object :: gb_trees:tree(_K, V)) -> boolean().
has_value(Value, Data) ->
    %% O(n): materialises every value in the tree before searching
    lists:member(Value, gb_trees:values(Data)).

%% @doc return the current number of key value pairs in the dictionary
%%
%% @param Object the object return the size for.
-spec size(Object :: gb_trees:tree(_K, _V)) -> non_neg_integer().
size(Data) ->
    gb_trees:size(Data).
-spec to_list(gb_trees:tree(K, V)) -> [{ec_dictionary:key(K),
                                        ec_dictionary:value(V)}].
%% @doc convert the dictionary into a key-ordered key/value list.
to_list(Data) ->
    gb_trees:to_list(Data).

-spec from_list([{ec_dictionary:key(K), ec_dictionary:value(V)}]) ->
                       gb_trees:tree(K, V).
%% @doc build a dictionary from a key/value list; when a key occurs more
%% than once the later entry wins.
from_list(Pairs) when is_list(Pairs) ->
    Enter = fun({Key, Value}, Tree) -> gb_trees:enter(Key, Value, Tree) end,
    lists:foldl(Enter, gb_trees:empty(), Pairs).

-spec keys(gb_trees:tree(K, _V)) -> [ec_dictionary:key(K)].
%% @doc return the dictionary keys in sorted order.
keys(Data) ->
    gb_trees:keys(Data).
%%%===================================================================
%%% Tests
%%%===================================================================
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% For me unit testing initially is about covering the obvious case. A
%% check to make sure that what you expect the tested functionality to
%% do, it actually does. As time goes on and people detect bugs you
%% add tests for those specific problems to the unit test suite.
%%
%% However, when getting started you can only test your basic
%% expectations. So here are the expectations I have for the add
%% functionality.
%%
%% 1) I can put arbitrary terms into the dictionary as keys
%% 2) I can put arbitrary terms into the dictionary as values
%% 3) When I put a value in the dictionary by a key, I can retrieve
%% that same value
%% 4) When I put a different value in the dictionary by key it does
%% not change other key value pairs.
%% 5) When I update a value the new value is available via the key
%% 6) When a value does not exist a not found exception is created
%% Exercise add/get through the ec_dictionary front-end: arbitrary terms as
%% keys and values, retrieval after insert, updates that replace values
%% without disturbing other pairs, and not_found on missing keys.
add_test() ->
    Dict0 = ec_dictionary:new(ec_gb_trees),
    Key1 = foo,
    Key2 = [1, 3],
    Key3 = {"super"},
    Key4 = <<"fabulous">>,
    Key5 = {"Sona", 2, <<"Zuper">>},
    Value1 = Key5,
    Value2 = Key4,
    Value3 = Key2,
    Value4 = Key3,
    Value5 = Key1,
    Dict01 = ec_dictionary:add(Key1, Value1, Dict0),
    Dict02 = ec_dictionary:add(Key3, Value3,
                               ec_dictionary:add(Key2, Value2,
                                                 Dict01)),
    Dict1 =
        ec_dictionary:add(Key5, Value5,
                          ec_dictionary:add(Key4, Value4,
                                            Dict02)),
    ?assertMatch(Value1, ec_dictionary:get(Key1, Dict1)),
    ?assertMatch(Value2, ec_dictionary:get(Key2, Dict1)),
    ?assertMatch(Value3, ec_dictionary:get(Key3, Dict1)),
    ?assertMatch(Value4, ec_dictionary:get(Key4, Dict1)),
    ?assertMatch(Value5, ec_dictionary:get(Key5, Dict1)),
    % Re-adding Key2/Key3 with new values must only affect those two pairs
    Dict2 = ec_dictionary:add(Key3, Value5,
                              ec_dictionary:add(Key2, Value4, Dict1)),
    ?assertMatch(Value1, ec_dictionary:get(Key1, Dict2)),
    ?assertMatch(Value4, ec_dictionary:get(Key2, Dict2)),
    ?assertMatch(Value5, ec_dictionary:get(Key3, Dict2)),
    ?assertMatch(Value4, ec_dictionary:get(Key4, Dict2)),
    ?assertMatch(Value5, ec_dictionary:get(Key5, Dict2)),
    ?assertThrow(not_found, ec_dictionary:get(should_blow_up, Dict2)),
    ?assertThrow(not_found, ec_dictionary:get("This should blow up too",
                                              Dict2)).
-endif.
% Copyright (c) 2017 <EMAIL> <<EMAIL>>
% See LICENCE
-module(ternary).
% @ref https://en.wikipedia.org/wiki/Balanced_ternary
-export([negate/1, add/2, subtract/2]).
-export([from_integer/1, to_integer/1, from_binary/1, to_binary/1]).
-define(BASE, 3).
-define(is_trit(X), (X >= -1 andalso X =< 1)).
%% Negate a balanced-ternary number by flipping the sign of every trit.
negate(Trits) ->
    lists:map(fun(Trit) -> -Trit end, Trits).
%% Add two balanced-ternary numbers.  Both lists are reversed so the
%% least-significant trit is processed first, carrying through add3/3.
add(Trits, Trits0) ->
    add(lists:reverse(Trits), lists:reverse(Trits0), 0, []).

add([H|T], [H0|T0], C, Acc) ->
    [C0, X] = add3(H, H0, C),
    add(T, T0, C0, [X|Acc]);
add([], [H0|T0], C, Acc) ->
    % Left operand exhausted - treat its remaining trits as zero
    [C0, X] = add3(0, H0, C),
    add([], T0, C0, [X|Acc]);
add([H|T], [], C, Acc) ->
    % Right operand exhausted - treat its remaining trits as zero
    [C0, X] = add3(H, 0, C),
    add(T, [], C0, [X|Acc]);
add([], [], C, Acc) when C =/= 0 ->
    % Final carry becomes a new most-significant trit
    pad3([C|Acc]);
add([], [], 0, Acc) ->
    pad3(Acc).

%% Full adder for three trits: returns [Carry, Digit] such that the sum
%% X + Y + Z equals 3 * Carry + Digit, with both in the range -1..1.
add3(X, Y, Z) ->
    case X + Y + Z of
        -3 -> [-1, 0];
        -2 -> [-1, 1];
        -1 -> [0, -1];
        0 -> [0, 0];
        1 -> [0, 1];
        2 -> [1, -1];
        3 -> [1, 0]
    end.

%% Subtraction is addition of the negation (balanced ternary negation is
%% simply flipping every trit).
subtract(Trits, Trits0) ->
    add(Trits, negate(Trits0)).

%% Convert an integer (positive, zero or negative) to balanced ternary.
from_integer(0) ->
    [0, 0, 0];
from_integer(N) ->
    from_integer(N, 0, []).

from_integer(0, 0, Acc) ->
    pad3(Acc);
from_integer(0, Carry, Acc) ->
    pad3([Carry|Acc]);
from_integer(N, Carry, Acc) ->
    % rem/div truncate toward zero, so a remainder of +-2 is folded by
    % add3/3 into a -+1 digit plus a carry into the next position
    [C, X] = add3(N rem ?BASE, 0, Carry),
    from_integer(N div ?BASE, C, [X|Acc]).

%% Convert balanced ternary back to an integer (Horner evaluation,
%% most-significant trit first).  A non-trit element fails the guard and
%% crashes with function_clause.
to_integer(Trits) ->
    to_integer(Trits, 0).

to_integer([H|T], Acc) when ?is_trit(H) ->
    to_integer(T, Acc * ?BASE + H);
to_integer([], Acc) ->
    Acc.

%% Decode a binary of 2-bit trit codes (0 -> 0, 1 -> 1, 2 -> -1; 3 is
%% invalid) into a trit list, stripping leading zeros then re-padding to a
%% multiple of three trits.
from_binary(Bin) ->
    Trits = from_binary(Bin, []),
    Trits0 = lists:dropwhile(fun(X) -> X =:= 0 end, Trits),
    pad3(Trits0).

from_binary(<<0:2, Bin/bits>>, Acc) ->
    from_binary(Bin, [0|Acc]);
from_binary(<<1:2, Bin/bits>>, Acc) ->
    from_binary(Bin, [1|Acc]);
from_binary(<<2:2, Bin/bits>>, Acc) ->
    from_binary(Bin, [-1|Acc]);
from_binary(<<3:2, _/bits>>, _) ->
    error(invalid_encoding);
from_binary(<<>>, Acc) ->
    % Trits were accumulated in reverse; restore the original order
    lists:reverse(Acc).

%% Encode a trit list as a binary of 2-bit codes.  Leading zeros are
%% stripped and the list padded to a multiple of four trits, so the
%% resulting bitstring is a whole number of bytes (4 trits x 2 bits).
to_binary(Trits) ->
    Trits0 = lists:dropwhile(fun(X) -> X =:= 0 end, Trits),
    Trits1 = pad4(Trits0),
    to_binary(Trits1, <<>>).

to_binary([0|Trits], Acc) ->
    to_binary(Trits, <<Acc/bits, 0:2>>);
to_binary([1|Trits], Acc) ->
    to_binary(Trits, <<Acc/bits, 1:2>>);
to_binary([-1|Trits], Acc) ->
    to_binary(Trits, <<Acc/bits, 2:2>>);
to_binary([], Acc) ->
    Acc.
%% Left-pad with zero trits so the list length is a multiple of three.
pad3(Trits) ->
    case length(Trits) rem 3 of
        0 -> Trits;
        Rem -> lists:duplicate(3 - Rem, 0) ++ Trits
    end.
%% Left-pad with zero trits so the list length is a multiple of four
%% (used by to_binary/1 so the 2-bit encoding fills whole bytes).
%% Note: the original final line carried trailing dataset-metadata junk
%% ("| src/ternary.erl | ...") after 'end.', which broke compilation; it
%% has been removed.
pad4(Trits) ->
    case length(Trits) rem 4 of
        0 -> Trits;
        1 -> [0, 0, 0|Trits];
        2 -> [0, 0|Trits];
        3 -> [0|Trits]
    end.
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved.
%%
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at http://mozilla.org/MPL/2.0/.
%%
%% -------------------------------------------------------------------
%% @doc
%% == Creating custom exometer entries ==
%%
%% An exometer_entry behavior implementation can be created when custom
%% processing of various metrics is needed.
%%
%% A custom exometer entry is invoked by mapping a type to the module
%% name of the custom exometer entry module. All metrics created with the
%% given type will trigger the invocation of the new entry module. See
%% {@section Configuring type - entry maps} for details on how to setup
%% such maps.
%%
%% The life cycle of an exometer entry consists of the following steps.
%%
%% + Metrics Creation
%% <br/>`new/3' is invoked by exometer to signal that a new metrics
%% should be created. The name of the new metric will be provided as
%% a list of atoms.
%%
%% + Update Data
%% <br/>Values will be sent to the entry through the `update/4'
%% function. The custom entry should store this value for the given
%% metric and break it down into data points that can be reported for
%% the metric.
%%
%% + Retrieve Value
%% <br/>`get_value/4' will be invoked by exometer to retrieve specific
%% data points from a given metric.
%%
%% See individual functions for details on the
%% in the exometer_entry behavior.
%%
%% === behaviour/0 ===
%%
%% The `behaviour()' function for an entry implementation should return
%% the atom `entry'. This function will be invoked by the
%% exometer system in order to determine if a callback is
%% an entry or a probe.
%%
%% === new/3 ===
%%
%% The `new()' function is invoked as follows:
%%
%% <pre lang="erlang">
%% new(Name, Type, Options)</pre>
%%
%% The custom entry should create the necessary state for the new metric and store
%% it for future access through `update()' and `get_value()' calls.
%%
%% + `Name'
%% <br/>Specifies the name of the metric to be created as a list of atoms.
%%
%% + `Type'
%% <br/>Specifies the type provided to the `exometer:new()' call (before it
%% was translated by the type - exometer entry map). It can be used if several
%% different types are mapped to the same entry module.
%%
%% + `Options'
%% <br/>Specifies an option list that contains additional setup directives to
%% the entry. The actual options to support are implementation dependent.
%%
%% The `new()' function should return `{ok, Ref}' where Ref is a
%% tuple that will be provided as a reference argument to other calls
%% made into the module. Any other return formats will cancel the
%% creation of the new metric.
%%
%% === delete/3 ===
%%
%% The `delete()' function is invoked as follows:
%%
%% <pre lang="erlang">
%% delete(Name, Type, Ref)</pre>
%%
%% The custom entry should free all resources associated with the given name.
%%
%% + `Name'
%% <br/>Specifies the name of the metric to be deleted as a list of atoms.
%%
%% + `Type'
%% <br/>Specifies the type provided to the `exometer:new()' call (before it
%% was translated by the type - exometer entry map).
%%
%% + `Ref'
%% <br/>Will contain the same tuple returned as `Ref' by the module's `new()' function.
%%
%% The `delete()' function shall return `ok'.
%%
%%
%% === get_value/4 ===
%%
%% The `get_value()' function is invoked as follows:
%%
%% <pre lang="erlang">
%% get_value(Name, Type, Ref, DataPoints)</pre>
%%
%% The custom entry should retrieve the metric with the given name and
%% return the values of the specified data points. Data points can be
%% expected to be one or more of those returned by the entry's
%% `get_datapoints()' function.
%%
%% + `Name'
%% <br/>Specifies the name of the metric to update with a value.
%%
%% + `Type'
%% <br/>Specifies the type provided to the `exometer:new()' call (before it
%% was translated by the type - exometer entry map).
%%
%% + `Ref'
%% <br/>Will contain the same tuple returned as `Ref' by the module's `new()' function.
%%
%% + `DataPoints'
%% <br/>Will contain a list of data points, each picked from the list returned by
%% the module's `get_datapoints()' function.
%%
%% The `get_value()' function should calculate the values of the given
%% data points based on previous calls to `update()' and return them to the caller.
%%
%% The return format shall be:
%%
%% <pre lang="erlang">
%% {ok, [ { DataPoint, Value }, ...]}</pre>
%%
%% Each `{ DataPoint, Value }' tuple shall contain the name and value of
%% one of the data points provided as arguments to `get_value()'.
%%
%% If a data point is not valid (i.e. not in the list returned by
%% `get_datapoints()'), the returned tuple should be `{ DataPoint,
%% undefined }'.
%%
%%
%% === update/4 ===
%%
%% The `update()' function is invoked as follows:
%%
%% <pre lang="erlang">
%% update(Name, Value, Type, Ref)</pre>
%%
%% + `Name'
%% <br/>Specifies the name of the metric to update.
%%
%% + `Value'
%% <br/>Specifies the new value to integrate into the given metric.
%%
%% + `Type'
%% <br/>Specifies the type provided to the `exometer:new()' call (before it
%% was translated by the type - exometer entry map).
%%
%% + `Ref'
%% <br/>Will contain the same tuple returned as `Ref' by the module's `new()' function.
%%
%% The `update()' function should update the data points for the metric with the
%% given name in preparation for future calls to `get_value()'.
%%
%% The return format shall be `ok'.
%%
%%
%% === reset/3 ===
%%
%% The `reset()' function is invoked as follows:
%%
%% <pre lang="erlang">
%% reset(Name, Type, Ref)</pre>
%%
%% + `Name'
%% <br/>Specifies the name of the metric to reset.
%%
%% + `Type'
%% <br/>Specifies the type provided to the `exometer:new()' call (before it
%% was translated by the type - exometer entry map).
%%
%% + `Ref'
%% <br/>Will contain the same tuple returned as `Ref' by the module's `new()' function.
%%
%% The `reset()' function should revert the metric with the given name to
%% its original state. A counter, for example, should be reset to 0 while
%% histograms should be emptied.
%%
%% The return format shall be `ok'.
%%
%% === sample/3 ===
%%
%% The `sample()' function is invoked as follows:
%%
%% <pre lang="erlang">
%% sample(Name, Type, Ref)</pre>
%%
%% + `Name'
%% <br/>Specifies the name of the metric to run the sample.
%%
%% + `Type'
%% <br/>Specifies the type provided to the `exometer:new()' call (before it
%% was translated by the type - exometer entry map).
%%
%% + `Ref'
%% <br/>Will contain the same tuple returned as `Ref' by the module's
%% `new()' function.
%%
%% This function is only used by probes, where it is periodically called
%% to sample a local sub system such as /proc or netlink in order to
%% update its data points.
%%
%% Any exometer entry-based implementation should do nothing and return
%% `ok'.
%%
%% === get_datapoints/3 ===
%%
%% The `get_datapoints()' function is invoked as follows:
%%
%% <pre lang="erlang">
%% get_datapoints(Name, Type, Ref)</pre>
%%
%% + `Name'
%% <br/>Specifies the name of the metric to return available datapoints for.
%%
%% + `Type'
%% <br/>Specifies the type provided to the `exometer:new()' call (before it
%% was translated by the type - exometer entry map).
%%
%% + `Ref'
%% <br/>Will contain the same tuple returned as `Ref' by the module's
%% `new()' function.
%%
%% This function should return a list of all data points supported by
%% the exometer entry implementation. The returned data points shall
%% be supported by the module's `get_value()' function.
%%
%%
%% === setopts/3 ===
%%
%% The `setopts()' function is invoked as follows:
%%
%% <pre lang="erlang">
%% setopts(Entry, Options, Type, Ref)</pre>
%%
%% + `Entry'
%% The (opaque) exometer entry record. See {@link exometer_info} for
%% information on how to inspect the data structure.
%%
%% + `Options'
%% <br/>Specifies an option list that contains additional setup directives to
%% the entry. The actual options to support are implementation dependent.
%%
%% + `Type'
%% <br/>Specifies the type provided to the `exometer:new()' call (before it
%% was translated by the type - exometer entry map).
%%
%% + `Ref'
%% <br/>Will contain the same tuple returned as `Ref' by the module's
%% `new()' function.
%%
%% This function should modify the behavior of the given metric by the
%% options provided in the `Options' property list.
%%
%% The function should return either `ok' or `{error, Reason}', where
%% `Reason' contains a descriptive reason for a failure to set one or more
%% options.
%%
%% @end
-module(exometer_entry).
-include("exometer.hrl").
-export_type([name/0, type/0, options/0, datapoint/0, datapoints/0, value/0, ref/0, error/0]).
-type name() :: list().
-type type() :: atom().
-type options() :: [{atom(), any()}].
-type datapoints() :: [datapoint()].
-type datapoint() :: exometer:datapoint().
-type value() :: any().
-type ref() :: any().
-type error() :: {error, any()}.
-callback behaviour() -> exometer:behaviour().
-callback new(name(), type(), options()) ->
ok | {ok, ref()} | error().
-callback delete(name(), type(), ref()) ->
ok | error().
-callback get_value(name(), type(), ref(), datapoints()) ->
[{datapoint(), value()}].
-callback update(name(), value(), type(), ref()) ->
ok | {ok, value()} | error().
-callback reset(name(), type(), ref()) ->
ok | {ok, value()} | error().
-callback sample(name(), type(), ref()) ->
ok | error().
-callback get_datapoints(name(), type(), ref()) ->
datapoints().
-callback setopts(exometer:entry(), options(), exometer:status()) ->
ok | error(). | deps/exometer_core/src/exometer_entry.erl | 0.812793 | 0.468 | exometer_entry.erl | starcoder |
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(easton_99_fb_56316).
-include_lib("eunit/include/eunit.hrl").
%% Directory that backs the on-disk index used by this regression suite.
idx_dir() ->
"idx/99_fb_56316".
%% Open a fresh easton index for a test run: destroy any leftover index
%% directory from a previous run first, so each run starts clean.
open_idx() ->
case filelib:is_dir(idx_dir()) of
true ->
ok = easton_index:destroy(idx_dir());
false ->
ok
end,
{ok, Idx} = easton_index:open(idx_dir()),
Idx.
%% Fixture teardown: close the index handle opened by open_idx/0.
close_idx(Idx) ->
easton_index:close(Idx).
%% EUnit generator: runs the regression case for FB 56316 inside a
%% setup/teardown fixture with a 10 second timeout.
bookmark_test_() ->
{"Regression: FB 56316",
{timeout, 10, [
{setup,
fun open_idx/0,
fun close_idx/1,
fun(Idx) -> {with, Idx, [
fun multi_geometry_test/1
]} end
}
]}
}.
%% Index two documents that each carry two point geometries, verify a
%% whole-world bounding-box search returns all four geometry hits, then
%% remove the documents and verify the search comes back empty.
multi_geometry_test(Idx) ->
lists:foreach(fun({Id, Geoms}) ->
?assertEqual(ok, easton_index:update(Idx, Id, Geoms))
end, geometries()),
BBox = easton_shapes:rectangle(-180, -90, 180, 90),
{ok, Results} = easton_index:search(Idx, BBox),
%% Two docs x two geometries each = 4 expected results.
?assertEqual(4, length(Results)),
lists:foreach(fun({Id, _Geoms}) ->
?assertEqual(ok, easton_index:remove(Idx, Id))
end, geometries()),
?assertEqual({ok, []}, easton_index:search(Idx, BBox)).
%% Static test data: two document ids, each mapped to a list of two
%% GeoJSON-style Point geometries (EJSON tuples of {Key, Value} lists).
geometries() ->
[
{<<"10">>, [
{[
{<<"type">>, <<"Point">>},
{<<"coordinates">>, [-78.8234112, 35.7637407]}
]},
{[
{<<"type">>, <<"Point">>},
{<<"coordinates">>, [-78.777361, 35.9045565]}
]}
]},
{<<"14">>, [
{[
{<<"type">>, <<"Point">>},
{<<"coordinates">>, [-78.794028, 35.8497739]}
]},
{[
{<<"type">>, <<"Point">>},
{<<"coordinates">>, [-78.9488454, 35.9652997]}
]}
]}
]. | test/easton_99_fb_56316.erl | 0.752286 | 0.463687 | easton_99_fb_56316.erl | starcoder
%% -*- coding: utf-8 -*-
%% @private
%% @doc The dumpsterl spec tree data structure.
%%
%% A spec is represented as a hierarchical tree of type classes. Nodes
%% of this tree are represented as tuples:
%%
%% `{Class, Data, SubSpec}'
%%
%% <ul>
%% <li> `Class' is a term (in most cases an atom) describing
%% the type this node represents in the type hierarchy.</li>
%%
%% <li> `Data' is node-specific data as a tuple:
%%
%% `{Stats, Ext}'
%%
%% <ul>
%% <li> Stats is an instance of #stats{} with general statistics data, eg.:
%% <ul>
%% <li>an integer count of data items captured by this type class</li>
%% <li>an estimator of cardinality (number of different unique values)</li>
%% <li>points of interest (min, max)</li>
%% <li>an even sampling of the data values</li>
%% </ul>
%% </li>
%% <li> Ext is class-specific extra/extended data to hold
%% further attributes and/or statistics.</li>
%% </ul>
%% </li>
%%
%% <li> `SubSpec' is a list of child nodes. Depending on the kind of type
%% denoted by `Class', this may be a list of subtypes or in case of
%% generic types, a list of specs corresponding to the elements
%% or fields of this type. The exact semantics of `SubSpec' are left
%% up to `Class', but the tree is uniformly recursive through `SubSpecs'.
%% </li>
%% </ul>
-module(ds_spec).
-author("<NAME> <<EMAIL>>").
-export([ add/2
, new/0
, new/1
, new/2
, join/2
, postproc/1
, compact/1
, join_up/1
]).
-export_type([ spec/0 ]).
-ifdef(TEST).
-export([ eq/2 ]).
-include_lib("eunit/include/eunit.hrl").
-endif.
-type spec() :: {class(), data(), [spec()]}.
-type class() :: atom()
| {tuple, pos_integer()}
| {record, {atom(), pos_integer()}}.
-type data() :: {ds_stats:stats(), ds_types:ext_data()}.
%% Initialize a new data spec
%% The root of a fresh spec tree is a 'term' node with empty stats.
new() -> new(term).
%% Create an empty spec node for Class with freshly initialized
%% statistics and class-specific extended data.
new(Class) ->
Data = {ds_stats:new(), ds_types:ext_new(Class)},
{Class, Data, []}.
%% Initialize, immediately adding one data term
new(Class, Data) -> add(Data, new(Class)).
%% Add the value V with attributes A to Spec.
%% add/2 is written so it can be used as a function to lists:foldl/3.
%% Dispatch depends on ds_types:subtype/2, which classifies V relative
%% to this node's Class (leaf, fields, elements, attrs, improper list,
%% or an abstract subtype).
add({V,_A}=VA, {Class, Data, SubSpec}) ->
add(VA, {Class, Data, SubSpec}, ds_types:subtype(V, Class)).
%% Leaf type: record the value in this node only.
add(VA, {Class, Data0, SubSpec}, '$null') -> % leaf type
{Class, update(VA, Class, Data0), SubSpec};
%% Tuple/record-like value: one child spec per field, merged positionally.
add({_V, A}=VA, {Class, Data0, SubSpec}, {'$fields', Fields}) -> % many subtypes
{Class, update(VA, Class, Data0), merge_fields({Fields, A}, SubSpec)};
%% Container value: all elements feed a single child spec.
add({_V, A}=VA, {Class, Data0, SubSpec}, {'$elements', Items}) -> % one subtype
{Class, update(VA, Class, Data0), merge_items({Items, A}, SubSpec)};
%% Map-like value: one child spec per key; the key list lives in Ext,
%% so merge_attrs must update Ext alongside the children.
add({_V, A}=VA, {Class, Data0, SubSpec0}, {'$attrs', Dict}) -> % dict of subtypes
%% The attribute spec is stored in Ext, merge_attrs needs to modify it.
{Stats, Ext0} = update(VA, Class, Data0),
{Ext, SubSpec} = merge_attrs({Dict, A}, Ext0, SubSpec0),
{Class, {Stats, Ext}, SubSpec};
%% Improper list: one child spec for the items, one for the tail.
add({_V, A}=VA, {Class, Data0, SubSpec}, {'$improper_list', Items, Tail}) ->
%% one subtype for list items, one for tail
{Class, update(VA, Class, Data0), merge_improper({Items, Tail, A}, SubSpec)};
%% Abstract type: data is NOT recorded here (see join_up/1 which
%% propagates child stats upward later); recurse into the subtype.
add(VA, {Class, Data0, SubSpec}, SubType) -> % abstract type
{Class, Data0, merge(VA, SubType, SubSpec)}.
%% Fold one value into a node's {Stats, Ext} pair.
update(VA, Class, {Stats, Ext}) ->
{ds_stats:add(VA, Stats), ds_types:ext_add(VA, Class, Ext)}.
%% choose subspec given by Class or create it from scratch,
%% add V to it and return the resulting Spec.
merge(VA, Class, Spec) ->
case lists:keyfind(Class, 1, Spec) of
false -> [new(Class, VA) | Spec];
SubSpec -> lists:keystore(Class, 1, Spec, add(VA, SubSpec))
end.
%% merge the per-field sub-specs of tuple/record types
%% First value of this shape: create one fresh child spec per field.
merge_fields({Vs, A}, []) ->
lists:map(fun(V) -> new(term, {V, A}) end, Vs);
%% Subsequent values: add each field value to its positional child.
%% lists:zip/2 implies Vs and SubSpec must have equal length.
merge_fields({Vs, A}, SubSpec) ->
lists:map(fun({V, S}) -> add({V, A}, S) end, lists:zip(Vs, SubSpec)).
%% merge elements of container types into the single inferior spec
merge_items({Vs, A}, []) ->
[lists:foldl(fun(V, Spec) -> add({V, A}, Spec) end, new(term), Vs)];
merge_items({Vs, A}, [SubSpec]) ->
[lists:foldl(fun(V, Spec) -> add({V, A}, Spec) end, SubSpec, Vs)].
%% merge a [{Key, Value}] dictionary into a list of sub-specs.
%% Attrs contains a key list [Key] so that if the N-th item of
%% Attrs is Key, then the N-th item of SpecL corresponds to the
%% values associated with Key.
%% Return {Attrs, SpecL} with updated data.
merge_attrs({Dict, A}, Attrs, SpecL) ->
lists:foldl(fun({K, V}, Acc) -> merge_attr({K, V}, A, Acc) end,
{Attrs, SpecL}, Dict).
%% Merge a single {Key, Value} pair, keeping Attrs sorted and SpecL
%% aligned with it by position.
merge_attr({K, V}, A, {Attrs0, SpecL0}) ->
case ds_utils:index(K, Attrs0) of
error ->
%% New key: insert it in sort order and splice a fresh child
%% spec into SpecL at the matching position.
Attrs = lists:sort([K|Attrs0]),
Index = ds_utils:index(K, Attrs),
{SpecL1, SpecL2} = lists:split(Index-1, SpecL0),
SpecL = lists:append([SpecL1, [new(term, {V, A})], SpecL2]),
{Attrs, SpecL};
Index ->
%% Known key: update the child spec at its position in place.
{SpecL1, [Spec0|SpecL2]} = lists:split(Index-1, SpecL0),
Spec = add({V, A}, Spec0),
SpecL = lists:append([SpecL1, [Spec], SpecL2]),
{Attrs0, SpecL}
end.
%% merge spec for improper list
%% Children are always a two-element list: [ItemsSpec, TailSpec].
merge_improper(VA, []) ->
merge_improper(VA, [new(term), new(term)]);
merge_improper({Vs, Vt, A}, [ListSpec0, TailSpec0]) ->
[ListSpec] = merge_items({Vs, A}, [ListSpec0]),
TailSpec = add({Vt, A}, TailSpec0),
[ListSpec, TailSpec].
%% Join two spec trees into one.
%% This clause is for maps where the children lists are joined
%% based on attributes stored in Ext.
join({map=Class, {Stats1, Ext1}, ChildL1},
{Class, {Stats2, Ext2}, ChildL2}) ->
Stats = ds_stats:join(Stats1, Stats2),
Ext = lists:usort(Ext1 ++ Ext2), % joined attribute list
%% Pair each side's attribute keys with their child specs so we can
%% look children up by key rather than by position.
AttrDict1 = lists:zip(Ext1, ChildL1),
AttrDict2 = lists:zip(Ext2, ChildL2),
ChildL =
%% For each attribute in joined Ext, look it up in both children
%% and join appropriately into the result child spec list.
lists:foldl(
fun(Attr, Acc) ->
case {lists:keyfind(Attr, 1, AttrDict1),
lists:keyfind(Attr, 1, AttrDict2)} of
{ {Attr, Spec1}, {Attr, Spec2} } ->
[join(Spec1, Spec2) | Acc];
{ {Attr, Spec1}, false } ->
[Spec1 | Acc];
{ false, {Attr, Spec2} } ->
[Spec2 | Acc]
end
end, [], lists:reverse(Ext)),
%% Folding over the reversed key list with prepends leaves ChildL in
%% the same (sorted) order as Ext.
{Class, {Stats, Ext}, ChildL};
%% Subspec (child) trees are joined in a class-specific manner:
%% - joined by attributes stored in Ext for maps (see above clause);
%% - zipped (by position) for other generic types;
%% - joined (by class) for all other types.
join({Class, {Stats1, Ext1}, ChildL1},
{Class, {Stats2, Ext2}, ChildL2}) ->
Stats = ds_stats:join(Stats1, Stats2),
Ext = ds_types:ext_join(Class, Ext1, Ext2),
ChildL = case ds_types:kind(Class) of
generic -> lists:zipwith(fun join/2, ChildL1, ChildL2);
_ -> join_specs(ChildL1, ChildL2)
end,
{Class, {Stats, Ext}, ChildL}.
%% Join two lists of sibling specs by class: children present on both
%% sides are joined recursively, one-sided children are kept as-is.
join_specs(Acc, []) -> Acc;
join_specs(Acc0, [{Class,_Data,_ChildL}=Spec | Rest]) ->
Acc = case lists:keyfind(Class, 1, Acc0) of
false -> [Spec | Acc0];
AccSpec -> lists:keystore(Class, 1, Acc0, join(AccSpec, Spec))
end,
join_specs(Acc, Rest).
%% Postprocess a raw spec tree to make it ready for consumption by the gui.
%% Pipeline: compact (drop trivial abstract nodes), join_up (propagate
%% stats to abstract nodes), sort_count (order children for display).
postproc(Spec) -> sort_count(join_up(compact(Spec))).
%% Rearrange the spec tree so that nodes of non-generic types have their
%% children sorted by decreasing order of count. This makes the gui nicer.
%% Generic types keep positional child order (position carries meaning).
sort_count({Class, Data, Children0}) ->
Children = [sort_count(Ch) || Ch <- Children0],
case ds_types:kind(Class) of
generic ->
{Class, Data, Children};
_ ->
{Class, Data, lists:sort(fun sort_count_f/2, Children)}
end.
%% Sort predicate: descending by the node's stats count.
sort_count_f({_Class1, {Stats1,_Ext1},_Ch1},
{_Class2, {Stats2,_Ext2},_Ch2}) ->
ds_stats:get_count(Stats1) >= ds_stats:get_count(Stats2).
%% Compact the tree by cutting unnecessary abstract types
%% (those having a single child and no terms captured themselves)
%% from the tree. E.g. if all terms are tuples of three, the tree
%% term -> tuple -> {tuple, 3} -> ...
%% will be simplified to
%% {tuple, 3} -> ...
%% without any loss of information.
compact({Class, {Stats,_Ext} = Data, [SubSpec1]}) ->
Kind = ds_types:kind(Class),
Count = ds_stats:get_count(Stats),
%% Only abstract (non-generic) nodes with zero own count are cut;
%% generic nodes and nodes that captured terms are kept.
if Kind =/= generic andalso Count =:= 0 ->
compact(SubSpec1);
true ->
{Class, Data, [compact(SubSpec1)]}
end;
compact({Class, Data, SubSpec}) ->
{Class, Data, [compact(SSp) || SSp <- SubSpec]}.
%% For performance, abstract type nodes do not update their data
%% when collecting terms, since the same information will be stored
%% further down the tree. Before evaluating/visualizing the tree,
%% this function should be used to propagate the data upwards so
%% the abstract nodes also have all the statistics.
join_up({Class, Data0, SubSpec0}) ->
%% Depth-first: children are joined up before their data is folded
%% into this node.
SubSpec = [join_up(SSp) || SSp <- SubSpec0],
Data = case ds_types:kind(Class) of
generic -> Data0; % don't cross type domains with the join
_ -> {Class, JointData} = join_data({Class, Data0}, SubSpec),
JointData
end,
{Class, Data, SubSpec}.
%% Fold the {Stats, Ext} of each child spec into the parent's data.
join_data(Acc, SubSpec) -> lists:foldl(fun join_data_f/2, Acc, SubSpec).
join_data_f({_Class, {Stats1, Ext1}, _SubSpec}, {SuperClass, {Stats0, Ext0}}) ->
{SuperClass,
{ds_stats:join(Stats0, Stats1), ds_types:ext_join(SuperClass, Ext0, Ext1)}}.
%% Tests
-ifdef(TEST).
%% Return true iff two spec instances are equivalent.
%% The actual term-level representation may be different, hence this function.
%%
%% Fix: the original computed the children comparison eagerly with
%% lists:zipwith/3 BEFORE checking the lengths, so two specs with
%% different numbers of children crashed (zipwith badarg) instead of
%% returning false. The length check is now a guard, and a mismatch
%% falls through to the catch-all false clause.
eq({Class, {Stats1, Ext}, ChL1},
   {Class, {Stats2, Ext}, ChL2}) when length(ChL1) =:= length(ChL2) ->
    %% Children are order-insensitive: compare them pairwise after sorting.
    Pairs = lists:zip(lists:sort(ChL1), lists:sort(ChL2)),
    ds_stats:eq(Stats1, Stats2) andalso
        lists:all(fun({S1, S2}) -> eq(S1, S2) end, Pairs);
eq(_Spec0, _Spec1) -> false.
-endif. | src/ds_spec.erl | 0.560854 | 0.519704 | ds_spec.erl | starcoder |
%% @copyright 2011 Zuse Institute Berlin
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%% @author <NAME> <<EMAIL>>
%% @doc Unit tests specifically for histogram_rt in addition to the
%% tests in histogram_SUITE.
%% @end
%% @version $Id$
-module(histogram_rt_SUITE).
-author('<EMAIL>').
-vsn('$Id$').
-include("unittest.hrl").
-include("scalaris.hrl").
-compile(export_all).
%% Common Test: list of test cases in this suite.
all() ->
[
add_keys,
merge_keys
].
%% Per-testcase timeout of 30 seconds.
suite() -> [ {timetrap, {seconds, 30}} ].
%% Delegate suite setup/teardown to the shared test helper.
init_per_suite(Config) ->
unittest_helper:init_per_suite(Config).
end_per_suite(Config) ->
unittest_helper:end_per_suite(Config).
%% Tolerance for comparing keys in non-chord key spaces, scaled by the
%% size of the routing-table key space.
-define(EPSILON, 1.0e-8 * ?RT:n()).
%% Property: adding up to Size distinct keys to a histogram_rt of that
%% capacity keeps every key as its own bucket with count 1, ordered by
%% succ_ord_key/3 relative to BaseKey.
-spec prop_add_keys(BaseKey::?RT:key(), Size::1..50, Values::[?RT:key(),...]) -> true.
prop_add_keys(BaseKey, Size, Values0) ->
H = histogram_rt:create(Size, BaseKey),
%% Cap the number of inserted values at the histogram capacity so no
%% bucket merging can occur.
Values = lists:sublist(Values0, Size),
H2 = lists:foldl(fun histogram_rt:add/2, H, Values),
%% check results
SortedValues = lists:sort(fun(Val, Val2) ->
succ_ord_key(Val, Val2, BaseKey)
end, Values),
%ct:pal("BaseKey: ~p SortedValues: ~p Result: ~p", [BaseKey, SortedValues, histogram_rt:get_data(H2)]),
Result = lists:map(fun(Value) -> {Value, 1} end, SortedValues),
?compare(fun(Actual, Expected) ->
check_result(Actual, Expected, BaseKey)
end, histogram_rt:get_data(H2), Result),
true.
%% Test case: one deterministic regression input, then randomized runs
%% of the property via the tester framework.
add_keys(_Config) ->
prop_add_keys(?RT:hash_key("0"), 17, [?RT:hash_key("0")]),
tester:test(?MODULE, prop_add_keys, 3, 250, [{threads, 2}]).
%% Property: inserting two keys into a size-1 histogram forces a merge;
%% the resulting single bucket must sit at the midpoint (split key)
%% between the two keys, with count 2.
-spec prop_merge_keys(BaseKey::?RT:key(), Key1::?RT:key(), Key2::?RT:key()) -> true.
prop_merge_keys(BaseKey, Key1, Key2) ->
%% insert two keys which will
H = histogram_rt:create(1, BaseKey),
H2 = histogram_rt:add(Key1, H),
H3 = histogram_rt:add(Key2, H2),
%% check results
%% Expected merge point: equal keys merge onto themselves; otherwise
%% the 1/2 split key between the two keys in succ_ord_key order.
SplitKey =
if Key1 =:= Key2 -> Key1;
true ->
case succ_ord_key(Key1, Key2, BaseKey) of
true -> ?RT:get_split_key(Key1, Key2, {1,2});
false -> ?RT:get_split_key(Key2, Key1, {1,2})
end
end,
%ct:pal("Key1: ~p (Range: ~p) Key2: ~p (Range: ~p) BaseKey: ~p SplitKey: ~p Result: ~p, Raw: ~p", [Key1, ?RT:get_range(Key1, BaseKey), Key2, ?RT:get_range(Key2, BaseKey), BaseKey, SplitKey, histogram_rt:get_data(H3), histogram:get_data(element(1, H3))]),
?compare(fun(Actual, Expected) ->
check_result(Actual, Expected, BaseKey)
end, histogram_rt:get_data(H3), [{SplitKey, 2}]),
true.
%% Test case: randomized runs of the merge property.
merge_keys(_Config) ->
tester:test(?MODULE, prop_merge_keys, 3, 250, [{threads, 2}]).
%% Compare histogram contents: exact equality for chord-style integer
%% key spaces, tolerance-based comparison otherwise (split keys may not
%% be representable exactly in other key spaces).
check_result(Actual, Expected, BaseKey) ->
case rt_SUITE:default_rt_has_chord_keys() of
true -> ?equals(Actual, Expected);
false -> check_elements(Actual, Expected, BaseKey)
end.
%% Element-wise comparison with ?EPSILON tolerance on key distance;
%% counts must still match exactly. Both lists must be equally long.
check_elements([], [], _BaseKey) ->
true;
check_elements([{El1, Count1} | Rest], [{El2, Count2} | Rest2], BaseKey) ->
%ct:pal("El: ~p, El2:~p", [El1, El2]),
%% Order the two keys before measuring their distance.
case nodelist:succ_ord_id(El1, El2, BaseKey) of
true -> Range = ?RT:get_range(El1, El2);
false -> Range = ?RT:get_range(El2, El1)
end,
%ct:pal("Range: ~p", [Range]),
%ct:pal("Check: ~p", [Range < ?EPSILON orelse El1 =:= El2]),
?assert_w_note(Range < ?EPSILON orelse El1 =:= El2,
{Range, '>=', ?EPSILON, ';', El1, '=/=', El2}),
?equals(Count1, Count2),
check_elements(Rest, Rest2, BaseKey).
%% @doc Like nodelist:succ_ord_id/3 but here the BaseKey is the biggest
%% possible key and not the smallest. Returns false whenever
%% K1 =:= BaseKey (BaseKey sorts last), otherwise orders keys on the
%% ring starting just after BaseKey.
-spec succ_ord_key(K1::?RT:key(), K2::?RT:key(), BaseKey::?RT:key()) -> boolean().
succ_ord_key(K1, K2, BaseKey) ->
    if
        K1 =:= BaseKey -> false;
        K1 > BaseKey, K2 > BaseKey -> K1 =< K2;
        K1 < BaseKey, K2 =< BaseKey -> K1 =< K2;
        K1 > BaseKey, K2 =< BaseKey -> true;
        true -> false
    end.
% Copyright 2018 <NAME>
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
-module(ogonek_research).
-include("include/ogonek.hrl").
-export([from_json/1,
from_doc/1,
to_doc/1,
to_json/1,
to_json/2]).
-export([all_researches/0,
has_requirement/2,
has_requirements/2,
progress/1,
research_duration/1,
possible_research/1,
possible_research/2,
research_info_json/3]).
%% All research definitions known to the system, read from application
%% config; empty list when the 'research' env key is unset.
-spec all_researches() -> [rdef()].
all_researches() ->
case application:get_env(research) of
undefined -> [];
{ok, Research} -> Research
end.
%% Filter the configured research definitions down to those whose
%% requirements are met by the already-finished Research list.
-spec possible_research([research()]) -> [rdef()].
possible_research(Research) ->
possible_research(Research, all_researches()).
-spec possible_research([research()], [rdef()]) -> [rdef()].
possible_research(Research, Definitions) ->
lists:filter(fun(R) -> research_available(Research, R) end, Definitions).
%% A definition with no requirements is always available; otherwise all
%% of its requirements must be satisfied.
-spec research_available([research()], rdef()) -> boolean().
research_available(_Research, #rdef{requirements=[]}) -> true;
research_available(Research, #rdef{requirements=Reqs}) ->
lists:all(fun(Req) -> has_requirement(Research, Req) end, Reqs).
%% Check one requirement against the list of finished research.
%% Building requirements are unconditionally treated as satisfied here
%% (presumably checked elsewhere against the user's buildings — this
%% function only sees research).
-spec has_requirement([research()], requirement()) -> boolean().
has_requirement(_, {building, _, _}) -> true;
has_requirement([], _Requirement) -> false;
has_requirement([Research | Rs], {research, Name, MinLevel}=Req) ->
if Research#research.research == Name andalso Research#research.level >= MinLevel ->
true;
true ->
has_requirement(Rs, Req)
end.
%% Research duration in seconds, derived from research_lab and
%% research_facility building levels; 'undefined' when neither exists.
-spec research_duration([building()]) -> integer() | undefined.
research_duration(Buildings) ->
% TODO: proper research duration distribution
ResearchLab = ogonek_buildings:get_building_max_level(Buildings, research_lab),
ResearchFacility = ogonek_buildings:get_building_max_level(Buildings, research_facility),
%% A research facility counts double towards the speed-up.
CombinedLevel = ResearchLab + ResearchFacility * 2,
if CombinedLevel > 0 ->
% for now we simply limit the duration to 2h at minimum
% usually the formula should take care of that
round(max(2, 8 - math:pow(CombinedLevel, 0.3)) * 3600);
true ->
undefined
end.
%% True iff every requirement in the list is satisfied.
-spec has_requirements([research()], [requirement()]) -> boolean().
has_requirements(Research, Requirements) ->
lists:all(fun(Req) -> has_requirement(Research, Req) end, Requirements).
%% Deserialize a research record from its JSON (EJSON) representation;
%% all listed keys are mandatory.
-spec from_json(json_doc()) -> {ok, research()} | {error, invalid}.
from_json(UserJson) ->
Keys = [<<"_id">>, <<"user">>, <<"research">>, <<"level">>, <<"created">>, <<"finish">>, <<"progress">>],
case ogonek_util:keys(Keys, UserJson) of
[Id, User, Research, Level, Created, Finish, Progress] ->
{ok, #research{id=Id,
user=User,
research=to_research(Research),
level=Level,
created=Created,
finish=Finish,
progress=Progress}};
_Otherwise ->
{error, invalid}
end.
%% Deserialize a research record from a MongoDB document map;
%% id fields are converted from their Mongo representation.
-spec from_doc(Doc :: map()) -> {ok, research()} | {error, invalid}.
from_doc(Doc) ->
case Doc of
#{<<"_id">> := Id,
<<"user">> := User,
<<"research">> := Research,
<<"level">> := Level,
<<"created">> := Created,
<<"finish">> := Finish,
<<"progress">> := Progress} ->
{ok, #research{id=ogonek_mongo:from_id(Id),
user=ogonek_mongo:from_id(User),
research=to_research(Research),
level=Level,
created=Created,
finish=Finish,
progress=Progress}};
_Otherwise ->
{error, invalid}
end.
%% Serialize a research record into a MongoDB document map; the "_id"
%% key is only present when the record already has an id.
-spec to_doc(research()) -> map().
to_doc(Research) ->
Doc = #{<<"user">> => ogonek_mongo:to_id(Research#research.user),
<<"research">> => erlang:atom_to_binary(Research#research.research, utf8),
<<"level">> => Research#research.level,
<<"created">> => Research#research.created,
<<"finish">> => Research#research.finish,
<<"progress">> => Research#research.progress},
ogonek_util:with_id(Research#research.id, Doc).
%% Serialize a research record to JSON; timestamps are converted to
%% unix milliseconds.
-spec to_json(research()) -> tuple().
to_json(Research) ->
to_json(Research, true).
%% The boolean flag is currently unused (kept for interface symmetry
%% with other ogonek serializers — assumption, not visible here).
-spec to_json(research(), boolean()) -> tuple().
to_json(Research, _Db) ->
Values = [{<<"user">>, Research#research.user},
{<<"research">>, Research#research.research},
{<<"level">>, Research#research.level},
{<<"created">>, ogonek_util:unixtime_to_millis(Research#research.created)},
{<<"finish">>, ogonek_util:unixtime_to_millis(Research#research.finish)},
{<<"progress">>, Research#research.progress}
]
++ ogonek_util:if_defined(<<"_id">>, Research#research.id),
ogonek_util:doc(<<"research">>, Values).
%% Build the client-facing research summary: currently running research
%% (with its name revealed only once progress reaches 50%), finished
%% research sorted by level, and the current research duration.
-spec research_info_json(research() | undefined, [research()], integer() | undefined) -> json_doc().
research_info_json(Running, Finished, Duration) ->
Status = case Running of
undefined -> [];
_IsRunning ->
Finish = Running#research.finish,
Created = Running#research.created,
Progress = progress(Created, Finish),
%% Hide the research name until it is at least half finished.
InProgress = if Progress >= 50 -> [{<<"name">>, Running#research.research}];
true -> []
end,
Status0 = {[{<<"finish">>, ogonek_util:unixtime_to_millis(Finish)},
{<<"created">>, ogonek_util:unixtime_to_millis(Created)}
] ++ InProgress},
[{<<"status">>, Status0}]
end,
%% Sort finished research by record field #4 (level).
Sorted = lists:keysort(4, Finished),
Research = lists:map(fun(R) ->
{[{<<"name">>, R#research.research},
{<<"level">>, R#research.level}]}
end, Sorted),
ogonek_util:doc(<<"research">>,
[{<<"research">>, Research},
{<<"duration">>, Duration}
] ++ Status).
%% Convert a binary type name to an atom; uses binary_to_existing_atom
%% so untrusted input cannot grow the atom table.
-spec to_research(binary()) -> atom().
to_research(TypeName) when is_binary(TypeName) ->
% this looks scary but the valid list of building types
% should be already existing via configuration initialization
erlang:binary_to_existing_atom(TypeName, utf8).
%% Progress of a research run as an integer percentage, computed from
%% elapsed vs. total seconds (per ogonek_util:seconds_since).
-spec progress(research()) -> integer().
progress(#research{created=From, finish=Until}) ->
    progress(From, Until).

-spec progress(erlang:timestamp(), erlang:timestamp()) -> integer().
progress(From, Until) ->
    TotalSeconds = ogonek_util:seconds_since(From, Until),
    ElapsedSeconds = ogonek_util:seconds_since(From),
    ElapsedSeconds * 100 div TotalSeconds.
%% Support for CDMI Object ID
%%
%% Object IDs are used to identify objects in CDMI. Object IDs are intended
%% to be globally unique values that have a specific structure. The native
%% format of an object ID is a variable length byte sequence with a maximum
%% size of 40 bytes. This leaves an implementer up to 32 bytes for data
%% that can be used for whatever purpose is needed.
%%
%% Refer to clause 5.10 of the CDMI specification
%% for more information.
%%
-module(objectid).
-export([build_objectid/1, build_objectid/2, to_base16/1, from_base16/1]).
%% The SNMP Enterprise Number for your organization in network byte
%% order. See RFC 2578 and
%% http://www.iana.org/assignments/enterprise-numbers
%%
%% This reference implementation uses a value of 0.
-define(ENTERPRISENUM, 0).
%% @doc Build an object ID based on our own enterprise number. Data is
%% expected to be either a string or a binary.
%%
%% @spec build_objectid(Data::{string() | binary()}) -> binary() | {error, atom()}
build_objectid(Data) ->
build_objectid(?ENTERPRISENUM, Data).
%% @doc Build an object ID given an enterprise number and data as string.
%%
%% @spec build_objectid(Enum::integer(), Data::string()) -> binary() | {error, atom()}
%% String input is normalized to a binary and handled by the binary clause.
build_objectid(Enum, Data) when is_list(Data) ->
build_objectid(Enum, list_to_binary(Data));
%% @doc Build an object ID given an enterprise number and data as a
%% binary. We ensure here that the Data is not more than 32 bytes. The
%% object ID is composed of a number of fields:
%%
%% +----------+------------+-----------+--------+-------+-----------+
%% | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |..| 39 |
%% +----------+------------+-----------+--------+-------+-----------+
%% | Reserved | Enterprise | Reserverd | Length | CRC | Opaque |
%% | (zero) | Number | (zero) | | | Data |
%% +----------+------------+-----------+--------+-------+-----------+
%%
%% @spec build_objectid(Enum::integer(), Data::binary()) -> binary() | {error, atom()}
build_objectid(Enum, Data) when is_binary(Data) ->
Length = size(Data),
case (Length =< 32) of
true ->
Bin = <<0:8, Enum:24, 0:8, Length:8, 0:16, Data/binary>>,
Crc = crc16:crc16(binary_to_list(Bin)),
io:format("CRC: ~p~n", [Crc]),
<<0:8, Enum:24, 0:8, Length:8, Crc:16, Data/binary>>;
false ->
{error, badarg}
end.
%% @doc Convert an object ID to a Base16 (uppercase hex) encoded string,
%% two hex digits per input byte.
%% @spec to_base16(Bin::binary()) -> string()
to_base16(Bin) ->
    Encode = fun(Byte) -> io_lib:format("~2.16.0B", [Byte]) end,
    lists:flatten(lists:map(Encode, binary_to_list(Bin))).
%% @doc Convert a Base16-encoded object ID string back to its binary
%% form. Accepts upper- or lower-case hex digits; the input length must
%% be even.
%% @spec from_base16(Encoded::string()) -> binary()
from_base16(Encoded) ->
    from_base16(Encoded, []).

from_base16([], Bytes) ->
    list_to_binary(lists:reverse(Bytes));
from_base16([Hi, Lo | Rest], Bytes) ->
    %% each pair of hex characters decodes to one byte
    Byte = list_to_integer([Hi, Lo], 16),
    from_base16(Rest, [Byte | Bytes]).
-module(matrixCalculator).
-export([calculate/1]).
-include("results.hrl").
-record(currentHolder,{matrix, row, column, startRow, startColumn, side}).
%% Build a Side x Side matrix as a fixed-size array whose every entry is
%% itself a fixed-size array; all cells start as the array default.
createEmptyMatrix(Side) ->
    Outer = array:new(Side, fixed),
    array:map(fun(_Index, _Cell) -> array:new(Side, fixed) end, Outer).
%% Wrap an index into the range [0, Side). During the diagonal walk the
%% raw row/column may run at most one period below 0 or above Side.
actual(Value, Side) when Value < 0 -> Value + Side;
actual(Value, Side) when Value >= Side -> Value - Side;
actual(Value, _Side) -> Value.
%% Wrapped (in-range) row index of the current walk position.
actualRow(Current) ->
actual(Current#currentHolder.row, Current#currentHolder.side).
%% Wrapped (in-range) column index of the current walk position.
actualColumn(Current) ->
actual(Current#currentHolder.column, Current#currentHolder.side).
%% Advance the walk to the position for the next number.
%% While Count is not a multiple of Side we stay on the current diagonal
%% (row + 1, column + 1). After Side numbers the diagonal is complete:
%% jump to the start of the next diagonal (one row below, one column
%% left of the previous diagonal's start) and remember that start.
getNext(Current, Count) ->
if
Count rem Current#currentHolder.side /= 0 ->
%% continue along the current diagonal
Current#currentHolder{row=Current#currentHolder.row + 1, column=Current#currentHolder.column + 1};
true ->
%% diagonal complete: open the next one
StartRow = Current#currentHolder.startRow + 1,
StartColumn = Current#currentHolder.startColumn - 1,
Current#currentHolder{row=StartRow, column=StartColumn, startRow=StartRow, startColumn=StartColumn}
end.
%% Verify the magic-square property: every line along both axes and both
%% main diagonals of Matrix must sum to ExpectedSum. Returns a boolean.
testResult(Matrix, ExpectedSum, Side) ->
IndexList = lists:seq(0, Side - 1),
%% cell accessor: outer array indexed first, then the inner array
GetMatrix = fun(RowIndex, ColumnIndex) ->
Row = array:get(RowIndex, Matrix),
array:get(ColumnIndex, Row)
end,
%% lines along one axis: the RowIndex-th element of every inner array
RowsGetter = fun() ->
lists:map(
fun(RowIndex) ->
array:to_list(array:map(fun(_, Column) -> array:get(RowIndex, Column) end, Matrix))
end,
IndexList)
end,
%% lines along the other axis: each inner array as a list
ColumnsGetter = fun() ->
array:foldl(fun(_, Row, Acc) -> lists:merge([array:to_list(Row)], Acc) end, [], Matrix)
end,
%% main diagonal: cells where both indices are equal
FirstDiagonalGetter = fun() ->
[lists:map(
fun(Index) ->
GetMatrix(Index, Index)
end,
IndexList)]
end,
%% anti-diagonal: cells where the indices sum to Side - 1
SecondDiagonalGetter = fun() ->
[lists:map(
fun(Index) ->
GetMatrix(Index, Side - Index - 1)
end,
IndexList)]
end,
Sum = fun(List) -> lists:foldl(fun(Value, Acc) -> Value + Acc end, 0, List) end,
%% every line produced by every getter must hit the expected sum
lists:all(
fun(ValuesGetter) ->
lists:all(
fun(ValueList) -> Sum(ValueList) == ExpectedSum end,
ValuesGetter())
end,
[
RowsGetter,
ColumnsGetter,
FirstDiagonalGetter,
SecondDiagonalGetter
]).
%% @doc Build a magic square of odd side length Side using the Siamese
%% (diagonal) walk, verify it, and return a #matrixResult{} record.
%% Throws when Side is even.
%% Fixed: removed a dead `put(matrix, createEmptyMatrix(Side))` process
%% dictionary write — nothing in this module ever reads it; the fold
%% below carries its own matrix in the #currentHolder{} accumulator.
calculate(Side) ->
    case Side rem 2 of
        0 -> throw("Side needs to be an odd number.");
        _ -> ok
    end,
    %% every line of a magic square of side N sums to N * (N^2 + 1) / 2
    ExpectedSum = round(Side * (Side * Side + 1) / 2),
    StartRow = round(-(Side - 1) / 2),
    StartColumn = round((Side - 1) / 2),
    %% functional update of one cell in the array-of-arrays matrix
    SetMatrix = fun(RowIndex, ColumnIndex, Value, Matrix) ->
        OriginalRow = array:get(RowIndex, Matrix),
        ChangedRow = array:set(ColumnIndex, Value, OriginalRow),
        array:set(RowIndex, ChangedRow, Matrix)
    end,
    %% place 1..Side*Side, one number per step of the diagonal walk
    Result = lists:foldl(
        fun(Count, Current) ->
            Matrix = Current#currentHolder.matrix,
            getNext(Current#currentHolder{matrix = SetMatrix(actualRow(Current), actualColumn(Current), Count, Matrix)}, Count)
        end,
        #currentHolder{
            matrix = createEmptyMatrix(Side),
            row = StartRow,
            column = StartColumn,
            startRow = StartRow,
            startColumn = StartColumn,
            side = Side},
        lists:seq(1, Side * Side)
    ),
    #matrixResult{
        success = testResult(Result#currentHolder.matrix, ExpectedSum, Side),
        matrix = Result#currentHolder.matrix,
        sum = ExpectedSum,
        side = Side
    }.
%% ``The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved via the world wide web at http://www.erlang.org/.
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
%%
%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
%% Portions created by Ericsson are Copyright 2000, Ericsson Utvecklings
%% AB. All Rights Reserved.''
%%
%% $Id$
%%
-module(digraph_utils).
%%% Operations on directed (and undirected) graphs.
%%%
%%% Implementation based on <NAME>: Graph Algorithms with a
%%% Functional Flavour, in <NAME>, and <NAME> (Eds.):
%%% Advanced Functional Programming, Lecture Notes in Computer
%%% Science 925, Springer Verlag, 1995.
-export([components/1, strong_components/1, cyclic_strong_components/1,
reachable/2, reachable_neighbours/2,
reaching/2, reaching_neighbours/2,
topsort/1, is_acyclic/1,
arborescence_root/1, is_arborescence/1, is_tree/1,
loop_vertices/1,
subgraph/2, subgraph/3, condensation/1,
preorder/1, postorder/1]).
%%
%% Exported functions
%%
%% Connected components ignoring edge direction: a DFS forest following
%% both in- and out-edges from every vertex.
components(G) ->
forest(G, fun inout/3).
%% Strongly connected components: a DFS over in-edges seeded with the
%% vertices in reverse postorder (Kosaraju-style two-pass scheme).
strong_components(G) ->
forest(G, fun in/3, revpostorder(G)).
%% Strong components containing at least one cycle; singleton components
%% are kept only when the vertex has a self-loop.
cyclic_strong_components(G) ->
remove_singletons(strong_components(G), G, []).
%% All vertices reachable from Vs (the Vs themselves included).
reachable(Vs, G) when is_list(Vs) ->
lists:append(forest(G, fun out/3, Vs, first)).
%% Vertices reachable from Vs by a path of at least one edge.
reachable_neighbours(Vs, G) when is_list(Vs) ->
lists:append(forest(G, fun out/3, Vs, not_first)).
%% All vertices from which some vertex in Vs can be reached (Vs included).
reaching(Vs, G) when is_list(Vs) ->
lists:append(forest(G, fun in/3, Vs, first)).
%% Vertices reaching Vs by a path of at least one edge.
reaching_neighbours(Vs, G) when is_list(Vs) ->
lists:append(forest(G, fun in/3, Vs, not_first)).
%% Topological sort: the vertices in topological order, or false when G
%% has a cycle (detected when the in-edge forest fails to cover every
%% vertex).
topsort(G) ->
L = revpostorder(G),
case length(forest(G, fun in/3, L)) =:= length(digraph:vertices(G)) of
true -> L;
false -> false
end.
%% Acyclic means: no self-loops and a topological sort exists.
is_acyclic(G) ->
case loop_vertices(G) of
[] -> topsort(G) =/= false;
_ -> false
end.
%% {yes, Root} when G is an arborescence (rooted directed tree), else no.
%% Requires |E| = |V| - 1 and exactly one vertex of in-degree 0; the
%% inner fun has no clause for any other in-degree, so the fold crashes
%% and the try converts that into no.
arborescence_root(G) ->
case digraph:no_edges(G) =:= digraph:no_vertices(G) - 1 of
true ->
try
F = fun(V, Z) ->
case digraph:in_degree(G, V) of
1 -> Z;
0 when Z =:= [] -> [V]
end
end,
[Root] = lists:foldl(F, [], digraph:vertices(G)),
{yes, Root}
catch _:_ ->
no
end;
false ->
no
end.
%% True when G is an arborescence.
is_arborescence(G) ->
arborescence_root(G) =/= no.
%% True when G is a tree ignoring edge direction: |E| = |V| - 1 and a
%% single connected component.
is_tree(G) ->
(digraph:no_edges(G) =:= digraph:no_vertices(G) - 1)
andalso (length(components(G)) =:= 1).
%% All vertices that have an edge to themselves.
loop_vertices(G) ->
lists:filter(fun(V) -> is_reflexive_vertex(V, G) end, digraph:vertices(G)).
%% Subgraph of G induced by the vertices Vs, with default options
%% (type inherited from G, labels kept).
subgraph(G, Vs) ->
subgraph_opts(G, Vs, []).
%% Subgraph of G induced by Vs; Opts may override the new graph's type
%% ({type, _}) and whether labels are copied ({keep_labels, _}).
subgraph(G, Vs, Opts) ->
subgraph_opts(G, Vs, Opts).
%% Condensation of G: one vertex per strongly connected component, with
%% an edge between two components whenever G has an edge between
%% vertices of those components.
condensation(G) ->
SCs = strong_components(G),
%% Each component is assigned a number.
%% V2I: from vertex to number.
%% I2C: from number to component.
V2I = ets:new(condensation, []),
I2C = ets:new(condensation, []),
CFun = fun(SC, N) -> lists:foreach(fun(V) ->
true = ets:insert(V2I, {V,N})
end,
SC),
true = ets:insert(I2C, {N, SC}),
N + 1
end,
lists:foldl(CFun, 1, SCs),
%% empty graph with the same type/protection as G
SCG = subgraph_opts(G, [], []),
lists:foreach(fun(SC) -> condense(SC, G, SCG, V2I, I2C) end, SCs),
ets:delete(V2I),
ets:delete(I2C),
SCG.
%% Vertices in depth-first preorder.
preorder(G) ->
lists:reverse(revpreorder(G)).
%% Vertices in depth-first postorder.
postorder(G) ->
lists:reverse(revpostorder(G)).
%%
%% Local functions
%%
%% Build a DFS spanning forest of G. SF is the successor function
%% (in/3, out/3 or inout/3) deciding which edges the traversal follows.
%% Returns a list of trees, each tree being a list of vertices.
forest(G, SF) ->
forest(G, SF, digraph:vertices(G)).
forest(G, SF, Vs) ->
forest(G, SF, Vs, first).
%% HandleFirst = first starts a tree at every seed vertex; not_first
%% starts only from a seed's successors (used by the *_neighbours
%% functions, so a seed is included only if re-reached via an edge).
forest(G, SF, Vs, HandleFirst) ->
T = ets:new(forest, [set]),
F = fun(V, LL) -> pretraverse(HandleFirst, V, SF, G, T, LL) end,
LL = lists:foldl(F, [], Vs),
ets:delete(T),
LL.
pretraverse(first, V, SF, G, T, LL) ->
ptraverse([V], SF, G, T, [], LL);
pretraverse(not_first, V, SF, G, T, LL) ->
case ets:member(T, V) of
false -> ptraverse(SF(G, V, []), SF, G, T, [], LL);
true -> LL
end.
%% Depth-first worklist traversal. T is the ETS set of visited vertices,
%% Rs accumulates the current tree, LL the finished trees.
ptraverse([V | Vs], SF, G, T, Rs, LL) ->
case ets:member(T, V) of
false ->
ets:insert(T, {V}),
ptraverse(SF(G, V, Vs), SF, G, T, [V | Rs], LL);
true ->
ptraverse(Vs, SF, G, T, Rs, LL)
end;
%% empty tree: don't add it to the forest
ptraverse([], _SF, _G, _T, [], LL) ->
LL;
ptraverse([], _SF, _G, _T, Rs, LL) ->
[Rs | LL].
%% Vertices in reverse depth-first preorder (out-edge forest, flattened).
revpreorder(G) ->
lists:append(forest(G, fun out/3)).
%% Vertices in reverse depth-first postorder.
revpostorder(G) ->
T = ets:new(forest, [set]),
L = posttraverse(digraph:vertices(G), G, T, []),
ets:delete(T),
L.
posttraverse([V | Vs], G, T, L) ->
L1 = case ets:member(T, V) of
false ->
ets:insert(T, {V}),
%% V is prepended after its descendants, yielding reverse postorder
[V | posttraverse(out(G, V, []), G, T, L)];
true ->
L
end,
posttraverse(Vs, G, T, L1);
posttraverse([], _G, _T, L) ->
L.
%% Successor functions: prepend a vertex's in-, out-, or all neighbours
%% to the work list Vs.
in(G, V, Vs) ->
digraph:in_neighbours(G, V) ++ Vs.
out(G, V, Vs) ->
digraph:out_neighbours(G, V) ++ Vs.
inout(G, V, Vs) ->
in(G, V, out(G, V, Vs)).
%% Drop components that are single vertices without a self-loop (such a
%% vertex is on no cycle). Multi-vertex components and self-looping
%% singletons are kept, prepended onto the accumulator, so the result
%% comes out in reverse order of the input — same as the original
%% clause-recursive version.
remove_singletons(Components, G, Acc0) ->
    lists:foldl(
      fun(Component, Acc) ->
              case Component of
                  [V] ->
                      case is_reflexive_vertex(V, G) of
                          true -> [Component | Acc];
                          false -> Acc
                      end;
                  _ ->
                      [Component | Acc]
              end
      end, Acc0, Components).

%% A vertex is reflexive when it has an edge to itself.
is_reflexive_vertex(V, G) ->
    lists:member(V, digraph:out_neighbours(G, V)).
%% Parse the subgraph option list, then build the subgraph.
%% Defaults: type inherited from G, labels kept.
subgraph_opts(G, Vs, Opts) ->
subgraph_opts(Opts, inherit, true, G, Vs).
subgraph_opts([{type, Type} | Opts], _Type0, Keep, G, Vs)
when Type =:= inherit; is_list(Type) ->
subgraph_opts(Opts, Type, Keep, G, Vs);
subgraph_opts([{keep_labels, Keep} | Opts], Type, _Keep0, G, Vs)
when Keep; not Keep ->
subgraph_opts(Opts, Type, Keep, G, Vs);
%% inherit: read the cyclicity and protection options from G itself
subgraph_opts([], inherit, Keep, G, Vs) ->
Info = digraph:info(G),
{_, {_, Cyclicity}} = lists:keysearch(cyclicity, 1, Info),
{_, {_, Protection}} = lists:keysearch(protection, 1, Info),
subgraph(G, Vs, [Cyclicity, Protection], Keep);
subgraph_opts([], Type, Keep, G, Vs) ->
subgraph(G, Vs, Type, Keep);
subgraph_opts([Opt | _], _Type, _Keep, _G, _Vs) ->
{error, {invalid_option, Opt}}.
%% Create the new graph, copy the requested vertices, then copy every
%% out-edge of a copied vertex whose target was also copied.
subgraph(G, Vs, Type, Keep) ->
case digraph:new(Type) of
Error = {error, _} ->
Error;
SG ->
lists:foreach(fun(V) -> subgraph_vertex(V, G, SG, Keep) end, Vs),
EFun = fun(V) -> lists:foreach(fun(E) ->
subgraph_edge(E, G, SG, Keep)
end,
digraph:out_edges(G, V))
end,
lists:foreach(EFun, digraph:vertices(SG)),
SG
end.
%% Copy vertex V into SG when it exists in G; label copied iff Keep.
subgraph_vertex(V, G, SG, Keep) ->
case digraph:vertex(G, V) of
false -> ok;
_ when not Keep -> digraph:add_vertex(SG, V);
{_V, Label} when Keep -> digraph:add_vertex(SG, V, Label)
end.
%% Copy edge E into SG when its target vertex exists in SG (the source
%% does by construction — edges are enumerated per SG vertex).
subgraph_edge(E, G, SG, Keep) ->
{_E, V1, V2, Label} = digraph:edge(G, E),
case digraph:vertex(SG, V2) of
false -> ok;
_ when not Keep -> digraph:add_edge(SG, E, V1, V2, []);
_ when Keep -> digraph:add_edge(SG, E, V1, V2, Label)
end.
%% Add component SC as a vertex of the condensation graph SCG and link
%% it to every component that some vertex of SC has an out-edge into.
%% T collects the distinct component numbers found among the
%% out-neighbours of SC's vertices.
condense(SC, G, SCG, V2I, I2C) ->
T = ets:new(condense, []),
NFun = fun(Neighbour) ->
[{_V,I}] = ets:lookup(V2I, Neighbour),
ets:insert(T, {I})
end,
VFun = fun(V) -> lists:foreach(NFun, digraph:out_neighbours(G, V)) end,
lists:foreach(VFun, SC),
digraph:add_vertex(SCG, SC),
condense(ets:first(T), T, SC, G, SCG, I2C),
ets:delete(T).
%% Walk the collected component numbers and add one SCG edge per target
%% component (note: edges internal to SC produce an SC -> SC self-edge).
condense('$end_of_table', _T, _SC, _G, _SCG, _I2C) ->
ok;
condense(I, T, SC, G, SCG, I2C) ->
[{_,C}] = ets:lookup(I2C, I),
digraph:add_vertex(SCG, C),
digraph:add_edge(SCG, SC, C),
condense(ets:next(T, I), T, SC, G, SCG, I2C). | data/erlang/82b4e93b1e226b99ed90eb6d6944dc03_digraph_utils.erl | 0.506836 | 0.476214 | 82b4e93b1e226b99ed90eb6d6944dc03_digraph_utils.erl | starcoder |
-module(day15).
-export([solve/0, solve_nif/0]).
-dialyzer({nowarn_function, [ solve_nif/0
]}).
%% Taken from:
%% https://github.com/jesperes/aoc_erlang/blob/b53a0d2475920ef7beb330536e468eac6cfd659f/src/2021/aoc2021_day15.erl#L32
%% Encoding the {X, Y} in the Seen map shaves off a good 800 ms from the total runtime.
-define(BIT_XY(X, Y), X bsl 12 bor Y).
%% API ========================================================================
%% Solve both puzzle parts in pure Erlang: part 1 on the parsed grid,
%% part 2 on the grid tiled 5x5. The known answers for this puzzle
%% input are asserted inline, so a regression crashes with badmatch.
solve() ->
Grid = to_grid(#{}, input()),
{687, 2957} = {calculate_lowest_risk(Grid), calculate_lowest_risk(expand_grid(Grid))}.
%% Same, but delegating the shortest-path search to the util:dijkstra
%% NIF (hence the nowarn_function dialyzer suppression above).
solve_nif() ->
Grid = to_grid(#{}, input()),
{687, 2957} = {util:dijkstra(Grid), util:dijkstra(expand_grid(Grid))}.
%% Logic ======================================================================
%% Dijkstra over the risk grid. The priority queue is a gb_set of
%% {Cost, {X, Y}} pairs (ordered by cost first); Seen maps visited
%% coordinates — packed into a single integer via ?BIT_XY to keep map
%% keys cheap — to true.
calculate_lowest_risk(Grid) ->
Keys = maps:keys(Grid),
MinNode = lists:min(Keys),
Checked = gb_sets:add_element({0, MinNode}, gb_sets:new()),
do_calculate_lowest_risk(Grid, Checked, #{}, infinity).
%% Queue exhausted ({0, nil} is the empty gb_set): Score holds the cost
%% of the last node settled — presumably the bottom-right target, as
%% the maximal-cost node settles last; TODO confirm against the puzzle.
do_calculate_lowest_risk(_Grid, {0, nil}, _Seen, Score) ->
Score;
do_calculate_lowest_risk(Grid, Checked, Seen, Score) ->
{{Cost, {X, Y}}, NewSet0} = gb_sets:take_smallest(Checked),
case maps:is_key(?BIT_XY(X, Y), Seen) of
true ->
%% stale queue entry for an already-settled node: skip
do_calculate_lowest_risk(Grid, NewSet0, Seen, Score);
false ->
NewSet = build_new(NewSet0, X, Y, Seen, Cost, Grid),
NewSeen = maps:put(?BIT_XY(X, Y), true, Seen),
do_calculate_lowest_risk(Grid, NewSet, NewSeen, Cost)
end.
%% Push every unvisited neighbour of {X, Y} with its tentative cost.
build_new(Set, X, Y, Seen, Cost, Grid) ->
lists:foldl(fun({C, {Xc, Yc} = Coord}, Acc) ->
case maps:is_key(?BIT_XY(Xc, Yc), Seen) of
true -> Acc;
false -> gb_sets:add_element({Cost + C, Coord}, Acc)
end
end,
Set,
surrounding(X, Y, Grid)).
%% The four orthogonal neighbours of {X, Y} that exist in Grid, as
%% {Risk, Coordinate} pairs. Hits are prepended while folding over
%% right, down, left, up — so the result order is the reverse of that.
surrounding(X, Y, Grid) ->
    Neighbours = [{X + 1, Y}, {X, Y + 1}, {X - 1, Y}, {X, Y - 1}],
    lists:foldl(
      fun(Pos, Acc) ->
              case maps:find(Pos, Grid) of
                  {ok, Risk} -> [{Risk, Pos} | Acc];
                  error -> Acc
              end
      end, [], Neighbours).
%% Part 2: tile the grid 5x5. Each copy at tile offset {OffsetX,
%% OffsetY} shifts the coordinates by the grid's maximal extent and
%% raises the risk by OffsetX + OffsetY, wrapping above 9 back into 1..9.
expand_grid(Grid) ->
XYs = [ {X, Y} || X <- lists:seq(0, 4),
Y <- lists:seq(0, 4)
],
do_expand_grid(Grid, XYs, lists:max(maps:keys(Grid)), #{}).
do_expand_grid(_, [], _, Acc) -> Acc;
do_expand_grid(Grid, [ {OffsetX, OffsetY} | T ], {MaxX, MaxY}, Acc) ->
%% copy every cell of the original grid into this tile
NewAcc = maps:fold(fun({X, Y}, V, A) ->
maps:put({X + OffsetX * MaxX, Y + OffsetY * MaxY},
plus_one_with_wrap(V, {OffsetX, OffsetY}),
A)
end, Acc, Grid),
do_expand_grid(Grid, T, {MaxX, MaxY}, NewAcc).
%% Add the tile offsets to a risk value; 10..18 wrap back to 1..9.
plus_one_with_wrap(V, {OffsetX, OffsetY}) ->
case V + OffsetX + OffsetY of
Val when Val > 9 -> Val rem 9;
Val -> Val
end.
%% Parsing ====================================================================
%% Read the puzzle input as one list of characters per line.
input() ->
util:read_file("day15.txt", <<"\n">>, fun binary_to_list/1).
%% Fold the input lines into a map of {X, Y} -> risk digit, 1-based.
to_grid(Acc, Lines) ->
{_, NewAcc} = lists:foldl(fun(Line, {Y, A}) ->
to_grid(A, Line, Y)
end, {1, Acc}, Lines),
NewAcc.
%% One line: each character is an ASCII digit; $0 is subtracted to get
%% its numeric value.
to_grid(Acc, Line, Y) ->
{_, NewAcc} = lists:foldl(fun(V, {X, A}) ->
{X + 1, maps:put({X, Y}, V - $0, A)}
end, {1, Acc}, Line),
{Y + 1, NewAcc}. | apps/aoc/src/day15.erl | 0.563018 | 0.638666 | day15.erl | starcoder |
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2021 DGIOT Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(dgiot_metrics).
-include_lib("dgiot/include/dgiot.hrl").
-include_lib("dgiot/include/logger.hrl").
-export([counter/2, counter/3, counter/4, gauge/2, gauge/3, gauge/4, summary/2, summary/3, summary/4, histogram/3, histogram/2, histogram/4]).
-export([counter_reset/1, counter_reset/2, counter_reset/3]).
-export([gauge_reset/1, gauge_reset/2, gauge_reset/3]).
-export([summary_reset/1, summary_reset/2, summary_reset/3]).
-export([histogram_reset/1, histogram_reset/2, histogram_reset/3]).
-export([init_metrics/1, collect_metrics/4]).
-export([start_metrics/1, inc/3, inc/4, inc/5, dec/3, dec/4, dec/5]).
-export([start/1, check_metrics/0]).
-route_path("/metrics/:Registry").
-export([init/2]).
%% Cowboy HTTP handler for GET /metrics/:Registry (see -route_path
%% above): renders the named Prometheus registry as text/plain so it
%% can be scraped.
init(Req0, ?MODULE) ->
Registry = dgiot_req:binding(<<"Registry">>, Req0),
?LOG(info,"Registry ~p", [Registry]),
Data = dgiot_stats:metrics(Registry),
Req = cowboy_req:reply(200, #{
<<"content-type">> => <<"text/plain">>
}, dgiot_utils:to_binary(Data), Req0),
{ok, Req, ?MODULE}.
%% @doc Increment counter Name in the default registry by Value.
%% Value must be non-zero: the original had two clauses guarded
%% `Value > 0' and `Value < 0' with identical bodies; they are merged
%% into a single `Value /= 0' guard, which preserves the original
%% function_clause failure for 0 (and for 0.0, since /= compares
%% arithmetically).
counter(Name, Value) when Value /= 0 ->
    counter(Name, [], Value).

%% @doc Increment counter Name with label values in the default registry.
counter(Name, LabelValues, Value) ->
    counter(?DEFREGISTRY, Name, LabelValues, Value).

%% @doc Increment counter Name in the given registry; same non-zero
%% contract as counter/2 (two identical clauses merged here too).
counter(Registry, Name, LabelValues, Value) when Value /= 0 ->
    prometheus_counter:inc(Registry, Name, LabelValues, Value).

%% @doc Reset a counter; defaults: no labels, the default registry.
counter_reset(Name) ->
    counter_reset(Name, []).
counter_reset(Name, LabelValues) ->
    counter_reset(?DEFREGISTRY, Name, LabelValues).
counter_reset(Registry, Name, LabelValues) ->
    prometheus_counter:reset(Registry, Name, LabelValues).
%% Gauges: set an instantaneous value. The shorter arities default the
%% labels to [] and the registry to ?DEFREGISTRY.
gauge(Name, Value) ->
gauge(Name, [], Value).
gauge(Name, LabelValues, Value) ->
gauge(?DEFREGISTRY, Name, LabelValues, Value).
gauge(Registry, Name, LabelValues, Value) ->
prometheus_gauge:set(Registry, Name, LabelValues, Value).
gauge_reset(Name) ->
gauge_reset(Name, []).
gauge_reset(Name, LabelValues) ->
gauge_reset(?DEFREGISTRY, Name, LabelValues).
gauge_reset(Registry, Name, LabelValues) ->
prometheus_gauge:reset(Registry, Name, LabelValues).
%% Summaries: record one observation.
summary(Name, Value) ->
summary(Name, [], Value).
summary(Name, LabelValues, Value) ->
summary(?DEFREGISTRY, Name, LabelValues, Value).
summary(Registry, Name, LabelValues, Value) ->
prometheus_summary:observe(Registry, Name, LabelValues, Value).
summary_reset(Name) ->
summary_reset(Name, []).
summary_reset(Name, LabelValues) ->
summary_reset(?DEFREGISTRY, Name, LabelValues).
summary_reset(Registry, Name, LabelValues) ->
prometheus_summary:reset(Registry, Name, LabelValues).
%% Histograms: record one observation into the configured buckets.
histogram(Name, Value) ->
histogram(Name, [], Value).
histogram(Name, LabelValues, Value) ->
histogram(?DEFREGISTRY, Name, LabelValues, Value).
histogram(Registry, Name, LabelValues, Value) ->
prometheus_histogram:observe(Registry, Name, LabelValues, Value).
histogram_reset(Name) ->
histogram_reset(Name, []).
histogram_reset(Name, LabelValues) ->
histogram_reset(?DEFREGISTRY, Name, LabelValues).
histogram_reset(Registry, Name, LabelValues) ->
prometheus_histogram:reset(Registry, Name, LabelValues).
%% Newly added statistics helpers (values stored via dgiot_data)
%% @doc Increment the plain numeric stat {Name, Registry} by Value.
%% The "lookup with default" boilerplate that was copy-pasted into
%% every inc/dec clause is consolidated into stored_value/3 below;
%% every clause's observable behaviour is unchanged.
inc(Registry, Name, Value) ->
    Count = stored_value(Registry, Name, 0),
    dgiot_data:insert({Name, Registry}, Count + Value).

%% @doc rate: accumulate Value and store the result as a percentage of
%% Total, capped at 100 (0 when Total is not positive).
inc(Registry, Name, Value, Total, rate) ->
    Count = stored_value(Registry, Name, 0),
    New = Count + Value,
    case New >= Total of
        true ->
            dgiot_data:insert({Name, Registry}, 100);
        _ ->
            case Total > 0 of
                true -> dgiot_data:insert({Name, Registry}, round(100 * New / Total));
                _ -> dgiot_data:insert({Name, Registry}, 0)
            end
    end;
%% @doc max: accumulate Value but never store more than Total.
inc(Registry, Name, Value, Total, max) ->
    Count = stored_value(Registry, Name, 0),
    New = Count + Value,
    case New >= Total of
        true -> dgiot_data:insert({Name, Registry}, Total);
        _ -> dgiot_data:insert({Name, Registry}, New)
    end.

%% @doc average: store the mean of the stored value and Value.
%% Clause order matters: the atom `average' is matched first; any other
%% 4th argument is treated as a label by the next clause.
inc(Registry, Name, Value, average) ->
    Count = stored_value(Registry, Name, 0),
    dgiot_data:insert({Name, Registry}, (Count + Value) / 2);
%% @doc Labelled stat: bump Label's counter inside the stored map.
inc(Registry, Name, Label, Value) ->
    Map = stored_value(Registry, Name, #{}),
    Count = maps:get(Label, Map, 0),
    dgiot_data:insert({Name, Registry}, Map#{Label => Count + Value}).

%% @doc Decrement the plain numeric stat, flooring at zero.
dec(Registry, Name, Value) ->
    Count = stored_value(Registry, Name, 0),
    New = case Count =< Value of
              true -> 0;
              false -> Count - Value
          end,
    dgiot_data:insert({Name, Registry}, New).

%% @doc rate: decrement (floored at zero) then store as a percentage of
%% Total (0 when Total is not positive).
dec(Registry, Name, Value, Total, rate) ->
    Count = stored_value(Registry, Name, 0),
    New = case Count > Value of
              true -> Count - Value;
              false -> 0
          end,
    case Total > 0 of
        true -> dgiot_data:insert({Name, Registry}, round(100 * New / Total));
        _ -> dgiot_data:insert({Name, Registry}, 0)
    end.

%% @doc Decrement a labelled counter in the stored map, flooring at zero.
dec(Registry, Name, Label, Value) ->
    Map = stored_value(Registry, Name, #{}),
    Count = maps:get(Label, Map, 0),
    New = case Count > Value of
              true -> Count - Value;
              false -> 0
          end,
    dgiot_data:insert({Name, Registry}, Map#{Label => New}).

%% Fetch the value stored under {Name, Registry}, or Default when the
%% key is absent. Any other lookup result crashes (case_clause), just
%% as the original inline `{ok, _} = case ...' constructs did.
stored_value(Registry, Name, Default) ->
    case dgiot_data:lookup({Name, Registry}) of
        {error, not_find} -> Default;
        {ok, Stored} -> Stored
    end.
%% Start metrics collection for a registry (alias for start_metrics/1).
start(Registry) ->
start_metrics(Registry).
start_metrics(Registry) ->
dgiot_stats:new(Registry).
%% Initialise backing storage for a declared metric: a plain 0 for an
%% unlabelled metric, or a map of label -> 0 for a labelled one.
init_metrics(#{name := Name, registry := Registry, labels := Labels}) ->
case Labels of
[] ->
dgiot_data:insert({Name, Registry}, 0);
[#{<<"values">> := Values}] ->
dgiot_data:insert({Name, Registry}, lists:foldl(fun(Label, Acc) -> Acc#{Label => 0} end, #{}, Values))
end.
%% Publish the value(s) accumulated in dgiot_data as Prometheus gauge
%% samples: one sample per label for labelled metrics (map-valued),
%% one unlabelled sample otherwise.
collect_metrics(_Instance, Registry, Name, _Labels) ->
{ok, Map} = dgiot_data:lookup({Name, Registry}),
case is_map(Map) of
true ->
maps:fold(
fun(Label, Value, Acc) ->
ok = gauge(Registry, Name, [Label], Value),
[Label | Acc]
end, [], Map);
false ->
ok = gauge(Registry, Name, [], Map)
end.
%% Scan every loaded application and start metrics for each app that
%% ships a priv/<app>.metrics file (skipping apps for which
%% dgiot_plugin:check_module/1 does not return false).
check_metrics() ->
Fun =
fun({App, _Desc, _Vsn}) ->
case dgiot_plugin:check_module(App) of
false ->
FileName = lists:concat([App, ".metrics"]),
case file:list_dir(code:priv_dir(App)) of
{ok, Files} ->
case lists:member(FileName, Files) of
true ->
start(App);
false -> pass
end;
_ -> pass
end;
_ -> pass
end
end,
lists:map(Fun, application:loaded_applications()). | apps/dgiot/src/otp/dgiot_metrics.erl | 0.532182 | 0.456168 | dgiot_metrics.erl | starcoder |
%% Copyright (c) 2013-2020 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% File : luerl_ex.erl
%% Authors : <NAME>
%% Purpose : Elixir-style wrappers for luerl.erl
%% This module just contains functions that forward to luerl.erl, but place
%% the VM State arguments in the first position rather than the last. This
%% better matches Elixir conventions and allows for using the Elixir pipe
%% operator '|>' to chain Luerl function calls.
-module('Elixir.Luerl.New').
%% Basic user API to luerl.
-export([init/0,gc/1,
load/2,load/3,loadfile/2,loadfile/3,
load_module/3,load_module_dec/3,
do/2,do_dec/2,do/3,do_dec/3,
dofile/2,dofile/3,dofile_dec/2,dofile_dec/3,
call/3,call_chunk/2,call_chunk/3,
call_function/3,call_function_dec/3,
call_method/4,call_method_dec/4,
get_table_keys/2,get_table_keys_dec/2,
set_table_keys/3,set_table_keys_dec/3,
get_stacktrace/1
]).
%% Encoding and decoding.
-export([encode/2,encode_list/2,decode/2,decode_list/2]).
%% Every function below is a thin wrapper around the same-named
%% luerl_new function, with the VM state St moved into the first
%% argument position so the calls compose with Elixir's pipe operator.

%% VM lifecycle.
init() ->
luerl_new:init().
gc(St) ->
luerl_new:gc(St).
%% Loading and compiling chunks.
load(St, Bin) ->
luerl_new:load(Bin, St).
load(St, Bin, Opts) ->
luerl_new:load(Bin, Opts, St).
loadfile(St, Name) ->
luerl_new:loadfile(Name, St).
loadfile(St, Name, Opts) ->
luerl_new:loadfile(Name, Opts, St).
load_module(St, Lfp, Mod) ->
luerl_new:load_module(Lfp, Mod, St).
load_module_dec(St, Dfp, Mod) ->
luerl_new:load_module_dec(Dfp, Mod, St).
%% Evaluating source strings and files (_dec variants decode results).
do(St, S) ->
luerl_new:do(S, St).
do(St, S, Opts) ->
luerl_new:do(S, Opts, St).
do_dec(St, S) ->
luerl_new:do_dec(S, St).
do_dec(St, S, Opts) ->
luerl_new:do_dec(S, Opts, St).
dofile(St, Path) ->
luerl_new:dofile(Path, St).
dofile(St, Path, Opts) ->
luerl_new:dofile(Path, Opts, St).
dofile_dec(St, Path) ->
luerl_new:dofile_dec(Path, St).
dofile_dec(St, Path, Opts) ->
luerl_new:dofile_dec(Path, Opts, St).
%% Calling chunks, functions and methods.
call(St, C, Args) ->
luerl_new:call(C, Args, St).
call_chunk(St, C) ->
luerl_new:call_chunk(C, St).
call_chunk(St, C, Args) ->
luerl_new:call_chunk(C, Args, St).
call_function(St, Fp, Args) ->
luerl_new:call_function(Fp, Args, St).
call_function_dec(St, Dfunc, Dargs) ->
luerl_new:call_function_dec(Dfunc, Dargs, St).
call_method(St, Obj, Meth, Args) ->
luerl_new:call_method(Obj, Meth, Args, St).
call_method_dec(St, Dobj, Dmeth, Dargs) ->
luerl_new:call_method_dec(Dobj, Dmeth, Dargs, St).
%% Table access and stack traces.
get_table_keys(St, Keys) ->
luerl_new:get_table_keys(Keys, St).
get_table_keys_dec(St, Dkeys) ->
luerl_new:get_table_keys_dec(Dkeys, St).
set_table_keys(St, Keys, Val) ->
luerl_new:set_table_keys(Keys, Val, St).
set_table_keys_dec(St, Dkeys, Dval) ->
luerl_new:set_table_keys_dec(Dkeys, Dval, St).
get_stacktrace(St) ->
luerl_new:get_stacktrace(St).
%% Converting terms between Erlang and Lua representations.
encode(St, V) ->
luerl_new:encode(V, St).
encode_list(St, Ts) ->
luerl_new:encode_list(Ts, St).
decode(St, V) ->
luerl_new:decode(V, St).
decode_list(St, Lts) ->
luerl_new:decode_list(Lts, St). | src/Elixir.Luerl.New.erl | 0.60871 | 0.414543 | Elixir.Luerl.New.erl | starcoder |
%%%
%%% Copyright 2011, Boundary
%%%
%%% Licensed under the Apache License, Version 2.0 (the "License");
%%% you may not use this file except in compliance with the License.
%%% You may obtain a copy of the License at
%%%
%%% http://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing, software
%%% distributed under the License is distributed on an "AS IS" BASIS,
%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%%% See the License for the specific language governing permissions and
%%% limitations under the License.
%%%
%%%-------------------------------------------------------------------
%%% File: folsom_utils.erl
%%% @author <NAME> <<EMAIL>>
%%% @doc
%%% various util functions
%%% @end
%%%------------------------------------------------------------------
-module(folsom_utils).
-export([
to_atom/1,
convert_tags/1,
now_epoch/0,
now_epoch/1,
now_epoch_micro/0,
timestamp/0,
get_ets_size/1,
update_counter/3,
update_counter_no_exceptions/3,
rand_uniform/1
]).
%% Convert a binary or a string (character list) to an atom.
%% NOTE(review): list_to_atom creates atoms dynamically and atoms are
%% never garbage collected, so feeding untrusted input here can exhaust
%% the atom table. Callers should only pass trusted, bounded metric
%% names; switching to list_to_existing_atom would change behaviour for
%% first-seen names, so it is only flagged, not changed.
to_atom(Binary) when is_binary(Binary) ->
list_to_atom(binary_to_list(Binary));
to_atom(List) when is_list(List) ->
list_to_atom(List).
%% Convert every tag in a list to an atom (same caveat as to_atom/1).
convert_tags(Tags) ->
[to_atom(Tag) || Tag <- Tags].
%% @doc Seconds since the Unix epoch, taken from the current OS time.
now_epoch() ->
    now_epoch(os:timestamp()).

%% @doc Seconds since the Unix epoch for a given {Mega, Sec, Micro}
%% timestamp triple; sub-second precision is discarded.
now_epoch({MegaSecs, Secs, _MicroSecs}) ->
    MegaSecs * 1000000 + Secs.

%% @doc Microseconds since the Unix epoch, from the current OS time.
now_epoch_micro() ->
    {MegaSecs, Secs, MicroSecs} = os:timestamp(),
    (MegaSecs * 1000000 + Secs) * 1000000 + MicroSecs.

%% @doc Current OS timestamp. Kept as a wrapper because os:timestamp
%% itself cannot be mocked (see original note about meck).
timestamp() ->
    os:timestamp().

%% @doc Number of objects currently stored in the given ETS table.
get_ets_size(Table) ->
    ets:info(Table, size).
-ifdef(use_update_counter_4).
%% Modern path: a single atomic ets:update_counter/4 with a default
%% object, so no race window exists.
%% ets:update_counter/4 was introduced in OTP 18. When < 18 is no
%% longer supported all this code can be cleaned up along with
%% folsom_metrics_spiral:new/2
update_counter(Tid, Key, Value) when is_integer(Value) ->
ets:update_counter(Tid, Key, Value, {Key, 0}).
update_counter_no_exceptions(Tid, Key, Value) ->
update_counter(Tid, Key, Value).
-else.
%% Legacy path for OTP < 18, emulating update_counter/4 by catching the
%% badarg raised for a missing row.
%% @doc
%% Same as {@link ets:update_counter/3} but inserts `{Key, Value}' if object
%% is missing in the table.
update_counter(Tid, Key, Value) when is_integer(Value) ->
%% try to update the counter, will badarg if it doesn't exist
try ets:update_counter(Tid, Key, Value) of
Res ->
Res
catch
error:badarg ->
%% row didn't exist, create it
%% use insert_new to avoid races
case ets:insert_new(Tid, {Key, Value}) of
true ->
Value;
false ->
%% someone beat us to it
ets:update_counter(Tid, Key, Value)
end
end.
%% @doc
%% Same as {@link ets:update_counter/3} but inserts `{Key, Value}' if object
%% is missing in the table, avoiding exceptions by reading first.
%% Won't be required after https://github.com/erlang/otp/pull/362
update_counter_no_exceptions(Tid, Key, Value) when is_integer(Value) ->
%% Read counter first to avoid an exception
case ets:lookup(Tid, Key) of
[] ->
%% row didn't exist, create it
%% use insert_new to avoid races
case ets:insert_new(Tid, {Key, Value}) of
true ->
Value;
false ->
%% someone beat us to it
ets:update_counter(Tid, Key, Value)
end;
_ ->
ets:update_counter(Tid, Key, Value)
end.
-endif.
-ifdef(use_rand).
%% Modern path: the rand module (OTP >= 18) seeds itself implicitly.
rand_uniform(N) ->
rand:uniform(N).
-else.
%% Legacy path: the deprecated random module needs an explicit
%% per-process seed, so initialise it once on first use.
rand_uniform(N) ->
%% ensure seed is initialized
%% simulating the new `rand' module's behaviour
case get(random_seed) of
undefined ->
random:seed(os:timestamp());
{_, _, _} ->
ok
end,
random:uniform(N).
-endif. | src/folsom_utils.erl | 0.505127 | 0.421076 | folsom_utils.erl | starcoder |
%%% @author <NAME> <<EMAIL>>
%%% [http://ferd.ca/]
%%% @doc Regroups useful functionality used by recon when dealing with data
%%% from the node. The functions in this module allow quick runtime access
%%% to fancier behaviour than what would be done using recon module itself.
%%% @end
-module(recon_lib).
-export([sliding_window/2, sample/2, count/1,
port_list/1, port_list/2,
proc_attrs/1, proc_attrs/2,
inet_attrs/1, inet_attrs/2,
triple_to_pid/3, term_to_pid/1,
term_to_port/1,
time_map/5, time_fold/6,
scheduler_usage_diff/2,
sublist_top_n_attrs/2]).
%% private exports
-export([binary_memory/1]).
-type diff() :: [recon:proc_attrs() | recon:inet_attrs()].
%% @doc Compare two samples and return a list based on some key. The
%% type mentioned for the structure is `diff()' (`{Key,Val,Other}'),
%% which is compatible with the {@link recon:proc_attrs()} type.
%% For every key present in both samples the value is the delta
%% (Last - First) and Other comes from the Last sample; keys present in
%% only one sample keep that sample's value and Other.
-spec sliding_window(First::diff(), Last::diff()) -> diff().
sliding_window(First, Last) ->
    Base = dict:from_list([{Key, {Val, Other}} || {Key, Val, Other} <- First]),
    Merged = lists:foldl(
        fun({Key, Val, Other}, Acc) ->
                dict:update(Key,
                            fun({Prev, _PrevOther}) -> {Val - Prev, Other} end,
                            {Val, Other},
                            Acc)
        end,
        Base,
        Last),
    [{Key, Val, Other} || {Key, {Val, Other}} <- dict:to_list(Merged)].
%% @doc Runs a fun once, waits `Ms' milliseconds, runs the fun again,
%% and returns both results as `{First, Second}'.
-spec sample(Ms::non_neg_integer(), fun(() -> term())) ->
    {First::term(), Second::term()}.
sample(Delay, Fun) ->
    Before = Fun(),
    ok = timer:sleep(Delay),
    After = Fun(),
    {Before, After}.
%% @doc Takes a list of terms and counts how often each of them appears
%% in the list. The result list is in no particular order.
-spec count([term()]) -> [{term(), Count::integer()}].
count(Terms) ->
    Tally = lists:foldl(
        fun(Term, Acc) -> dict:update_counter(Term, 1, Acc) end,
        dict:new(),
        Terms),
    dict:to_list(Tally).
%% @doc Returns a list of all the open ports in the VM, coupled with
%% one of the properties desired from `erlang:port_info/1-2'.
%% A port that dies mid-scan makes port_info return undefined, which
%% fails the `{_, Val}' generator match and is silently filtered out.
-spec port_list(Attr::atom()) -> [{port(), term()}].
port_list(Attr) ->
[{Port,Val} || Port <- erlang:ports(),
{_, Val} <- [erlang:port_info(Port, Attr)]].
%% @doc Returns a list of all the open ports in the VM, but only
%% if the `Attr''s resulting value matches `Val'. `Attr' must be
%% a property accepted by `erlang:port_info/2'.
-spec port_list(Attr::atom(), term()) -> [port()].
port_list(Attr, Val) ->
[Port || Port <- erlang:ports(),
{Attr, Val} =:= erlang:port_info(Port, Attr)].
%% @doc Returns the attributes ({@link recon:proc_attrs()}) of
%% all processes of the node, except the caller.
%% Processes that die during the scan yield {error, undefined} from
%% proc_attrs/2 and are filtered out by the `{ok, Attrs}' match.
-spec proc_attrs(term()) -> [recon:proc_attrs()].
proc_attrs(AttrName) ->
Self = self(),
[Attrs || Pid <- processes(),
Pid =/= Self,
{ok, Attrs} <- [proc_attrs(AttrName, Pid)]
].
%% @doc Returns the attributes of a given process. This form of attributes
%% is standard for most comparison functions for processes in recon.
%%
%% A special attribute is `binary_memory', which will reduce the memory used
%% by the process for binary data on the global heap.
-spec proc_attrs(term(), pid()) -> {ok, recon:proc_attrs()} | {error, term()}.
proc_attrs(binary_memory, Pid) ->
case process_info(Pid, [binary, registered_name,
current_function, initial_call]) of
[{_, Bins}, {registered_name,Name}, Init, Cur] ->
%% `[Name || is_atom(Name)]' yields [Name] only for registered
%% processes; unregistered ones report [] here and are skipped.
{ok, {Pid, binary_memory(Bins), [Name || is_atom(Name)]++[Init, Cur]}};
undefined ->
%% process died between enumeration and inspection
{error, undefined}
end;
proc_attrs(AttrName, Pid) ->
case process_info(Pid, [AttrName, registered_name,
current_function, initial_call]) of
[{_, Attr}, {registered_name,Name}, Init, Cur] ->
{ok, {Pid, Attr, [Name || is_atom(Name)]++[Init, Cur]}};
undefined ->
{error, undefined}
end.
%% @doc Returns the attributes ({@link recon:inet_attrs()}) of
%% all inet ports (UDP, SCTP, TCP) of the node.
-spec inet_attrs(term()) -> [recon:inet_attrs()].
inet_attrs(AttrName) ->
    InetDrivers = ["tcp_inet", "udp_inet", "sctp_inet"],
    Ports = [Port || Port <- erlang:ports(),
                     {_, Name} <- [erlang:port_info(Port, name)],
                     lists:member(Name, InetDrivers)],
    lists:foldr(
        fun(Port, Acc) ->
            case inet_attrs(AttrName, Port) of
                {ok, Attrs} -> [Attrs | Acc];
                {error, _} -> Acc
            end
        end, [], Ports).
%% @doc Returns the attributes required for a given inet port (UDP,
%% SCTP, TCP). This form of attributes is standard for most comparison
%% functions for processes in recon. The aggregate attributes `cnt' and
%% `oct' sum their receive and send counterparts.
-spec inet_attrs(AttributeName, port()) -> {ok,recon:inet_attrs()}
                                         | {error,term()} when
      AttributeName :: 'recv_cnt' | 'recv_oct' | 'send_cnt' | 'send_oct'
                     | 'cnt' | 'oct'.
inet_attrs(Attr, Port) ->
    Stats = case Attr of
                cnt -> [recv_cnt, send_cnt];
                oct -> [recv_oct, send_oct];
                Other -> [Other]
            end,
    case inet:getstat(Port, Stats) of
        {ok, Props} ->
            Total = lists:foldl(fun({_, N}, Sum) -> N + Sum end, 0, Props),
            {ok, {Port, Total, Props}};
        {error, _} = Error ->
            Error
    end.
%% @doc Equivalent of `pid(X,Y,Z)' in the Erlang shell.
-spec triple_to_pid(N,N,N) -> pid() when
      N :: non_neg_integer().
triple_to_pid(X, Y, Z) ->
    Str = lists:flatten(io_lib:format("<~b.~b.~b>", [X, Y, Z])),
    list_to_pid(Str).
%% @doc Transforms a given term to a pid: accepts pids, registered
%% names (local, global, or via a registry module), pid strings or
%% binaries, and `{X,Y,Z}' triples.
-spec term_to_pid(recon:pid_term()) -> pid().
term_to_pid(Pid) when is_pid(Pid) ->
    Pid;
term_to_pid(Name) when is_atom(Name) ->
    whereis(Name);
term_to_pid("<0." ++ _ = Str) ->
    list_to_pid(Str);
term_to_pid(<<"<0.", _/binary>> = Bin) ->
    term_to_pid(binary_to_list(Bin));
term_to_pid({global, Name}) ->
    global:whereis_name(Name);
term_to_pid({via, Mod, Name}) ->
    Mod:whereis_name(Name);
term_to_pid({A, B, C}) when is_integer(A), is_integer(B), is_integer(C) ->
    triple_to_pid(A, B, C).
%% @doc Transforms a given term to a port: accepts ports, registered
%% names, port strings, or the bare port counter as an integer.
-spec term_to_port(recon:port_term()) -> port().
term_to_port(Port) when is_port(Port) ->
    Port;
term_to_port(Name) when is_atom(Name) ->
    whereis(Name);
term_to_port("#Port<0." ++ Id) ->
    %% Strip the trailing '>' and re-enter with the bare counter.
    Counter = list_to_integer(lists:sublist(Id, length(Id) - 1)),
    term_to_port(Counter);
term_to_port(N) when is_integer(N) ->
    %% Rebuild the port from its counter by hand-crafting an external
    %% term format binary:
    %% http://www.erlang.org/doc/apps/erts/erl_ext_dist.html#id86892
    NodeName = iolist_to_binary(atom_to_list(node())),
    NodeLen = iolist_size(NodeName),
    Creation = binary:last(term_to_binary(self())),
    binary_to_term(<<131,          % term encoding header
                     102,          % port tag
                     100,          % atom ext tag, used for node name
                     NodeLen:2/unit:8,
                     NodeName:NodeLen/binary,
                     N:4/unit:8,   % actual counter value
                     Creation:8>>).
%% @doc Calls a given function every `Interval' milliseconds and supports
%% a map-like interface (each result is modified and returned)
-spec time_map(N, Interval, Fun, State, MapFun) -> [term()] when
      N :: non_neg_integer(),
      Interval :: pos_integer(),
      Fun :: fun((State) -> {term(), State}),
      State :: term(),
      MapFun :: fun((_) -> term()).
time_map(0, _Interval, _Fun, _State, _MapFun) ->
    [];
time_map(N, Interval, Fun, State, MapFun) ->
    {Value, NextState} = Fun(State),
    timer:sleep(Interval),
    Mapped = MapFun(Value),
    [Mapped | time_map(N - 1, Interval, Fun, NextState, MapFun)].
%% @doc Calls a given function every `Interval' milliseconds and supports
%% a fold-like interface (each result is modified and accumulated).
%% Note that the sleep happens *before* each call, unlike time_map/5.
-spec time_fold(N, Interval, Fun, State, FoldFun, Init) -> [term()] when
      N :: non_neg_integer(),
      Interval :: pos_integer(),
      Fun :: fun((State) -> {term(), State}),
      State :: term(),
      FoldFun :: fun((term(), Init) -> Init),
      Init :: term().
time_fold(0, _Interval, _Fun, _State, _FoldFun, Acc) ->
    Acc;
time_fold(N, Interval, Fun, State, FoldFun, Acc0) ->
    timer:sleep(Interval),
    {Value, NextState} = Fun(State),
    Acc = FoldFun(Value, Acc0),
    time_fold(N - 1, Interval, Fun, NextState, FoldFun, Acc).
%% @doc Diffs two runs of erlang:statistics(scheduler_wall_time) and
%% returns usage metrics in terms of cores and 0..1 percentages.
-spec scheduler_usage_diff(SchedTime, SchedTime) -> undefined | [{SchedulerId, Usage}] when
      SchedTime :: [{SchedulerId, ActiveTime, TotalTime}],
      SchedulerId :: pos_integer(),
      Usage :: number(),
      ActiveTime :: non_neg_integer(),
      TotalTime :: non_neg_integer().
scheduler_usage_diff(First, Last) when First =:= undefined orelse Last =:= undefined ->
    undefined;
scheduler_usage_diff(First, Last) ->
    Pairs = lists:zip(lists:sort(First), lists:sort(Last)),
    [case Pair of
         %% Identical active *and* total times: report 0.0 rather than
         %% dividing by a zero time delta.
         {{I, _A0, T}, {I, _A1, T}} -> {I, 0.0};
         {{I, A0, T0}, {I, A1, T1}} -> {I, (A1 - A0) / (T1 - T0)}
     end || Pair <- Pairs].
%% @doc Returns the top n element of a list of process or inet attributes
-spec sublist_top_n_attrs([Attrs], pos_integer()) -> [Attrs]
      when Attrs :: recon:proc_attrs() | recon:inet_attrs().
sublist_top_n_attrs(List, Len) ->
    case Len of
        %% matching lists:sublist/2 behaviour
        0 -> [];
        _ -> pheap_fill(List, Len, [])
    end.
%% @private Sums the sizes (in bytes) of the binaries a process refers
%% to, as reported by process_info(Pid, binary).
binary_memory(Bins) ->
    lists:sum([Size || {_, Size, _} <- Bins]).
%%%%%%%%%%%%%%%
%%% PRIVATE %%%
%%%%%%%%%%%%%%%
%% Pairing-heap based top-N selection. A heap is represented as
%% [Root | SubHeaps] where Root is {{X, Y}, Node}: the comparison key
%% (attribute value paired with id) together with the original node.
%% Input nodes are {Id, Value, Extra} triples, keyed as {Value, Id}.
%%
%% Fill phase: push elements until the quota N is exhausted, then
%% switch to pheap_full/2, which only replaces the current minimum.
pheap_fill(List, 0, Heap) ->
    pheap_full(List, Heap);
pheap_fill([], _, Heap) ->
    pheap_to_list(Heap, []);
pheap_fill([{Y, X, _} = H|T], N, Heap) ->
    pheap_fill(T, N-1, insert({{X, Y}, H}, Heap)).
%% Steady state: the heap holds the best N nodes seen so far, smallest
%% key at the root. A new node enters only when its key beats the root;
%% the old root is then dropped (its children re-merged via
%% merge_pairs/1) and the newcomer inserted.
pheap_full([], Heap) ->
    pheap_to_list(Heap, []);
pheap_full([{Y, X, _} = H|T], [{K, _}|HeapT] = Heap) ->
    case {X, Y} of
        N when N > K ->
            pheap_full(T, insert({N, H}, merge_pairs(HeapT)));
        _ ->
            pheap_full(T, Heap)
    end.
%% Drain the heap root-first (ascending key order); consing onto the
%% accumulator means the final list comes out highest-key-first.
pheap_to_list([], Acc) -> Acc;
pheap_to_list([{_, H}|T], Acc) ->
    pheap_to_list(merge_pairs(T), [H|Acc]).
-compile({inline, [insert/2, merge/2]}).
%% Insert an element into a pairing heap ([Root | SubHeaps]): the
%% smaller of element and root keeps/takes the root position, the
%% other becomes a child heap.
insert(E, []) -> [E]; %% merge([E], H)
insert(E, [E2|_] = H) when E =< E2 -> [E, H];
insert(E, [E2|H]) -> [E2, [E]|H].
%% Merge two pairing heaps: the heap with the smaller root wins the
%% root position and adopts the other heap as a child.
merge(H1, []) -> H1;
merge([E1|H1], [E2|_]=H2) when E1 =< E2 -> [E1, H2|H1];
merge(H1, [E2|H2]) -> [E2, H1|H2].
%% Standard pairing-heap "merge pairs" pass, used after the root has
%% been removed: merge child heaps two at a time, then fold the
%% intermediate heaps together.
%% (Fix: the final clause had dataset-extraction metadata fused onto
%% the line, which broke the syntax; it has been removed.)
merge_pairs([]) -> [];
merge_pairs([H]) -> H;
merge_pairs([A, B|T]) -> merge(merge(A, B), merge_pairs(T)).
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
% This module implements a split algorithm for the vtree. It is an
% implementation of the split algorithm described in:
% A Revised R * -tree in Comparison with Related Index Structures
% by <NAME>, <NAME>
-module(vtree_split).
-include("vtree.hrl").
% couch_db.hrl is only included to have log messages
-include("couch_db.hrl").
-export([split_inner/5, split_leaf/5]).
-ifdef(makecheck).
-compile(nowarn_export_all).
-compile(export_all).
-endif.
% Some infos for this module
% ==========================
%
% Nodes
% -----
%
% Nodes are a 2-tuple, containing the MBB and n case of a:
% 1. KV node: the pointer to the node in the file
% 2. KP node: a list of pointers to its children
%
% Candidates
% ----------
%
% A candidate is a Node that is split into two partitions. Each partition
% contains at least some given minimum number and at most some given maximum
% number of Nodes.
% So it's: {[some Nodes], [some Nodes]}
%
% Split axis
% ----------
%
% The split axis is a list of candidates. It is the list of candidates, that
% have the overall smallest perimeter. For the calculations on how to get
% there see `split_axis/4`.
% The full split algorithm for an inner node. All dimensions are
% taken into account. The best split candidate is returned.
-spec split_inner(Nodes :: [split_node()], Mbb0 :: mbb(),
                  FillMin :: number(), FillMax :: number(),
                  Less :: lessfun()) -> candidate().
split_inner(Nodes, MbbO, FillMin, FillMax, Less) ->
    % Nodes are {Mbb, ...} tuples; the dimensionality is read off the
    % first node's MBB.
    NumDims = length(element(1, hd(Nodes))),
    MbbN = vtree_util:nodes_mbb(Nodes, Less),
    {_, Candidate} =
        lists:foldl(
          % Loop through every dimension to find the split with the
          % minimal cost
          fun(Dim, {MinVal, _}=Acc) ->
                  % Candidates are generated from two orderings: by the
                  % lower and by the upper bound of this dimension.
                  SortedMin = sort_dim_min(Nodes, Dim, Less),
                  SortedMax = sort_dim_max(Nodes, Dim, Less),
                  CandidatesMin = create_split_candidates(SortedMin, FillMin,
                                                          FillMax),
                  CandidatesMax = create_split_candidates(SortedMax, FillMin,
                                                          FillMax),
                  {Val, Candidate} = choose_candidate(
                                       CandidatesMin ++ CandidatesMax, Dim,
                                       MbbO, MbbN, FillMin, Less),
                  % The initial accumulator value is `nil'. In Erlang
                  % term order any number is smaller than an atom, so
                  % the first dimension always replaces it.
                  case Val < MinVal of
                      true -> {Val, Candidate};
                      false -> Acc
                  end
          end,
          {nil, nil}, lists:seq(1, NumDims)),
    Candidate.
% The full split algorithm for a leaf node. Only the dimension with the
% minimum perimeter is taken into account. The best split candidate is
% returned.
-spec split_leaf(Nodes :: [split_node()], Mbb0 :: mbb(),
                 FillMin :: number(), FillMax :: number(),
                 Less :: lessfun()) -> candidate().
split_leaf(Nodes, MbbO, FillMin, FillMax, Less) ->
    {Dim, Candidates} = split_axis(Nodes, FillMin, FillMax, Less),
    MbbN = vtree_util:nodes_mbb(Nodes, Less),
    {_Cost, Candidate} = choose_candidate(Candidates, Dim, MbbO, MbbN,
                                          FillMin, Less),
    Candidate.
% Calculate the split axis. Returns the dimension of the split candidates
% with the overall minimal perimeter and the candidates themselves.
% This corresponds to step 1 of 4.1 in the RR*-tree paper
-spec split_axis(Nodes :: [split_node()], FillMin :: pos_integer(),
                 FillMax :: pos_integer(), Less :: lessfun()) ->
                        {integer(), [candidate()]}.
split_axis(Nodes, FillMin, FillMax, Less) ->
    % Dimensionality is read off the first node's MBB.
    NumDims = length(element(1, hd(Nodes))),
    {Dim, {_MinPerim, Candidates}} =
        lists:foldl(
          % NOTE: the fun's `Dim' shadows nothing harmful here; the
          % outer `Dim' is only bound by the final match on the fold's
          % result.
          fun(Dim, {_, CurMin}) ->
                  SortedMin = sort_dim_min(Nodes, Dim, Less),
                  SortedMax = sort_dim_max(Nodes, Dim, Less),
                  Min = candidates_perimeter(SortedMin, FillMin, FillMax,
                                             Less),
                  Max = candidates_perimeter(SortedMax, FillMin, FillMax,
                                             Less),
                  % `nil' marks "no minimum yet" (first iteration); the
                  % second case clause matches the already-bound CurMin,
                  % i.e. it acts as the non-nil branch.
                  NewMin = case CurMin of
                               nil -> min_perim([Min, Max]);
                               CurMin -> min_perim([CurMin, Min, Max])
                           end,
                  {Dim, NewMin}
          end,
          {1, nil}, lists:seq(1, NumDims)),
    {Dim, Candidates}.
% choose_candidate returns the candidate with the minimal value as calculated
% by the goal function. It's the second step of the split algorithm as
% described in section 4.2.4.
% `MbbN` is the bounding box around the nodes that should be split including
% the newly added one.
-spec choose_candidate(Candidates :: [candidate()], Dim :: integer(),
                       MbbO :: mbb(), MbbN :: mbb(), FillMin :: number(),
                       Less :: lessfun()) ->
                              {number(), candidate()}.
choose_candidate([{F, S}|_]=Candidates, Dim, MbbO, MbbN, FillMin, Less) ->
    % Total byte size of one candidate's nodes (?ext_size presumably
    % comes from vtree.hrl). The size is taken from the head candidate
    % only — this assumes every candidate partitions the same node set
    % and hence has the same total size.
    CandidateSize = ?ext_size([Node || {_, Node} <- F]) +
        ?ext_size([Node || {_, Node} <- S]),
    PerimMax = perim_max(MbbN),
    Asym = asym(Dim, MbbO, MbbN),
    Wf = make_weighting_fun(Asym, FillMin, CandidateSize),
    % Minimise the goal function over all candidates.
    vtree_util:find_min_value(
      fun(Candidate) ->
              goal_fun(Candidate, PerimMax, Wf, Less)
      end, Candidates).
% This is the goal function "w" as described in section 4.2.4.
% It takes a Candidate, the maximum perimeter of the MBB that also includes
% the to be added node and a less function. Overlap-free candidates are
% rewarded (weight multiplied), overlapping ones penalised (divided).
-spec goal_fun(Candidate :: candidate(), PerimMax :: number(), Wf :: fun(),
               Less :: lessfun()) -> number().
goal_fun({F, S}=Candidate, PerimMax, Wf, Less) ->
    MbbF = vtree_util:nodes_mbb(F, Less),
    MbbS = vtree_util:nodes_mbb(S, Less),
    % `Offset` is the byte offset at which this candidate splits the
    % node list (total external size of the first partition).
    Offset = ?ext_size([Node || {_, Node} <- F]),
    case vtree_util:intersect_mbb(MbbF, MbbS, Less) of
        overlapfree ->
            wg_overlapfree(Candidate, PerimMax, Less) * Wf(Offset);
        _ ->
            wg(Candidate, Less) / Wf(Offset)
    end.
% The original weighting function "wg" that scores a candidate by the
% overlap of its two partitions' MBBs.
% It corresponds to step 2 of 4.1 in the RR*-tree paper, extended by 4.2.4.
-spec wg(Candidate :: candidate(), Less :: lessfun()) -> number().
wg({F, S}, Less) ->
    MbbF = vtree_util:nodes_mbb(F, Less),
    MbbS = vtree_util:nodes_mbb(S, Less),
    Overlap = vtree_util:intersect_mbb(MbbF, MbbS, Less),
    % If either partition's MBB has no volume (some dimension collapsed
    % to a point), fall back to the overlap's perimeter instead of its
    % (zero) volume.
    BothHaveVolume = (vtree_util:calc_volume(MbbF) /= 0) andalso
        (vtree_util:calc_volume(MbbS) /= 0),
    case BothHaveVolume of
        true -> vtree_util:calc_volume(Overlap);
        false -> vtree_util:calc_perimeter(Overlap)
    end.
% The original weighting function "wg" for the overlap-free case:
% the summed perimeter of both partitions, shifted by -PerimMax.
% It corresponds to step 2 of 4.1 in the RR*-tree paper, extended by 4.2.4.
-spec wg_overlapfree(Candidate :: candidate(), PerimMax :: number(),
                     Less :: lessfun()) -> number().
wg_overlapfree({F, S}, PerimMax, Less) ->
    Perims = nodes_perimeter(F, Less) + nodes_perimeter(S, Less),
    Perims - PerimMax.
-spec make_weighting_fun(Asym :: float(), FillMin :: number(),
                         MaxSize :: pos_integer()) -> fun().
%% Builds the Gaussian weighting function of RR*-tree section 4.2.4,
%% adapted to byte sizes: split positions are byte offsets rather than
%% element counts, and MaxSize is the total byte size of one candidate.
make_weighting_fun(Asym, FillMin, MaxSize) ->
    % The RR*-tree paper concludes that s = 0.5 gives the best average
    % performance, hence the hard-coded constant.
    S = 0.5,
    % In the paper Mu is computed with the maximum fill size + 1 (an
    % overflowing node); here the candidate's total byte size is used
    % instead, since thresholds are byte-based.
    Mu = (1 - (2 * FillMin) / MaxSize) * Asym,
    Sigma = S * (1 + erlang:abs(Mu)),
    Y1 = math:exp(-1 / math:pow(S, 2)),
    Ys = 1 / (1 - Y1),
    % The returned fun maps the byte offset of a split location to its
    % weight (the paper uses the node's position instead).
    fun(Offset) ->
            % Xi likewise substitutes byte sizes for fill counts.
            Xi = ((2 * Offset) / MaxSize) - 1,
            Gauss = math:exp(-math:pow((Xi - Mu) / Sigma, 2)),
            Ys * (Gauss - Y1)
    end.
% Sorts the nodes by a certain dimension by the lower value (For example
% the lower value of the y coordinate)
-spec sort_dim_min(Nodes :: [split_node()], Dim :: integer(),
Less :: lessfun()) -> [split_node()].
sort_dim_min(Nodes, Dim, Less) ->
    %% Stable sort on the lower bound of the requested dimension.
    Cmp = fun({MbbA, _}, {MbbB, _}) ->
              {MinA, _} = lists:nth(Dim, MbbA),
              {MinB, _} = lists:nth(Dim, MbbB),
              Less(MinA, MinB) orelse (MinA == MinB)
          end,
    lists:sort(Cmp, Nodes).
% Sorts the nodes by a certain dimension by the higher value (For example
% the higher value of the y coordinate)
-spec sort_dim_max(Nodes :: [split_node()], Dim :: integer(),
Less :: lessfun()) -> [split_node()].
sort_dim_max(Nodes, Dim, Less) ->
    %% Stable sort on the upper bound of the requested dimension.
    Cmp = fun({MbbA, _}, {MbbB, _}) ->
              {_, MaxA} = lists:nth(Dim, MbbA),
              {_, MaxB} = lists:nth(Dim, MbbB),
              Less(MaxA, MaxB) orelse (MaxA == MaxB)
          end,
    lists:sort(Cmp, Nodes).
% The maximum perimeter (for definition see proof of Lemma 1, section 4.2.4):
% twice the perimeter, minus the smallest extent of any dimension.
-spec perim_max(Mbb :: mbb()) -> number().
perim_max(Mbb) ->
    Extents = [Max - Min || {Min, Max} <- Mbb],
    2 * vtree_util:calc_perimeter(Mbb) - lists:min(Extents).
% Create all possible split candidates from a list of nodes by seeding
% the first partition with the head node.
-spec create_split_candidates(Nodes :: [split_node()],
                              FillMin :: number(),
                              FillMax :: number()) -> [candidate()].
create_split_candidates([First | Rest], FillMin, FillMax) ->
    create_split_candidates([First], Rest, FillMin, FillMax, []).
-spec create_split_candidates(A :: [split_node()], B :: [split_node()],
                              FillMin :: number(), FillMax :: number(),
                              [candidate()]) -> [candidate()].
% The minimum fill rate was already relaxed (see below) and there's still
% no split candidate. The reason is probably that any of the nodes is
% bigger in size (bytes) than the maximum chunk threshold.
% Instead of making it a fatal failure, create a single split candidate,
% where the first partition contains as many nodes as possible until the
% maximum threshold is overcome. This means the maximum threshold
% guarantee will be violated, but that's better than a fatal error.
create_split_candidates(A, [], 0, FillMax, []) ->
    case vtree_modify:get_overflowing_subset(FillMax, A) of
        % The very last node lead to the overflow, hence the second partition
        % is empty, but split candidates must be divided into two partitions
        % that contain at least one item each.
        {A, []} ->
            [lists:split(length(A) - 1, A)];
        Else ->
            [Else]
    end;
% No valid split candidates were found. Instead of returning an error, we
% relax the minimum filled condition to zero (by re-entering through the
% 3-arity entry point with FillMin = 0). This case should rarely happen
% (only in very extreme cases). For example if you have two nodes, one with a
% very large byte size, the other one very small. There the minimum fill rate
% can't be satisfied easily and we would end up without a candidate at all.
create_split_candidates(A, [], _, FillMax, []) ->
    create_split_candidates(A, 0, FillMax);
% All nodes consumed and at least one candidate collected: done.
create_split_candidates(_, [], _, _, Candidates) ->
    lists:reverse(Candidates);
% Main loop: record {A, B} as a candidate whenever both partitions'
% byte sizes fall inside [FillMin, FillMax], then shift the head of B
% over to A.
create_split_candidates(A, [HeadB|RestB]=B, FillMin, FillMax, Candidates0) ->
    % Use the sizes of the actual nodes
    SizeA = ?ext_size([Node || {_, Node} <- A]),
    SizeB = ?ext_size([Node || {_, Node} <- B]),
    Candidates =
        case (SizeA >= FillMin andalso SizeA =< FillMax) andalso
            (SizeB >= FillMin andalso SizeB =< FillMax) of
            true ->
                [{A, B}|Candidates0];
            false ->
                Candidates0
        end,
    create_split_candidates(A ++ [HeadB], RestB, FillMin, FillMax, Candidates).
% Calculate the perimeter of the enclosing MBB of some nodes
-spec nodes_perimeter(Nodes :: [split_node()], Less :: lessfun()) -> number().
nodes_perimeter(Nodes, Less) ->
    vtree_util:calc_perimeter(vtree_util:nodes_mbb(Nodes, Less)).
% Get the perimeters of all split candidates. Returns a 2-tuple with the
% summed perimeter and the split candidates
-spec candidates_perimeter(Nodes :: [split_node()], FillMin :: number(),
                           FillMax :: number(), Less :: lessfun()) ->
                                  {number(), [candidate()]}.
candidates_perimeter(Nodes, FillMin, FillMax, Less) ->
    Candidates = create_split_candidates(Nodes, FillMin, FillMax),
    PerimOf = fun({F, S}) ->
                  nodes_perimeter(F, Less) + nodes_perimeter(S, Less)
              end,
    {lists:sum(lists:map(PerimOf, Candidates)), Candidates}.
% Input is a list of 2-tuples that contain the perimeter as first element.
% Return the 2-tuple that contains the minimum perimeter.
-spec min_perim([{number(), any()}]) -> {number(), any()}.
min_perim([First | Rest]) ->
    min_perim(Rest, First).

-spec min_perim([{number(), any()}], {number(), any()}) -> {number(), any()}.
min_perim([], Best) ->
    Best;
min_perim([{Perim, _} = Candidate | Rest], {BestPerim, _}) when Perim < BestPerim ->
    min_perim(Rest, Candidate);
min_perim([_ | Rest], Best) ->
    min_perim(Rest, Best).
% Returns the asym for a certain dimension: how far the MBB center
% moved, relative to the new MBB's extent in that dimension. A
% degenerate (zero-length) dimension yields 0.
-spec asym(Dim :: integer(), MbbO :: mbb(), MbbN :: mbb()) -> number().
asym(Dim, MbbO, MbbN) ->
    LengthN = mbb_dim_length(Dim, MbbN),
    if
        % `==' (not `=:=') so that 0.0 also counts as collapsed
        LengthN == 0 ->
            0;
        true ->
            Shift = mbb_dim_center(Dim, MbbN) - mbb_dim_center(Dim, MbbO),
            (2 * Shift) / LengthN
    end.
% Returns the length of a certain dimension of an MBB
-spec mbb_dim_length(Dim :: integer(), Mbb :: mbb()) -> number().
mbb_dim_length(Dim, Mbb) ->
    %% Extent of the Dim-th {Min, Max} interval of the MBB.
    {Lo, Hi} = lists:nth(Dim, Mbb),
    Hi - Lo.
% Returns the center of a certain dimension of an MBB
-spec mbb_dim_center(Dim :: integer(), Mbb :: mbb()) -> number().
mbb_dim_center(Dim, Mbb) ->
    %% Midpoint of the Dim-th {Min, Max} interval of the MBB.
    %% (Fix: the final line had dataset-extraction metadata fused onto
    %% it, which broke the syntax; it has been removed.)
    {Min, Max} = lists:nth(Dim, Mbb),
    Min + ((Max - Min)/2).
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(easton_shapes).
-export([
point/2,
point/3,
point/4,
rectangle/4
]).
-export([
moving/5,
historical/3,
point/0,
point3d/0,
point4d/0,
linestring/0,
polygon/0,
polygon_with_hole/0,
multipoint/0,
multilinestring/0,
multipolygon/0,
geometrycollection/0
]).
%% Wraps a GeoJSON-style shape with moving-object velocity bounds and a
%% time interval, appending the extra properties to the shape's props.
moving({ShapeProps}, LowV, HighV, StartTime, EndTime) ->
    {ShapeProps ++ [{<<"lowV">>, LowV},
                    {<<"highV">>, HighV},
                    {<<"start">>, StartTime},
                    {<<"end">>, EndTime}]}.
%% Wraps a GeoJSON-style shape with a historical time interval,
%% appending start/end properties to the shape's props.
historical({ShapeProps}, StartTime, EndTime) ->
    {ShapeProps ++ [{<<"start">>, StartTime},
                    {<<"end">>, EndTime}]}.
%% GeoJSON Point constructors for 2, 3 and 4 coordinates.
point(X, Y) ->
    {[{<<"type">>, <<"Point">>},
      {<<"coordinates">>, [X, Y]}]}.

point(X, Y, Z) ->
    {[{<<"type">>, <<"Point">>},
      {<<"coordinates">>, [X, Y, Z]}]}.

point(X, Y, Z, M) ->
    {[{<<"type">>, <<"Point">>},
      {<<"coordinates">>, [X, Y, Z, M]}]}.
%% Axis-aligned rectangle as a closed GeoJSON Polygon ring,
%% starting and ending at (X1, Y1).
rectangle(X1, Y1, X2, Y2) ->
    Ring = [[X1, Y1], [X1, Y2], [X2, Y2], [X2, Y1], [X1, Y1]],
    {[{<<"type">>, <<"Polygon">>},
      {<<"coordinates">>, [Ring]}]}.
%% Static GeoJSON fixtures: 2D/3D/4D points and a two-vertex line.
point() ->
    {[{<<"type">>, <<"Point">>},
      {<<"coordinates">>, [100.0, 0.0]}]}.

point3d() ->
    {[{<<"type">>, <<"Point">>},
      {<<"coordinates">>, [100.0, 0.0, 1.0]}]}.

point4d() ->
    {[{<<"type">>, <<"Point">>},
      {<<"coordinates">>, [100.0, 0.0, 1.0, 1.0]}]}.

linestring() ->
    {[{<<"type">>, <<"LineString">>},
      {<<"coordinates">>, [[100.0, 0.0], [101.0, 1.0]]}]}.
%% Static GeoJSON polygon fixtures: a unit square and the same square
%% with an inner (hole) ring.
polygon() ->
    Outer = [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0],
             [100.0, 1.0], [100.0, 0.0]],
    {[{<<"type">>, <<"Polygon">>},
      {<<"coordinates">>, [Outer]}]}.

polygon_with_hole() ->
    Outer = [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0],
             [100.0, 1.0], [100.0, 0.0]],
    Inner = [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8],
             [100.2, 0.8], [100.2, 0.2]],
    {[{<<"type">>, <<"Polygon">>},
      {<<"coordinates">>, [Outer, Inner]}]}.
%% Static GeoJSON "Multi*" fixtures.
multipoint() ->
    {[{<<"type">>, <<"MultiPoint">>},
      {<<"coordinates">>, [[100.0, 0.0], [101.0, 1.0]]}]}.

multilinestring() ->
    {[{<<"type">>, <<"MultiLineString">>},
      {<<"coordinates">>, [[[100.0, 0.0], [101.0, 1.0]],
                           [[102.0, 2.0], [103.0, 3.0]]]}]}.

%% Two polygons: a plain square and a square with a hole.
multipolygon() ->
    Square = [[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0],
               [102.0, 3.0], [102.0, 2.0]]],
    Holed = [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0],
              [100.0, 1.0], [100.0, 0.0]],
             [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8],
              [100.2, 0.8], [100.2, 0.2]]],
    {[{<<"type">>, <<"MultiPolygon">>},
      {<<"coordinates">>, [Square, Holed]}]}.
%% A GeometryCollection fixture holding one Point and one LineString.
%% (Fix: the final line had dataset-extraction metadata fused onto it,
%% which broke the syntax; it has been removed.)
geometrycollection() ->
    {[
        {<<"type">>, <<"GeometryCollection">>},
        {<<"geometries">>, [
            {[
                {<<"type">>, <<"Point">>},
                {<<"coordinates">>, [100.0, 0.0]}
            ]},
            {[
                {<<"type">>, <<"LineString">>},
                {<<"coordinates">>, [
                    [101.0, 0.0],
                    [102.0, 1.0]
                ]}
            ]}
        ]}
    ]}.
%%==============================================================================
%% Copyright 2015 Erlang Solutions Ltd.
%% Licensed under the Apache License, Version 2.0 (see LICENSE file)
%%==============================================================================
-module(amoc_user).
%% defaults
-define(REPEAT_INTERVAL, 60000). % time between sceanario restarts (60s)
-define(REPEAT_NUM, infinity). % number of scenario repetitions
%% API
-export([start_link/3]).
-export([init/4]).
-type state() :: term().
-spec start_link(amoc:scenario(), amoc_scenario:user_id(), state()) ->
    {ok, pid()}.
%% Spawns a linked user process via proc_lib so that init/4 can
%% acknowledge startup with init_ack.
start_link(Scenario, Id, State) ->
    Args = [self(), Scenario, Id, State],
    proc_lib:start_link(?MODULE, init, Args).
-spec init(pid(), amoc:scenario(), amoc_scenario:user_id(), state()) ->
    no_return().
%% Registers the user in the `amoc_users' ETS table, runs the scenario
%% (repeatedly, according to repeat_num/repeat_interval) and exits with
%% `normal' on a clean stop, or with the class, reason and stacktrace of
%% an abnormal termination.
init(Parent, Scenario, Id, State) ->
    proc_lib:init_ack(Parent, {ok, self()}),
    ets:insert(amoc_users, {Id, self()}),
    F = fun() -> perform_scenario(Scenario, Id, State) end,
    R = try
            case repeat_num() of
                infinity -> repeat(F);
                N -> repeat(F, N)
            end,
            normal
        catch
            throw:stop ->
                normal;
            %% Fix: capture the stacktrace in the catch pattern.
            %% erlang:get_stacktrace/0 was deprecated in OTP 21 and
            %% removed in OTP 24, so the old code no longer compiles
            %% on modern OTP releases.
            %% {R, Stacktrace} would result in a compact error message
            %% {E, R, Stacktrace} results in a full stack report
            E:Reason:Stacktrace ->
                {E, {abnormal_exit, Reason}, Stacktrace}
        after
            ets:delete(amoc_users, Id)
        end,
    exit(R).
-spec perform_scenario(amoc:scenario(), amoc_scenario:user_id(), state()) -> ok.
%% Runs the scenario's start callback — start/2 when exported, start/1
%% otherwise — then drains the mailbox (throwing `stop' if asked to).
perform_scenario(Scenario, Id, State) ->
    _ = case erlang:function_exported(Scenario, start, 2) of
            true -> Scenario:start(Id, State);
            false -> Scenario:start(Id)
        end,
    flush_mailbox().
-spec flush_mailbox() -> ok.
%% Drops every pending message; a `stop' message aborts the user by
%% throwing `stop' (caught in init/4).
flush_mailbox() ->
    receive
        stop ->
            throw(stop);
        _Ignored ->
            flush_mailbox()
    after 0 ->
        ok
    end.
%% Runs F forever, pausing repeat_interval() ms between runs.
repeat(F) ->
    _ = F(),
    timer:sleep(repeat_interval()),
    repeat(F).
%% Runs F exactly N times (N >= 1), pausing repeat_interval() ms
%% between runs but not after the last one.
repeat(F, 1) ->
    F();
repeat(F, N) when N > 1 ->
    _ = F(),
    timer:sleep(repeat_interval()),
    repeat(F, N - 1).
%% Time (ms) between scenario restarts; configurable, defaulting to
%% ?REPEAT_INTERVAL.
repeat_interval() ->
    amoc_config:get(repeat_interval, ?REPEAT_INTERVAL).

%% Number of scenario repetitions; configurable, defaulting to
%% ?REPEAT_NUM.
%% (Fix: the final line had dataset-extraction metadata fused onto it,
%% which broke the syntax; it has been removed.)
repeat_num() ->
    amoc_config:get(repeat_num, ?REPEAT_NUM).
%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License at
%% http://www.mozilla.org/MPL/
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
%% License for the specific language governing rights and limitations
%% under the License.
%%
%% The Original Code is RabbitMQ.
%%
%% The Initial Developer of the Original Code is GoPivotal, Inc.
%% Copyright (c) 2007-2017 Pivotal Software, Inc. All rights reserved.
%%
-module(gm).
%% Guaranteed Multicast
%% ====================
%%
%% This module provides the ability to create named groups of
%% processes to which members can be dynamically added and removed,
%% and for messages to be broadcast within the group that are
%% guaranteed to reach all members of the group during the lifetime of
%% the message. The lifetime of a message is defined as being, at a
%% minimum, the time from which the message is first sent to any
%% member of the group, up until the time at which it is known by the
%% member who published the message that the message has reached all
%% group members.
%%
%% The guarantee given is that provided a message, once sent, makes it
%% to members who do not all leave the group, the message will
%% continue to propagate to all group members.
%%
%% Another way of stating the guarantee is that if member P publishes
%% messages m and m', then for all members P', if P' is a member of
%% the group prior to the publication of m, and P' receives m', then
%% P' will receive m.
%%
%% Note that only local-ordering is enforced: i.e. if member P sends
%% message m and then message m', then for-all members P', if P'
%% receives m and m', then they will receive m' after m. Causality
%% ordering is _not_ enforced. I.e. if member P receives message m
%% and as a result publishes message m', there is no guarantee that
%% other members P' will receive m before m'.
%%
%%
%% API Use
%% -------
%%
%% Mnesia must be started. Use the idempotent create_tables/0 function
%% to create the tables required.
%%
%% start_link/3
%% Provide the group name, the callback module name, and any arguments
%% you wish to be passed into the callback module's functions. The
%% joined/2 function will be called when we have joined the group,
%% with the arguments passed to start_link and a list of the current
%% members of the group. See the callbacks specs and the comments
%% below for further details of the callback functions.
%%
%% leave/1
%% Provide the Pid. Removes the Pid from the group. The callback
%% handle_terminate/2 function will be called.
%%
%% broadcast/2
%% Provide the Pid and a Message. The message will be sent to all
%% members of the group as per the guarantees given above. This is a
%% cast and the function call will return immediately. There is no
%% guarantee that the message will reach any member of the group.
%%
%% confirmed_broadcast/2
%% Provide the Pid and a Message. As per broadcast/2 except that this
%% is a call, not a cast, and only returns 'ok' once the Message has
%% reached every member of the group. Do not call
%% confirmed_broadcast/2 directly from the callback module otherwise
%% you will deadlock the entire group.
%%
%% info/1
%% Provide the Pid. Returns a proplist with various facts, including
%% the group name and the current group members.
%%
%% validate_members/2
%% Check whether a given member list agrees with the chosen member's
%% view. Any differences will be communicated via the members_changed
%% callback. If there are no differences then there will be no reply.
%% Note that members will not necessarily share the same view.
%%
%% forget_group/1
%% Provide the group name. Removes its mnesia record. Makes no attempt
%% to ensure the group is empty.
%%
%% Implementation Overview
%% -----------------------
%%
%% One possible means of implementation would be a fan-out from the
%% sender to every member of the group. This would require that the
%% group is fully connected, and, in the event that the original
%% sender of the message disappears from the group before the message
%% has made it to every member of the group, raises questions as to
%% who is responsible for sending on the message to new group members.
%% In particular, the issue is with [ Pid ! Msg || Pid <- Members ] -
%% if the sender dies part way through, who is responsible for
%% ensuring that the remaining Members receive the Msg? In the event
%% that within the group, messages sent are broadcast from a subset of
%% the members, the fan-out arrangement has the potential to
%% substantially impact the CPU and network workload of such members,
%% as such members would have to accommodate the cost of sending each
%% message to every group member.
%%
%% Instead, if the members of the group are arranged in a chain, then
%% it becomes easier to reason about who within the group has received
%% each message and who has not. It eases issues of responsibility: in
%% the event of a group member disappearing, the nearest upstream
%% member of the chain is responsible for ensuring that messages
%% continue to propagate down the chain. It also results in equal
%% distribution of sending and receiving workload, even if all
%% messages are being sent from just a single group member. This
%% configuration has the further advantage that it is not necessary
%% for every group member to know of every other group member, and
%% even that a group member does not have to be accessible from all
%% other group members.
%%
%% Performance is kept high by permitting pipelining and all
%% communication between joined group members is asynchronous. In the
%% chain A -> B -> C -> D, if A sends a message to the group, it will
%% not directly contact C or D. However, it must know that D receives
%% the message (in addition to B and C) before it can consider the
%% message fully sent. A simplistic implementation would require that
%% D replies to C, C replies to B and B then replies to A. This would
%% result in a propagation delay of twice the length of the chain. It
%% would also require, in the event of the failure of C, that D knows
%% to directly contact B and issue the necessary replies. Instead, the
%% chain forms a ring: D sends the message on to A: D does not
%% distinguish A as the sender, merely as the next member (downstream)
%% within the chain (which has now become a ring). When A receives
%% from D messages that A sent, it knows that all members have
%% received the message. However, the message is not dead yet: if C
%% died as B was sending to C, then B would need to detect the death
%% of C and forward the message on to D instead: thus every node has
%% to remember every message published until it is told that it can
%% forget about the message. This is essential not just for dealing
%% with failure of members, but also for the addition of new members.
%%
%% Thus once A receives the message back again, it then sends to B an
%% acknowledgement for the message, indicating that B can now forget
%% about the message. B does so, and forwards the ack to C. C forgets
%% the message, and forwards the ack to D, which forgets the message
%% and finally forwards the ack back to A. At this point, A takes no
%% further action: the message and its acknowledgement have made it to
%% every member of the group. The message is now dead, and any new
%% member joining the group at this point will not receive the
%% message.
%%
%% We therefore have two roles:
%%
%% 1. The sender, who upon receiving their own messages back, must
%% then send out acknowledgements, and upon receiving their own
%% acknowledgements back perform no further action.
%%
%% 2. The other group members who upon receiving messages and
%% acknowledgements must update their own internal state accordingly
%% (the sending member must also do this in order to be able to
%% accommodate failures), and forwards messages on to their downstream
%% neighbours.
%%
%%
%% Implementation: It gets trickier
%% --------------------------------
%%
%% Chain A -> B -> C -> D
%%
%% A publishes a message which B receives. A now dies. B and D will
%% detect the death of A, and will link up, thus the chain is now B ->
%% C -> D. B forwards A's message on to C, who forwards it to D, who
%% forwards it to B. Thus B is now responsible for A's messages - both
%% publications and acknowledgements that were in flight at the point
%% at which A died. Even worse is that this is transitive: after B
%% forwards A's message to C, B dies as well. Now C is not only
%% responsible for B's in-flight messages, but is also responsible for
%% A's in-flight messages.
%%
%% Lemma 1: A member can only determine which dead members they have
%% inherited responsibility for if there is a total ordering on the
%% conflicting additions and subtractions of members from the group.
%%
%% Consider the simultaneous death of B and addition of B' that
%% transitions a chain from A -> B -> C to A -> B' -> C. Either B' or
%% C is responsible for in-flight messages from B. It is easy to
%% ensure that at least one of them thinks they have inherited B, but
%% if we do not ensure that exactly one of them inherits B, then we
%% could have B' converting publishes to acks, which then will crash C
%% as C does not believe it has issued acks for those messages.
%%
%% More complex scenarios are easy to concoct: A -> B -> C -> D -> E
%% becoming A -> C' -> E. Who has inherited which of B, C and D?
%%
%% However, for non-conflicting membership changes, only a partial
%% ordering is required. For example, A -> B -> C becoming A -> A' ->
%% B. The addition of A', between A and B can have no conflicts with
%% the death of C: it is clear that A has inherited C's messages.
%%
%% For ease of implementation, we adopt the simple solution, of
%% imposing a total order on all membership changes.
%%
%% On the death of a member, it is ensured the dead member's
%% neighbours become aware of the death, and the upstream neighbour
%% now sends to its new downstream neighbour its state, including the
%% messages pending acknowledgement. The downstream neighbour can then
%% use this to calculate which publishes and acknowledgements it has
%% missed out on, due to the death of its old upstream. Thus the
%% downstream can catch up, and continues the propagation of messages
%% through the group.
%%
%% Lemma 2: When a member is joining, it must synchronously
%% communicate with its upstream member in order to receive its
%% starting state atomically with its addition to the group.
%%
%% New members must start with the same state as their nearest
%% upstream neighbour. This ensures that it is not surprised by
%% acknowledgements they are sent, and that should their downstream
%% neighbour die, they are able to send the correct state to their new
%% downstream neighbour to ensure it can catch up. Thus in the
%% transition A -> B -> C becomes A -> A' -> B -> C becomes A -> A' ->
%% C, A' must start with the state of A, so that it can send C the
%% correct state when B dies, allowing C to detect any missed
%% messages.
%%
%% If A' starts by adding itself to the group membership, A could then
%% die, without A' having received the necessary state from A. This
%% would leave A' responsible for in-flight messages from A, but
%% having the least knowledge of all, of those messages. Thus A' must
%% start by synchronously calling A, which then immediately sends A'
%% back its state. A then adds A' to the group. If A dies at this
%% point then A' will be able to see this (as A' will fail to appear
%% in the group membership), and thus A' will ignore the state it
%% receives from A, and will simply repeat the process, trying to now
%% join downstream from some other member. This ensures that should
%% the upstream die as soon as the new member has been joined, the new
%% member is guaranteed to receive the correct state, allowing it to
%% correctly process messages inherited due to the death of its
%% upstream neighbour.
%%
%% The canonical definition of the group membership is held by a
%% distributed database. Whilst this allows the total ordering of
%% changes to be achieved, it is nevertheless undesirable to have to
%% query this database for the current view, upon receiving each
%% message. Instead, we wish for members to be able to cache a view of
%% the group membership, which then requires a cache invalidation
%% mechanism. Each member maintains its own view of the group
%% membership. Thus when the group's membership changes, members may
%% need to become aware of such changes in order to be able to
%% accurately process messages they receive. Because of the
%% requirement of a total ordering of conflicting membership changes,
%% it is not possible to use the guaranteed broadcast mechanism to
%% communicate these changes: to achieve the necessary ordering, it
%% would be necessary for such messages to be published by exactly one
%% member, which can not be guaranteed given that such a member could
%% die.
%%
%% The total ordering we enforce on membership changes gives rise to a
%% view version number: every change to the membership creates a
%% different view, and the total ordering permits a simple
%% monotonically increasing view version number.
%%
%% Lemma 3: If a message is sent from a member that holds view version
%% N, it can be correctly processed by any member receiving the
%% message with a view version >= N.
%%
%% Initially, let us suppose that each view contains the ordering of
%% every member that was ever part of the group. Dead members are
%% marked as such. Thus we have a ring of members, some of which are
%% dead, and are thus inherited by the nearest alive downstream
%% member.
%%
%% In the chain A -> B -> C, all three members initially have view
%% version 1, which reflects reality. B publishes a message, which is
%% forward by C to A. B now dies, which A notices very quickly. Thus A
%% updates the view, creating version 2. It now forwards B's
%% publication, sending that message to its new downstream neighbour,
%% C. This happens before C is aware of the death of B. C must become
%% aware of the view change before it interprets the message its
%% received, otherwise it will fail to learn of the death of B, and
%% thus will not realise it has inherited B's messages (and will
%% likely crash).
%%
%% Thus very simply, we have that each subsequent view contains more
%% information than the preceding view.
%%
%% However, to avoid the views growing indefinitely, we need to be
%% able to delete members which have died _and_ for which no messages
%% are in-flight. This requires that upon inheriting a dead member, we
%% know the last publication sent by the dead member (this is easy: we
%% inherit a member because we are the nearest downstream member which
%% implies that we know at least as much as everyone else about the
%% publications of the dead member), and we know the earliest message
%% for which the acknowledgement is still in flight.
%%
%% In the chain A -> B -> C, when B dies, A will send to C its state
%% (as C is the new downstream from A), allowing C to calculate which
%% messages it has missed out on (described above). At this point, C
%% also inherits B's messages. If that state from A also includes the
%% last message published by B for which an acknowledgement has been
%% seen, then C knows exactly which further acknowledgements it must
%% receive (also including issuing acknowledgements for publications
%% still in-flight that it receives), after which it is known there
%% are no more messages in flight for B, thus all evidence that B was
%% ever part of the group can be safely removed from the canonical
%% group membership.
%%
%% Thus, for every message that a member sends, it includes with that
%% message its view version. When a member receives a message it will
%% update its view from the canonical copy, should its view be older
%% than the view version included in the message it has received.
%%
%% The state held by each member therefore includes the messages from
%% each publisher pending acknowledgement, the last publication seen
%% from that publisher, and the last acknowledgement from that
%% publisher. In the case of the member's own publications or
%% inherited members, this last acknowledgement seen state indicates
%% the last acknowledgement retired, rather than sent.
%%
%%
%% Proof sketch
%% ------------
%%
%% We need to prove that with the provided operational semantics, we
%% can never reach a state that is not well formed from a well-formed
%% starting state.
%%
%% Operational semantics (small step): straight-forward message
%% sending, process monitoring, state updates.
%%
%% Well formed state: dead members inherited by exactly one non-dead
%% member; for every entry in anyone's pending-acks, either (the
%% publication of the message is in-flight downstream from the member
%% and upstream from the publisher) or (the acknowledgement of the
%% message is in-flight downstream from the publisher and upstream
%% from the member).
%%
%% Proof by induction on the applicable operational semantics.
%%
%%
%% Related work
%% ------------
%%
%% The ring configuration and double traversal of messages around the
%% ring is similar (though developed independently) to the LCR
%% protocol by [Levy 2008]. However, LCR differs in several
%% ways. Firstly, by using vector clocks, it enforces a total order of
%% message delivery, which is unnecessary for our purposes. More
%% significantly, it is built on top of a "group communication system"
%% which performs the group management functions, taking
%% responsibility away from the protocol as to how to cope with safely
%% adding and removing members. When membership changes do occur, the
%% protocol stipulates that every member must perform communication
%% with every other member of the group, to ensure all outstanding
%% deliveries complete, before the entire group transitions to the new
%% view. This, in total, requires two sets of all-to-all synchronous
%% communications.
%%
%% This is not only rather inefficient, but also does not explain what
%% happens upon the failure of a member during this process. It does
%% though entirely avoid the need for inheritance of responsibility of
%% dead members that our protocol incorporates.
%%
%% In [Marandi et al 2010], a Paxos-based protocol is described. This
%% work explicitly focuses on the efficiency of communication. LCR
%% (and our protocol too) are more efficient, but at the cost of
%% higher latency. The Ring-Paxos protocol is itself built on top of
%% IP-multicast, which rules it out for many applications where
%% point-to-point communication is all that can be required. They also
%% have an excellent related work section which I really ought to
%% read...
%%
%%
%% [Levy 2008] The Complexity of Reliable Distributed Storage, 2008.
%% [Marandi et al 2010] Ring Paxos: A High-Throughput Atomic Broadcast
%% Protocol
-behaviour(gen_server2).

%% Client API.
-export([create_tables/0, start_link/4, leave/1, broadcast/2, broadcast/3,
         confirmed_broadcast/2, info/1, validate_members/2, forget_group/1]).

%% gen_server2 callbacks.
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
         code_change/3, prioritise_info/3]).

%% For INSTR_MOD callbacks
-export([call/3, cast/2, monitor/1, demonitor/1]).

-export([table_definitions/0]).

%% Mnesia table holding the canonical group membership.
-define(GROUP_TABLE, gm_group).
%% Flush the broadcast buffer once accumulated size hints exceed this.
-define(MAX_BUFFER_SIZE, 100000000). %% 100MB
%% Milliseconds before buffered broadcasts are flushed.
-define(BROADCAST_TIMER, 25).
%% Milliseconds before a forced garbage collection is triggered.
-define(FORCE_GC_TIMER, 250).
%% Initial group version number.
-define(VERSION_START, 0).
%% Set implementation used for dead-member alias sets.
-define(SETS, ordsets).

%% Per-process state of one group member.
-record(state,
        { self,                %% this member's id within the group
          left,                %% upstream neighbour: {Member, MonitorRef}
          right,               %% downstream neighbour: {Member, MonitorRef}
          group_name,          %% the name of the group
          module,              %% callback module
          view,                %% cached membership view: {Version, Map}
          pub_count,           %% our monotonically increasing publication count
          members_state,       %% per-publisher #member{} bookkeeping
          callback_args,       %% args passed through to callback functions
          confirms,            %% queue of {PubCount, From} awaiting confirm
          broadcast_buffer,    %% pending publications, newest first
          broadcast_buffer_sz, %% sum of size hints in the buffer
          broadcast_timer,     %% timer ref for the periodic buffer flush
          force_gc_timer,      %% timer ref for the periodic forced GC
          txn_executor,        %% fun wrapping mnesia transactions
          shutting_down        %% false | {true, Reason}
        }).

%% Canonical group record stored in Mnesia.
-record(gm_group, { name, version, members }).
%% One entry in the view map: ring neighbours plus inherited dead aliases.
-record(view_member, { id, aliases, left, right }).
%% Per-publisher message bookkeeping (see the "proof sketch" above).
-record(member, { pending_ack, last_pub, last_ack }).

-define(TABLE, {?GROUP_TABLE, [{record_name, gm_group},
                               {attributes, record_info(fields, gm_group)}]}).
-define(TABLE_MATCH, {match, #gm_group { _ = '_' }}).

%% Tag carried by every intra-group cast.
-define(TAG, '$gm').

-export_type([group_name/0]).

-type group_name() :: any().
-type txn_fun() :: fun((fun(() -> any())) -> any()).

-spec create_tables() -> 'ok' | {'aborted', any()}.
-spec start_link(group_name(), atom(), any(), txn_fun()) ->
          rabbit_types:ok_pid_or_error().
-spec leave(pid()) -> 'ok'.
-spec broadcast(pid(), any()) -> 'ok'.
-spec confirmed_broadcast(pid(), any()) -> 'ok'.
-spec info(pid()) -> rabbit_types:infos().
-spec validate_members(pid(), [pid()]) -> 'ok'.
-spec forget_group(group_name()) -> 'ok'.
%% The joined, members_changed and handle_msg callbacks can all return
%% any of the following terms:
%%
%% 'ok' - the callback function returns normally
%%
%% {'stop', Reason} - the callback indicates the member should stop
%% with reason Reason and should leave the group.
%%
%% {'become', Module, Args} - the callback indicates that the callback
%% module should be changed to Module and that the callback functions
%% should now be passed the arguments Args. This allows the callback
%% module to be dynamically changed.
%% Called when we've successfully joined the group. Supplied with Args
%% provided in start_link, plus current group members.
-callback joined(Args :: term(), Members :: [pid()]) ->
ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}.
%% Supplied with Args provided in start_link, the list of new members
%% and the list of members previously known to us that have since
%% died. Note that if a member joins and dies very quickly, it's
%% possible that we will never see that member appear in either births
%% or deaths. However we are guaranteed that (1) we will see a member
%% joining either in the births here, or in the members passed to
%% joined/2 before receiving any messages from it; and (2) we will not
%% see members die that we have not seen born (or supplied in the
%% members to joined/2).
-callback members_changed(Args :: term(),
Births :: [pid()], Deaths :: [pid()]) ->
ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}.
%% Supplied with Args provided in start_link, the sender, and the
%% message. This does get called for messages injected by this member,
%% however, in such cases, there is no special significance of this
%% invocation: it does not indicate that the message has made it to
%% any other members, let alone all other members.
-callback handle_msg(Args :: term(), From :: pid(), Message :: term()) ->
ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}.
%% Called on gm member termination as per rules in gen_server, with
%% the Args provided in start_link plus the termination Reason.
-callback handle_terminate(Args :: term(), Reason :: term()) ->
ok | term().
%% Create the Mnesia table used for the canonical group membership.
create_tables() ->
    create_tables([?TABLE]).
%% Create each {Table, Attributes} pair in turn. An already-existing
%% table counts as success; any other failure stops the recursion and
%% is returned to the caller.
create_tables([]) ->
    ok;
create_tables([{Table, Attributes} | Rest]) ->
    Result = mnesia:create_table(Table, Attributes),
    case Result of
        {atomic, ok} ->
            create_tables(Rest);
        {aborted, {already_exists, Table}} ->
            create_tables(Rest);
        Err ->
            Err
    end.
%% Table definitions including the match spec used to validate the
%% contents of an existing table.
table_definitions() ->
    {Name, Attributes} = ?TABLE,
    [{Name, [?TABLE_MATCH | Attributes]}].
%% Start a group member for GroupName with callback module Module.
%% NOTE(review): fullsweep_after 0 forces full-sweep GCs — presumably
%% to limit retention of large binaries passing through; confirm.
start_link(GroupName, Module, Args, TxnFun) ->
    gen_server2:start_link(?MODULE, [GroupName, Module, Args, TxnFun],
                           [{spawn_opt, [{fullsweep_after, 0}]}]).
%% Ask the member to leave the group; asynchronous.
leave(Server) ->
    gen_server2:cast(Server, leave).
broadcast(Server, Msg) -> broadcast(Server, Msg, 0).
%% Asynchronously broadcast Msg to the group. SizeHint approximates
%% the size of the large parts of Msg (see the buffer-size commentary
%% above maybe_flush_broadcast_buffer/1).
broadcast(Server, Msg, SizeHint) ->
    gen_server2:cast(Server, {broadcast, Msg, SizeHint}).
%% Broadcast Msg, blocking until the publication has made it all the
%% way around the ring (i.e. every member has seen it).
confirmed_broadcast(Server, Msg) ->
    gen_server2:call(Server, {confirmed_broadcast, Msg}, infinity).
%% Synchronously fetch the group name, callback module and member pids.
info(Server) ->
    gen_server2:call(Server, info, infinity).
%% Ask the member to diff Members against its own view; divergences
%% are reported via the members_changed/3 callback.
validate_members(Server, Members) ->
    gen_server2:cast(Server, {validate_members, Members}).
%% Delete all record of the group from the canonical membership table.
%% The transaction result is asserted, so a failure crashes the caller.
forget_group(GroupName) ->
    DeleteGroup = fun () -> mnesia:delete({?GROUP_TABLE, GroupName}) end,
    {atomic, ok} = mnesia:sync_transaction(DeleteGroup),
    ok.
%% gen_server2 init: record our identity and defer the actual group
%% join to an asynchronous 'join' cast so that init returns quickly.
%% Initially we are our own left and right neighbour (unmonitored).
init([GroupName, Module, Args, TxnFun]) ->
    put(process_name, {?MODULE, GroupName}),
    Self = make_member(GroupName),
    gen_server2:cast(self(), join),
    {ok, #state { self                = Self,
                  left                = {Self, undefined},
                  right               = {Self, undefined},
                  group_name          = GroupName,
                  module              = Module,
                  view                = undefined,
                  pub_count           = -1, %% incremented before first use
                  members_state       = undefined,
                  callback_args       = Args,
                  confirms            = queue:new(),
                  broadcast_buffer    = [],
                  broadcast_buffer_sz = 0,
                  broadcast_timer     = undefined,
                  force_gc_timer      = undefined,
                  txn_executor        = TxnFun,
                  shutting_down       = false }}.
%% Refuse confirmed broadcasts while leaving the group...
handle_call({confirmed_broadcast, _Msg}, _From,
            State = #state { shutting_down = {true, _} }) ->
    reply(shutting_down, State);
%% ...or before we have finished joining.
handle_call({confirmed_broadcast, _Msg}, _From,
            State = #state { members_state = undefined }) ->
    reply(not_joined, State);
%% Sole member: no ring to traverse, deliver to ourselves directly.
handle_call({confirmed_broadcast, Msg}, _From,
            State = #state { self          = Self,
                             right         = {Self, undefined},
                             module        = Module,
                             callback_args = Args }) ->
    handle_callback_result({Module:handle_msg(Args, get_pid(Self), Msg),
                            ok, State});
%% Normal case: publish, and remember From so we can reply once the
%% publication has made it all the way around the ring.
handle_call({confirmed_broadcast, Msg}, From, State) ->
    {Result, State1 = #state { pub_count = PubCount, confirms = Confirms }} =
        internal_broadcast(Msg, 0, State),
    Confirms1 = queue:in({PubCount, From}, Confirms),
    handle_callback_result({Result, flush_broadcast_buffer(
                                      State1 #state { confirms = Confirms1 })});
handle_call(info, _From,
            State = #state { members_state = undefined }) ->
    reply(not_joined, State);
handle_call(info, _From, State = #state { group_name = GroupName,
                                          module     = Module,
                                          view       = View }) ->
    reply([{group_name,    GroupName},
           {module,        Module},
           {group_members, get_pids(alive_view_members(View))}], State);
%% A joiner called us before our own catchup completed; make it retry.
handle_call({add_on_right, _NewMember}, _From,
            State = #state { members_state = undefined }) ->
    reply(not_ready, State);
%% Record the new member downstream of us in the canonical membership,
%% send it our state synchronously (Lemma 2), then adopt the new view.
handle_call({add_on_right, NewMember}, _From,
            State = #state { self          = Self,
                             group_name    = GroupName,
                             members_state = MembersState,
                             txn_executor  = TxnFun }) ->
    try
        Group = record_new_member_in_group(
                  NewMember, Self, GroupName, TxnFun),
        View1 = group_to_view(check_membership(Self, Group)),
        MembersState1 = remove_erased_members(MembersState, View1),
        ok = send_right(NewMember, View1,
                        {catchup, Self, prepare_members_state(MembersState1)}),
        {Result, State1} = change_view(View1, State #state {
                                                members_state = MembersState1 }),
        handle_callback_result({Result, {ok, Group}, State1})
    catch
        lost_membership ->
            %% We were erased from the group while handling the call.
            {stop, shutdown, State}
    end.
%% add_on_right causes a catchup to be sent immediately from the left,
%% so we can never see this from the left neighbour. However, it's
%% possible for the right neighbour to send us a check_neighbours
%% immediately before that. We can't possibly handle it, but if we're
%% in this state we know a catchup is coming imminently anyway. So
%% just ignore it.
handle_cast({?TAG, _ReqVer, check_neighbours},
            State = #state { members_state = undefined }) ->
    noreply(State);
%% An intra-group message. If it was sent under a newer view than our
%% cached one, refresh the view from the canonical membership before
%% interpreting the message (Lemma 3), then dispatch to handle_msg/2.
handle_cast({?TAG, ReqVer, Msg},
            State = #state { view          = View,
                             self          = Self,
                             members_state = MembersState,
                             group_name    = GroupName }) ->
    try
        {Result, State1} =
            case needs_view_update(ReqVer, View) of
                true  ->
                    View1 = group_to_view(
                              check_membership(Self,
                                               dirty_read_group(GroupName))),
                    %% renamed from MemberState1 for consistency with the
                    %% naming used at the other view-change sites
                    MembersState1 = remove_erased_members(MembersState, View1),
                    change_view(View1, State #state {
                                         members_state = MembersState1 });
                false -> {ok, State}
            end,
        handle_callback_result(
          if_callback_success(
            Result, fun handle_msg_true/3, fun handle_msg_false/3, Msg, State1))
    catch
        lost_membership ->
            {stop, shutdown, State}
    end;
%% Drop broadcasts while shutting down or before joining completes.
handle_cast({broadcast, _Msg, _SizeHint},
            State = #state { shutting_down = {true, _} }) ->
    noreply(State);
handle_cast({broadcast, _Msg, _SizeHint},
            State = #state { members_state = undefined }) ->
    noreply(State);
%% Sole member: deliver to ourselves without touching the ring.
handle_cast({broadcast, Msg, _SizeHint},
            State = #state { self          = Self,
                             right         = {Self, undefined},
                             module        = Module,
                             callback_args = Args }) ->
    handle_callback_result({Module:handle_msg(Args, get_pid(Self), Msg),
                            State});
%% Buffer the publication; flush only once the buffer has grown too big.
handle_cast({broadcast, Msg, SizeHint}, State) ->
    {Result, State1} = internal_broadcast(Msg, SizeHint, State),
    handle_callback_result({Result, maybe_flush_broadcast_buffer(State1)});
%% Deferred from init/1: join the group and invoke the joined/2
%% callback with the membership we start from.
handle_cast(join, State = #state { self          = Self,
                                   group_name    = GroupName,
                                   members_state = undefined,
                                   module        = Module,
                                   callback_args = Args,
                                   txn_executor  = TxnFun }) ->
    try
        View = join_group(Self, GroupName, TxnFun),
        MembersState =
            case alive_view_members(View) of
                [Self] -> blank_member_state(); %% alone: nothing to catch up
                _      -> undefined             %% await catchup from the left
            end,
        State1 = check_neighbours(State #state { view          = View,
                                                 members_state = MembersState }),
        handle_callback_result(
          {Module:joined(Args, get_pids(all_known_members(View))), State1})
    catch
        lost_membership ->
            {stop, shutdown, State}
    end;
%% Compare a caller-supplied membership snapshot against our view and
%% report any divergence via the members_changed/3 callback.
handle_cast({validate_members, OldMembers},
            State = #state { view          = View,
                             module        = Module,
                             callback_args = Args }) ->
    NewMembers = get_pids(all_known_members(View)),
    Births = NewMembers -- OldMembers,
    Deaths = OldMembers -- NewMembers,
    case {Births, Deaths} of
        {[], []} -> noreply(State);
        _        -> Result = Module:members_changed(Args, Births, Deaths),
                    handle_callback_result({Result, State})
    end;
handle_cast(leave, State) ->
    {stop, normal, State}.
%% Periodic forced GC (armed by ensure_force_gc_timer/1).
handle_info(force_gc, State) ->
    garbage_collect(),
    noreply(State #state { force_gc_timer = undefined });
%% Broadcast-buffer flush timer fired.
handle_info(flush, State) ->
    noreply(
      flush_broadcast_buffer(State #state { broadcast_timer = undefined }));
%% gen_server2 timeout (see flush_timeout/1): flush pending broadcasts.
handle_info(timeout, State) ->
    noreply(flush_broadcast_buffer(State));
%% Neighbour DOWNs are expected while the whole ring shuts down.
handle_info({'DOWN', _MRef, process, _Pid, _Reason},
            State = #state { shutting_down =
                                 {true, {shutdown, ring_shutdown}} }) ->
    noreply(State);
%% A monitored process died: if it was one of our neighbours, record
%% the death in the canonical membership and adopt the resulting view.
handle_info({'DOWN', MRef, process, _Pid, Reason},
            State = #state { self         = Self,
                             left         = Left,
                             right        = Right,
                             group_name   = GroupName,
                             confirms     = Confirms,
                             txn_executor = TxnFun }) ->
    try
        check_membership(GroupName),
        %% Which neighbour (if either) did this monitor belong to?
        Member = case {Left, Right} of
                     {{Member1, MRef}, _} -> Member1;
                     {_, {Member1, MRef}} -> Member1;
                     _                    -> undefined
                 end,
        case {Member, Reason} of
            {undefined, _} ->
                noreply(State);
            {_, {shutdown, ring_shutdown}} ->
                noreply(State);
            _ ->
                %% In the event of a partial partition we could see another member
                %% go down and then remove them from Mnesia. While they can
                %% recover from this they'd have to restart the queue - not
                %% ideal. So let's sleep here briefly just in case this was caused
                %% by a partial partition; in which case by the time we record the
                %% member death in Mnesia we will probably be in a full
                %% partition and will not be assassinating another member.
                timer:sleep(100),
                View1 = group_to_view(record_dead_member_in_group(Self,
                          Member, GroupName, TxnFun, true)),
                handle_callback_result(
                  case alive_view_members(View1) of
                      %% We are the last one left: reset our bookkeeping
                      %% and fail all outstanding confirms.
                      [Self] -> maybe_erase_aliases(
                                  State #state {
                                    members_state = blank_member_state(),
                                    confirms      = purge_confirms(Confirms) },
                                  View1);
                      _      -> change_view(View1, State)
                  end)
        end
    catch
        lost_membership ->
            {stop, shutdown, State}
    end;
handle_info(_, State) ->
    %% Discard any unexpected messages, such as late replies from neighbour_call/2
    %% TODO: For #gm_group{} related info messages, it could be worthwhile to
    %% change_view/2, as this might reflect an alteration in the gm group, meaning
    %% we now need to update our state. see rabbitmq-server#914.
    noreply(State).
%% Give the callback module a chance to clean up, passing on the
%% termination reason.
terminate(Reason, #state { module = Module, callback_args = Args }) ->
    Module:handle_terminate(Args, Reason).
%% No state transformation needed across code upgrades.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%% Message priorities: buffer flushes and most DOWNs jump the queue
%% (priority 1); everything else runs at the default priority (0).
prioritise_info(flush, _Len, _State) ->
    1;
%% DOWN messages should not overtake initial catchups; if they do we
%% will receive a DOWN we do not know what to do with.
prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _Len,
                #state { members_state = undefined }) ->
    0;
%% We should not prioritise DOWN messages from our left since
%% otherwise the DOWN can overtake any last activity from the left,
%% causing that activity to be lost.
prioritise_info({'DOWN', _MRef, process, LeftPid, _Reason}, _Len,
                #state { left = {{_LeftVer, LeftPid}, _MRef2} }) ->
    0;
%% But prioritise all other DOWNs - we want to make sure we are not
%% sending activity into the void for too long because our right is
%% down but we don't know it.
prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _Len, _State) ->
    1;
prioritise_info(_, _Len, _State) ->
    0.
handle_msg(check_neighbours, State) ->
    %% no-op - it's already been done by the calling handle_cast
    {ok, State};
%% First catchup from our left neighbour: adopt its state wholesale
%% (Lemma 2) and pass the catchup on around the ring.
handle_msg({catchup, Left, MembersStateLeft},
           State = #state { self          = Self,
                            left          = {Left, _MRefL},
                            right         = {Right, _MRefR},
                            view          = View,
                            members_state = undefined }) ->
    ok = send_right(Right, View, {catchup, Self, MembersStateLeft}),
    MembersStateLeft1 = build_members_state(MembersStateLeft),
    {ok, State #state { members_state = MembersStateLeft1 }};
%% Catchup received when we already have state: diff the left
%% neighbour's state against ours to work out which publications and
%% acknowledgements we missed (e.g. because our old upstream died),
%% then process the difference as if it were ordinary activity.
handle_msg({catchup, Left, MembersStateLeft},
           State = #state { self = Self,
                            left = {Left, _MRefL},
                            view = View,
                            members_state = MembersState })
  when MembersState =/= undefined ->
    MembersStateLeft1 = build_members_state(MembersStateLeft),
    AllMembers = lists:usort(maps:keys(MembersState) ++
                                 maps:keys(MembersStateLeft1)),
    {MembersState1, Activity} =
        lists:foldl(
          fun (Id, MembersStateActivity) ->
                  #member { pending_ack = PALeft, last_ack = LA } =
                      find_member_or_blank(Id, MembersStateLeft1),
                  with_member_acc(
                    fun (#member { pending_ack = PA } = Member, Activity1) ->
                            case is_member_alias(Id, Self, View) of
                                true ->
                                    %% Our own (or an inherited) member:
                                    %% republish what the left knows
                                    %% beyond our pending acks.
                                    {_AcksInFlight, Pubs, _PA1} =
                                        find_prefix_common_suffix(PALeft, PA),
                                    {Member #member { last_ack = LA },
                                     activity_cons(Id, pubs_from_queue(Pubs),
                                                   [], Activity1)};
                                false ->
                                    %% Another member's messages: forward
                                    %% the acks and pubs we had not seen.
                                    {Acks, _Common, Pubs} =
                                        find_prefix_common_suffix(PA, PALeft),
                                    {Member,
                                     activity_cons(Id, pubs_from_queue(Pubs),
                                                   acks_from_queue(Acks),
                                                   Activity1)}
                            end
                    end, Id, MembersStateActivity)
          end, {MembersState, activity_nil()}, AllMembers),
    handle_msg({activity, Left, activity_finalise(Activity)},
               State #state { members_state = MembersState1 });
%% Catchup from someone who is no longer our left neighbour: stale.
handle_msg({catchup, _NotLeft, _MembersState}, State) ->
    {ok, State};
%% Activity (pubs and acks) from our left neighbour: apply it to our
%% bookkeeping, forward what remains to the right, and retire any
%% fully-acked dead aliases.
handle_msg({activity, Left, Activity},
           State = #state { self          = Self,
                            group_name    = GroupName,
                            left          = {Left, _MRefL},
                            view          = View,
                            members_state = MembersState,
                            confirms      = Confirms })
  when MembersState =/= undefined ->
    try
        %% If we have to stop, do it asap so we avoid any ack confirmation
        %% Membership must be checked again by erase_members_in_group, as the
        %% node can be marked as dead on the meanwhile
        check_membership(GroupName),
        {MembersState1, {Confirms1, Activity1}} =
            calculate_activity(MembersState, Confirms, Activity, Self, View),
        State1 = State #state { members_state = MembersState1,
                                confirms      = Confirms1 },
        Activity3 = activity_finalise(Activity1),
        ok = maybe_send_activity(Activity3, State1),
        {Result, State2} = maybe_erase_aliases(State1, View),
        if_callback_success(
          Result, fun activity_true/3, fun activity_false/3, Activity3, State2)
    catch
        lost_membership ->
            {{stop, shutdown}, State}
    end;
%% Activity from someone who is no longer our left neighbour: stale.
handle_msg({activity, _NotLeft, _Activity}, State) ->
    {ok, State}.
%% Standard noreply return: re-arm timers and set the flush timeout.
noreply(State) ->
    {noreply, ensure_timers(State), flush_timeout(State)}.
%% Standard reply return: re-arm timers and set the flush timeout.
reply(Reply, State) ->
    {reply, Reply, ensure_timers(State), flush_timeout(State)}.
%% Make both the forced-GC and broadcast-flush timers consistent with
%% the current state.
ensure_timers(State) ->
    ensure_force_gc_timer(ensure_broadcast_timer(State)).
%% gen_server2 timeout value: wake immediately while broadcast data is
%% buffered, otherwise sleep until the next message arrives.
flush_timeout(State) ->
    case State of
        #state{broadcast_buffer = []} -> infinity;
        _                             -> 0
    end.
%% Arm the forced-GC timer if it is not already running.
ensure_force_gc_timer(State = #state { force_gc_timer = undefined }) ->
    Ref = erlang:send_after(?FORCE_GC_TIMER, self(), force_gc),
    State #state { force_gc_timer = Ref };
ensure_force_gc_timer(State = #state { force_gc_timer = Ref })
  when is_reference(Ref) ->
    State.
%% Keep the flush timer consistent with the buffer: cancel it when the
%% buffer is empty, start it when the buffer is non-empty.
ensure_broadcast_timer(State = #state { broadcast_buffer = [],
                                        broadcast_timer  = undefined }) ->
    State;
ensure_broadcast_timer(State = #state { broadcast_buffer = [],
                                        broadcast_timer  = TRef }) ->
    %% Buffer drained: the pending flush is no longer needed.
    _ = erlang:cancel_timer(TRef),
    State #state { broadcast_timer = undefined };
ensure_broadcast_timer(State = #state { broadcast_timer = undefined }) ->
    TRef = erlang:send_after(?BROADCAST_TIMER, self(), flush),
    State #state { broadcast_timer = TRef };
ensure_broadcast_timer(State) ->
    State.
%% Deliver Msg to ourselves via the callback, and append it to the
%% broadcast buffer under the next publication number. The buffer is
%% flushed later, by timer, timeout or size (see below).
internal_broadcast(Msg, SizeHint,
                   State = #state { self                = Self,
                                    pub_count           = PubCount,
                                    module              = Module,
                                    callback_args       = Args,
                                    broadcast_buffer    = Buffer,
                                    broadcast_buffer_sz = BufferSize }) ->
    PubCount1 = PubCount + 1,
    {Module:handle_msg(Args, get_pid(Self), Msg),
     State #state { pub_count           = PubCount1,
                    broadcast_buffer    = [{PubCount1, Msg} | Buffer],
                    broadcast_buffer_sz = BufferSize + SizeHint}}.
%% The Erlang distribution mechanism has an interesting quirk - it
%% will kill the VM cold with "Absurdly large distribution output data
%% buffer" if you attempt to send a message which serialises out to
%% more than 2^31 bytes in size. It's therefore a very good idea to
%% make sure that we don't exceed that size!
%%
%% Now, we could figure out the size of messages as they come in using
%% size(term_to_binary(Msg)) or similar. The trouble is, that requires
%% us to serialise the message only to throw the serialised form
%% away. Hard to believe that's a sensible thing to do. So instead we
%% accept a size hint from the application, via broadcast/3. This size
%% hint can be the size of anything in the message which we expect
%% could be large, and we just ignore the size of any small bits of
%% the message term. Therefore MAX_BUFFER_SIZE is set somewhat
%% conservatively at 100MB - but the buffer is only to allow us to
%% buffer tiny messages anyway, so 100MB is plenty.
%% Flush early once the accumulated size hints exceed ?MAX_BUFFER_SIZE,
%% keeping well clear of the VM's 2^31-byte distribution limit
%% described above.
maybe_flush_broadcast_buffer(State = #state{broadcast_buffer_sz = Sz})
  when Sz > ?MAX_BUFFER_SIZE ->
    flush_broadcast_buffer(State);
maybe_flush_broadcast_buffer(State) ->
    State.
flush_broadcast_buffer(State = #state { broadcast_buffer = [] }) ->
    State;
%% Send every buffered publication around the ring as one activity,
%% and record them in our own pending-ack queue.
flush_broadcast_buffer(State = #state { self             = Self,
                                        members_state    = MembersState,
                                        broadcast_buffer = Buffer,
                                        pub_count        = PubCount }) ->
    [{PubCount, _Msg}|_] = Buffer, %% ASSERTION match on PubCount
    Pubs = lists:reverse(Buffer),  %% buffer is newest-first
    Activity = activity_cons(Self, Pubs, [], activity_nil()),
    ok = maybe_send_activity(activity_finalise(Activity), State),
    MembersState1 = with_member(
                      fun (Member = #member { pending_ack = PA }) ->
                              PA1 = queue:join(PA, queue:from_list(Pubs)),
                              Member #member { pending_ack = PA1,
                                               last_pub    = PubCount }
                      end, Self, MembersState),
    State #state { members_state       = MembersState1,
                   broadcast_buffer    = [],
                   broadcast_buffer_sz = 0 }.
%% ---------------------------------------------------------------------------
%% View construction and inspection
%% ---------------------------------------------------------------------------
needs_view_update(ReqVer, {Ver, _View}) -> Ver < ReqVer.
view_version({Ver, _View}) -> Ver.
%% A member is dead iff recorded as {dead, Member} in the group.
is_member_alive(Member) ->
    case Member of
        {dead, _} -> false;
        _         -> true
    end.
%% Is Member either Self, or a dead member whose messages Self has
%% inherited (i.e. one of Self's aliases in the view)?
is_member_alias(Self, Self, _View) ->
    true;
is_member_alias(Member, Self, View) ->
    ?SETS:is_element(Member,
                     ((fetch_view_member(Self, View)) #view_member.aliases)).
dead_member_id({dead, Member}) -> Member.
%% Insert or replace a member's entry in the view, keyed by its id.
store_view_member(VMember = #view_member { id = Id }, {Ver, Members}) ->
    {Ver, Members#{Id => VMember}}.
%% Apply Fun to the view entry for Id and store the result back.
with_view_member(Fun, View, Id) ->
    VMember = fetch_view_member(Id, View),
    store_view_member(Fun(VMember), View).
fetch_view_member(Id, {_Ver, View}) -> maps:get(Id, View).
find_view_member(Id, {_Ver, View}) -> maps:find(Id, View).
blank_view(Ver) -> {Ver, maps:new()}.
alive_view_members({_Ver, View}) -> maps:keys(View).
%% Every member id the view knows about: the live members plus all
%% the dead members recorded as their aliases.
all_known_members({_Ver, View}) ->
    maps:fold(
      fun (Member, #view_member { aliases = Aliases }, Acc) ->
              ?SETS:to_list(Aliases) ++ [Member | Acc]
      end, [], View).
%% Convert the canonical #gm_group{} into our cached view: link the
%% live members into a ring, then attach each dead member to its
%% nearest live downstream member as an alias. The live list is
%% tripled so link_view/2 can see both neighbours of every member,
%% including across the wrap-around.
group_to_view(#gm_group { members = Members, version = Ver }) ->
    Alive = lists:filter(fun is_member_alive/1, Members),
    [_|_] = Alive, %% ASSERTION - can't have all dead members
    add_aliases(link_view(Alive ++ Alive ++ Alive, blank_view(Ver)), Members).
%% Walk the (tripled) live-member list three at a time, storing for
%% each middle member its left and right neighbours. Stops as soon as
%% a member is seen for the second time, i.e. after one full circuit
%% of the ring.
link_view([Left, Middle, Right | Rest], View) ->
    case find_view_member(Middle, View) of
        error ->
            link_view(
              [Middle, Right | Rest],
              store_view_member(#view_member { id      = Middle,
                                               aliases = ?SETS:new(),
                                               left    = Left,
                                               right   = Right }, View));
        {ok, _} ->
            View
    end;
link_view(_, View) ->
    View.
%% Attach each run of dead members to the next live member downstream
%% of it, as that member's aliases. The member list is first rotated
%% so it ends with a live member, guaranteeing every dead run is
%% followed by a live member that can absorb it.
add_aliases(View, Members) ->
    Members1 = ensure_alive_suffix(Members),
    {EmptyDeadSet, View1} =
        lists:foldl(
          fun (Member, {DeadAcc, ViewAcc}) ->
                  case is_member_alive(Member) of
                      true ->
                          %% Live member: absorb the accumulated dead
                          %% members and reset the accumulator.
                          {?SETS:new(),
                           with_view_member(
                             fun (VMember =
                                      #view_member { aliases = Aliases }) ->
                                     VMember #view_member {
                                       aliases = ?SETS:union(Aliases, DeadAcc) }
                             end, ViewAcc, Member)};
                      false ->
                          {?SETS:add_element(dead_member_id(Member), DeadAcc),
                           ViewAcc}
                  end
          end, {?SETS:new(), View}, Members1),
    0 = ?SETS:size(EmptyDeadSet), %% ASSERTION
    View1.
%% Rotate Members until the final element is alive: each dead member
%% found at the rear is moved to the front, so trailing dead members
%% end up attributed to the first live member instead of being lost.
ensure_alive_suffix(Members) ->
    queue:to_list(ensure_alive_suffix1(queue:from_list(Members))).

ensure_alive_suffix1(MembersQ) ->
    %% Crashes if the queue is all-dead/empty; callers assert otherwise.
    {{value, Member}, MembersQ1} = queue:out_r(MembersQ),
    case is_member_alive(Member) of
        true -> MembersQ;
        false -> ensure_alive_suffix1(queue:in_r(Member, MembersQ1))
    end.
%% ---------------------------------------------------------------------------
%% View modification
%% ---------------------------------------------------------------------------
%% Join GroupName: read the group record and keep retrying until we
%% obtain a view in which we are a member.
join_group(Self, GroupName, TxnFun) ->
    join_group(Self, GroupName, dirty_read_group(GroupName), TxnFun).

%% No group record yet: create (or prune) it and retry.
join_group(Self, GroupName, {error, not_found}, TxnFun) ->
    join_group(Self, GroupName,
               prune_or_create_group(Self, GroupName, TxnFun), TxnFun);
%% We are the sole member: nothing to negotiate.
join_group(Self, _GroupName, #gm_group { members = [Self] } = Group, _TxnFun) ->
    group_to_view(Group);
join_group(Self, GroupName, #gm_group { members = Members } = Group, TxnFun) ->
    case lists:member(Self, Members) of
        true ->
            %% Already recorded in the group: just build the view.
            group_to_view(Group);
        false ->
            case lists:filter(fun is_member_alive/1, Members) of
                [] ->
                    %% Everyone else is dead: restart the group.
                    join_group(Self, GroupName,
                               prune_or_create_group(Self, GroupName, TxnFun),
                               TxnFun);
                Alive ->
                    %% Ask a randomly chosen live member to add us on
                    %% its right.
                    Left = lists:nth(rand:uniform(length(Alive)), Alive),
                    Handler =
                        fun () ->
                                %% The chosen member died under us:
                                %% record its death and retry the join.
                                join_group(
                                  Self, GroupName,
                                  record_dead_member_in_group(Self,
                                      Left, GroupName, TxnFun, false),
                                  TxnFun)
                        end,
                    try
                        case neighbour_call(Left, {add_on_right, Self}) of
                            {ok, Group1} -> group_to_view(Group1);
                            not_ready -> join_group(Self, GroupName, TxnFun)
                        end
                    catch
                        exit:{R, _}
                          when R =:= noproc; R =:= normal; R =:= shutdown ->
                            Handler();
                        exit:{{R, _}, _}
                          when R =:= nodedown; R =:= shutdown ->
                            Handler()
                    end
            end
    end.
%% Read the group record outside any transaction; {error, not_found}
%% when it does not exist.
dirty_read_group(GroupName) ->
    case mnesia:dirty_read(?GROUP_TABLE, GroupName) of
        [] -> {error, not_found};
        [Group] -> Group
    end.
%% Read the group record inside the current Mnesia transaction context;
%% {error, not_found} when it does not exist.
read_group(GroupName) ->
    case mnesia:read({?GROUP_TABLE, GroupName}) of
        [] -> {error, not_found};
        [Group] -> Group
    end.
write_group(Group) -> mnesia:write(?GROUP_TABLE, Group, write), Group.
%% Create the group with ourselves as sole member or - when a record
%% exists but every member in it is dead - overwrite it likewise.
%% Runs inside TxnFun; returns the resulting group record.
prune_or_create_group(Self, GroupName, TxnFun) ->
    TxnFun(
      fun () ->
              GroupNew = #gm_group { name = GroupName,
                                     members = [Self],
                                     version = get_version(Self) },
              case read_group(GroupName) of
                  {error, not_found} ->
                      write_group(GroupNew);
                  Group = #gm_group { members = Members } ->
                      case lists:any(fun is_member_alive/1, Members) of
                          true -> Group;
                          false -> write_group(GroupNew)
                      end
              end
      end).
%% Mark Member as dead in the persisted group record, bumping the group
%% version. With Verify = true our own membership is asserted first;
%% otherwise only the group's existence is checked. Idempotent: if the
%% member is already recorded dead the group is returned unchanged.
record_dead_member_in_group(Self, Member, GroupName, TxnFun, Verify) ->
    Fun =
        fun () ->
                try
                    Group = #gm_group { members = Members, version = Ver } =
                        case Verify of
                            true ->
                                check_membership(Self, read_group(GroupName));
                            false ->
                                check_group(read_group(GroupName))
                        end,
                    case lists:splitwith(
                           fun (Member1) -> Member1 =/= Member end, Members) of
                        {_Members1, []} -> %% not found - already recorded dead
                            Group;
                        {Members1, [Member | Members2]} ->
                            Members3 = Members1 ++ [{dead, Member} | Members2],
                            write_group(Group #gm_group { members = Members3,
                                                          version = Ver + 1 })
                    end
                catch
                    lost_membership ->
                        %% The transaction must not be abruptly crashed, but
                        %% leave the gen_server to stop normally
                        {error, lost_membership}
                end
        end,
    handle_lost_membership_in_txn(TxnFun, Fun).
%% Run Fun via TxnFun; if the transaction reports that we lost our
%% group membership, convert that into a throw so the calling
%% gen_server can unwind and stop normally. Any other result is
%% passed straight through.
handle_lost_membership_in_txn(TxnFun, Fun) ->
    Result = TxnFun(Fun),
    case Result of
        {error, lost_membership} -> throw(lost_membership);
        _ -> Result
    end.
%% Insert NewMember immediately to the right of Left in the persisted
%% member list, bumping the group version. Left's membership is
%% asserted first; a NewMember already present is returned unchanged.
record_new_member_in_group(NewMember, Left, GroupName, TxnFun) ->
    Fun =
        fun () ->
                try
                    Group = #gm_group { members = Members, version = Ver } =
                        check_membership(Left, read_group(GroupName)),
                    case lists:member(NewMember, Members) of
                        true ->
                            %% This avoids duplicates during partial partitions,
                            %% as inconsistent views might happen during them
                            rabbit_log:warning("(~p) GM avoiding duplicate of ~p",
                                               [self(), NewMember]),
                            Group;
                        false ->
                            {Prefix, [Left | Suffix]} =
                                lists:splitwith(fun (M) -> M =/= Left end, Members),
                            write_group(Group #gm_group {
                                          members = Prefix ++ [Left, NewMember | Suffix],
                                          version = Ver + 1 })
                    end
                catch
                    lost_membership ->
                        %% The transaction must not be abruptly crashed, but
                        %% leave the gen_server to stop normally
                        {error, lost_membership}
                end
        end,
    handle_lost_membership_in_txn(TxnFun, Fun).
%% Remove the given (dead) member ids from the persisted member list,
%% bumping the group version only when something actually changed. Our
%% own membership is asserted first.
erase_members_in_group(Self, Members, GroupName, TxnFun) ->
    DeadMembers = [{dead, Id} || Id <- Members],
    Fun =
        fun () ->
                try
                    Group = #gm_group { members = [_|_] = Members1, version = Ver } =
                        check_membership(Self, read_group(GroupName)),
                    case Members1 -- DeadMembers of
                        Members1 -> Group; %% nothing to erase
                        Members2 -> write_group(
                                      Group #gm_group { members = Members2,
                                                        version = Ver + 1 })
                    end
                catch
                    lost_membership ->
                        %% The transaction must not be abruptly crashed, but
                        %% leave the gen_server to stop normally
                        {error, lost_membership}
                end
        end,
    handle_lost_membership_in_txn(TxnFun, Fun).
%% Erase those of our aliases (dead members we inherited) whose pending
%% publishes have all been acked (last_pub =:= last_ack): drop them from
%% the members state and from the persisted group, then rebuild the view.
maybe_erase_aliases(State = #state { self = Self,
                                     group_name = GroupName,
                                     members_state = MembersState,
                                     txn_executor = TxnFun }, View) ->
    #view_member { aliases = Aliases } = fetch_view_member(Self, View),
    {Erasable, MembersState1}
        = ?SETS:fold(
            fun (Id, {ErasableAcc, MembersStateAcc} = Acc) ->
                    #member { last_pub = LP, last_ack = LA } =
                        find_member_or_blank(Id, MembersState),
                    case can_erase_view_member(Self, Id, LA, LP) of
                        true -> {[Id | ErasableAcc],
                                 erase_member(Id, MembersStateAcc)};
                        false -> Acc
                    end
            end, {[], MembersState}, Aliases),
    View1 = case Erasable of
                [] -> View;
                _ -> group_to_view(
                       erase_members_in_group(Self, Erasable, GroupName, TxnFun))
            end,
    change_view(View1, State #state { members_state = MembersState1 }).
%% A view member may be erased once every publish of it has been acked
%% (last ack caught up with last pub) - but we never erase ourselves.
can_erase_view_member(Member, Member, _LastAck, _LastPub) ->
    false;
can_erase_view_member(_Self, _Id, LastAck, LastPub) ->
    LastAck =:= LastPub.
%% Async / sync message to the process behind a member id, routed via
%% ?INSTR_MOD (the possibly-instrumented transport module).
neighbour_cast(N, Msg) -> ?INSTR_MOD:cast(get_pid(N), Msg).
neighbour_call(N, Msg) -> ?INSTR_MOD:call(get_pid(N), Msg, infinity).
%% ---------------------------------------------------------------------------
%% View monitoring and maintenance
%% ---------------------------------------------------------------------------
%% Reconcile the cached neighbour {Member, MonitorRef} with the
%% neighbour the current view demands, prodding affected members with
%% check_neighbours and (de)monitoring as required. Returns the new
%% {Neighbour, MonitorRef} pair.
ensure_neighbour(_Ver, Self, {Self, undefined}, Self) ->
    %% We are and should remain our own neighbour: nothing to do.
    {Self, undefined};
ensure_neighbour(Ver, Self, {Self, undefined}, RealNeighbour) ->
    %% Gained a real neighbour: notify it and start monitoring.
    ok = neighbour_cast(RealNeighbour, {?TAG, Ver, check_neighbours}),
    {RealNeighbour, maybe_monitor(RealNeighbour, Self)};
ensure_neighbour(_Ver, _Self, {RealNeighbour, MRef}, RealNeighbour) ->
    %% Unchanged.
    {RealNeighbour, MRef};
ensure_neighbour(Ver, Self, {RealNeighbour, MRef}, Neighbour) ->
    %% Neighbour changed: drop the old monitor, notify the old (and,
    %% unless it is ourselves, the new) neighbour, monitor the new one.
    true = ?INSTR_MOD:demonitor(MRef),
    Msg = {?TAG, Ver, check_neighbours},
    ok = neighbour_cast(RealNeighbour, Msg),
    ok = case Neighbour of
             Self -> ok;
             _ -> neighbour_cast(Neighbour, Msg)
         end,
    {Neighbour, maybe_monitor(Neighbour, Self)}.
%% Monitor another member's process; we never monitor ourselves.
maybe_monitor( Self, Self) -> undefined;
maybe_monitor(Other, _Self) -> ?INSTR_MOD:monitor(get_pid(Other)).
%% Bring our left/right neighbours in line with the view. If we end up
%% as our own right neighbour the broadcast buffer is discarded; if the
%% right neighbour changed, send it a catchup of our members state.
check_neighbours(State = #state { self = Self,
                                  left = Left,
                                  right = Right,
                                  view = View,
                                  broadcast_buffer = Buffer }) ->
    #view_member { left = VLeft, right = VRight }
        = fetch_view_member(Self, View),
    Ver = view_version(View),
    Left1 = ensure_neighbour(Ver, Self, Left, VLeft),
    Right1 = ensure_neighbour(Ver, Self, Right, VRight),
    Buffer1 = case Right1 of
                  {Self, undefined} -> [];
                  _ -> Buffer
              end,
    State1 = State #state { left = Left1, right = Right1,
                            broadcast_buffer = Buffer1 },
    ok = maybe_send_catchup(Right, State1),
    State1.
%% Send our members state to the right neighbour, but only when the
%% neighbour actually changed, is not ourselves, and we have a members
%% state to send.
maybe_send_catchup(Right, #state { right = Right }) ->
    ok; %% right neighbour unchanged
maybe_send_catchup(_Right, #state { self = Self,
                                    right = {Self, undefined} }) ->
    ok; %% we are our own right neighbour
maybe_send_catchup(_Right, #state { members_state = undefined }) ->
    ok; %% nothing to catch up with yet
maybe_send_catchup(_Right, #state { self = Self,
                                    right = {Right, _MRef},
                                    view = View,
                                    members_state = MembersState }) ->
    send_right(Right, View,
               {catchup, Self, prepare_members_state(MembersState)}).
%% ---------------------------------------------------------------------------
%% Catch_up delta detection
%% ---------------------------------------------------------------------------
%% Split queue A against queue B into: the elements of A preceding B's
%% first element (Prefix), the shared portion (Common), and what is
%% left of B afterwards (Suffix).
find_prefix_common_suffix(A, B) ->
    {Prefix, A1} = find_prefix(A, B, queue:new()),
    {Common, Suffix} = find_common(A1, B, queue:new()),
    {Prefix, Common, Suffix}.
%% Returns the elements of A that occur before the first element of B,
%% plus the remainder of A. Entries are {Num, Msg} pairs ordered by Num.
find_prefix(A, B, Prefix) ->
    case {queue:out(A), queue:out(B)} of
        {{{value, Same}, _TailA}, {{value, Same}, _TailB}} ->
            %% Heads coincide: everything collected so far is the prefix.
            {Prefix, A};
        {{empty, EmptyA}, {{value, _HeadB}, _TailB}} ->
            %% A ran out before reaching B's head.
            {Prefix, EmptyA};
        {{{value, {SeqA, _MsgA} = HeadA}, TailA},
         {{value, {SeqB, _MsgB}}, _TailB}} when SeqA < SeqB ->
            find_prefix(TailA, B, queue:in(HeadA, Prefix));
        {_, {empty, _TailB}} ->
            {A, Prefix} %% Prefix will be empty here
    end.
%% A should be a prefix of B. Returns the commonality plus the
%% remainder of B.
find_common(A, B, Common) ->
    case {queue:out(A), queue:out(B)} of
        {{{value, Same}, TailA}, {{value, Same}, TailB}} ->
            find_common(TailA, TailB, queue:in(Same, Common));
        {{empty, _EmptyA}, _} ->
            {Common, B};
        %% Heads differ: drop B's head. Matching {value, _} rather than
        %% any term avoids looping forever, since {empty, B} = queue:out(B).
        {_, {{value, _HeadB}, TailB}} ->
            find_common(A, TailB, Common);
        %% Drop A's head; an exhausted A is caught by the clause above.
        {{{value, _HeadA}, TailA}, _} ->
            find_common(TailA, B, Common)
    end.
%% ---------------------------------------------------------------------------
%% Members helpers
%% ---------------------------------------------------------------------------
%% Apply Fun to the member's state (blank if unknown) and store the
%% result back under Id.
with_member(Fun, Id, MembersState) ->
    store_member(
      Id, Fun(find_member_or_blank(Id, MembersState)), MembersState).

%% As with_member/3, but Fun also threads an accumulator through.
with_member_acc(Fun, Id, {MembersState, Acc}) ->
    {MemberState, Acc1} = Fun(find_member_or_blank(Id, MembersState), Acc),
    {store_member(Id, MemberState, MembersState), Acc1}.
%% Look up a member's state, defaulting to a blank member when unknown.
find_member_or_blank(Id, MembersState) ->
    case maps:find(Id, MembersState) of
        {ok, Result} -> Result;
        error -> blank_member()
    end.

erase_member(Id, MembersState) -> maps:remove(Id, MembersState).

%% A member with no pending publishes and sentinel pub/ack counters.
blank_member() ->
    #member { pending_ack = queue:new(), last_pub = -1, last_ack = -1 }.

blank_member_state() -> maps:new().

store_member(Id, MemberState, MembersState) ->
    maps:put(Id, MemberState, MembersState).

%% Members state travels over the wire as a plain {Id, Member} list.
prepare_members_state(MembersState) -> maps:to_list(MembersState).
build_members_state(MembersStateList) -> maps:from_list(MembersStateList).
%% A member id is {GroupVersion, Pid}; fall back to ?VERSION_START when
%% the group record does not exist yet.
make_member(GroupName) ->
    {case dirty_read_group(GroupName) of
         #gm_group { version = Version } -> Version;
         {error, not_found} -> ?VERSION_START
     end, self()}.
%% Rebuild the members state from scratch, keeping entries only for the
%% members the view still knows about (unknown ids become blanks).
remove_erased_members(MembersState, View) ->
    lists:foldl(fun (Id, MembersState1) ->
                        store_member(Id, find_member_or_blank(Id, MembersState),
                                     MembersState1)
                end, blank_member_state(), all_known_members(View)).
%% Accessors for member ids, which are {Version, Pid} pairs.
get_version({Ver, _Pid}) -> Ver.

get_pid({_Ver, Pid}) -> Pid.

get_pids(MemberIds) -> [Pid || {_Ver, Pid} <- MemberIds].
%% ---------------------------------------------------------------------------
%% Activity assembly
%% ---------------------------------------------------------------------------
%% An activity accumulator is a queue of {Sender, Pubs, Acks} triples;
%% entries carrying neither pubs nor acks are elided.
activity_nil() -> queue:new().

activity_cons(_Sender, [], [], Activity) -> Activity;
activity_cons(Sender, Pubs, Acks, Activity) ->
    queue:in({Sender, Pubs, Acks}, Activity).

activity_finalise(Activity) -> queue:to_list(Activity).
%% Forward a non-empty activity list to our right neighbour.
maybe_send_activity([], _State) ->
    ok;
maybe_send_activity(Activity, #state { self = Self,
                                       right = {Right, _MRefR},
                                       view = View }) ->
    send_right(Right, View, {activity, Self, Activity}).
%% Cast Msg to the right neighbour, tagged with the view version so the
%% receiver can detect a stale view.
send_right(Right, View, Msg) ->
    ok = neighbour_cast(Right, {?TAG, view_version(View), Msg}).
%% Fold an incoming activity list ({Id, Pubs, Acks} triples) into the
%% members state, threading the confirms queue and an outgoing activity
%% accumulator through with_member_acc/3.
calculate_activity(MembersState, Confirms, Activity, Self, View) ->
    lists:foldl(
      fun ({Id, Pubs, Acks}, MembersStateConfirmsActivity) ->
              with_member_acc(
                fun (Member = #member { pending_ack = PA,
                                        last_pub = LP,
                                        last_ack = LA },
                     {Confirms2, Activity2}) ->
                        case is_member_alias(Id, Self, View) of
                            true ->
                                %% Id is (an alias of) ourselves: the
                                %% pubs have come full circle, so ack
                                %% them and confirm any waiting callers.
                                {ToAck, PA1} =
                                    find_common(queue_from_pubs(Pubs), PA,
                                                queue:new()),
                                LA1 = last_ack(Acks, LA),
                                AckNums = acks_from_queue(ToAck),
                                Confirms3 = maybe_confirm(
                                              Self, Id, Confirms2, AckNums),
                                {Member #member { pending_ack = PA1,
                                                  last_ack = LA1 },
                                 {Confirms3,
                                  activity_cons(
                                    Id, [], AckNums, Activity2)}};
                            false ->
                                %% Someone else's activity: record the
                                %% pubs and apply the acks, forwarding
                                %% both onwards.
                                PA1 = apply_acks(Acks, join_pubs(PA, Pubs)),
                                LA1 = last_ack(Acks, LA),
                                LP1 = last_pub(Pubs, LP),
                                {Member #member { pending_ack = PA1,
                                                  last_pub = LP1,
                                                  last_ack = LA1 },
                                 {Confirms2,
                                  activity_cons(Id, Pubs, Acks, Activity2)}}
                        end
                end, Id, MembersStateConfirmsActivity)
      end, {MembersState, {Confirms, activity_nil()}}, Activity).
%% Deliver every publish in Activity to the callback module's
%% handle_msg/3, honouring {become, Module, Args} module switches along
%% the way and aborting on the first {stop, Reason}. Returns ok, the
%% final {become, _, _} if the module changed, or the stop error.
callback(Args, Module, Activity) ->
    Result =
        lists:foldl(
          fun ({Id, Pubs, _Acks}, {Args1, Module1, ok}) ->
                  lists:foldl(fun ({_PubNum, Pub}, Acc = {Args2, Module2, ok}) ->
                                      case Module2:handle_msg(
                                             Args2, get_pid(Id), Pub) of
                                          ok ->
                                              Acc;
                                          {become, Module3, Args3} ->
                                              {Args3, Module3, ok};
                                          {stop, _Reason} = Error ->
                                              Error
                                      end;
                                  (_, Error = {stop, _Reason}) ->
                                      Error
                              end, {Args1, Module1, ok}, Pubs);
              (_, Error = {stop, _Reason}) ->
                  Error
          end, {Args, Module, ok}, Activity),
    case Result of
        %% First clause binds the original Args/Module: nothing changed.
        {Args, Module, ok} -> ok;
        {Args1, Module1, ok} -> {become, Module1, Args1};
        {stop, _Reason} = Error -> Error
    end.
%% Install a new view: tell the callback module about member births and
%% deaths (if any), then re-establish neighbours. Returns the callback
%% result paired with the updated state.
change_view(View, State = #state { view = View0,
                                   module = Module,
                                   callback_args = Args }) ->
    OldMembers = all_known_members(View0),
    NewMembers = all_known_members(View),
    Births = NewMembers -- OldMembers,
    Deaths = OldMembers -- NewMembers,
    Result = case {Births, Deaths} of
                 {[], []} -> ok;
                 _ -> Module:members_changed(
                        Args, get_pids(Births), get_pids(Deaths))
             end,
    {Result, check_neighbours(State #state { view = View })}.
%% Turn a callback {Result, State} (or {Result, Reply, State}) into the
%% appropriate gen_server noreply/reply/stop return.
handle_callback_result({Result, State}) ->
    if_callback_success(
      Result, fun no_reply_true/3, fun no_reply_false/3, undefined, State);
handle_callback_result({Result, Reply, State}) ->
    if_callback_success(
      Result, fun reply_true/3, fun reply_false/3, Reply, State).
%% Result-continuation helpers for if_callback_success/5: the *_true
%% variants run on callback success, the *_false variants on {stop, _}.
no_reply_true (_Result, _Undefined, State) -> noreply(State).
no_reply_false({stop, Reason}, _Undefined, State) -> {stop, Reason, State}.

reply_true (_Result, Reply, State) -> reply(Reply, State).
reply_false({stop, Reason}, Reply, State) -> {stop, Reason, Reply, State}.

handle_msg_true (_Result, Msg, State) -> handle_msg(Msg, State).
handle_msg_false(Result, _Msg, State) -> {Result, State}.
%% On success, deliver the activity to the callback module; on failure,
%% pass the failing result through untouched.
activity_true(_Result, Activity, State = #state { module = Module,
                                                  callback_args = Args }) ->
    {callback(Args, Module, Activity), State}.
activity_false(Result, _Activity, State) ->
    {Result, State}.
%% Dispatch on a callback result (after filtering it through
%% maybe_stop/2): ok and {become, _, _} take the True continuation -
%% the latter also swapping in the new module/args - while {stop, _}
%% takes the False continuation.
if_callback_success(Result, True, False, Arg, State) ->
    {NewResult, NewState} = maybe_stop(Result, State),
    if_callback_success1(NewResult, True, False, Arg, NewState).

if_callback_success1(ok, True, _False, Arg, State) ->
    True(ok, Arg, State);
if_callback_success1(
  {become, Module, Args} = Result, True, _False, Arg, State) ->
    True(Result, Arg, State #state { module = Module,
                                     callback_args = Args });
if_callback_success1({stop, _Reason} = Result, _True, False, Arg, State) ->
    False(Result, Arg, State).
%% Defer a requested stop while messages are still pending: the stop
%% reason is remembered in #state.shutting_down and only turned into an
%% actual {stop, Reason} once has_pending_messages/1 reports drained.
maybe_stop({stop, Reason}, #state{ shutting_down = false } = State) ->
    ShuttingDown = {true, Reason},
    case has_pending_messages(State) of
        true -> {ok, State #state{ shutting_down = ShuttingDown }};
        false -> {{stop, Reason}, State #state{ shutting_down = ShuttingDown }}
    end;
maybe_stop(Result, #state{ shutting_down = false } = State) ->
    {Result, State};
maybe_stop(Result, #state{ shutting_down = {true, Reason} } = State) ->
    %% Already shutting down: stop as soon as the pending work drains.
    case has_pending_messages(State) of
        true -> {Result, State};
        false -> {{stop, Reason}, State}
    end.
%% True when the broadcast buffer is non-empty or some member still has
%% publishes that have not been acked (last_pub =/= last_ack).
has_pending_messages(#state{ broadcast_buffer = Buffer })
  when Buffer =/= [] ->
    true;
has_pending_messages(#state{ members_state = MembersState }) ->
    MembersWithPubAckMismatches = maps:filter(fun(_Id, #member{last_pub = LP, last_ack = LA}) ->
                                                      LP =/= LA
                                              end, MembersState),
    0 =/= maps:size(MembersWithPubAckMismatches).
%% Reply 'ok' to callers waiting in the confirms queue (entries are
%% {PubNum, From}) for each of our own acked publishes. Only applies
%% when the publisher Id is ourselves; other ids pass through.
maybe_confirm(_Self, _Id, Confirms, []) ->
    Confirms;
maybe_confirm(Self, Self, Confirms, [PubNum | PubNums]) ->
    case queue:out(Confirms) of
        {empty, _Confirms} ->
            Confirms;
        {{value, {PubNum, From}}, Confirms1} ->
            gen_server2:reply(From, ok),
            maybe_confirm(Self, Self, Confirms1, PubNums);
        {{value, {PubNum1, _From}}, _Confirms} when PubNum1 > PubNum ->
            %% No confirm queued for this PubNum: skip it.
            maybe_confirm(Self, Self, Confirms, PubNums)
    end;
maybe_confirm(_Self, _Id, Confirms, _PubNums) ->
    Confirms.
%% Reply 'ok' to every outstanding confirm and return an empty queue.
purge_confirms(Confirms) ->
    _ = [gen_server2:reply(From, ok) || {_PubNum, From} <- queue:to_list(Confirms)],
    queue:new().
%% ---------------------------------------------------------------------------
%% Msg transformation
%% ---------------------------------------------------------------------------
%% Pub entries are {PubNum, Msg} pairs held in a queue; acks are just
%% the publish numbers.
acks_from_queue(PubQ) ->
    [Num || {Num, _Msg} <- queue:to_list(PubQ)].

pubs_from_queue(PubQ) -> queue:to_list(PubQ).

queue_from_pubs(PubList) -> queue:from_list(PubList).
%% Drop the first length(Acks) entries from the pending-publish queue.
apply_acks([], Pubs) ->
    Pubs;
apply_acks(Acks, Pubs) ->
    {_Acked, Remaining} = queue:split(length(Acks), Pubs),
    Remaining.

%% Append a publish list onto the pending-publish queue.
join_pubs(Q, []) ->
    Q;
join_pubs(Q, Pubs) ->
    queue:join(Q, queue:from_list(Pubs)).
%% New last-ack counter: the final ack number in the list, asserted to
%% advance strictly past the previous value.
last_ack([], LastAck) ->
    LastAck;
last_ack(AckNums, LastAck) ->
    NewLastAck = lists:last(AckNums),
    true = NewLastAck > LastAck, %% ASSERTION
    NewLastAck.

%% New last-pub counter: the number of the final {PubNum, Msg} pair,
%% likewise asserted to advance strictly.
last_pub([], LastPub) ->
    LastPub;
last_pub(Pubs, LastPub) ->
    {PubNum, _Msg} = lists:last(Pubs),
    true = PubNum > LastPub, %% ASSERTION
    PubNum.
%% ---------------------------------------------------------------------------
%% Uninstrumented versions
%% Plain pass-throughs used as the uninstrumented implementations of
%% the ?INSTR_MOD call/cast/monitor/demonitor primitives.
call(Pid, Msg, Timeout) -> gen_server2:call(Pid, Msg, Timeout).
cast(Pid, Msg) -> gen_server2:cast(Pid, Msg).
monitor(Pid) -> erlang:monitor(process, Pid).
demonitor(MRef) -> erlang:demonitor(MRef).
%% Assert that Self still appears in the group record; throws
%% lost_membership otherwise (including when the group is gone) so the
%% enclosing transaction can unwind cleanly.
check_membership(Self, #gm_group{members = M} = Group) ->
    case lists:member(Self, M) of
        true ->
            Group;
        false ->
            throw(lost_membership)
    end;
check_membership(_Self, {error, not_found}) ->
    throw(lost_membership).
%% Dirty-read variant: check that some member entry carries our pid
%% (member ids are {Version, Pid}, hence keymember on position 2);
%% throws lost_membership when we are absent or the group is gone.
check_membership(GroupName) ->
    case dirty_read_group(GroupName) of
        #gm_group{members = M} ->
            case lists:keymember(self(), 2, M) of
                true ->
                    ok;
                false ->
                    throw(lost_membership)
            end;
        {error, not_found} ->
            throw(lost_membership)
    end.
%% Throw lost_membership when the group record could not be read;
%% anything else passes straight through.
check_group({error, not_found}) ->
    throw(lost_membership);
check_group(Group) ->
    Group.
%%------------------------------------------------------------------------------
%% Copyright 2012 FlowForwarding.org
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%-----------------------------------------------------------------------------
%% @author Erlang Solutions Ltd. <<EMAIL>>
%% @author <NAME> <<EMAIL>>
%% @copyright 2012 FlowForwarding.org
%% @doc Encode/decode module for chunked framing mechanism.
-module(enetconf_fm_chunked).
%% API
-export([encode/1,
decode/1,
decode_one/1,
new_parser/0,
parse/2]).
-define(LF, <<16#0A>>).
-define(HASH, <<16#23>>).
-define(CHUNK_SIZE, 1024).
-define(MIN_CHUNK_SIZE, 9).
-define(MAX_CHUNK_DATA_SIZE, 4294967295).
%% Parser state carried between parse/2 calls: bytes received so far
%% that do not yet form a complete chunk.
%% NOTE: the field type was previously written as `:: binary`, which in
%% a typespec denotes the singleton atom 'binary', contradicting the
%% <<>> default; the correct built-in type is binary().
-record(chunk_parser, {
          stack = <<>> :: binary()
         }).
%%------------------------------------------------------------------------------
%% API functions
%%------------------------------------------------------------------------------
%% @doc Encode by encapsulating into a chunked frame.
%% The message is split into chunks of at most ?CHUNK_SIZE bytes, each
%% framed as "\n#Size\nData" and terminated by "\n##\n" (see do_encode/2).
-spec encode(binary()) -> {ok, Encoded :: iolist()}.
encode(Message) ->
    do_encode(Message, []).
%% @doc Decode received data to separate messages.
%% Returns every complete message found, plus the (possibly empty)
%% undecoded remainder.
-spec decode(binary()) -> {ok, Messages :: [binary()], Rest :: binary()}.
decode(Binary) ->
    do_decode(Binary, []).
%% @doc Decode first message from the received data.
%% {ok, [Message], Rest} when a complete chunked frame is present,
%% otherwise {ok, [], Binary}, leaving the input untouched.
-spec decode_one(binary()) -> {ok, Message :: [binary()], Rest :: binary()}.
decode_one(Binary) ->
    case decode_chunk(Binary, <<>>) of
        {ok, Chunk, Rest} ->
            {ok, [Chunk], Rest};
        {error, incomplete_chunk} ->
            {ok, [], Binary}
    end.
%% @doc Return new parser.
%% The parser holds input carried over between parse/2 calls.
-spec new_parser() -> {ok, #chunk_parser{}}.
new_parser() ->
    {ok, #chunk_parser{}}.
%% @doc Parse received data.
%% Returns new parser.
%% Any trailing incomplete chunk is stashed in the parser and prepended
%% to the input of the next call.
-spec parse(binary(), #chunk_parser{}) -> {ok, Messages :: [binary()],
                                           NewParser :: #chunk_parser{}}.
parse(Binary, Parser) ->
    do_parse(Binary, Parser, []).
%%------------------------------------------------------------------------------
%% Internal functions
%%------------------------------------------------------------------------------
%% @private
%% Build the framed message as a reversed element list and flip it once
%% at the end: each chunk's pieces are prepended in reverse order
%% (data, LF, size, hash, LF), so the final lists:reverse/1 yields the
%% wire order "\n#Size\nData" per chunk, plus "\n##\n" as terminator.
do_encode(ChunkData, Encoded) when byte_size(ChunkData) < ?CHUNK_SIZE ->
    %% Final (short) chunk: also prepend the end-of-chunks marker.
    ChunkSize = integer_to_binary(byte_size(ChunkData)),
    Final = [?LF, ?HASH, ?HASH, ?LF,
             ChunkData, ?LF, ChunkSize, ?HASH, ?LF | Encoded],
    {ok, lists:reverse(Final)};
do_encode(<<ChunkData:?CHUNK_SIZE/binary, Rest/binary>>, Encoded) ->
    ChunkSize = integer_to_binary(byte_size(ChunkData)),
    do_encode(Rest, [ChunkData, ?LF, ChunkSize, ?HASH, ?LF | Encoded]).
%% @private
%% Repeatedly peel complete messages off the front of Binary until only
%% an incomplete frame (or nothing) remains.
do_decode(Binary, DecodedChunks) ->
    case decode_chunk(Binary, <<>>) of
        {ok, Chunk, Rest} ->
            do_decode(Rest, [Chunk | DecodedChunks]);
        {error, incomplete_chunk} ->
            {ok, lists:reverse(DecodedChunks), Binary}
    end.
%% @private
%% Accumulate chunk data until the "\n##\n" end-of-chunks marker. Each
%% chunk starts with "\n#" followed by its decimal size, an LF and then
%% exactly that many bytes of data. Any input that does not (yet) parse
%% is reported as an incomplete chunk so callers can wait for more data.
decode_chunk(<<$\n, $#, $#, $\n, Rest/binary>>, Decoded) ->
    {ok, Decoded, Rest};
decode_chunk(<<$\n, $#, Chunk/binary>>, Decoded) ->
    case get_chunk_size(Chunk) of
        {ok, ChunkSize, ChunkRest} ->
            ChunkSizeInt = binary_to_integer(ChunkSize),
            case byte_size(ChunkRest) >= ChunkSizeInt of
                true ->
                    <<ChunkData:ChunkSizeInt/binary, Rest/binary>> = ChunkRest,
                    decode_chunk(Rest, <<Decoded/binary, ChunkData/binary>>);
                false ->
                    {error, incomplete_chunk}
            end;
        error ->
            {error, incomplete_chunk}
    end;
decode_chunk(_Binary, _) ->
    {error, incomplete_chunk}.
%% @private
%% Prepend the parser's stashed bytes to the new input, decode as many
%% complete messages as possible, and stash whatever is left over.
do_parse(Binary, #chunk_parser{stack = Stack} = Parser, Chunks) ->
    ToDecode = <<Stack/binary, Binary/binary>>,
    case decode_chunk(ToDecode, <<>>) of
        {ok, Chunk, Rest} ->
            do_parse(Rest, Parser#chunk_parser{stack = <<>>}, [Chunk | Chunks]);
        {error, incomplete_chunk} ->
            {ok, lists:reverse(Chunks), Parser#chunk_parser{stack = ToDecode}}
    end.
%%------------------------------------------------------------------------------
%% Helper functions
%%------------------------------------------------------------------------------
%% binary_to_integer/1 and integer_to_binary/1 were added as BIFs in
%% R16B. Let's compile them conditionally, if we're using an earlier
%% release.
-ifndef(binary_integer_bifs).
%% @private
%% Fallback implementations going via the list-based conversions.
integer_to_binary(N) ->
    list_to_binary(integer_to_list(N)).
binary_to_integer(Bin) ->
    list_to_integer(binary_to_list(Bin)).
-endif.
%% @private
%% Split a chunk header at its first LF: {ok, SizeDigits, Rest}, or
%% 'error' when the size field is empty or no LF has arrived yet.
%% Uses binary:split/2 instead of re:split/3 - no regex compilation,
%% identical observable results.
get_chunk_size(Binary) ->
    case binary:split(Binary, <<"\n">>) of
        [<<>>, _] ->
            %% Empty size field is malformed.
            error;
        [_] ->
            %% No LF seen yet: the header is incomplete.
            error;
        [ChunkSize, Rest] ->
            {ok, ChunkSize, Rest}
    end.
%% @doc Implements a data structure for cryptographically signed transactions.
%% This is the envelope around transactions to make them cryptographically safe.
%% The transactions normally also have keys of the "signers" in the transaction,
%% which are extracted using the signers/1 function in the respective transaction
%% handler.
%%
%% The purpose of this module is to provide an API for cryptographically
%% transactions and hide all implementation details. Therefore, the record
%% #signed_tx{} should be kept private and considered an abstract type.
%%
%% A transaction can be signed by one or several signers. Each transaction can
%% determine its own signers by the transaction callback 'signers'. Since we do not
%% want to depend upon transaction types in this module, the user of
%% {@module} should first obtain the signers of the transaction and then call this
%% {@link sign/2} with these signers. There is a {@link sign/3} function that can sign
%% with respect to a certain block height. This is handy whenever the governance
%% variables on what crypto to use would change.
-module(aetx_sign).
%% API
-export([sign/2,
sign/3,
tx/1,
signatures/1,
verify/2,
is_coinbase/1,
filter_invalid_signatures/1]).
%% API that should be avoided to be used
-export([verify/1,
serialize_for_client/3,
serialize_for_client_pending/2,
meta_data_from_client_serialized/2,
serialize_to_binary/1,
deserialize_from_binary/1]).
-export_type([signed_tx/0,
binary_signed_tx/0]).
-include_lib("apps/aecore/include/common.hrl").
-include_lib("apps/aecore/include/blocks.hrl").
-record(signed_tx, {
tx :: aetx:tx(),
signatures = ordsets:new() :: ordsets:ordset(binary())}).
-opaque signed_tx() :: #signed_tx{}.
-type tx() :: aetx:tx().
-type binary_signed_tx() :: binary().
%% @doc Given a transaction Tx, a private key or list of keys,
%% return the cryptographically signed transaction using the default crypto
%% parameters (an empty crypto map; see sign/3 for the defaults).
-spec sign(tx(), list(binary()) | binary()) -> signed_tx().
sign(Tx, PrivKeys) ->
    sign(Tx, PrivKeys, #{}).
-spec sign(tx(), list(binary()) | binary(), map()) -> signed_tx().
%% @doc Given a transaction Tx, a private key and a crypto map,
%% return the cryptographically signed transaction.
%% A list of signers may be provided instead of one signer key.
%% The crypto map may override algo/digest/curve (defaults: ecdsa,
%% sha256, secp256k1). The transaction's canonical serialization is
%% signed, and signatures are stored sorted.
sign(Tx, PrivKey, CryptoMap) when is_binary(PrivKey) ->
    sign(Tx, [PrivKey], CryptoMap);
sign(Tx, PrivKeys, CryptoMap) when is_list(PrivKeys) ->
    Bin = aetx:serialize_to_binary(Tx),
    Algo = maps:get(algo, CryptoMap, ecdsa),
    Digest = maps:get(digest, CryptoMap, sha256),
    Curve = maps:get(curve, CryptoMap, secp256k1),
    Signatures =
        [ crypto:sign(Algo, Digest, Bin, [PrivKey, crypto:ec_curve(Curve)]) ||
            PrivKey <- PrivKeys ],
    #signed_tx{tx = Tx,
               signatures = lists:sort(Signatures)}.
-spec tx(signed_tx()) -> tx().
%% @doc Get the original transaction from a signed transaction.
%% Note that no verification is performed, it just returns the transaction.
%% We have no type yet for any transaction, and coinbase_tx() | spend_tx()
%% seems restricted as type.
tx(#signed_tx{tx = Tx}) ->
    Tx.
%% @doc Get the signatures of a signed transaction.
%% Returned in the sorted order they are stored in (see sign/3).
-spec signatures(signed_tx()) -> list(binary()).
signatures(#signed_tx{signatures = Sigs}) ->
    Sigs.
%% @doc Verify a signed transaction by checking that the provided keys indeed all
%% have signed this transaction.
%% Implemented by re-creating the signature set with sign/2 over Signers
%% and comparing it against the stored signatures.
-spec verify(signed_tx(), list(binary())) -> ok | {error, signature_check_failed}.
verify(#signed_tx{tx = Tx, signatures = Sigs}, Signers) ->
    %% This works even for Signers being one public key!
    #signed_tx{signatures = NewSigs} = sign(Tx, Signers),
    case {NewSigs -- Sigs, Sigs -- NewSigs} of
        {[], []} ->
            %% The signature sets match exactly.
            ok;
        {DSigs1, []} ->
            lager:debug("No matching sigs (~p - ~p) additional new signatures", [DSigs1, Sigs]),
            {error, signature_check_failed};
        {_, DSigs2} ->
            lager:debug("No matching sigs (~p - ~p) missing signatures", [DSigs2, Sigs]),
            {error, signature_check_failed}
    end.
%% This should not call aec_keys verify, but aec_keys should call this module!
%% with the keys of the signers.
%% Delegates signature checking to aec_keys against the node's own keys.
-spec verify(signed_tx()) -> ok | {error, signature_check_failed}.
verify(#signed_tx{tx = Tx, signatures = Sigs}) ->
    case aec_keys:verify(Sigs, Tx) of
        true -> ok;
        false ->
            lager:debug("No matching sigs (~p)", [Sigs]),
            {error, signature_check_failed}
    end.
%% @doc Keep only the signed transactions whose signatures verify
%% (per verify/1).
-spec filter_invalid_signatures(list(signed_tx())) -> list(signed_tx()).
filter_invalid_signatures(SignedTxs) ->
    [SignedTx || SignedTx <- SignedTxs, ok =:= verify(SignedTx)].
-define(SIG_TX_TYPE, signed_tx).
-define(SIG_TX_VSN, 1).
%% deterministic canonical serialization.
%% The inner transaction is itself serialized to a binary before being
%% embedded, and signatures are sorted for determinism.
-spec serialize_to_binary(signed_tx()) -> binary_signed_tx().
serialize_to_binary(#signed_tx{tx = Tx, signatures = Sigs}) ->
    %% TODO: The original binary should be kept
    %% around since that is what was signed
    aec_object_serialization:serialize(
      ?SIG_TX_TYPE,
      ?SIG_TX_VSN,
      serialization_template(?SIG_TX_VSN),
      [ {signatures, lists:sort(Sigs)}
      , {transaction, aetx:serialize_to_binary(Tx)}
      ]).
%% @doc Inverse of serialize_to_binary/1: rebuild the #signed_tx{}
%% from its canonical binary form.
-spec deserialize_from_binary(binary()) -> signed_tx().
deserialize_from_binary(SignedTxBin) when is_binary(SignedTxBin) ->
    [ {signatures, Sigs}
    , {transaction, TxBin}
    ] = aec_object_serialization:deserialize(
          ?SIG_TX_TYPE,
          ?SIG_TX_VSN,
          serialization_template(?SIG_TX_VSN),
          SignedTxBin),
    #signed_tx{ tx = aetx:deserialize_from_binary(TxBin)
              , signatures = Sigs
              }.
%% Field layout for signed_tx version 1: a list of raw signature binaries
%% followed by the serialized inner transaction.
serialization_template(?SIG_TX_VSN) ->
    [ {signatures, [binary]}
    , {transaction, binary}
    ].
-spec serialize_for_client(json|message_pack, #header{}, aetx_sign:signed_tx()) ->
                              binary() | map().
%% Resolve block hash, block height and tx hash from the given header,
%% then delegate to serialize_for_client/5.
serialize_for_client(Encoding, Header, #signed_tx{tx = Tx} = SignedTx) ->
    {ok, BlockHash} = aec_headers:hash_header(Header),
    Height = aec_headers:height(Header),
    serialize_for_client(Encoding, SignedTx, Height, BlockHash, aetx:hash(Tx)).
-spec serialize_for_client_pending(json|message_pack, aetx_sign:signed_tx()) ->
                                      binary() | map().
%% Serialize a transaction that is not yet on chain: height -1 and an
%% empty block hash mark it as pending.
serialize_for_client_pending(Encoding, #signed_tx{tx = Tx} = SignedTx) ->
    serialize_for_client(Encoding, SignedTx, -1, <<>>, aetx:hash(Tx)).
%% Render the signed tx for API clients, msgpack flavour: the whole
%% payload is msgpack-packed and base58c-encoded as one 'transaction' blob.
serialize_for_client(message_pack, #signed_tx{}=S, BlockHeight, BlockHash0,
                     TxHash) ->
    %% An empty block hash marks a pending (not yet mined) transaction.
    BlockHash = case BlockHash0 of
                    <<>> -> <<"none">>;
                    _ -> aec_base58c:encode(block_hash, BlockHash0)
                end,
    MetaData = [#{<<"block_height">> => BlockHeight},
                #{<<"block_hash">> => BlockHash},
                #{<<"hash">> => aec_base58c:encode(tx_hash, TxHash)}],
    TxBin = serialize_to_binary(S),
    %% Payload layout: [type, version, tx-object, metadata] — the order is
    %% relied upon by meta_data_from_client_serialized/2 below.
    Payload = [?SIG_TX_TYPE,
               ?SIG_TX_VSN,
               #{<<"tx">> => aec_base58c:encode(transaction, TxBin)},
               MetaData
              ],
    aec_base58c:encode(transaction, msgpack:pack(Payload));
%% JSON flavour: a flat map with the inner tx serialized field by field
%% and each signature individually base58c-encoded.
serialize_for_client(json, #signed_tx{tx = Tx, signatures = Sigs},
                     BlockHeight, BlockHash0, TxHash) ->
    BlockHash = case BlockHash0 of
                    <<>> -> <<"none">>;
                    _ -> aec_base58c:encode(block_hash, BlockHash0)
                end,
    #{<<"tx">> => aetx:serialize_for_client(Tx),
      <<"block_height">> => BlockHeight,
      <<"block_hash">> => BlockHash,
      <<"hash">> => aec_base58c:encode(tx_hash, TxHash),
      <<"signatures">> => lists:map(fun(Sig) -> aec_base58c:encode(signature, Sig) end, Sigs)}.
%% @doc Extract block height, block hash and tx hash from a
%% client-serialized transaction (the inverse of serialize_for_client/5
%% for the metadata part only; the tx itself is ignored).
meta_data_from_client_serialized(message_pack, Bin) ->
    {transaction, MsgPackBin} = aec_base58c:decode(Bin),
    %% Matches the [type, version, tx, metadata] payload layout produced
    %% by serialize_for_client(message_pack, ...).
    {ok, [_Type, _Version, _TxSer, GenericData]} = msgpack:unpack(MsgPackBin),
    [#{<<"block_height">> := BlockHeight},
     #{<<"block_hash">> := BlockHashEncoded},
     #{<<"hash">> := TxHashEncoded}] = GenericData,
    {block_hash, BlockHash} = aec_base58c:decode(BlockHashEncoded),
    {tx_hash, TxHash} = aec_base58c:decode(TxHashEncoded),
    #{block_height => BlockHeight,
      block_hash => BlockHash,
      hash => TxHash};
meta_data_from_client_serialized(json, Serialized) ->
    #{<<"tx">> := _EncodedTx,
      <<"block_height">> := BlockHeight,
      <<"block_hash">> := BlockHashEncoded,
      <<"hash">> := TxHashEncoded,
      <<"signatures">> := _Sigs} = Serialized,
    {block_hash, BlockHash} = aec_base58c:decode(BlockHashEncoded),
    {tx_hash, TxHash} = aec_base58c:decode(TxHashEncoded),
    #{block_height => BlockHeight,
      block_hash => BlockHash,
      hash => TxHash}.
-spec is_coinbase(Tx :: signed_tx()) -> boolean().
%% @doc Return true if the wrapped inner transaction is a coinbase
%% transaction; delegates to aetx:is_coinbase/1.
%% Fix: stray dataset-metadata residue appended after the final period
%% ("| apps/aetx/src/aetx_sign.erl | ...") made the form unparsable.
is_coinbase(#signed_tx{tx = Tx}) ->
    aetx:is_coinbase(Tx).
%% @author <NAME> <<EMAIL>>
%% @copyright 2014 <NAME>
%%
%% @doc Utility functions for CSS processing. Also used for sanitizing HTML.
%% Copyright 2014 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(z_css).
-export([
scan/1,
parse/1,
sanitize/1,
sanitize_style/1
]).
%% @doc Tokenize CSS input given as a binary or a character list using the
%% generated lexer. Returns {ok, Tokens}; crashes (badmatch) on lexer error.
scan(Input) when is_binary(Input) ->
    scan(unicode:characters_to_list(Input));
scan(Chars) when is_list(Chars) ->
    {ok, Tokens, _EndLine} = z_css_lexer:string(Chars),
    {ok, Tokens}.
%% @doc Parse CSS given either as a binary (tokenized first) or as a
%% ready-made token list.
parse(Input) when is_binary(Input) ->
    {ok, Tokens} = scan(Input),
    parse(Tokens);
parse(Tokens) when is_list(Tokens) ->
    z_css_parser:parse(Tokens).
%% @doc Sanitize a full stylesheet given as a UTF-8 binary. Returns
%% {ok, CleanCss} or {error, {Line, Reason}} on parse failure. Charset and
%% import sections are always dropped; every rule is sanitized.
sanitize(Css) when is_binary(Css) ->
    {ok, Ts} = scan(Css),
    case z_css_parser:parse(Ts) of
        {error, {Line, z_css_parser, Error}} ->
            {error, {Line, unicode:characters_to_binary(Error)}};
        {ok, {stylesheet, Charset, Import, Rules}} ->
            Charset1 = sanitize_charset(Charset),
            Import1 = sanitize_import(Import),
            Rules1 = [ sanitize_rule(R) || R <- Rules ],
            %% Re-serialize the sanitized parse tree back into one binary.
            {ok, unicode:characters_to_binary([
                serialize_charset(Charset1),
                serialize_import(Import1),
                [ serialize_rule(R) || R <- Rules1 ]
            ])}
    end.
%% @doc Sanitize a bare declaration list (the contents of a style="..."
%% attribute). The input is wrapped in a dummy "a { ... }" rule so the
%% stylesheet parser can be reused; newlines are flattened to spaces.
sanitize_style(Css) when is_list(Css) ->
    sanitize_style(unicode:characters_to_binary(Css));
sanitize_style(Css) when is_binary(Css) ->
    {ok, Ts} = scan(<<"a { ",Css/binary," }">>),
    case z_css_parser:parse(Ts) of
        {ok, {stylesheet, no_charset, no_import, [{rule, _Sel, Declarations}]}} ->
            SanitizedDs = [ sanitize_declaration(D) || D <- Declarations ],
            Ts1 = unicode:characters_to_binary([ serialize_declaration(D) || D <- SanitizedDs ]),
            {ok, binary:replace(Ts1, <<"\n">>, <<" ">>, [global])};
        {error, {Line, z_css_parser, Error}} ->
            {error, {Line, unicode:characters_to_binary(Error)}}
    end.
%%% --------------------------------------------------------
%%% Sanitize a CSS parse tree
%%% --------------------------------------------------------
%% Drop any @charset: sanitized output never contains a charset rule.
sanitize_charset(_Charset) -> no_charset.
%% Drop any @import: external stylesheet references are never allowed.
sanitize_import(_Import) -> no_import.
%% Recursively sanitize one rule node; selectors and media lists are kept
%% as-is, only declarations (and nested rules) are rewritten.
sanitize_rule({rule, Selector, Declarations}) ->
    {rule, Selector, [ sanitize_declaration(D) || D <- Declarations ]};
sanitize_rule({media, MediaList, Rules}) ->
    {media, MediaList, [ sanitize_rule(R) || R <- Rules ]};
sanitize_rule({page, PseudoPage, Declarations}) ->
    {page, PseudoPage, [ sanitize_declaration(D) || D <- Declarations ]}.
%% Sanitize the value expression of a declaration; property name and
%% !important priority are kept unchanged.
sanitize_declaration({declaration, Ident, Expr, Prio}) ->
    {declaration, Ident, sanitize_expr(Expr), Prio}.
%% Sanitize one value expression node. Dangerous constructs (fixed
%% positioning, external urls, functions) are neutralized; plain value
%% tokens pass through unchanged.
sanitize_expr({ident, Line, Ident}) ->
    % Don't allow anything to escape its bounding box.
    case string:to_lower(Ident) of
        "fixed" -> {ident, Line, "absolute"};
        _ -> {ident, Line, Ident}
    end;
sanitize_expr({uri, Line, _Uri}) ->
    % No external url references
    {uri, Line, "url()"};
sanitize_expr({function, _Func, Expr}) ->
    % No unchecked functions — keep only the (sanitized) argument.
    sanitize_expr(Expr);
sanitize_expr({number, _, _} = E) -> E;
sanitize_expr({length, _, _} = E) -> E;
sanitize_expr({ems, _, _} = E) -> E;
sanitize_expr({exs, _, _} = E) -> E;
sanitize_expr({angle, _, _} = E) -> E;
sanitize_expr({time, _, _} = E) -> E;
sanitize_expr({freq, _, _} = E) -> E;
sanitize_expr({dimension, _, _} = E) -> E;
sanitize_expr({percentage, _, _} = E) -> E;
sanitize_expr({string, Line, S}) -> {string, Line, sanitize_string(S)};
sanitize_expr({hash, _, _} = E) -> E;
sanitize_expr({operator, Op, E1, E2}) -> {operator, Op, sanitize_expr(E1), sanitize_expr(E2)};
sanitize_expr({operator, Op, E1}) -> {operator, Op, sanitize_expr(E1)}.
%% Strip the surrounding quote characters, HTML-strip and escape the
%% contents, then re-quote with double quotes.
%% NOTE(review): assumes the lexer always delivers a string token with a
%% matching closing quote; a lone quote character would crash in
%% lists:sublist/2 — confirm against z_css_lexer.
sanitize_string([Quot|S]) when Quot =:= $"; Quot =:= $' ->
    S1 = lists:sublist(S, length(S)-1),
    [ $", z_html:escape_check(z_html:strip(unicode:characters_to_binary(S1))), $"].
%%% --------------------------------------------------------
%%% Serialize the sanitized parse tree
%%% --------------------------------------------------------
%% Emit an @charset rule; no_charset (the only value sanitize_charset/1
%% produces) serializes to nothing.
serialize_charset(no_charset) -> <<>>;
serialize_charset({charset, {string,_,S}}) -> [ <<"@charset ">>, S, $; ].
%% Emit an @import rule; no_import (the only value sanitize_import/1
%% produces) serializes to nothing.
serialize_import(no_import) -> <<>>;
%% Fix: this clause serialized an import rule with the "@charset " keyword
%% (copy-paste from serialize_charset/1), producing invalid CSS.
%% NOTE(review): the clause still matches a {charset, _, _} tuple; confirm
%% against z_css_parser whether the import node is tagged 'charset' or
%% 'import'. Currently unreachable since sanitize_import/1 always
%% returns no_import.
serialize_import({charset, Location, MediaList}) ->
    [ <<"@import ">>,
      serialize_location(Location),
      serialize_medialist(MediaList)
    ].
%% Serialize the location part of an @import rule.
%% NOTE(review): matches a {url, _, _} tuple while expression nodes use the
%% 'uri' tag — verify which tag the parser emits for import locations.
serialize_location({string, _, S}) -> S;
serialize_location({url, _, Url}) -> Url.
%% Serialize a media list as comma-separated media identifiers.
serialize_medialist([]) ->
    [];
serialize_medialist([Medium | Others]) ->
    Tail =
        case Others of
            [] -> [];
            _ -> [ $,, serialize_medialist(Others) ]
        end,
    [ serialize_media(Medium), Tail ].
%% A medium is a plain identifier token.
serialize_media({ident, _, Ident}) -> Ident.
%% Serialize one (sanitized) rule node back to CSS text as an iolist.
serialize_rule({rule, SelectorList, Declarations}) ->
    [
     serialize_selectorlist(SelectorList),
     ${, $\n, [ serialize_declaration(D) || D <- Declarations ], $}, $\n
    ];
serialize_rule({media, MediaList, Rules}) ->
    %% Nested rules inside an @media block.
    [
     <<"@media ">>,
     serialize_medialist(MediaList),
     32, ${, $\n,
     [ serialize_rule(R) || R <- Rules ],
     $}, $\n
    ];
serialize_rule({page, PseudoPage, Declarations}) ->
    [
     <<"@page ">>,
     serialize_pseudo_page(PseudoPage),
     32, ${, $\n,
     [ serialize_declaration(R) || R <- Declarations ],
     $}, $\n
    ].
%% Optional ":left"/":right"/":first" suffix of an @page rule.
serialize_pseudo_page(undefined) -> <<>>;
serialize_pseudo_page({ident, _, V}) -> [ $:, V ].
%% Join the rule's selectors with ",\n".
%% NOTE(review): no clause for [] — an empty selector list would crash;
%% presumably the parser guarantees at least one selector per rule.
serialize_selectorlist([S|Rest]) ->
    [
     serialize_selector(S),
     case Rest of
         [] -> [];
         Ms -> [ $,, $\n, serialize_selectorlist(Ms) ]
     end
    ].
%% A selector is a list of {Combinator, SimpleSelector} pairs.
serialize_selector(Sels) ->
    [ serialize_selector_1(S) || S <- Sels ].
%% Emit the combinator (descendant, adjacent-sibling '+', or child '>')
%% followed by the simple selector and a trailing space.
serialize_selector_1({none, S}) -> [ serialize_simpleselector(S), 32 ];
serialize_selector_1({'+', S}) -> [ "+ ", serialize_simpleselector(S), 32 ];
serialize_selector_1({'>', S}) -> [ "> ", serialize_simpleselector(S), 32 ].
%% Serialize a single simple selector back to CSS text.
serialize_simpleselector('*') -> $*;
serialize_simpleselector({ident, _Line, V}) -> V;
serialize_simpleselector({hash, _Line, V}) -> V;
serialize_simpleselector({class, {ident, _Line, V}}) -> [ $., V ];
serialize_simpleselector({attrib, {ident, _Line, V}, AttrOpVal}) -> [ $[, V, serialize_attr_opval(AttrOpVal), $] ];
serialize_simpleselector({pseudo, {ident, _Line, V}}) -> [ $:, V ];
%% Fix: the functional-pseudo clause bound the *named* wildcard _Line twice
%% in one pattern; in Erlang repeated occurrences of the same variable must
%% match equal values, so the clause failed (function_clause) whenever the
%% function token and its argument were reported on different lexer lines.
%% Distinct wildcards restore the intended "ignore both" behavior.
serialize_simpleselector({pseudo, {function, {function, _FLine, F}, {ident, _ALine, V}}}) -> [ $:, F, V, $) ].
%% Operator + value part of an attribute selector: "=", "~=" or "|=".
serialize_attr_opval(undefined) -> <<>>;
serialize_attr_opval({'=', AttrVal}) -> [ $=, serialize_attr_val(AttrVal) ];
serialize_attr_opval({includes, AttrVal}) -> [ $~, $=, serialize_attr_val(AttrVal) ];
serialize_attr_opval({dashmatch, AttrVal}) -> [ $|, $=, serialize_attr_val(AttrVal) ].
%% Attribute value: identifier or (already quoted) string token.
serialize_attr_val({ident, _, V}) -> V;
serialize_attr_val({string, _, V}) -> V.
%% Serialize one declaration as "property: value[ !important];\n".
serialize_declaration({declaration, {ident, _, Idn}, Expr, Prio}) ->
    [
     Idn, $:,
     serialize_expr(Expr),
     case Prio of
         important -> <<" !important">>;
         normal -> <<>>
     end,
     $;, $\n
    ].
%% Serialize a (sanitized) value expression back to CSS text as an iolist.
%% Fix: stray dataset-metadata residue appended after the final period
%% ("| _build/default/lib/zotonic_stdlib/src/z_css.erl | ...") made the
%% last form unparsable.
serialize_expr({ident, _Line, Ident}) ->
    Ident;
serialize_expr({uri, _Line, Uri}) ->
    Uri;
serialize_expr({function, {function,_Line,Fun}, Expr}) ->
    %% The function token text already contains the opening parenthesis.
    [ Fun, serialize_expr(Expr), $) ];
%% Simple value tokens carry their textual representation verbatim.
serialize_expr({number, _, V}) -> V;
serialize_expr({length, _, V}) -> V;
serialize_expr({ems, _, V}) -> V;
serialize_expr({exs, _, V}) -> V;
serialize_expr({angle, _, V}) -> V;
serialize_expr({time, _, V}) -> V;
serialize_expr({freq, _, V}) -> V;
serialize_expr({dimension, _, V}) -> V;
serialize_expr({percentage, _, V}) -> V;
serialize_expr({string, _, V}) -> V;
serialize_expr({hash, _, V}) -> V;
serialize_expr({operator, Op, E1, E2}) ->
    [ serialize_expr(E1), z_convert:to_list(Op), serialize_expr(E2) ];
serialize_expr({operator, Op, E1}) ->
    [ z_convert:to_list(Op), serialize_expr(E1) ].
%%% @doc
%%%
%%% A DogStatsD datagram module.
%%%
%%% @end
%%% @reference
%%% See <a href="https://docs.datadoghq.com/developers/dogstatsd/#datagram-format"> Datagram Format</a>
-module(dogstatsc_datagram).
-export([new_metrics/4,
new_events/3,
new_service_check/3,
encode/1]).
%% Metrics
%%
%% metric.name:value|type|@sample_rate|#tag1:value,tag2
%%
%% metric.name — a string with no colons, bars, or @ characters. See the metric naming policy.
%% value — an integer or float.
%% type — c for counter, g for gauge, ms for timer, h for histogram, s for set.
%% sample rate (optional) — a float between 0 and 1, inclusive. Only works with counter, histogram, and timer metrics. Default is 1 (i.e. sample 100% of the time).
%% tags (optional) — a comma separated list of tags. Use colons for key/value tags, i.e. env:prod. The key device is reserved; Datadog drops a user-added tag like device:foobar.
%%
-record(metrics, {
name = undefined :: string() | undefined,
value = undefined :: float() | undefined,
type = c :: metric_type(),
rate = undefined :: float() | undefined,
tags = undefined :: tags() | undefined
}).
%% Events
%%
%% https://docs.datadoghq.com/developers/dogstatsd/#events-1
%% _e{title.length,text.length}:title|text|d:timestamp|h:hostname|p:priority|t:alert_type|#tag1,tag2
%%
%% _e - The datagram must begin with _e
%% title — Event title.
%% text — Event text. Insert line breaks with an escaped slash (\\n)
%% |d:timestamp (optional) — Add a timestamp to the event. Default is the current Unix epoch timestamp.
%% |h:hostname (optional) - Add a hostname to the event. No default.
%% |k:aggregation_key (optional) — Add an aggregation key to group the event with others that have the same key. No default.
%% |p:priority (optional) — Set to ‘normal’ or ‘low’. Default ‘normal’.
%% |s:source_type_name (optional) - Add a source type to the event. No default.
%% |t:alert_type (optional) — Set to ‘error’, ‘warning’, ‘info’ or ‘success’. Default ‘info’.
%% |#tag1:value1,tag2,tag3:value3... (optional)— The colon in tags is part of the tag list string and has no parsing purpose like for the other parameters. No default.
%%
-record(events, {
title = undefined :: string() | undefined,
text = undefined :: string() | undefined,
timestamp = 0 :: non_neg_integer(),
hostname = undefined :: string() | undefined,
aggregation_key = undefined :: string() | undefined,
priority = normal :: priority(),
source_type_name = undefined :: string() | undefined,
alert_type = info :: alert_type(),
tags = undefined :: tags() | undefined
}).
%% Service Check
%%
%% _sc|name|status|d:timestamp|h:hostname|#tag1:value1,tag2,tag3:value3,...|m:service_check_message
%%
%% _sc — the datagram must begin with _sc
%% name — Service check name.
%% status — Integer corresponding to the check status (OK = 0, WARNING = 1, CRITICAL = 2, UNKNOWN = 3).
%% d:timestamp (optional) — Add a timestamp to the check. Default is the current Unix epoch timestamp.
%% h:hostname (optional) — Add a hostname to the event. No default.
%% #tag1:value1,tag2,tag3:value3,... (optional) — The colon in tags is part of the tag list string and has no parsing purpose like for the other parameters.No default.
%% m:service_check_message (optional) — Add a message describing the current state of the service check. This field MUST be positioned last among the metadata fields. No default.
-record(service_check, {
name = undefined :: string() | undefined,
status = status_unknown :: status(),
timestamp = 0 :: non_neg_integer(),
hostname = undefined :: string() | undefined,
tags = undefined :: tags() | undefined,
message = undefined :: string() | undefined
}).
-export_type([metrics/0,
events/0,
service_check/0,
request/0,
status/0,
metric_type/0]).
-type status() :: status_ok | status_warn | status_crit | status_unknown.
-type metric_type() :: c | g | ms | h | s.
-type alert_type() :: info | warning | error | success.
-type priority() :: normal | low.
-type raw_request() :: binary().
-type tags() :: [{atom()|string()|binary(), string()}].
-type request() :: metrics() | events() | service_check().
-opaque metrics() :: #metrics{}.
-opaque events() :: #events{}.
-opaque service_check() :: #service_check{}.
%% @doc
%% new a metrics request record
%% @end
-spec new_metrics(Name :: string(), Type :: metric_type(), Value :: float(), Opts :: map()) -> metrics().
new_metrics(Name, Type, Value, Opts) ->
    %% Sample rate defaults to 1.0 (always sampled); tags are optional.
    Rate = maps:get(rate, Opts, 1.0),
    Tags = maps:get(tags, Opts, undefined),
    #metrics{name = Name,
             value = Value,
             type = Type,
             rate = Rate,
             tags = Tags}.
%% @doc
%% new a events request record
%% @end
-spec new_events(Name :: string(), Text :: string(), Opts :: map()) -> events().
%% Build an events record; every optional field falls back to its
%% DogStatsD-documented default. Timestamp defaults to "now" in Unix
%% epoch seconds.
new_events(Name, Text, Opts) ->
    #events{
       title = Name,
       text = Text,
       timestamp = maps:get(timestamp, Opts, os:system_time(seconds)),
       hostname = maps:get(hostname, Opts, undefined),
       aggregation_key = maps:get(aggregation_key, Opts, undefined),
       priority = maps:get(priority, Opts, normal),
       source_type_name = maps:get(source_type_name, Opts, undefined),
       alert_type = maps:get(alert_type, Opts, info),
       tags = maps:get(tags, Opts, undefined)}.
%% @doc
%% new a service check request record.
%% @end
-spec new_service_check(Name :: string(), Status :: status(), Opts :: map()) -> service_check().
%% Build a service_check record. The status is clamped to a known atom
%% via validate_status/1 (unknown terms become status_unknown).
new_service_check(Name, Status, Opts) ->
    #service_check{
       name = Name,
       status = validate_status(Status),
       hostname = maps:get(hostname, Opts, undefined),
       timestamp = maps:get(timestamp, Opts, os:system_time(seconds)),
       tags = maps:get(tags, Opts, undefined),
       message = maps:get(message, Opts, undefined)}.
%% @doc
%% encode request for DogStatsD
%% @end
-spec encode(request()) -> raw_request().
%% Encode any request record into a DogStatsD datagram binary,
%% dispatching on the record tag directly in the function heads.
encode(#metrics{} = Metrics) ->
    encode_metric(Metrics);
encode(#events{} = Events) ->
    encode_events(Events);
encode(#service_check{} = ServiceCheck) ->
    encode_service_check(ServiceCheck).
%% @doc
%% @private
%% Encode a metrics record as "name:value|type[|@rate][|#tags]".
%% Throws if name or value is missing.
%% @end
encode_metric(#metrics{name = undefined}) -> throw(name_is_undefined);
encode_metric(#metrics{value = undefined}) -> throw(value_is_undefined);
encode_metric(#metrics{name = Name, value = Val, type = Typ, rate = undefined, tags = undefined}) ->
    list_to_binary(io_lib:format("~ts:~p|~p", [Name, Val, Typ]));
encode_metric(#metrics{name = Name, value = Val, type = Typ, rate = Rate, tags = undefined}) ->
    list_to_binary(io_lib:format("~ts:~p|~p|@~p", [Name, Val, Typ, Rate]));
%% NOTE(review): when rate is undefined but tags are present, the default
%% rate 1.0 is emitted explicitly, while the tag-less clause above omits
%% the rate field — both are valid datagrams, but inconsistent. Confirm
%% which form downstream consumers expect.
encode_metric(#metrics{name = Name, value = Val, type = Typ, rate = undefined, tags = Tags}) ->
    Str = io_lib:format("~ts:~p|~p|@1.0~ts", [Name, Val, Typ, to_tags(Tags)]),
    list_to_binary(Str);
encode_metric(#metrics{name = Name, value = Val, type = Typ, rate = Rate, tags = Tags}) ->
    Str = io_lib:format("~ts:~p|~p|@~p~ts", [Name, Val, Typ, Rate, to_tags(Tags)]),
    list_to_binary(Str).
%% @doc
%% @private
%% Encode an events record as
%% "_e{TitleLen,TextLen}:title|text|d:ts[|h:host][|k:key]|p:prio[|s:src]|t:alert[|#tags]".
%% The datagram is assembled as a reversed list of fragments (each optional
%% field is conditionally consed on), reversed at the end, and flattened
%% with lists:concat/1.
%% @end
encode_events(#events{title = undefined}) -> throw(name_is_undefined);
encode_events(#events{text = undefined}) -> throw(value_is_undefined);
encode_events(#events{title = Title,
                      text = Txt,
                      timestamp = TS,
                      hostname = Hostname,
                      aggregation_key = AggrKey,
                      priority = Priority,
                      source_type_name = SrcTyp,
                      alert_type = AlertTyp,
                      tags = Tags}) ->
    %% Lengths are character-list lengths of title and text (the header
    %% "_e{a,b}" part of the datagram).
    TxtLen = length(Txt),
    TitleLen = length(Title),
    HeadList = lists:reverse(["_e{", TitleLen, ",", TxtLen, "}:", Title, "|", Txt, "|d:", TS]),
    %% Note: the fragments below are consed onto a *reversed* list, so each
    %% pair is written value-first, marker-second.
    WithHost =
        case Hostname of
            undefined ->
                HeadList;
            Hostname ->
                [Hostname, "|h:" | HeadList]
        end,
    WithAggrKey =
        case AggrKey of
            undefined ->
                WithHost;
            AggrKey ->
                [AggrKey, "|k:" | WithHost]
        end,
    WithPriority = [Priority, "|p:"| WithAggrKey],
    WithSrcType =
        case SrcTyp of
            undefined ->
                WithPriority;
            SrcTyp ->
                [SrcTyp, "|s:" | WithPriority]
        end,
    WithAlert = [AlertTyp, "|t:" | WithSrcType],
    WithTags =
        case Tags of
            undefined ->
                WithAlert;
            Tags ->
                [to_tags(Tags) | WithAlert]
        end,
    IOList = lists:reverse(WithTags),
    %% lists:concat/1 stringifies atoms and integers in the fragment list.
    list_to_binary(lists:concat(IOList)).
%% @doc
%% @private
%% Encode a service_check record as
%% "_sc|name|status|d:ts[|h:host][|#tags][|m:message]".
%% Built the same way as encode_events/1: reversed fragment list, optional
%% fields consed on conditionally, reversed and flattened at the end
%% (which keeps the message field last, as the protocol requires).
%% @end
encode_service_check(#service_check{name = undefined}) -> throw(name_is_undefined);
encode_service_check(#service_check{name = Name, status = Status, timestamp = TS, hostname = Hostname, tags = Tags, message = Msg}) ->
    %% Map the status atom to the wire integer (OK=0, WARN=1, CRIT=2, UNKNOWN=3).
    EncodedStatus =
        case Status of
            status_ok -> 0;
            status_warn -> 1;
            status_crit -> 2;
            status_unknown -> 3
        end,
    HeadList = lists:reverse(["_sc|", Name, "|", EncodedStatus, "|d:", TS]),
    WithHost =
        case Hostname of
            undefined ->
                HeadList;
            Hostname ->
                [Hostname, "|h:" | HeadList]
        end,
    WithTags =
        case Tags of
            undefined ->
                WithHost;
            Tags ->
                [to_tags(Tags) | WithHost]
        end,
    WithMsg =
        case Msg of
            undefined ->
                WithTags;
            Msg ->
                [Msg, "|m:"|WithTags]
        end,
    IOList = lists:reverse(WithMsg),
    list_to_binary(lists:concat(IOList)).
%% @doc
%% @private
%% Clamp an arbitrary term to a known service-check status atom; any
%% unrecognised value maps to status_unknown.
%% @end
validate_status(Status) when Status =:= status_ok;
                             Status =:= status_warn;
                             Status =:= status_crit ->
    Status;
validate_status(_Other) ->
    status_unknown.
%% Render a tag list as "|#k1:v1,k2:v2". The list is reversed first so
%% that the accumulator in to_iolist_tags/2 rebuilds the original order.
%% NOTE(review): lists:concat/1 accepts atoms, numbers and strings only —
%% binary keys/values would crash; confirm the intended tag value types.
to_tags(Tags) ->
    lists:concat(to_iolist_tags(lists:reverse(Tags), [])).
%% Build the tag fragment list for lists:concat/1: "|#" before the first
%% tag, "," between tags, ":" between key and value.
%% Fixes: (1) stray dataset-metadata residue appended after the final
%% period ("| src/dogstatsc_datagram.erl | ...") made the last form
%% unparsable; (2) an empty tag list had no matching clause and crashed
%% with function_clause — it now yields an empty fragment list.
to_iolist_tags([], Acc) ->
    Acc;
to_iolist_tags([{Key, Value}], Acc) ->
    ["|#", Key, ":", Value|Acc];
to_iolist_tags([{Key, Value}|Tags], Acc) ->
    to_iolist_tags(Tags, [",", Key, ":", Value|Acc]).
%%------------------------------------------------------------------------------
%% @author <NAME> <<EMAIL>>
%% @doc The Ecron API module.
%%
%% The Ecron application executes scheduled functions.
%% A list of functions to execute might be specified in the ecron application
%% resource file as value of the `scheduled' environment variable.
%%
%% Each entry specifies a job and must contain the scheduled time and a MFA
%% tuple `{Module, Function, Arguments}'.
%% It's also possible to configure options for a retry algorithm to run in case
%% MFA fails.
%% <pre>
%% Job = {{Date, Time}, MFA, Retry, Seconds} |
%% {{Date, Time}, MFA}
%% </pre>
%% `Seconds = integer()' is the retry interval.
%%
%% `Retry = integer() | infinity' is the number of times to retry.
%%
%%
%% Example of ecron.app
%% <pre>
%% ...
%% {env,[{scheduled,
%% [{{{ '*', '*', '*'}, {0 ,0,0}}, {my_mod, my_fun1, Args}},
%% {{{ '*', 12 , 25}, {0 ,0,0}}, {my_mod, my_fun2, Args}},
%% {{{ '*', 1 , 1 }, {0 ,0,0}}, {my_mod, my_fun3, Args}, infinity, 60},
%% {{{2010, 1 , 1 }, {12,0,0}}, {my_mod, my_fun3, Args}},
%% {{{ '*', 12 ,last}, {0 ,0,0}}, {my_mod, my_fun4, Args}]}]},
%% ...
%% </pre>
%% Once the ecron application is started, it's possible to dynamically add new
%% jobs using the `ecron:insert/2' or `ecron:insert/4'
%% API.
%%
%% The MFA is executed when a task is set to run.
%% The MFA has to return `ok', `{ok, Data}', `{apply, fun()}'
%% or `{error, Reason}'.
%% If `{error, Reason}' is returned and the job was defined with retry options
%% (Retry and Seconds were specified together with the MFA) then ecron will try
%% to execute MFA later according to the given configuration.
%%
%% The MFA may return `{apply, fun()}' where `fun()' has arity zero.
%%
%% `fun' will be immediately executed after MFA execution.
%% The `fun' has to return `ok', `{ok, Data}' or `{error, Reason}'.
%%
%% If the MFA or `fun' terminates abnormally or returns an invalid
%% data type (not `ok', `{ok, Data}' or `{error, Reason}'), an event
%% is forwarded to the event manager and no retries are executed.
%%
%% If the return value of the fun is `{error, Reason}' and retry
%% options were given in the job specification then the `fun' is
%% rescheduled to be executed after the configurable amount of time.
%%
%% Data which does not change between retries of the `fun'
%% must be calculated outside the scope of the `fun'.
%% Data which changes between retries has to be calculated within the scope
%% of the `fun'.<br/>
%% In the following example, ScheduleTime will change each time the function is
%% scheduled, while ExecutionTime will change for every retry. If static data
%% has to persist across calls or retries, this is done through a function in
%% the MFA or the fun.
%%
%% <pre>
%% print() ->
%% ScheduledTime = time(),
%% {apply, fun() ->
%% ExecutionTime = time(),
%% io:format("Scheduled:~p~n",[ScheduledTime]),
%% io:format("Execution:~p~n",[ExecutionTime]),
%% {error, retry}
%% end}.
%% </pre>
%% Event handlers may be configured in the application resource file specifying
%% for each of them, a tuple as the following:
%%
%% <pre>{Handler, Args}
%%
%% Handler = Module | {Module,Id}
%% Module = atom()
%% Id = term()
%% Args = term()
%% </pre>
%% `Module:init/1' will be called to initiate the event handler and
%% its internal state<br/><br/>
%% Example of ecron.app
%% <pre>
%% ...
%% {env, [{event_handlers, [{ecron_event, []}]}]},
%% ...
%% </pre>
%% The API `add_event_handler/2' and
%% `delete_event_handler/1'
%% allow user to dynamically add and remove event handlers.
%%
%% All the configured event handlers will receive the following events:
%%
%% `{mfa_result, Result, {Schedule, {M, F, A}}, DueDateTime, ExecutionDateTime}'
%% when MFA is executed.
%%
%% `{fun_result, Result, {Schedule, {M, F, A}}, DueDateTime, ExecutionDateTime}'
%% when `fun' is executed.
%%
%% `{retry, {Schedule, MFA}, Fun, DueDateTime}'
%% when MFA, or `fun', is rescheduled to be executed later after a failure.
%%
%% `{max_retry, {Schedule, MFA}, Fun, DueDateTime}' when MFA,
%% or `fun' has reached maximum number of retry specified when
%% the job was inserted.
%%
%% `Result' is the return value of MFA or `fun'.
%% If an exception occurs during evaluation of MFA, or `fun', then
%% it's caught and sent in the event.
%% (E.g. <code>Result = {'EXIT',{Reason,Stack}}</code>).
%%
%% `Schedule = {Date, Time}' as given when the job was inserted, E.g.
%% <code> {{'*','*','*'}, {0,0,0}}</code><br/>
%% `DueDateTime = {Date, Time} ' is the exact Date and Time when the MFA,
%% or the `fun', was supposed to run.
%% E.g. ` {{2010,1,1}, {0,0,0}}'<br/>
%% `ExecutionDateTime = {Date, Time} ' is the exact Date and Time
%% when the MFA, or the `fun', was executed.<br/><br/><br/>
%% If a node is restarted while there are jobs in the list then these jobs are
%% not lost. When Ecron starts it takes a list of scheduled MFA from the
%% environment variable `scheduled' and inserts them into a persistent table
%% (mnesia). If an entry of the scheduled MFA specifies the same parameters
%% values of a job already present in the table then the entry won't be inserted
%% avoiding duplicated jobs. <br/>
%% No duplicated are removed from the MFA list configured in the `
%% scheduled' variable.
%%
%% @end
%%% Copyright (c) 2009-2010 Erlang Solutions
%%% All rights reserved.
%%%
%%% Redistribution and use in source and binary forms, with or without
%%% modification, are permitted provided that the following conditions are met:
%%% * Redistributions of source code must retain the above copyright
%%% notice, this list of conditions and the following disclaimer.
%%% * Redistributions in binary form must reproduce the above copyright
%%% notice, this list of conditions and the following disclaimer in the
%%% documentation and/or other materials provided with the distribution.
%%% * Neither the name of the Erlang Solutions nor the names of its
%%% contributors may be used to endorse or promote products
%%% derived from this software without specific prior written permission.
%%%
%%% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
%%% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
%%% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
%%% ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
%%% BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
%%% CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
%%% SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
%%% BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
%%% WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
%%% OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
%%% ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
%%------------------------------------------------------------------------------
-module(ecron).
-author('<EMAIL>').
-copyright('Erlang Solutions Ltd.').
-behaviour(gen_server).
%% API
-export([install/0,
install/1,
start_link/0,
insert/2,
insert/4,
list/0,
print_list/0,
execute_all/0,
refresh/0,
delete/1,
delete_all/0,
add_event_handler/2,
list_event_handlers/0,
delete_event_handler/1]).
%% gen_server callbacks
-export([init/1,
handle_call/3,
handle_cast/2,
handle_info/2,
terminate/2,
code_change/3]).
-export([execute_job/2,
create_add_job/1]).
-include("ecron.hrl").
%%==============================================================================
%% API functions
%%==============================================================================
%%------------------------------------------------------------------------------
%% @spec start_link() -> {ok,Pid} | ignore | {error,Error}
%% @doc Start the server
%% @private
%% @end
%%------------------------------------------------------------------------------
start_link() ->
    %% Singleton server, registered locally under the module name.
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
%%------------------------------------------------------------------------------
%% @spec install() -> ok
%% @doc Create mnesia tables on those nodes where disc_copies resides according
%% to the schema. <br/>
%% Before starting the `ecron' application
%% for the first time a new database must be created, `mnesia:create_schema/1
%% ' and tables created by `ecron:install/0' or
%% `ecron:install/1'<br/>
%% E.g. <br/>
%% <pre>
%% >mnesia:create_schema([node()]).
%% >mnesia:start().
%% >ecron:install().
%% </pre>
%% @end
%%------------------------------------------------------------------------------
install() ->
    %% Default to the nodes that hold a disc copy of the mnesia schema.
    install(mnesia:table_info(schema,disc_copies)).
%%------------------------------------------------------------------------------
%% @spec install(Nodes) -> ok
%% @doc Create mnesia tables on Nodes.
%% @end
%%------------------------------------------------------------------------------
install(Nodes) ->
    %% Job-id counter table plus the job table; ordered_set keeps jobs
    %% sorted by key {DueSeconds, Id}, so the next job to run comes first.
    create_table(?JOB_COUNTER, [{disc_copies, Nodes}]),
    create_table(?JOB_TABLE, [{type, ordered_set},
                              {attributes, record_info(fields, job)},
                              {disc_copies, Nodes}]).
%%------------------------------------------------------------------------------
%% @spec add_event_handler(Handler, Args) -> {ok, Pid} | {error, Reason}
%% Handler = Module | {Module,Id}
%% Module = atom()
%% Id = term()
%% Args = term()
%% Pid = pid()
%% @doc Adds a new event handler. The handler is added regardless of whether
%% it's already present, thus duplicated handlers may exist.
%% @end
%%------------------------------------------------------------------------------
add_event_handler(Handler, Args) ->
    %% Handlers run under the event supervisor; duplicates are allowed.
    ecron_event_sup:start_handler(Handler, Args).
%%------------------------------------------------------------------------------
%% @spec delete_event_handler(Pid) -> ok
%% Pid = pid()
%% @doc Deletes an event handler. Pid is the pid() returned by
%% `add_event_handler/2'.
%% @end
%%------------------------------------------------------------------------------
delete_event_handler(Pid) ->
    %% Pid is the one returned by add_event_handler/2.
    ecron_event_sup:stop_handler(Pid).
%%------------------------------------------------------------------------------
%% @spec list_event_handlers() -> [{Pid, Handler}]
%% Handler = Module | {Module,Id}
%% Module = atom()
%% Id = term()
%% Pid = pid()
%% @doc Returns a list of all event handlers installed by the
%% `ecron:add_event_handler/2' API or configured in the
%% `event_handlers' environment variable.
%% @end
%%------------------------------------------------------------------------------
list_event_handlers() ->
    %% Includes handlers from the env config and dynamically added ones.
    ecron_event_sup:list_handlers().
%%------------------------------------------------------------------------------
%% @spec insert(DateTime, MFA) -> ok
%% DateTime = {Date, Time}
%% Date = {Year, Month, Day} | '*'
%% Time = {Hours, Minutes, Seconds}
%% Year = integer() | '*'
%% Month = integer() | '*'
%% Day = integer() | '*' | last
%% Hours = integer()
%% Minutes = integer()
%% Seconds = integer()
%% MFA = {Module, Function, Args}
%% @doc Schedules the MFA at the given Date and Time. <br/>
%% Inserts the MFA into the queue to be scheduled at
%% {Year,Month, Day},{Hours, Minutes,Seconds}<br/>
%% <pre>
%% Month = 1..12 | '*'
%% Day = 1..31 | '*' | last
%% Hours = 0..23
%% Minutes = 0..59
%% Seconds = 0..59
%% </pre>
%% If `Day = last' then the MFA will be executed last day of the month.
%%
%% <code>{'*', Time}</code> runs the MFA every day at the given time and it's
%% the same as writing <code>{{'*','*','*'}, Time}</code>.
%%
%% <code>{{'*', '*', Day}, Time}</code> runs the MFA every month at the given
%% Day and Time. It must be `Day = 1..28 | last'
%%
%% <code>{{'*', Month, Day}, Time}</code> runs the MFA every year at the given
%% Month, Day and Time. Day must be valid for the given month or the atom
%% `last'.
%% If `Month = 2' then it must be `Day = 1..28 | last'
%%
%% Combinations of the format <code>{'*', Month, '*'}</code> are not allowed.
%%
%% `{{Year, Month, Day}, Time}' runs the MFA at the given Date and Time.
%%
%% Returns `{error, Reason}' if invalid parameters have been passed.
%% @end
%%------------------------------------------------------------------------------
insert({_Date, _Time} = DateTime, MFA) ->
    %% No retry configuration: delegate with Retry/Seconds left undefined.
    insert(DateTime, MFA, undefined, undefined).
%%------------------------------------------------------------------------------
%% @spec insert(DateTime, MFA, Retry, RetrySeconds) -> ok
%% DateTime = {Date, Time}
%% Date = {Year, Month, Day} | '*'
%% Time = {Hours, Minutes, Seconds}
%% Year = integer() | '*'
%% Month = integer() | '*'
%% Day = integer() | '*' | last
%% Hours = integer()
%% Minutes = integer()
%% Seconds = integer()
%% Retry = integer() | infinity
%% RetrySeconds = integer()
%% MFA = {Module, Function, Args}
%% @doc Schedules the MFA at the given Date and Time and retry if it fails.
%%
%% Same description of insert/2. Additionally if MFA returns
%% `{error, Reason}' ecron will retry to execute
%% it after `RetrySeconds'. The MFA will be rescheduled for a
%% maximum of Retry times. If MFA returns `{apply, fun()}' and the
%% return value of `fun()' is `{error, Reason}' the
%% retry mechanism applies to `fun'. If Retry is equal to 3
%% then MFA will be executed for a maximum of four times. The first time
%% when is supposed to run according to the schedule and then three more
%% times at interval of RetrySeconds.
%% @end
%%------------------------------------------------------------------------------
insert({Date, Time} = _DateTime, MFA, Retry, Seconds) ->
    case validate(Date, Time) of
        ok ->
            %% The key orders jobs by absolute due time expressed in
            %% gregorian seconds; the mnesia counter supplies a unique
            %% tie-breaker id for jobs due at the same second.
            DueSec = sec({Date, Time}),
            DueTime = calendar:gregorian_seconds_to_datetime(DueSec),
            Key = {DueSec, mnesia:dirty_update_counter(?JOB_COUNTER, job,1)},
            Job = #job{key = Key,
                       mfa = {MFA, DueTime},
                       schedule = {Date, Time},
                       retry = {Retry, Seconds}},
            %% Asynchronous insert: the gen_server owns all table writes.
            gen_server:cast(?MODULE, {insert, Job});
        Error ->
            Error
    end.
%% @spec list() -> JobList
%% @doc Returns a list of job records defined in ecron.hrl
%% @end
%%------------------------------------------------------------------------------
list() ->
    %% Generous 60 s timeout: the server may be busy executing jobs.
    gen_server:call(?MODULE, list, 60000).
%%------------------------------------------------------------------------------
%% @spec print_list() -> ok
%% @doc Prints a pretty list of records sorted by Job ID. <br/>
%% E.g. <br/>
%% <pre>
%% -----------------------------------------------------------------------
%% ID: 208
%% Function To Execute: mfa
%% Next Execution DateTime: {{2009,11,8},{15,59,54}}
%% Scheduled Execution DateTime: {{2009,11,8},{15,59,34}}
%% MFA: {ecron_tests,test_function,[fra]}
%% Schedule: {{'*','*',8},{15,59,34}}
%% Max Retry Times: 4
%% Retry Interval: 20
%% -----------------------------------------------------------------------
%% </pre>
%% <b>`ID'</b> is the Job ID and should be used as argument in
%% `delete/1'.<br/>
%% <b>`Function To Execute'</b> says if the job refers to the
%% MFA or the `fun' returned by MFA.
%%
%% <b>`Next Execution DateTime'</b> is the date and time when
%% the job will be executed.
%%
%% <b>`Scheduled Execution DateTime'</b> is the date and time
%% when the job was supposed to be executed according to the given
%% `Schedule'.`Next Execution DateTime' and
%% `Scheduled Execution DateTime' are different if the MFA, or
%% the `fun', failed and it will be retried later
%% (as in the example given above).
%%
%% <b>`MFA'</b> is a tuple with Module, Function and Arguments as
%% given when the job was inserted.<br/>
%% <b>`Schedule'</b> is the schedule for the MFA as given when the
%% job was inserted.<br/>
%% <b>`Max Retry Times'</b> is the number of times ecron will retry to
%% execute the job in case of failure. It may be less than the value given
%% when the job was inserted if a failure and a retry has already occured.
%%
%% <b>`Retry Interval'</b> is the number of seconds ecron will wait
%% after a failure before retrying to execute the job. It's the value given
%% when the job was inserted.
%% @end
%%------------------------------------------------------------------------------
print_list() ->
    Jobs = gen_server:call(?MODULE, list, 60000),
    %% Sort ascending by job id; ids are unique, so usort never drops jobs.
    SortedJobs = lists:usort(
                   fun(J1, J2) ->
                       element(2, J1#job.key) =< element(2, J2#job.key)
                   end,
                   Jobs),
    lists:foreach(
      fun(Job) ->
          #job{key = {ExecSec, Id},
               schedule = Schedule,
               mfa = {MFA, DueDateTime},
               retry = {RetryTimes, Seconds},
               client_fun = Fun
              } = Job,
          %% client_fun is set when the MFA returned {apply, fun()} and
          %% the fun is what is pending; otherwise the MFA itself is.
          {Function, ExpectedDateTime} =
              case Fun of
                  undefined -> {mfa, DueDateTime};
                  {_, DT} -> {'fun', DT}
              end,
          ExecDateTime = calendar:gregorian_seconds_to_datetime(ExecSec),
          io:format("~70c-~n",[$-]),
          io:format("ID: ~p~nFunction To Execute: ~p~n"
                    "Next Execution DateTime: ~p~n",
                    [Id, Function, ExecDateTime]),
          io:format("Scheduled Execution DateTime: ~w~nMFA: ~w~n"
                    "Schedule: ~p~n",
                    [ExpectedDateTime, MFA, Schedule]),
          io:format("Max Retry Times: ~p~nRetry Interval: ~p~n",
                    [RetryTimes, Seconds]),
          io:format("~70c-~n",[$-])
      end, SortedJobs).
%%------------------------------------------------------------------------------
%% @spec refresh() -> ok
%% @doc Deletes all jobs and recreates the table from the environment variables.
%% @end
%%------------------------------------------------------------------------------
refresh() ->
    %% Asynchronous: the handle_cast(refresh, _) clause clears the table
    %% and re-loads jobs from the 'scheduled' application env variable.
    gen_server:cast(?MODULE, refresh).
%%------------------------------------------------------------------------------
%% @spec execute_all() -> ok
%% @doc Executes all cron jobs in the queue, irrespective of the time they are
%% scheduled to run. This might be used at startup and shutdown, ensuring no
%% data is lost and backed-up data is handled correctly. <br/>
%% It asynchronously returns `ok' and then executes all the jobs
%% in parallel. <br/>
%% No retry will be executed even if the MFA, or the `fun', fails and the retry
%% mechanism is enabled for that job. Also in case of periodic jobs MFA won't
%% be rescheduled. Thus the jobs list will always be empty after calling
%% `execute_all/0'.
%% @end
%%------------------------------------------------------------------------------
execute_all() ->
    %% Fire-and-forget: handle_cast(execute_all, _) drains the table and
    %% spawns every job immediately, with retries disabled.
    gen_server:cast(?MODULE, execute_all).
%%------------------------------------------------------------------------------
%% @spec delete(ID) -> ok
%% @doc Deletes a cron job from the list.
%% If the job does not exist, the function still returns ok
%% @see print_list/0
%% @end
%%------------------------------------------------------------------------------
delete(ID) ->
    %% Asynchronous delete; matched in handle_cast either as a full key
    %% ({DueSec, Id}) or as a bare job ID (selected from the table).
    gen_server:cast(?MODULE, {delete, ID}).
%%------------------------------------------------------------------------------
%% @spec delete_all() -> ok
%% @doc Delete all the scheduled jobs
%% @end
%%------------------------------------------------------------------------------
delete_all() ->
    %% Handled by the handle_cast(delete, _) clause, which clears the
    %% whole job table.
    gen_server:cast(?MODULE, delete).
%%==============================================================================
%% gen_server callbacks
%%==============================================================================
%%------------------------------------------------------------------------------
%% @spec init(Args) -> {ok, State}
%% @doc
%% @private
%% @end
%%------------------------------------------------------------------------------
init(_Args) ->
    %% Jobs configured via the 'scheduled' application env variable.
    NewScheduled =
        case application:get_env(ecron, scheduled) of
            undefined -> [];
            {ok, MFAList} -> MFAList
        end,
    %% Drain any jobs persisted from a previous run (and clear the table;
    %% every surviving job is re-inserted with a freshly computed key).
    OldScheduled = mnesia:activity(async_dirty,
                                   fun() ->
                                           Objs = mnesia:select(?JOB_TABLE,
                                                                [{#job{_ = '_'},
                                                                  [], ['$_']}]),
                                           {atomic, ok} = mnesia:clear_table(?JOB_TABLE),
                                           Objs
                                   end),
    %% Keep all old jobs (the accumulator starts as OldScheduled) and add
    %% configured entries that are not already present.
    Scheduled = remove_duplicated(NewScheduled, OldScheduled, OldScheduled),
    lists:foreach(fun create_add_job/1, Scheduled),
    %% Server state is [] — all scheduling state lives in mnesia; the
    %% gen_server timeout drives job execution.
    case mnesia:dirty_first(?JOB_TABLE) of
        {DueSec, _} ->
            {ok, [], get_timeout(DueSec)};
        '$end_of_table' ->
            {ok, []}
    end.
%%------------------------------------------------------------------------------
%% @spec handle_info(Info, State) -> {noreply, State} |
%% {noreply, State, Timeout} |
%% {stop, Reason, State}
%% @doc Handling all non call/cast messages
%% @private
%% @end
%%------------------------------------------------------------------------------
%% The gen_server timeout fired: drain every job that is due.
handle_info(timeout, State) ->
    case mnesia:dirty_first(?JOB_TABLE) of
        '$end_of_table' ->
            {noreply, State};
        K ->
            check_job(K, State)
    end;
%% Any other message: ignore it, but re-arm the timeout from the earliest
%% pending job. NOTE: handle_info/2 may only return noreply/stop tuples;
%% the previous code returned {reply, ok, ...} here, which would make
%% gen_server terminate with {bad_return_value, ...}.
handle_info(_Info, State) ->
    case mnesia:dirty_first(?JOB_TABLE) of
        {DueSec, _} ->
            {noreply, State, get_timeout(DueSec)};
        '$end_of_table' ->
            {noreply, State}
    end.
%%------------------------------------------------------------------------------
%% @spec handle_call(Request, From, State) -> {reply, Reply, State, Timeout}|
%% {reply, Reply, State}
%% @doc Handling call messages
%% @private
%% @end
%%------------------------------------------------------------------------------
%% Serves the `list' request (used by print_list/0): returns every job
%% currently stored in the table.
handle_call(list, _From, State) ->
    Keys = mnesia:dirty_all_keys(?JOB_TABLE),
    Jobs = lists:map(fun(Key) ->
                             [J] = mnesia:dirty_read(?JOB_TABLE, Key),
                             J
                     end, Keys),
    %% Re-arm the timeout from the earliest due job. Use dirty_first/1
    %% (as every other callback in this module does) rather than hd(Keys):
    %% dirty_all_keys/1 gives no ordering guarantee, so hd(Keys) was not
    %% necessarily the job with the smallest due time.
    case mnesia:dirty_first(?JOB_TABLE) of
        '$end_of_table' ->
            {reply, Jobs, State};
        {DueSec, _} ->
            {reply, Jobs, State, get_timeout(DueSec)}
    end.
%%------------------------------------------------------------------------------
%% @spec handle_cast(Msg, State) -> {noreply, State} |
%% {noreply, State, Timeout} |
%% {stop, Reason, State}
%% @doc Handling cast messages
%% @private
%% @end
%%------------------------------------------------------------------------------
%% insert: store an already-built job (used by retry/5) and re-arm the
%% timeout from the new earliest key. dirty_first/1 cannot return
%% '$end_of_table' here because a record was just written.
handle_cast({insert, Job}, State) ->
    ok = mnesia:dirty_write(Job),
    {DueSec, _} = mnesia:dirty_first(?JOB_TABLE),
    {noreply, State, get_timeout(DueSec)};
%% delete by full key {DueSec, Id}.
handle_cast({delete, {_, _} = Key}, State) ->
    ok = mnesia:dirty_delete(?JOB_TABLE, Key),
    case mnesia:dirty_first(?JOB_TABLE) of
        {DueSec, _} ->
            {noreply, State, get_timeout(DueSec)};
        '$end_of_table' ->
            {noreply, State}
    end;
%% delete by job ID: match on the second element of the key. IDs come
%% from a mnesia counter, so at most one object matches ([Obj]).
handle_cast({delete, ID}, State) ->
    ok = mnesia:activity(async_dirty,
                         fun() ->
                                 case mnesia:select(?JOB_TABLE,
                                                    [{#job{key = {'_', ID}, _='_'},
                                                      [], ['$_']}]) of
                                     [] ->
                                         ok;
                                     [Obj] ->
                                         ok = mnesia:delete_object(Obj)
                                 end
                         end),
    case mnesia:dirty_first(?JOB_TABLE) of
        {DueSec, _} ->
            {noreply, State, get_timeout(DueSec)};
        '$end_of_table' ->
            {noreply, State}
    end;
%% delete_all/0: drop every scheduled job.
handle_cast(delete, State) ->
    {atomic, ok} = mnesia:clear_table(?JOB_TABLE),
    {noreply, State};
%% execute_all/0: drain the table and run every job now, in parallel,
%% with retries disabled and no rescheduling (second spawn arg = false).
handle_cast(execute_all, State) ->
    Scheduled = mnesia:activity(async_dirty,
                                fun() ->
                                        Objs = mnesia:select(?JOB_TABLE,
                                                             [{#job{_ = '_'},
                                                               [], ['$_']}]),
                                        {atomic, ok} = mnesia:clear_table(?JOB_TABLE),
                                        Objs
                                end),
    lists:foreach(fun(Job) ->
                          NoRetryJob = Job#job{retry={undefined, undefined}},
                          spawn(?MODULE, execute_job, [NoRetryJob, false])
                  end, Scheduled),
    %% Table may be non-empty again if an insert cast raced in.
    case mnesia:dirty_first(?JOB_TABLE) of
        {DueSec, _} ->
            {noreply, State, get_timeout(DueSec)};
        '$end_of_table' ->
            {noreply, State}
    end;
%% refresh/0: clear the table and re-load from the application env.
handle_cast(refresh, State) ->
    {atomic, ok} = mnesia:clear_table(?JOB_TABLE),
    NewScheduled =
        case application:get_env(ecron, scheduled) of
            undefined -> [];
            {ok, MFAList} -> MFAList
        end,
    lists:foreach(fun create_add_job/1, NewScheduled),
    case mnesia:dirty_first(?JOB_TABLE) of
        {DueSec, _} ->
            {noreply, State, get_timeout(DueSec)};
        '$end_of_table' ->
            {noreply, State}
    end;
handle_cast(stop, State) ->
    {stop, normal, State};
%% Ignore unknown casts.
handle_cast(_Msg, State) ->
    {noreply, State}.
%%------------------------------------------------------------------------------
%% @spec terminate(Reason, State) -> void()
%% Reason = term()
%% State = term()
%% @doc This function is called by a gen_server when it is about to
%% terminate. It should be the opposite of Module:init/1 and do
%% any necessary cleaning up. When it returns, the gen_server
%% terminates with Reason. The return value is ignored.
%% @private
%% @end
%%------------------------------------------------------------------------------
terminate(_Reason, _State) ->
    %% Nothing to clean up: all scheduling state lives in mnesia.
    ok.
%%------------------------------------------------------------------------------
%% @spec code_change(OldVsn, State, Extra) -> {ok, NewState}
%% @doc Convert process state when code is changed
%% @private
%% @end
%%------------------------------------------------------------------------------
code_change(_OldVsn, State, _Extra) ->
    %% Re-arm the scheduling timeout from the earliest pending job, if any.
    case mnesia:dirty_first(?JOB_TABLE) of
        '$end_of_table' ->
            {ok, State};
        {NextDueSec, _Id} ->
            {ok, State, get_timeout(NextDueSec)}
    end.
%%==============================================================================
%% Internal functions
%%==============================================================================
%% Find DayOfMonth corresponding to this DayOfWeek, Basedate is starting date.
%% Gap is the gap for the next try if initial finding failed.
%% duedow/2,3: find the day-of-month matching day-of-week Dow, searching
%% forward from Startingdate. Gap names the roll-over strategy
%% ('monthyear' | 'month' | 'year') used when the computed day overflows
%% the month; 'undefined' means no roll-over (return 'error' instead).
duedow(Startingdate, Dow) -> duedow(Startingdate, Dow, 'undefined').
duedow({Year, Month, Day}=Startingdate, Dow, Gap) ->
    Startingdow = calendar:day_of_the_week(Startingdate),
    %% Move forward (never backward) to the next day with weekday Dow.
    %% The resulting Day may overflow the month (e.g. day 34).
    Duedate =
        if
            Dow < Startingdow ->
                {Year, Month, Day+7-(Startingdow-Dow)};
            Dow >= Startingdow ->
                {Year, Month, Day+(Dow-Startingdow)}
        end,
    case calendar:valid_date(Duedate) of
        true -> {'ok', Duedate};
        false ->
            case Gap of
                'undefined' -> 'error';
                _ -> next_duedow(Duedate, Gap)
            end
    end.
%% @private Weekday of {Year, Month, Day} where Day may overflow the month
%% (e.g. day 34 = the 3rd of the next month): extend from the 1st of the
%% month. calendar:day_of_the_week/1 only accepts valid dates, so it
%% cannot be used on the overflowed dates duedow/3 hands us — doing so
%% crashed the roll-over path.
dow_of(Year, Month, Day) ->
    (calendar:date_to_gregorian_days({Year, Month, 1}) + (Day - 1) + 5) rem 7 + 1.
%% One week later within the same month; the week is always "variable".
next_duedow({Year, Month, Day}) ->
    case calendar:valid_date({Year, Month, Day+7}) of
        true -> {'ok', {Year, Month, Day+7}};
        false -> 'error'
    end.
%% Month and year are variable: overflow rolls into the next month/year.
next_duedow({Year, Month, Day}, 'monthyear') ->
    Dow = dow_of(Year, Month, Day),
    case calendar:valid_date({Year, Month, Day+7}) of
        true -> {'ok', {Year, Month, Day+7}};
        false ->
            case calendar:valid_date({Year, Month+1, 1}) of
                true -> duedow({Year, Month+1, 1}, Dow);
                false -> duedow({Year+1, 1, 1}, Dow)
            end
    end;
%% Only the month is variable.
next_duedow({Year, Month, Day}, 'month') ->
    Dow = dow_of(Year, Month, Day),
    case calendar:valid_date({Year, Month, Day+7}) of
        %% Wrap in {'ok', _} like every other clause; this clause used to
        %% return the bare date, breaking callers that match {'ok', _}.
        true -> {'ok', {Year, Month, Day+7}};
        false ->
            case calendar:valid_date({Year, Month+1, 1}) of
                true -> duedow({Year, Month+1, 1}, Dow);
                false -> 'error'
            end
    end;
%% Only the year is variable.
next_duedow({Year, Month, Day}, 'year') ->
    Dow = dow_of(Year, Month, Day),
    case calendar:valid_date({Year, Month, Day+7}) of
        true -> {'ok', {Year, Month, Day+7}};
        false -> duedow({Year+1, Month, 1}, Dow)
    end.
sec() ->
    %% Current local time as gregorian seconds.
    calendar:datetime_to_gregorian_seconds(ecron_time:localtime()).
%% sec/1: resolve a schedule specification to an absolute due time in
%% gregorian seconds. Wildcard ('*') fields roll forward to the next
%% occurrence; a fourth date element is a day-of-week (Dow, 1..7).
sec({'*', Time}) ->
    sec({{'*','*','*'}, Time});
sec({{'*', '*', '*', '*'}, Time}) ->
    sec({{'*', '*', '*'}, Time});
%% Every day at Time.
sec({{'*','*','*'}, Time}) ->
    {Date1, Time1} = ecron_time:localtime(),
    Now = calendar:datetime_to_gregorian_seconds({Date1, Time1}),
    Due = calendar:datetime_to_gregorian_seconds({Date1, Time}),
    case Due - Now of
        Diff when Diff =< 0 ->
            %% The Job will be executed tomorrow at Time
            Due + 86400;
        _Diff ->
            Due
    end;
%% Every month, on Day (or the atom 'last'), at Time.
sec({{'*','*',Day}, Time}) ->
    {{Year1, Month1, Day1}, Time1} = ecron_time:localtime(),
    Now = calendar:datetime_to_gregorian_seconds({{Year1, Month1, Day1}, Time1}),
    RealDay = get_real_day(Year1, Month1, Day),
    Due = calendar:datetime_to_gregorian_seconds({{Year1, Month1, RealDay}, Time}),
    case Due - Now of
        Diff when Diff =< 0 ->
            %% The Job will be executed next month; add_month/1 re-resolves
            %% 'last' for the target month.
            DueDate = add_month({Year1, Month1, Day}),
            calendar:datetime_to_gregorian_seconds({DueDate, Time});
        _Diff ->
            Due
    end;
%% Every week, on weekday Dow, at Time.
sec({{'*', '*', '*', Dow}, Time}) ->
    {Date1, Time1} = ecron_time:localtime(),
    {'ok', D} = duedow(Date1, Dow, 'monthyear'),
    Duedate =
        case
            calendar:datetime_to_gregorian_seconds({D, Time}) >=
            calendar:datetime_to_gregorian_seconds({Date1, Time1}) of
            true ->
                D;
            _ ->
                {'ok', Nd} = next_duedow(D, 'monthyear'),
                Nd
        end,
    calendar:datetime_to_gregorian_seconds({Duedate, Time});
%% Every year, on Month/Day (Day may be 'last'), at Time.
sec({{'*', Month, Day}, Time}) ->
    {{Year1, Month1, Day1}, Time1} = ecron_time:localtime(),
    Now = calendar:datetime_to_gregorian_seconds({{Year1, Month1, Day1}, Time1}),
    RealDay = get_real_day(Year1, Month, Day),
    Due = calendar:datetime_to_gregorian_seconds({{Year1, Month, RealDay}, Time}),
    case Due - Now of
        Diff when Diff =< 0 ->
            %% The Job will be executed next year. Re-resolve 'last' for
            %% the target year: Day may be the atom 'last', which is not a
            %% valid day number by itself (previously Day was used raw,
            %% crashing datetime_to_gregorian_seconds for 'last').
            NextDay = get_real_day(Year1 + 1, Month, Day),
            calendar:datetime_to_gregorian_seconds({{Year1 + 1, Month, NextDay}, Time});
        _Diff ->
            Due
    end;
%% Every year in Month, on weekday Dow, at Time.
sec({{'*', Month, '*', Dow}, Time}) ->
    {{Year1, Month1, _Day1}=Date1, Time1} = ecron_time:localtime(),
    if
        Month < Month1 ->
            {'ok', D} = duedow({Year1+1, Month, 1}, Dow),
            calendar:datetime_to_gregorian_seconds({D, Time});
        Month > Month1 ->
            {'ok', D} = duedow({Year1, Month, 1}, Dow),
            calendar:datetime_to_gregorian_seconds({D, Time});
        Month == Month1 ->
            {'ok', D} = duedow(Date1, Dow, 'year'), %jump to next year
            Duedate = case
                          calendar:datetime_to_gregorian_seconds({D, Time}) >
                          calendar:datetime_to_gregorian_seconds({Date1, Time1}) of
                          true ->
                              D;
                          _ ->
                              %% next_duedow/2 returns {'ok', Date}; the
                              %% tagged tuple used to be passed through
                              %% as-is, crashing the conversion below.
                              {'ok', Nd} = next_duedow(D, 'year'),
                              Nd
                      end,
            calendar:datetime_to_gregorian_seconds({Duedate, Time})
    end;
%% A specific Year/Month, on weekday Dow, at Time.
sec({{Year, Month, '*', Dow}, Time}) ->
    {{Year1, Month1, _Day1}=Date1, Time1} = ecron_time:localtime(),
    if
        (Year1 == Year) and (Month1 == Month) ->
            {'ok', D} = duedow(Date1, Dow),
            case
                calendar:datetime_to_gregorian_seconds({D, Time}) >=
                calendar:datetime_to_gregorian_seconds({Date1, Time1}) of
                true ->
                    calendar:datetime_to_gregorian_seconds({D, Time});
                _ ->
                    {'ok', Nd} = next_duedow(D),
                    calendar:datetime_to_gregorian_seconds({Nd, Time})
            end;
        true ->
            {'ok', D} = duedow({Year, Month, 1}, Dow),
            calendar:datetime_to_gregorian_seconds({D, Time})
    end;
%% Dow is irrelevant once a concrete day-of-month is given.
sec({{Year, Month, Dom, _}, Time}) ->
    sec({{Year, Month, Dom}, Time});
%% Fully specified date (Day may be 'last').
sec({{Year, Month, Day}, Time}) ->
    RealDay = get_real_day(Year, Month, Day),
    calendar:datetime_to_gregorian_seconds({{Year, Month, RealDay}, Time}).
%% Advance a date by one month, re-resolving the atom 'last' against the
%% target month. (An integer Day is passed through unchanged.)
add_month({Year, 12, Day}) ->
    {Year + 1, 1, get_real_day(Year + 1, 1, Day)};
add_month({Year, Month, Day}) ->
    {Year, Month + 1, get_real_day(Year, Month + 1, Day)}.
%% Resolve the atom 'last' to the last day of the month; integers pass
%% through as-is.
get_real_day(Year, Month, last) ->
    calendar:last_day_of_the_month(Year, Month);
get_real_day(_Year, _Month, Day) ->
    Day.
%%------------------------------------------------------------------------------
%% @spec check_job(Key, State) -> {noreply, State} | {noreply, State, Timeout}
%% @doc Checks if there's a job to execute in the table, extracts it and runs it
%% @private
%% @end
%%------------------------------------------------------------------------------
check_job({Due, _} = K, State)->
    case Due - sec() of
        Diff when Diff =< 0 ->
            %% Job is due: pop it from the table and run it in its own
            %% process, then keep draining any other due jobs.
            [Job] = mnesia:dirty_read(?JOB_TABLE, K),
            ok = mnesia:dirty_delete(?JOB_TABLE, K),
            %% Only first-run jobs are rescheduled per their schedule;
            %% retried jobs re-insert themselves via retry/5.
            NeedReschedule = is_not_retried(Job),
            spawn(?MODULE, execute_job, [Job, NeedReschedule]),
            case mnesia:dirty_first(?JOB_TABLE) of
                '$end_of_table' ->
                    {noreply, State};
                K1->
                    check_job(K1, State)
            end;
        _Diff ->
            %% Not due yet: sleep until it is.
            {noreply, State, get_timeout(Due)}
    end.
%%------------------------------------------------------------------------------
%% @spec execute_job(Job, Reschedule) -> ok
%% @doc Used internally. Execute the given Job. Reschedule it in case of a
%% periodic job, or in case the date is in the future.
%% @private
%% @end
%%------------------------------------------------------------------------------
%% First clause: a plain MFA job. Runs apply(M, F, A), notifies the event
%% manager with the result, and:
%%   - {apply, Fun}: immediately runs the returned fun via execute_fun/5;
%%   - {error, _}: schedules a retry (if retry options are configured);
%%   - anything else: just reported.
%% Exceptions are reported as the result, never re-raised.
execute_job(#job{client_fun = undefined, mfa = {{M, F, A}, DueTime},
                 retry = {RetryTimes, Interval}, schedule = Schedule},
            Reschedule) ->
    ExecutionTime = ecron_time:localtime(),
    try apply(M, F, A) of
        {apply, Fun} when is_function(Fun, 0) ->
            notify({mfa_result, {apply, Fun}, {Schedule, {M, F, A}},
                    DueTime, ExecutionTime}),
            execute_fun(
              Fun, Schedule,{M, F, A}, DueTime, {RetryTimes, Interval});
        ok ->
            notify({mfa_result, ok, {Schedule, {M, F, A}},
                    DueTime, ExecutionTime});
        {ok, Data} ->
            notify({mfa_result, {ok, Data}, {Schedule, {M, F, A}},
                    DueTime, ExecutionTime});
        {error, Reason} ->
            notify({mfa_result, {error, Reason}, {Schedule, {M, F, A}},
                    DueTime, ExecutionTime}),
            retry(
              {M, F, A}, undefined, Schedule, {RetryTimes, Interval}, DueTime);
        Return ->
            notify({mfa_result, Return, {Schedule, {M, F, A}}, DueTime,
                    ExecutionTime})
    catch
        _:Error ->
            notify(
              {mfa_result, Error, {Schedule, {M, F, A}}, DueTime, ExecutionTime})
    end,
    %% Periodic first-run jobs re-insert themselves for the next occurrence.
    %% (insert/4 is defined earlier in this file, outside this chunk.)
    case Reschedule of
        true -> insert(Schedule, {M, F, A}, RetryTimes, Interval);
        false -> ok
    end;
%% Second clause: the job carries a 'fun' returned by a previous MFA run;
%% just execute it (never rescheduled here).
execute_job(#job{client_fun = {Fun, DueTime},
                 mfa = {MFA, _},
                 schedule = Schedule,
                 retry = Retry}, _) ->
    execute_fun(Fun, Schedule, MFA, DueTime, Retry).
%%------------------------------------------------------------------------------
%% @spec execute_fun(Fun, Schedule, MFA, Time, Retry) -> ok
%% @doc Executes the `fun' returned by MFA
%% @private
%% @end
%%------------------------------------------------------------------------------
execute_fun(Fun, Schedule, MFA, DueTime, Retry) ->
    ExecutionTime = ecron_time:localtime(),
    try Fun() of
        ok ->
            notify({fun_result, ok, {Schedule, MFA}, DueTime, ExecutionTime}),
            ok;
        {ok, Data} ->
            notify({fun_result, {ok, Data}, {Schedule, MFA}, DueTime,
                    ExecutionTime}),
            ok;
        {error, Reason} ->
            %% Only an explicit {error, _} triggers the retry mechanism.
            notify({fun_result, {error, Reason}, {Schedule, MFA}, DueTime,
                    ExecutionTime}),
            retry(MFA, Fun, Schedule, Retry, DueTime);
        Error ->
            notify({fun_result, Error, {Schedule, MFA}, DueTime, ExecutionTime})
    catch
        %% 'Error' here is a fresh binding local to the catch clause; it
        %% does not clash with the Error bound in the try-of clause above.
        %% Exceptions are reported, not retried and not re-raised.
        _:Error ->
            notify({fun_result, Error, {Schedule, MFA}, DueTime,
                    ExecutionTime})
    end.
%%------------------------------------------------------------------------------
%% @spec retry(MFA, Fun, Schedule, Retry, Time) -> ok
%% Retry = {RetryTimes, Seconds}
%% RetryTimes = integer()
%% Seconds = integer()
%% @doc Reschedules the job if Retry options are given.
%% Fun, or MFA if Fun is undefined, will be executed after Seconds.
%% If RetryTimes is zero it means the job has been re-scheduled too many
%% times therefore it won't be inserted again. <br/>
%% An event is sent when the job is rescheduled and in case max number
%% of retry is reached.
%% @private
%% @end
%%------------------------------------------------------------------------------
%% No retry configured for this job: nothing to do.
retry(_MFA, _Fun, _Schedule, {undefined, undefined}, _) ->
    ok;
%% Retry budget exhausted: emit a max_retry event and give up.
retry(MFA, Fun, Schedule, {0, _Seconds}, DueTime) ->
    notify({max_retry, {Schedule, MFA}, Fun, DueTime});
%% Re-insert the job Seconds from now with a decremented retry budget.
retry(MFA, Fun, Schedule, {RetryTime, Seconds}, DueTime) ->
    notify({retry, {Schedule, MFA}, Fun, DueTime}),
    DueSec = sec() + Seconds,
    JobId = mnesia:dirty_update_counter({job_counter, job}, 1),
    %% If the failure came from a client fun, carry it (and its original
    %% due time) along so the fun — not the MFA — is retried.
    ClientFun =
        case Fun of
            undefined -> undefined;
            _ -> {Fun, DueTime}
        end,
    RetryJob = #job{key = {DueSec, JobId},
                    mfa = {MFA, DueTime},
                    schedule = Schedule,
                    client_fun = ClientFun,
                    retry = {RetryTime - 1, Seconds}},
    gen_server:cast(?MODULE, {insert, RetryJob}).
%%------------------------------------------------------------------------------
%% @spec create_add_job(Job) -> ok | error
%% Job = {{Date, Time}, MFA}
%% Job = {{Date, Time}, MFA, RetryTimes, Seconds}
%% Job = #job{}
%% @doc Used internally
%% @private
%% @end
%%------------------------------------------------------------------------------
%% {Schedule, MFA} without retry options: default to no retries.
create_add_job({{Date, Time}, MFA}) ->
    create_add_job({{Date, Time}, MFA, undefined, undefined});
%% {Schedule, MFA, RetryTimes, Seconds}: validate, compute the due time
%% and write a fresh #job{}. Returns ok or the validation error.
create_add_job({{Date, Time}, MFA, RetryTimes, Seconds}) ->
    case validate(Date, Time) of
        ok ->
            DueSec = sec({Date, Time}),
            DueTime = calendar:gregorian_seconds_to_datetime(DueSec),
            %% Key is {DueSec, UniqueId}; the counter guarantees unique IDs.
            Key = {DueSec, mnesia:dirty_update_counter({job_counter, job}, 1)},
            Job = #job{key = Key,
                       mfa = {MFA, DueTime},
                       schedule = {Date, Time},
                       retry = {RetryTimes, Seconds}},
            ok = mnesia:dirty_write(Job);
        Error ->
            Error
    end;
%% A persisted first-run job (no client fun): re-key it from its schedule.
create_add_job(#job{client_fun = undefined, schedule = {Date, Time}} = Job) ->
    DueSec = sec({Date, Time}),
    Key = {DueSec, mnesia:dirty_update_counter({job_counter, job}, 1)},
    ok = mnesia:dirty_write(Job#job{key = Key});
%% This entry was related to a previously failed MFA execution
%% therefore it will retry after the configured retry_time
create_add_job(#job{retry = {_, Seconds}} = Job) ->
    DueSec = sec() + Seconds,
    Key = {DueSec, mnesia:dirty_update_counter({job_counter, job}, 1)},
    ok = mnesia:dirty_write(Job#job{key = Key}).
%%------------------------------------------------------------------------------
%% @spec notify(Event) -> ok
%% Event = term()
%% @doc Sends the Event notification to all the configured event handlers
%% @private
%% @end
%%------------------------------------------------------------------------------
notify(Event) ->
    %% Crashes (badmatch) if the event manager is not running.
    ok = gen_event:notify(?EVENT_MANAGER, Event).
%% Validate a {Date, Time} schedule. Returns ok if the fields are
%% well-formed and the schedule resolves to a time strictly in the
%% future; {error, date} otherwise.
validate('*', Time) ->
    validate({'*','*', '*'}, Time);
validate(Date, Time) ->
    case validate_date(Date) andalso validate_time(Time) of
        true ->
            Now = sec(),
            %% sec/1 may crash on schedules it cannot resolve; treat any
            %% exception as an invalid date. Uses try instead of the
            %% old-style `catch', which lost the error class and could let
            %% a thrown integer masquerade as a valid due time.
            try sec({Date, Time}) of
                Sec when Sec - Now > 0 ->
                    ok;
                _ ->
                    {error, date}
            catch
                _:_ ->
                    {error, date}
            end;
        false ->
            {error, date}
    end.
%% A 4-tuple carries a day-of-week as the last element: it must be 1..7
%% or '*'; the date part is then validated on its own.
validate_date({Y, M, D, Dow}) when is_integer(Dow), Dow > 0, Dow =< 7 ->
    validate_date({Y, M, D});
validate_date({Y, M, D, '*'}) ->
    validate_date({Y, M, D});
validate_date({_, _, _, _}) ->
    false;
%% Fully wildcarded dates.
validate_date({'*', '*', '*'}) ->
    true;
validate_date({'*', '*', last}) ->
    true;
%% Wildcard month: the day must exist in every month, so cap at 28 (Feb).
validate_date({'*', '*', D}) when is_integer(D), D > 0, D < 29 ->
    true;
validate_date({'*', 2, D}) when is_integer(D), D > 0, D < 29 ->
    true;
validate_date({'*', M, last}) when is_integer(M), M > 0, M < 13 ->
    true;
%% Wildcard year, concrete month: cap the day by the month's length
%% (Feb was handled above; leap years make 29 valid there every 4 years).
validate_date({'*', M, D}) when is_integer(M), is_integer(D),
                                M > 0, M < 13, D > 0 ->
    MaxDay =
        case lists:member(M, [4, 6, 9, 11]) of
            true -> 30;
            false -> 31
        end,
    D =< MaxDay;
validate_date({Y, M, last}) ->
    is_integer(Y) andalso is_integer(M)
        andalso M > 0 andalso M < 13;
%% Concrete date: defer to the calendar; non-integer elements make
%% calendar:valid_date/1 crash, which we map to false.
validate_date(Candidate) ->
    try
        calendar:valid_date(Candidate)
    catch
        _:_ -> false
    end.
%% A valid time is {H, M, S} with integer fields in 0..23 / 0..59 / 0..59.
validate_time({Hour, Min, Sec}) when is_integer(Hour),
                                     is_integer(Min),
                                     is_integer(Sec) ->
    Hour >= 0 andalso Hour < 24
        andalso Min >= 0 andalso Min < 60
        andalso Sec >= 0 andalso Sec < 60;
validate_time(_) ->
    false.
%% Return a list of {Schedule, MFA} / {Schedule, MFA, RetryTimes, RetrySeconds}
%% tuples and #job{} records, where entries from the scheduled MFA list are
%% dropped if an equivalent job is already present in JobList.
remove_duplicated([], _, AccJobs) ->
    AccJobs;
remove_duplicated([{Schedule, MFA}|T], JobList, AccJobs) ->
    case lists:any(fun(J) ->
                           element(1, J#job.mfa) == MFA
                               andalso J#job.schedule == Schedule
                               andalso J#job.client_fun == undefined
                   end, JobList) of
        true ->
            %% The MFA was already present in the table
            remove_duplicated(T, JobList, AccJobs);
        false ->
            remove_duplicated(T, JobList, [{Schedule, MFA}|AccJobs])
    end;
remove_duplicated([{Schedule, MFA, Retry, Sec}|T], JobList, AccJobs) ->
    case lists:any(fun(J) ->
                           element(1, J#job.mfa) == MFA
                               andalso J#job.schedule == Schedule
                               andalso J#job.client_fun == undefined
                               %% Compare the retry configuration. The
                               %% previous code compared J#job.schedule
                               %% against {Retry, Sec}, which could never
                               %% hold together with the schedule check
                               %% above, so 4-tuple duplicates were never
                               %% dropped.
                               andalso J#job.retry == {Retry, Sec}
                   end, JobList) of
        true ->
            %% The MFA was already present in the table
            remove_duplicated(T, JobList, AccJobs);
        false ->
            remove_duplicated(T, JobList, [{Schedule, MFA, Retry, Sec}|AccJobs])
    end.
%% Milliseconds to sleep until DueSec, clamped between 5 ms and 30 days.
get_timeout(DueSec) ->
    Remaining = DueSec - sec(),
    if
        Remaining =< 0 ->
            5;
        Remaining > 2592000 ->
            %% Check the queue once every 30 days anyway
            2592000000;
        true ->
            Remaining * 1000
    end.
%% Create a mnesia table, treating an already-existing table as success;
%% any other abort reason is returned to the caller.
create_table(Table, Opts) ->
    case mnesia:create_table(Table, Opts) of
        {atomic, ok} ->
            ok;
        {aborted, {already_exists, _Name}} ->
            ok;
        Error ->
            Error
    end.
%% Check whether it's the first attempt to execute a job and not a retried one.
%% For a first-run MFA job the key's due time equals the originally scheduled
%% datetime; a retry gets a freshly computed (later) key, and fun-jobs
%% (client_fun set) are never treated as first runs.
%% (The trailing line previously carried stray non-Erlang text appended after
%% `false.', which broke compilation; it has been removed.)
is_not_retried(#job{client_fun = undefined, key = {DueSec, _},
                    mfa = {_, ExpectedDateTime}}) ->
    DueDateTime = calendar:gregorian_seconds_to_datetime(DueSec),
    DueDateTime == ExpectedDateTime;
is_not_retried(_) ->
    false.
%%% @doc
%%% Random Access Memory Storage
%%%
%%% Memory buffers are used to store data of a configured size. When the incoming
%%% data is larger than a buffer, a new one is automatically created.
%%%
%%% The biggest challenge here is fitting data into the correct buffer(s).
%%% Instead of using 1 huge buffer for the data, we use several small buffers
%%% indexed by an index number to store the data.
%%%
%%% When writing data, if the offset to write at and the length of the incoming
%%% data is less than or equal to the buffer size, we simply copy it all to that given page/buffer.
%%% But, what if the incoming data is larger than a single buffer, possibly
%%% spanning multiple buffers?
%%%
%%% Visualize: Buffer size = 4
%%%```
%%% Offset 0
%%% Data: [a,a,a,a]
%%% Buffer: [ , , , ]
%%%
%%% Offset 2
%%% Data: [a,a, a,a]
%%% Buffer: [ , , , ][ , , , ]
%%%
%%% Offset 3
%%% Data: [a, a,a,a,b, b,b,b,c, c]
%%% Buffer: [ , , , ][ , , , ][ , , , ][ , , , ]
%%%
%%% Offset 0 1
%%% Data: [a][a]
%%% Buffer: [ , , , ]
%%%'''
%%% @end
-module(ra_storage_memory).
-behaviour(ra_storage).
-export([
new/0,
new/1,
write/3,
read/3,
get_page/2,
del/3,
len/1
]).
-define(DEFAULT_PAGE_SIZE, (1024 * 1024)).
-type state() :: {MemoryPager :: module(), Length :: pos_integer()}.
%% @doc Create a new instance with a default page size of (1024 * 1024)
-spec new() -> State :: state().
new() ->
    %% Delegate to new/1 with the default 1 MiB page size.
    new(?DEFAULT_PAGE_SIZE).
%% @doc Create a new instance with the given page size. Will error if
%% page size is not a power of two.
-spec new(PageSize :: pos_integer()) -> State :: state().
new(PageSize) ->
    %% State is {MemoryPager, Length}: the pager (which itself carries the
    %% page size — see the {{Pager, PageSize}, Length} patterns below) plus
    %% the number of bytes written so far.
    %% NOTE(review): the power-of-two check mentioned in the @doc is
    %% presumably enforced by memory_pager:new/1 — confirm.
    {
        memory_pager:new(PageSize),
        0
    }.
%% @doc Write data to memory at the given byte offset
-spec write(Offset :: pos_integer(), Data :: binary(), State :: state()) -> {ok, State :: state()}.
write(Offset, Data, {{Pager, PageSize}, Length}) ->
    %% Page holding the first byte, and the byte offset within that page.
    PageNum = Offset div PageSize,
    PageCursor = (Offset - (PageNum * PageSize)),
    DataSize = byte_size(Data),
    %% Track the high-water mark of bytes stored.
    NewLength = update_length(Offset, DataSize, Length),
    %% Spread the data across as many pages as needed.
    write_to_pages(
        0,
        PageCursor,
        PageNum,
        Data,
        DataSize,
        {{Pager, PageSize}, NewLength}
    ).
%% @private
%% @private Copy Data into consecutive pages, starting at PageCursor in
%% page PageNum. DataCursor tracks how much of Data has been consumed;
%% recursion stops once all DataSize bytes are written (or Data is empty).
write_to_pages(_, _, _, _, DataSize, State) when DataSize =:= 0 ->
    {ok, State};
write_to_pages(DataCursor, _, _, _, DataSize, State) when DataCursor >= DataSize ->
    {ok, State};
write_to_pages(DataCursor, PageCursor, PageNum, Data, DataSize, {{_, PageSize} = Mp, Len}) ->
    %% How much data is left
    DataBound = DataSize - DataCursor,
    %% What's the most we can write to the page to fill it
    %% based on the current position of the Page cursor?
    UpperBound = min(PageSize, PageCursor + DataBound),
    %% The amount of bytes we'll write
    RangeLen = UpperBound - PageCursor,
    %% Get the buffer for the given page; missing pages start zero-filled.
    PageBuffer =
        case memory_pager:get(PageNum, Mp) of
            {none, _} -> <<0:PageSize/unit:8>>;
            {ok, {_, B}, _} -> B
        end,
    %% Copy the data to the page buffer
    UpdatedBuffer = copy_binary(DataCursor, PageCursor, RangeLen, Data, PageBuffer),
    %% Write it to the page
    {ok, Mp1} = memory_pager:set(PageNum, UpdatedBuffer, Mp),
    %% Keep going while there's still data to process; subsequent pages
    %% are written from their start (PageCursor resets to 0).
    write_to_pages(
        DataCursor + RangeLen,
        0,
        PageNum + 1,
        Data,
        DataSize,
        {Mp1, Len}
    ).
%% @doc Get the page buffer for the given page number
-spec get_page(PageNum :: pos_integer(), State :: state()) -> none | {ok, Data :: binary()}.
%% Fetch the raw buffer of page PageNum, or none if the page was never
%% written.
get_page(PageNum, {Pager, _Length}) ->
    case memory_pager:get(PageNum, Pager) of
        {ok, {_Num, Buffer}, _Pager1} ->
            {ok, Buffer};
        {none, _Pager1} ->
            none
    end.
%% @doc Read the given number of bytes from the byte offset. This may 'walk'
%% several 'pages' to gather the results.
-spec read(Offset :: pos_integer(), BytesToRead :: pos_integer(), State :: state()) ->
{error, out_of_bounds} | {ok, Result :: binary(), State :: state()}.
%% Reject reads that extend past the number of bytes written so far.
read(Offset, BytesToRead, {{_, _}, Length}) when (Offset + BytesToRead) > Length ->
    {error, out_of_bounds};
read(Offset, BytesToRead, {{_, PageSize}, _} = State) ->
    %% Page holding the first byte, and the byte offset within it.
    PageNum = Offset div PageSize,
    PageCursor = (Offset - (PageNum * PageSize)),
    %% Pre-allocated zero-filled output buffer of the requested size.
    OutBuffer = <<0:BytesToRead/unit:8>>,
    read_from_pages(
        PageNum,
        PageCursor,
        0,
        BytesToRead,
        OutBuffer,
        State
    ).
%% @private
%% @private Walk consecutive pages, copying bytes into OutBuffer until
%% OutCursor reaches BytesToRead. PageCursor is the read position within
%% the current page (0 for every page after the first).
read_from_pages(
    _,
    _,
    OutCursor,
    BytesToRead,
    OutBuffer,
    State
) when OutCursor >= BytesToRead ->
    {ok, OutBuffer, State};
read_from_pages(
    PageNum,
    PageCursor,
    OutCursor,
    BytesToRead,
    OutBuffer,
    {{_, PageSize} = Mp, _} = State
) ->
    %% Calculate the bounds for both binaries
    BufferBounds = BytesToRead - OutCursor,
    PageBounds = PageSize - PageCursor,
    %% Minimal amount to work with right now based on the read/write bounds
    MinimalBound = min(PageBounds, BufferBounds),
    %% Get the buffer for the given page and copy stuff
    case memory_pager:get(PageNum, Mp) of
        {ok, {_, PageBuffer}, _} ->
            OutBuffer1 = copy_binary(
                PageCursor,
                OutCursor,
                MinimalBound,
                PageBuffer,
                OutBuffer
            ),
            read_from_pages(
                PageNum + 1,
                0,
                OutCursor + MinimalBound,
                BytesToRead,
                OutBuffer1,
                State
            );
        _ ->
            %% NOTE(review): a hole (never-written page) inside the read
            %% range yields {none, State}, which is not covered by
            %% read/3's -spec — confirm whether callers expect this.
            {none, State}
    end.
%% @doc Delete the number of bytes starting at the offset.
-spec del(Offset :: pos_integer(), BytesToDelete :: pos_integer(), State :: state()) ->
{ok, State :: state()}.
del(Offset, BytesToDelete, {{_, _}, _Length} = State) ->
    %% "Delete" by overwriting the range with zeros; the write may bump
    %% the stored length if the range extends past the current end.
    {ok, {{Pager, PageSize}, L}} = write(Offset, <<0:BytesToDelete/unit:8>>, State),
    %% If the deleted range reached (or passed) the end of the data,
    %% truncate the logical length back to Offset.
    NewLen =
        case Offset + BytesToDelete > L of
            true -> Offset;
            _ -> L
        end,
    {ok, {{Pager, PageSize}, NewLen}}.
%% @doc return the amount of data stored (in bytes)
-spec len(State :: state()) -> {ok, Amount :: pos_integer(), State :: state()}.
%% NOTE: the state is a 2-tuple {{Pager, PageSize}, Length} (see new/1 and
%% write/3); the previous pattern matched a 3-tuple and therefore crashed
%% with function_clause on every call.
len({_Pager, Length} = State) ->
    {ok, Length, State}.
%% @private New total length after writing DataSize bytes at Offset: the
%% high-water mark of the previous length and the end of the write.
update_length(Offset, DataSize, Length) ->
    max(Offset + DataSize, Length).
%% @private Copy BytesToProcess bytes from ReadData (starting at byte
%% offset ReadCursor) into WriteData (starting at WriteCursor), returning
%% the updated write buffer. WriteData must be large enough to hold
%% WriteCursor + BytesToProcess bytes.
%% (The final line previously carried stray non-Erlang text appended after
%% the closing `.', which broke compilation; it has been removed.)
copy_binary(ReadCursor, WriteCursor, BytesToProcess, ReadData, WriteData) ->
    %% Skip ReadCursor bytes, then capture the BytesToProcess bytes to copy.
    <<_:ReadCursor/binary, CopyThis:BytesToProcess/binary, _/binary>> = ReadData,
    %% Split the write buffer into the prefix to keep, the region being
    %% overwritten (discarded) and the suffix to keep.
    <<Head:WriteCursor/binary, _:BytesToProcess/binary, R/binary>> = WriteData,
    %% Stitch: kept prefix ++ copied bytes ++ kept suffix.
    <<Head/binary, CopyThis:BytesToProcess/binary, R/binary>>.
%% @doc
%% A module to validate Erlang's records.
%%
%% The rvalidator module aims to take away the boilerplate of implementing validating functions,
%% while being agnostic of the verification rules and error handling practices.
%%
%% This module is based on the idea that a Record should have its own verification specification.
%% Since a Record is an aggregates of multiple fields, a Specification for a Record is implemented
%% as a set of constraints per field.
%%
%% @end
%%
%% @author <NAME>
-module(rvalidator).
-export([
constraint/2,
required_field/4,
optional_field/3
]).
-export([
validate/2
]).
-type error() :: any().
%% The error to be raised by the constraint; the type is intentionally open
%% to suit your needs, but we do have some recommendations:
%% ```
%% invalid_name,
%% <<"InvalidName">>,
%% {name_too_long, MaxLength}
%% '''
%%
%% In general, the error should have a unique identifier and
%% if relevant to the constraint you could embed its configuration.
-type constraint_function() :: fun((any()) -> boolean()).
%% The constraint function implement the verification rules used to validate the Records.
%% The functions must return true if the Record complies to the rule and false otherwise.
-record(constraint, {
function :: constraint_function(),
error :: error()
}).
-type field_name() :: any().
%% The field_name is used as a convenient placeholder to group the generated errors
%% under a same bucket. Most of the time the Name is expected to be
%% the same than the Record's field name, but occasionaly, it might be more
%% adequate to use a String, for example on the verification of a JSON.
-record(field_spec, {
name :: field_name(),
index :: non_neg_integer(),
is_required = false :: boolean(),
missing_error :: error(),
constraints :: list(#constraint{})
}).
%%%----------------------------------------------------------------------------
%%% Functions to build the specification.
%%%----------------------------------------------------------------------------
%% @doc
%% Create a new constraint.
%%
%% A constraint illustrates the concept of rules attached to a record
%% to verify its integrity.
%%
%% Example:
%% ```
%% validator:constraint(fun erlang:is_binary/1, not_binary).
%% validator:constraint(fun(X) -> size(X) =:= 2 end, {equal_length, 2}).
%% '''
%%
%% @end
%%
%% @param Function implements the rule to be verified.
%% @param Err define the error to be raised on verification failure.
-spec constraint(constraint_function(), error()) -> #constraint{}.
constraint(Function, Err) ->
    %% Wrap the boolean predicate and its failure error in a #constraint{}.
    #constraint{function = Function, error = Err}.
%% @doc
%% Create the specification for a required field.
%%
%% A required field specification must define:
%% <ul>
%% <li>the field's name</li>
%% <li>the field's index in the Record</li>
%% <li>the field's error if the value is undefined</li>
%% <li>the field's constraints</li>
%% </ul>
%%
%% Example:
%% ```
%% validator:required_field(country_code, #country.country_code, missing_country_code, [
%% validator:constraint(fun erlang:is_binary/1, not_binary),
%% validator:constraint(fun(X) -> size(X) =:= 2 end, {equal_length, 2})
%% ]).
%% '''
%%
%% @end
%%
%% @param Name define the key which will be used to groups the errors.
%% @param Index of the field in the Record.
%% @param MissingErr error to be raised if the required field is undefined.
%% @param Constraints to be verified.
-spec required_field(field_name(), non_neg_integer(), error(), list(#constraint{})) -> #field_spec{}.
required_field(Name, Index, MissingErr, Constraints) ->
    %% Required fields carry a dedicated MissingErr to raise when the
    %% field's value is undefined; constraints apply otherwise.
    #field_spec{
        name = Name,
        index = Index,
        is_required = true,
        missing_error = MissingErr,
        constraints = Constraints
    }.
%% @doc
%% Create the specification for an optional field.
%%
%% An optional field specification must define:
%% <ul>
%% <li>the field's name</li>
%% <li>the field's index in the Record</li>
%% <li>the field's constraints</li>
%% </ul>
%%
%% If the value of an optional field is undefined, then the validator module
%% won't verify its constraints.
%% Example:
%% ```
%% validator:optional_field(currency_code, #country.currency_code, [
%% validator:constraint(fun erlang:is_binary/1, not_binary),
%% validator:constraint(fun(X) -> size(X) =:= 3 end, {equal_length, 3})
%% ])
%% '''
%%
%% @end
%%
%% @param Name define the key which will be used to groups the errors.
%% @param Index of the field in the Record.
%% @param Constraints to be verified.
%% Build the specification for an optional field: an undefined value is
%% simply skipped, otherwise the constraints are checked.
-spec optional_field(field_name(), non_neg_integer(), list(#constraint{})) -> #field_spec{}.
optional_field(FieldName, FieldIndex, Rules) ->
    #field_spec{name = FieldName,
                index = FieldIndex,
                is_required = false,
                constraints = Rules}.
%%%----------------------------------------------------------------------------
%%% Functions to validate a Record against its Spec.
%%%----------------------------------------------------------------------------
%% @doc
%% Validate a Record against its specification.
%%
%% @end
%%
%% @param Record holds the data to validate.
%% @param FieldSpecs is a list of field specifications.
%% @see required_field/4
%% @see optional_field/3
-type field_errors() :: {field_name(), nonempty_list(error())}.
-type validate_result() :: ok | {error, nonempty_list(field_errors())}.
%% Check every field of Record against its specification and collect the
%% per-field errors; an empty accumulator means the Record is valid.
-spec validate(tuple(), list(#field_spec{})) -> validate_result().
validate(Record, FieldSpecs) ->
    Collect =
        fun(Spec, ErrAcc) ->
                Value = element(Spec#field_spec.index, Record),
                validate_fold(Value, Spec, ErrAcc)
        end,
    case lists:foldl(Collect, [], FieldSpecs) of
        [] -> ok;
        FieldErrors -> {error, FieldErrors}
    end.
%%%----------------------------------------------------------------------------
%%% Private functions
%%%----------------------------------------------------------------------------
%% Fold one field's value into the accumulated error list.
%% - An undefined optional field is skipped.
%% - An undefined required field contributes its missing_error.
%% - Otherwise each constraint's predicate is applied to the value and the
%%   errors of the failing ones are reported under the field's name.
%% NOTE: stray dataset metadata that was fused onto the final line has been
%% removed; it made the module un-compilable.
validate_fold(undefined, #field_spec{is_required = false}, Acc) -> Acc;
validate_fold(undefined, #field_spec{name = Name, is_required = true, missing_error = Error}, Acc) ->
    [{Name, [Error]} | Acc];
validate_fold(Value, #field_spec{name = Name, constraints = Constraints}, Acc) ->
    Errors = lists:foldl(
        fun(#constraint{function = Function, error = Error}, NextAcc) ->
            case Function(Value) of
                true -> NextAcc;
                false -> [Error | NextAcc]
            end
        end,
        [],
        Constraints
    ),
    case Errors of
        [] -> Acc;
        _ -> [{Name, Errors} | Acc]
    end.
%%--------------------------------------------------------------------
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% Functions related to the management of the RLOG schema
-module(mria_schema).
%% API:
-export([ init/0
, add_entry/1
, tables_of_shard/1
, shard_of_table/1
, table_specs_of_shard/1
, shards/0
, converge_replicant/2
, converge_core/0
, create_table_type/0
]).
-include("mria_rlog.hrl").
-include_lib("snabbkaffe/include/trace.hrl").
-type entry() ::
#?schema{ mnesia_table :: mria:table()
, shard :: mria_rlog:shard()
, storage :: mria:storage()
, config :: list()
}.
-export_type([entry/0]).
%% WARNING: Treatment of the schema table is different on the core
%% nodes and replicants. Schema transactions on core nodes are
%% replicated via mnesia and therefore this table is consistent, but
%% these updates do not reach the replicants. The replicants use
%% regular mnesia transactions to update the schema, so it might be
%% inconsistent with the core nodes' view.
%%
%% Therefore one should be rather careful with the contents of the
%% rlog_schema table.
%%================================================================================
%% Type declarations
%%================================================================================
%%================================================================================
%% API
%%================================================================================
%% @doc Add a table to the shard
%%
%% Note: currently it's the only schema operation that we support. No
%% removal and no handover of the table between the shards is
%% possible.
%%
%% These operations are too rare and expensive to implement, because
%% they require precise coordination of the shard processes across the
%% entire cluster.
%%
%% Adding an API to remove or modify schema would open possibility to
%% move a table from one shard to another. This requires restarting
%% both shards in a synchronized manner to avoid a race condition when
%% the replicant processes from the old shard import in-flight
%% transactions while the new shard is bootstrapping the table.
%%
%% This is further complicated by the fact that the replicant nodes
%% may consume shard transactions from different core nodes.
%%
%% So the operation of removing a table from the shard would look like
%% this:
%%
%% 1. Do an RPC call to all core nodes to stop the shard
%% 2. Each core node synchronously stops all the attached replicant
%% processes
%% 3. Only then we are sure that we can avoid data corruption
%%
%% Currently there is no requirement to implement this, so we can get
%% away with managing each shard separately
%% Add a schema entry inside a mnesia transaction. A failed transaction is
%% treated as a programming error and surfaces as a crash carrying the
%% offending entry.
-spec add_entry(mria_schema:entry()) -> ok.
add_entry(TabDef) ->
    Result = mnesia:transaction(fun do_add_table/1, [TabDef], infinity),
    case Result of
        {atomic, ok} -> ok;
        {aborted, Reason} -> error({bad_schema, Reason, TabDef})
    end.
%% @doc Create the internal schema table if needed
%% Creates the RAM-only ?schema table, makes sure this node holds a copy,
%% and blocks until the table is usable before returning.
init() ->
    ?tp(debug, rlog_schema_init, #{}),
    ok = mria:create_table_internal(?schema, ram_copies,
            [{type, ordered_set},
             {record_name, ?schema},
             {attributes, record_info(fields, ?schema)}
            ]),
    ok = mria_mnesia:copy_table(?schema, ram_copies),
    %% Block until the schema table is available on this node:
    mria_mnesia:wait_for_tables([?schema]),
    ok.
%% @doc Return the names of the tables that belong to the shard
%% (projection of table_specs_of_shard/1 onto the table name field).
-spec tables_of_shard(mria_rlog:shard()) -> [mria:table()].
tables_of_shard(Shard) ->
    [Entry#?schema.mnesia_table || Entry <- table_specs_of_shard(Shard)].
%% @doc Return the list of tables that belong to the shard together with
%% their full schema entries (storage and config included).
-spec table_specs_of_shard(mria_rlog:shard()) -> [mria_schema:entry()].
table_specs_of_shard(Shard) ->
    %%core = mria_config:role(), % assert
    MatchPattern = #?schema{ mnesia_table = '_'
                           , shard        = Shard
                           , storage      = '_'
                           , config       = '_'
                           },
    {atomic, Entries} =
        mnesia:transaction(fun mnesia:match_object/1, [MatchPattern], infinity),
    Entries.
%% @doc Look up which shard (if any) a table belongs to.
-spec shard_of_table(mria:table()) -> {ok, mria_rlog:shard()} | undefined.
shard_of_table(Table) ->
    case mnesia:dirty_read(?schema, Table) of
        [#?schema{shard = Shard}] -> {ok, Shard};
        [] -> undefined
    end.
%% @doc Return the sorted, deduplicated list of known shards.
-spec shards() -> [mria_rlog:shard()].
shards() ->
    %% Select only the shard field ('$1') of every schema entry:
    MatchSpec = [{ #?schema{mnesia_table = '_', shard = '$1', config = '_', storage = '_'}
                 , []
                 , ['$1']
                 }],
    {atomic, Found} = mnesia:transaction(fun mnesia:select/2, [?schema, MatchSpec], infinity),
    lists:usort(Found).
%% @doc Ensure that a core node that freshly joined the cluster has
%% copies of all the tables
-spec converge_core() -> ok.
converge_core() ->
    %% Assert that we've already joined the cluster, and the rest of
    %% the nodes know about us, so when any of them calls
    %% `create_table', they add us in the list:
    DbNodes = [_, _ | _] = mnesia:system_info(db_nodes), % assert: at least two db nodes
    true = lists:member(node(), DbNodes),
    ?tp(info, "Converging RLOG schema", #{}),
    %% Walk every known schema entry and make sure this node has a copy:
    TabDefs = ets:tab2list(?schema),
    lists:foreach(fun ensure_table_copy/1, TabDefs).
%% @doc Ensure that the replicant has the same tables as the upstream:
%% create each table of the shard locally according to its spec.
-spec converge_replicant(mria_rlog:shard(), [mria_schema:entry()]) -> ok.
converge_replicant(_Shard, TableSpecs) ->
    %% TODO: Check shard
    [ensure_table(Spec) || Spec <- TableSpecs],
    ok.
%% @doc How should mnesia tables be brought to this node?
%% A replicant, or a node with no extra_db_nodes (i.e. running alone),
%% creates tables from scratch; any other core node copies them.
-spec create_table_type() -> create | copy.
create_table_type() ->
    case {mnesia:system_info(extra_db_nodes), mria_rlog:role()} of
        {_, replicant} -> create;
        {[], _} -> create;
        {[_ | _], _} -> copy
    end.
%%================================================================================
%% Internal functions
%%================================================================================
%% Transactional body of add_entry/1. Runs inside mnesia:transaction/3.
-spec do_add_table(mria_schema:entry()) -> ok.
do_add_table(TabDef = #?schema{shard = Shard, mnesia_table = Table}) ->
    %% wread takes a write lock, so concurrent additions of the same
    %% table serialize on this transaction:
    case mnesia:wread({?schema, Table}) of
        [] ->
            %% New table. The shard counts as "live" when it is not the
            %% local-content pseudo-shard and its process is running:
            IsLive = Shard =/= ?LOCAL_CONTENT_SHARD andalso is_pid(whereis(Shard)),
            ?tp(info, "Adding table to a shard",
                #{ shard => Shard
                 , table => Table
                 , live_change => IsLive
                 }),
            mnesia:write(TabDef),
            ok;
        [#?schema{shard = Shard}] ->
            %% We're just being idempotent here:
            ok;
        _ ->
            %% The table is already assigned to a *different* shard;
            %% moving tables between shards is not supported (see the
            %% module comment above add_entry/1):
            error(bad_schema)
    end.
%% Create a table locally according to its schema entry (used on
%% replicants, see converge_replicant/2).
-spec ensure_table(mria_schema:entry()) -> ok.
ensure_table(#?schema{mnesia_table = Table, storage = Storage, config = Config}) ->
    ok = mria:create_table_internal(Table, Storage, Config).
%% Make sure the local node holds a copy of the table with the requested
%% storage type (used on core nodes, see converge_core/0).
%% NOTE: stray dataset metadata fused onto the final line has been removed;
%% it made the module un-compilable.
-spec ensure_table_copy(mria_schema:entry()) -> ok.
ensure_table_copy(#?schema{mnesia_table = Table, storage = Storage}) ->
    mria_mnesia:copy_table(Table, Storage).
%%% @author <NAME> <<EMAIL>>
%%% [http://ferd.ca/]
%%% @doc
%%% `recon_trace' is a module that handles tracing in a safe manner for single
%%% Erlang nodes, currently for function calls only. Functionality includes:
%%%
%%% <ul>
%%% <li>Nicer to use interface (arguably) than `dbg' or trace BIFs.</li>
%%% <li>Protection against dumb decisions (matching all calls on a node
%%% being traced, for example)</li>
%%% <li>Adding safeguards in terms of absolute trace count or
%%% rate-limiting</li>
%%% <li>Nicer formatting than default traces</li>
%%% </ul>
%%%
%%% == Tracing Erlang Code ==
%%%
%%% The Erlang Trace BIFs allow to trace any Erlang code at all. They work in
%%% two parts: pid specifications, and trace patterns.
%%%
%%% Pid specifications let you decide which processes to target. They can be
%%% specific pids, `all' pids, `existing' pids, or `new' pids (those not
%%% spawned at the time of the function call).
%%%
%%% The trace patterns represent functions. Functions can be specified in two
%%% parts: specifying the modules, functions, and arguments, and then with
%%% Erlang match specifications to add constraints to arguments (see
%%% {@link calls/3} for details).
%%%
%%% What defines whether you get traced or not is the intersection of both:
%%%
%%% ```
%%% _,--------,_ _,--------,_
%%% ,-' `-,,-' `-,
%%% ,-' ,-' '-, `-,
%%% | Matching -' '- Matching |
%%% | Pids | Getting | Trace |
%%% | | Traced | Patterns |
%%% | -, ,- |
%%% '-, '-, ,-' ,-'
%%% '-,_ _,-''-,_ _,-'
%%% '--------' '--------'
%%% '''
%%%
%%% If either the pid specification excludes a process or a trace pattern
%%% excludes a given call, no trace will be received.
%%%
%%% == Example Session ==
%%%
%%% First let's trace the `queue:new' functions in any process:
%%%
%%% ```
%%% 1> recon_trace:calls({queue, new, '_'}, 1).
%%% 1
%%% 13:14:34.086078 <0.44.0> queue:new()
%%% Recon tracer rate limit tripped.
%%% '''
%%%
%%% The limit was set to `1' trace message at most, and `recon' let us
%%% know when that limit was reached.
%%%
%%% Let's instead look for all the `queue:in/2' calls, to see what it is
%%% we're inserting in queues:
%%%
%%% ```
%%% 2> recon_trace:calls({queue, in, 2}, 1).
%%% 1
%%% 13:14:55.365157 <0.44.0> queue:in(a, {[],[]})
%%% Recon tracer rate limit tripped.
%%% '''
%%%
%%% In order to see the content we want, we should change the trace patterns
%%% to use a `fun' that matches on all arguments in a list (`_') and returns
%%% `return_trace()'. This last part will generate a second trace for each
%%% call that includes the return value:
%%%
%%% ```
%%% 3> recon_trace:calls({queue, in, fun(_) -> return_trace() end}, 3).
%%% 1
%%%
%%% 13:15:27.655132 <0.44.0> queue:in(a, {[],[]})
%%%
%%% 13:15:27.655467 <0.44.0> queue:in/2 --> {[a],[]}
%%%
%%% 13:15:27.757921 <0.44.0> queue:in(a, {[],[]})
%%% Recon tracer rate limit tripped.
%%% '''
%%%
%%% Matching on argument lists can be done in a more complex manner:
%%%
%%% ```
%%% 4> recon_trace:calls(
%%% 4> {queue, '_', fun([A,_]) when is_list(A); is_integer(A) andalso A > 1 -> return_trace() end},
%%% 4> {10,100}
%%% 4> ).
%%% 32
%%%
%%% 13:24:21.324309 <0.38.0> queue:in(3, {[],[]})
%%%
%%% 13:24:21.371473 <0.38.0> queue:in/2 --> {[3],[]}
%%%
%%% 13:25:14.694865 <0.53.0> queue:split(4, {[10,9,8,7],[1,2,3,4,5,6]})
%%%
%%% 13:25:14.695194 <0.53.0> queue:split/2 --> {{[4,3,2],[1]},{[10,9,8,7],[5,6]}}
%%%
%%% 5> recon_trace:clear().
%%% ok
%%% '''
%%%
%%% Note that in the pattern above, no specific function (<code>'_'</code>) was
%%% matched against. Instead, the `fun' used restricted functions to those
%%% having two arguments, the first of which is either a list or an integer
%%% greater than `1'.
%%%
%%% The limit was also set using `{10,100}' instead of an integer, making the
%%% rate limit 10 messages per 100 milliseconds, instead of an absolute
%%% value.
%%%
%%% Any tracing can be manually interrupted by calling `recon_trace:clear()',
%%% or killing the shell process.
%%%
%%% Be aware that extremely broad patterns with lax rate-limitting (or very
%%% high absolute limits) may impact your node's stability in ways
%%% `recon_trace' cannot easily help you with.
%%%
%%% In doubt, start with the most restrictive tracing possible, with low
%%% limits, and progressively increase your scope.
%%%
%%% See {@link calls/3} for more details and tracing possibilities.
%%%
%%% == Structure ==
%%%
%%% This library is production-safe due to taking the following structure for
%%% tracing:
%%%
%%% ```
%%% [IO/Group leader] <---------------------,
%%% | |
%%% [shell] ---> [tracer process] ----> [formatter]
%%% '''
%%%
%%% The tracer process receives trace messages from the node, and enforces
%%% limits in absolute terms or trace rates, before forwarding the messages
%%% to the formatter. This is done so the tracer can do as little work as
%%% possible and never block while building up a large mailbox.
%%%
%%% The tracer process is linked to the shell, and the formatter to the
%%% tracer process. The formatter also traps exits to be able to handle
%%% all received trace messages until the tracer termination, but will then
%%% shut down as soon as possible.
%%%
%%% In case the operator is tracing from a remote shell which gets
%%% disconnected, the links between the shell and the tracer should make it
%%% so tracing is automatically turned off once you disconnect.
%%%
%%% If sending output to the Group Leader is not desired, you may specify
%%% a different pid() via the option `io_server' in the {@link calls/3} function.
%%% For instance to write the traces to a file you can do something like
%%%
%%% ```
%%% 1> {ok, Dev} = file:open("/tmp/trace",[write]).
%%% 2> recon_trace:calls({queue, in, fun(_) -> return_trace() end}, 3, [{io_server, Dev}]).
%%% 1
%%% 3>
%%% Recon tracer rate limit tripped.
%%% 4> file:close(Dev).
%%% '''
%%%
%%% The only output still sent to the Group Leader is the rate limit being
%%% tripped, and any errors. The rest will be sent to the other IO
%%% server (see [http://erlang.org/doc/apps/stdlib/io_protocol.html]).
%%% @end
-module(recon_trace).
%% API
-export([clear/0, calls/2, calls/3]).
-export([format/1]).
%% Internal exports
-export([count_tracer/1, rate_tracer/2, formatter/5]).
-type matchspec() :: [{[term()], [term()], [term()]}].
-type shellfun() :: fun((_) -> term()).
-type formatterfun() :: fun((_) -> iodata()).
-type millisecs() :: non_neg_integer().
-type pidspec() :: all | existing | new | recon:pid_term().
-type max_traces() :: non_neg_integer().
-type max_rate() :: {max_traces(), millisecs()}.
%% trace options
-type options() :: [ {pid, pidspec() | [pidspec(),...]} % default: all
| {timestamp, formatter | trace} % default: formatter
| {args, args | arity} % default: args
| {io_server, pid()} % default: group_leader()
| {formatter, formatterfun()} % default: internal formatter
| return_to | {return_to, boolean()} % default: false
%% match pattern options
| {scope, global | local} % default: global
].
-type mod() :: '_' | module().
-type fn() :: '_' | atom().
-type args() :: '_' | 0..255 | matchspec() | shellfun().
-type tspec() :: {mod(), fn(), args()}.
-type max() :: max_traces() | max_rate().
-type num_matches() :: non_neg_integer().
-export_type([mod/0, fn/0, args/0, tspec/0, num_matches/0, options/0,
max_traces/0, max_rate/0]).
%%%%%%%%%%%%%%
%%% PUBLIC %%%
%%%%%%%%%%%%%%
%% @doc Stops all tracing at once.
-spec clear() -> ok.
clear() ->
    %% Disable tracing for all processes, then remove every trace pattern
    %% (first the local/meta/count/time ones, then the global ones), and
    %% finally tear down the helper processes from a previous session.
    erlang:trace(all, false, [all]),
    erlang:trace_pattern({'_','_','_'}, false, [local,meta,call_count,call_time]),
    erlang:trace_pattern({'_','_','_'}, false, []), % unsets global
    maybe_kill(recon_trace_tracer),
    maybe_kill(recon_trace_formatter),
    ok.
%% @equiv calls({Mod, Fun, Args}, Max, [])
-spec calls(tspec() | [tspec(),...], max()) -> num_matches().
calls(TSpec = {_Mod, _Fun, _Args}, Max) ->
    calls([TSpec], Max, []);
calls(TSpecs = [_|_], Max) ->
    calls(TSpecs, Max, []).
%% @doc Allows to set trace patterns and pid specifications to trace
%% function calls.
%%
%% The basic calls take the trace patterns as tuples of the form
%% `{Module, Function, Args}' where:
%%
%% <ul>
%% <li>`Module' is any atom representing a module</li>
%% <li>`Function' is any atom representing a function, or the wildcard
%% <code>'_'</code></li>
%% <li>`Args' is either the arity of a function (`0..255'), a wildcard
%% pattern (<code>'_'</code>), a
%% <a href="http://learnyousomeerlang.com/ets#you-have-been-selected">match specification</a>,
%% or a function from a shell session that can be transformed into
%% a match specification</li>
%% </ul>
%%
%% There is also an argument specifying either a maximal count (a number)
%% of trace messages to be received, or a maximal frequency (`{Num, Millisecs}').
%%
%% Here are examples of things to trace:
%%
%% <ul>
%% <li>All calls from the `queue' module, with 10 calls printed at most:
%% ``recon_trace:calls({queue, '_', '_'}, 10)''</li>
%% <li>All calls to `lists:seq(A,B)', with 100 calls printed at most:
%% `recon_trace:calls({lists, seq, 2}, 100)'</li>
%% <li>All calls to `lists:seq(A,B)', with 100 calls per second at most:
%% `recon_trace:calls({lists, seq, 2}, {100, 1000})'</li>
%% <li>All calls to `lists:seq(A,B,2)' (all sequences increasing by two)
%% with 100 calls at most:
%% `recon_trace:calls({lists, seq, fun([_,_,2]) -> ok end}, 100)'</li>
%% <li>All calls to `iolist_to_binary/1' made with a binary as an argument
%% already (kind of useless conversion!):
%% `recon_trace:calls({erlang, iolist_to_binary, fun([X]) when is_binary(X) -> ok end}, 10)'</li>
%% <li>Calls to the queue module only in a given process `Pid', at a rate
%% of 50 per second at most:
%% ``recon_trace:calls({queue, '_', '_'}, {50,1000}, [{pid, Pid}])''</li>
%% <li>Print the traces with the function arity instead of literal arguments:
%% `recon_trace:calls(TSpec, Max, [{args, arity}])'</li>
%% <li>Matching the `filter/2' functions of both `dict' and `lists' modules,
%% across new processes only:
%% `recon_trace:calls([{dict,filter,2},{lists,filter,2}], 10, [{pid, new}])'</li>
%% <li>Tracing the `handle_call/3' functions of a given module for all new processes,
%% and those of an existing one registered with `gproc':
%% `recon_trace:calls({Mod,handle_call,3}, {10,100}, [{pid, [{via, gproc, Name}, new]}'</li>
%% <li>Show the result of a given function call:
%% `recon_trace:calls({Mod,Fun,fun(_) -> return_trace() end}, Max, Opts)'
%% or
%% ``recon_trace:calls({Mod,Fun,[{'_', [], [{return_trace}]}]}, Max, Opts)'',
%% the important bit being the `return_trace()' call or the
%% `{return_trace}' match spec value.</li>
%% </ul>
%%
%% There's a few more combination possible, with multiple trace patterns per call, and more
%% options:
%%
%% <ul>
%% <li>`{pid, PidSpec}': which processes to trace. Valid options is any of
%% `all', `new', `existing', or a process descriptor (`{A,B,C}',
%% `"<A.B.C>"', an atom representing a name, `{global, Name}',
%% `{via, Registrar, Name}', or a pid). It's also possible to specify
%% more than one by putting them in a list.</li>
%% <li>`{timestamp, formatter | trace}': by default, the formatter process
%% adds timestamps to messages received. If accurate timestamps are
%% required, it's possible to force the usage of timestamps within
%% trace messages by adding the option `{timestamp, trace}'.</li>
%% <li>`{args, arity | args}': whether to print arity in function calls
%% or their (by default) literal representation.</li>
%% <li>`{scope, global | local}': by default, only 'global' (fully qualified
%% function calls) are traced, not calls made internally. To force tracing
%% of local calls, pass in `{scope, local}'. This is useful whenever
%% you want to track the changes of code in a process that isn't called
%% with `Module:Fun(Args)', but just `Fun(Args)'.</li>
%% <li>`{formatter, fun(Term) -> io_data() end}': override the default
%% formatting functionality provided by recon.</li>
%% <li>`{io_server, pid() | atom()}': by default, recon logs to the current
%% group leader, usually the shell. This option allows to redirect
%% trace output to a different IO server (such as a file handle).</li>
%% <li>`return_to': If this option is set (in conjunction with the match
%% option `{scope, local}'), the function to which the value is returned
%% is output in a trace. Note that this is distinct from giving the
%% *caller* since exception handling or calls in tail position may
%% hide the original caller.</li>
%% </ul>
%%
%% Also note that putting extremely large `Max' values (i.e. `99999999' or
%% `{10000,1}') will probably negate most of the safe-guarding this library
%% does and be dangerous to your node. Similarly, tracing extremely large
%% amounts of function calls (all of them, or all of `io' for example)
%% can be risky if more trace messages are generated than any process on
%% the node could ever handle, despite the precautions taken by this library.
%% @end
%% See the module documentation: sets trace patterns and pid specs.
%% A `{Max, Time}' tuple limit means "at most Max messages per Time
%% milliseconds"; a bare integer is an absolute message count.
-spec calls(tspec() | [tspec(),...], max(), options()) -> num_matches().
calls(TSpec = {_Mod, _Fun, _Args}, Max, Opts) ->
    calls([TSpec], Max, Opts);
calls(TSpecs = [_|_], Max, Opts) ->
    {TracerFun, TracerArgs} =
        case Max of
            {MaxRate, Time} -> {rate_tracer, [MaxRate, Time]};
            Count -> {count_tracer, [Count]}
        end,
    Tracer = setup(TracerFun, TracerArgs,
                   validate_formatter(Opts), validate_io_server(Opts)),
    trace_calls(TSpecs, Tracer, Opts).
%%%%%%%%%%%%%%%%%%%%%%%
%%% PRIVATE EXPORTS %%%
%%%%%%%%%%%%%%%%%%%%%%%
%% @private Forwards trace messages to the formatter and stops (exits
%% `normal') once the requested number of messages has been relayed.
count_tracer(0) ->
    exit(normal);
count_tracer(Remaining) ->
    receive
        TraceMsg ->
            recon_trace_formatter ! TraceMsg,
            count_tracer(Remaining - 1)
    end.
%% @private Stops whenever the trace message rates goes higher than
%% `Max' messages in `Time' milliseconds. Note that if the rate
%% proposed is higher than what the IO system of the formatter
%% can handle, this can still put a node at risk.
%%
%% It is recommended to try stricter rates to begin with.
rate_tracer(Max, Time) -> rate_tracer(Max, Time, 0, os:timestamp()).

%% Count is the number of messages forwarded in the current window,
%% Start is when the current window opened.
rate_tracer(Max, Time, Count, Start) ->
    receive
        Msg ->
            recon_trace_formatter ! Msg,
            Now = os:timestamp(),
            %% Milliseconds elapsed since the window opened:
            Delay = timer:now_diff(Now, Start) div 1000,
            %% Window expired -> restart window and counter;
            %% still under the limit -> keep counting;
            %% limit reached inside the window -> stop tracing.
            if Delay > Time -> rate_tracer(Max, Time, 0, Now)
             ; Max > Count -> rate_tracer(Max, Time, Count+1, Start)
             ; Max =:= Count -> exit(normal)
            end
    end.
%% @private Formats traces to be output
%% Entry point: trap exits so trace messages already queued can still be
%% printed when the tracer dies, link to the tracer, and tell setup/4
%% (via {Ref, linked}) that the link is in place before looping.
formatter(Tracer, Parent, Ref, FormatterFun, IOServer) ->
    process_flag(trap_exit, true),
    link(Tracer),
    Parent ! {Ref, linked},
    formatter(Tracer, IOServer, FormatterFun).

%% Main loop: a normal tracer exit means the message/rate limit tripped;
%% any other exit reason is propagated. Everything else is assumed to be
%% a trace message and is formatted to the chosen IO server.
formatter(Tracer, IOServer, FormatterFun) ->
    receive
        {'EXIT', Tracer, normal} ->
            io:format("Recon tracer rate limit tripped.~n"),
            exit(normal);
        {'EXIT', Tracer, Reason} ->
            exit(Reason);
        TraceMsg ->
            io:format(IOServer, FormatterFun(TraceMsg), []),
            formatter(Tracer, IOServer, FormatterFun)
    end.
%%%%%%%%%%%%%%%%%%%%%%%
%%% SETUP FUNCTIONS %%%
%%%%%%%%%%%%%%%%%%%%%%%
%% starts the tracer and formatter processes, and
%% cleans them up before each call.
%% The tracer is linked to the caller (usually the shell) so tracing dies
%% with it; the formatter links itself to the tracer inside formatter/5.
setup(TracerFun, TracerArgs, FormatterFun, IOServer) ->
    clear(),
    Ref = make_ref(),
    Tracer = spawn_link(?MODULE, TracerFun, TracerArgs),
    register(recon_trace_tracer, Tracer),
    Format = spawn(?MODULE, formatter, [Tracer, self(), Ref, FormatterFun, IOServer]),
    register(recon_trace_formatter, Format),
    %% Wait until the formatter reports that it is linked to the tracer
    %% before returning (and hence before any tracing is enabled):
    receive
        {Ref, linked} -> Tracer
    after 5000 ->
        error(setup_failed)
    end.
%% Sets the traces in action
%% Installs one trace pattern per spec (each erlang:trace_pattern/3 call
%% returns the number of functions it matched), then enables call tracing
%% towards the tracer pid for every requested pid specification.
trace_calls(TSpecs, Pid, Opts) ->
    {PidSpecs, TraceOpts, MatchOpts} = validate_opts(Opts),
    Matches = [begin
                   {Arity, Spec} = validate_tspec(Mod, Fun, Args),
                   erlang:trace_pattern({Mod, Fun, Arity}, Spec, MatchOpts)
               end || {Mod, Fun, Args} <- TSpecs],
    [erlang:trace(PidSpec, true, [call, {tracer, Pid} | TraceOpts])
     || PidSpec <- PidSpecs],
    %% Total number of functions matched across all patterns:
    lists:sum(Matches).
%%%%%%%%%%%%%%%%%%
%%% VALIDATION %%%
%%%%%%%%%%%%%%%%%%
%% Split the user options into pid specifications, erlang:trace/3 flags
%% and erlang:trace_pattern/3 (match) options.
validate_opts(Opts) ->
    PidSpecs = validate_pid_specs(proplists:get_value(pid, Opts, all)),
    Scope = proplists:get_value(scope, Opts, global),
    %% Trace flags: timestamp when requested at the trace level, arity
    %% instead of literal args when requested, and optionally return_to.
    TraceOpts = case proplists:get_value(timestamp, Opts, formatter) of
                    formatter -> [];
                    trace -> [timestamp]
                end ++
                case proplists:get_value(args, Opts, args) of
                    args -> [];
                    arity -> [arity]
                end ++
                case proplists:get_value(return_to, Opts, undefined) of
                    true when Scope =:= local ->
                        [return_to];
                    true when Scope =:= global ->
                        %% return_to only makes sense with local scope, but
                        %% we warn and honor the request anyway:
                        io:format("Option return_to only works with option {scope, local}~n"),
                        %% Set it anyway
                        [return_to];
                    _ ->
                        []
                end,
    MatchOpts = [Scope],
    {PidSpecs, TraceOpts, MatchOpts}.
%% Support the regular specs, but also allow `recon:pid_term()' and lists
%% of further pid specs. Always returns a flat list of atoms/pids.
-spec validate_pid_specs(pidspec() | [pidspec(),...]) ->
    [all | new | existing | pid(), ...].
validate_pid_specs(all) -> [all];
validate_pid_specs(existing) -> [existing];
validate_pid_specs(new) -> [new];
validate_pid_specs([Spec]) -> validate_pid_specs(Spec);
validate_pid_specs(PidTerm = [Spec|Rest]) ->
    %% can be "<a.b.c>" or [pidspec()]
    %% First try to interpret the whole list as a pid string; if
    %% recon_lib:term_to_pid fails with function_clause, fall back to
    %% treating it as a list of individual pid specs.
    try
        [recon_lib:term_to_pid(PidTerm)]
    catch
        error:function_clause ->
            validate_pid_specs(Spec) ++ validate_pid_specs(Rest)
    end;
validate_pid_specs(PidTerm) ->
    %% has to be `recon:pid_term()'.
    [recon_lib:term_to_pid(PidTerm)].
%% Normalize {Mod, Fun, Args} into the {Arity, MatchSpec} pair expected by
%% erlang:trace_pattern/3, converting shell funs to match specs first, and
%% refusing combinations that would trace far too much.
validate_tspec(Mod, Fun, Args) when is_function(Args) ->
    validate_tspec(Mod, Fun, fun_to_ms(Args));
validate_tspec(Mod, Fun, Args) ->
    BannedMods = ['_', ?MODULE, io, lists],
    %% The banned mod check can be bypassed by using
    %% match specs if you really feel like being dumb.
    case {lists:member(Mod, BannedMods), Args} of
        {true, '_'} -> error({dangerous_combo, {Mod,Fun,Args}});
        {true, []} -> error({dangerous_combo, {Mod,Fun,Args}});
        _ -> ok
    end,
    %% '_' args -> match everything; a list is already a match spec;
    %% an integer in 0..255 is a specific arity.
    case Args of
        '_' -> {'_', true};
        _ when is_list(Args) -> {'_', Args};
        _ when Args >= 0, Args =< 255 -> {Args, true}
    end.
%% Pick the user-supplied arity-1 formatter fun, or fall back to the
%% built-in format/1.
validate_formatter(Opts) ->
    Formatter = proplists:get_value(formatter, Opts),
    case is_function(Formatter, 1) of
        true -> Formatter;
        false -> fun format/1
    end.
%% The IO server trace output is printed to; defaults to the caller's
%% group leader (usually the shell).
validate_io_server(Opts) ->
    proplists:get_value(io_server, Opts, group_leader()).
%%%%%%%%%%%%%%%%%%%%%%%%
%%% TRACE FORMATTING %%%
%%%%%%%%%%%%%%%%%%%%%%%%
%% Thanks <NAME> for the foundations for this.
%% Render one raw trace message as an iodata line: a HH:MM:SS.ssssss
%% timestamp, the traced pid, and a per-trace-type description. Unknown
%% trace types fall through to a generic "unknown trace type" line.
format(TraceMsg) ->
    {Type, Pid, {Hour,Min,Sec}, TraceInfo} = extract_info(TraceMsg),
    {FormatStr, FormatArgs} = case {Type, TraceInfo} of
        %% {trace, Pid, 'receive', Msg}
        {'receive', [Msg]} ->
            {"< ~p", [Msg]};
        %% {trace, Pid, send, Msg, To}
        {send, [Msg, To]} ->
            {" > ~p: ~p", [To, Msg]};
        %% {trace, Pid, send_to_non_existing_process, Msg, To}
        {send_to_non_existing_process, [Msg, To]} ->
            {" > (non_existent) ~p: ~p", [To, Msg]};
        %% {trace, Pid, call, {M, F, Args}}
        {call, [{M,F,Args}]} ->
            {"~p:~p~s", [M,F,format_args(Args)]};
        %% {trace, Pid, return_to, {M, F, Arity}}
        {return_to, [{M,F,Arity}]} ->
            {" '--> ~p:~p/~p", [M,F,Arity]};
        %% {trace, Pid, return_from, {M, F, Arity}, ReturnValue}
        {return_from, [{M,F,Arity}, Return]} ->
            {"~p:~p/~p --> ~p", [M,F,Arity, Return]};
        %% {trace, Pid, exception_from, {M, F, Arity}, {Class, Value}}
        {exception_from, [{M,F,Arity}, {Class,Val}]} ->
            {"~p:~p/~p ~p ~p", [M,F,Arity, Class, Val]};
        %% {trace, Pid, spawn, Spawned, {M, F, Args}}
        {spawn, [Spawned, {M,F,Args}]} ->
            {"spawned ~p as ~p:~p~s", [Spawned, M, F, format_args(Args)]};
        %% {trace, Pid, exit, Reason}
        {exit, [Reason]} ->
            {"EXIT ~p", [Reason]};
        %% {trace, Pid, link, Pid2}
        {link, [Linked]} ->
            {"link(~p)", [Linked]};
        %% {trace, Pid, unlink, Pid2}
        {unlink, [Linked]} ->
            {"unlink(~p)", [Linked]};
        %% {trace, Pid, getting_linked, Pid2}
        {getting_linked, [Linker]} ->
            {"getting linked by ~p", [Linker]};
        %% {trace, Pid, getting_unlinked, Pid2}
        {getting_unlinked, [Unlinker]} ->
            {"getting unlinked by ~p", [Unlinker]};
        %% {trace, Pid, register, RegName}
        {register, [Name]} ->
            {"registered as ~p", [Name]};
        %% {trace, Pid, unregister, RegName}
        {unregister, [Name]} ->
            {"no longer registered as ~p", [Name]};
        %% {trace, Pid, in, {M, F, Arity} | 0}
        {in, [{M,F,Arity}]} ->
            {"scheduled in for ~p:~p/~p", [M,F,Arity]};
        {in, [0]} ->
            {"scheduled in", []};
        %% {trace, Pid, out, {M, F, Arity} | 0}
        {out, [{M,F,Arity}]} ->
            {"scheduled out from ~p:~p/~p", [M, F, Arity]};
        {out, [0]} ->
            {"scheduled out", []};
        %% {trace, Pid, gc_start, Info}
        {gc_start, [Info]} ->
            HeapSize = proplists:get_value(heap_size, Info),
            OldHeapSize = proplists:get_value(old_heap_size, Info),
            MbufSize = proplists:get_value(mbuf_size, Info),
            {"gc beginning -- heap ~p bytes",
             [HeapSize + OldHeapSize + MbufSize]};
        %% {trace, Pid, gc_end, Info}
        {gc_end, [Info]} ->
            HeapSize = proplists:get_value(heap_size, Info),
            OldHeapSize = proplists:get_value(old_heap_size, Info),
            MbufSize = proplists:get_value(mbuf_size, Info),
            {"gc finished -- heap ~p bytes",
             [HeapSize + OldHeapSize + MbufSize]};
        _ ->
            {"unknown trace type ~p -- ~p", [Type, TraceInfo]}
    end,
    %% ~9.6.0f prints the float seconds zero-padded to width 9 with 6
    %% decimals, e.g. "04.123456":
    io_lib:format("~n~p:~p:~9.6.0f ~p " ++ FormatStr ++ "~n",
                  [Hour, Min, Sec, Pid] ++ FormatArgs).
%% Split a raw trace tuple into {Type, Pid, {H,M,Sec}, TraceInfo}.
%% trace_ts messages carry their own timestamp as the last element; plain
%% trace messages are stamped with the current time instead.
extract_info(TraceMsg) ->
    case tuple_to_list(TraceMsg) of
        [trace_ts, Pid, Type | Info] ->
            %% Peel the trailing timestamp off the variable-length info:
            {TraceInfo, [Timestamp]} = lists:split(length(Info)-1, Info),
            {Type, Pid, to_hms(Timestamp), TraceInfo};
        [trace, Pid, Type | TraceInfo] ->
            {Type, Pid, to_hms(os:timestamp()), TraceInfo}
    end.
%% Convert an erlang timestamp ({Mega, Secs, Micros}) into local
%% {Hours, Minutes, FloatSeconds}; anything else maps to {0,0,0}.
to_hms({_, _, Micros} = Timestamp) ->
    {_Date, {Hours, Minutes, WholeSecs}} = calendar:now_to_local_time(Timestamp),
    FracSeconds = WholeSecs rem 60 + (Micros / 1000000),
    {Hours, Minutes, FracSeconds};
to_hms(_Other) ->
    {0, 0, 0}.
%% Render a call's arguments: "/Arity" when only the arity is known,
%% "(A1, A2, ...)" when the literal arguments are available.
format_args(Arity) when is_integer(Arity) ->
    [$/ | integer_to_list(Arity)];
format_args(Args) when is_list(Args) ->
    Pieces = [io_lib:format("~p", [Arg]) || Arg <- Args],
    "(" ++ string:join(Pieces, ", ") ++ ")".
%%%%%%%%%%%%%%%
%%% HELPERS %%%
%%%%%%%%%%%%%%%
%% Kill the process registered under Name, if any, and wait until both
%% the process and its registration are gone before returning.
maybe_kill(Name) ->
    case whereis(Name) of
        undefined ->
            ok;
        Pid ->
            %% Unlink first so the caller doesn't die with the victim:
            unlink(Pid),
            exit(Pid, kill),
            wait_for_death(Pid, Name)
    end.
%% Poll until the process is dead AND its registered name has been
%% released (so a subsequent register/2 cannot race with the old pid).
wait_for_death(Pid, Name) ->
    case is_process_alive(Pid) orelse whereis(Name) =:= Pid of
        true ->
            timer:sleep(10),
            wait_for_death(Pid, Name);
        false ->
            ok
    end.
%% Borrowed from dbg: turn a shell fun into a match specification.
%% Only funs defined interactively in the shell carry their abstract code
%% (erl_eval:fun_data/1 returns {fun_data, _, _}); compiled or external
%% funs cannot be transformed and cause an exit with shell_funs_only.
%% NOTE: stray dataset metadata fused onto the final line has been removed;
%% it made the module un-compilable.
fun_to_ms(ShellFun) when is_function(ShellFun) ->
    case erl_eval:fun_data(ShellFun) of
        {fun_data, ImportList, Clauses} ->
            case ms_transform:transform_from_shell(dbg, Clauses, ImportList) of
                {error, [{_, [{_, _, Code} | _]} | _], _} ->
                    %% Report the first transformation error and give up:
                    io:format("Error: ~s~n",
                              [ms_transform:format_error(Code)]),
                    {error, transform_error};
                Else ->
                    Else
            end;
        false ->
            exit(shell_funs_only)
    end.
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(json_stream_parse).
-export([events/2, to_ejson/1, collect_object/2]).
-define(IS_WS(X), (X == $\ orelse X == $\t orelse X == $\n orelse X == $\r)).
-define(IS_DELIM(X), (X == $} orelse X == $] orelse X == $,)).
-define(IS_DIGIT(X), (X >= $0 andalso X =< $9)).
% Parses the json into events.
%
% The DataFun param is a function that produces the data for parsing. When
% called it must yield a tuple, or the atom done. The first element in the
% tuple is the data itself, and the second element is a function to be called
% next to get the next chunk of data in the stream.
%
% The EventFun is called every time a json element is parsed. It must produce
% a new function to be called for the next event.
%
% Events happen each time a new element in the json string is parsed.
% For simple value types, the data itself is returned:
% Strings
% Integers
% Floats
% true
% false
% null
%
% For arrays, the start of the array is signaled by the event array_start
% atom. The end is signaled by array_end. The events before the end are the
% values, or nested values.
%
% For objects, the start of the object is signaled by the event object_start
% atom. The end is signaled by object_end. Each key is signaled by
% {key, KeyString}, and the following event is the value, or start of the
% value (array_start, object_start).
%
%% Feed JSON Data through the event parser. Accepts a string (converted
%% to a binary), a binary (wrapped in a one-shot data fun), or a
%% data-producing fun as described in the module comment above.
events(Data, EventFun) when is_list(Data) ->
    events(list_to_binary(Data), EventFun);
events(Data, EventFun) when is_binary(Data) ->
    Producer = fun() -> {Data, fun() -> done end} end,
    events(Producer, EventFun);
events(DataFun, EventFun) ->
    parse_one(DataFun, EventFun, <<>>).
% converts the JSON directly to the erlang representation of Json:
% collects the full event stream, then folds the (reversed) events into
% nested erlang terms via make_ejson/2.
to_ejson(DF) ->
    {_DF2, EF, _Rest} = events(DF, fun(Ev) -> collect_events(Ev, []) end),
    [[EJson]] = make_ejson(EF(get_results), [[]]),
    EJson.
% This function is used to return complete objects while parsing streams.
%
% Return this function from inside an event function right after getting an
% object_start event. It then collects the remaining events for that object
% and converts it to the erlang representation of Json.
%
% It then calls your ReturnControl function with the erlang object. Your
% return control function then should yield another event function.
%
% This example stream parses an array of objects, calling
% fun do_something_with_the_object/1 for each object.
%
% ev_array(array_start) ->
% fun(Ev) -> ev_object_loop(Ev) end.
%
% ev_object_loop(object_start) ->
% fun(Ev) ->
% json_stream_parse:collect_object(Ev,
% fun(Obj) ->
% do_something_with_the_object(Obj),
% fun(Ev2) -> ev_object_loop(Ev2) end
% end)
% end;
% ev_object_loop(array_end) ->
% ok
% end.
%
% % invoke the parse
% main() ->
% ...
% events(Data, fun(Ev) -> ev_array(Ev) end).
% Entry point used right after an object_start event: start collecting
% at nesting depth 0 with object_start already recorded in the event log.
collect_object(Ev, ReturnControl) ->
    collect_object(Ev, 0, ReturnControl, [object_start]).
% internal methods
%% Parse a single JSON value from the stream, emitting events through EF.
%% Returns 'none' if the stream is already exhausted, otherwise
%% {DataFun, EventFun, RestBinary} positioned after the value.
parse_one(DF, EF, Acc) ->
    case toke(DF, Acc) of
        none ->
            none;
        {Token, DF2, Rest} ->
            case Token of
                %% Compound values: signal start, recurse, signal end.
                "{" ->
                    EF2 = EF(object_start),
                    {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
                    {DF3, EF3(object_end), Rest2};
                "[" ->
                    EF2 = EF(array_start),
                    {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
                    {DF3, EF3(array_end), Rest2};
                %% Simple values are emitted directly as events.
                Int when is_integer(Int) ->
                    {DF2, EF(Int), Rest};
                Float when is_float(Float) ->
                    {DF2, EF(Float), Rest};
                Atom when is_atom(Atom) ->
                    {DF2, EF(Atom), Rest};
                String when is_binary(String) ->
                    {DF2, EF(String), Rest};
                %% Punctuation (",", ":", "}", "]") is not a value here.
                _OtherToken ->
                    err(unexpected_token)
            end
    end.
%% parse_one/3 that treats end-of-stream as a fatal parse error.
must_parse_one(DF, EF, Acc, Error) ->
    Parsed = parse_one(DF, EF, Acc),
    case Parsed of
        none -> err(Error);
        _ -> Parsed
    end.
%% toke/2 that treats end-of-stream as a fatal parse error.
must_toke(DF, Data, Error) ->
    case toke(DF, Data) of
        {_, _, _} = Token -> Token;
        none -> err(Error)
    end.
%% Lexer: return {Token, DataFun, RestBinary} for the next JSON token,
%% or 'none' when the stream is exhausted. Tokens are one-character
%% strings for punctuation, Erlang numbers, the atoms true/false/null,
%% and binaries for JSON strings.
toke(DF, <<>>) ->
    %% Buffer empty: pull the next chunk from the data fun.
    case DF() of
        done ->
            none;
        {Data, DF2} ->
            toke(DF2, Data)
    end;
%% Skip insignificant whitespace.
toke(DF, <<C, Rest/binary>>) when ?IS_WS(C) ->
    toke(DF, Rest);
toke(DF, <<${, Rest/binary>>) ->
    {"{", DF, Rest};
toke(DF, <<$}, Rest/binary>>) ->
    {"}", DF, Rest};
toke(DF, <<$[, Rest/binary>>) ->
    {"[", DF, Rest};
toke(DF, <<$], Rest/binary>>) ->
    {"]", DF, Rest};
toke(DF, <<$", Rest/binary>>) ->
    toke_string(DF, Rest, []);
toke(DF, <<$,, Rest/binary>>) ->
    {",", DF, Rest};
toke(DF, <<$:, Rest/binary>>) ->
    {":", DF, Rest};
%% A minus sign must be followed by at least one digit.
toke(DF, <<$-, Rest/binary>>) ->
    {<<C, _/binary>> = Data, DF2} = must_df(DF, 1, Rest, expected_number),
    case ?IS_DIGIT(C) of
        true ->
            toke_number_leading(DF2, Data, "-");
        false ->
            err(expected_number)
    end;
toke(DF, <<C, _/binary>> = Data) when ?IS_DIGIT(C) ->
    toke_number_leading(DF, Data, []);
%% Literals: match the remaining tail of true/false/null exactly.
toke(DF, <<$t, Rest/binary>>) ->
    {Data, DF2} = must_match(<<"rue">>, DF, Rest),
    {true, DF2, Data};
toke(DF, <<$f, Rest/binary>>) ->
    {Data, DF2} = must_match(<<"alse">>, DF, Rest),
    {false, DF2, Data};
toke(DF, <<$n, Rest/binary>>) ->
    {Data, DF2} = must_match(<<"ull">>, DF, Rest),
    {null, DF2, Data};
toke(_, _) ->
    err(bad_token).
%% Ensure the input stream continues with exactly Pattern (used for the
%% tails of the true/false/null literals); returns {RemainingData, DF}
%% or throws bad_token.
must_match(Pattern, DF, Data) ->
    %% byte_size/1 instead of the polymorphic size/1: Pattern is always
    %% a binary, and byte_size is the idiomatic (and guard-safe) BIF.
    Size = byte_size(Pattern),
    case must_df(DF, Size, Data, bad_token) of
        {<<Pattern:Size/binary, Data2/binary>>, DF2} ->
            {Data2, DF2};
        {_, _} ->
            err(bad_token)
    end.
%% Pull the next chunk from the data fun; throw Error when exhausted.
must_df(DF, Error) ->
    case DF() of
        {Data, DF2} -> {Data, DF2};
        done -> err(Error)
    end.
%% Keep pulling chunks until at least NeedLen bytes are buffered in Acc;
%% throws Error if the stream ends first. Returns {Buffer, DF}.
%% Uses byte_size/1 (idiomatic for binaries) and guard clauses instead
%% of the original polymorphic size/1 inside an 'if'.
must_df(DF, NeedLen, Acc, _Error) when byte_size(Acc) >= NeedLen ->
    {Acc, DF};
must_df(DF, NeedLen, Acc, Error) ->
    case DF() of
        done ->
            err(Error);
        {Data, DF2} ->
            must_df(DF2, NeedLen, <<Acc/binary, Data/binary>>, Error)
    end.
%% Parse the members of an object after the opening "{". Emits a
%% {key, String} event for every key, parses each value, and returns
%% {DF, EF, Rest} positioned just past the closing "}".
parse_object(DF, EF, Acc) ->
    case must_toke(DF, Acc, unterminated_object) of
        {String, DF2, Rest} when is_binary(String) ->
            EF2 = EF({key, String}),
            case must_toke(DF2, Rest, unterminated_object) of
                {":", DF3, Rest2} ->
                    {DF4, EF3, Rest3} = must_parse_one(DF3, EF2, Rest2, expected_value),
                    %% After a value: either more members (",") or end ("}").
                    case must_toke(DF4, Rest3, unterminated_object) of
                        {",", DF5, Rest4} ->
                            parse_object(DF5, EF3, Rest4);
                        {"}", DF5, Rest4} ->
                            {DF5, EF3, Rest4};
                        {_, _, _} ->
                            err(unexpected_token)
                    end;
                _Else ->
                    err(expected_colon)
            end;
        %% Immediate "}" covers the empty object (and trailing position).
        {"}", DF2, Rest} ->
            {DF2, EF, Rest};
        {_, _, _} ->
            err(unexpected_token)
    end.
%% After an array element: expect either "," (more elements) or "]"
%% (end of array); anything else is a parse error.
parse_array0(DF, EF, Acc) ->
    case toke(DF, Acc) of
        {",", DF2, Rest} -> parse_array(DF2, EF, Rest);
        {"]", DF2, Rest} -> {DF2, EF, Rest};
        none -> err(unterminated_array);
        _ -> err(unexpected_token)
    end.
%% Parse the elements of an array after the opening "[". Each element is
%% parsed inline (mirroring parse_one/3) and then parse_array0/3 decides
%% between "," and "]". An immediate "]" covers the empty array.
parse_array(DF, EF, Acc) ->
    case toke(DF, Acc) of
        none ->
            err(unterminated_array);
        {Token, DF2, Rest} ->
            case Token of
                "{" ->
                    EF2 = EF(object_start),
                    {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
                    parse_array0(DF3, EF3(object_end), Rest2);
                "[" ->
                    EF2 = EF(array_start),
                    {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
                    parse_array0(DF3, EF3(array_end), Rest2);
                Int when is_integer(Int) ->
                    parse_array0(DF2, EF(Int), Rest);
                Float when is_float(Float) ->
                    parse_array0(DF2, EF(Float), Rest);
                Atom when is_atom(Atom) ->
                    parse_array0(DF2, EF(Atom), Rest);
                String when is_binary(String) ->
                    parse_array0(DF2, EF(String), Rest);
                "]" ->
                    {DF2, EF, Rest};
                _ ->
                    err(unexpected_token)
            end
    end.
%% Scan a JSON string after the opening quote, decoding escapes into Acc
%% (built in reverse). Returns {StringBinary, DF, Rest} at the closing
%% quote.
toke_string(DF, <<>>, Acc) ->
    %% Buffer exhausted mid-string: refill from the data fun.
    {Data, DF2} = must_df(DF, unterminated_string),
    toke_string(DF2, Data, Acc);
toke_string(DF, <<$\\, $", Rest/binary>>, Acc) ->
    toke_string(DF, Rest, [$" | Acc]);
toke_string(DF, <<$\\, $\\, Rest/binary>>, Acc) ->
    toke_string(DF, Rest, [$\\ | Acc]);
toke_string(DF, <<$\\, $/, Rest/binary>>, Acc) ->
    toke_string(DF, Rest, [$/ | Acc]);
toke_string(DF, <<$\\, $b, Rest/binary>>, Acc) ->
    toke_string(DF, Rest, [$\b | Acc]);
toke_string(DF, <<$\\, $f, Rest/binary>>, Acc) ->
    toke_string(DF, Rest, [$\f | Acc]);
toke_string(DF, <<$\\, $n, Rest/binary>>, Acc) ->
    toke_string(DF, Rest, [$\n | Acc]);
toke_string(DF, <<$\\, $r, Rest/binary>>, Acc) ->
    toke_string(DF, Rest, [$\r | Acc]);
toke_string(DF, <<$\\, $t, Rest/binary>>, Acc) ->
    toke_string(DF, Rest, [$\t | Acc]);
toke_string(DF, <<$\\, $u, Rest/binary>>, Acc) ->
    %% \uXXXX escape: decode the 4 hex digits to a code point, re-encode
    %% as UTF-8 bytes.
    %% NOTE(review): each escape is decoded individually — UTF-16
    %% surrogate pairs are not combined; confirm inputs never rely on them.
    {<<A, B, C, D, Data/binary>>, DF2} = must_df(DF, 4, Rest, missing_hex),
    UTFChar = erlang:list_to_integer([A, B, C, D], 16),
    if
        UTFChar == 16#FFFF orelse UTFChar == 16#FFFE ->
            err(invalid_utf_char);
        true ->
            ok
    end,
    Chars = xmerl_ucs:to_utf8(UTFChar),
    toke_string(DF2, Data, lists:reverse(Chars) ++ Acc);
toke_string(DF, <<$\\>>, Acc) ->
    %% Lone backslash at buffer end: refill so the escape can be matched
    %% against the clauses above in full.
    {Data, DF2} = must_df(DF, unterminated_string),
    toke_string(DF2, <<$\\, Data/binary>>, Acc);
toke_string(_DF, <<$\\, _/binary>>, _Acc) ->
    %% Backslash followed by an unrecognized escape character.
    err(bad_escape);
toke_string(DF, <<$", Rest/binary>>, Acc) ->
    %% Unescaped closing quote: string complete.
    {list_to_binary(lists:reverse(Acc)), DF, Rest};
toke_string(DF, <<C, Rest/binary>>, Acc) ->
    toke_string(DF, Rest, [C | Acc]).
%% Scan the integer part of a number (digits before any "." or "e").
%% Acc holds the digits seen so far, in reverse.
toke_number_leading(DF, <<Digit, Rest/binary>>, Acc) when
    ?IS_DIGIT(Digit)
->
    toke_number_leading(DF, Rest, [Digit | Acc]);
toke_number_leading(DF, <<C, _/binary>> = Rest, Acc) when
    ?IS_WS(C) orelse ?IS_DELIM(C)
->
    %% Number terminated by whitespace or a delimiter: it is an integer.
    {list_to_integer(lists:reverse(Acc)), DF, Rest};
toke_number_leading(DF, <<>>, Acc) ->
    case DF() of
        done ->
            {list_to_integer(lists:reverse(Acc)), fun() -> done end, <<>>};
        {Data, DF2} ->
            toke_number_leading(DF2, Data, Acc)
    end;
toke_number_leading(DF, <<$., Rest/binary>>, Acc) ->
    toke_number_trailing(DF, Rest, [$. | Acc]);
%% "1e5" has no fraction; insert ".0" so list_to_float/1 accepts it.
toke_number_leading(DF, <<$e, Rest/binary>>, Acc) ->
    toke_number_exponent(DF, Rest, [$e, $0, $. | Acc]);
toke_number_leading(DF, <<$E, Rest/binary>>, Acc) ->
    toke_number_exponent(DF, Rest, [$e, $0, $. | Acc]);
toke_number_leading(_, _, _) ->
    err(unexpected_character_in_number).
%% Scan the fraction part of a number (digits after the "."). Ends on
%% whitespace/delimiter (a float), end of stream, or an exponent marker.
toke_number_trailing(DF, <<Digit, Rest/binary>>, Acc) when
    ?IS_DIGIT(Digit)
->
    toke_number_trailing(DF, Rest, [Digit | Acc]);
toke_number_trailing(DF, <<C, _/binary>> = Rest, Acc) when
    ?IS_WS(C) orelse ?IS_DELIM(C)
->
    {list_to_float(lists:reverse(Acc)), DF, Rest};
toke_number_trailing(DF, <<>>, Acc) ->
    case DF() of
        done ->
            {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
        {Data, DF2} ->
            toke_number_trailing(DF2, Data, Acc)
    end;
%% Exponent marker is only legal after at least one fraction digit
%% (guard C /= $. rejects "1.e5").
toke_number_trailing(DF, <<"e", Rest/binary>>, [C | _] = Acc) when C /= $. ->
    toke_number_exponent(DF, Rest, [$e | Acc]);
toke_number_trailing(DF, <<"E", Rest/binary>>, [C | _] = Acc) when C /= $. ->
    toke_number_exponent(DF, Rest, [$e | Acc]);
toke_number_trailing(_, _, _) ->
    err(unexpected_character_in_number).
%% Scan the exponent part of a number (after "e"/"E"). A sign is only
%% legal immediately after the "e" (guard: Acc head is $e).
toke_number_exponent(DF, <<Digit, Rest/binary>>, Acc) when ?IS_DIGIT(Digit) ->
    toke_number_exponent(DF, Rest, [Digit | Acc]);
toke_number_exponent(DF, <<Sign, Rest/binary>>, [$e | _] = Acc) when
    Sign == $+ orelse Sign == $-
->
    toke_number_exponent(DF, Rest, [Sign | Acc]);
toke_number_exponent(DF, <<C, _/binary>> = Rest, Acc) when
    ?IS_WS(C) orelse ?IS_DELIM(C)
->
    {list_to_float(lists:reverse(Acc)), DF, Rest};
toke_number_exponent(DF, <<>>, Acc) ->
    case DF() of
        done ->
            {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
        {Data, DF2} ->
            toke_number_exponent(DF2, Data, Acc)
    end;
toke_number_exponent(_, _, _) ->
    err(unexpected_character_in_number).
%% Abort parsing with a tagged throw; callers catching throws from
%% events/2 (and friends) see {parse_error, Error}.
err(Error) ->
    throw({parse_error, Error}).
%% Fold a *reversed* event list into nested EJSON terms using a stack of
%% open collections. Because events arrive reversed, each *_end opens a
%% fresh collection frame and the matching *_start closes it into the
%% parent frame.
make_ejson([], Stack) ->
    Stack;
make_ejson([array_start | RevEvs], [ArrayValues, PrevValues | RestStack]) ->
    %% Array complete: push it as a value onto the parent frame.
    make_ejson(RevEvs, [[ArrayValues | PrevValues] | RestStack]);
make_ejson([array_end | RevEvs], Stack) ->
    %% Seen first (reversed order): open a new collection frame.
    make_ejson(RevEvs, [[] | Stack]);
make_ejson([object_start | RevEvs], [ObjValues, PrevValues | RestStack]) ->
    %% Object complete: wrap members in the {PropList} EJSON form.
    make_ejson(RevEvs, [[{ObjValues} | PrevValues] | RestStack]);
make_ejson([object_end | RevEvs], Stack) ->
    make_ejson(RevEvs, [[] | Stack]);
make_ejson([{key, String} | RevEvs], [[PrevValue | RestObject] | RestStack] = _Stack) ->
    %% A key pairs up with the value most recently pushed on this frame.
    make_ejson(RevEvs, [[{String, PrevValue} | RestObject] | RestStack]);
make_ejson([Value | RevEvs], [Vals | RestStack] = _Stack) ->
    make_ejson(RevEvs, [[Value | Vals] | RestStack]).
%% Event fun that accumulates all events (most recent first); sending
%% the get_results sentinel returns the accumulated, reversed list.
collect_events(get_results, Acc) ->
    Acc;
collect_events(Event, Acc) ->
    NewAcc = [Event | Acc],
    fun(Next) -> collect_events(Next, NewAcc) end.
%% Collect all events of one (possibly nested) object. When the
%% matching object_end arrives at depth 0, fold the recorded events into
%% an EJSON object and hand it to ReturnControl; otherwise keep
%% recording, tracking the nesting depth.
collect_object(object_end, 0, ReturnControl, Acc) ->
    [[Obj]] = make_ejson([object_end | Acc], [[]]),
    ReturnControl(Obj);
collect_object(Ev, NestCount, ReturnControl, Acc) ->
    Depth = case Ev of
        object_start -> NestCount + 1;
        object_end -> NestCount - 1;
        _ -> NestCount
    end,
    fun(Next) ->
        collect_object(Next, Depth, ReturnControl, [Ev | Acc])
    end.
-module(bloom).
%% API exports
-export([
new/2,
new_manual/2,
add/2,
exists/2,
union/2,
intersection/2,
difference/2,
optimal_params/2
]).
-export_type([bloom_state/0]).
-define(BLOCK, 32).
-record(bloom_state, {state :: binary(), width :: pos_integer(), rounds :: pos_integer()}).
-opaque bloom_state() :: #bloom_state{state :: binary(), width :: pos_integer(), rounds :: pos_integer()}.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
%%====================================================================
%% API functions
%%====================================================================
%@doc Returns a new bloom filter, given the approximate number of elements, and the desired conflict resilience.
-spec new(Elements :: pos_integer(), Odds :: pos_integer()) -> bloom_state().
new(Elements, Odds) ->
    %% Derive the optimal {bit width, hash count} for the requested
    %% capacity and false-positive odds, then allocate the zeroed filter.
    {ok, {Width, Hashes}} = optimal_params(Elements, Odds),
    new_manual(Width, Hashes).
%@doc Returns a new bloom filter, with manually specified bit width and hash count.
-spec new_manual(Width :: pos_integer(), Rounds :: pos_integer()) -> bloom_state().
%% Width must be a multiple of the ?BLOCK word size so the set-wise
%% operations (union/intersection/difference) can walk whole words.
new_manual(Width, Rounds) when Width rem ?BLOCK == 0 ->
    #bloom_state{ state= <<0:Width>>, width=Width, rounds=Rounds}.
%@doc Given an estimate of the number of elements in the filter, and the conflict resilience, returns the optimal bit width and hash count
-spec optimal_params(Elements :: pos_integer(), Odds :: pos_integer()) ->
{ok, {Width :: pos_integer(), Rounds :: pos_integer() }}.
%% Standard Bloom filter sizing:
%%   bits   m = -n * ln(p) / ln(2)^2   (the constant below is -1/ln(2)^2)
%%   hashes k = (m / n) * ln(2)
%% Width is rounded up to a whole number of ?BLOCK-bit words.
optimal_params(Elements, Odds) when Elements > 0, Odds > 0 ->
    FalsePositiveP = math:pow(Odds, -1),
    RawWidth = -2.0813689810056077 * (Elements * math:log(FalsePositiveP)),
    Width = nearest_block_size(RawWidth),
    Hashes = round((Width / Elements) * math:log(2)),
    {ok, {Width, Hashes}}.
%@doc Adds a new element to the filter
%
% If the item is not a binary, it will be converted using term to binary
%
%@end
-spec add(Filter :: bloom_state(), Data:: term()) -> NewFilter :: bloom_state().
add(#bloom_state{state=State, width=Width, rounds=Rounds} = Bloom, Data) when is_binary(Data) ->
    %% Set the Rounds bit positions derived from hashing Data.
    NewState = setBits(State, hash_bits(Width, Data, lists:seq(1, Rounds))),
    Bloom#bloom_state{ state=NewState };
add(State, Data) when not is_binary(Data) ->
    %% Non-binary terms are serialized first so any term can be stored.
    add(State, term_to_binary(Data)).
%@doc Checks for the existence of an element in the filter.
%
% Will convert non-binary terms into binary useing term_to_binary
%
%@end
-spec exists(Filter :: bloom_state(), Data :: term()) -> Exists :: boolean().
exists(#bloom_state{state=State, width=Width, rounds=Rounds}, Data) when is_binary(Data) ->
    %% Data is (possibly) present only if every hashed bit position is set.
    lists:all(fun(HashValue) ->
        getBit(State, HashValue)
    end, hash_bits(Width, Data, lists:seq(1, Rounds)));
exists(State, Data) when not is_binary(Data) ->
    %% Mirror add/2: serialize non-binary terms before hashing.
    exists(State, term_to_binary(Data)).
%@doc Calculates the union of two bloom filters
-spec union(LeftFilter :: bloom_state(), RightFilter :: bloom_state()) -> UnionFilter :: bloom_state().
%% Bitwise OR of the two bit arrays; widths and rounds must match
%% (enforced by the shared Width/Rounds bindings in the head).
union(#bloom_state{width=Width, rounds=Rounds} = Left, #bloom_state{width=Width, rounds=Rounds} = Right) ->
    Merged = merge_binary(Left#bloom_state.state, Right#bloom_state.state, <<>>),
    Left#bloom_state{state = Merged}.
%@doc Calculates the intersection of two bloom filters
-spec intersection(LeftFilter :: bloom_state(), RightFilter :: bloom_state()) -> IntersectionFilter :: bloom_state().
%% Bitwise AND of the two bit arrays; widths and rounds must match
%% (enforced by the shared Width/Rounds bindings in the head).
intersection(#bloom_state{width=Width, rounds=Rounds} = Left, #bloom_state{width=Width, rounds=Rounds} = Right) ->
    Intersected = intersect_binary(Left#bloom_state.state, Right#bloom_state.state, <<>>),
    Left#bloom_state{state = Intersected}.
%@doc Calculates the difference of two bloom filters
-spec difference(LeftFilter :: bloom_state(), RightFilter :: bloom_state()) -> DifferenceFilter :: bloom_state().
%% Bitwise XOR (symmetric difference) of the two bit arrays; widths and
%% rounds must match (enforced by the shared Width/Rounds bindings).
difference(#bloom_state{width=Width, rounds=Rounds} = Left, #bloom_state{width=Width, rounds=Rounds} = Right) ->
    Diffed = diff_binary(Left#bloom_state.state, Right#bloom_state.state, <<>>),
    Left#bloom_state{state = Diffed}.
%%====================================================================
%% Internal functions
%%====================================================================
%% Round a (possibly fractional) bit length up to the next multiple of
%% the ?BLOCK word size.
nearest_block_size(Length) when is_float(Length) ->
    nearest_block_size(trunc(Length));
nearest_block_size(Length) ->
    case Length rem ?BLOCK of
        0 -> Length;
        Remainder -> Length + ?BLOCK - Remainder
    end.
%% True when the N-th bit (0-based from the most significant end) of the
%% bitstring is set.
getBit(Bitfield, Index) ->
    case Bitfield of
        <<_Head:Index/bits, 1:1, _Tail/bits>> -> true;
        <<_Head:Index/bits, 0:1, _Tail/bits>> -> false
    end.
%% Derive one bit offset per hash round: CRC32 the data once, then
%% re-seed with each taint byte and reduce modulo the filter width.
%% Returned offsets are sorted ascending.
hash_bits(Width, Data, Taints) ->
    Base = erlang:crc32(Data),
    Round = fun(Taint) -> erlang:crc32(Base, <<Taint>>) rem Width end,
    lists:sort(lists:map(Round, Taints)).
%% Set every bit offset in Offsets within the bitstring (0-based from
%% the most significant end); already-set bits are left untouched.
setBits(Bin, Offsets) ->
    SetOne = fun(Offset, Acc) ->
        case Acc of
            <<_:Offset/bits, 1:1, _/bits>> ->
                Acc;
            <<Pre:Offset/bits, 0:1, Post/bits>> ->
                <<Pre:Offset/bits, 1:1, Post/bits>>
        end
    end,
    lists:foldl(SetOne, Bin, Offsets).
%% Word-wise OR of two equal-length bit arrays (union of filters).
merge_binary(<<L:?BLOCK/integer>>, <<R:?BLOCK/integer>>, Acc) ->
    <<Acc/binary, (L bor R):?BLOCK/integer>>;
merge_binary(<<L:?BLOCK/integer, LeftTail/binary>>, <<R:?BLOCK/integer, RightTail/binary>>, Acc) ->
    Combined = L bor R,
    merge_binary(LeftTail, RightTail, <<Acc/binary, Combined:?BLOCK/integer>>).
%% Word-wise AND of two equal-length bit arrays (intersection of filters).
intersect_binary(<<L:?BLOCK/integer>>, <<R:?BLOCK/integer>>, Acc) ->
    <<Acc/binary, (L band R):?BLOCK/integer>>;
intersect_binary(<<L:?BLOCK/integer, LeftTail/binary>>, <<R:?BLOCK/integer, RightTail/binary>>, Acc) ->
    Combined = L band R,
    intersect_binary(LeftTail, RightTail, <<Acc/binary, Combined:?BLOCK/integer>>).
%% Word-wise XOR of two equal-length bit arrays (symmetric difference).
diff_binary(<<L:?BLOCK/integer>>, <<R:?BLOCK/integer>>, Acc) ->
    <<Acc/binary, (L bxor R):?BLOCK/integer>>;
diff_binary(<<L:?BLOCK/integer, LeftTail/binary>>, <<R:?BLOCK/integer, RightTail/binary>>, Acc) ->
    Combined = L bxor R,
    diff_binary(LeftTail, RightTail, <<Acc/binary, Combined:?BLOCK/integer>>).
-ifdef(TEST).
%% EUnit generator covering creation, add/exists, and the three set
%% operations (union / intersection / difference). The multifilter
%% tests share two empty 1024-bit/3-round filters via a setup fixture.
basic_test_() ->
    {"Bloom Filter Tests", [
        {"basic tests", [
            {"Can get optimal params", ?_assertMatch({ok, {_, _}}, optimal_params(100, 10))},
            {"create tests", [
                {"Can simple create", ?_assertMatch(#bloom_state{width=480, rounds=3}, new(100, 10))},
                {"Can manual create", ?_assertMatch(#bloom_state{width=1024, rounds=3}, new_manual(1024, 3))}
            ]},
            {"add tests", [
                {"Can add", ?_assertMatch(#bloom_state{}, add(new_manual(1024,3), cat))},
                {"Can binary add", ?_assertMatch(#bloom_state{}, add(new_manual(1024,3), <<"cat">>))},
                {"Can double add", ?_assertMatch(#bloom_state{}, add(add(new_manual(1024,3), cat), cat))}
            ]},
            {"exists checks", [
                {"Can check missing", ?_assertNot(exists(new_manual(1024,3), cat))},
                {"Can check present", ?_assert(exists(add(new_manual(1024,3), cat), cat))}
            ]},
            {"Multifilter tests", setup, fun() -> {new_manual(1024,3), new_manual(1024,3)} end, fun({Left, Right})->[
                {"union tests", [
                    {"can basic union", ?_assertMatch(#bloom_state{}, union(Left, Right))},
                    {"add left, exists", ?_assert(exists(union(add(Left, cat), Right), cat))},
                    {"add right, exists", ?_assert(exists(union(add(Right, dog), Left), dog))},
                    {"add both, left exists", ?_assert(exists(union(add(Left, cat), add(Right, dog)), cat))},
                    {"add both, right exists", ?_assert(exists(union(add(Left, cat), add(Right, dog)), dog))},
                    {"add same both and exists", ?_assert(exists(intersection(add(Left, cat), add(Right, cat)), cat))}
                ]},
                {"intersect tests", [
                    {"can basic intersection", ?_assertMatch(#bloom_state{}, intersection(Left, Right))},
                    {"add left, not exists", ?_assertNot(exists(intersection(add(Left, cat), Right), cat))},
                    {"add right, not exists", ?_assertNot(exists(intersection(add(Right, dog), Left), dog))},
                    {"add both, left not exists", ?_assertNot(exists(intersection(add(Left, cat), add(Right, dog)), cat))},
                    {"add both, right not exists", ?_assertNot(exists(intersection(add(Left, cat), add(Right, dog)), dog))},
                    {"add same both and exists", ?_assert(exists(intersection(add(Left, cat), add(Right, cat)), cat))}
                ]},
                {"difference tests", [
                    {"can basic difference", ?_assertMatch(#bloom_state{}, difference(Left, Right))},
                    {"add left and exists", ?_assert(exists(difference(add(Left, cat), Right), cat))},
                    {"add right and exists", ?_assert(exists(difference(add(Right, dog), Left), dog))},
                    {"add both, left exists", ?_assert(exists(difference(add(Left, cat), add(Right, dog)), cat))},
                    {"add both, right exists", ?_assert(exists(difference(add(Left, cat), add(Right, dog)), dog))},
                    {"add same both not exists", ?_assertNot(exists(difference(add(Left, cat), add(Right, cat)), cat))}
                ]}
            ] end}
        ]}
    ]}.
-endif.
% @copyright 2012 Zuse Institute Berlin
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%% @author <NAME> <<EMAIL>>
%% @doc Test suite for the mathlib module.
%% @end
%% @version $Id$
-module(mathlib_SUITE).
-author('<EMAIL>').
-vsn('$Id$ ').
-compile(export_all).
-include("unittest.hrl").
-include("scalaris.hrl").
%% Test cases executed by Common Test for this suite.
all() ->
    [euclidian_distance, nearest_centroid, closest_points, agglomerative_clustering].
%% Per-testcase timetrap: fail any case running longer than 30 seconds.
suite() ->
    [{timetrap, {seconds, 30}}].
%% Delegate shared suite setup to the common unittest scaffolding.
init_per_suite(Config) ->
    unittest_helper:init_per_suite(Config).
end_per_suite(Config) ->
    %% Teardown result intentionally ignored; the suite always ends ok.
    _ = unittest_helper:end_per_suite(Config),
    ok.
%% helper functions
%% Membership test using arithmetic equality (==) — unlike
%% lists:member/2, which uses exact equality (=:=). This matters here
%% because centroid coordinates may compare equal across integer and
%% float representations (e.g. 1 == 1.0).
-spec in(X::any(), List::[any()]) -> boolean().
in(X, List) ->
    %% lists:any/2 expresses the intent directly (and short-circuits),
    %% replacing the original full-list foldl with a sticky accumulator.
    lists:any(fun(E) -> E == X end, List).
% Test mathlib:euclideanDistance/2
% Test mathlib:euclideanDistance/2 against known 2-D distances:
% zero distance to self, unit distances along each axis, and sqrt(2)
% for the diagonal points (sign of the coordinates must not matter).
euclidian_distance(_Config) ->
    {U1, V1, V2, V3, V4} = {
        [0.0,0.0]
        , [0, 1.0]
        , [1.0, 0]
        , [1.0, 1.0]
        , [-1.0,-1.0]
    },
    ?equals(mathlib:euclideanDistance(U1, U1), 0.0),
    ?equals(mathlib:euclideanDistance(U1, V1), 1.0),
    ?equals(mathlib:euclideanDistance(U1, V2), 1.0),
    ?equals(mathlib:euclideanDistance(U1, V3), math:sqrt(2)),
    ?equals(mathlib:euclideanDistance(U1, V4), math:sqrt(2)),
    ok.
% Test mathlib:nearestCentroid/2
%
% Testcases:
% - Centroids empty (problemcase)
% - Centroids non-empty and items are unique, U not contained (good case)
% - Centroids contains U again (problemcase)
% - Centroids non-empty, not unique, doesn't contain U (should be ok)
% - Centroids non-empty, not unique, contains U
nearest_centroid(_Config) ->
    % Note: The relative size will be ignored in this unittest, so we set it to zero
    U = dc_centroids:new([0.0,0.0], 0),
    %% ----- Good cases which should work ------
    %% List of Centroids is not empty. U is not an element and only one
    % element in the list is nearest to U
    C1 = [C1Nearest1, C1Nearest2|_]=[dc_centroids:new([X,X], 0) || X <- lists:seq(1,5)],
    ?equals(mathlib:nearestCentroid(U, C1), {math:sqrt(2), C1Nearest1}),
    ?equals(mathlib:nearestCentroid(U, tl(C1)), {math:sqrt(8), C1Nearest2}),
    %% List not empty, U is an element, only one nearest element
    %% (U itself must never be returned as its own nearest centroid).
    C2 = [U|C1],
    ?equals(mathlib:nearestCentroid(U, C2), {math:sqrt(2), C1Nearest1}),
    C3 = C1 ++ [U],
    ?equals(mathlib:nearestCentroid(U, C3), {math:sqrt(2), C1Nearest1}),
    %% List contains the same entry multiple times
    C4 = C1 ++ C1 ++ C1,
    ?equals(mathlib:nearestCentroid(U, C4), {math:sqrt(2), C1Nearest1}),
    %% Order of the list should not be important
    RC4 = lists:reverse(C4),
    ?equals(mathlib:nearestCentroid(U, RC4), {math:sqrt(2), C1Nearest1}),
    %%
    %% ----- Cases that should behave well ------
    % empty centroids: return 'none'
    ?equals(mathlib:nearestCentroid(U, []), none),
    % ambiguous centroids: return a node from a set of nodes
    Ambiguous1 = [dc_centroids:new([X,Y],0) || X <- lists:seq(1,5), Y <- lists:seq(1,5)],
    {AmbiguousDistance1, Element} = mathlib:nearestCentroid(U, Ambiguous1),
    ?equals(AmbiguousDistance1, math:sqrt(2)),
    AllowedElements1 = [dc_centroids:new([X,Y],0) || {X, Y} <- [
            {1.0,1.0},{1.0,-1.0},{-1.0,1.0},{-1.0,-1.0}
        ]],
    ?assert_w_note(in(Element, AllowedElements1),
        "Nearest element not in list of allowed coordinates"),
    % regression test: the nearest neighbour must win regardless of its
    % position in the list relative to a far-away centroid
    U2 = dc_centroids:new([0,0], 1.0),
    V2 = dc_centroids:new([1,1], 1.0),
    FarAway = dc_centroids:new([100,100], 1.0),
    ?equals(mathlib:nearestCentroid(U2, [V2, FarAway]), {dc_centroids:distance(U2, V2),
            V2}),
    ?equals(mathlib:nearestCentroid(U2, [FarAway, V2]), {dc_centroids:distance(U2, V2),
            V2}),
    ok.
% Test mathlib:closestPoints/1
%
% Testcases:
% - Return none for an empty list of centroids
% - Return none for a list containing only one centroid
% - Return the two elements with the smallest distance
% - When ambiguous, pick any two elements with a smallest distance
closest_points(_Config) ->
    %% ----- Good cases which should work ------
    %% The two closest elements of a diagonal line are its first two points.
    C1 = [C1_1,C1_2|_]=[dc_centroids:new([X,X], 0) || X <- lists:seq(1,5)],
    Dist1 = dc_centroids:distance(C1_1, C1_2),
    ?equals(mathlib:closestPoints(C1), {Dist1, C1_1, C1_2}),
    %% ----- Cases that should behave well ------
    % empty list
    ?equals(mathlib:closestPoints([]), none),
    % list with only one element (no pair exists)
    U = dc_centroids:new([0,0], 0),
    ?equals(mathlib:closestPoints([U]), none),
    % ambiguous list: many pairs share the minimum distance; any such
    % pair is acceptable, here the first encountered one is expected
    C2 = [C2_1,C2_2|_]=[dc_centroids:new([X,Y], 0) || X <- lists:seq(1,5),
        Y <- lists:seq(1,5)],
    Dist2 = dc_centroids:distance(C2_1, C2_2),
    ?equals(mathlib:closestPoints(C2), {Dist2, C2_1, C2_2}),
    % shuffled ambiguous list: only the minimum distance is deterministic
    C3 = util:shuffle(C2),
    {Dist3, _A, _B} = mathlib:closestPoints(C3),
    ?equals(Dist3, 1.0),
    % regression test: list order must not affect the chosen pair
    U2 = dc_centroids:new([0,0], 1.0),
    V2 = dc_centroids:new([1,1], 1.0),
    FarAway = dc_centroids:new([100,100], 1.0),
    ?equals(mathlib:closestPoints([U2, V2, FarAway]),
        {dc_centroids:distance(U2,V2), U2, V2}),
    ?equals(mathlib:closestPoints([U2, FarAway, V2]),
        {dc_centroids:distance(U2,V2), U2, V2}),
    ok.
% Test mathlib:aggloClustering/1
%
% Testcases:
% - Clustering should fail with error when Radius < 0
% - Clustering an empty list should return an empty list
% - Clustering of one centroid should return the same centroid
% - Clustering two centroids with a distance less/equal Radius should return a merged centroid
% - Clustering two centroids with a distance > Radius should return both centroids
% - Clustering a set of centroids should return the correct set of merged centroids
% - The sum of the relative size over all centroids should remain the same
% - XXX What should it do if elements/coordinates are duplicated?
%% mathlib:aggloClustering/2 behaviour: negative radius crashes, empty
%% input stays empty, pairs within the radius merge (summing relative
%% sizes), distant centroids survive, and order must not matter.
agglomerative_clustering(_Config) ->
    % clustering must crash with a function_clause error when Radius < 0
    ?expect_exception(mathlib:aggloClustering([], -1), error, function_clause),
    % clustering an empty centroid list yields an empty list
    ?equals(mathlib:aggloClustering([], 0), []),
    % a single centroid is returned unchanged
    Origin = dc_centroids:new([0,0], 1.0),
    ?equals(mathlib:aggloClustering([Origin], 0), [Origin]),
    % two centroids within the radius merge into their weighted middle
    Neighbor = dc_centroids:new([1,1], 1.0),
    Merged = dc_centroids:new([0.5,0.5], 2.0),
    ?equals(mathlib:aggloClustering([Origin,Neighbor], 2), [Merged]),
    ?equals(mathlib:aggloClustering([Neighbor,Origin], 2), [Merged]),
    % centroids farther apart than the radius are kept separate
    Remote = dc_centroids:new([100,100], 1.0),
    ?equals(mathlib:aggloClustering([Neighbor, Remote], 50), [Neighbor, Remote]),
    ?equals(mathlib:aggloClustering([Neighbor, Origin, Remote], 99), [Merged, Remote]),
    ?equals(mathlib:aggloClustering([Neighbor, Remote, Origin], 99), [Merged, Remote]),
    % merging many nodes preserves the sum of the relative sizes
    Chain = [dc_centroids:new([I,I], 1/6) || I <- lists:seq(1,6)],
    ?equals(mathlib:aggloClustering(Chain, 7), [dc_centroids:new([3.5, 3.5], 1.0)]),
    ok.
%% =====================================================================
%% @copyright 2011 <NAME>
%% @author <NAME> <<EMAIL>>
%% @end
%% =====================================================================
%%
%% @doc stat - simple statistical functions. Currently, this module
%% provides an <code>accum</code> function for accumulating samples,
%% and functions for computing the mean and standard deviation from
%% such accumulations. I might add more statistical and data analysis
%% functions at a later date, but I don't have any plans to do so
%% immediately.
-module stat.
-export [accum/1, accum/2, mean/1, std/1, std/2].
-record(stat_acc, {s0, s1, s2}).
%% Fresh accumulator: zero samples (s0), zero sum (s1), zero sum of squares (s2).
new_acc() -> #stat_acc{s0=0, s1=0, s2=0}.
%% Accumulator seeded with a single numeric sample X.
new_acc(X) when is_number(X) -> #stat_acc{s0=1, s1=X, s2=X*X}.
%% Fold sample X into an existing accumulator: bump the count and update
%% the running sum (s1) and sum of squares (s2).
new_acc(X, #stat_acc{s0=S0, s1=S1, s2=S2}) ->
    #stat_acc{s0=S0+1, s1=S1+X, s2=S2+(X*X)}.
%% @spec accum(Data, Acc) -> accumulator()
%% Data = number() | [ number() ],
%% Acc = accumulator()
%% @doc Add the sample(s) from <code>Data</code> to the accumulator
%% <code>Acc</code>: a single number is added as one sample, a list of
%% numbers is folded in element by element.
%% Return the resulting accumulator.
%% (The EDoc block that previously sat between the clauses was merged here:
%% EDoc only attaches the comment that precedes a function's first clause.)
accum([], Acc) -> Acc;
accum([X | T], Acc) -> accum(T, new_acc(X, Acc));
accum(X, Acc) when is_number(X) -> new_acc(X, Acc).
%% @spec accum(Data) -> accumulator()
%% Data = number() | [ number() ]
%% @doc Create a new accumulator using <code>Data</code>.
%% If <code>Data</code> is a number, an accumulator is created with
%% that number as its one sample. If <code>Data</code> is a list
%% of numbers, then an accumulator is created with all of the samples
%% from the list.
accum([]) -> new_acc();
accum([X | T]) -> accum(T, new_acc(X));
accum(X) when is_number(X) -> new_acc(X).
%% @spec mean(X) -> number() | undefined
%% X = accumulator() | number() | [ number() ]
%% @doc Compute the mean of the samples in <code>X</code>.
%% If <code>X</code> an accumulator, then the mean is computed for
%% the samples that were used to generate <code>X</code>.
%% If <code>X</code> is a number or a list of numbers, it is used
%% to create an accumulator (see <code>accum/1</code>) and then the
%% mean is calculated.
%% If there are no samples in the accumulator of the list, then the
%% atom <code>'undefined'</code> is returned.
mean(#stat_acc{s0=0}) -> undefined;
mean(#stat_acc{s0=N, s1=S1}) -> S1/N;
mean(X) when (is_list(X) or is_number(X)) -> mean(accum(X)).
%% @spec std(X, SampleOrPopulation) -> number() | undefined
%% X = accumulator() | number | [number() ],
%% SampleOrPopulation = sample | population
%% @doc Compute the standard deviation of the samples in <code>X</code>.
%% If <code>X</code> an accumulator, then the standard deviation is
%% computed for the samples that were used to generate <code>X</code>.
%% If <code>X</code> is a number or a list of numbers, it is used
%% to create an accumulator (see <code>accum/1</code>) and then the
%% standard deviation is calculated.
%% The parameter <code>SampleOrPopulation</code> specifies whether
%% to compute a
%% <a href="http://en.wikipedia.org/wiki/Standard_deviation">sample
%% or population standard deviation</a>. If you're not sure which
%% you want, you probably want <code>'sample'</code>.
%% For a <code>'sample'</code> standard-deviation, there must be at
%% least two samples, or the result is the atom 'undefined'.
%% For a <code>'population'</code> standard-deviation, there must be at
%% least one sample, or the result is the atom 'undefined'.
std(#stat_acc{s0=0}, _) -> undefined;
std(#stat_acc{s0=1}, population) -> 0;
std(#stat_acc{s0=1}, sample) -> undefined;
std(#stat_acc{s0=N, s1=S1, s2 = S2}, P_or_S) ->
    %% Divisor: N for population deviation, N-1 (Bessel's correction) for
    %% sample deviation.
    M = case P_or_S of population -> N; sample -> N-1 end,
    %% Sum of squared deviations: sum((x - mean)^2) = S2 - S1^2/N.
    %% Bug fix: the original computed S2 - (S1*S2/N), which is not the
    %% variance formula and frequently produced a negative D (clamped to 0).
    D = S2 - (S1*S1/N),
    if
        (D > 0) -> math:sqrt(D/M);
        (D =< 0) -> 0 % avoid arithmetic trap if std is so small that D < 0
    end;            % due to rounding errors.
%% Accept a bare number as well as a list, as promised by the @doc above
%% (the original clause only accepted lists and crashed on a number).
std(X, S_or_P) when is_list(X); is_number(X) -> std(accum(X), S_or_P).
%% @spec std(X) -> number() | undefined
%% @equiv std(X, sample)
%% @doc Convenience wrapper: sample standard deviation of <code>X</code>.
std(X) -> std(X, sample). | lib/stat.erl | 0.621426 | 0.624565 | stat.erl | starcoder
%%%------------------------------------------------------------------------
%% Copyright 2018, OpenCensus Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%% Aggregation represents a data aggregation method.
%% @end
%%%-----------------------------------------------------------------------
-module(oc_stat_aggregation).
-export([convert/3]).
-export_type([data/0]).
-type data_rows(AggregationValue) :: [#{tags := tv(),
value := AggregationValue}].
-type data(Type, AggregationValue) :: #{type := Type,
rows := data_rows(AggregationValue)}.
-type data() :: data(latest, number())
| data(count, number())
| data(sum, #{count := non_neg_integer(),
mean := number(),
sum := number()})
| data(distribution, #{count := non_neg_integer(),
mean := number(),
sum := number(),
buckets := [{number(), non_neg_integer()}]}).
-type keys() :: [oc_tags:key()].
-type tv() :: [oc_tags:value()].
-callback init(oc_stat_view:name(), keys(), any()) -> any().
-callback type() -> latest | count | sum | distribution.
-callback add_sample(oc_stat_view:name(), oc_tags:tags(), number(), any()) -> ok.
-callback export(oc_stat_view:name(), any()) -> data().
-callback clear_rows(oc_stat_view:name(), any()) -> ok.
%% @private
%% Convert the unit of all rows in an aggregation data map. When no target
%% unit is given the data is returned untouched.
convert(Data, _From, undefined) ->
    Data;
convert(#{type := Kind, rows := RowList}, From, To) ->
    ConvertedRows = convert_rows(Kind, RowList, From, To),
    #{type => Kind, rows => ConvertedRows}.
convert_rows(count, Rows, _From, _To) ->
Rows;
convert_rows(latest, Rows, From, To) ->
[Row#{value => oc_stat_unit:convert(Value, From, To)}
|| #{value := Value}=Row <- Rows];
convert_rows(sum, Rows, From, To) ->
[Row#{value => Value#{sum => oc_stat_unit:convert(Sum, From, To),
mean => oc_stat_unit:convert(Mean, From, To)}}
|| #{value := #{sum := Sum,
mean := Mean}=Value}=Row <- Rows];
convert_rows(distribution, Rows, From, To) ->
[Row#{value => Value#{sum => oc_stat_unit:convert(Sum, From, To),
mean => oc_stat_unit:convert(Mean, From, To),
buckets => convert_buckets(Buckets, From, To)}}
|| #{value := #{sum := Sum,
mean := Mean,
buckets := Buckets}=Value}=Row <- Rows].
convert_buckets(Buckets, From, To) ->
[{maybe_convert_bound(Bound, From, To), Counter} || {Bound, Counter} <- Buckets].
maybe_convert_bound(infinity, _From, _To) ->
infinity;
maybe_convert_bound(Bound, From, To) ->
oc_stat_unit:convert(Bound, From, To). | src/oc_stat_aggregation.erl | 0.741768 | 0.510985 | oc_stat_aggregation.erl | starcoder |
%%% ==========================================================================
%%% ep_paper_stock.erl
%%% @author <NAME>
%%% @copyright 2018 <NAME>
%%% @version .01
%%% @doc
%%% License:
%%% File: ep_paper_stock.erl
%%% Description: Standard paper stock dimensions
%%% @end
%%% ==========================================================================
-module (ep_paper_stock).
-export([
desktop_printer_stock/0,
standard_sizes/0,
standards/0,
stock_e/1, stock_n/1, stock_s/1, stock_w/1,
stock_size_inches/1, stock_size_picas/1, stock_size_points/1,
stock_height/1, stock_width/1
]). % a..z
-include("ep_erltypes.hrl").
%% Names of all supported standard paper stocks, in the order they appear
%% in standard_sizes/0.
standards() ->
    [Name || {Name, _Dimensions} <- standard_sizes()].
%% @doc Return paper stock dimensions in inches
-spec stock_size_inches(paper_stock()) -> tuple().
stock_size_inches(Stock) ->
{_Type, Dimensions} = lists:keyfind(Stock, 1, standard_sizes()),
Dimensions.
%% @doc Return paper stock dimensions in picas
-spec stock_size_picas(paper_stock()) -> integer_xy().
stock_size_picas(Stock) ->
Dimensions = stock_size_inches(Stock),
ep_metrics:to_picas(Dimensions).
%% @doc Return paper stock dimensions in points
-spec stock_size_points(paper_stock()) -> integer_xy().
stock_size_points(Stock) ->
Dimensions = stock_size_inches(Stock),
ep_metrics:to_points(Dimensions).
%% @doc Return paper stock width in points
-spec stock_width(paper_stock()) -> integer().
stock_width(Stock) ->
{Width, _Height} = stock_size_points(Stock),
Width.
%% @doc Return paper stock height in points
-spec stock_height(paper_stock()) -> points().
stock_height(Stock) ->
{_Width, Height} = stock_size_points(Stock),
Height.
stock_n(_Stock) ->
{0, 0}.
stock_e(Stock) ->
{Width, _Height} = stock_size_points(Stock),
{0, Width}.
stock_s(Stock) ->
stock_size_points(Stock).
stock_w(Stock) ->
{_Width, Height} = stock_size_points(Stock),
{0, Height}.
%% @doc Stock formats commonly handled by desktop printers.
-spec desktop_printer_stock() -> list().
desktop_printer_stock() ->
    [letter, legal, a4].
%% @doc Return paper stock dimensions
%% http://resources.printhandbook.com/pages/paper-size-chart.php
%% https://www.thebookdesigner.com/2010/09/self-publishing-basics-how-to-pick-the-size-of-your-book/
-spec standard_sizes() -> list({paper_stock(), xy()}).
standard_sizes() ->
[{a0, {33.1, 46.8}},
{a1, {23.4, 33.1}},
{a2, {16.5, 23.4}},
{a3, {11.7, 16.5}},
{a4, {8.3, 11.7}},
{a5, {5.8, 8.3}},
{a6, {4.1, 5.8}},
{a7, {2.9, 4.1}},
{a8, {2.0, 2.9}},
{a9, {1.5, 2.0}},
{a10, {1.0, 1.5}},
{avery_labels, {8.5, 11}},
{envelope_no10, {4.125, 9.5}},
{letter, {8.5, 11}},
{legal, {8.5, 14}},
{tabloid, {11, 17}}
]. | src/media/ep_paper_stock.erl | 0.503662 | 0.404449 | ep_paper_stock.erl | starcoder |
%% @author <NAME> <<EMAIL>>
%% @copyright 2021 <NAME>
%% @doc Fetch data from URLs. Interfaces to z_url_fetch and z_url_metadata.
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(z_fetch).
-export([
fetch/3,
fetch_partial/3,
metadata/3,
as_data_url/3,
error_msg/2
]).
-include("../../include/zotonic.hrl").
%% @doc Fetch data from an URL, after letting modules adjust the fetch options.
-spec fetch( string() | binary(), z_url_fetch:options(), z:context() ) -> z_url_fetch:fetch_result().
fetch(Url, Options, Context) ->
    UrlBin = z_convert:to_binary(Url),
    FetchOptions = add_options(UrlBin, Options, Context),
    z_url_fetch:fetch(UrlBin, FetchOptions).
%% @doc Partially fetch data from an URL, after letting modules adjust the
%% fetch options.
-spec fetch_partial( string() | binary(), z_url_fetch:options(), z:context() ) -> z_url_fetch:fetch_result().
fetch_partial(Url, Options, Context) ->
    UrlBin = z_convert:to_binary(Url),
    FetchOptions = add_options(UrlBin, Options, Context),
    z_url_fetch:fetch_partial(UrlBin, FetchOptions).
%% @doc Fetch the metadata from an URL. Let modules change the fetch options.
%% NOTE(review): unlike fetch/3 and fetch_partial/3, the Url is not normalized
%% to a binary before being passed to add_options/3 — confirm this is intended.
%% Fixed a typo in the spec: 'z_url_metadat' -> 'z_url_metadata' (the module
%% actually called below).
-spec metadata( string() | binary(), z_url_fetch:options(), z:context() ) -> {ok, z_url_metadata:metadata()} | {error, term()}.
metadata(Url, Options, Context) ->
    Options1 = add_options(Url, Options, Context),
    z_url_metadata:fetch(Url, Options1).
%% @doc Fetch data from an URL and return it as a "data:" URL: the response
%% body base64-encoded and tagged with the content type from the response
%% headers. Fixed a typo in the spec: 'z:contex()' -> 'z:context()'.
-spec as_data_url( string() | binary() | undefined, z_url_fetch:options(), z:context()) -> {ok, binary()} | {error, term()}.
as_data_url(Url, Options, Context) ->
    case fetch(Url, Options, Context) of
        {ok, {_FinalUrl, Headers, _Size, Data}} ->
            Mime = content_type(Headers),
            {ok, iolist_to_binary([
                <<"data:">>, Mime,
                <<";base64,">>,
                base64:encode(Data)
            ])};
        {error, _} = Error ->
            Error
    end.
%% Extract the mime type from response headers; strips any ";charset=..."
%% parameters and falls back to application/octet-stream when the header
%% is missing.
-spec content_type( [ {string(), string()} ]) -> binary().
content_type(Headers) ->
    case proplists:get_value("content-type", Headers) of
        undefined ->
            <<"application/octet-stream">>;
        Value ->
            BinValue = z_convert:to_binary(Value),
            [Mime | _Params] = binary:split(BinValue, <<";">>),
            z_string:trim(Mime)
    end.
%% @doc Map (http) errors to readable format.
-spec error_msg(integer() | {error, term()}, z:context()) -> binary().
error_msg({Status, _Url, _Hs, _Length, _Body}, Context) ->
error_msg(Status, Context);
error_msg(401, Context) ->
?__("Unauthorized to access the remote resource URL.", Context);
error_msg(403, Context) ->
?__("Forbidden to access the remote resource URL.", Context);
error_msg(404, Context) ->
?__("The resource at the URL can not be found.", Context);
error_msg(410, Context) ->
?__("The resource at the URL is gone.", Context);
error_msg(429, Context) ->
?__("Too many requests for the remote server, try again later.", Context);
error_msg(S4xx, Context) when S4xx >= 400, S4xx < 500 ->
?__("The remote server can not handle this URL.", Context);
error_msg(503, Context) ->
?__("The remote server for the URL is having temporary problems.", Context);
error_msg(S5xx, Context) when S5xx >= 500 ->
?__("The remote server for the URL is having problems.", Context);
error_msg(_, Context) ->
?__("Could not fetch the remote resource URL.", Context).
%% @doc Add or modify fetch options. For development sites the 'insecure' option
%% is added, as development sites are using self-signed certificates. The
%% #url_fetch_options notification is used to add an authorization header or
%% other option for a specific site. If no language is set in the options then
%% the current context language is used for the preferred language.
add_options(Url, Options, Context) ->
    %% Development environments use self-signed certificates.
    Options1 = case proplists:is_defined(insecure, Options) of
        false ->
            case m_site:environment(Context) of
                development -> [ insecure | Options ];
                _ -> Options
            end;
        true ->
            Options
    end,
    %% Default the preferred language to the current context language.
    Options2 = case proplists:is_defined(language, Options1) of
        false ->
            [ {language, z_context:language(Context)} | Options1 ];
        true ->
            Options1
    end,
    case uri_string:parse(Url) of
        #{ host := Host } = Parts ->
            HostPort = case maps:find(port, Parts) of
                {ok, Port} -> <<Host/binary, $:, (integer_to_binary(Port))/binary>>;
                error -> Host
            end,
            case z_notifier:first(#url_fetch_options{
                    url = Url,
                    host = HostPort,
                    options = Options2
                }, Context)
            of
                %% Bug fix: this previously returned Options1, silently
                %% dropping the 'language' option added above whenever no
                %% module handled the notification.
                undefined -> Options2;
                Options3 when is_list(Options3) -> Options3
            end;
        _ ->
            Options2
    end. | apps/zotonic_core/src/support/z_fetch.erl | 0.519034 | 0.478529 | z_fetch.erl | starcoder
%% @doc Utility module for working with compressed and compact ECC keys.
%%
%% The "compact" set of routines check whether NIST P-256 (secp256r1) ECC keys
%% can be compressed to only their X coordinate. This implementation
%% implements the strategy described in
%% [https://tools.ietf.org/html/draft-jivsov-ecc-compact-05] and is based on a
%% 1986 publication by <NAME> in 'CRYPTO 85'. This method is believed to
%% be unpatentable. See [https://cr.yp.to/ecdh/patents.html] for more details.
%%
%% The "compressed" set of routines compress and uncompress SEC2 K-256
%% (secp256k1) ECC keys. The patents on these methods are believed to have
%% expired.
%%
%% The implementation is done as a NIF linked against the system's libcrypto.
-module(ecc_compact).
-export([is_compact/1, generate_key/0, recover_compact_key/1,
recover_compressed_key/1]).
-on_load(init/0).
-include_lib("public_key/include/public_key.hrl").
-define(APPNAME, ecc_compact).
-define(LIBNAME, ecc_compact).
-type private_key() :: #'ECPrivateKey'{}.
-type public_key_p256() :: {#'ECPoint'{}, {namedCurve, ?secp256r1}}.
-type public_key_k256() :: {#'ECPoint'{}, {namedCurve, ?secp256k1}}.
-type coordinate() :: <<_:256>>.
-type point() :: <<_:512>>.
-type compressed_point() :: <<_:264>>.
-type compact_key() :: coordinate().
-export_type([public_key_p256/0, public_key_k256/0, private_key/0,
compact_key/0]).
init() ->
SoName = case code:priv_dir(?APPNAME) of
{error, bad_name} ->
case filelib:is_dir(filename:join(["..", priv])) of
true ->
filename:join(["..", priv, ?LIBNAME]);
_ ->
filename:join([priv, ?LIBNAME])
end;
Dir ->
filename:join(Dir, ?LIBNAME)
end,
erlang:load_nif(SoName, 0).
%% @doc Generate a NIST p-256 key that is compliant with the compactness
%% restrictions.
-spec generate_key() -> {ok, private_key(), compact_key()}.
generate_key() ->
Key = public_key:generate_key({namedCurve,?secp256r1}),
#'ECPrivateKey'{parameters=_Params, publicKey=PubKey} = Key,
case is_compact_nif(PubKey) of
true ->
%% ok, get the X/Y coordinates
<<4, X:32/binary, Y:32/binary>> = PubKey,
case recover_compact_int(X) of
Y ->
{ok, Key, X};
Z ->
%% this should never happen, but blow up dramatically if it does
erlang:error({key_recovery_failure, Key, X, Y, Z})
end;
false ->
generate_key()
end.
%% @doc Given the X coordinate of a public key from a compliant point on the
%% curve, return the public key.
-spec recover_compact_key(compact_key()) -> public_key_p256().
recover_compact_key(X) when is_binary(X), byte_size(X) == 32 ->
Y = recover_compact_int(X),
{#'ECPoint'{point = <<4, X:32/binary, Y:32/binary>>}, {namedCurve,
?secp256r1}}.
%% Recover the Y coordinate for a compact X coordinate via the NIF and
%% return it as a full 32-byte big-endian binary.
-spec recover_compact_int(compact_key()) -> coordinate().
recover_compact_int(X) ->
    pad_to_32(recover_compact_nif(X)).

%% The NIF returns the bignum's minimal big-endian encoding, which omits
%% leading zero bytes; left-pad with zero bytes back up to 32 bytes.
pad_to_32(Bin) when byte_size(Bin) >= 32 ->
    Bin;
pad_to_32(Bin) ->
    PadBits = (32 - byte_size(Bin)) * 8,
    <<0:PadBits/integer-unsigned, Bin/binary>>.
%% @doc Given a compressed point representing a secp256k1 public key
%% return the full uncompressed point.
-spec recover_compressed_key(compressed_point()) -> public_key_k256().
recover_compressed_key(C) when is_binary(C), byte_size(C) == 33 ->
<<TaggedFullPoint:65/binary>> = recover_compressed_nif(C),
{#'ECPoint'{point = TaggedFullPoint}, {namedCurve, ?secp256k1}}.
%% @doc Returns whether a given key is compliant with the compactness
%% restrictions. In the case that the key is compliant, also return the bare X
%% coordinate. Accepts a private key record, a public key tuple, or a raw
%% uncompressed point (0x04-tagged, 65 bytes); only secp256r1 is allowed.
-spec is_compact(private_key() | public_key_p256() | point()) ->
{true, compact_key()} | false.
%% Private key: delegate to the raw public point it embeds.
is_compact(#'ECPrivateKey'{parameters={namedCurve, ?secp256r1}, publicKey=PubKey}) ->
is_compact(PubKey);
%% Public key tuple: unwrap the ECPoint.
is_compact({#'ECPoint'{point=PubKey}, {namedCurve, ?secp256r1}}) ->
is_compact(PubKey);
%% Raw uncompressed point: ask the NIF, and hand back the X coordinate.
is_compact(<<4, X:32/binary, _Y:32/binary>> = PubKey) ->
case is_compact_nif(PubKey) of
true ->
{true, X};
false ->
false
end;
%% Anything else (wrong curve, malformed point) is a caller error.
is_compact(_) ->
erlang:error(badarg).
% This is just a simple place holder. It mostly shouldn't ever be called
% unless there was an unexpected error loading the NIF shared library.
is_compact_nif(_) ->
not_loaded(?LINE).
recover_compact_nif(_) ->
not_loaded(?LINE).
recover_compressed_nif(_) ->
not_loaded(?LINE).
not_loaded(Line) ->
erlang:nif_error({not_loaded, [{module, ?MODULE}, {line, Line}]}). | src/ecc_compact.erl | 0.560734 | 0.413418 | ecc_compact.erl | starcoder |
-module(eql).
-export([ compile/1
, compile/2
, compile/3
, new_tab/1
, get_query/2
, get_query/3
]).
%% a query is either a binary string or a list of binary strings and atoms
%% the atoms are replaced during `get_query` with the provided parameters
%% resulting in an iolist
-type query() :: binary() | [binary() | atom()].
-type query_list() :: [{atom(), query()}].
-type compile_opt() :: flush | %% delete all (or for this namespace) queries first
namespace. %% store queries in ets table keyed on a 2-tuple of
%% the filename as an atom and the name of the query
%% returns a list of
%% Compile one query file, or every file with a given extension inside a
%% directory. For a directory the parsed queries of all matching files are
%% concatenated; a parse failure in any file crashes with a badmatch, as
%% before. Removed a leftover io:format/2 debug print (library code should
%% not write to stdout) and a commented-out alternative implementation.
-spec compile(file:filename_all()) -> {ok, query_list()} | any().
compile({directory, Path, Ext}) when is_binary(Path) ->
    compile({directory, binary_to_list(Path), Ext});
compile({directory, Path, Ext}) when is_binary(Ext) ->
    compile({directory, Path, binary_to_list(Ext)});
compile({directory, Path, Ext}) ->
    DotExt = "." ++ Ext,
    {ok, Names} = file:list_dir(Path),
    Files = [filename:join(Path, Name) || Name <- Names,
                                          filename:extension(Name) =:= DotExt],
    Queries = lists:foldl(fun(File, Acc) ->
                                  {ok, Res} = compile(File),
                                  Acc ++ Res
                          end, [], Files),
    {ok, Queries};
compile(File) ->
    eql_parse:file(File).
%% Compile a file and store its queries in the ETS table Tab (no options).
compile(Tab, File) ->
compile(Tab, File, []).
%% Compile a file and store its queries in Tab. With the 'namespace' option
%% keys become {Namespace, Name} tuples derived from the file name; the
%% 'flush' option (handled by maybe_flush/3) clears old entries first.
-spec compile(atom() | ets:tid(), file:filename_all(), [compile_opt()]) -> ok | any().
compile(Tab, File, Opts) ->
maybe_flush(Tab, File, Opts),
case compile(File) of
{ok, Queries} when is_list(Queries) ->
case proplists:get_value(namespace, Opts, false) of
true ->
Namespace = file_to_namespace(File),
true = ets:insert(Tab, [{{Namespace, Name}, Query} || {Name, Query} <- Queries]),
ok;
false ->
true = ets:insert(Tab, Queries),
ok
end;
%% Parse errors are passed through to the caller unchanged.
Error ->
Error
end.
%% Create a named ETS table tuned for concurrent query lookups.
new_tab(Name) ->
ets:new(Name, [named_table, set, {read_concurrency, true}]).
-spec get_query(atom(), query_list() | ets:tab(), proplists:proplist()) ->
{ok, iolist()} | undefined.
get_query(Name, TidOrList, Params) ->
case get_query(Name, TidOrList) of
{ok, Query} ->
{ok, lists:map(fun(Key) when is_atom(Key) ->
proplists:get_value(Key, Params);
(S) ->
S
end, Query)};
Other ->
Other
end.
-spec get_query(atom(), query_list() | ets:tab()) -> {ok, iolist()} | undefined.
%% Look up a query by name, either in an ETS table (table id reference or
%% named-table atom) or in a property list. Returns {ok, Query} on a hit
%% and 'undefined' on a miss.
get_query(Name, Table) when is_reference(Table); is_atom(Table) ->
    case ets:lookup(Table, Name) of
        [{_, Query}] -> {ok, Query};
        [] -> undefined
    end;
get_query(Name, Queries) ->
    case lists:keyfind(Name, 1, Queries) of
        {Name, Found} -> {ok, Found};
        false -> undefined
    end.
%% internal functions
%% Derive the namespace atom for a query file: its basename without the
%% extension. Only ever applied to developer-controlled file names, so the
%% list_to_atom/1 call does not see untrusted input.
-spec file_to_namespace(file:filename_all()) -> atom().
file_to_namespace(File) ->
    BaseName = filename:basename(File),
    list_to_atom(filename:rootname(BaseName)).
%% Honour the 'flush' compile option: delete previously stored queries
%% before inserting new ones. With 'namespace' set only this file's
%% namespace is cleared; otherwise the whole table is emptied.
maybe_flush(Tab, File, Opts) ->
case proplists:get_value(flush, Opts, false) of
true ->
case proplists:get_value(namespace, Opts, false) of
true ->
Namespace = file_to_namespace(File),
ets:match_delete(Tab, {{Namespace, '_'}, '_'});
false ->
ets:delete_all_objects(Tab)
end;
false ->
ok
end.
% @copyright 2007-2011 Zuse Institute Berlin
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%% @author <NAME> <<EMAIL>>
%% @doc Math utility functions.
%% @version $Id$
-module(mathlib).
-author('<EMAIL>').
-vsn('$Id$').
-export([closestPoints/1, euclideanDistance/1, euclideanDistance/2, u/1,
vecAdd/2, vecSub/2, vecMult/2, vecWeightedAvg/4, zeros/1, median/1,
aggloClustering/3]).
-type(vector() :: [number(),...]).
-type(centroid() :: vector()).
%% @doc Median of an unsorted non-empty list of numbers, i.e. a vector.
%% For an even number of elements the mean of the two middle elements is
%% returned (a float); otherwise the middle element itself.
-spec median([number(),...]) -> number().
median(Values) ->
    Sorted = lists:sort(Values),
    Len = length(Sorted),
    Mid = Len div 2,
    case Len rem 2 of
        1 -> lists:nth(Mid + 1, Sorted);
        0 -> (lists:nth(Mid, Sorted) + lists:nth(Mid + 1, Sorted)) / 2
    end.
%% @doc Element-wise sum of two equal-length vectors, i.e. X + Y.
-spec vecAdd(X::[number(),...], Y::[number(),...]) -> [number(),...].
vecAdd(Xs, Ys) ->
    [A + B || {A, B} <- lists:zip(Xs, Ys)].
%% @doc Element-wise difference of two equal-length vectors, i.e. X - Y.
-spec vecSub(X::[number(),...], Y::[number(),...]) -> [number(),...].
vecSub(Xs, Ys) ->
    [A - B || {A, B} <- lists:zip(Xs, Ys)].
%% @doc Scale every component of vector V by the scalar S.
-spec vecMult(V::[number(),...], S::float()) -> [number(),...].
vecMult(Vs, Scale) ->
    [Scale * C || C <- Vs].
%% @doc Component-wise weighted average: (W1*V1 + W2*V2) / (W1 + W2).
-spec vecWeightedAvg(V1::[number(),...], V2::[number(),...], W1::float(), W2::float()) -> [number(),...].
vecWeightedAvg(V1, V2, W1, W2) ->
    Summed = vecAdd(vecMult(V1, W1), vecMult(V2, W2)),
    vecMult(Summed, 1 / (W1 + W2)).
%% @doc Euclidean (L2) norm of V, i.e. the distance between the origin and V.
-spec euclideanDistance(V::[number(),...]) -> Distance::float().
euclideanDistance(V) ->
    math:sqrt(lists:sum([math:pow(C, 2) || C <- V])).
%% @doc Euclidean distance between two vectors.
%% NOTE(review): relies on util:zipfoldl/5, a project helper not visible
%% here; presumably it zips V and W pairwise with the first fun and folds
%% the results with the second — confirm its semantics before refactoring.
-spec euclideanDistance(V::vector(), W::vector()) -> Distance::float().
euclideanDistance(V, W) ->
math:sqrt(util:zipfoldl(fun(Vi, Wi) -> math:pow(Vi - Wi, 2) end,
fun(Dist, OldDist) -> OldDist + Dist end,
V, W, 0.0)).
%% @doc Unit vector pointing in the direction of V: u(v) = v / ||v||.
-spec u(V::vector()) -> UV::vector().
u(V) ->
    Norm = euclideanDistance(V),
    vecMult(V, 1 / Norm).
%% @doc Find the pair of closest centroids. Returns the minimum distance and
%% the 1-based indices of the two centroids, or the sentinel {-1, -1, -1}
%% when fewer than two centroids are given.
-spec closestPoints(Centroids::[centroid()])
-> {Min::number(), I::pos_integer(), J::pos_integer()} | {-1, -1, -1}.
%% Seed the search with the distance between the first two centroids.
closestPoints([C1, C2 | Rest]) ->
closestPointsForI([C1, C2 | Rest], 1, 2, euclideanDistance(C1, C2), 1, 2);
closestPoints(_) ->
{-1, -1, -1}.
%% Outer loop: for the centroid at index I, compare against all later
%% centroids (inner loop below), carrying the smallest distance seen so far.
-spec closestPointsForI(Centroids::[centroid()], I::pos_integer(), J::pos_integer(),
Min::number(), MinI::pos_integer(), MinJ::pos_integer())
-> {DistMin::number(), IMin::pos_integer(), JMin::pos_integer()}.
closestPointsForI([First | Rest], I, J, Min, MinI, MinJ) ->
{Min1, MinI1, MinJ1} = closestPointsForJ(First, Rest, I, J, Min, MinI, MinJ),
I1 = I + 1,
J1 = J + 1,
closestPointsForI(Rest, I1, J1, Min1, MinI1, MinJ1);
closestPointsForI([], _, _, Min, I, J) ->
{Min, I, J}.
%% Inner loop: distance from First (index I) to every later centroid; J is
%% the absolute index of the candidate currently being compared.
-spec closestPointsForJ(First::centroid(), Rest::[centroid()],
I::pos_integer(), J::pos_integer(),
Min::number(), MinI::pos_integer(), MinJ::pos_integer())
-> {DistMin::number(), IMin::pos_integer(), JMin::pos_integer()}.
closestPointsForJ(First, [Centroid | Rest], I, J, Min, MinI, MinJ) ->
Dist = euclideanDistance(First, Centroid),
{Min1, MinI1, MinJ1} = condExchange(Min, MinI, MinJ, Dist, I, J),
J1 = J + 1,
closestPointsForJ(First, Rest, I, J1, Min1, MinI1, MinJ1);
closestPointsForJ(_, [], _, _, Min, MinI, MinJ) ->
{Min, MinI, MinJ}.
%% @doc Keep track of the smallest distance seen so far: return the new
%% {distance, i, j} triple only when it is strictly smaller than the current
%% minimum; ties keep the current triple.
-spec condExchange(Min::number(), MinI::pos_integer(), MinJ::pos_integer(),
Dist::number(), DistI::pos_integer(), DistJ::pos_integer())
-> {DistMin::number(), IMin::integer(), JMin::integer()}.
condExchange(CurMin, CurI, CurJ, NewDist, NewI, NewJ) ->
    if
        NewDist < CurMin -> {NewDist, NewI, NewJ};
        true -> {CurMin, CurI, CurJ}
    end.
%% @doc Create a list with N zeros.
-spec zeros(N::0) -> [];
(N::pos_integer()) -> [0,...].
zeros(N) ->
    %% lists:duplicate/2 returns [] for N = 0, covering both spec clauses
    %% without the original's hand-rolled comprehension over lists:seq/2.
    lists:duplicate(N, 0).
%% @doc Agglomerative clustering: repeatedly find the closest pair of
%% centroids and merge them (size-weighted average, sizes summed) while
%% their distance is within Radius. Returns the remaining centroids and
%% their sizes.
-spec aggloClustering(Centroids::[centroid()], Sizes::vector(),
Radius::number()) -> {[centroid()], vector()}.
aggloClustering(Centroids, Sizes, Radius) ->
{Min, I, J} = closestPoints(Centroids),
aggloClusteringHelper(Centroids, Sizes, Radius, Min, I, J).
-spec aggloClusteringHelper
(Centroids::[centroid(),...], Sizes::vector(), Radius::number(),
Min::number(), I::pos_integer(), J::pos_integer()) -> {[centroid()], vector()};
(Centroids::[centroid()], Sizes::vector(), Radius::number(),
Min::-1, I::-1, J::-1) -> {[centroid()], vector()}.
% Note: closestPoints/1 creates Min, I, J and only returns {-1, -1, -1} if
% Centroids contains less than two elements. This is not the case in the first
% pattern and we can thus assume these values are pos_integer().
% NOTE(review): tools:rmvTwo/3 is a project helper; presumably it removes the
% elements at positions I and J — confirm before refactoring.
aggloClusteringHelper([_,_|_] = Centroids, [_,_|_] = Sizes, Radius, Min, I, J) when Min =< Radius ->
C1 = lists:nth(I, Centroids),
C2 = lists:nth(J, Centroids),
S1 = lists:nth(I, Sizes),
S2 = lists:nth(J, Sizes),
% Merge the pair into its size-weighted average and re-run the search.
Centroids1 = [vecWeightedAvg(C1, C2, S1, S2) | tools:rmvTwo(Centroids, I, J)],
{Min1, I1, J1} = closestPoints(Centroids1),
aggloClusteringHelper(Centroids1, [S1 + S2 | tools:rmvTwo(Sizes, I, J)],
Radius, Min1, I1, J1);
% No pair left within Radius (or fewer than two centroids): done.
aggloClusteringHelper(Centroids, Sizes, _Radius, _Min, _I, _J) ->
{Centroids, Sizes}. | src/mathlib.erl | 0.731346 | 0.441824 | mathlib.erl | starcoder
-module(slacker_reaction).
-include("spec.hrl").
-export([add/3, get/2, list/2, remove/3]).
%% @doc Add a reaction for an item at a given timestamp.
%%
%% Options can be:
%% file: file to add reaction to
%% file_comment: file comment to add reaction to
%% channel: channel where the message to add reaction to was posted
%% timestamp: timestamp of the message to add reaction to
%%
-spec add(Token :: string(), Name :: string(), Options :: list()) -> http_response().
add(Token, Name, Options) ->
slacker_request:send("reactions.add", [{"token", Token},{"name", Name}], Options).
%% @doc List all reactions at a given timestamp.
%%
%% Options can be:
%% file: file to get reaction for
%% file_comment: file comment to get reaction for
%% channel: channel where the message to add reaction to was posted
%% timestamp: timestamp of the message to add reaction to
%% full: if true always return the complete reaction list
%%
-spec get(Token :: string(), Options :: list()) -> http_response().
get(Token, Options) ->
slacker_request:send("reactions.get", [{"token", Token}], Options).
%% @doc returns a list of all items reacted to by a use.
%%
%% Options can be:
%% user: show reactions made by this user (default: current user)
%% full: if true always return the complete reaction list
%% count: number of items to return per page (default: 100)
%% page: page number of results to return (default: 1)
%%
-spec list(Token :: string(), Options :: list()) -> http_response().
list(Token, Options) ->
slacker_request:send("reactions.list", [{"token", Token}], Options).
%% @doc Removes a reaction at a given timestamp.
%%
%% Options can be:
%% file: file to remove reaction from
%% file_comment: file comment to remove reaction from
%% channel: channel where the message to remove reaction from was posted
%% timestamp: timestamp of the message to remove reaction from
%%
-spec remove(Token :: string(), Name :: string(), Options :: list()) -> http_response().
remove(Token, Name, Options) ->
slacker_request:send("reactions.remove", [{"token", Token},{"name", Name}], Options). | src/slacker_reaction.erl | 0.562056 | 0.489931 | slacker_reaction.erl | starcoder |
-module(prop_tchannel_conn).
-include_lib("proper/include/proper.hrl").
%% @doc Checks if TCP stream receiver can receive arbitrarily-cutoff packets.
%%
%% 1. Construct many valid length + payload packets. Merge them.
%% 2. Split the result to arbitrary chunks.
%% 3. Pass the list through tchannel_conn:stream_recv/2 and verify they were
%% properly reconstructed.
prop_stream_recv() ->
?FORALL({Orig, Split},
big_packet_split(),
begin
{Got, <<>>, undefined} = lists:foldl(
fun tchannel_packet:stream_recv/2,
{[], <<>>, undefined},
Split),
Orig =:= lists:reverse(Got)
end
).
%%==============================================================================
%% Helpers
%%==============================================================================
%% @doc Generate a few valid packets, merge them, split them out arbitrarily.
big_packet_split() ->
PacketSize = union([N-2 || N <- lists:seq(16, 512)]),
PacketSizedBinary = ?LET(Len, PacketSize, binary(Len)),
Packet = ?LET(Bin, PacketSizedBinary, <<(size(Bin)+2):16, Bin/binary>>),
FewPackets = ?LET(N, union([1,2,3,5,7]), vector(N, Packet)),
PCutoffs = ?LET({Pkts, VectorSize},
{FewPackets, union([0,1,2,3,4,5])},
{Pkts, vector(VectorSize, integer(1, iolist_size(Pkts)))}),
?LET({Pkts, Cutoff},
PCutoffs,
{Pkts, cutoff_bin(Cutoff, iolist_to_binary(Pkts))}).
%% @doc Given binary and cutoff array, do the actual cutoff.
cutoff_bin(Cutoff, BigPacket) ->
Parts = cutoff([0|Cutoff] ++ [iolist_size(BigPacket)]),
{<<>>, X} = lists:foldl(
fun(Size, {Rem, Acc}) ->
<<Part:Size/binary, Rest/binary>> = Rem,
{Rest, [Part|Acc]}
end,
{BigPacket, []},
Parts),
lists:reverse(X).
%% @doc Converts breaking points in a binary to the sizes of the slices
%% between consecutive points. Points are sorted and de-duplicated first;
%% at least two distinct points are required.
cutoff(Breaks) ->
    [First | Rest] = lists:usort(Breaks),
    slice_sizes(First, Rest, []).

%% Walk the sorted points pairwise, accumulating the gap between neighbours.
slice_sizes(Prev, [Last], Acc) ->
    lists:reverse([Last - Prev | Acc]);
slice_sizes(Prev, [Next | More], Acc) ->
    slice_sizes(Next, More, [Next - Prev | Acc]).
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.